Message-Id: <20240905150543.3766895-6-roberto.sassu@huaweicloud.com>
Date: Thu, 5 Sep 2024 17:05:34 +0200
From: Roberto Sassu <roberto.sassu@...weicloud.com>
To: zohar@...ux.ibm.com,
dmitry.kasatkin@...il.com,
eric.snowberg@...cle.com,
corbet@....net,
akpm@...ux-foundation.org,
paul@...l-moore.com,
jmorris@...ei.org,
serge@...lyn.com,
shuah@...nel.org,
mcoquelin.stm32@...il.com,
alexandre.torgue@...s.st.com
Cc: linux-integrity@...r.kernel.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-security-module@...r.kernel.org,
linux-kselftest@...r.kernel.org,
wufan@...ux.microsoft.com,
pbrobinson@...il.com,
zbyszek@...waw.pl,
hch@....de,
mjg59@...f.ucam.org,
pmatilai@...hat.com,
jannh@...gle.com,
dhowells@...hat.com,
jikos@...nel.org,
mkoutny@...e.com,
ppavlu@...e.com,
petr.vorel@...il.com,
mzerqung@...inter.de,
kgold@...ux.ibm.com,
Roberto Sassu <roberto.sassu@...wei.com>
Subject: [PATCH v5 05/14] digest_cache: Add hash tables and operations
From: Roberto Sassu <roberto.sassu@...wei.com>
Add a linked list of hash tables to the digest cache, one per algorithm,
containing the digests extracted from digest lists.
The number of hash table slots is determined by dividing the number of
digests to be added by the desired average depth of the collision lists,
defined with CONFIG_DIGEST_CACHE_HTABLE_DEPTH (currently set to 30). The
depth can be changed in the kernel configuration.
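For example, assuming a digest list carrying 3000 digests of one algorithm,
its hash table gets DIV_ROUND_UP(3000, 30) = 100 slots, so that each
collision list holds roughly 30 digests on average.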
Add digest_cache_htable_init() and digest_cache_htable_add(), to be called
by digest list parsers, in order to allocate the hash tables and to add
extracted digests.
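A minimal sketch of the parser side follows, assuming a hypothetical
parse_digest_list() built next to internal.h; the function name, its
arguments and the digest iteration are made up for illustration, only
digest_cache_htable_init() and digest_cache_htable_add() are the API
added by this patch:

#include "internal.h"	/* digest cache API; hypothetical parser placement */

/* Illustrative sketch only: parser name and arguments are hypothetical. */
static int parse_digest_list(struct digest_cache *digest_cache,
			     const u8 *digests, u64 num_digests)
{
	unsigned int len = hash_digest_size[HASH_ALGO_SHA256];
	u64 i;
	int ret;

	/* Size the SHA-256 hash table for num_digests entries. */
	ret = digest_cache_htable_init(digest_cache, num_digests,
				       HASH_ALGO_SHA256);
	if (ret < 0)
		return ret;

	/* Add each extracted digest to the hash table. */
	for (i = 0; i < num_digests; i++) {
		ret = digest_cache_htable_add(digest_cache,
					      (u8 *)(digests + i * len),
					      HASH_ALGO_SHA256);
		if (ret < 0)
			return ret;
	}

	return 0;
}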
Add digest_cache_htable_free(), to let the Integrity Digest Cache free the
hash tables at the time a digest cache is freed.
Add digest_cache_htable_lookup() to search a digest in the hash table of a
digest cache for a given algorithm.
Add digest_cache_lookup() to the public API, to let users of the Integrity
Digest Cache search a digest in a digest cache and, in a subsequent patch,
to search it in the digest caches for each directory entry.
Return the digest cache containing the digest as uintptr_t, to avoid it
being accidentally put. It should be cast back to (struct digest_cache *)
in the functions that require it.
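On the lookup side, a caller could combine the public functions as in the
sketch below; verify_file_digest() and the origin of the file digest are
hypothetical, while digest_cache_get(), digest_cache_lookup() and
digest_cache_put() are the interface exposed in <linux/digest_cache.h>:

#include <linux/digest_cache.h>

/* Illustrative sketch only: the caller and its policy are hypothetical. */
static int verify_file_digest(struct dentry *dentry, u8 *file_digest,
			      enum hash_algo algo)
{
	struct digest_cache *digest_cache;
	uintptr_t found;

	digest_cache = digest_cache_get(dentry);
	if (!digest_cache)
		return -ENOENT;

	/* Non-zero means the digest is present in the digest cache. */
	found = digest_cache_lookup(dentry, digest_cache, file_digest, algo);

	/* Put only the reference obtained with digest_cache_get(). */
	digest_cache_put(digest_cache);

	return found ? 0 : -ENOENT;
}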
Finally, add digest_cache_hash_key() to compute the hash table key from the
first two bytes of the digest (modulo the number of slots).
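For instance, with 100 slots, a digest whose first two bytes are 0xab 0xcd
is placed in slot (0xab | 0xcd << 8) % 100 = 52651 % 100 = 51.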
Signed-off-by: Roberto Sassu <roberto.sassu@...wei.com>
---
include/linux/digest_cache.h | 11 +
security/integrity/digest_cache/Kconfig | 11 +
security/integrity/digest_cache/Makefile | 2 +-
security/integrity/digest_cache/htable.c | 250 +++++++++++++++++++++
security/integrity/digest_cache/internal.h | 43 ++++
security/integrity/digest_cache/main.c | 3 +
6 files changed, 319 insertions(+), 1 deletion(-)
create mode 100644 security/integrity/digest_cache/htable.c
diff --git a/include/linux/digest_cache.h b/include/linux/digest_cache.h
index 86e6c5a0b896..53a7edc04310 100644
--- a/include/linux/digest_cache.h
+++ b/include/linux/digest_cache.h
@@ -11,10 +11,14 @@
#define _LINUX_DIGEST_CACHE_H
#include <linux/fs.h>
+#include <crypto/hash_info.h>
#ifdef CONFIG_INTEGRITY_DIGEST_CACHE
struct digest_cache *digest_cache_get(struct dentry *dentry);
void digest_cache_put(struct digest_cache *digest_cache);
+uintptr_t digest_cache_lookup(struct dentry *dentry,
+ struct digest_cache *digest_cache,
+ u8 *digest, enum hash_algo algo);
#else
static inline struct digest_cache *digest_cache_get(struct dentry *dentry)
@@ -26,5 +30,12 @@ static inline void digest_cache_put(struct digest_cache *digest_cache)
{
}
+static inline uintptr_t digest_cache_lookup(struct dentry *dentry,
+ struct digest_cache *digest_cache,
+ u8 *digest, enum hash_algo algo)
+{
+ return 0UL;
+}
+
#endif /* CONFIG_INTEGRITY_DIGEST_CACHE */
#endif /* _LINUX_DIGEST_CACHE_H */
diff --git a/security/integrity/digest_cache/Kconfig b/security/integrity/digest_cache/Kconfig
index 6e61089acbe2..11097464540a 100644
--- a/security/integrity/digest_cache/Kconfig
+++ b/security/integrity/digest_cache/Kconfig
@@ -19,3 +19,14 @@ config DIGEST_LIST_DEFAULT_PATH
It can be changed at run-time, by writing the new path to the
securityfs interface. Digest caches created with the old path are
not affected by the change.
+
+config DIGEST_CACHE_HTABLE_DEPTH
+ int
+ default 30
+ help
+ Desired average depth of the collision list in the digest cache
+ hash tables.
+
+ A smaller value increases the number of hash table slots and makes
+ the search faster. A larger value decreases the number of hash table
+ slots and makes the search slower.
diff --git a/security/integrity/digest_cache/Makefile b/security/integrity/digest_cache/Makefile
index c351186d4e1e..0092c913979d 100644
--- a/security/integrity/digest_cache/Makefile
+++ b/security/integrity/digest_cache/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_INTEGRITY_DIGEST_CACHE) += digest_cache.o
-digest_cache-y := main.o secfs.o
+digest_cache-y := main.o secfs.o htable.o
diff --git a/security/integrity/digest_cache/htable.c b/security/integrity/digest_cache/htable.c
new file mode 100644
index 000000000000..385f81047b0c
--- /dev/null
+++ b/security/integrity/digest_cache/htable.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@...wei.com>
+ *
+ * Implement hash table operations for the digest cache.
+ */
+
+#define pr_fmt(fmt) "digest_cache: "fmt
+#include "internal.h"
+
+/**
+ * digest_cache_hash_key - Compute hash key
+ * @digest: Digest
+ * @num_slots: Number of slots in the hash table
+ *
+ * This function computes a hash key based on the first two bytes of the
+ * digest and the number of slots of the hash table.
+ *
+ * Return: Hash key.
+ */
+static inline unsigned int digest_cache_hash_key(u8 *digest,
+ unsigned int num_slots)
+{
+ /* Same as ima_hash_key() but parametrized. */
+ return (digest[0] | digest[1] << 8) % num_slots;
+}
+
+/**
+ * lookup_htable - Lookup a hash table
+ * @digest_cache: Digest cache
+ * @algo: Algorithm of the desired hash table
+ *
+ * This function searches the hash table for a given algorithm in the digest
+ * cache.
+ *
+ * Return: A hash table if found, NULL otherwise.
+ */
+static struct htable *lookup_htable(struct digest_cache *digest_cache,
+ enum hash_algo algo)
+{
+ struct htable *h;
+
+ list_for_each_entry(h, &digest_cache->htables, next)
+ if (h->algo == algo)
+ return h;
+
+ return NULL;
+}
+
+/**
+ * digest_cache_htable_init - Allocate and initialize the hash table
+ * @digest_cache: Digest cache
+ * @num_digests: Number of digests to add to the hash table
+ * @algo: Algorithm of the digests
+ *
+ * This function allocates and initializes the hash table for a given algorithm.
+ * The number of slots depends on the number of digests to be added to the
+ * digest cache and on CONFIG_DIGEST_CACHE_HTABLE_DEPTH, which states the
+ * desired average depth of the collision lists.
+ *
+ * Return: Zero on success, a negative error code otherwise.
+ */
+int digest_cache_htable_init(struct digest_cache *digest_cache, u64 num_digests,
+ enum hash_algo algo)
+{
+ struct htable *h;
+ unsigned int i;
+
+ h = lookup_htable(digest_cache, algo);
+ if (h)
+ return 0;
+
+ h = kmalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->num_slots = DIV_ROUND_UP(num_digests,
+ CONFIG_DIGEST_CACHE_HTABLE_DEPTH);
+ h->slots = kmalloc_array(h->num_slots, sizeof(*h->slots), GFP_KERNEL);
+ if (!h->slots) {
+ kfree(h);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < h->num_slots; i++)
+ INIT_HLIST_HEAD(&h->slots[i]);
+
+ h->num_digests = 0;
+ h->algo = algo;
+
+ list_add_tail(&h->next, &digest_cache->htables);
+
+ pr_debug("Initialized hash table for digest list %s, digests: %llu, slots: %u, algo: %s\n",
+ digest_cache->path_str, num_digests, h->num_slots,
+ hash_algo_name[algo]);
+ return 0;
+}
+
+/**
+ * digest_cache_htable_add - Add a new digest to the digest cache
+ * @digest_cache: Digest cache
+ * @digest: Digest to add
+ * @algo: Algorithm of digest
+ *
+ * This function, invoked by a digest list parser, adds a digest extracted
+ * from a digest list to the digest cache.
+ *
+ * Return: Zero on success, a negative error code otherwise.
+ */
+int digest_cache_htable_add(struct digest_cache *digest_cache, u8 *digest,
+ enum hash_algo algo)
+{
+ struct htable *h;
+ struct digest_cache_entry *entry;
+ unsigned int key;
+ int digest_len;
+
+ h = lookup_htable(digest_cache, algo);
+ if (!h) {
+ pr_debug("No hash table for algorithm %s was found in digest cache %s, initialize one\n",
+ hash_algo_name[algo], digest_cache->path_str);
+ return -ENOENT;
+ }
+
+ digest_len = hash_digest_size[algo];
+
+ entry = kmalloc(sizeof(*entry) + digest_len, GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->digest, digest, digest_len);
+
+ key = digest_cache_hash_key(digest, h->num_slots);
+ hlist_add_head(&entry->hnext, &h->slots[key]);
+ h->num_digests++;
+ pr_debug("Added digest %s:%*phN to digest cache %s, num of %s digests: %llu\n",
+ hash_algo_name[algo], digest_len, digest,
+ digest_cache->path_str, hash_algo_name[algo], h->num_digests);
+ return 0;
+}
+
+/**
+ * digest_cache_htable_lookup - Search a digest in the digest cache
+ * @dentry: Dentry of the file whose digest is looked up
+ * @digest_cache: Digest cache
+ * @digest: Digest to search
+ * @algo: Algorithm of the digest to search
+ *
+ * This function searches the passed digest and algorithm in the digest cache.
+ *
+ * Return: Zero if the digest is found, a negative error code otherwise.
+ */
+int digest_cache_htable_lookup(struct dentry *dentry,
+ struct digest_cache *digest_cache, u8 *digest,
+ enum hash_algo algo)
+{
+ struct digest_cache_entry *entry;
+ struct htable *h;
+ unsigned int key;
+ int digest_len = hash_digest_size[algo];
+ int search_depth = 0, ret = -ENOENT;
+
+ h = lookup_htable(digest_cache, algo);
+ if (!h)
+ goto out;
+
+ key = digest_cache_hash_key(digest, h->num_slots);
+
+ hlist_for_each_entry(entry, &h->slots[key], hnext) {
+ if (!memcmp(entry->digest, digest, digest_len)) {
+ pr_debug("Cache hit at depth %d for file %s, digest %s:%*phN in digest cache %s\n",
+ search_depth, dentry->d_name.name,
+ hash_algo_name[algo], digest_len, digest,
+ digest_cache->path_str);
+
+ return 0;
+ }
+
+ search_depth++;
+ }
+out:
+ pr_debug("Cache miss for file %s, digest %s:%*phN not in digest cache %s\n",
+ dentry->d_name.name, hash_algo_name[algo], digest_len, digest,
+ digest_cache->path_str);
+ return ret;
+}
+
+/**
+ * digest_cache_lookup - Search a digest in the digest cache
+ * @dentry: Dentry of the file whose digest is looked up
+ * @digest_cache: Digest cache
+ * @digest: Digest to search
+ * @algo: Algorithm of the digest to search
+ *
+ * This function calls digest_cache_htable_lookup() to search a digest in the
+ * passed digest cache, obtained with digest_cache_get().
+ *
+ * It returns the digest cache reference as uintptr_t, so that the digest cache
+ * is not accidentally put. The returned value should be cast back to a
+ * (struct digest_cache *) by callers that need the pointer.
+ *
+ * Return: A positive uintptr_t if the digest is found, zero if not.
+ */
+uintptr_t digest_cache_lookup(struct dentry *dentry,
+ struct digest_cache *digest_cache,
+ u8 *digest, enum hash_algo algo)
+{
+ int ret;
+
+ ret = digest_cache_htable_lookup(dentry, digest_cache, digest, algo);
+ if (ret < 0)
+ return 0UL;
+
+ return (uintptr_t)digest_cache;
+}
+EXPORT_SYMBOL_GPL(digest_cache_lookup);
+
+/**
+ * digest_cache_htable_free - Free the hash tables
+ * @digest_cache: Digest cache
+ *
+ * This function removes all digests from all hash tables in the digest cache,
+ * and frees the memory.
+ */
+void digest_cache_htable_free(struct digest_cache *digest_cache)
+{
+ struct htable *h, *h_tmp;
+ struct digest_cache_entry *p;
+ struct hlist_node *q;
+ unsigned int i;
+
+ list_for_each_entry_safe(h, h_tmp, &digest_cache->htables, next) {
+ for (i = 0; i < h->num_slots; i++) {
+ hlist_for_each_entry_safe(p, q, &h->slots[i], hnext) {
+ hlist_del(&p->hnext);
+ pr_debug("Removed digest %s:%*phN from digest cache %s\n",
+ hash_algo_name[h->algo],
+ hash_digest_size[h->algo], p->digest,
+ digest_cache->path_str);
+ kfree(p);
+ }
+ }
+
+ list_del(&h->next);
+ kfree(h->slots);
+ kfree(h);
+ }
+}
diff --git a/security/integrity/digest_cache/internal.h b/security/integrity/digest_cache/internal.h
index 2fcfa9b4cf13..f4b146a1bbaf 100644
--- a/security/integrity/digest_cache/internal.h
+++ b/security/integrity/digest_cache/internal.h
@@ -18,8 +18,40 @@
#define INIT_STARTED 1 /* Digest cache init started. */
#define INVALID 2 /* Digest cache marked as invalid. */
+/**
+ * struct digest_cache_entry - Entry of a digest cache hash table
+ * @hnext: Node in the hash table collision list
+ * @digest: Stored digest
+ *
+ * This structure represents an entry of a digest cache hash table, storing a
+ * digest.
+ */
+struct digest_cache_entry {
+ struct hlist_node hnext;
+ u8 digest[];
+};
+
+/**
+ * struct htable - Hash table
+ * @next: Next hash table in the linked list
+ * @slots: Hash table slots
+ * @num_slots: Number of slots
+ * @num_digests: Number of digests stored in the hash table
+ * @algo: Algorithm of the digests
+ *
+ * This structure is a hash table storing digests of file data or metadata.
+ */
+struct htable {
+ struct list_head next;
+ struct hlist_head *slots;
+ unsigned int num_slots;
+ u64 num_digests;
+ enum hash_algo algo;
+};
+
/**
* struct digest_cache - Digest cache
+ * @htables: Hash tables (one per algorithm)
* @ref_count: Number of references to the digest cache
* @path_str: Path of the digest list the digest cache was created from
* @flags: Control flags
@@ -28,6 +60,7 @@
* This structure represents a cache of digests extracted from a digest list.
*/
struct digest_cache {
+ struct list_head htables;
atomic_t ref_count;
char *path_str;
unsigned long flags;
@@ -100,4 +133,14 @@ int __init digest_cache_do_init(const struct lsm_id *lsm_id,
/* secfs.c */
int __init digest_cache_secfs_init(struct dentry *dir);
+/* htable.c */
+int digest_cache_htable_init(struct digest_cache *digest_cache, u64 num_digests,
+ enum hash_algo algo);
+int digest_cache_htable_add(struct digest_cache *digest_cache, u8 *digest,
+ enum hash_algo algo);
+int digest_cache_htable_lookup(struct dentry *dentry,
+ struct digest_cache *digest_cache, u8 *digest,
+ enum hash_algo algo);
+void digest_cache_htable_free(struct digest_cache *digest_cache);
+
#endif /* _DIGEST_CACHE_INTERNAL_H */
diff --git a/security/integrity/digest_cache/main.c b/security/integrity/digest_cache/main.c
index 0e25f573166a..a74fc1183332 100644
--- a/security/integrity/digest_cache/main.c
+++ b/security/integrity/digest_cache/main.c
@@ -50,6 +50,7 @@ static struct digest_cache *digest_cache_alloc_init(char *path_str,
atomic_set(&digest_cache->ref_count, 1);
digest_cache->flags = 0UL;
+ INIT_LIST_HEAD(&digest_cache->htables);
pr_debug("New digest cache %s (ref count: %d)\n",
digest_cache->path_str, atomic_read(&digest_cache->ref_count));
@@ -65,6 +66,8 @@ static struct digest_cache *digest_cache_alloc_init(char *path_str,
*/
static void digest_cache_free(struct digest_cache *digest_cache)
{
+ digest_cache_htable_free(digest_cache);
+
pr_debug("Freed digest cache %s\n", digest_cache->path_str);
kfree(digest_cache->path_str);
kmem_cache_free(digest_cache_cache, digest_cache);
--
2.34.1