Message-Id: <1490852289-11913-5-git-send-email-iamjoonsoo.kim@lge.com>
Date: Thu, 30 Mar 2017 14:38:09 +0900
From: js1304@...il.com
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Minchan Kim <minchan@...nel.org>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
linux-kernel@...r.kernel.org, kernel-team@....com,
Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: [PATCH v2 4/4] zram: compare all the entries with same checksum for deduplication
From: Joonsoo Kim <iamjoonsoo.kim@....com>
Until now, we compare just one entry with the same checksum when
checking for duplication, since that is the simplest way to implement
it. However, for completeness, it is better to check all the entries,
so this patch implements comparing all the entries with the same
checksum. Since checksum collisions are rare, there should be no
performance loss.
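
To make the idea concrete, here is a minimal userspace sketch (not the
zram code itself) of the same lookup policy over a sorted candidate
list: position at the first entry with the matching checksum, then try
every entry that shares it until the contents really match. The names
dedup_entry and find_duplicate, the flat array, and the tiny PAGE_SIZE
are made up for the illustration; zram keeps its entries in a per-bucket
rbtree and drops the bucket lock around the content compare.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 64	/* tiny "page" for the sketch */

	struct dedup_entry {
		uint32_t checksum;
		unsigned char data[PAGE_SIZE];
	};

	/*
	 * Entries are kept sorted by checksum, mirroring an in-order walk
	 * of the rbtree.  Return the first entry whose checksum and
	 * content both match, or NULL if every candidate with the same
	 * checksum turns out to be a collision.
	 */
	static struct dedup_entry *find_duplicate(struct dedup_entry *entries,
						  int nr, uint32_t checksum,
						  const unsigned char *mem)
	{
		int i;

		/* skip entries with a smaller checksum */
		for (i = 0; i < nr && entries[i].checksum < checksum; i++)
			;

		/* try every entry that shares the checksum, not just the first */
		for (; i < nr && entries[i].checksum == checksum; i++) {
			if (!memcmp(entries[i].data, mem, PAGE_SIZE))
				return &entries[i];
		}

		return NULL;
	}

	int main(void)
	{
		struct dedup_entry entries[3] = {
			{ .checksum = 1, .data = "aaa" },
			{ .checksum = 2, .data = "bbb" },	/* collision */
			{ .checksum = 2, .data = "ccc" },	/* real match */
		};
		unsigned char mem[PAGE_SIZE] = "ccc";

		printf("match: %s\n",
		       find_duplicate(entries, 3, 2, mem) ? "found" : "not found");
		return 0;
	}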
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
drivers/block/zram/zram_dedup.c | 59 +++++++++++++++++++++++++++++++++--------
1 file changed, 48 insertions(+), 11 deletions(-)
diff --git a/drivers/block/zram/zram_dedup.c b/drivers/block/zram/zram_dedup.c
index 1df1ce1..c4dfd21 100644
--- a/drivers/block/zram/zram_dedup.c
+++ b/drivers/block/zram/zram_dedup.c
@@ -125,6 +125,51 @@ static unsigned long zram_dedup_put(struct zram *zram, struct zram_meta *meta,
return refcount;
}
+static struct zram_entry *__zram_dedup_get(struct zram *zram,
+ struct zram_hash *hash, unsigned char *mem,
+ struct zram_entry *entry)
+{
+ struct zram_entry *tmp, *prev = NULL;
+ struct rb_node *rb_node;
+
+ /* find left-most entry with same checksum */
+ while ((rb_node = rb_prev(&entry->rb_node))) {
+ tmp = rb_entry(rb_node, struct zram_entry, rb_node);
+ if (tmp->checksum != entry->checksum)
+ break;
+
+ entry = tmp;
+ }
+
+again:
+ entry->refcount++;
+ atomic64_add(entry->len, &zram->stats.dup_data_size);
+ spin_unlock(&hash->lock);
+
+ if (prev)
+ zram_entry_free(zram, zram->meta, prev);
+
+ if (zram_dedup_match(zram, entry, mem))
+ return entry;
+
+ spin_lock(&hash->lock);
+ tmp = NULL;
+ rb_node = rb_next(&entry->rb_node);
+ if (rb_node)
+ tmp = rb_entry(rb_node, struct zram_entry, rb_node);
+
+ if (tmp && (tmp->checksum == entry->checksum)) {
+ prev = entry;
+ entry = tmp;
+ goto again;
+ }
+
+ spin_unlock(&hash->lock);
+ zram_entry_free(zram, zram->meta, entry);
+
+ return NULL;
+}
+
static struct zram_entry *zram_dedup_get(struct zram *zram,
unsigned char *mem, u32 checksum)
{
@@ -139,18 +184,10 @@ static struct zram_entry *zram_dedup_get(struct zram *zram,
rb_node = hash->rb_root.rb_node;
while (rb_node) {
entry = rb_entry(rb_node, struct zram_entry, rb_node);
- if (checksum == entry->checksum) {
- entry->refcount++;
- atomic64_add(entry->len, &zram->stats.dup_data_size);
- spin_unlock(&hash->lock);
-
- if (zram_dedup_match(zram, entry, mem))
- return entry;
-
- zram_entry_free(zram, meta, entry);
- return NULL;
- }
+ /* lock will be released in the following function */
+ if (checksum == entry->checksum)
+ return __zram_dedup_get(zram, hash, mem, entry);
if (checksum < entry->checksum)
rb_node = rb_node->rb_left;
--
2.7.4