Message-Id: <1494556204-25796-5-git-send-email-iamjoonsoo.kim@lge.com>
Date: Fri, 12 May 2017 11:30:04 +0900
From: js1304@...il.com
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Minchan Kim <minchan@...nel.org>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
linux-kernel@...r.kernel.org, kernel-team@....com,
Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: [PATCH v5 4/4] zram: compare all the entries with same checksum for deduplication
From: Joonsoo Kim <iamjoonsoo.kim@....com>
Until now, we have compared just one entry with the same checksum
when checking for duplication, since that is the simplest way to
implement it. However, several different pages can share a checksum,
so stopping at the first entry can miss a real duplicate. For
completeness, this patch implements comparing all the entries with
the same checksum. Such checksum collisions should be rare, so there
should be no performance loss.
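
To illustrate the point outside the kernel, here is a minimal
userspace sketch of the comparison logic. It is not the zram code:
the names (struct candidate, find_duplicate) are hypothetical, a
plain linked list stands in for the rb-tree bucket, and no locking
is shown.

/* Minimal userspace sketch, not the zram code itself. */
#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for one entry sharing a checksum bucket. */
struct candidate {
	unsigned char data[PAGE_SIZE];
	unsigned int checksum;
	struct candidate *next;	/* next candidate in the same bucket */
};

/*
 * Compare the new page against every candidate with a matching
 * checksum; a checksum match alone does not prove the contents are
 * identical, so each candidate gets a full memcmp().
 */
static struct candidate *find_duplicate(struct candidate *head,
					const unsigned char *mem,
					unsigned int checksum)
{
	struct candidate *c;

	for (c = head; c; c = c->next) {
		if (c->checksum != checksum)
			continue;
		if (!memcmp(c->data, mem, PAGE_SIZE))
			return c;	/* true duplicate */
	}

	return NULL;	/* collision only, no identical content */
}

In the actual patch the entries live in an rb-tree keyed by checksum,
so equal-checksum entries sit next to each other and are walked with
rb_prev()/rb_next() under hash->lock, dropping the lock for each
content comparison.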
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
Acked-by: Minchan Kim <minchan@...nel.org>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
drivers/block/zram/zram_dedup.c | 59 ++++++++++++++++++++++++++++++++---------
1 file changed, 47 insertions(+), 12 deletions(-)
diff --git a/drivers/block/zram/zram_dedup.c b/drivers/block/zram/zram_dedup.c
index 560b1f5..14c4988 100644
--- a/drivers/block/zram/zram_dedup.c
+++ b/drivers/block/zram/zram_dedup.c
@@ -109,6 +109,51 @@ static unsigned long zram_dedup_put(struct zram *zram,
 	return entry->refcount;
 }
 
+static struct zram_entry *__zram_dedup_get(struct zram *zram,
+		struct zram_hash *hash, unsigned char *mem,
+		struct zram_entry *entry)
+{
+	struct zram_entry *tmp, *prev = NULL;
+	struct rb_node *rb_node;
+
+	/* find left-most entry with same checksum */
+	while ((rb_node = rb_prev(&entry->rb_node))) {
+		tmp = rb_entry(rb_node, struct zram_entry, rb_node);
+		if (tmp->checksum != entry->checksum)
+			break;
+
+		entry = tmp;
+	}
+
+again:
+	entry->refcount++;
+	atomic64_add(entry->len, &zram->stats.dup_data_size);
+	spin_unlock(&hash->lock);
+
+	if (prev)
+		zram_entry_free(zram, prev);
+
+	if (zram_dedup_match(zram, entry, mem))
+		return entry;
+
+	spin_lock(&hash->lock);
+	tmp = NULL;
+	rb_node = rb_next(&entry->rb_node);
+	if (rb_node)
+		tmp = rb_entry(rb_node, struct zram_entry, rb_node);
+
+	if (tmp && (tmp->checksum == entry->checksum)) {
+		prev = entry;
+		entry = tmp;
+		goto again;
+	}
+
+	spin_unlock(&hash->lock);
+	zram_entry_free(zram, entry);
+
+	return NULL;
+}
+
 static struct zram_entry *zram_dedup_get(struct zram *zram,
 		unsigned char *mem, u32 checksum)
 {
@@ -122,18 +167,8 @@ static struct zram_entry *zram_dedup_get(struct zram *zram,
 	rb_node = hash->rb_root.rb_node;
 	while (rb_node) {
 		entry = rb_entry(rb_node, struct zram_entry, rb_node);
-		if (checksum == entry->checksum) {
-			entry->refcount++;
-			atomic64_add(entry->len, &zram->stats.dup_data_size);
-			spin_unlock(&hash->lock);
-
-			if (zram_dedup_match(zram, entry, mem))
-				return entry;
-
-			zram_entry_free(zram, entry);
-
-			return NULL;
-		}
+		if (checksum == entry->checksum)
+			return __zram_dedup_get(zram, hash, mem, entry);
 
 		if (checksum < entry->checksum)
 			rb_node = rb_node->rb_left;
--
2.7.4