Skip to content

Commit 222effc

Browse files
JoonsooKim authored and DhineshCool committed
zram: compare all the entries with same checksum for deduplication
Until now, we compare just one entry with same checksum when checking duplication since it is the simplest way to implement. However, for the completeness, checking all the entries is better so this patch implement to compare all the entries with same checksum. Since this event would be rare so there would be no performance loss. Change-Id: Ie7d61c14d127a28f5a06d85b0ca66b9fada20cbb Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Acked-by: Minchan Kim <minchan@kernel.org> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Link: https://lore.kernel.org/patchwork/patch/787163/ Patch-mainline: linux-kernel@ Thu, 11 May 2017 22:30:29 Signed-off-by: Charan Teja Reddy <charante@codeaurora.org> Signed-off-by: Marco Zanin <mrczn.bb@gmail.com> Signed-off-by: snnbyyds <snnbyyds@gmail.com>
1 parent 0dfcc58 commit 222effc

1 file changed

Lines changed: 47 additions & 12 deletions

File tree

drivers/block/zram/zram_dedup.c

Lines changed: 47 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,51 @@ static unsigned long zram_dedup_put(struct zram *zram,
109109
return entry->refcount;
110110
}
111111

112+
static struct zram_entry *__zram_dedup_get(struct zram *zram,
113+
struct zram_hash *hash, unsigned char *mem,
114+
struct zram_entry *entry)
115+
{
116+
struct zram_entry *tmp, *prev = NULL;
117+
struct rb_node *rb_node;
118+
119+
/* find left-most entry with same checksum */
120+
while ((rb_node = rb_prev(&entry->rb_node))) {
121+
tmp = rb_entry(rb_node, struct zram_entry, rb_node);
122+
if (tmp->checksum != entry->checksum)
123+
break;
124+
125+
entry = tmp;
126+
}
127+
128+
again:
129+
entry->refcount++;
130+
atomic64_add(entry->len, &zram->stats.dup_data_size);
131+
spin_unlock(&hash->lock);
132+
133+
if (prev)
134+
zram_entry_free(zram, prev);
135+
136+
if (zram_dedup_match(zram, entry, mem))
137+
return entry;
138+
139+
spin_lock(&hash->lock);
140+
tmp = NULL;
141+
rb_node = rb_next(&entry->rb_node);
142+
if (rb_node)
143+
tmp = rb_entry(rb_node, struct zram_entry, rb_node);
144+
145+
if (tmp && (tmp->checksum == entry->checksum)) {
146+
prev = entry;
147+
entry = tmp;
148+
goto again;
149+
}
150+
151+
spin_unlock(&hash->lock);
152+
zram_entry_free(zram, entry);
153+
154+
return NULL;
155+
}
156+
112157
static struct zram_entry *zram_dedup_get(struct zram *zram,
113158
unsigned char *mem, u32 checksum)
114159
{
@@ -122,18 +167,8 @@ static struct zram_entry *zram_dedup_get(struct zram *zram,
122167
rb_node = hash->rb_root.rb_node;
123168
while (rb_node) {
124169
entry = rb_entry(rb_node, struct zram_entry, rb_node);
125-
if (checksum == entry->checksum) {
126-
entry->refcount++;
127-
atomic64_add(entry->len, &zram->stats.dup_data_size);
128-
spin_unlock(&hash->lock);
129-
130-
if (zram_dedup_match(zram, entry, mem))
131-
return entry;
132-
133-
zram_entry_free(zram, entry);
134-
135-
return NULL;
136-
}
170+
if (checksum == entry->checksum)
171+
return __zram_dedup_get(zram, hash, mem, entry);
137172

138173
if (checksum < entry->checksum)
139174
rb_node = rb_node->rb_left;

0 commit comments

Comments (0)