+/*
+ * Copyright (C) 2017 Joonsoo Kim.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/jhash.h>
+#include <linux/highmem.h>
+
+#include "zram_drv.h"
+
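+/*
+ * Deduplication: each page is hashed with jhash and the checksum picks
+ * a slot in a table of RB trees.  Identical pages share one refcounted
+ * zram_entry; a checksum match is always confirmed by a byte-for-byte
+ * comparison before an entry is reused.
+ */
+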
+/* Each hash slot covers 128 pages of the device on average */
+#define ZRAM_HASH_SHIFT		7
+#define ZRAM_HASH_SIZE_MIN	(1 << 10)
+#define ZRAM_HASH_SIZE_MAX	(1UL << 31)
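+
+/*
+ * Sizing example (illustrative): a 1GiB device with 4KiB pages holds
+ * 262144 pages, giving hash_size = 262144 >> 7 = 2048 slots, which is
+ * already within [ZRAM_HASH_SIZE_MIN, ZRAM_HASH_SIZE_MAX].
+ */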
+
+u64 zram_dedup_dup_size(struct zram *zram)
+{
+	return (u64)atomic64_read(&zram->stats.dup_data_size);
+}
+
+u64 zram_dedup_meta_size(struct zram *zram)
+{
+	return (u64)atomic64_read(&zram->stats.meta_data_size);
+}
+
+static u32 zram_dedup_checksum(unsigned char *mem)
+{
+	return jhash(mem, PAGE_SIZE, 0);
+}
+
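+/*
+ * Link @new into the RB tree of the slot selected by @checksum.
+ * Several entries may carry the same checksum; ties descend left, so
+ * duplicates simply coexist in the tree.
+ */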
+void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
+				u32 checksum)
+{
+	struct zram_hash *hash;
+	struct rb_root *rb_root;
+	struct rb_node **rb_node, *parent = NULL;
+	struct zram_entry *entry;
+
+	new->checksum = checksum;
+	hash = &zram->hash[checksum % zram->hash_size];
+	rb_root = &hash->rb_root;
+
+	spin_lock(&hash->lock);
+	rb_node = &rb_root->rb_node;
+	while (*rb_node) {
+		parent = *rb_node;
+		entry = rb_entry(parent, struct zram_entry, rb_node);
+		if (checksum < entry->checksum)
+			rb_node = &parent->rb_left;
+		else if (checksum > entry->checksum)
+			rb_node = &parent->rb_right;
+		else
+			/* equal checksums coexist; always descend left */
+			rb_node = &parent->rb_left;
+	}
+
+	rb_link_node(&new->rb_node, parent, rb_node);
+	rb_insert_color(&new->rb_node, rb_root);
+	spin_unlock(&hash->lock);
+}
+
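+/*
+ * A checksum hit is only a candidate: confirm it by comparing the page
+ * byte-for-byte, decompressing the stored object first unless it was
+ * stored uncompressed (entry->len == PAGE_SIZE).
+ */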
+static bool zram_dedup_match(struct zram *zram, struct zram_entry *entry,
+				unsigned char *mem)
+{
+	bool match = false;
+	unsigned char *cmem;
+	struct zcomp_strm *zstrm;
+
+	cmem = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_RO);
+	if (entry->len == PAGE_SIZE) {
+		match = !memcmp(mem, cmem, PAGE_SIZE);
+	} else {
+		zstrm = zcomp_stream_get(zram->comp);
+		if (!zcomp_decompress(zstrm, cmem, entry->len, zstrm->buffer))
+			match = !memcmp(mem, zstrm->buffer, PAGE_SIZE);
+		zcomp_stream_put(zram->comp);
+	}
+	zs_unmap_object(zram->mem_pool, entry->handle);
+
+	return match;
+}
+
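+/*
+ * Drop one reference under the slot lock.  The final put unlinks the
+ * entry from its tree; a non-final put only shrinks the duplicate-data
+ * statistic.  Returns the number of references that remain.
+ */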
+static unsigned long zram_dedup_put(struct zram *zram,
+				struct zram_entry *entry)
+{
+	struct zram_hash *hash;
+	u32 checksum;
+	unsigned long refcount;
+
+	checksum = entry->checksum;
+	hash = &zram->hash[checksum % zram->hash_size];
+
+	spin_lock(&hash->lock);
+
+	entry->refcount--;
+	/* snapshot under the lock; the entry may be freed once unlocked */
+	refcount = entry->refcount;
+	if (!entry->refcount)
+		rb_erase(&entry->rb_node, &hash->rb_root);
+	else
+		atomic64_sub(entry->len, &zram->stats.dup_data_size);
+
+	spin_unlock(&hash->lock);
+
+	return refcount;
+}
+
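+/*
+ * Find an entry whose checksum and content both match @mem.  A
+ * reference is taken before the slot lock is dropped so the entry
+ * cannot be freed while its content is compared; on a content mismatch
+ * (checksum collision) that reference is released again.  Note that
+ * only the first entry found for a given checksum is compared.
+ */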
+static struct zram_entry *zram_dedup_get(struct zram *zram,
+				unsigned char *mem, u32 checksum)
+{
+	struct zram_hash *hash;
+	struct zram_entry *entry;
+	struct rb_node *rb_node;
+
+	hash = &zram->hash[checksum % zram->hash_size];
+
+	spin_lock(&hash->lock);
+	rb_node = hash->rb_root.rb_node;
+	while (rb_node) {
+		entry = rb_entry(rb_node, struct zram_entry, rb_node);
+		if (checksum == entry->checksum) {
+			/* grab a reference before dropping the slot lock */
+			entry->refcount++;
+			atomic64_add(entry->len, &zram->stats.dup_data_size);
+			spin_unlock(&hash->lock);
+
+			if (zram_dedup_match(zram, entry, mem))
+				return entry;
+
+			/* checksum collision: drop the reference we took */
+			zram_entry_free(zram, entry);
+
+			return NULL;
+		}
+
+		if (checksum < entry->checksum)
+			rb_node = rb_node->rb_left;
+		else
+			rb_node = rb_node->rb_right;
+	}
+	spin_unlock(&hash->lock);
+
+	return NULL;
+}
+
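+/*
+ * Checksum @page and return a referenced entry with identical content,
+ * or NULL.  *checksum is set either way so a caller that misses can
+ * store the page and insert it under the same checksum.
+ */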
+struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
+				u32 *checksum)
+{
+	void *mem;
+	struct zram_entry *entry;
+
+	mem = kmap_atomic(page);
+	*checksum = zram_dedup_checksum(mem);
+
+	entry = zram_dedup_get(zram, mem, *checksum);
+	kunmap_atomic(mem);
+
+	return entry;
+}
+
+void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry,
+				unsigned long handle, unsigned int len)
+{
+	entry->handle = handle;
+	entry->refcount = 1;
+	entry->len = len;
+}
+
+/* Returns true only when the last reference to @entry was dropped. */
+bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry)
+{
+	if (zram_dedup_put(zram, entry))
+		return false;
+
+	return true;
+}
+
+int zram_dedup_init(struct zram *zram, size_t num_pages)
+{
+	size_t i;
+	struct zram_hash *hash;
+
+	zram->hash_size = num_pages >> ZRAM_HASH_SHIFT;
+	zram->hash_size = min_t(size_t, ZRAM_HASH_SIZE_MAX, zram->hash_size);
+	zram->hash_size = max_t(size_t, ZRAM_HASH_SIZE_MIN, zram->hash_size);
+	zram->hash = vzalloc(zram->hash_size * sizeof(struct zram_hash));
+	if (!zram->hash) {
+		pr_err("Error allocating zram entry hash\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < zram->hash_size; i++) {
+		hash = &zram->hash[i];
+		spin_lock_init(&hash->lock);
+		hash->rb_root = RB_ROOT;
+	}
+
+	return 0;
+}
+
+void zram_dedup_fini(struct zram *zram)
+{
+	vfree(zram->hash);
+	zram->hash = NULL;
+	zram->hash_size = 0;
+}
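+
+/*
+ * Sketch of the intended call sequence in the write path (helper names
+ * such as zram_entry_alloc() follow the accompanying zram_drv.c changes
+ * and are illustrative, not part of this file):
+ *
+ *	entry = zram_dedup_find(zram, page, &checksum);
+ *	if (entry)
+ *		return entry;		// duplicate found, reuse it
+ *	entry = zram_entry_alloc(zram, comp_len);
+ *	// ...compress the page and copy it into entry->handle...
+ *	zram_dedup_insert(zram, entry, checksum);
+ */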