@@ -86,14 +86,15 @@ static inline struct zram *dev_to_zram(struct device *dev)
 	return (struct zram *)dev_to_disk(dev)->private_data;
 }
 
-static unsigned long zram_get_handle(struct zram *zram, u32 index)
+static struct zram_entry *zram_get_entry(struct zram *zram, u32 index)
 {
-	return zram->table[index].handle;
+	return zram->table[index].entry;
 }
 
-static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
+static void zram_set_entry(struct zram *zram, u32 index,
+			   struct zram_entry *entry)
 {
-	zram->table[index].handle = handle;
+	zram->table[index].entry = entry;
 }
 
 /* flag operations require table entry bit_spin_lock() being held */
@@ -1142,6 +1143,33 @@ static DEVICE_ATTR_RO(bd_stat);
 #endif
 static DEVICE_ATTR_RO(debug_stat);
 
+
+static struct zram_entry *zram_entry_alloc(struct zram *zram,
+					   unsigned int len, gfp_t flags)
+{
+	struct zram_entry *entry;
+
+	entry = kzalloc(sizeof(*entry),
+			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
+	if (!entry)
+		return NULL;
+
+	entry->handle = zs_malloc(zram->mem_pool, len, flags);
+	if (!entry->handle) {
+		kfree(entry);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static inline void zram_entry_free(struct zram *zram,
+				   struct zram_entry *entry)
+{
+	zs_free(zram->mem_pool, entry->handle);
+	kfree(entry);
+}
+
 static void zram_meta_free(struct zram *zram, u64 disksize)
 {
 	size_t num_pages = disksize >> PAGE_SHIFT;
@@ -1182,7 +1210,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
  */
 static void zram_free_page(struct zram *zram, size_t index)
 {
-	unsigned long handle;
+	struct zram_entry *entry;
 
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
 	zram->table[index].ac_time.tv64 = 0;
@@ -1211,17 +1239,18 @@ static void zram_free_page(struct zram *zram, size_t index)
 		goto out;
 	}
 
-	handle = zram_get_handle(zram, index);
-	if (!handle)
+	entry = zram_get_entry(zram, index);
+	if (!entry)
 		return;
 
-	zs_free(zram->mem_pool, handle);
+	zram_entry_free(zram, entry);
 
 	atomic64_sub(zram_get_obj_size(zram, index),
 			&zram->stats.compr_data_size);
 out:
 	atomic64_dec(&zram->stats.pages_stored);
-	zram_set_handle(zram, index, 0);
+
+	zram_set_entry(zram, index, NULL);
 	zram_set_obj_size(zram, index, 0);
 	WARN_ON_ONCE(zram->table[index].flags &
 		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
@@ -1231,7 +1260,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 				struct bio *bio, bool partial_io)
 {
 	int ret;
-	unsigned long handle;
+	struct zram_entry *entry;
 	unsigned int size;
 	void *src, *dst;
 
@@ -1249,12 +1278,14 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 				bio, partial_io);
 	}
 
-	handle = zram_get_handle(zram, index);
-	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
+
+
+	entry = zram_get_entry(zram, index);
+	if (!entry || zram_test_flag(zram, index, ZRAM_SAME)) {
 		unsigned long value;
 		void *mem;
 
-		value = handle ? zram_get_element(zram, index) : 0;
+		value = entry ? zram_get_element(zram, index) : 0;
 		mem = kmap_atomic(page);
 		zram_fill_page(mem, PAGE_SIZE, value);
 		kunmap_atomic(mem);
@@ -1264,7 +1295,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 
 	size = zram_get_obj_size(zram, index);
 
-	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	src = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
 		dst = kmap_atomic(page);
 		memcpy(dst, src, PAGE_SIZE);
@@ -1278,7 +1309,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		kunmap_atomic(dst);
 		zcomp_stream_put(zram->comp);
 	}
-	zs_unmap_object(zram->mem_pool, handle);
+	zs_unmap_object(zram->mem_pool, entry->handle);
 	zram_slot_unlock(zram, index);
 
 	/* Should NEVER happen. Return bio error if it does. */
@@ -1326,7 +1357,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 {
 	int ret = 0;
 	unsigned long alloced_pages;
-	unsigned long handle = 0;
+	struct zram_entry *entry = NULL;
 	unsigned int comp_len = 0;
 	void *src, *dst, *mem;
 	struct zcomp_strm *zstrm;
@@ -1353,38 +1384,39 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 	if (unlikely(ret)) {
 		zcomp_stream_put(zram->comp);
 		pr_err("Compression failed! err=%d\n", ret);
-		zs_free(zram->mem_pool, handle);
+		if (entry)
+			zram_entry_free(zram, entry);
 		return ret;
 	}
 
 	if (comp_len >= huge_class_size)
 		comp_len = PAGE_SIZE;
 	/*
-	 * handle allocation has 2 paths:
+	 * entry allocation has 2 paths:
 	 * a) fast path is executed with preemption disabled (for
 	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
 	 *  since we can't sleep;
 	 * b) slow path enables preemption and attempts to allocate
 	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
 	 *  put per-cpu compression stream and, thus, to re-do
-	 *  the compression once handle is allocated.
+	 *  the compression once entry is allocated.
 	 *
-	 * if we have a 'non-null' handle here then we are coming
-	 * from the slow path and handle has already been allocated.
+	 * if we have a 'non-null' entry here then we are coming
+	 * from the slow path and entry has already been allocated.
 	 */
-	if (!handle)
-		handle = zs_malloc(zram->mem_pool, comp_len,
+	if (!entry)
+		entry = zram_entry_alloc(zram, comp_len,
 				__GFP_KSWAPD_RECLAIM |
 				__GFP_NOWARN |
 				__GFP_HIGHMEM |
 				__GFP_MOVABLE);
-	if (!handle) {
+	if (!entry) {
 		zcomp_stream_put(zram->comp);
 		atomic64_inc(&zram->stats.writestall);
-		handle = zs_malloc(zram->mem_pool, comp_len,
+		entry = zram_entry_alloc(zram, comp_len,
 				GFP_NOIO | __GFP_HIGHMEM |
 				__GFP_MOVABLE);
-		if (handle)
+		if (entry)
 			goto compress_again;
 		return -ENOMEM;
 	}
@@ -1394,11 +1426,11 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 
 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
 		zcomp_stream_put(zram->comp);
-		zs_free(zram->mem_pool, handle);
+		zram_entry_free(zram, entry);
 		return -ENOMEM;
 	}
 
-	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+	dst = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_WO);
 
 	src = zstrm->buffer;
 	if (comp_len == PAGE_SIZE)
@@ -1408,7 +1440,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 	kunmap_atomic(src);
 
 	zcomp_stream_put(zram->comp);
-	zs_unmap_object(zram->mem_pool, handle);
+	zs_unmap_object(zram->mem_pool, entry->handle);
 	atomic64_add(comp_len, &zram->stats.compr_data_size);
 out:
 	/*
@@ -1427,7 +1459,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 		zram_set_flag(zram, index, flags);
 		zram_set_element(zram, index, element);
 	} else {
-		zram_set_handle(zram, index, handle);
+		zram_set_entry(zram, index, entry);
 		zram_set_obj_size(zram, index, comp_len);
 	}
 	zram_slot_unlock(zram, index);
@@ -2141,4 +2173,4 @@ MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
-MODULE_DESCRIPTION("Compressed RAM Block Device");
+MODULE_DESCRIPTION("Compressed RAM Block Device");