@@ -239,6 +239,11 @@ struct zs_pool {
 	/* Compact classes */
 	struct shrinker shrinker;
 
+#ifdef CONFIG_ZPOOL
+	/* List tracking the zspages in LRU order by most recently added object */
+	struct list_head lru;
+#endif
+
 #ifdef CONFIG_ZSMALLOC_STAT
 	struct dentry *stat_dentry;
 #endif
@@ -260,6 +265,12 @@ struct zspage {
 	unsigned int freeobj;
 	struct page *first_page;
 	struct list_head list; /* fullness list */
+
+#ifdef CONFIG_ZPOOL
+	/* links the zspage to the lru list in the pool */
+	struct list_head lru;
+#endif
+
 	struct zs_pool *pool;
 #ifdef CONFIG_COMPACTION
 	rwlock_t lock;
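
Taken together, the two new fields form one intrusive doubly linked list: pool->lru is the list head and each zspage->lru is a node on it. Because zs_map_object() (further down in this patch) inserts with list_add(), the most recently written zspage sits at the head and the coldest one at the tail. As a rough sketch only (the helper name is invented, it is not part of this patch, and it would have to live in mm/zsmalloc.c since struct zspage is private to that file), the coldest zspage could be looked up like this:

#ifdef CONFIG_ZPOOL
/* Return the least recently written zspage, or NULL if the LRU is empty. */
static struct zspage *zs_lru_tail(struct zs_pool *pool)
{
	if (list_empty(&pool->lru))
		return NULL;
	/* New entries are list_add()ed at the head, so the tail is the coldest. */
	return list_last_entry(&pool->lru, struct zspage, lru);
}
#endif
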
@@ -953,6 +964,9 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
 	}
 
 	remove_zspage(class, zspage, ZS_EMPTY);
+#ifdef CONFIG_ZPOOL
+	list_del(&zspage->lru);
+#endif
 	__free_zspage(pool, class, zspage);
 }
 
@@ -998,6 +1012,10 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		off %= PAGE_SIZE;
 	}
 
+#ifdef CONFIG_ZPOOL
+	INIT_LIST_HEAD(&zspage->lru);
+#endif
+
 	set_freeobj(zspage, 0);
 }
 
@@ -1270,6 +1288,31 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	obj_to_location(obj, &page, &obj_idx);
 	zspage = get_zspage(page);
 
+#ifdef CONFIG_ZPOOL
+	/*
+	 * Move the zspage to front of pool's LRU.
+	 *
+	 * Note that this is swap-specific, so by definition there are no ongoing
+	 * accesses to the memory while the page is swapped out that would make
+	 * it "hot". A new entry is hot, then ages to the tail until it gets either
+	 * written back or swaps back in.
+	 *
+	 * Furthermore, map is also called during writeback. We must not put an
+	 * isolated page on the LRU mid-reclaim.
+	 *
+	 * As a result, only update the LRU when the page is mapped for write
+	 * when it's first instantiated.
+	 *
+	 * This is a deviation from the other backends, which perform this update
+	 * in the allocation function (zbud_alloc, z3fold_alloc).
+	 */
+	if (mm == ZS_MM_WO) {
+		if (!list_empty(&zspage->lru))
+			list_del(&zspage->lru);
+		list_add(&zspage->lru, &pool->lru);
+	}
+#endif
+
 	/*
 	 * migration cannot move any zpages in this zspage. Here, pool->lock
 	 * is too heavy since callers would take some time until they calls
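
The comment above restricts the LRU update to ZS_MM_WO mappings, i.e. the store path. A hedged caller-side sketch of that path (the function name is invented; it assumes the recent convention of zs_malloc() returning an ERR_PTR-encoded handle on failure, and real users such as the zpool layer do more error handling):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zsmalloc.h>

/* Store one compressed object; the ZS_MM_WO mapping is what bumps the LRU. */
static int store_compressed(struct zs_pool *pool, const void *src, size_t len)
{
	unsigned long handle;
	void *dst;

	handle = zs_malloc(pool, len, GFP_KERNEL);
	if (IS_ERR_VALUE(handle))
		return -ENOMEM;

	dst = zs_map_object(pool, handle, ZS_MM_WO);	/* moves zspage to LRU head */
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);

	/* Later ZS_MM_RO mappings (swap-in, writeback) leave the ordering alone. */
	return 0;
}
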
@@ -1988,6 +2031,9 @@ static void async_free_zspage(struct work_struct *work)
 		VM_BUG_ON(fullness != ZS_EMPTY);
 		class = pool->size_class[class_idx];
 		spin_lock(&pool->lock);
+#ifdef CONFIG_ZPOOL
+		list_del(&zspage->lru);
+#endif
 		__free_zspage(pool, class, zspage);
 		spin_unlock(&pool->lock);
 	}
@@ -2299,6 +2345,10 @@ struct zs_pool *zs_create_pool(const char *name)
 	 */
 	zs_register_shrinker(pool);
 
+#ifdef CONFIG_ZPOOL
+	INIT_LIST_HEAD(&pool->lru);
+#endif
+
 	return pool;
 
 err:
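
With the list head initialized here, entries added in zs_map_object() and unlinked in both free paths, the pool can be walked from the tail to find the coldest zspages. Purely as an illustration (the helper is hypothetical and not added by this patch), such a walk would take pool->lock and iterate in reverse:

#ifdef CONFIG_ZPOOL
/* Count zspages on the pool LRU, visiting coldest (tail) to hottest (head). */
static unsigned long zs_lru_count(struct zs_pool *pool)
{
	struct zspage *zspage;
	unsigned long nr = 0;

	spin_lock(&pool->lock);
	list_for_each_entry_reverse(zspage, &pool->lru, lru)
		nr++;
	spin_unlock(&pool->lock);

	return nr;
}
#endif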