1 file changed: +16 −9 lines

@@ -1331,15 +1331,22 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 	if (!gfp_has_io_fs(sc->gfp_mask))
 		return 0;
 
-#ifdef CONFIG_MEMCG_KMEM
-	mem_cgroup_flush_stats(memcg);
-	nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
-	nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
-#else
-	/* use pool stats instead of memcg stats */
-	nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
-	nr_stored = atomic_read(&zswap_nr_stored);
-#endif
+	/*
+	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
+	 * have them per-node and thus per-lruvec. Careful if memcg is
+	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
+	 * for the lruvec, but not for memcg_page_state().
+	 *
+	 * Without memcg, use the zswap pool-wide metrics.
+	 */
+	if (!mem_cgroup_disabled()) {
+		mem_cgroup_flush_stats(memcg);
+		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
+		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+	} else {
+		nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
+		nr_stored = atomic_read(&zswap_nr_stored);
+	}
 
 	if (!nr_stored)
 		return 0;
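The hunk above swaps a compile-time #ifdef CONFIG_MEMCG_KMEM for a runtime mem_cgroup_disabled() check, so the same binary can use per-cgroup zswap counters when the memory controller is active and fall back to the pool-wide counters when it is disabled (e.g. via cgroup_disable=memory at boot). The following is a minimal, self-contained user-space sketch of that pattern only; every name in it (memcg_runtime_enabled, cgroup_stats, pool_stats, shrinker_count, the toy reclaim policy) is a hypothetical stand-in for illustration, not the kernel API touched by this patch.

/*
 * Sketch of the pattern in the hunk: pick per-cgroup stats when the
 * controller is enabled at runtime, otherwise fall back to pool-wide
 * stats. All identifiers here are illustrative, not kernel symbols.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct cgroup_stats {
	unsigned long zswap_bytes;	/* compressed backing size, in bytes */
	unsigned long zswapped;		/* number of stored pages */
};

struct pool_stats {
	unsigned long total_size;	/* pool-wide backing size, in bytes */
	unsigned long nr_stored;	/* pool-wide stored pages */
};

/* Decided once at startup, e.g. from a boot parameter or config file. */
static bool memcg_runtime_enabled = true;

static unsigned long shrinker_count(const struct cgroup_stats *cg,
				    const struct pool_stats *pool)
{
	unsigned long nr_backing, nr_stored;

	if (memcg_runtime_enabled && cg) {
		/* Per-cgroup accounting is available: use it. */
		nr_backing = cg->zswap_bytes >> PAGE_SHIFT;
		nr_stored = cg->zswapped;
	} else {
		/* Fall back to the pool-wide metrics. */
		nr_backing = pool->total_size >> PAGE_SHIFT;
		nr_stored = pool->nr_stored;
	}

	if (!nr_stored)
		return 0;

	/* Toy policy: report half of the backing pages as reclaimable. */
	return nr_backing / 2;
}

int main(void)
{
	struct cgroup_stats cg = { .zswap_bytes = 64UL << PAGE_SHIFT, .zswapped = 100 };
	struct pool_stats pool = { .total_size = 256UL << PAGE_SHIFT, .nr_stored = 400 };

	printf("with memcg:    %lu\n", shrinker_count(&cg, &pool));

	memcg_runtime_enabled = false;
	printf("without memcg: %lu\n", shrinker_count(&cg, &pool));
	return 0;
}

The design point the sketch mirrors is that a runtime branch keeps both code paths compiled (and therefore build-tested) in every configuration, and it also covers the case the new comment calls out: the controller can be disabled at boot even when support is compiled in.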