zsmalloc: account the number of compacted pages correctly
commit 2395928158059b8f9858365fce7713ce7fef62e4 upstream.
There exist multiple paths that may do zram compaction concurrently.
1. auto-compaction triggered during memory reclaim
2. userspace utils write zram<id>/compaction node
So, multiple threads may call zs_shrinker_scan/zs_compact concurrently.
But pages_compacted is a per zsmalloc pool variable and modification
of the variable is not serialized (though it is updated under class->lock).
There are two issues here:
1. pages_compacted may not equal the total number of pages
freed (due to concurrent additions).
2. zs_shrinker_scan may not return the correct number of pages
freed (by the current shrinker invocation).
The fix is simple:
1. account the number of pages freed in zs_compact locally.
2. use an atomic variable pages_compacted to accumulate the total number.
Link: https://lkml.kernel.org/r/20210202122235.26885-1-wu-yan@tcl.com
Fixes: 860c707dca
("zsmalloc: account the number of compacted pages")
Signed-off-by: Rokudo Yan <wu-yan@tcl.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
f038a22632
commit
bebf5e8327
|
@ -1072,7 +1072,7 @@ static ssize_t mm_stat_show(struct device *dev,
|
||||||
zram->limit_pages << PAGE_SHIFT,
|
zram->limit_pages << PAGE_SHIFT,
|
||||||
max_used << PAGE_SHIFT,
|
max_used << PAGE_SHIFT,
|
||||||
(u64)atomic64_read(&zram->stats.same_pages),
|
(u64)atomic64_read(&zram->stats.same_pages),
|
||||||
pool_stats.pages_compacted,
|
atomic_long_read(&pool_stats.pages_compacted),
|
||||||
(u64)atomic64_read(&zram->stats.huge_pages));
|
(u64)atomic64_read(&zram->stats.huge_pages));
|
||||||
up_read(&zram->init_lock);
|
up_read(&zram->init_lock);
|
||||||
|
|
||||||
|
|
|
@ -35,7 +35,7 @@ enum zs_mapmode {
|
||||||
|
|
||||||
struct zs_pool_stats {
|
struct zs_pool_stats {
|
||||||
/* How many pages were migrated (freed) */
|
/* How many pages were migrated (freed) */
|
||||||
unsigned long pages_compacted;
|
atomic_long_t pages_compacted;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct zs_pool;
|
struct zs_pool;
|
||||||
|
|
|
@ -2216,11 +2216,13 @@ static unsigned long zs_can_compact(struct size_class *class)
|
||||||
return obj_wasted * class->pages_per_zspage;
|
return obj_wasted * class->pages_per_zspage;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __zs_compact(struct zs_pool *pool, struct size_class *class)
|
static unsigned long __zs_compact(struct zs_pool *pool,
|
||||||
|
struct size_class *class)
|
||||||
{
|
{
|
||||||
struct zs_compact_control cc;
|
struct zs_compact_control cc;
|
||||||
struct zspage *src_zspage;
|
struct zspage *src_zspage;
|
||||||
struct zspage *dst_zspage = NULL;
|
struct zspage *dst_zspage = NULL;
|
||||||
|
unsigned long pages_freed = 0;
|
||||||
|
|
||||||
spin_lock(&class->lock);
|
spin_lock(&class->lock);
|
||||||
while ((src_zspage = isolate_zspage(class, true))) {
|
while ((src_zspage = isolate_zspage(class, true))) {
|
||||||
|
@ -2250,7 +2252,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
|
||||||
putback_zspage(class, dst_zspage);
|
putback_zspage(class, dst_zspage);
|
||||||
if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
|
if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
|
||||||
free_zspage(pool, class, src_zspage);
|
free_zspage(pool, class, src_zspage);
|
||||||
pool->stats.pages_compacted += class->pages_per_zspage;
|
pages_freed += class->pages_per_zspage;
|
||||||
}
|
}
|
||||||
spin_unlock(&class->lock);
|
spin_unlock(&class->lock);
|
||||||
cond_resched();
|
cond_resched();
|
||||||
|
@ -2261,12 +2263,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
|
||||||
putback_zspage(class, src_zspage);
|
putback_zspage(class, src_zspage);
|
||||||
|
|
||||||
spin_unlock(&class->lock);
|
spin_unlock(&class->lock);
|
||||||
|
|
||||||
|
return pages_freed;
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned long zs_compact(struct zs_pool *pool)
|
unsigned long zs_compact(struct zs_pool *pool)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
struct size_class *class;
|
struct size_class *class;
|
||||||
|
unsigned long pages_freed = 0;
|
||||||
|
|
||||||
for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
|
for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
|
||||||
class = pool->size_class[i];
|
class = pool->size_class[i];
|
||||||
|
@ -2274,10 +2279,11 @@ unsigned long zs_compact(struct zs_pool *pool)
|
||||||
continue;
|
continue;
|
||||||
if (class->index != i)
|
if (class->index != i)
|
||||||
continue;
|
continue;
|
||||||
__zs_compact(pool, class);
|
pages_freed += __zs_compact(pool, class);
|
||||||
}
|
}
|
||||||
|
atomic_long_add(pages_freed, &pool->stats.pages_compacted);
|
||||||
|
|
||||||
return pool->stats.pages_compacted;
|
return pages_freed;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(zs_compact);
|
EXPORT_SYMBOL_GPL(zs_compact);
|
||||||
|
|
||||||
|
@ -2294,13 +2300,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
|
||||||
struct zs_pool *pool = container_of(shrinker, struct zs_pool,
|
struct zs_pool *pool = container_of(shrinker, struct zs_pool,
|
||||||
shrinker);
|
shrinker);
|
||||||
|
|
||||||
pages_freed = pool->stats.pages_compacted;
|
|
||||||
/*
|
/*
|
||||||
* Compact classes and calculate compaction delta.
|
* Compact classes and calculate compaction delta.
|
||||||
* Can run concurrently with a manually triggered
|
* Can run concurrently with a manually triggered
|
||||||
* (by user) compaction.
|
* (by user) compaction.
|
||||||
*/
|
*/
|
||||||
pages_freed = zs_compact(pool) - pages_freed;
|
pages_freed = zs_compact(pool);
|
||||||
|
|
||||||
return pages_freed ? pages_freed : SHRINK_STOP;
|
return pages_freed ? pages_freed : SHRINK_STOP;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue