mm: factor out page cache page freeing into a separate function

Factor out page freeing from delete_from_page_cache() into a separate
function.  We will need to call the same function when batching page
cache deletion operations.

invalidate_complete_page2() and replace_page_cache_page() might want to
call this function as well; however, they currently don't seem to handle
THPs, so it's unnecessary for them to take the hit of checking whether a
page is a THP or not.

Link: http://lkml.kernel.org/r/20171010151937.26984-4-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 59c66c5f8c (parent 9f4e41f471)
Jan Kara, 2017-11-15 17:37:18 -08:00, committed by Linus Torvalds
1 file changed, 18 insertions(+), 13 deletions(-)

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -254,6 +254,23 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
 }
 
+static void page_cache_free_page(struct address_space *mapping,
+				struct page *page)
+{
+	void (*freepage)(struct page *);
+
+	freepage = mapping->a_ops->freepage;
+	if (freepage)
+		freepage(page);
+
+	if (PageTransHuge(page) && !PageHuge(page)) {
+		page_ref_sub(page, HPAGE_PMD_NR);
+		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
+	} else {
+		put_page(page);
+	}
+}
+
 /**
  * delete_from_page_cache - delete page from page cache
  * @page: the page which the kernel is trying to remove from page cache
@@ -266,25 +283,13 @@ void delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 	unsigned long flags;
-	void (*freepage)(struct page *);
 
 	BUG_ON(!PageLocked(page));
-
-	freepage = mapping->a_ops->freepage;
-
 	spin_lock_irqsave(&mapping->tree_lock, flags);
 	__delete_from_page_cache(page, NULL);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 
-	if (freepage)
-		freepage(page);
-
-	if (PageTransHuge(page) && !PageHuge(page)) {
-		page_ref_sub(page, HPAGE_PMD_NR);
-		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
-	} else {
-		put_page(page);
-	}
+	page_cache_free_page(mapping, page);
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
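
As context for the batching use case mentioned in the changelog, a batched
deletion path would take mapping->tree_lock once for a whole group of pages
and then drop each page's reference through page_cache_free_page() outside
the lock.  The sketch below only illustrates that intended reuse and is not
part of this patch; the helper name delete_from_page_cache_batch_sketch()
and the simple per-page loop are assumptions (the real batched variant,
added by a later patch in this series, walks the radix tree directly).  It
would have to live in mm/filemap.c, next to the static helper it calls.

/*
 * Illustrative sketch only, not part of this patch: reuse of
 * page_cache_free_page() by a hypothetical batched deletion helper.
 */
static void delete_from_page_cache_batch_sketch(struct address_space *mapping,
						struct pagevec *pvec)
{
	unsigned long flags;
	int i;

	if (!pagevec_count(pvec))
		return;

	/* Unaccount all pages from the mapping under a single lock hold. */
	spin_lock_irqsave(&mapping->tree_lock, flags);
	for (i = 0; i < pagevec_count(pvec); i++) {
		BUG_ON(!PageLocked(pvec->pages[i]));
		__delete_from_page_cache(pvec->pages[i], NULL);
	}
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	/* Drop the page cache references outside the lock, THP-aware. */
	for (i = 0; i < pagevec_count(pvec); i++)
		page_cache_free_page(mapping, pvec->pages[i]);
}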