mm: memory_hotplug: memory hotremove supports thp migration

This patch enables thp migration for memory hotremove.

Link: http://lkml.kernel.org/r/20170717193955.20207-11-zi.yan@sent.com
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 8135d8926c (parent e8db67eb0d)
Author: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Date:   2017-09-08 16:11:15 -07:00
Committer: Linus Torvalds
2 files changed, 17 insertions(+), 2 deletions(-)

--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -35,15 +35,28 @@ static inline struct page *new_page_nodemask(struct page *page,
                                 int preferred_nid, nodemask_t *nodemask)
 {
         gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+        unsigned int order = 0;
+        struct page *new_page = NULL;
 
         if (PageHuge(page))
                 return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
                                 preferred_nid, nodemask);
 
+        if (thp_migration_supported() && PageTransHuge(page)) {
+                order = HPAGE_PMD_ORDER;
+                gfp_mask |= GFP_TRANSHUGE;
+        }
+
         if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
                 gfp_mask |= __GFP_HIGHMEM;
 
-        return __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
+        new_page = __alloc_pages_nodemask(gfp_mask, order,
+                                preferred_nid, nodemask);
+
+        if (new_page && PageTransHuge(page))
+                prep_transhuge_page(new_page);
+
+        return new_page;
 }
 
 #ifdef CONFIG_MIGRATION
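Not part of the patch: a hedged sketch of the caller side, showing how a hotremove-style allocation callback reaches new_page_nodemask(). The name hotremove_alloc_target() is invented for illustration (the in-tree callback of this era is new_node_page() in mm/memory_hotplug.c), and the three-argument callback signature is assumed from the new_page_t typedef of this kernel generation.

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/nodemask.h>

/*
 * Illustrative only: allocate the migration target on some other
 * online node, falling back to the source node when it is the only
 * one with memory.  new_page_nodemask() (patched above) then picks an
 * HPAGE_PMD_ORDER, GFP_TRANSHUGE allocation if @page is a THP.
 */
static struct page *hotremove_alloc_target(struct page *page,
                                           unsigned long private,
                                           int **result)
{
        int nid = page_to_nid(page);
        nodemask_t nmask = node_states[N_MEMORY];

        node_clear(nid, nmask);
        if (nodes_empty(nmask))
                node_set(nid, nmask);

        return new_page_nodemask(page, nid, &nmask);
}

With the hunk above applied, the destination for a THP is a compound page of the same order, already run through prep_transhuge_page(), instead of the unconditional order-0 page produced by the old return statement.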

--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1380,7 +1380,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                         if (isolate_huge_page(page, &source))
                                 move_pages -= 1 << compound_order(head);
                         continue;
-                }
+                } else if (thp_migration_supported() && PageTransHuge(page))
+                        pfn = page_to_pfn(compound_head(page))
+                                + hpage_nr_pages(page) - 1;
 
                 if (!get_page_unless_zero(page))
                         continue;
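
The new else-if mirrors the hugetlb branch a few lines earlier in this loop: pfn is advanced to the frame number of the THP's last base page, so the for-loop's pfn++ resumes scanning immediately after the huge page instead of walking its tail pages one pfn at a time. A small userspace illustration of that arithmetic (the pfn value and the order-9, 512-page THP size are assumed x86-64 defaults, not taken from the patch):

#include <stdio.h>

int main(void)
{
        /* Hypothetical example values, not taken from the patch. */
        unsigned long head_pfn = 0x1000;      /* page_to_pfn(compound_head(page)) */
        unsigned long thp_pages = 1UL << 9;   /* hpage_nr_pages() for a PMD-sized THP */

        /* pfn = page_to_pfn(compound_head(page)) + hpage_nr_pages(page) - 1 */
        unsigned long pfn = head_pfn + thp_pages - 1;

        printf("last subpage pfn: 0x%lx\n", pfn);      /* 0x11ff */
        printf("scan resumes at:  0x%lx\n", pfn + 1);  /* 0x1200 */
        return 0;
}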