mm/hotplug: mark memory hotplug code in page_alloc.c as __meminit

Mark functions used by both boot and memory hotplug as __meminit to reduce
memory footprint when memory hotplug is disabled.
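
As background, __meminit changes nothing at run time by itself: it only places
the function in a dedicated section that the linker script either keeps (memory
hotplug enabled) or folds into init memory, which is freed after boot. A
simplified sketch of the mechanism, paraphrased from include/linux/init.h and
include/asm-generic/vmlinux.lds.h of this era (exact attributes vary by kernel
version):

	/* Functions tagged __meminit are collected in .meminit.text
	 * (simplified; the real definition also carries __cold/notrace). */
	#define __meminit	__section(.meminit.text)

	/* The linker script then decides the section's fate: kept for the
	 * kernel's lifetime when hotplug may need it later, discarded
	 * together with init memory otherwise. */
	#ifdef CONFIG_MEMORY_HOTPLUG
	#define MEM_KEEP(sec)		*(.mem##sec)
	#define MEM_DISCARD(sec)
	#else
	#define MEM_KEEP(sec)
	#define MEM_DISCARD(sec)	*(.mem##sec)
	#endif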

Also guard zone_pcp_update() with CONFIG_MEMORY_HOTPLUG because it's only
used by memory hotplug code.
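
For reference, zone_pcp_update() resizes the per-cpu pagesets with the whole
machine quiesced. The stop_machine() interface it relies on (declared in
include/linux/stop_machine.h) runs the callback on one CPU while every other
online CPU spins with interrupts disabled, so nothing touches the pagesets
mid-update:

	/* Signature of this era; a NULL cpumask means "any CPU may run fn".
	 * Returns fn's return value. */
	int stop_machine(int (*fn)(void *), void *data,
			 const struct cpumask *cpus);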

Signed-off-by: Jiang Liu <liuj97@gmail.com>
Cc: Wei Wang <Bessel.Wang@huawei.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Keping Chen <chenkeping@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 4ed7e02222 (parent 340175b7d1)
Jiang Liu, 2012-07-31 16:43:35 -0700, committed by Linus Torvalds
1 file changed, 34 insertions(+), 32 deletions(-)

@@ -3411,7 +3411,7 @@ static void setup_zone_pageset(struct zone *zone);
 DEFINE_MUTEX(zonelists_mutex);
 
 /* return values int ....just for stop_machine() */
-static __init_refok int __build_all_zonelists(void *data)
+static int __build_all_zonelists(void *data)
 {
 	int nid;
 	int cpu;
@@ -3755,7 +3755,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int zone_batchsize(struct zone *zone)
+static int __meminit zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
 	int batch;
@@ -3837,7 +3837,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 		pcp->batch = PAGE_SHIFT * 8;
 }
 
-static void setup_zone_pageset(struct zone *zone)
+static void __meminit setup_zone_pageset(struct zone *zone)
 {
 	int cpu;
 
@@ -3910,33 +3910,6 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 	return 0;
 }
 
-static int __zone_pcp_update(void *data)
-{
-	struct zone *zone = data;
-	int cpu;
-	unsigned long batch = zone_batchsize(zone), flags;
-
-	for_each_possible_cpu(cpu) {
-		struct per_cpu_pageset *pset;
-		struct per_cpu_pages *pcp;
-
-		pset = per_cpu_ptr(zone->pageset, cpu);
-		pcp = &pset->pcp;
-
-		local_irq_save(flags);
-		if (pcp->count > 0)
-			free_pcppages_bulk(zone, pcp->count, pcp);
-		setup_pageset(pset, batch);
-		local_irq_restore(flags);
-	}
-	return 0;
-}
-
-void zone_pcp_update(struct zone *zone)
-{
-	stop_machine(__zone_pcp_update, zone, NULL);
-}
-
 static __meminit void zone_pcp_init(struct zone *zone)
 {
 	/*
@@ -3952,7 +3925,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
 					 zone_batchsize(zone));
 }
 
-__meminit int init_currently_empty_zone(struct zone *zone,
+int __meminit init_currently_empty_zone(struct zone *zone,
 					unsigned long zone_start_pfn,
 					unsigned long size,
 					enum memmap_context context)
@@ -4765,7 +4738,7 @@ out:
 }
 
 /* Any regular memory on that node ? */
-static void check_for_regular_memory(pg_data_t *pgdat)
+static void __init check_for_regular_memory(pg_data_t *pgdat)
 {
 #ifdef CONFIG_HIGHMEM
 	enum zone_type zone_type;
@@ -5893,6 +5866,35 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
 }
 #endif
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int __meminit __zone_pcp_update(void *data)
+{
+	struct zone *zone = data;
+	int cpu;
+	unsigned long batch = zone_batchsize(zone), flags;
+
+	for_each_possible_cpu(cpu) {
+		struct per_cpu_pageset *pset;
+		struct per_cpu_pages *pcp;
+
+		pset = per_cpu_ptr(zone->pageset, cpu);
+		pcp = &pset->pcp;
+
+		local_irq_save(flags);
+		if (pcp->count > 0)
+			free_pcppages_bulk(zone, pcp->count, pcp);
+		setup_pageset(pset, batch);
+		local_irq_restore(flags);
+	}
+	return 0;
+}
+
+void __meminit zone_pcp_update(struct zone *zone)
+{
+	stop_machine(__zone_pcp_update, zone, NULL);
+}
+#endif
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 void zone_pcp_reset(struct zone *zone)
 {