mm/hugetlb: get rid of NODEMASK_ALLOC

NODEMASK_ALLOC is used to allocate a nodemask bitmap; it first determines
whether the bitmap should live on the stack or be allocated dynamically,
depending on NODES_SHIFT.  Right now, it takes the dynamic path whenever
the nodemask_t is larger than 32 bytes.
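
For reference, the macro's definition in include/linux/nodemask.h looks
roughly like this (paraphrased sketch, not copied verbatim):

    #if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
    #define NODEMASK_ALLOC(type, name, gfp_flags)  \
                    type *name = kmalloc(sizeof(*name), gfp_flags)
    #define NODEMASK_FREE(m)                       kfree(m)
    #else
    #define NODEMASK_ALLOC(type, name, gfp_flags)  type _##name, *name = &_##name
    #define NODEMASK_FREE(m)                       do {} while (0)
    #endif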

Although we could bump that threshold to a more reasonable value, the
largest a nodemask_t can get is 128 bytes, and since
__nr_hugepages_store_common() is called from a rather shallow call stack,
we can simply get rid of the NODEMASK_ALLOC call here and keep
nodes_allowed on the stack.
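
A quick sanity check of that upper bound, assuming the usual Kconfig cap
of CONFIG_NODES_SHIFT <= 10:

    /* nodemask_t is a bitmap carrying one bit per possible node */
    typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;

    /*
     * MAX_NUMNODES       = 1 << NODES_SHIFT = 1 << 10 = 1024 bits
     * sizeof(nodemask_t) = 1024 / 8         = 128 bytes
     */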

This reduces some code churn and complexity.

Link: http://lkml.kernel.org/r/20190402133415.21983-1-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Alex Ghiti <alex@ghiti.fr>
Cc: David Rientjes <rientjes@google.com>
Cc: Jing Xiangfeng <jingxiangfeng@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

commit 2d0adf7e0d (parent fd875dca7c)
Oscar Salvador, 2019-05-13 17:19:23 -07:00; committed by Linus Torvalds
1 file changed, 11 insertions(+), 25 deletions(-)

@@ -2448,44 +2448,30 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 					   unsigned long count, size_t len)
 {
 	int err;
-	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
+	nodemask_t nodes_allowed, *n_mask;
 
-	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) {
-		err = -EINVAL;
-		goto out;
-	}
+	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+		return -EINVAL;
 
 	if (nid == NUMA_NO_NODE) {
 		/*
 		 * global hstate attribute
 		 */
 		if (!(obey_mempolicy &&
-		      init_nodemask_of_mempolicy(nodes_allowed))) {
-			NODEMASK_FREE(nodes_allowed);
-			nodes_allowed = &node_states[N_MEMORY];
-		}
-	} else if (nodes_allowed) {
+		      init_nodemask_of_mempolicy(&nodes_allowed)))
+			n_mask = &node_states[N_MEMORY];
+		else
+			n_mask = &nodes_allowed;
+	} else {
 		/*
 		 * Node specific request.  count adjustment happens in
 		 * set_max_huge_pages() after acquiring hugetlb_lock.
 		 */
-		init_nodemask_of_node(nodes_allowed, nid);
-	} else {
-		/*
-		 * Node specific request, but we could not allocate the few
-		 * words required for a node mask.  We are unlikely to hit
-		 * this condition.  Since we can not pass down the appropriate
-		 * node mask, just return ENOMEM.
-		 */
-		err = -ENOMEM;
-		goto out;
+		init_nodemask_of_node(&nodes_allowed, nid);
+		n_mask = &nodes_allowed;
 	}
 
-	err = set_max_huge_pages(h, count, nid, nodes_allowed);
-
-out:
-	if (nodes_allowed != &node_states[N_MEMORY])
-		NODEMASK_FREE(nodes_allowed);
+	err = set_max_huge_pages(h, count, nid, n_mask);
 
 	return err ? err : len;
 }
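
The resulting pattern, an on-stack mask plus a pointer that selects either
it or a shared default, can be sketched in isolation as below.  This is a
minimal user-space analogue with illustrative names (mask_t, store_common),
not kernel code:

    #include <stdio.h>
    #include <string.h>

    #define MASK_BYTES 128  /* worst-case sizeof(nodemask_t) */

    typedef struct { unsigned char bits[MASK_BYTES]; } mask_t;

    static mask_t default_mask;  /* stands in for node_states[N_MEMORY] */

    static int store_common(int nid)
    {
            mask_t mask, *n_mask;  /* lives on the stack, as in the patch */

            if (nid < 0) {
                    /* "global" request: point at the shared default */
                    n_mask = &default_mask;
            } else {
                    /* node-specific request: build a single-node mask */
                    memset(&mask, 0, sizeof(mask));
                    mask.bits[nid / 8] |= 1u << (nid % 8);
                    n_mask = &mask;
            }

            /* pass n_mask down; every exit path is allocation-free */
            return n_mask == &mask;
    }

    int main(void)
    {
            printf("node-specific: %d, global: %d\n",
                   store_common(3), store_common(-1));
            return 0;
    }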