mm: maintain randomization of page free lists

When freeing a page with an order >= shuffle_page_order, randomly select
the front or back of the list for insertion.

While the mm tries to defragment physical pages into huge pages, this can
tend to make the page allocator more predictable over time.  Inject
front-back randomness to preserve the initial randomness established by
shuffle_free_memory() when the kernel was booted.

The overhead of this manipulation is constrained by applying it only to
MAX_ORDER-sized pages by default.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/154899812788.3165233.9066631950746578517.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Robert Elliott <elliott@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Dan Williams <dan.j.williams@intel.com>, 2019-05-14 15:41:35 -07:00
Committed by: Linus Torvalds
commit 97500a4a54 (parent b03641af68)
4 changed files with 56 additions and 2 deletions
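
Before the diffs, a minimal userspace C sketch of the idea the patch implements, for illustration only: each freed block is inserted at the front or the back of a doubly linked free list based on a coin flip. All names here (struct block, struct freelist, add_random) are invented for the sketch, and rand() is a non-cryptographic stand-in for the kernel's RNG; the real code below operates on struct page and struct free_area.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Hypothetical stand-ins for struct page / struct free_area. */
struct block {
	int id;
	struct block *next;
	struct block *prev;
};

struct freelist {
	struct block *head;
	struct block *tail;
};

static void add_front(struct freelist *l, struct block *b)
{
	b->prev = NULL;
	b->next = l->head;
	if (l->head)
		l->head->prev = b;
	else
		l->tail = b;
	l->head = b;
}

static void add_back(struct freelist *l, struct block *b)
{
	b->next = NULL;
	b->prev = l->tail;
	if (l->tail)
		l->tail->next = b;
	else
		l->head = b;
	l->tail = b;
}

/* The technique: a coin flip picks front or back insertion. */
static void add_random(struct freelist *l, struct block *b)
{
	if (rand() & 1)
		add_front(l, b);
	else
		add_back(l, b);
}

int main(void)
{
	struct freelist l = { NULL, NULL };
	struct block blocks[8];

	srand(time(NULL));
	for (int i = 0; i < 8; i++) {
		blocks[i].id = i;
		add_random(&l, &blocks[i]);
	}
	/* Allocation order is now a random interleaving of free order. */
	for (struct block *b = l.head; b; b = b->next)
		printf("%d ", b->id);
	printf("\n");
	return 0;
}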

include/linux/mmzone.h

@@ -116,6 +116,18 @@ static inline void add_to_free_area_tail(struct page *page, struct free_area *ar
 	area->nr_free++;
 }
 
+#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
+/* Used to preserve page allocation order entropy */
+void add_to_free_area_random(struct page *page, struct free_area *area,
+		int migratetype);
+#else
+static inline void add_to_free_area_random(struct page *page,
+		struct free_area *area, int migratetype)
+{
+	add_to_free_area(page, area, migratetype);
+}
+#endif
+
 /* Used for pages which are on another list */
 static inline void move_to_free_area(struct page *page, struct free_area *area,
 				     int migratetype)

mm/page_alloc.c

@@ -43,6 +43,7 @@
 #include <linux/mempolicy.h>
 #include <linux/memremap.h>
 #include <linux/stop_machine.h>
+#include <linux/random.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
 #include <linux/backing-dev.h>
@@ -958,7 +959,8 @@ done_merging:
 	 * so it's less likely to be used soon and more likely to be merged
 	 * as a higher order page
 	 */
-	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
+	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
+			&& !is_shuffle_order(order)) {
 		struct page *higher_page, *higher_buddy;
 		combined_pfn = buddy_pfn & pfn;
 		higher_page = page + (combined_pfn - pfn);
@@ -972,7 +974,12 @@ done_merging:
 		}
 	}
 
-	add_to_free_area(page, &zone->free_area[order], migratetype);
+	if (is_shuffle_order(order))
+		add_to_free_area_random(page, &zone->free_area[order],
+				migratetype);
+	else
+		add_to_free_area(page, &zone->free_area[order], migratetype);
 }
 
 /*

mm/shuffle.c

@@ -182,3 +182,26 @@ void __meminit __shuffle_free_memory(pg_data_t *pgdat)
 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
 		shuffle_zone(z);
 }
+
+void add_to_free_area_random(struct page *page, struct free_area *area,
+		int migratetype)
+{
+	static u64 rand;
+	static u8 rand_bits;
+
+	/*
+	 * The lack of locking is deliberate. If 2 threads race to
+	 * update the rand state it just adds to the entropy.
+	 */
+	if (rand_bits == 0) {
+		rand_bits = 64;
+		rand = get_random_u64();
+	}
+
+	if (rand & 1)
+		add_to_free_area(page, area, migratetype);
+	else
+		add_to_free_area_tail(page, area, migratetype);
+	rand_bits--;
+	rand >>= 1;
+}
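
Note how add_to_free_area_random() batches its entropy: it fetches 64 random bits once and then spends one bit per freed page, so get_random_u64() is called at most once per 64 insertions. A self-contained userspace sketch of that batching pattern, with rand() as a non-cryptographic stand-in for get_random_u64():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Batched coin flips: refill a 64-bit cache when it runs dry, then
 * shift one bit out per call.
 */
static bool coin_flip(void)
{
	static uint64_t cache;
	static unsigned int bits;
	bool heads;

	if (bits == 0) {
		/* Stand-in for get_random_u64(); not uniform, sketch only. */
		cache = ((uint64_t)rand() << 32) | (uint64_t)rand();
		bits = 64;
	}

	heads = cache & 1;
	cache >>= 1;
	bits--;
	return heads;
}

int main(void)
{
	int heads = 0;

	srand(1);
	for (int i = 0; i < 10000; i++)
		heads += coin_flip();
	printf("%d of 10000 flips were heads\n", heads);
	return 0;
}

As the comment in the kernel code notes, the static state is intentionally left unlocked; a cross-CPU race on rand/rand_bits only perturbs the bit stream further, which is harmless for this purpose.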

mm/shuffle.h

@@ -36,6 +36,13 @@ static inline void shuffle_zone(struct zone *z)
 		return;
 	__shuffle_zone(z);
 }
+
+static inline bool is_shuffle_order(int order)
+{
+	if (!static_branch_unlikely(&page_alloc_shuffle_key))
+		return false;
+	return order >= SHUFFLE_ORDER;
+}
 #else
 static inline void shuffle_free_memory(pg_data_t *pgdat)
 {
@@ -48,5 +55,10 @@ static inline void shuffle_zone(struct zone *z)
 static inline void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
 {
 }
+
+static inline bool is_shuffle_order(int order)
+{
+	return false;
+}
 #endif
 #endif /* _MM_SHUFFLE_H */
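
To summarize the gate these two definitions implement: randomized insertion applies only when shuffling was enabled and only to sufficiently large orders. A small userspace sketch of that behavior, where a plain bool models the page_alloc_shuffle_key static branch; the SHUFFLE_ORDER value of MAX_ORDER - 1 is an assumption about the kernel configuration this commit targets, not something shown in the diff above.

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER	11
#define SHUFFLE_ORDER	(MAX_ORDER - 1)	/* assumed, see lead-in */

static bool shuffle_enabled;	/* models the static key */

static bool is_shuffle_order(int order)
{
	if (!shuffle_enabled)
		return false;
	return order >= SHUFFLE_ORDER;
}

int main(void)
{
	shuffle_enabled = true;
	for (int order = 0; order < MAX_ORDER; order++)
		printf("order %2d: %s\n", order,
		       is_shuffle_order(order) ? "randomized" : "normal");
	return 0;
}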