mm: properly separate the bloated ptl from the regular case

Use kernel/bounds.c to convert the build-time spinlock_t size check into
a preprocessor symbol and apply that to properly separate the page::ptl
situation.
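
For reference, the kernel/bounds.c mechanism works by compiling that file
to assembly only and scraping constants back out of the .s output: the
DEFINE() macro from <linux/kbuild.h> plants each value as a text marker,
which kbuild turns into include/generated/bounds.h. A rough sketch of the
macro, paraphrased from <linux/kbuild.h> of this era rather than taken
from this patch:

	/* Emits a "->SYM VALUE" marker into the generated assembly; a
	 * kbuild sed script later rewrites each such marker into
	 * "#define SYM VALUE" in the generated header. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))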

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 539edb5846
parent 49076ec2cc
Peter Zijlstra, 2013-11-14 14:31:52 -08:00, committed by Linus Torvalds
4 changed files with 25 additions and 21 deletions

include/linux/mm.h

@@ -1317,27 +1317,29 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
 #if USE_SPLIT_PTE_PTLOCKS
-bool __ptlock_alloc(struct page *page);
-void __ptlock_free(struct page *page);
+#if BLOATED_SPINLOCKS
+extern bool ptlock_alloc(struct page *page);
+extern void ptlock_free(struct page *page);
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+	return page->ptl;
+}
+#else /* BLOATED_SPINLOCKS */
 static inline bool ptlock_alloc(struct page *page)
 {
-	if (sizeof(spinlock_t) > sizeof(page->ptl))
-		return __ptlock_alloc(page);
 	return true;
 }
+
 static inline void ptlock_free(struct page *page)
 {
-	if (sizeof(spinlock_t) > sizeof(page->ptl))
-		__ptlock_free(page);
 }
 
 static inline spinlock_t *ptlock_ptr(struct page *page)
 {
-	if (sizeof(spinlock_t) > sizeof(page->ptl))
-		return (spinlock_t *) page->ptl;
-	else
-		return (spinlock_t *) &page->ptl;
+	return &page->ptl;
 }
+#endif /* BLOATED_SPINLOCKS */
 
 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
@@ -1354,7 +1356,7 @@ static inline bool ptlock_init(struct page *page)
 	 * slab code uses page->slab_cache and page->first_page (for tail
 	 * pages), which share storage with page->ptl.
 	 */
-	VM_BUG_ON(page->ptl);
+	VM_BUG_ON(*(unsigned long *)&page->ptl);
 	if (!ptlock_alloc(page))
 		return false;
 	spin_lock_init(ptlock_ptr(page));
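
Either way, callers never touch page->ptl directly; they go through
ptlock_ptr()/pte_lockptr(), so the BLOATED_SPINLOCKS split is invisible
to them. A hypothetical caller, just to show the pattern:

	spinlock_t *ptl = pte_lockptr(mm, pmd);	/* same call on both paths */
	spin_lock(ptl);
	/* ... operate on the page table page ... */
	spin_unlock(ptl);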

include/linux/mm_types.h

@@ -147,10 +147,11 @@ struct page {
 						 * system if PG_buddy is set.
 						 */
 #if USE_SPLIT_PTE_PTLOCKS
-		unsigned long ptl; /* It's spinlock_t if it fits to long,
-				    * otherwise it's pointer to dynamicaly
-				    * allocated spinlock_t.
-				    */
+#if BLOATED_SPINLOCKS
+		spinlock_t *ptl;
+#else
+		spinlock_t ptl;
+#endif
 #endif
 		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		struct page *first_page;	/* Compound tail pages */
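
The bloated case is real whenever debugging options grow the lock: a bare
spinlock_t wraps a 4-byte arch lock, but CONFIG_DEBUG_SPINLOCK and
CONFIG_DEBUG_LOCK_ALLOC add owner-tracking fields and a lockdep_map. A
simplified, flattened sketch of why sizeof(spinlock_t) can exceed
sizeof(int) (the real definition nests raw_spinlock_t inside a union):

	typedef struct spinlock {
		arch_spinlock_t raw_lock;	/* 4 bytes on most arches */
	#ifdef CONFIG_DEBUG_SPINLOCK
		unsigned int magic, owner_cpu;	/* debug bookkeeping */
		void *owner;
	#endif
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		struct lockdep_map dep_map;	/* lockdep tracking state */
	#endif
	} spinlock_t;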

kernel/bounds.c

@@ -11,6 +11,7 @@
 #include <linux/kbuild.h>
 #include <linux/page_cgroup.h>
 #include <linux/log2.h>
+#include <linux/spinlock.h>
 
 void foo(void)
 {
@@ -21,5 +22,6 @@ void foo(void)
 #ifdef CONFIG_SMP
 	DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
 #endif
+	DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
 	/* End of constants */
 }
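
After a build, the scraped markers land in include/generated/bounds.h as
ordinary defines, roughly like this (values illustrative, not from a real
config):

	/* include/generated/bounds.h - generated by Kbuild, do not edit */
	#define NR_PAGEFLAGS 22 /* __NR_PAGEFLAGS */
	#define MAX_NR_ZONES 3 /* __MAX_NR_ZONES */
	#define NR_CPUS_BITS 3 /* ilog2(CONFIG_NR_CPUS) */
	#define BLOATED_SPINLOCKS 0 /* sizeof(spinlock_t) > sizeof(int) */

Because BLOATED_SPINLOCKS is now an ordinary preprocessor symbol, mm.h
and mm_types.h can select types and functions with #if, which the old
sizeof() comparison could not do.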

mm/memory.c

@@ -4271,21 +4271,20 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
-#if USE_SPLIT_PTE_PTLOCKS
-bool __ptlock_alloc(struct page *page)
+#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+bool ptlock_alloc(struct page *page)
 {
 	spinlock_t *ptl;
 
 	ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
 	if (!ptl)
 		return false;
-	page->ptl = (unsigned long)ptl;
+	page->ptl = ptl;
 	return true;
 }
 
-void __ptlock_free(struct page *page)
+void ptlock_free(struct page *page)
 {
-	if (sizeof(spinlock_t) > sizeof(page->ptl))
-		kfree((spinlock_t *)page->ptl);
+	kfree(page->ptl);
 }
 #endif
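
For completeness, these helpers are reached through the page-table page
constructor/destructor path; a simplified sketch of this era's callers in
mm.h (not part of the diff above):

	static inline bool pgtable_page_ctor(struct page *page)
	{
		inc_zone_page_state(page, NR_PAGETABLE);
		return ptlock_init(page);	/* kmallocs the lock iff BLOATED_SPINLOCKS */
	}

	static inline void pgtable_page_dtor(struct page *page)
	{
		pte_lock_deinit(page);		/* ends in ptlock_free() */
		dec_zone_page_state(page, NR_PAGETABLE);
	}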