mm: Add new func _install_special_mapping() to mmap.c

_install_special_mapping() is the new base function for
install_special_mapping(). This function returns a pointer to the
created VMA or an error code wrapped in an ERR_PTR().

This new function is needed by the vdso 32-bit support to map the
additional vvar and hpet pages into the 32-bit address space. This will
be done with io_remap_pfn_range() and remap_pfn_range(), which require a
vm_area_struct.
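
As a rough sketch (not part of this patch) of how a caller such as the
32-bit vdso setup code is expected to consume the new return value; the
map_vvar_page() helper and the vvar_pfn argument are made-up placeholders:

#include <linux/mm.h>
#include <linux/err.h>

/* Illustrative only: map_vvar_page() and vvar_pfn are hypothetical. */
static struct page *no_pages[] = { NULL };

static int map_vvar_page(struct mm_struct *mm, unsigned long addr,
			 unsigned long vvar_pfn)
{
	struct vm_area_struct *vma;

	/* Get the VMA back instead of just 0/-errno. */
	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD, no_pages);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * remap_pfn_range() needs the vm_area_struct, which the old
	 * install_special_mapping() never handed back to its callers.
	 */
	return remap_pfn_range(vma, vma->vm_start, vvar_pfn,
			       PAGE_SIZE, PAGE_READONLY);
}

Whether the 32-bit vdso code ends up using remap_pfn_range() or
io_remap_pfn_range(), and with which flags, is decided in the follow-up
patches; the sketch only shows why the VMA pointer has to be returned.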

Reviewed-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Stefani Seibold <stefani@seibold.net>
Link: http://lkml.kernel.org/r/1395094933-14252-3-git-send-email-stefani@seibold.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
2 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h

@@ -1750,6 +1750,9 @@ extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
 
 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
+extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
+				   unsigned long addr, unsigned long len,
+				   unsigned long flags, struct page **pages);
 extern int install_special_mapping(struct mm_struct *mm,
 				   unsigned long addr, unsigned long len,
 				   unsigned long flags, struct page **pages);

diff --git a/mm/mmap.c b/mm/mmap.c

@@ -2918,7 +2918,7 @@ static const struct vm_operations_struct special_mapping_vmops = {
  * The array pointer and the pages it points to are assumed to stay alive
  * for as long as this mapping might exist.
  */
-int install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
 			    unsigned long addr, unsigned long len,
 			    unsigned long vm_flags, struct page **pages)
 {
@@ -2927,7 +2927,7 @@ int install_special_mapping(struct mm_struct *mm,
 
 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (unlikely(vma == NULL))
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 	vma->vm_mm = mm;
@@ -2948,11 +2948,23 @@ int install_special_mapping(struct mm_struct *mm,
 
 	perf_event_mmap(vma);
 
-	return 0;
+	return vma;
 
 out:
 	kmem_cache_free(vm_area_cachep, vma);
-	return ret;
+	return ERR_PTR(ret);
+}
+
+int install_special_mapping(struct mm_struct *mm,
+			    unsigned long addr, unsigned long len,
+			    unsigned long vm_flags, struct page **pages)
+{
+	struct vm_area_struct *vma = _install_special_mapping(mm,
+		addr, len, vm_flags, pages);
+
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+	return 0;
 }
 
 static DEFINE_MUTEX(mm_all_locks_mutex);