mremap: don't leak new_vma if f_op->mremap() fails

move_vma() can't just return if f_op->mremap() fails; we should unmap the new
vma, as we do when move_page_tables() fails.  To avoid code duplication, this
patch moves the "move entries back" code under the new "if (err)" branch.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Oleg Nesterov on 2015-09-04 15:48:01 -07:00; committed by Linus Torvalds
parent 31aafb45f4
commit df1eab303c


@@ -276,6 +276,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
 				     need_rmap_locks);
 	if (moved_len < old_len) {
+		err = -ENOMEM;
+	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
+		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+	}
+
+	if (unlikely(err)) {
 		/*
 		 * On error, move entries back from new area to old,
 		 * which will succeed since page tables still there,
@@ -286,16 +292,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
-		new_addr = -ENOMEM;
+		new_addr = err;
 	} else {
-		if (vma->vm_file && vma->vm_file->f_op->mremap) {
-			err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
-			if (err < 0) {
-				move_page_tables(new_vma, new_addr, vma,
-						 old_addr, moved_len, true);
-				return err;
-			}
-		}
 		arch_remap(mm, old_addr, old_addr + old_len,
 			   new_addr, new_addr + new_len);
 	}
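
For reference, here is roughly how the error path of move_vma() reads once the
patch is applied. This is a sketch reconstructed from the two hunks above; the
unchanged lines that fall between the hunks (the rest of the comment and the
move_page_tables() call that moves the entries back) are elided rather than
guessed at.

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * ...
		 */
		/* ... unchanged lines between the hunks elided ... */
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;		/* -ENOMEM or the f_op->mremap() error */
	} else {
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

Both failure sources now set err and fall into the same rollback branch, so a
failing f_op->mremap() can no longer return early and leak new_vma; its error
code is propagated through new_addr just as -ENOMEM was before.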