binder: Don't modify VMA bounds in ->mmap handler

binder_mmap() tries to prevent the creation of overly big binder mappings
by silently truncating the size of the VMA to 4MiB. However, this violates
the API contract of mmap(). If userspace attempts to create a large binder
VMA, and later attempts to unmap that VMA, it will call munmap() on a range
beyond the end of the VMA, which may have been allocated to another VMA in
the meantime. This can lead to userspace memory corruption.

The following sequence of calls leads to a segfault without this commit:

#include <err.h>
#include <fcntl.h>
#include <sys/mman.h>

int main(void) {
  int binder_fd = open("/dev/binder", O_RDWR);
  if (binder_fd == -1) err(1, "open binder");
  /* Request an 8MiB binder mapping; the kernel silently shrinks the VMA to 4MiB. */
  void *binder_mapping = mmap(NULL, 0x800000UL, PROT_READ, MAP_SHARED,
                              binder_fd, 0);
  if (binder_mapping == MAP_FAILED) err(1, "mmap binder");
  /* A later mapping can end up inside the 4MiB that were trimmed off. */
  void *data_mapping = mmap(NULL, 0x400000UL, PROT_READ|PROT_WRITE,
                            MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (data_mapping == MAP_FAILED) err(1, "mmap data");
  /* Unmapping the full 8MiB range also tears down data_mapping... */
  munmap(binder_mapping, 0x800000UL);
  /* ...so this write faults. */
  *(char*)data_mapping = 1;
  return 0;
}

Cc: stable@vger.kernel.org
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Todd Kjos <tkjos@google.com>
Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
Link: https://lore.kernel.org/r/20191016150119.154756-1-jannh@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit 45d02f79b5 (parent 4f5cafb5cb)
2 changed files with 4 additions and 9 deletions

drivers/android/binder.c

@@ -97,10 +97,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
 #define SZ_1K 0x400
 #endif
 
-#ifndef SZ_4M
-#define SZ_4M 0x400000
-#endif
-
 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
 
 enum {
@@ -5177,9 +5173,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (proc->tsk != current->group_leader)
 		return -EINVAL;
 
-	if ((vma->vm_end - vma->vm_start) > SZ_4M)
-		vma->vm_end = vma->vm_start + SZ_4M;
-
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
 		     __func__, proc->pid, vma->vm_start, vma->vm_end,

drivers/android/binder_alloc.c

@@ -22,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
+#include <linux/sizes.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	alloc->buffer = (void __user *)vma->vm_start;
 	mutex_unlock(&binder_alloc_mmap_lock);
 
-	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+				   SZ_4M);
+	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
 			       sizeof(alloc->pages[0]),
 			       GFP_KERNEL);
 	if (alloc->pages == NULL) {
@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		failure_string = "alloc page array";
 		goto err_alloc_pages_failed;
 	}
-	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer) {
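
Taken together, the two files now leave the VMA at exactly the size userspace requested, so a later munmap() of that size stays within the mapping, and only binder's internal accounting is capped at 4MiB. A condensed sketch of the resulting flow in binder_alloc_mmap_handler() (simplified from the hunks above; locking and error handling omitted):

	/* The VMA is left untouched; only binder's bookkeeping is capped. */
	alloc->buffer = (void __user *)vma->vm_start;
	alloc->buffer_size = min_t(unsigned long,
				   vma->vm_end - vma->vm_start, SZ_4M);
	/* The page array is sized from the capped buffer_size, not the VMA. */
	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
			       sizeof(alloc->pages[0]), GFP_KERNEL);

Any address range beyond the 4MiB cap is simply never used by binder, while the mapping itself still matches what the caller asked for.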