mm: replace get_user_pages() write/force parameters with gup_flags
commit 768ae309a9
upstream. This removes the 'write' and 'force' from get_user_pages() and replaces them with 'gup_flags' to make the use of FOLL_FORCE explicit in callers as use of this flag can result in surprising behaviour (and hence bugs) within the mm subsystem. Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com> Acked-by: Christian König <christian.koenig@amd.com> Acked-by: Jesper Nilsson <jesper.nilsson@axis.com> Acked-by: Michal Hocko <mhocko@suse.com> Reviewed-by: Jan Kara <jack@suse.cz> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> [bwh: Backported to 4.4: - Drop changes in rapidio, vchiq, goldfish - Keep the "write" variable in amdgpu_ttm_tt_pin_userptr() as it's still needed - Also update calls from various other places that now use get_user_pages_remote() upstream, which were updated there by commit 9beae1ea89
"mm: replace get_user_pages_remote() write/force ..." - Also update calls from hfi1 and ipath - Adjust context] Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
3ec22a6bce
commit
8e50b8b07f
|
@ -2724,7 +2724,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
|
||||||
(unsigned long int)(oper.indata + prev_ix),
|
(unsigned long int)(oper.indata + prev_ix),
|
||||||
noinpages,
|
noinpages,
|
||||||
0, /* read access only for in data */
|
0, /* read access only for in data */
|
||||||
0, /* no force */
|
|
||||||
inpages,
|
inpages,
|
||||||
NULL);
|
NULL);
|
||||||
|
|
||||||
|
@ -2740,8 +2739,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
|
||||||
current->mm,
|
current->mm,
|
||||||
(unsigned long int)oper.cipher_outdata,
|
(unsigned long int)oper.cipher_outdata,
|
||||||
nooutpages,
|
nooutpages,
|
||||||
1, /* write access for out data */
|
FOLL_WRITE, /* write access for out data */
|
||||||
0, /* no force */
|
|
||||||
outpages,
|
outpages,
|
||||||
NULL);
|
NULL);
|
||||||
up_read(&current->mm->mmap_sem);
|
up_read(&current->mm->mmap_sem);
|
||||||
|
|
|
@ -143,7 +143,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = get_user_pages(current, current->mm, virt_addr,
|
ret = get_user_pages(current, current->mm, virt_addr,
|
||||||
1, VM_READ, 0, NULL, NULL);
|
1, FOLL_WRITE, NULL, NULL);
|
||||||
if (ret<=0) {
|
if (ret<=0) {
|
||||||
#ifdef ERR_INJ_DEBUG
|
#ifdef ERR_INJ_DEBUG
|
||||||
printk("Virtual address %lx is not existing.\n",virt_addr);
|
printk("Virtual address %lx is not existing.\n",virt_addr);
|
||||||
|
|
|
@ -536,10 +536,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
|
||||||
{
|
{
|
||||||
long gup_ret;
|
long gup_ret;
|
||||||
int nr_pages = 1;
|
int nr_pages = 1;
|
||||||
int force = 0;
|
|
||||||
|
|
||||||
gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
|
gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
|
||||||
nr_pages, write, force, NULL, NULL);
|
nr_pages, write ? FOLL_WRITE : 0, NULL, NULL);
|
||||||
/*
|
/*
|
||||||
* get_user_pages() returns number of pages gotten.
|
* get_user_pages() returns number of pages gotten.
|
||||||
* 0 means we failed to fault in and get anything,
|
* 0 means we failed to fault in and get anything,
|
||||||
|
|
|
@ -496,9 +496,13 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
|
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
|
||||||
|
unsigned int flags = 0;
|
||||||
enum dma_data_direction direction = write ?
|
enum dma_data_direction direction = write ?
|
||||||
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
|
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
|
||||||
|
|
||||||
|
if (write)
|
||||||
|
flags |= FOLL_WRITE;
|
||||||
|
|
||||||
if (current->mm != gtt->usermm)
|
if (current->mm != gtt->usermm)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
|
@ -519,7 +523,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
||||||
struct page **pages = ttm->pages + pinned;
|
struct page **pages = ttm->pages + pinned;
|
||||||
|
|
||||||
r = get_user_pages(current, current->mm, userptr, num_pages,
|
r = get_user_pages(current, current->mm, userptr, num_pages,
|
||||||
write, 0, pages, NULL);
|
flags, pages, NULL);
|
||||||
if (r < 0)
|
if (r < 0)
|
||||||
goto release_pages;
|
goto release_pages;
|
||||||
|
|
||||||
|
|
|
@ -581,13 +581,17 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
|
||||||
pvec = drm_malloc_ab(npages, sizeof(struct page *));
|
pvec = drm_malloc_ab(npages, sizeof(struct page *));
|
||||||
if (pvec != NULL) {
|
if (pvec != NULL) {
|
||||||
struct mm_struct *mm = obj->userptr.mm->mm;
|
struct mm_struct *mm = obj->userptr.mm->mm;
|
||||||
|
unsigned int flags = 0;
|
||||||
|
|
||||||
|
if (!obj->userptr.read_only)
|
||||||
|
flags |= FOLL_WRITE;
|
||||||
|
|
||||||
down_read(&mm->mmap_sem);
|
down_read(&mm->mmap_sem);
|
||||||
while (pinned < npages) {
|
while (pinned < npages) {
|
||||||
ret = get_user_pages(work->task, mm,
|
ret = get_user_pages(work->task, mm,
|
||||||
obj->userptr.ptr + pinned * PAGE_SIZE,
|
obj->userptr.ptr + pinned * PAGE_SIZE,
|
||||||
npages - pinned,
|
npages - pinned,
|
||||||
!obj->userptr.read_only, 0,
|
flags,
|
||||||
pvec + pinned, NULL);
|
pvec + pinned, NULL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -557,7 +557,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
||||||
struct page **pages = ttm->pages + pinned;
|
struct page **pages = ttm->pages + pinned;
|
||||||
|
|
||||||
r = get_user_pages(current, current->mm, userptr, num_pages,
|
r = get_user_pages(current, current->mm, userptr, num_pages,
|
||||||
write, 0, pages, NULL);
|
write ? FOLL_WRITE : 0, pages, NULL);
|
||||||
if (r < 0)
|
if (r < 0)
|
||||||
goto release_pages;
|
goto release_pages;
|
||||||
|
|
||||||
|
|
|
@ -242,8 +242,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
|
||||||
ret = get_user_pages(current, current->mm,
|
ret = get_user_pages(current, current->mm,
|
||||||
(unsigned long)xfer->mem_addr,
|
(unsigned long)xfer->mem_addr,
|
||||||
vsg->num_pages,
|
vsg->num_pages,
|
||||||
(vsg->direction == DMA_FROM_DEVICE),
|
(vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
|
||||||
0, vsg->pages, NULL);
|
vsg->pages, NULL);
|
||||||
|
|
||||||
up_read(&current->mm->mmap_sem);
|
up_read(&current->mm->mmap_sem);
|
||||||
if (ret != vsg->num_pages) {
|
if (ret != vsg->num_pages) {
|
||||||
|
|
|
@ -95,6 +95,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
||||||
DEFINE_DMA_ATTRS(attrs);
|
DEFINE_DMA_ATTRS(attrs);
|
||||||
struct scatterlist *sg, *sg_list_start;
|
struct scatterlist *sg, *sg_list_start;
|
||||||
int need_release = 0;
|
int need_release = 0;
|
||||||
|
unsigned int gup_flags = FOLL_WRITE;
|
||||||
|
|
||||||
if (dmasync)
|
if (dmasync)
|
||||||
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
|
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
|
||||||
|
@ -177,6 +178,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
if (!umem->writable)
|
||||||
|
gup_flags |= FOLL_FORCE;
|
||||||
|
|
||||||
need_release = 1;
|
need_release = 1;
|
||||||
sg_list_start = umem->sg_head.sgl;
|
sg_list_start = umem->sg_head.sgl;
|
||||||
|
|
||||||
|
@ -184,7 +188,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
||||||
ret = get_user_pages(current, current->mm, cur_base,
|
ret = get_user_pages(current, current->mm, cur_base,
|
||||||
min_t(unsigned long, npages,
|
min_t(unsigned long, npages,
|
||||||
PAGE_SIZE / sizeof (struct page *)),
|
PAGE_SIZE / sizeof (struct page *)),
|
||||||
1, !umem->writable, page_list, vma_list);
|
gup_flags, page_list, vma_list);
|
||||||
|
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
|
@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
|
||||||
u64 off;
|
u64 off;
|
||||||
int j, k, ret = 0, start_idx, npages = 0;
|
int j, k, ret = 0, start_idx, npages = 0;
|
||||||
u64 base_virt_addr;
|
u64 base_virt_addr;
|
||||||
|
unsigned int flags = 0;
|
||||||
|
|
||||||
if (access_mask == 0)
|
if (access_mask == 0)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
|
||||||
goto out_put_task;
|
goto out_put_task;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (access_mask & ODP_WRITE_ALLOWED_BIT)
|
||||||
|
flags |= FOLL_WRITE;
|
||||||
|
|
||||||
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
|
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
|
||||||
k = start_idx;
|
k = start_idx;
|
||||||
|
|
||||||
|
@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
|
||||||
*/
|
*/
|
||||||
npages = get_user_pages(owning_process, owning_mm, user_virt,
|
npages = get_user_pages(owning_process, owning_mm, user_virt,
|
||||||
gup_num_pages,
|
gup_num_pages,
|
||||||
access_mask & ODP_WRITE_ALLOWED_BIT, 0,
|
flags, local_page_list, NULL);
|
||||||
local_page_list, NULL);
|
|
||||||
up_read(&owning_mm->mmap_sem);
|
up_read(&owning_mm->mmap_sem);
|
||||||
|
|
||||||
if (npages < 0)
|
if (npages < 0)
|
||||||
|
|
|
@ -472,8 +472,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
|
ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1,
|
||||||
pages, NULL);
|
FOLL_WRITE, pages, NULL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
|
|
@ -68,7 +68,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
|
||||||
for (got = 0; got < num_pages; got += ret) {
|
for (got = 0; got < num_pages; got += ret) {
|
||||||
ret = get_user_pages(current, current->mm,
|
ret = get_user_pages(current, current->mm,
|
||||||
start_page + got * PAGE_SIZE,
|
start_page + got * PAGE_SIZE,
|
||||||
num_pages - got, 1, 1,
|
num_pages - got,
|
||||||
|
FOLL_WRITE | FOLL_FORCE,
|
||||||
p + got, NULL);
|
p + got, NULL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto bail_release;
|
goto bail_release;
|
||||||
|
|
|
@ -113,6 +113,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
|
||||||
int flags;
|
int flags;
|
||||||
dma_addr_t pa;
|
dma_addr_t pa;
|
||||||
DEFINE_DMA_ATTRS(attrs);
|
DEFINE_DMA_ATTRS(attrs);
|
||||||
|
unsigned int gup_flags;
|
||||||
|
|
||||||
if (dmasync)
|
if (dmasync)
|
||||||
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
|
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
|
||||||
|
@ -140,6 +141,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
|
||||||
|
|
||||||
flags = IOMMU_READ | IOMMU_CACHE;
|
flags = IOMMU_READ | IOMMU_CACHE;
|
||||||
flags |= (writable) ? IOMMU_WRITE : 0;
|
flags |= (writable) ? IOMMU_WRITE : 0;
|
||||||
|
gup_flags = FOLL_WRITE;
|
||||||
|
gup_flags |= (writable) ? 0 : FOLL_FORCE;
|
||||||
cur_base = addr & PAGE_MASK;
|
cur_base = addr & PAGE_MASK;
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
||||||
|
@ -147,7 +150,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
|
||||||
ret = get_user_pages(current, current->mm, cur_base,
|
ret = get_user_pages(current, current->mm, cur_base,
|
||||||
min_t(unsigned long, npages,
|
min_t(unsigned long, npages,
|
||||||
PAGE_SIZE / sizeof(struct page *)),
|
PAGE_SIZE / sizeof(struct page *)),
|
||||||
1, !writable, page_list, NULL);
|
gup_flags, page_list, NULL);
|
||||||
|
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
|
@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
|
||||||
{
|
{
|
||||||
unsigned long first, last;
|
unsigned long first, last;
|
||||||
int err, rw = 0;
|
int err, rw = 0;
|
||||||
|
unsigned int flags = FOLL_FORCE;
|
||||||
|
|
||||||
dma->direction = direction;
|
dma->direction = direction;
|
||||||
switch (dma->direction) {
|
switch (dma->direction) {
|
||||||
|
@ -178,13 +179,15 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
|
||||||
if (NULL == dma->pages)
|
if (NULL == dma->pages)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
if (rw == READ)
|
||||||
|
flags |= FOLL_WRITE;
|
||||||
|
|
||||||
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
|
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
|
||||||
data, size, dma->nr_pages);
|
data, size, dma->nr_pages);
|
||||||
|
|
||||||
err = get_user_pages(current, current->mm,
|
err = get_user_pages(current, current->mm,
|
||||||
data & PAGE_MASK, dma->nr_pages,
|
data & PAGE_MASK, dma->nr_pages,
|
||||||
rw == READ, 1, /* force */
|
flags, dma->pages, NULL);
|
||||||
dma->pages, NULL);
|
|
||||||
|
|
||||||
if (err != dma->nr_pages) {
|
if (err != dma->nr_pages) {
|
||||||
dma->nr_pages = (err >= 0) ? err : 0;
|
dma->nr_pages = (err >= 0) ? err : 0;
|
||||||
|
|
|
@ -1398,8 +1398,7 @@ retry:
|
||||||
mm,
|
mm,
|
||||||
(u64)addr,
|
(u64)addr,
|
||||||
nr_pages,
|
nr_pages,
|
||||||
!!(prot & SCIF_PROT_WRITE),
|
(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
|
||||||
0,
|
|
||||||
pinned_pages->pages,
|
pinned_pages->pages,
|
||||||
NULL);
|
NULL);
|
||||||
up_write(&mm->mmap_sem);
|
up_write(&mm->mmap_sem);
|
||||||
|
|
|
@ -199,7 +199,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
|
||||||
*pageshift = PAGE_SHIFT;
|
*pageshift = PAGE_SHIFT;
|
||||||
#endif
|
#endif
|
||||||
if (get_user_pages
|
if (get_user_pages
|
||||||
(current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
|
(current, current->mm, vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
*paddr = page_to_phys(page);
|
*paddr = page_to_phys(page);
|
||||||
put_page(page);
|
put_page(page);
|
||||||
|
|
|
@ -85,7 +85,7 @@ static int __hfi1_get_user_pages(unsigned long start_page, size_t num_pages,
|
||||||
for (got = 0; got < num_pages; got += ret) {
|
for (got = 0; got < num_pages; got += ret) {
|
||||||
ret = get_user_pages(current, current->mm,
|
ret = get_user_pages(current, current->mm,
|
||||||
start_page + got * PAGE_SIZE,
|
start_page + got * PAGE_SIZE,
|
||||||
num_pages - got, 1, 1,
|
num_pages - got, FOLL_WRITE | FOLL_FORCE,
|
||||||
p + got, NULL);
|
p + got, NULL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto bail_release;
|
goto bail_release;
|
||||||
|
|
|
@ -72,7 +72,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
|
||||||
for (got = 0; got < num_pages; got += ret) {
|
for (got = 0; got < num_pages; got += ret) {
|
||||||
ret = get_user_pages(current, current->mm,
|
ret = get_user_pages(current, current->mm,
|
||||||
start_page + got * PAGE_SIZE,
|
start_page + got * PAGE_SIZE,
|
||||||
num_pages - got, 1, 1,
|
num_pages - got, FOLL_WRITE | FOLL_FORCE,
|
||||||
p + got, NULL);
|
p + got, NULL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto bail_release;
|
goto bail_release;
|
||||||
|
|
|
@ -246,8 +246,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
|
||||||
down_read(&current->mm->mmap_sem);
|
down_read(&current->mm->mmap_sem);
|
||||||
num_pinned = get_user_pages(current, current->mm,
|
num_pinned = get_user_pages(current, current->mm,
|
||||||
param.local_vaddr - lb_offset, num_pages,
|
param.local_vaddr - lb_offset, num_pages,
|
||||||
(param.source == -1) ? READ : WRITE,
|
(param.source == -1) ? 0 : FOLL_WRITE,
|
||||||
0, pages, NULL);
|
pages, NULL);
|
||||||
up_read(&current->mm->mmap_sem);
|
up_read(&current->mm->mmap_sem);
|
||||||
|
|
||||||
if (num_pinned != num_pages) {
|
if (num_pinned != num_pages) {
|
||||||
|
|
|
@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
|
||||||
{
|
{
|
||||||
struct page *page;
|
struct page *page;
|
||||||
int ret;
|
int ret;
|
||||||
|
unsigned int gup_flags = FOLL_FORCE;
|
||||||
|
|
||||||
#ifdef CONFIG_STACK_GROWSUP
|
#ifdef CONFIG_STACK_GROWSUP
|
||||||
if (write) {
|
if (write) {
|
||||||
|
@ -199,8 +200,12 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
ret = get_user_pages(current, bprm->mm, pos,
|
|
||||||
1, write, 1, &page, NULL);
|
if (write)
|
||||||
|
gup_flags |= FOLL_WRITE;
|
||||||
|
|
||||||
|
ret = get_user_pages(current, bprm->mm, pos, 1, gup_flags,
|
||||||
|
&page, NULL);
|
||||||
if (ret <= 0)
|
if (ret <= 0)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
|
|
@ -1199,7 +1199,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
struct vm_area_struct **vmas, int *nonblocking);
|
struct vm_area_struct **vmas, int *nonblocking);
|
||||||
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
unsigned long start, unsigned long nr_pages,
|
unsigned long start, unsigned long nr_pages,
|
||||||
int write, int force, struct page **pages,
|
unsigned int gup_flags, struct page **pages,
|
||||||
struct vm_area_struct **vmas);
|
struct vm_area_struct **vmas);
|
||||||
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
|
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
unsigned long start, unsigned long nr_pages,
|
unsigned long start, unsigned long nr_pages,
|
||||||
|
|
|
@ -299,7 +299,7 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
|
||||||
|
|
||||||
retry:
|
retry:
|
||||||
/* Read the page with vaddr into memory */
|
/* Read the page with vaddr into memory */
|
||||||
ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
|
ret = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, &vma);
|
||||||
if (ret <= 0)
|
if (ret <= 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
@ -1700,7 +1700,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
|
||||||
if (likely(result == 0))
|
if (likely(result == 0))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
|
result = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &page, NULL);
|
||||||
if (result < 0)
|
if (result < 0)
|
||||||
return result;
|
return result;
|
||||||
|
|
||||||
|
|
15
mm/gup.c
15
mm/gup.c
|
@ -854,18 +854,13 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
|
||||||
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
|
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
|
||||||
*/
|
*/
|
||||||
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
unsigned long start, unsigned long nr_pages, int write,
|
unsigned long start, unsigned long nr_pages,
|
||||||
int force, struct page **pages, struct vm_area_struct **vmas)
|
unsigned int gup_flags, struct page **pages,
|
||||||
|
struct vm_area_struct **vmas)
|
||||||
{
|
{
|
||||||
unsigned int flags = FOLL_TOUCH;
|
|
||||||
|
|
||||||
if (write)
|
|
||||||
flags |= FOLL_WRITE;
|
|
||||||
if (force)
|
|
||||||
flags |= FOLL_FORCE;
|
|
||||||
|
|
||||||
return __get_user_pages_locked(tsk, mm, start, nr_pages,
|
return __get_user_pages_locked(tsk, mm, start, nr_pages,
|
||||||
pages, vmas, NULL, false, flags);
|
pages, vmas, NULL, false,
|
||||||
|
gup_flags | FOLL_TOUCH);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(get_user_pages);
|
EXPORT_SYMBOL(get_user_pages);
|
||||||
|
|
||||||
|
|
|
@ -3715,6 +3715,10 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
{
|
{
|
||||||
struct vm_area_struct *vma;
|
struct vm_area_struct *vma;
|
||||||
void *old_buf = buf;
|
void *old_buf = buf;
|
||||||
|
unsigned int flags = FOLL_FORCE;
|
||||||
|
|
||||||
|
if (write)
|
||||||
|
flags |= FOLL_WRITE;
|
||||||
|
|
||||||
down_read(&mm->mmap_sem);
|
down_read(&mm->mmap_sem);
|
||||||
/* ignore errors, just check how much was successfully transferred */
|
/* ignore errors, just check how much was successfully transferred */
|
||||||
|
@ -3724,7 +3728,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
struct page *page = NULL;
|
struct page *page = NULL;
|
||||||
|
|
||||||
ret = get_user_pages(tsk, mm, addr, 1,
|
ret = get_user_pages(tsk, mm, addr, 1,
|
||||||
write, 1, &page, &vma);
|
flags, &page, &vma);
|
||||||
if (ret <= 0) {
|
if (ret <= 0) {
|
||||||
#ifndef CONFIG_HAVE_IOREMAP_PROT
|
#ifndef CONFIG_HAVE_IOREMAP_PROT
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -818,7 +818,7 @@ static int lookup_node(struct mm_struct *mm, unsigned long addr)
|
||||||
struct page *p;
|
struct page *p;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
|
err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, &p, NULL);
|
||||||
if (err >= 0) {
|
if (err >= 0) {
|
||||||
err = page_to_nid(p);
|
err = page_to_nid(p);
|
||||||
put_page(p);
|
put_page(p);
|
||||||
|
|
18
mm/nommu.c
18
mm/nommu.c
|
@ -184,18 +184,11 @@ finish_or_fault:
|
||||||
*/
|
*/
|
||||||
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
unsigned long start, unsigned long nr_pages,
|
unsigned long start, unsigned long nr_pages,
|
||||||
int write, int force, struct page **pages,
|
unsigned int gup_flags, struct page **pages,
|
||||||
struct vm_area_struct **vmas)
|
struct vm_area_struct **vmas)
|
||||||
{
|
{
|
||||||
int flags = 0;
|
return __get_user_pages(tsk, mm, start, nr_pages,
|
||||||
|
gup_flags, pages, vmas, NULL);
|
||||||
if (write)
|
|
||||||
flags |= FOLL_WRITE;
|
|
||||||
if (force)
|
|
||||||
flags |= FOLL_FORCE;
|
|
||||||
|
|
||||||
return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
|
|
||||||
NULL);
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(get_user_pages);
|
EXPORT_SYMBOL(get_user_pages);
|
||||||
|
|
||||||
|
@ -204,10 +197,7 @@ long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
unsigned int gup_flags, struct page **pages,
|
unsigned int gup_flags, struct page **pages,
|
||||||
int *locked)
|
int *locked)
|
||||||
{
|
{
|
||||||
int write = gup_flags & FOLL_WRITE;
|
return get_user_pages(tsk, mm, start, nr_pages, gup_flags,
|
||||||
int force = gup_flags & FOLL_FORCE;
|
|
||||||
|
|
||||||
return get_user_pages(tsk, mm, start, nr_pages, write, force,
|
|
||||||
pages, NULL);
|
pages, NULL);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(get_user_pages_locked);
|
EXPORT_SYMBOL(get_user_pages_locked);
|
||||||
|
|
|
@ -874,7 +874,8 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
|
||||||
}
|
}
|
||||||
/* Same with get_arg_page(bprm, pos, 0) in fs/exec.c */
|
/* Same with get_arg_page(bprm, pos, 0) in fs/exec.c */
|
||||||
#ifdef CONFIG_MMU
|
#ifdef CONFIG_MMU
|
||||||
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0)
|
if (get_user_pages(current, bprm->mm, pos, 1,
|
||||||
|
FOLL_FORCE, &page, NULL) <= 0)
|
||||||
return false;
|
return false;
|
||||||
#else
|
#else
|
||||||
page = bprm->page[pos / PAGE_SIZE];
|
page = bprm->page[pos / PAGE_SIZE];
|
||||||
|
|
Loading…
Reference in New Issue