kill iov_iter_copy_from_user()

All callers can use copy_page_from_iter(), and it actually simplifies
them.
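
The pattern change is the same in every caller: copy_page_from_iter()
advances the iterator itself by however many bytes it copied, so the
manual iov_iter_advance() call disappears. A minimal sketch of the
before/after shape (illustrative fragment with generic names, not taken
from any one of the files below):

	/* before: copy into the page, then advance the iterator by hand */
	copied = iov_iter_copy_from_user(page, &i, offset, bytes);
	iov_iter_advance(&i, copied);

	/* after: one call both copies and advances */
	copied = copy_page_from_iter(page, offset, bytes, &i);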

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro <viro@zeniv.linux.org.uk>
Date: 2014-04-10 20:54:51 -04:00
commit e7c24607b5
parent f6c0a1920e
5 changed files with 5 additions and 40 deletions

@@ -737,13 +737,12 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
 	left = len;
 	for (n = 0; n < num_pages; n++) {
 		size_t plen = min_t(size_t, left, PAGE_SIZE);
-		ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
+		ret = copy_page_from_iter(pages[n], 0, plen, &i);
 		if (ret != plen) {
 			ret = -EFAULT;
 			break;
 		}
 		left -= ret;
-		iov_iter_advance(&i, ret);
 	}
 	if (ret < 0) {

@@ -2444,11 +2444,10 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 		save_len = cur_len;
 		for (i = 0; i < nr_pages; i++) {
-			bytes = min_t(const size_t, cur_len, PAGE_SIZE);
-			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
-							 0, bytes);
+			bytes = min_t(size_t, cur_len, PAGE_SIZE);
+			copied = copy_page_from_iter(wdata->pages[i], 0, bytes,
+						     &it);
 			cur_len -= copied;
-			iov_iter_advance(&it, copied);
 			/*
 			 * If we didn't copy as much as we expected, then that
 			 * may mean we trod into an unmapped area. Stop copying

@@ -62,8 +62,6 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);

@@ -129,33 +129,6 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
-/*
- * This has the same side effects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap(page);
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
 void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
 	BUG_ON(i->count < bytes);

@@ -46,11 +46,7 @@ static int process_vm_rw_pages(struct page **pages,
 		copy = len;
 
 		if (vm_write) {
-			if (copy > iov_iter_count(iter))
-				copy = iov_iter_count(iter);
-			copied = iov_iter_copy_from_user(page, iter,
-							 offset, copy);
-			iov_iter_advance(iter, copied);
+			copied = copy_page_from_iter(page, offset, copy, iter);
 			set_page_dirty_lock(page);
 		} else {
 			copied = copy_page_to_iter(page, offset, copy, iter);
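
Note what the process_vm_rw_pages() hunk drops besides the advance: the
explicit clamp against iov_iter_count(). That is safe because
copy_page_from_iter() never copies past the iterator's remaining count.
A standalone userspace model of that contract (toy names, single-segment
only; a sketch of the semantics relied on here, not the kernel
implementation):

	#include <stdio.h>
	#include <string.h>

	/* Toy single-segment iterator; the real struct iov_iter is more general. */
	struct toy_iter {
		const char *buf;	/* current position in the source */
		size_t count;		/* bytes remaining */
	};

	/* Models the contract: copy at most min(bytes, i->count) into the
	 * "page", and advance the iterator by the amount actually copied. */
	static size_t toy_copy_page_from_iter(char *page, size_t offset,
					      size_t bytes, struct toy_iter *i)
	{
		if (bytes > i->count)
			bytes = i->count;	/* internal clamp: caller's check is redundant */
		memcpy(page + offset, i->buf, bytes);
		i->buf += bytes;		/* advance built in: no iov_iter_advance() */
		i->count -= bytes;
		return bytes;
	}

	int main(void)
	{
		char page[8] = "";
		struct toy_iter it = { "abcdef", 6 };

		/* Ask for more than the iterator holds; the helper clamps to 6. */
		size_t copied = toy_copy_page_from_iter(page, 0, sizeof(page) - 1, &it);
		printf("copied=%zu remaining=%zu page=%s\n", copied, it.count, page);
		return 0;
	}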