author    Al Viro <viro@zeniv.linux.org.uk>  2021-04-30 10:26:41 -0400
committer Al Viro <viro@zeniv.linux.org.uk>  2021-06-10 11:45:14 -0400
commit    f0b65f39ac505e8f1dcdaa165aa7b8c0bd6fd454 (patch)
tree      05ad20f84ee097a534132fa9b2df9feae6d4a7d2 /mm/filemap.c
parent    e4f8df86798aea60aff6cfff40252b709431f850 (diff)
iov_iter: replace iov_iter_copy_from_user_atomic() with iterator-advancing variant
The replacement is called copy_page_from_iter_atomic(); unlike the old primitive, callers do *not* need to call iov_iter_advance() after it. In cases where they end up consuming less than they had been given, they need to call iov_iter_revert() on everything they did not consume. That, however, needs to be done only on slow paths.

All in-tree callers converted. That also kills the last user of iterate_all_kinds().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
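A minimal sketch of the new calling convention (illustrative, not part of the patch; consume() is a hypothetical stand-in for whatever accepts the copied bytes, e.g. ->write_end() in the hunk below):

	size_t copied;
	long status;

	/*
	 * copy_page_from_iter_atomic() advances the iterator by however
	 * many bytes it actually copied, so the fast path needs no
	 * iov_iter_advance() afterwards.
	 */
	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
	status = consume(copied);	/* may accept fewer bytes, or fail */
	if (unlikely(status != copied)) {
		/* slow path: walk the iterator back over everything not consumed */
		iov_iter_revert(i, copied - max(status, 0L));
		if (unlikely(status < 0))
			return status;
	}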
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 0be24942bf8e..cf9de790f493 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3661,14 +3661,16 @@ again:
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
 
-		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 		flush_dcache_page(page);
 
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
 						page, fsdata);
-		if (unlikely(status < 0))
-			break;
-
+		if (unlikely(status != copied)) {
+			iov_iter_revert(i, copied - max(status, 0L));
+			if (unlikely(status < 0))
+				break;
+		}
 		cond_resched();
 
 		if (unlikely(status == 0)) {
@@ -3682,10 +3684,8 @@ again:
 			bytes = copied;
 			goto again;
 		}
-		copied = status;
-		iov_iter_advance(i, copied);
-		pos += copied;
-		written += copied;
+		pos += status;
+		written += status;
 
 		balance_dirty_pages_ratelimited(mapping);
 	} while (iov_iter_count(i));
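For reference, a worked example of the revert arithmetic in the first hunk (hypothetical numbers, not from the patch):

	/*
	 * iov_iter_revert(i, copied - max(status, 0L)) with copied = 4096:
	 *   status = 4096  -> status == copied, branch not taken, nothing reverted
	 *   status = 1024  -> revert 3072, the bytes ->write_end() did not accept
	 *   status = -EIO  -> max() clamps to 0, revert all 4096 before breaking out
	 * Either way the iterator ends up advanced by exactly the bytes the
	 * filesystem accepted, which is why pos and written can add status directly.
	 */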