author		Nick Piggin <npiggin@suse.de>	2007-10-16 01:24:53 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 09:42:54 -0700
commit		41cb8ac025dbbf6782eae10d231e7e2336ad3724 (patch)
tree		0070e26ee574338a4a3f56f574e1c126a4357c8e /mm/filemap.c
parent		902aaed0d983dfd459fcb2b678608d4584782200 (diff)
download	linux-41cb8ac025dbbf6782eae10d231e7e2336ad3724.tar.bz2
mm: revert KERNEL_DS buffered write optimisation
Revert the patch from Neil Brown to optimise NFSD writev handling.

Cc: Neil Brown <neilb@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
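For context (this note and the sketch below are not part of the commit), the comment being restored in the hunk describes copying from a user buffer that may be backed by the very pagecache page being written. A minimal userspace sketch of that aliasing, with an illustrative file name and a page-sized buffer, looks like this:

/*
 * Userspace sketch of the case the restored comment guards against:
 * the source buffer of a write() is an mmap() of the very page being
 * written, so faulting in the source can land on the page the kernel
 * is about to lock and copy into.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "selfwrite.tmp";	/* illustrative name */
	long pagesize = sysconf(_SC_PAGESIZE);

	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Give the file one page of data. */
	if (ftruncate(fd, pagesize) < 0) {
		perror("ftruncate");
		return 1;
	}

	/* Map that page; source buffer and write target now alias. */
	char *map = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(map, 'x', pagesize);

	/*
	 * write() back into the same offset from the mapping.  Copying
	 * the source may fault on 'map', which resolves to the same
	 * pagecache page the write is filling; prefaulting the source
	 * before locking the destination page is what avoids the
	 * deadlock the comment mentions.
	 */
	if (pwrite(fd, map, pagesize, 0) != pagesize)
		perror("pwrite");

	munmap(map, pagesize);
	close(fd);
	unlink(path);
	return 0;
}

In the kernel path below, fault_in_pages_readable(buf, bytes) touches the source buffer before __grab_cache_page() locks the destination page, which is exactly the ordering this revert restores for all writes.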
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	32
1 file changed, 13 insertions, 19 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 7989c44cb293..c504db18ac26 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1874,27 +1874,21 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		/* Limit the size of the copy to the caller's write size */
 		bytes = min(bytes, count);
 
-		/* We only need to worry about prefaulting when writes are from
-		 * user-space.  NFSd uses vfs_writev with several non-aligned
-		 * segments in the vector, and limiting to one segment a time is
-		 * a noticeable performance for re-write
+		/*
+		 * Limit the size of the copy to that of the current segment,
+		 * because fault_in_pages_readable() doesn't know how to walk
+		 * segments.
 		 */
-		if (!segment_eq(get_fs(), KERNEL_DS)) {
-			/*
-			 * Limit the size of the copy to that of the current
-			 * segment, because fault_in_pages_readable() doesn't
-			 * know how to walk segments.
-			 */
-			bytes = min(bytes, cur_iov->iov_len - iov_base);
+		bytes = min(bytes, cur_iov->iov_len - iov_base);
+
+		/*
+		 * Bring in the user page that we will copy from _first_.
+		 * Otherwise there's a nasty deadlock on copying from the
+		 * same page as we're writing to, without it being marked
+		 * up-to-date.
+		 */
+		fault_in_pages_readable(buf, bytes);
 
-			/*
-			 * Bring in the user page that we will copy from
-			 * _first_.  Otherwise there's a nasty deadlock on
-			 * copying from the same page as we're writing to,
-			 * without it being marked up-to-date.
-			 */
-			fault_in_pages_readable(buf, bytes);
-		}
 		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
 		if (!page) {
 			status = -ENOMEM;