author     Jens Axboe <jens.axboe@oracle.com>    2009-05-13 08:35:35 +0200
committer  Jens Axboe <jens.axboe@oracle.com>    2009-05-13 08:35:35 +0200
commit     4f23122858a27ba97444b9b37a066d83edebd4c8
tree       7ccfea2b6d606318c9812f0dc9c3aa39411ac456
parent     2b1ccc0ee918a653d0483fdad9dd6112ce8e9043
splice: fix repeated kmap()'s in default_file_splice_read()
We cannot reliably map more than one page at a time, or we risk deadlocking. Just allocate the pages from low mem instead.

Reported-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
 fs/splice.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
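For context (this sketch is not part of the commit, and the helper names below are made up for illustration): a GFP_HIGHUSER page may sit in highmem and has no permanent kernel mapping, so it must be kmap()'d before the kernel can address it and kunmap()'d afterwards; the kmap pool is a small shared resource, so holding one mapping per page across a whole loop, as the old code did, can exhaust it and deadlock. A GFP_USER page comes from lowmem, which is always mapped, so page_address() is valid for the page's entire lifetime and no mapping calls are needed.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Old pattern: a GFP_HIGHUSER page may have no permanent kernel
 * mapping, so it must be kmap()'d before the kernel touches it and
 * kunmap()'d afterwards.  Holding many such mappings at once, one per
 * page in a loop, can exhaust the shared kmap pool and deadlock.
 */
static int touch_highmem_page(void)
{
        struct page *page = alloc_page(GFP_HIGHUSER);
        void *kaddr;

        if (!page)
                return -ENOMEM;

        kaddr = kmap(page);
        memset(kaddr, 0, PAGE_SIZE);
        kunmap(page);

        __free_page(page);
        return 0;
}

/*
 * New pattern: a GFP_USER page comes from lowmem and is always mapped,
 * so page_address() is valid for its whole lifetime and no
 * kmap()/kunmap() pairing is needed, no matter how many pages are held.
 */
static int touch_lowmem_page(void)
{
        struct page *page = alloc_page(GFP_USER);

        if (!page)
                return -ENOMEM;

        memset(page_address(page), 0, PAGE_SIZE);

        __free_page(page);
        return 0;
}

Note that kmap() on a lowmem page simply returns page_address(), so the new code reads the same way as before minus the mapping bookkeeping.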
diff --git a/fs/splice.c b/fs/splice.c
index eefd96b1d7fb..c5e3c79b95a8 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -580,13 +580,13 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
         for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) {
                 struct page *page;
 
-                page = alloc_page(GFP_HIGHUSER);
+                page = alloc_page(GFP_USER);
                 error = -ENOMEM;
                 if (!page)
                         goto err;
 
                 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
-                vec[i].iov_base = (void __user *) kmap(page);
+                vec[i].iov_base = (void __user *) page_address(page);
                 vec[i].iov_len = this_len;
                 pages[i] = page;
                 spd.nr_pages++;
@@ -604,7 +604,6 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 
         nr_freed = 0;
         for (i = 0; i < spd.nr_pages; i++) {
-                kunmap(pages[i]);
                 this_len = min_t(size_t, vec[i].iov_len, res);
                 partial[i].offset = 0;
                 partial[i].len = this_len;
@@ -624,10 +623,9 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
         return res;
 
 err:
-        for (i = 0; i < spd.nr_pages; i++) {
-                kunmap(pages[i]);
+        for (i = 0; i < spd.nr_pages; i++)
                 __free_page(pages[i]);
-        }
+
         return error;
 }
 EXPORT_SYMBOL(default_file_splice_read);