| author | Nick Piggin <npiggin@suse.de> | 2007-03-27 08:55:08 +0200 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2007-03-27 08:55:08 +0200 |
| commit | 485ddb4b9741bafb70b22e5c1f9b4f37dc3e85bd (patch) | |
| tree | 9d666e849cdf9c495d446df242d87e798d4baec9 /fs/splice.c | |
| parent | 1ffb96c587fa2af0d690dc3548a4a781c477bfb7 (diff) | |
| download | linux-485ddb4b9741bafb70b22e5c1f9b4f37dc3e85bd.tar.bz2 | |
1/2 splice: dont steal
Stealing pages with splice is problematic because we cannot just insert
an uptodate page into the pagecache and hope the filesystem can take care
of it later.
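For concreteness, the path being dropped looked roughly like the sketch below, condensed from the removed pipe_to_file() hunk further down (the fallback to find_page and the error handling are trimmed):

```c
/*
 * Condensed sketch of the removed SPLICE_F_MOVE path in pipe_to_file()
 * (see the hunk below); fallback and error handling trimmed.
 */
if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
	/* Detach the pipe buffer's page so it can be reused directly. */
	if (buf->ops->steal(pipe, buf) == 0) {
		page = buf->page;	/* already uptodate and locked */
		if (add_to_page_cache(page, mapping, index, GFP_KERNEL) == 0) {
			page_cache_get(page);
			if (!(buf->flags & PIPE_BUF_FLAG_LRU))
				lru_cache_add(page);
			/*
			 * The pagecache now holds an uptodate page that the
			 * filesystem never prepared -- the problem described
			 * above.
			 */
		}
	}
}
```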
We also cannot just ClearPageUptodate, then hope prepare_write does not
write anything into the page, because I don't think prepare_write gives
that guarantee.
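To make that concern concrete, a hypothetical variant (not in this patch, and not in the kernel) would have to look something like the sketch below, with variables as in pipe_to_file(); the step it leans on is exactly the guarantee prepare_write does not give:

```c
/*
 * Hypothetical ClearPageUptodate variant -- shown only to illustrate the
 * paragraph above; this code is not part of the patch.
 */
if (buf->ops->steal(pipe, buf) == 0) {
	page = buf->page;	/* holds the spliced data */
	if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL) == 0) {
		ClearPageUptodate(page);
		/*
		 * Hope that ->prepare_write() leaves the page contents
		 * untouched for a full-page write.  Nothing in the API
		 * promises that: an implementation may zero or otherwise
		 * write into the page, destroying the spliced data before
		 * ->commit_write() runs.
		 */
		ret = mapping->a_ops->prepare_write(file, page, 0,
						    PAGE_CACHE_SIZE);
	}
}
```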
Remove support for SPLICE_F_MOVE for now. If we really want to bring it
back, we might be able to do so with the new filesystem buffered write
aops APIs I'm working on. If we really don't want to bring it back, then
we should decide that sooner rather than later, and remove the flag and
all the stealing infrastructure before anybody starts using it.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs/splice.c')
-rw-r--r-- | fs/splice.c | 101 |
1 files changed, 38 insertions, 63 deletions
```diff
diff --git a/fs/splice.c b/fs/splice.c
index 2fca6ebf4cc2..badc78ff1246 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -576,76 +576,51 @@ static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	if (this_len + offset > PAGE_CACHE_SIZE)
 		this_len = PAGE_CACHE_SIZE - offset;
 
-	/*
-	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
-	 * page.
-	 */
-	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
+find_page:
+	page = find_lock_page(mapping, index);
+	if (!page) {
+		ret = -ENOMEM;
+		page = page_cache_alloc_cold(mapping);
+		if (unlikely(!page))
+			goto out_ret;
+
 		/*
-		 * If steal succeeds, buf->page is now pruned from the
-		 * pagecache and we can reuse it. The page will also be
-		 * locked on successful return.
+		 * This will also lock the page
 		 */
-		if (buf->ops->steal(pipe, buf))
-			goto find_page;
-
-		page = buf->page;
-		if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
-			unlock_page(page);
-			goto find_page;
-		}
-
-		page_cache_get(page);
-
-		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
-			lru_cache_add(page);
-	} else {
-find_page:
-		page = find_lock_page(mapping, index);
-		if (!page) {
-			ret = -ENOMEM;
-			page = page_cache_alloc_cold(mapping);
-			if (unlikely(!page))
-				goto out_ret;
+		ret = add_to_page_cache_lru(page, mapping, index,
+				GFP_KERNEL);
+		if (unlikely(ret))
+			goto out;
+	}
 
-			/*
-			 * This will also lock the page
-			 */
-			ret = add_to_page_cache_lru(page, mapping, index,
-						GFP_KERNEL);
+	/*
+	 * We get here with the page locked. If the page is also
+	 * uptodate, we don't need to do more. If it isn't, we
+	 * may need to bring it in if we are not going to overwrite
+	 * the full page.
+	 */
+	if (!PageUptodate(page)) {
+		if (this_len < PAGE_CACHE_SIZE) {
+			ret = mapping->a_ops->readpage(file, page);
 			if (unlikely(ret))
 				goto out;
-		}
 
-		/*
-		 * We get here with the page locked. If the page is also
-		 * uptodate, we don't need to do more. If it isn't, we
-		 * may need to bring it in if we are not going to overwrite
-		 * the full page.
-		 */
-		if (!PageUptodate(page)) {
-			if (this_len < PAGE_CACHE_SIZE) {
-				ret = mapping->a_ops->readpage(file, page);
-				if (unlikely(ret))
-					goto out;
-
-				lock_page(page);
-
-				if (!PageUptodate(page)) {
-					/*
-					 * Page got invalidated, repeat.
-					 */
-					if (!page->mapping) {
-						unlock_page(page);
-						page_cache_release(page);
-						goto find_page;
-					}
-					ret = -EIO;
-					goto out;
+			lock_page(page);
+
+			if (!PageUptodate(page)) {
+				/*
+				 * Page got invalidated, repeat.
+				 */
+				if (!page->mapping) {
+					unlock_page(page);
+					page_cache_release(page);
+					goto find_page;
 				}
-			} else
-				SetPageUptodate(page);
-		}
+				ret = -EIO;
+				goto out;
+			}
+		} else
+			SetPageUptodate(page);
 	}
 
 	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
```
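The net effect, condensed from the new-side lines of the hunk above (error paths, relocking and refcounting trimmed), is that every spliced page now goes through the normal pagecache path, so the filesystem sees it via readpage/prepare_write before any data lands in it. SPLICE_F_MOVE itself is left defined but no longer changes pipe_to_file()'s behaviour, which is the "for now" the commit message refers to.

```c
/*
 * Condensed shape of pipe_to_file() after this change (sketch only;
 * see the hunk above for the real error handling and locking).
 */
page = find_lock_page(mapping, index);
if (!page) {
	page = page_cache_alloc_cold(mapping);
	ret = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
}

/* A partial write into a non-uptodate page must read the page in first. */
if (!PageUptodate(page) && this_len < PAGE_CACHE_SIZE)
	ret = mapping->a_ops->readpage(file, page);

ret = mapping->a_ops->prepare_write(file, page, offset, offset + this_len);
```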