| author | Maxim Patlasov <mpatlasov@parallels.com> | 2012-12-14 19:21:08 +0400 |
|---|---|---|
| committer | Miklos Szeredi <mszeredi@suse.cz> | 2013-04-17 21:50:59 +0200 |
| commit | bcba24ccdc82f7415154cf87226c2577cea13a5c (patch) | |
| tree | 10e9c4e70f82cab86bd3790cd7ccb6adccf5aff0 /fs/fuse | |
| parent | 36cf66ed9f871fc0d0911921fba5873df3ddb2dc (diff) | |
| download | linux-bcba24ccdc82f7415154cf87226c2577cea13a5c.tar.bz2 | |
fuse: enable asynchronous processing direct IO
In case of a synchronous DIO request (i.e. read(2) or write(2) on a file
opened with O_DIRECT), the patch submits fuse requests asynchronously, but
waits for their completion before returning from fuse_direct_IO().
In case of an asynchronous DIO request (i.e. libaio io_submit() on a file
opened with O_DIRECT), the patch submits fuse requests asynchronously and
returns -EIOCBQUEUED immediately.
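A condensed view of the two return paths, excerpted from the fuse_direct_IO() hunk in the diff below; the explanatory comments are annotations added here, not part of the patch:

```c
/* Sketch of the completion logic added to fuse_direct_IO() */
if (io->async) {
	/* drop the initial io->reqs reference taken at submission time */
	fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

	/* asynchronous caller (io_submit): hand control back right away */
	if (ret > 0 && !is_sync_kiocb(iocb))
		return -EIOCBQUEUED;

	/* synchronous caller (read(2)/write(2) on O_DIRECT): block until
	 * all underlying fuse requests complete */
	ret = wait_on_sync_kiocb(iocb);
} else {
	kfree(io);
}
```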
The only special case is an async DIO request that extends the file. Here the
patch falls back to the old behaviour, because we can't return -EIOCBQUEUED
and update i_size later without holding i_mutex, and we have no method to
wait on real async I/O requests.
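In code, the fallback is a single check before submission, taken from the same fuse_direct_IO() hunk below; when the kiocb is asynchronous and the write would extend i_size, io->async is simply cleared:

```c
/* A size-extending async write cannot return -EIOCBQUEUED, because
 * i_size would have to be updated later without i_mutex held and there
 * is no way to wait on real async I/O requests; submit it
 * synchronously instead. */
if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
	io->async = false;
```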
The patch also cleans up __fuse_direct_write(): it's better to update i_size
in its callers. Thanks to Brian for the suggestion.
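After the cleanup, __fuse_direct_write() only performs the I/O; the synchronous write path updates i_size itself while still holding i_mutex, as in the fuse_direct_write() hunk below:

```c
/* fuse_direct_write(): the caller now owns the i_size update */
mutex_lock(&inode->i_mutex);
res = __fuse_direct_write(&io, &iov, 1, ppos);
if (res > 0)
	fuse_write_update_size(inode, *ppos);
mutex_unlock(&inode->i_mutex);
```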
Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Diffstat (limited to 'fs/fuse')
| -rw-r--r-- | fs/fuse/file.c | 51 |
1 file changed, 44 insertions(+), 7 deletions(-)
```diff
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e207dcdf32c0..ba1d50369c24 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1366,11 +1366,8 @@ static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
 	ssize_t res;
 
 	res = generic_write_checks(file, ppos, &count, 0);
-	if (!res) {
+	if (!res)
 		res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
-		if (!io->async && res > 0)
-			fuse_write_update_size(inode, *ppos);
-	}
 
 	fuse_invalidate_attr(inode);
 
@@ -1391,6 +1388,8 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
 	/* Don't allow parallel writes to the same file */
 	mutex_lock(&inode->i_mutex);
 	res = __fuse_direct_write(&io, &iov, 1, ppos);
+	if (res > 0)
+		fuse_write_update_size(inode, *ppos);
 	mutex_unlock(&inode->i_mutex);
 
 	return res;
@@ -2360,23 +2359,61 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	ssize_t ret = 0;
 	struct file *file = NULL;
 	loff_t pos = 0;
+	struct inode *inode;
+	loff_t i_size;
+	size_t count = iov_length(iov, nr_segs);
 	struct fuse_io_priv *io;
 
 	file = iocb->ki_filp;
 	pos = offset;
+	inode = file->f_mapping->host;
+	i_size = i_size_read(inode);
 
-	io = kzalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
+	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
 	if (!io)
 		return -ENOMEM;
-
+	spin_lock_init(&io->lock);
+	io->reqs = 1;
+	io->bytes = -1;
+	io->size = 0;
+	io->offset = offset;
+	io->write = (rw == WRITE);
+	io->err = 0;
 	io->file = file;
+	/*
+	 * By default, we want to optimize all I/Os with async request
+	 * submission to the client filesystem.
+	 */
+	io->async = 1;
+	io->iocb = iocb;
+
+	/*
+	 * We cannot asynchronously extend the size of a file. We have no method
+	 * to wait on real async I/O requests, so we must submit this request
+	 * synchronously.
+	 */
+	if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
+		io->async = false;
 
 	if (rw == WRITE)
 		ret = __fuse_direct_write(io, iov, nr_segs, &pos);
 	else
 		ret = __fuse_direct_read(io, iov, nr_segs, &pos);
 
-	kfree(io);
+	if (io->async) {
+		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
+
+		/* we have a non-extending, async request, so return */
+		if (ret > 0 && !is_sync_kiocb(iocb))
+			return -EIOCBQUEUED;
+
+		ret = wait_on_sync_kiocb(iocb);
+	} else {
+		kfree(io);
+	}
+
+	if (rw == WRITE && ret > 0)
+		fuse_write_update_size(inode, pos);
 
 	return ret;
 }
```
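For illustration only, a minimal userspace sketch of the asynchronous path this patch enables: an O_DIRECT write submitted via libaio against a file on a FUSE mount. The mount path, file size, and 4096-byte alignment are assumptions made for the example, not requirements stated by the patch.

```c
/* Hypothetical libaio test: async O_DIRECT write on a FUSE-backed file.
 * Path, sizes and alignment are illustrative. Build with: gcc demo.c -laio */
#define _GNU_SOURCE
#include <fcntl.h>
#include <libaio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd;

	/* hypothetical file on a FUSE mount */
	fd = open("/mnt/fuse/testfile", O_RDWR | O_CREAT | O_DIRECT, 0644);
	if (fd < 0)
		return 1;

	/* pre-size the file so the write below does not extend i_size;
	 * an extending async write falls back to synchronous submission */
	if (ftruncate(fd, 4096))
		return 1;

	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 'x', 4096);

	if (io_setup(1, &ctx))
		return 1;

	/* with this patch, the kernel may return -EIOCBQUEUED here and
	 * complete the underlying fuse requests asynchronously */
	io_prep_pwrite(&cb, fd, buf, 4096, 0);
	if (io_submit(ctx, 1, cbs) != 1)
		return 1;

	/* reap the completion */
	io_getevents(ctx, 1, 1, &ev, NULL);

	io_destroy(ctx);
	close(fd);
	free(buf);
	return 0;
}
```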