author		Jeff Layton <jlayton@kernel.org>	2019-11-13 09:56:06 -0500
committer	Ilya Dryomov <idryomov@gmail.com>	2019-11-14 18:44:51 +0100
commit		6a81749ebe5f1b52d7eeb8a1031deb8d520f23e6 (patch)
tree		750b990337ef4e197b7da915acec97d8f00d3a4c /fs
parent		a81bc3102b4ffb885f34855d0133f862f915ab13 (diff)
download	linux-6a81749ebe5f1b52d7eeb8a1031deb8d520f23e6.tar.bz2
ceph: increment/decrement dio counter on async requests
Ceph can in some cases issue an async DIO request, in which case we can
end up calling ceph_end_io_direct before the I/O is actually complete.
That may allow buffered operations to proceed while DIO requests are
still in flight.

Fix this by incrementing the i_dio_count when issuing an async DIO
request, and decrementing it when tearing down the aio_req.

Fixes: 321fe13c9398 ("ceph: add buffered/direct exclusionary locking for reads and writes")
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
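As a rough illustration (not part of this patch), the i_dio_count helpers from
include/linux/fs.h pair up along these lines; the example_* functions below are
hypothetical placeholders, not ceph code:

/*
 * Minimal sketch, assuming the usual inode_dio_begin()/inode_dio_end()/
 * inode_dio_wait() helpers from include/linux/fs.h. The example_* names
 * are hypothetical and only stand in for the submission, completion and
 * buffered-I/O paths of a filesystem.
 */
#include <linux/fs.h>

static void example_submit_async_dio(struct inode *inode)
{
	inode_dio_begin(inode);	/* bump i_dio_count before the async requests go out */
	/* ... queue the async requests; completions fire later ... */
}

static void example_async_dio_complete(struct inode *inode)
{
	/* the last completion drops i_dio_count and wakes inode_dio_wait() callers */
	inode_dio_end(inode);
}

static void example_buffered_io(struct inode *inode)
{
	inode_dio_wait(inode);	/* block until all direct I/O has drained */
	/* ... safe to do page-cache I/O now ... */
}

In the diff below, the end of that pairing lands in ceph_aio_complete(), when the
last OSD request of the aio_req finishes, and the begin lands in
ceph_direct_read_write(), where the async OSD requests are queued.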
Diffstat (limited to 'fs')
-rw-r--r--	fs/ceph/file.c	4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 06efeaff3b57..8de633964dc3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -753,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode,
 	if (!atomic_dec_and_test(&aio_req->pending_reqs))
 		return;
 
+	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
+		inode_dio_end(inode);
+
 	ret = aio_req->error;
 	if (!ret)
 		ret = aio_req->total_len;
@@ -1091,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 					      CEPH_CAP_FILE_RD);
 
 		list_splice(&aio_req->osd_reqs, &osd_reqs);
+		inode_dio_begin(inode);
 		while (!list_empty(&osd_reqs)) {
 			req = list_first_entry(&osd_reqs,
 					       struct ceph_osd_request,