Diffstat (limited to 'fs')

 -rw-r--r--  fs/block_dev.c      | 58
 -rw-r--r--  fs/btrfs/Kconfig    |  1
 -rw-r--r--  fs/btrfs/disk-io.c  |  1
 -rw-r--r--  fs/btrfs/inode.c    | 24
 -rw-r--r--  fs/btrfs/volumes.c  | 10
 -rw-r--r--  fs/cifs/cifsfs.c    | 11
 -rw-r--r--  fs/cifs/cifsfs.h    |  2
 -rw-r--r--  fs/cifs/inode.c     | 16
 -rw-r--r--  fs/cifs/smb2file.c  | 18
 -rw-r--r--  fs/cifs/smb2ops.c   | 53
 -rw-r--r--  fs/cifs/smb2pdu.c   | 46
 -rw-r--r--  fs/cifs/smb2pdu.h   |  4
 -rw-r--r--  fs/cifs/smb2proto.h |  7
 -rw-r--r--  fs/io_uring.c       | 81
 -rw-r--r--  fs/open.c           | 19

 15 files changed, 291 insertions(+), 60 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4707dfff991b..c2a85b587922 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,15 +345,24 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct bio *bio;
bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+ bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
- int ret = 0;
+ gfp_t gfp;
+ ssize_t ret;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
+ if (nowait)
+ gfp = GFP_NOWAIT;
+ else
+ gfp = GFP_KERNEL;
+
+ bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
+ if (!bio)
+ return -EAGAIN;
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -375,7 +384,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
if (!is_poll)
blk_start_plug(&plug);
+ ret = 0;
for (;;) {
+ int err;
+
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9;
bio->bi_write_hint = iocb->ki_hint;
@@ -383,8 +395,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
- ret = bio_iov_iter_get_pages(bio, iter);
- if (unlikely(ret)) {
+ err = bio_iov_iter_get_pages(bio, iter);
+ if (unlikely(err)) {
+ if (!ret)
+ ret = err;
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
break;
@@ -399,6 +413,14 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
task_io_account_write(bio->bi_iter.bi_size);
}
+ /*
+ * Tell the underlying layer not to block on resource shortage.
+ * If we would have blocked, return the error inline instead of
+ * through the bio->bi_end_io() callback.
+ */
+ if (nowait)
+ bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
+
dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
@@ -412,6 +434,11 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
qc = submit_bio(bio);
+ if (qc == BLK_QC_T_EAGAIN) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
if (polled)
WRITE_ONCE(iocb->ki_cookie, qc);
@@ -432,8 +459,20 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
atomic_inc(&dio->ref);
}
- submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
+ qc = submit_bio(bio);
+ if (qc == BLK_QC_T_EAGAIN) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
+ ret += bio->bi_iter.bi_size;
+
+ bio = bio_alloc(gfp, nr_pages);
+ if (!bio) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
}
if (!is_poll)
@@ -453,13 +492,16 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
__set_current_state(TASK_RUNNING);
+out:
if (!ret)
ret = blk_status_to_errno(dio->bio.bi_status);
- if (likely(!ret))
- ret = dio->size;
bio_put(&dio->bio);
return ret;
+error:
+ if (!is_poll)
+ blk_finish_plug(&plug);
+ goto out;
}
static ssize_t
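For context on the block_dev.c hunks above: IOCB_NOWAIT is set when userspace submits I/O with RWF_NOWAIT, so the new -EAGAIN paths surface directly to the caller instead of blocking. A minimal userspace sketch, with a placeholder device path:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/sdX", O_RDONLY | O_DIRECT);  /* placeholder */
        if (fd < 0) { perror("open"); return 1; }

        void *buf;
        if (posix_memalign(&buf, 4096, 4096)) return 1;
        struct iovec iov = { .iov_base = buf, .iov_len = 4096 };

        /* With the patch, allocation or queue pressure on this nowait
         * read comes back as -EAGAIN here rather than blocking inside
         * __blkdev_direct_IO(). */
        ssize_t n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
        if (n < 0 && errno == EAGAIN)
            fprintf(stderr, "would block; retry from a worker\n");
        else if (n < 0)
            perror("preadv2");
        else
            printf("read %zd bytes\n", n);

        free(buf);
        close(fd);
        return 0;
    }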
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 212b4a854f2c..38651fae7f21 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -4,6 +4,7 @@ config BTRFS_FS
tristate "Btrfs filesystem support"
select CRYPTO
select CRYPTO_CRC32C
+ select LIBCRC32C
select ZLIB_INFLATE
select ZLIB_DEFLATE
select LZO_COMPRESS
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 41a2bd2e0c56..5f7ee70b3d1a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4106,6 +4106,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
cleanup_srcu_struct(&fs_info->subvol_srcu);
+ btrfs_free_csum_hash(fs_info);
btrfs_free_stripe_hash_table(fs_info);
btrfs_free_ref_cache(fs_info);
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1af069a9a0c7..ee582a36653d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -395,10 +395,31 @@ static noinline int add_async_extent(struct async_chunk *cow,
return 0;
}
+/*
+ * Check if the inode has flags compatible with compression
+ */
+static inline bool inode_can_compress(struct inode *inode)
+{
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
+ BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
+ return false;
+ return true;
+}
+
+/*
+ * Check if the inode needs to be submitted to compression, based on mount
+ * options, defragmentation, properties or heuristics.
+ */
static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ if (!inode_can_compress(inode)) {
+ WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
+ KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
+ btrfs_ino(BTRFS_I(inode)));
+ return 0;
+ }
/* force compress */
if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
return 1;
@@ -1631,7 +1652,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
- } else if (!inode_need_compress(inode, start, end)) {
+ } else if (!inode_can_compress(inode) ||
+ !inode_need_compress(inode, start, end)) {
ret = cow_file_range(inode, locked_page, start, end, end,
page_started, nr_written, 1, NULL);
} else {
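The new inode_can_compress() helper keeps NODATACOW/NODATASUM inodes out of the compression path. From userspace, NODATACOW corresponds to the FS_NOCOW_FL inode attribute (chattr +C); a hedged sketch of inspecting it, with an illustrative path:

    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("somefile", O_RDONLY);  /* illustrative path */
        if (fd < 0) { perror("open"); return 1; }

        long flags = 0;
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0)
            /* nodatacow files are exactly the ones inode_can_compress()
             * now rejects before any compression heuristics run */
            printf("NOCOW: %s\n", (flags & FS_NOCOW_FL) ? "yes" : "no");
        close(fd);
        return 0;
    }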
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a13ddba1ebc3..d74b74ca07af 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5941,6 +5941,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 stripe_len;
u64 raid56_full_stripe_start = (u64)-1;
int data_stripes;
+ int ret = 0;
ASSERT(op != BTRFS_MAP_DISCARD);
@@ -5961,8 +5962,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
stripe_offset, offset, em->start, logical, stripe_len);
- free_extent_map(em);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* stripe_offset is the offset of this block in its stripe */
@@ -6009,7 +6010,10 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
io_geom->stripe_offset = stripe_offset;
io_geom->raid56_stripe_offset = raid56_full_stripe_start;
- return 0;
+out:
+ /* once for us */
+ free_extent_map(em);
+ return ret;
}
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
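The btrfs_get_io_geometry() hunks convert an early return that leaked the extent map into a single exit label, so free_extent_map() now runs on every path. A minimal sketch of the same idiom, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct extent_map { int refs; };

    static void map_put(struct extent_map *em)
    {
        if (--em->refs == 0)
            free(em);
    }

    static int get_geometry(struct extent_map *em, long offset)
    {
        int ret = 0;

        if (offset < 0) {
            ret = -22;       /* -EINVAL; previously this path leaked em */
            goto out;
        }
        /* ... stripe math would go here ... */
    out:
        map_put(em);         /* single drop point covers every return */
        return ret;
    }

    int main(void)
    {
        struct extent_map *em = malloc(sizeof(*em));
        em->refs = 1;
        printf("ret=%d\n", get_geometry(em, -1));
        return 0;
    }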
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 270d3c58fb3b..3289b566463f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1104,6 +1104,10 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
goto out;
}
+ rc = -EOPNOTSUPP;
+ if (!target_tcon->ses->server->ops->copychunk_range)
+ goto out;
+
/*
* Note: cifs case is easier than btrfs since server responsible for
* checks for proper open modes and file type and if it wants
@@ -1115,11 +1119,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
/* should we flush first and last page first */
truncate_inode_pages(&target_inode->i_data, 0);
- if (target_tcon->ses->server->ops->copychunk_range)
+ rc = file_modified(dst_file);
+ if (!rc)
rc = target_tcon->ses->server->ops->copychunk_range(xid,
smb_file_src, smb_file_target, off, len, destoff);
- else
- rc = -EOPNOTSUPP;
+
+ file_accessed(src_file);
/* force revalidate of size and timestamps of target file now
* that target is updated on the server
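cifs_file_copychunk_range() is the cifs backend for copy_file_range(2), so the reordered -EOPNOTSUPP check and the new file_modified()/file_accessed() calls are visible through that syscall. A small usage sketch:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        if (argc != 3) {
            fprintf(stderr, "usage: %s src dst\n", argv[0]);
            return 1;
        }
        int in = open(argv[1], O_RDONLY);
        int out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (in < 0 || out < 0) { perror("open"); return 1; }

        /* On cifs this reaches ->copychunk_range(); after the patch,
         * -EOPNOTSUPP is returned before any pages are truncated and
         * the target's timestamps go through file_modified() first. */
        ssize_t n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);
        if (n < 0)
            perror("copy_file_range");  /* fall back to read/write */
        else
            printf("copied %zd bytes\n", n);
        return 0;
    }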
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index aea005703785..4b21a90015a9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.20"
+#define CIFS_VERSION "2.21"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 1bffe029fb66..56ca4b8ccaba 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2406,6 +2406,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
struct inode *inode = d_inode(direntry);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
+ struct cifsFileInfo *wfile;
+ struct cifs_tcon *tcon;
char *full_path = NULL;
int rc = -EACCES;
__u32 dosattr = 0;
@@ -2452,6 +2454,20 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
mapping_set_error(inode->i_mapping, rc);
rc = 0;
+ if (attrs->ia_valid & ATTR_MTIME) {
+ rc = cifs_get_writable_file(cifsInode, false, &wfile);
+ if (!rc) {
+ tcon = tlink_tcon(wfile->tlink);
+ rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
+ cifsFileInfo_put(wfile);
+ if (rc)
+ return rc;
+ } else if (rc != -EBADF)
+ return rc;
+ else
+ rc = 0;
+ }
+
if (attrs->ia_valid & ATTR_SIZE) {
rc = cifs_set_file_size(inode, attrs, xid, full_path);
if (rc != 0)
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 54bffb2a1786..e6a1fc72018f 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -88,14 +88,20 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
}
if (buf) {
- /* open response does not have IndexNumber field - get it */
- rc = SMB2_get_srv_num(xid, oparms->tcon, fid->persistent_fid,
+ /* if open response does not have IndexNumber field - get it */
+ if (smb2_data->IndexNumber == 0) {
+ rc = SMB2_get_srv_num(xid, oparms->tcon,
+ fid->persistent_fid,
fid->volatile_fid,
&smb2_data->IndexNumber);
- if (rc) {
- /* let get_inode_info disable server inode numbers */
- smb2_data->IndexNumber = 0;
- rc = 0;
+ if (rc) {
+ /*
+ * let get_inode_info disable server inode
+ * numbers
+ */
+ smb2_data->IndexNumber = 0;
+ rc = 0;
+ }
}
move_smb2_info_to_cifs(buf, smb2_data);
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0cdc4e47ca87..a5bc1b671c12 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -694,8 +694,51 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
smb2_set_related(&rqst[1]);
+ /*
+ * We do not hold the lock for the open because in case
+ * SMB2_open needs to reconnect, it will end up calling
+ * cifs_mark_open_files_invalid(), which takes the lock again,
+ * thus causing a deadlock.
+ */
+
+ mutex_unlock(&tcon->crfid.fid_mutex);
rc = compound_send_recv(xid, ses, flags, 2, rqst,
resp_buftype, rsp_iov);
+ mutex_lock(&tcon->crfid.fid_mutex);
+
+ /*
+ * Now we need to check again as the cached root might have
+ * been successfully re-opened by a concurrent process
+ */
+
+ if (tcon->crfid.is_valid) {
+ /* work was already done */
+
+ /* stash fids for close() later */
+ struct cifs_fid fid = {
+ .persistent_fid = pfid->persistent_fid,
+ .volatile_fid = pfid->volatile_fid,
+ };
+
+ /*
+ * caller expects this func to set pfid to a valid
+ * cached root, so we copy the existing one and get a
+ * reference.
+ */
+ memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
+ kref_get(&tcon->crfid.refcount);
+
+ mutex_unlock(&tcon->crfid.fid_mutex);
+
+ if (rc == 0) {
+ /* close extra handle outside of crit sec */
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ }
+ goto oshr_free;
+ }
+
+ /* Cached root is still invalid, continue normally */
+
if (rc)
goto oshr_exit;
@@ -711,11 +754,12 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
tcon->crfid.is_valid = true;
kref_init(&tcon->crfid.refcount);
+ /* BB TBD check to see if oplock level check can be removed below */
if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
kref_get(&tcon->crfid.refcount);
- oplock = smb2_parse_lease_state(server, o_rsp,
- &oparms.fid->epoch,
- oparms.fid->lease_key);
+ smb2_parse_contexts(server, o_rsp,
+ &oparms.fid->epoch,
+ oparms.fid->lease_key, &oplock, NULL);
} else
goto oshr_exit;
@@ -729,8 +773,9 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
(char *)&tcon->crfid.file_all_info))
tcon->crfid.file_all_info_is_valid = 1;
- oshr_exit:
+oshr_exit:
mutex_unlock(&tcon->crfid.fid_mutex);
+oshr_free:
SMB2_open_free(&rqst[0]);
SMB2_query_info_free(&rqst[1]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
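open_shroot() now drops crfid.fid_mutex across the compounded open and revalidates the cache after retaking it, closing the extra handle if it lost the race. A minimal pthreads sketch of that drop-and-recheck pattern, with hypothetical names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool cache_valid;
    static int cached_handle;

    static int slow_open(void) { return 42; }  /* stand-in for SMB2_open */
    static void close_handle(int h) { (void)h; }

    int get_cached_handle(void)
    {
        pthread_mutex_lock(&lock);
        if (cache_valid) {
            int h = cached_handle;
            pthread_mutex_unlock(&lock);
            return h;
        }
        /* drop the lock across the blocking call, as open_shroot() does,
         * so a reconnect path can take it without deadlocking */
        pthread_mutex_unlock(&lock);
        int h = slow_open();
        pthread_mutex_lock(&lock);
        if (cache_valid) {
            /* someone else won the race: keep theirs, close ours */
            int winner = cached_handle;
            pthread_mutex_unlock(&lock);
            close_handle(h);
            return winner;
        }
        cached_handle = h;
        cache_valid = true;
        pthread_mutex_unlock(&lock);
        return h;
    }

    int main(void) { printf("handle %d\n", get_cached_handle()); return 0; }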
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index f58e4dc3987b..c8cd7b6cdda2 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1873,10 +1873,21 @@ create_reconnect_durable_buf(struct cifs_fid *fid)
return buf;
}
-__u8
-smb2_parse_lease_state(struct TCP_Server_Info *server,
+static void
+parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
+{
+ struct create_on_disk_id *pdisk_id = (struct create_on_disk_id *)cc;
+
+ cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
+ pdisk_id->DiskFileId, pdisk_id->VolumeId);
+ buf->IndexNumber = pdisk_id->DiskFileId;
+}
+
+void
+smb2_parse_contexts(struct TCP_Server_Info *server,
struct smb2_create_rsp *rsp,
- unsigned int *epoch, char *lease_key)
+ unsigned int *epoch, char *lease_key, __u8 *oplock,
+ struct smb2_file_all_info *buf)
{
char *data_offset;
struct create_context *cc;
@@ -1884,15 +1895,24 @@ smb2_parse_lease_state(struct TCP_Server_Info *server,
unsigned int remaining;
char *name;
+ *oplock = 0;
data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
remaining = le32_to_cpu(rsp->CreateContextsLength);
cc = (struct create_context *)data_offset;
+
+ /* Initialize inode number to 0 in case the qfid context has no valid data */
+ if (buf)
+ buf->IndexNumber = 0;
+
while (remaining >= sizeof(struct create_context)) {
name = le16_to_cpu(cc->NameOffset) + (char *)cc;
if (le16_to_cpu(cc->NameLength) == 4 &&
- strncmp(name, "RqLs", 4) == 0)
- return server->ops->parse_lease_buf(cc, epoch,
- lease_key);
+ strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
+ *oplock = server->ops->parse_lease_buf(cc, epoch,
+ lease_key);
+ else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
+ strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
+ parse_query_id_ctxt(cc, buf);
next = le32_to_cpu(cc->Next);
if (!next)
@@ -1901,7 +1921,10 @@ smb2_parse_lease_state(struct TCP_Server_Info *server,
cc = (struct create_context *)((char *)cc + next);
}
- return 0;
+ if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+ *oplock = rsp->OplockLevel;
+
+ return;
}
static int
@@ -2588,12 +2611,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
buf->DeletePending = 0;
}
- if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
- *oplock = smb2_parse_lease_state(server, rsp,
- &oparms->fid->epoch,
- oparms->fid->lease_key);
- else
- *oplock = rsp->OplockLevel;
+
+ smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
+ oparms->fid->lease_key, oplock, buf);
creat_exit:
SMB2_open_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
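smb2_parse_contexts() walks the create-context chain via each context's Next offset, matching the 4-byte names "RqLs" (lease) and "QFid" (on-disk id). A simplified host-endian sketch of that walk; the struct layout is abbreviated from the wire format:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* abbreviated, host-endian view of an SMB2 create context header;
     * the real wire format is little-endian and also carries the
     * data offset/length */
    struct cc_hdr {
        uint32_t Next;        /* offset to the next context, 0 terminates */
        uint16_t NameOffset;
        uint16_t NameLength;
    };

    static void walk_contexts(const char *buf, size_t len)
    {
        const struct cc_hdr *cc = (const struct cc_hdr *)buf;

        while (len >= sizeof(*cc)) {
            const char *name = (const char *)cc + cc->NameOffset;
            if (cc->NameLength == 4 && memcmp(name, "RqLs", 4) == 0)
                puts("lease context");
            else if (cc->NameLength == 4 && memcmp(name, "QFid", 4) == 0)
                puts("on-disk id context");  /* feeds buf->IndexNumber */
            if (!cc->Next)
                break;
            len -= cc->Next;
            cc = (const struct cc_hdr *)((const char *)cc + cc->Next);
        }
    }

    int main(void)
    {
        struct { struct cc_hdr hdr; char name[8]; } msg = { {0}, {0} };
        msg.hdr.NameOffset = sizeof(msg.hdr);
        msg.hdr.NameLength = 4;
        memcpy(msg.name, "QFid", 4);
        walk_contexts((const char *)&msg, sizeof(msg));
        return 0;
    }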
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 7e2e782f8edd..747de9317659 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -818,7 +818,9 @@ struct durable_reconnect_context_v2 {
} __packed;
/* See MS-SMB2 2.2.14.2.9 */
-struct on_disk_id {
+struct create_on_disk_id {
+ struct create_context ccontext;
+ __u8 Name[8];
__le64 DiskFileId;
__le64 VolumeId;
__u32 Reserved[4];
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 52df125e9189..07ca72486cfa 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -228,9 +228,10 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
-extern __u8 smb2_parse_lease_state(struct TCP_Server_Info *server,
- struct smb2_create_rsp *rsp,
- unsigned int *epoch, char *lease_key);
+extern void smb2_parse_contexts(struct TCP_Server_Info *server,
+ struct smb2_create_rsp *rsp,
+ unsigned int *epoch, char *lease_key,
+ __u8 *oplock, struct smb2_file_all_info *buf);
extern int smb3_encryption_required(const struct cifs_tcon *tcon);
extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
struct kvec *iov, unsigned int min_buf_size);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e2a66e12fbc6..012bc0efb9d3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -202,7 +202,7 @@ struct async_list {
struct file *file;
off_t io_end;
- size_t io_pages;
+ size_t io_len;
};
struct io_ring_ctx {
@@ -333,7 +333,8 @@ struct io_kiocb {
#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
#define REQ_F_IO_DRAINED 32 /* drain done */
#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_FAIL_LINK 128 /* fail rest of links */
+#define REQ_F_LINK_DONE 128 /* linked sqes done */
+#define REQ_F_FAIL_LINK 256 /* fail rest of links */
u64 user_data;
u32 result;
u32 sequence;
@@ -429,7 +430,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
return false;
- return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped;
+ return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
}
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -632,6 +633,7 @@ static void io_req_link_next(struct io_kiocb *req)
nxt->flags |= REQ_F_LINK;
}
+ nxt->flags |= REQ_F_LINK_DONE;
INIT_WORK(&nxt->work, io_sq_wq_submit_work);
queue_work(req->ctx->sqo_wq, &nxt->work);
}
@@ -1064,8 +1066,44 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
*/
offset = buf_addr - imu->ubuf;
iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
- if (offset)
- iov_iter_advance(iter, offset);
+
+ if (offset) {
+ /*
+ * Don't use iov_iter_advance() here, as it's really slow for
+ * using the latter parts of a big fixed buffer - it iterates
+ * over each segment manually. We can cheat a bit here, because
+ * we know that:
+ *
+ * 1) it's a BVEC iter, we set it up
+ * 2) all bvecs are PAGE_SIZE in size, except potentially the
+ * first and last bvec
+ *
+ * So just find our index, and adjust the iterator afterwards.
+ * If the offset is within the first bvec (or the whole first
+ * bvec), just use iov_iter_advance(). This makes it easier
+ * since we can just skip the first segment, which may not
+ * be PAGE_SIZE aligned.
+ */
+ const struct bio_vec *bvec = imu->bvec;
+
+ if (offset <= bvec->bv_len) {
+ iov_iter_advance(iter, offset);
+ } else {
+ unsigned long seg_skip;
+
+ /* skip first vec */
+ offset -= bvec->bv_len;
+ seg_skip = 1 + (offset >> PAGE_SHIFT);
+
+ iter->bvec = bvec + seg_skip;
+ iter->nr_segs -= seg_skip;
+ iter->count -= (seg_skip << PAGE_SHIFT);
+ iter->iov_offset = offset & ~PAGE_MASK;
+ if (iter->iov_offset)
+ iter->count -= iter->iov_offset;
+ }
+ }
+
return 0;
}
@@ -1120,28 +1158,26 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
off_t io_end = kiocb->ki_pos + len;
if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
- unsigned long max_pages;
+ unsigned long max_bytes;
/* Use 8x RA size as a decent limiter for both reads/writes */
- max_pages = filp->f_ra.ra_pages;
- if (!max_pages)
- max_pages = VM_READAHEAD_PAGES;
- max_pages *= 8;
-
- /* If max pages are exceeded, reset the state */
- len >>= PAGE_SHIFT;
- if (async_list->io_pages + len <= max_pages) {
+ max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
+ if (!max_bytes)
+ max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
+
+ /* If max len is exceeded, reset the state */
+ if (async_list->io_len + len <= max_bytes) {
req->flags |= REQ_F_SEQ_PREV;
- async_list->io_pages += len;
+ async_list->io_len += len;
} else {
io_end = 0;
- async_list->io_pages = 0;
+ async_list->io_len = 0;
}
}
/* New file? Reset state. */
if (async_list->file != filp) {
- async_list->io_pages = 0;
+ async_list->io_len = 0;
async_list->file = filp;
}
async_list->io_end = io_end;
@@ -1630,6 +1666,8 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
INIT_LIST_HEAD(&poll->wait.entry);
init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+ INIT_LIST_HEAD(&req->list);
+
mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
spin_lock_irq(&ctx->completion_lock);
@@ -1844,6 +1882,10 @@ restart:
/* async context always use a copy of the sqe */
kfree(sqe);
+ /* reqs from the defer and link lists needn't decrease the async cnt */
+ if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
+ goto out;
+
if (!async_list)
break;
if (!list_empty(&req_list)) {
@@ -1891,6 +1933,7 @@ restart:
}
}
+out:
if (cur_mm) {
set_fs(old_fs);
unuse_mm(cur_mm);
@@ -1917,6 +1960,10 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
ret = true;
spin_lock(&list->lock);
list_add_tail(&req->list, &list->list);
+ /*
+ * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
+ */
+ smp_mb();
if (!atomic_read(&list->cnt)) {
list_del_init(&req->list);
ret = false;
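The io_import_fixed() change above replaces iov_iter_advance() with direct index math, relying on every bvec except possibly the first being PAGE_SIZE. A standalone sketch of the same arithmetic:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /*
     * Given an offset into a fixed buffer whose bvecs are all PAGE_SIZE
     * except possibly the first, compute how many segments to skip and
     * the byte offset into the landing segment (mirrors io_import_fixed()).
     */
    static void seek_bvec(unsigned long first_len, unsigned long offset,
                          unsigned long *seg_skip, unsigned long *iov_off)
    {
        assert(offset > first_len);  /* the <= case uses iov_iter_advance() */
        offset -= first_len;         /* skip the (possibly short) first vec */
        *seg_skip = 1 + (offset >> PAGE_SHIFT);
        *iov_off = offset & ~PAGE_MASK;
    }

    int main(void)
    {
        unsigned long skip, off;

        /* first bvec is 1024 bytes; seek 10000 bytes into the buffer */
        seek_bvec(1024, 10000, &skip, &off);
        printf("skip %lu segments, offset %lu in segment\n", skip, off);
        return 0;
    }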
diff --git a/fs/open.c b/fs/open.c
index b5b80469b93d..a59abe3c669a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -374,6 +374,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
override_cred->cap_permitted;
}
+ /*
+ * The new set of credentials can *only* be used in
+ * task-synchronous circumstances, and does not need
+ * RCU freeing, unless somebody then takes a separate
+ * reference to it.
+ *
+ * NOTE! This is _only_ true because this credential
+ * is used purely for override_creds() that installs
+ * it as the subjective cred. Other threads will be
+ * accessing ->real_cred, not the subjective cred.
+ *
+ * If somebody _does_ make a copy of this (using the
+ * 'get_current_cred()' function), that will clear the
+ * non_rcu field, because now that other user may be
+ * expecting RCU freeing. But normal thread-synchronous
+ * cred accesses will keep things non-RCU.
+ */
+ override_cred->non_rcu = 1;
+
old_cred = override_creds(override_cred);
retry:
res = user_path_at(dfd, filename, lookup_flags, &path);
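do_faccessat() backs access(2)/faccessat(2); the override credentials annotated above live only for the duration of the permission check, which is why they can skip RCU freeing. A small usage sketch of the syscall this serves:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : "/etc/shadow";

        /* access(2)-style check with the *real* uid/gid -- the temporary
         * override cred built in do_faccessat(); pass AT_EACCESS to use
         * the effective ids instead */
        if (faccessat(AT_FDCWD, path, R_OK, 0) == 0)
            printf("%s is readable with real ids\n", path);
        else
            perror("faccessat");
        return 0;
    }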