Diffstat (limited to 'fs')
162 files changed, 7334 insertions, 4391 deletions
diff --git a/fs/Kconfig b/fs/Kconfig index 2501e6f1f965..7b623e9fc1b0 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -322,4 +322,7 @@ source "fs/nls/Kconfig" source "fs/dlm/Kconfig" source "fs/unicode/Kconfig" +config IO_WQ + bool + endmenu diff --git a/fs/Makefile b/fs/Makefile index 14231b4cf383..1148c555c4d3 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_AIO) += aio.o obj-$(CONFIG_IO_URING) += io_uring.o +obj-$(CONFIG_IO_WQ) += io-wq.o obj-$(CONFIG_FS_DAX) += dax.o obj-$(CONFIG_FS_ENCRYPTION) += crypto/ obj-$(CONFIG_FS_VERITY) += verity/ diff --git a/fs/affs/affs.h b/fs/affs/affs.h index a92eb6ae2ae2..a755bef7c4c7 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -43,8 +43,8 @@ struct affs_ext_key { */ struct affs_inode_info { atomic_t i_opencnt; - struct semaphore i_link_lock; /* Protects internal inode access. */ - struct semaphore i_ext_lock; /* Protects internal inode access. */ + struct mutex i_link_lock; /* Protects internal inode access. */ + struct mutex i_ext_lock; /* Protects internal inode access. */ #define i_hash_lock i_ext_lock u32 i_blkcnt; /* block count */ u32 i_extcnt; /* extended block count */ @@ -293,30 +293,30 @@ affs_adjust_bitmapchecksum(struct buffer_head *bh, u32 val) static inline void affs_lock_link(struct inode *inode) { - down(&AFFS_I(inode)->i_link_lock); + mutex_lock(&AFFS_I(inode)->i_link_lock); } static inline void affs_unlock_link(struct inode *inode) { - up(&AFFS_I(inode)->i_link_lock); + mutex_unlock(&AFFS_I(inode)->i_link_lock); } static inline void affs_lock_dir(struct inode *inode) { - down(&AFFS_I(inode)->i_hash_lock); + mutex_lock_nested(&AFFS_I(inode)->i_hash_lock, SINGLE_DEPTH_NESTING); } static inline void affs_unlock_dir(struct inode *inode) { - up(&AFFS_I(inode)->i_hash_lock); + mutex_unlock(&AFFS_I(inode)->i_hash_lock); } static inline void affs_lock_ext(struct inode *inode) { - down(&AFFS_I(inode)->i_ext_lock); + mutex_lock(&AFFS_I(inode)->i_ext_lock); } static inline void affs_unlock_ext(struct inode *inode) { - up(&AFFS_I(inode)->i_ext_lock); + mutex_unlock(&AFFS_I(inode)->i_ext_lock); } diff --git a/fs/affs/super.c b/fs/affs/super.c index cc463ae47c12..47107c6712a6 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -121,8 +121,8 @@ static void init_once(void *foo) { struct affs_inode_info *ei = (struct affs_inode_info *) foo; - sema_init(&ei->i_link_lock, 1); - sema_init(&ei->i_ext_lock, 1); + mutex_init(&ei->i_link_lock); + mutex_init(&ei->i_ext_lock); inode_init_once(&ei->vfs_inode); } @@ -561,14 +561,9 @@ affs_remount(struct super_block *sb, int *flags, char *data) int root_block; unsigned long mount_flags; int res = 0; - char *new_opts; char volume[32]; char *prefix = NULL; - new_opts = kstrdup(data, GFP_KERNEL); - if (data && !new_opts) - return -ENOMEM; - pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data); sync_filesystem(sb); @@ -579,7 +574,6 @@ affs_remount(struct super_block *sb, int *flags, char *data) &blocksize, &prefix, volume, &mount_flags)) { kfree(prefix); - kfree(new_opts); return -EINVAL; } diff --git a/fs/afs/callback.c b/fs/afs/callback.c index 6cdd7047c809..2dca8df1a18d 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -312,7 +312,6 @@ void afs_break_callbacks(struct afs_server *server, size_t count, _enter("%p,%zu,", server, count); ASSERT(server != NULL); - ASSERTCMP(count, <=, AFSCBMAX); /* TODO: Sort the callback break list by volume ID */ diff --git a/fs/afs/dir.c b/fs/afs/dir.c 
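The affs hunks above replace the two per-inode counting semaphores with mutexes; the i_hash_lock alias (the same lock as i_ext_lock) is taken with mutex_lock_nested(), which annotates a second nesting level of the lock class for lockdep. A minimal sketch of that conversion pattern, with hypothetical names rather than the affs code itself:

#include <linux/mutex.h>

struct demo_inode_info {
	struct mutex i_link_lock;	/* was: struct semaphore + sema_init(..., 1) */
	struct mutex i_ext_lock;	/* also doubles as the directory hash lock */
};

static void demo_init_once(struct demo_inode_info *ei)
{
	mutex_init(&ei->i_link_lock);
	mutex_init(&ei->i_ext_lock);
}

static void demo_lock_ext(struct demo_inode_info *ei)
{
	mutex_lock(&ei->i_ext_lock);		/* was: down() */
}

static void demo_lock_dir(struct demo_inode_info *ei)
{
	/* second nesting level of the same lock class; annotate for lockdep */
	mutex_lock_nested(&ei->i_ext_lock, SINGLE_DEPTH_NESTING);
}

static void demo_unlock(struct mutex *m)
{
	mutex_unlock(m);			/* was: up() */
}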
index cc12772d0a4d..497f979018c2 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -803,7 +803,12 @@ success: continue; if (cookie->inodes[i]) { - afs_vnode_commit_status(&fc, AFS_FS_I(cookie->inodes[i]), + struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]); + + if (test_bit(AFS_VNODE_UNSET, &iv->flags)) + continue; + + afs_vnode_commit_status(&fc, iv, scb->cb_break, NULL, scb); continue; } diff --git a/fs/afs/flock.c b/fs/afs/flock.c index d5e5a6ddc847..0f2a94ba73cb 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -346,8 +346,8 @@ again: if (ret < 0) { trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail, ret); - pr_warning("AFS: Failed to extend lock on {%llx:%llx} error %d\n", - vnode->fid.vid, vnode->fid.vnode, ret); + pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n", + vnode->fid.vid, vnode->fid.vnode, ret); } spin_lock(&vnode->lock); diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 46d2d7cb461d..281470fe1183 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -34,8 +34,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren { static unsigned long once_only; - pr_warn("kAFS: AFS vnode with undefined type %u\n", - vnode->status.type); + pr_warn("kAFS: AFS vnode with undefined type %u\n", vnode->status.type); pr_warn("kAFS: A=%d m=%o s=%llx v=%llx\n", vnode->status.abort_code, vnode->status.mode, @@ -175,11 +174,11 @@ static void afs_apply_status(struct afs_fs_cursor *fc, BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags)); if (status->type != vnode->status.type) { - pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n", - vnode->fid.vid, - vnode->fid.vnode, - vnode->fid.unique, - status->type, vnode->status.type); + pr_warn("Vnode %llx:%llx:%x changed type %u to %u\n", + vnode->fid.vid, + vnode->fid.vnode, + vnode->fid.unique, + status->type, vnode->status.type); afs_protocol_error(NULL, -EBADMSG, afs_eproto_bad_status); return; } diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 0e5269374ac1..61498d9f06ef 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -637,6 +637,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call, call->need_attention = false; __set_current_state(TASK_RUNNING); afs_deliver_to_call(call); + timeout = rtt2; continue; } diff --git a/fs/afs/super.c b/fs/afs/super.c index f18911e8d770..488641b1a418 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -435,6 +435,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) /* fill in the superblock */ sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; + sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = AFS_FS_MAGIC; sb->s_op = &afs_super_ops; if (!as->dyn_root) diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c index 21eb0c0be912..8fea54eba0c2 100644 --- a/fs/afs/vl_list.c +++ b/fs/afs/vl_list.c @@ -279,8 +279,8 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell, struct afs_addr_list *old = addrs; write_lock(&server->lock); - rcu_swap_protected(server->addresses, old, - lockdep_is_held(&server->lock)); + old = rcu_replace_pointer(server->addresses, old, + lockdep_is_held(&server->lock)); write_unlock(&server->lock); afs_put_addrlist(old); } diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 3ee7abf4b2d0..9ac035c17dc4 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -152,8 +152,8 @@ static void yfs_check_req(struct afs_call *call, __be32 *bp) pr_err("kAFS: %s: Request buffer overflow (%zu>%u)\n", call->type->name, len, call->request_size); else if (len < call->request_size) - 
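In the vl_list.c hunk above, rcu_swap_protected() becomes rcu_replace_pointer(), which publishes the new pointer and hands back the old one so the writer can drop its reference after leaving the critical section. A generic sketch of that update-side pattern (hypothetical config object, not the afs code):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_cfg {
	int value;
	struct rcu_head rcu;
};

static struct demo_cfg __rcu *active_cfg;
static DEFINE_SPINLOCK(cfg_lock);

/* Publish a new config; free the old one after a grace period. */
static void demo_cfg_update(struct demo_cfg *newc)
{
	struct demo_cfg *old;

	spin_lock(&cfg_lock);
	old = rcu_replace_pointer(active_cfg, newc,
				  lockdep_is_held(&cfg_lock));
	spin_unlock(&cfg_lock);
	if (old)
		kfree_rcu(old, rcu);
}

afs keeps the same shape but holds server->lock and drops the old address list with afs_put_addrlist() instead of kfree_rcu().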
pr_warning("kAFS: %s: Request buffer underflow (%zu<%u)\n", - call->type->name, len, call->request_size); + pr_warn("kAFS: %s: Request buffer underflow (%zu<%u)\n", + call->type->name, len, call->request_size); } /* @@ -2179,7 +2179,7 @@ SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id, #ifdef CONFIG_COMPAT struct __compat_aio_sigset { - compat_sigset_t __user *sigmask; + compat_uptr_t sigmask; compat_size_t sigsetsize; }; @@ -2193,7 +2193,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents, struct old_timespec32 __user *, timeout, const struct __compat_aio_sigset __user *, usig) { - struct __compat_aio_sigset ksig = { NULL, }; + struct __compat_aio_sigset ksig = { 0, }; struct timespec64 t; bool interrupted; int ret; @@ -2204,7 +2204,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents, if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) return -EFAULT; - ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize); + ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize); if (ret) return ret; @@ -2228,7 +2228,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, struct __kernel_timespec __user *, timeout, const struct __compat_aio_sigset __user *, usig) { - struct __compat_aio_sigset ksig = { NULL, }; + struct __compat_aio_sigset ksig = { 0, }; struct timespec64 t; bool interrupted; int ret; @@ -2239,7 +2239,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) return -EFAULT; - ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize); + ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize); if (ret) return ret; diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c index 2866fabf497f..91f5787dae7c 100644 --- a/fs/autofs/expire.c +++ b/fs/autofs/expire.c @@ -459,9 +459,10 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb, */ how &= ~AUTOFS_EXP_LEAVES; found = should_expire(expired, mnt, timeout, how); - if (!found || found != expired) - /* Something has changed, continue */ + if (found != expired) { // something has changed, continue + dput(found); goto next; + } if (expired != dentry) dput(dentry); diff --git a/fs/block_dev.c b/fs/block_dev.c index 9c073dbdc1b0..ee63c2732fa2 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1403,11 +1403,7 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty) "resized disk %s\n", bdev->bd_disk ? bdev->bd_disk->disk_name : ""); } - - if (!bdev->bd_disk) - return; - if (disk_part_scan_enabled(bdev->bd_disk)) - bdev->bd_invalidated = 1; + bdev->bd_invalidated = 1; } /** @@ -1420,8 +1416,8 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty) * and adjusts it if it differs. When shrinking the bdev size, its all caches * are freed. 
*/ -void check_disk_size_change(struct gendisk *disk, struct block_device *bdev, - bool verbose) +static void check_disk_size_change(struct gendisk *disk, + struct block_device *bdev, bool verbose) { loff_t disk_size, bdev_size; @@ -1437,6 +1433,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev, if (bdev_size > disk_size) flush_disk(bdev, false); } + bdev->bd_invalidated = 0; } /** @@ -1466,7 +1463,6 @@ int revalidate_disk(struct gendisk *disk) mutex_lock(&bdev->bd_mutex); check_disk_size_change(disk, bdev, ret == 0); - bdev->bd_invalidated = 0; mutex_unlock(&bdev->bd_mutex); bdput(bdev); } @@ -1512,6 +1508,45 @@ EXPORT_SYMBOL(bd_set_size); static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); +int bdev_disk_changed(struct block_device *bdev, bool invalidate) +{ + struct gendisk *disk = bdev->bd_disk; + int ret; + + lockdep_assert_held(&bdev->bd_mutex); + +rescan: + ret = blk_drop_partitions(disk, bdev); + if (ret) + return ret; + + if (invalidate) + set_capacity(disk, 0); + else if (disk->fops->revalidate_disk) + disk->fops->revalidate_disk(disk); + + check_disk_size_change(disk, bdev, !invalidate); + + if (get_capacity(disk)) { + ret = blk_add_partitions(disk, bdev); + if (ret == -EAGAIN) + goto rescan; + } else { + /* + * Tell userspace that the media / partition table may have + * changed. + */ + kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); + } + + return ret; +} +/* + * Only exported for for loop and dasd for historic reasons. Don't use in new + * code! + */ +EXPORT_SYMBOL_GPL(bdev_disk_changed); + /* * bd_mutex locking: * @@ -1594,12 +1629,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) * The latter is necessary to prevent ghost * partitions on a removed medium. 
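bdev_disk_changed() above folds the old rescan_partitions()/invalidate_partitions() pair into one helper: drop the stale partitions, revalidate the disk (or zero its capacity when the media is gone), resync the bdev size, and re-read the partition table, retrying on -EAGAIN. It asserts that bd_mutex is held, and the comment restricts the export to loop and dasd. A hedged sketch of what such an external caller looks like, assuming the usual block layer headers:

static int demo_rescan_partitions(struct block_device *bdev)
{
	int err;

	/* bdev_disk_changed() requires bd_mutex, see the lockdep assert above */
	mutex_lock(&bdev->bd_mutex);
	err = bdev_disk_changed(bdev, false);	/* false: media still present */
	mutex_unlock(&bdev->bd_mutex);

	return err;
}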
*/ - if (bdev->bd_invalidated) { - if (!ret) - rescan_partitions(disk, bdev); - else if (ret == -ENOMEDIUM) - invalidate_partitions(disk, bdev); - } + if (bdev->bd_invalidated && + (!ret || ret == -ENOMEDIUM)) + bdev_disk_changed(bdev, ret == -ENOMEDIUM); if (ret) goto out_clear; @@ -1632,12 +1664,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) if (bdev->bd_disk->fops->open) ret = bdev->bd_disk->fops->open(bdev, mode); /* the same as first opener case, read comment there */ - if (bdev->bd_invalidated) { - if (!ret) - rescan_partitions(bdev->bd_disk, bdev); - else if (ret == -ENOMEDIUM) - invalidate_partitions(bdev->bd_disk, bdev); - } + if (bdev->bd_invalidated && + (!ret || ret == -ENOMEDIUM)) + bdev_disk_changed(bdev, ret == -ENOMEDIUM); if (ret) goto out_unlock_bdev; } diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 38651fae7f21..75b6d10c9845 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -5,6 +5,8 @@ config BTRFS_FS select CRYPTO select CRYPTO_CRC32C select LIBCRC32C + select CRYPTO_XXHASH + select CRYPTO_SHA256 select ZLIB_INFLATE select ZLIB_DEFLATE select LZO_COMPRESS diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 2e9e13ffbd08..1d32a07bb2d1 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -53,24 +53,12 @@ struct btrfs_workqueue { struct __btrfs_workqueue *high; }; -static void normal_work_helper(struct btrfs_work *work); - -#define BTRFS_WORK_HELPER(name) \ -noinline_for_stack void btrfs_##name(struct work_struct *arg) \ -{ \ - struct btrfs_work *work = container_of(arg, struct btrfs_work, \ - normal_work); \ - normal_work_helper(work); \ -} - -struct btrfs_fs_info * -btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) +struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) { return wq->fs_info; } -struct btrfs_fs_info * -btrfs_work_owner(const struct btrfs_work *work) +struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) { return work->wq->fs_info; } @@ -89,29 +77,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; } -BTRFS_WORK_HELPER(worker_helper); -BTRFS_WORK_HELPER(delalloc_helper); -BTRFS_WORK_HELPER(flush_delalloc_helper); -BTRFS_WORK_HELPER(cache_helper); -BTRFS_WORK_HELPER(submit_helper); -BTRFS_WORK_HELPER(fixup_helper); -BTRFS_WORK_HELPER(endio_helper); -BTRFS_WORK_HELPER(endio_meta_helper); -BTRFS_WORK_HELPER(endio_meta_write_helper); -BTRFS_WORK_HELPER(endio_raid56_helper); -BTRFS_WORK_HELPER(endio_repair_helper); -BTRFS_WORK_HELPER(rmw_helper); -BTRFS_WORK_HELPER(endio_write_helper); -BTRFS_WORK_HELPER(freespace_write_helper); -BTRFS_WORK_HELPER(delayed_meta_helper); -BTRFS_WORK_HELPER(readahead_helper); -BTRFS_WORK_HELPER(qgroup_rescan_helper); -BTRFS_WORK_HELPER(extent_refs_helper); -BTRFS_WORK_HELPER(scrub_helper); -BTRFS_WORK_HELPER(scrubwrc_helper); -BTRFS_WORK_HELPER(scrubnc_helper); -BTRFS_WORK_HELPER(scrubparity_helper); - static struct __btrfs_workqueue * __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, unsigned int flags, int limit_active, int thresh) @@ -252,16 +217,16 @@ out: } } -static void run_ordered_work(struct __btrfs_workqueue *wq) +static void run_ordered_work(struct __btrfs_workqueue *wq, + struct btrfs_work *self) { struct list_head *list = &wq->ordered_list; struct btrfs_work *work; spinlock_t *lock = &wq->list_lock; unsigned long flags; + bool free_self = false; while (1) { 
- void *wtag; - spin_lock_irqsave(lock, flags); if (list_empty(list)) break; @@ -287,22 +252,53 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) list_del(&work->ordered_list); spin_unlock_irqrestore(lock, flags); - /* - * We don't want to call the ordered free functions with the - * lock held though. Save the work as tag for the trace event, - * because the callback could free the structure. - */ - wtag = work; - work->ordered_free(work); - trace_btrfs_all_work_done(wq->fs_info, wtag); + if (work == self) { + /* + * This is the work item that the worker is currently + * executing. + * + * The kernel workqueue code guarantees non-reentrancy + * of work items. I.e., if a work item with the same + * address and work function is queued twice, the second + * execution is blocked until the first one finishes. A + * work item may be freed and recycled with the same + * work function; the workqueue code assumes that the + * original work item cannot depend on the recycled work + * item in that case (see find_worker_executing_work()). + * + * Note that different types of Btrfs work can depend on + * each other, and one type of work on one Btrfs + * filesystem may even depend on the same type of work + * on another Btrfs filesystem via, e.g., a loop device. + * Therefore, we must not allow the current work item to + * be recycled until we are really done, otherwise we + * break the above assumption and can deadlock. + */ + free_self = true; + } else { + /* + * We don't want to call the ordered free functions with + * the lock held. + */ + work->ordered_free(work); + /* NB: work must not be dereferenced past this point. */ + trace_btrfs_all_work_done(wq->fs_info, work); + } } spin_unlock_irqrestore(lock, flags); + + if (free_self) { + self->ordered_free(self); + /* NB: self must not be dereferenced past this point. */ + trace_btrfs_all_work_done(wq->fs_info, self); + } } -static void normal_work_helper(struct btrfs_work *work) +static void btrfs_work_helper(struct work_struct *normal_work) { + struct btrfs_work *work = container_of(normal_work, struct btrfs_work, + normal_work); struct __btrfs_workqueue *wq; - void *wtag; int need_order = 0; /* @@ -316,29 +312,26 @@ static void normal_work_helper(struct btrfs_work *work) if (work->ordered_func) need_order = 1; wq = work->wq; - /* Safe for tracepoints in case work gets freed by the callback */ - wtag = work; trace_btrfs_work_sched(work); thresh_exec_hook(wq); work->func(work); if (need_order) { set_bit(WORK_DONE_BIT, &work->flags); - run_ordered_work(wq); + run_ordered_work(wq, work); + } else { + /* NB: work must not be dereferenced past this point. 
*/ + trace_btrfs_all_work_done(wq->fs_info, work); } - if (!need_order) - trace_btrfs_all_work_done(wq->fs_info, wtag); } -void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, - btrfs_func_t func, - btrfs_func_t ordered_func, - btrfs_func_t ordered_free) +void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, + btrfs_func_t ordered_func, btrfs_func_t ordered_free) { work->func = func; work->ordered_func = ordered_func; work->ordered_free = ordered_free; - INIT_WORK(&work->normal_work, uniq_func); + INIT_WORK(&work->normal_work, btrfs_work_helper); INIT_LIST_HEAD(&work->ordered_list); work->flags = 0; } diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index 7861c9feba5f..a4434301d84d 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -29,49 +29,20 @@ struct btrfs_work { unsigned long flags; }; -#define BTRFS_WORK_HELPER_PROTO(name) \ -void btrfs_##name(struct work_struct *arg) - -BTRFS_WORK_HELPER_PROTO(worker_helper); -BTRFS_WORK_HELPER_PROTO(delalloc_helper); -BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper); -BTRFS_WORK_HELPER_PROTO(cache_helper); -BTRFS_WORK_HELPER_PROTO(submit_helper); -BTRFS_WORK_HELPER_PROTO(fixup_helper); -BTRFS_WORK_HELPER_PROTO(endio_helper); -BTRFS_WORK_HELPER_PROTO(endio_meta_helper); -BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper); -BTRFS_WORK_HELPER_PROTO(endio_raid56_helper); -BTRFS_WORK_HELPER_PROTO(endio_repair_helper); -BTRFS_WORK_HELPER_PROTO(rmw_helper); -BTRFS_WORK_HELPER_PROTO(endio_write_helper); -BTRFS_WORK_HELPER_PROTO(freespace_write_helper); -BTRFS_WORK_HELPER_PROTO(delayed_meta_helper); -BTRFS_WORK_HELPER_PROTO(readahead_helper); -BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper); -BTRFS_WORK_HELPER_PROTO(extent_refs_helper); -BTRFS_WORK_HELPER_PROTO(scrub_helper); -BTRFS_WORK_HELPER_PROTO(scrubwrc_helper); -BTRFS_WORK_HELPER_PROTO(scrubnc_helper); -BTRFS_WORK_HELPER_PROTO(scrubparity_helper); - - struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, unsigned int flags, int limit_active, int thresh); -void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, - btrfs_func_t func, - btrfs_func_t ordered_func, - btrfs_func_t ordered_free); +void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, + btrfs_func_t ordered_func, btrfs_func_t ordered_free); void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work); void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max); void btrfs_set_work_high_priority(struct btrfs_work *work); -struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work); -struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); +struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work); +struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); #endif diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index bf7e3f23bba7..6934a5b8708f 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -120,12 +120,12 @@ u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) return get_alloc_profile(fs_info, orig_flags); } -void btrfs_get_block_group(struct btrfs_block_group_cache *cache) +void btrfs_get_block_group(struct btrfs_block_group *cache) { atomic_inc(&cache->count); } -void btrfs_put_block_group(struct btrfs_block_group_cache *cache) 
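The async-thread changes above drop the per-type helper functions: btrfs_work_helper() is now the single work_struct entry point, so btrfs_init_work() loses its first argument and callers pass just the three callbacks (func, ordered_func, ordered_free), as the caching-control conversion further down shows. A sketch of the new call shape with hypothetical callbacks (assumes it lives inside fs/btrfs/):

#include <linux/slab.h>
#include "async-thread.h"

struct demo_item {
	struct btrfs_work work;
	/* payload ... */
};

static void demo_func(struct btrfs_work *work)
{
	/* main work, may run concurrently with other items */
}

static void demo_ordered_func(struct btrfs_work *work)
{
	/* runs in queueing order once demo_func() has completed */
}

static void demo_ordered_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct demo_item, work));
}

static void demo_submit(struct btrfs_workqueue *wq, struct demo_item *item)
{
	/* previously: btrfs_init_work(&item->work, btrfs_<type>_helper, ...) */
	btrfs_init_work(&item->work, demo_func, demo_ordered_func,
			demo_ordered_free);
	btrfs_queue_work(wq, &item->work);
}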
+void btrfs_put_block_group(struct btrfs_block_group *cache) { if (atomic_dec_and_test(&cache->count)) { WARN_ON(cache->pinned > 0); @@ -149,22 +149,21 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache) * This adds the block group to the fs_info rb tree for the block group cache */ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct rb_node **p; struct rb_node *parent = NULL; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; spin_lock(&info->block_group_cache_lock); p = &info->block_group_cache_tree.rb_node; while (*p) { parent = *p; - cache = rb_entry(parent, struct btrfs_block_group_cache, - cache_node); - if (block_group->key.objectid < cache->key.objectid) { + cache = rb_entry(parent, struct btrfs_block_group, cache_node); + if (block_group->start < cache->start) { p = &(*p)->rb_left; - } else if (block_group->key.objectid > cache->key.objectid) { + } else if (block_group->start > cache->start) { p = &(*p)->rb_right; } else { spin_unlock(&info->block_group_cache_lock); @@ -176,8 +175,8 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, rb_insert_color(&block_group->cache_node, &info->block_group_cache_tree); - if (info->first_logical_byte > block_group->key.objectid) - info->first_logical_byte = block_group->key.objectid; + if (info->first_logical_byte > block_group->start) + info->first_logical_byte = block_group->start; spin_unlock(&info->block_group_cache_lock); @@ -188,10 +187,10 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, * This will return the block group at or after bytenr if contains is 0, else * it will return the block group that contains the bytenr */ -static struct btrfs_block_group_cache *block_group_cache_tree_search( +static struct btrfs_block_group *block_group_cache_tree_search( struct btrfs_fs_info *info, u64 bytenr, int contains) { - struct btrfs_block_group_cache *cache, *ret = NULL; + struct btrfs_block_group *cache, *ret = NULL; struct rb_node *n; u64 end, start; @@ -199,13 +198,12 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search( n = info->block_group_cache_tree.rb_node; while (n) { - cache = rb_entry(n, struct btrfs_block_group_cache, - cache_node); - end = cache->key.objectid + cache->key.offset - 1; - start = cache->key.objectid; + cache = rb_entry(n, struct btrfs_block_group, cache_node); + end = cache->start + cache->length - 1; + start = cache->start; if (bytenr < start) { - if (!contains && (!ret || start < ret->key.objectid)) + if (!contains && (!ret || start < ret->start)) ret = cache; n = n->rb_left; } else if (bytenr > start) { @@ -221,8 +219,8 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search( } if (ret) { btrfs_get_block_group(ret); - if (bytenr == 0 && info->first_logical_byte > ret->key.objectid) - info->first_logical_byte = ret->key.objectid; + if (bytenr == 0 && info->first_logical_byte > ret->start) + info->first_logical_byte = ret->start; } spin_unlock(&info->block_group_cache_lock); @@ -232,7 +230,7 @@ static struct btrfs_block_group_cache *block_group_cache_tree_search( /* * Return the block group that starts at or after bytenr */ -struct btrfs_block_group_cache *btrfs_lookup_first_block_group( +struct btrfs_block_group *btrfs_lookup_first_block_group( struct btrfs_fs_info *info, u64 bytenr) { return block_group_cache_tree_search(info, bytenr, 0); @@ -241,14 +239,14 @@ struct btrfs_block_group_cache 
*btrfs_lookup_first_block_group( /* * Return the block group that contains the given bytenr */ -struct btrfs_block_group_cache *btrfs_lookup_block_group( +struct btrfs_block_group *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr) { return block_group_cache_tree_search(info, bytenr, 1); } -struct btrfs_block_group_cache *btrfs_next_block_group( - struct btrfs_block_group_cache *cache) +struct btrfs_block_group *btrfs_next_block_group( + struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; struct rb_node *node; @@ -257,7 +255,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group( /* If our block group was removed, we need a full search. */ if (RB_EMPTY_NODE(&cache->cache_node)) { - const u64 next_bytenr = cache->key.objectid + cache->key.offset; + const u64 next_bytenr = cache->start + cache->length; spin_unlock(&fs_info->block_group_cache_lock); btrfs_put_block_group(cache); @@ -266,8 +264,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group( node = rb_next(&cache->cache_node); btrfs_put_block_group(cache); if (node) { - cache = rb_entry(node, struct btrfs_block_group_cache, - cache_node); + cache = rb_entry(node, struct btrfs_block_group, cache_node); btrfs_get_block_group(cache); } else cache = NULL; @@ -277,7 +274,7 @@ struct btrfs_block_group_cache *btrfs_next_block_group( bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) { - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; bool ret = true; bg = btrfs_lookup_block_group(fs_info, bytenr); @@ -300,7 +297,7 @@ bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) { - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; bg = btrfs_lookup_block_group(fs_info, bytenr); ASSERT(bg); @@ -314,7 +311,7 @@ void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) btrfs_put_block_group(bg); } -void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) +void btrfs_wait_nocow_writers(struct btrfs_block_group *bg) { wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); } @@ -322,7 +319,7 @@ void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, const u64 start) { - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; bg = btrfs_lookup_block_group(fs_info, start); ASSERT(bg); @@ -331,7 +328,7 @@ void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, btrfs_put_block_group(bg); } -void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) +void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg) { struct btrfs_space_info *space_info = bg->space_info; @@ -357,7 +354,7 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) } struct btrfs_caching_control *btrfs_get_caching_control( - struct btrfs_block_group_cache *cache) + struct btrfs_block_group *cache) { struct btrfs_caching_control *ctl; @@ -392,7 +389,7 @@ void btrfs_put_caching_control(struct btrfs_caching_control *ctl) * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using * any of the information in this block group. 
*/ -void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, +void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, u64 num_bytes) { struct btrfs_caching_control *caching_ctl; @@ -401,13 +398,13 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache if (!caching_ctl) return; - wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) || + wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || (cache->free_space_ctl->free_space >= num_bytes)); btrfs_put_caching_control(caching_ctl); } -int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) +int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) { struct btrfs_caching_control *caching_ctl; int ret = 0; @@ -416,7 +413,7 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) if (!caching_ctl) return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; - wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache)); + wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); if (cache->cached == BTRFS_CACHE_ERROR) ret = -EIO; btrfs_put_caching_control(caching_ctl); @@ -424,11 +421,11 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) } #ifdef CONFIG_BTRFS_DEBUG -static void fragment_free_space(struct btrfs_block_group_cache *block_group) +static void fragment_free_space(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; - u64 start = block_group->key.objectid; - u64 len = block_group->key.offset; + u64 start = block_group->start; + u64 len = block_group->length; u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? fs_info->nodesize : fs_info->sectorsize; u64 step = chunk << 1; @@ -450,8 +447,7 @@ static void fragment_free_space(struct btrfs_block_group_cache *block_group) * used yet since their free space will be released as soon as the transaction * commits. 
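Throughout the block-group hunks, struct btrfs_block_group_cache becomes struct btrfs_block_group and the embedded disk key/item are replaced by plain fields, so the repeated key.objectid / key.offset / btrfs_block_group_used(&cache->item) expressions collapse into direct member reads. An abridged sketch of the mapping implied by these conversions (the real structure has many more members; the on-disk item is now only touched through the btrfs_stack_block_group_*() accessors when reading or writing the tree):

struct btrfs_block_group {		/* was: struct btrfs_block_group_cache */
	u64 start;			/* was: cache->key.objectid */
	u64 length;			/* was: cache->key.offset */
	u64 used;			/* was: btrfs_block_group_used(&cache->item) */
	u64 flags;
	/* ... */
};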
*/ -u64 add_new_free_space(struct btrfs_block_group_cache *block_group, - u64 start, u64 end) +u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end) { struct btrfs_fs_info *info = block_group->fs_info; u64 extent_start, extent_end, size, total_added = 0; @@ -491,7 +487,7 @@ u64 add_new_free_space(struct btrfs_block_group_cache *block_group, static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) { - struct btrfs_block_group_cache *block_group = caching_ctl->block_group; + struct btrfs_block_group *block_group = caching_ctl->block_group; struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_root *extent_root = fs_info->extent_root; struct btrfs_path *path; @@ -507,7 +503,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) if (!path) return -ENOMEM; - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); + last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); #ifdef CONFIG_BTRFS_DEBUG /* @@ -587,13 +583,12 @@ next: goto next; } - if (key.objectid < block_group->key.objectid) { + if (key.objectid < block_group->start) { path->slots[0]++; continue; } - if (key.objectid >= block_group->key.objectid + - block_group->key.offset) + if (key.objectid >= block_group->start + block_group->length) break; if (key.type == BTRFS_EXTENT_ITEM_KEY || @@ -617,8 +612,7 @@ next: ret = 0; total_found += add_new_free_space(block_group, last, - block_group->key.objectid + - block_group->key.offset); + block_group->start + block_group->length); caching_ctl->progress = (u64)-1; out: @@ -628,7 +622,7 @@ out: static noinline void caching_thread(struct btrfs_work *work) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_fs_info *fs_info; struct btrfs_caching_control *caching_ctl; int ret; @@ -656,8 +650,7 @@ static noinline void caching_thread(struct btrfs_work *work) spin_lock(&block_group->space_info->lock); spin_lock(&block_group->lock); - bytes_used = block_group->key.offset - - btrfs_block_group_used(&block_group->item); + bytes_used = block_group->length - block_group->used; block_group->space_info->bytes_used += bytes_used >> 1; spin_unlock(&block_group->lock); spin_unlock(&block_group->space_info->lock); @@ -677,8 +670,7 @@ static noinline void caching_thread(struct btrfs_work *work) btrfs_put_block_group(block_group); } -int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, - int load_cache_only) +int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only) { DEFINE_WAIT(wait); struct btrfs_fs_info *fs_info = cache->fs_info; @@ -693,10 +685,9 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, mutex_init(&caching_ctl->mutex); init_waitqueue_head(&caching_ctl->wait); caching_ctl->block_group = cache; - caching_ctl->progress = cache->key.objectid; + caching_ctl->progress = cache->start; refcount_set(&caching_ctl->count, 1); - btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, - caching_thread, NULL, NULL); + btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); spin_lock(&cache->lock); /* @@ -763,8 +754,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); - bytes_used = cache->key.offset - - btrfs_block_group_used(&cache->item); + bytes_used = cache->length - cache->used; cache->space_info->bytes_used += bytes_used >> 1; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); @@ -833,27 
+823,36 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) * * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group * in the whole filesystem + * + * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups */ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) { - if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) { + bool found_raid56 = false; + bool found_raid1c34 = false; + + if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) || + (flags & BTRFS_BLOCK_GROUP_RAID1C3) || + (flags & BTRFS_BLOCK_GROUP_RAID1C4)) { struct list_head *head = &fs_info->space_info; struct btrfs_space_info *sinfo; list_for_each_entry_rcu(sinfo, head, list) { - bool found = false; - down_read(&sinfo->groups_sem); if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) - found = true; + found_raid56 = true; if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) - found = true; + found_raid56 = true; + if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3])) + found_raid1c34 = true; + if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4])) + found_raid1c34 = true; up_read(&sinfo->groups_sem); - - if (found) - return; } - btrfs_clear_fs_incompat(fs_info, RAID56); + if (found_raid56) + btrfs_clear_fs_incompat(fs_info, RAID56); + if (found_raid1c34) + btrfs_clear_fs_incompat(fs_info, RAID1C34); } } @@ -863,7 +862,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_path *path; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_free_cluster *cluster; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_key key; @@ -886,10 +885,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, * remove it. 
*/ btrfs_free_excluded_extents(block_group); - btrfs_free_ref_tree_range(fs_info, block_group->key.objectid, - block_group->key.offset); + btrfs_free_ref_tree_range(fs_info, block_group->start, + block_group->length); - memcpy(&key, &block_group->key, sizeof(key)); index = btrfs_bg_flags_to_raid_index(block_group->flags); factor = btrfs_bg_type_to_factor(block_group->flags); @@ -967,8 +965,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, } key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; key.type = 0; + key.offset = block_group->start; ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); if (ret < 0) @@ -987,7 +985,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, &fs_info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); - if (fs_info->first_logical_byte == block_group->key.objectid) + if (fs_info->first_logical_byte == block_group->start) fs_info->first_logical_byte = (u64)-1; spin_unlock(&fs_info->block_group_cache_lock); @@ -1048,19 +1046,21 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { WARN_ON(block_group->space_info->total_bytes - < block_group->key.offset); + < block_group->length); WARN_ON(block_group->space_info->bytes_readonly - < block_group->key.offset); + < block_group->length); WARN_ON(block_group->space_info->disk_total - < block_group->key.offset * factor); + < block_group->length * factor); } - block_group->space_info->total_bytes -= block_group->key.offset; - block_group->space_info->bytes_readonly -= block_group->key.offset; - block_group->space_info->disk_total -= block_group->key.offset * factor; + block_group->space_info->total_bytes -= block_group->length; + block_group->space_info->bytes_readonly -= block_group->length; + block_group->space_info->disk_total -= block_group->length * factor; spin_unlock(&block_group->space_info->lock); - memcpy(&key, &block_group->key, sizeof(key)); + key.objectid = block_group->start; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + key.offset = block_group->length; mutex_lock(&fs_info->chunk_mutex); spin_lock(&block_group->lock); @@ -1180,7 +1180,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( * data in this block group. That check should be done by relocation routine, * not this function. 
*/ -static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) +static int inc_block_group_ro(struct btrfs_block_group *cache, int force) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; @@ -1209,8 +1209,8 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) goto out; } - num_bytes = cache->key.offset - cache->reserved - cache->pinned - - cache->bytes_super - btrfs_block_group_used(&cache->item); + num_bytes = cache->length - cache->reserved - cache->pinned - + cache->bytes_super - cache->used; sinfo_used = btrfs_space_info_used(sinfo, true); /* @@ -1231,8 +1231,7 @@ out: spin_unlock(&sinfo->lock); if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { btrfs_info(cache->fs_info, - "unable to make block group %llu ro", - cache->key.objectid); + "unable to make block group %llu ro", cache->start); btrfs_info(cache->fs_info, "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu", sinfo_used, num_bytes, min_allocable_bytes); @@ -1247,7 +1246,7 @@ out: */ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; struct btrfs_trans_handle *trans; int ret = 0; @@ -1261,7 +1260,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) int trimming; block_group = list_first_entry(&fs_info->unused_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, bg_list); list_del_init(&block_group->bg_list); @@ -1279,8 +1278,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) down_write(&space_info->groups_sem); spin_lock(&block_group->lock); if (block_group->reserved || block_group->pinned || - btrfs_block_group_used(&block_group->item) || - block_group->ro || + block_group->used || block_group->ro || list_is_singular(&block_group->list)) { /* * We want to bail if we made new allocations or have @@ -1308,7 +1306,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * properly if we fail to join the transaction. */ trans = btrfs_start_trans_remove_block_group(fs_info, - block_group->key.objectid); + block_group->start); if (IS_ERR(trans)) { btrfs_dec_block_group_ro(block_group); ret = PTR_ERR(trans); @@ -1319,8 +1317,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * We could have pending pinned extents for this block group, * just delete them, we don't care about them anymore. */ - start = block_group->key.objectid; - end = start + block_group->key.offset - 1; + start = block_group->start; + end = start + block_group->length - 1; /* * Hold the unused_bg_unpin_mutex lock to avoid racing with * btrfs_finish_extent_commit(). If we are at transaction N, @@ -1375,7 +1373,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * Btrfs_remove_chunk will abort the transaction if things go * horribly wrong. 
*/ - ret = btrfs_remove_chunk(trans, block_group->key.objectid); + ret = btrfs_remove_chunk(trans, block_group->start); if (ret) { if (trimming) @@ -1410,7 +1408,7 @@ next: spin_unlock(&fs_info->unused_bgs_lock); } -void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg) +void btrfs_mark_bg_unused(struct btrfs_block_group *bg) { struct btrfs_fs_info *fs_info = bg->fs_info; @@ -1478,7 +1476,7 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info, read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), sizeof(bg)); - flags = btrfs_block_group_flags(&bg) & + flags = btrfs_stack_block_group_flags(&bg) & BTRFS_BLOCK_GROUP_TYPE_MASK; if (flags != (em->map_lookup->type & @@ -1518,7 +1516,7 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) write_sequnlock(&fs_info->profiles_lock); } -static int exclude_super_stripes(struct btrfs_block_group_cache *cache) +static int exclude_super_stripes(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; u64 bytenr; @@ -1526,10 +1524,10 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) int stripe_len; int i, nr, ret; - if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { - stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; + if (cache->start < BTRFS_SUPER_INFO_OFFSET) { + stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; cache->bytes_super += stripe_len; - ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid, + ret = btrfs_add_excluded_extent(fs_info, cache->start, stripe_len); if (ret) return ret; @@ -1537,7 +1535,7 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { bytenr = btrfs_sb_offset(i); - ret = btrfs_rmap_block(fs_info, cache->key.objectid, + ret = btrfs_rmap_block(fs_info, cache->start, bytenr, &logical, &nr, &stripe_len); if (ret) return ret; @@ -1545,21 +1543,19 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) while (nr--) { u64 start, len; - if (logical[nr] > cache->key.objectid + - cache->key.offset) + if (logical[nr] > cache->start + cache->length) continue; - if (logical[nr] + stripe_len <= cache->key.objectid) + if (logical[nr] + stripe_len <= cache->start) continue; start = logical[nr]; - if (start < cache->key.objectid) { - start = cache->key.objectid; + if (start < cache->start) { + start = cache->start; len = (logical[nr] + stripe_len) - start; } else { len = min_t(u64, stripe_len, - cache->key.objectid + - cache->key.offset - start); + cache->start + cache->length - start); } cache->bytes_super += len; @@ -1575,7 +1571,7 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) return 0; } -static void link_block_group(struct btrfs_block_group_cache *cache) +static void link_block_group(struct btrfs_block_group *cache) { struct btrfs_space_info *space_info = cache->space_info; int index = btrfs_bg_flags_to_raid_index(cache->flags); @@ -1591,10 +1587,10 @@ static void link_block_group(struct btrfs_block_group_cache *cache) btrfs_sysfs_add_block_group_type(cache); } -static struct btrfs_block_group_cache *btrfs_create_block_group_cache( +static struct btrfs_block_group *btrfs_create_block_group_cache( struct btrfs_fs_info *fs_info, u64 start, u64 size) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) @@ -1607,9 +1603,8 @@ static struct btrfs_block_group_cache *btrfs_create_block_group_cache( return NULL; } - 
cache->key.objectid = start; - cache->key.offset = size; - cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + cache->start = start; + cache->length = size; cache->fs_info = fs_info; cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); @@ -1640,7 +1635,7 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) { struct extent_map_tree *map_tree = &fs_info->mapping_tree; struct extent_map *em; - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; u64 start = 0; int ret = 0; @@ -1665,15 +1660,14 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) free_extent_map(em); break; } - if (bg->key.objectid != em->start || - bg->key.offset != em->len || + if (bg->start != em->start || bg->length != em->len || (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { btrfs_err(fs_info, "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", em->start, em->len, em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, - bg->key.objectid, bg->key.offset, + bg->start, bg->length, bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); ret = -EUCLEAN; free_extent_map(em); @@ -1687,22 +1681,117 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) return ret; } +static int read_one_block_group(struct btrfs_fs_info *info, + struct btrfs_path *path, + const struct btrfs_key *key, + int need_clear) +{ + struct extent_buffer *leaf = path->nodes[0]; + struct btrfs_block_group *cache; + struct btrfs_space_info *space_info; + struct btrfs_block_group_item bgi; + const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); + int slot = path->slots[0]; + int ret; + + ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); + + cache = btrfs_create_block_group_cache(info, key->objectid, key->offset); + if (!cache) + return -ENOMEM; + + if (need_clear) { + /* + * When we mount with old space cache, we need to + * set BTRFS_DC_CLEAR and set dirty flag. + * + * a) Setting 'BTRFS_DC_CLEAR' makes sure that we + * truncate the old free space cache inode and + * setup a new one. + * b) Setting 'dirty flag' makes sure that we flush + * the new space cache info onto disk. + */ + if (btrfs_test_opt(info, SPACE_CACHE)) + cache->disk_cache_state = BTRFS_DC_CLEAR; + } + read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), + sizeof(bgi)); + cache->used = btrfs_stack_block_group_used(&bgi); + cache->flags = btrfs_stack_block_group_flags(&bgi); + if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && + (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { + btrfs_err(info, +"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", + cache->start); + ret = -EINVAL; + goto error; + } + + /* + * We need to exclude the super stripes now so that the space info has + * super bytes accounted for, otherwise we'll think we have more space + * than we actually do. + */ + ret = exclude_super_stripes(cache); + if (ret) { + /* We may have excluded something, so call this just in case. */ + btrfs_free_excluded_extents(cache); + goto error; + } + + /* + * Check for two cases, either we are full, and therefore don't need + * to bother with the caching work since we won't find any space, or we + * are empty, and we can just add all the space in and be done with it. + * This saves us _a_lot_ of time, particularly in the full case. 
+ */ + if (key->offset == cache->used) { + cache->last_byte_to_unpin = (u64)-1; + cache->cached = BTRFS_CACHE_FINISHED; + btrfs_free_excluded_extents(cache); + } else if (cache->used == 0) { + cache->last_byte_to_unpin = (u64)-1; + cache->cached = BTRFS_CACHE_FINISHED; + add_new_free_space(cache, key->objectid, + key->objectid + key->offset); + btrfs_free_excluded_extents(cache); + } + + ret = btrfs_add_block_group_cache(info, cache); + if (ret) { + btrfs_remove_free_space_cache(cache); + goto error; + } + trace_btrfs_add_block_group(info, cache, 0); + btrfs_update_space_info(info, cache->flags, key->offset, + cache->used, cache->bytes_super, &space_info); + + cache->space_info = space_info; + + link_block_group(cache); + + set_avail_alloc_bits(info, cache->flags); + if (btrfs_chunk_readonly(info, cache->start)) { + inc_block_group_ro(cache, 1); + } else if (cache->used == 0) { + ASSERT(list_empty(&cache->bg_list)); + btrfs_mark_bg_unused(cache); + } + return 0; +error: + btrfs_put_block_group(cache); + return ret; +} + int btrfs_read_block_groups(struct btrfs_fs_info *info) { struct btrfs_path *path; int ret; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_space_info *space_info; struct btrfs_key key; - struct btrfs_key found_key; - struct extent_buffer *leaf; int need_clear = 0; u64 cache_gen; - u64 feature; - int mixed; - - feature = btrfs_super_incompat_flags(info->super_copy); - mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS); key.objectid = 0; key.offset = 0; @@ -1726,107 +1815,13 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) if (ret != 0) goto error; - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - - cache = btrfs_create_block_group_cache(info, found_key.objectid, - found_key.offset); - if (!cache) { - ret = -ENOMEM; - goto error; - } - - if (need_clear) { - /* - * When we mount with old space cache, we need to - * set BTRFS_DC_CLEAR and set dirty flag. - * - * a) Setting 'BTRFS_DC_CLEAR' makes sure that we - * truncate the old free space cache inode and - * setup a new one. - * b) Setting 'dirty flag' makes sure that we flush - * the new space cache info onto disk. - */ - if (btrfs_test_opt(info, SPACE_CACHE)) - cache->disk_cache_state = BTRFS_DC_CLEAR; - } - - read_extent_buffer(leaf, &cache->item, - btrfs_item_ptr_offset(leaf, path->slots[0]), - sizeof(cache->item)); - cache->flags = btrfs_block_group_flags(&cache->item); - if (!mixed && - ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && - (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { - btrfs_err(info, -"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", - cache->key.objectid); - ret = -EINVAL; + btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); + ret = read_one_block_group(info, path, &key, need_clear); + if (ret < 0) goto error; - } - - key.objectid = found_key.objectid + found_key.offset; + key.objectid += key.offset; + key.offset = 0; btrfs_release_path(path); - - /* - * We need to exclude the super stripes now so that the space - * info has super bytes accounted for, otherwise we'll think - * we have more space than we actually do. - */ - ret = exclude_super_stripes(cache); - if (ret) { - /* - * We may have excluded something, so call this just in - * case. 
- */ - btrfs_free_excluded_extents(cache); - btrfs_put_block_group(cache); - goto error; - } - - /* - * Check for two cases, either we are full, and therefore - * don't need to bother with the caching work since we won't - * find any space, or we are empty, and we can just add all - * the space in and be done with it. This saves us _a_lot_ of - * time, particularly in the full case. - */ - if (found_key.offset == btrfs_block_group_used(&cache->item)) { - cache->last_byte_to_unpin = (u64)-1; - cache->cached = BTRFS_CACHE_FINISHED; - btrfs_free_excluded_extents(cache); - } else if (btrfs_block_group_used(&cache->item) == 0) { - cache->last_byte_to_unpin = (u64)-1; - cache->cached = BTRFS_CACHE_FINISHED; - add_new_free_space(cache, found_key.objectid, - found_key.objectid + - found_key.offset); - btrfs_free_excluded_extents(cache); - } - - ret = btrfs_add_block_group_cache(info, cache); - if (ret) { - btrfs_remove_free_space_cache(cache); - btrfs_put_block_group(cache); - goto error; - } - - trace_btrfs_add_block_group(info, cache, 0); - btrfs_update_space_info(info, cache->flags, found_key.offset, - btrfs_block_group_used(&cache->item), - cache->bytes_super, &space_info); - - cache->space_info = space_info; - - link_block_group(cache); - - set_avail_alloc_bits(info, cache->flags); - if (btrfs_chunk_readonly(info, cache->key.objectid)) { - inc_block_group_ro(cache, 1); - } else if (btrfs_block_group_used(&cache->item) == 0) { - ASSERT(list_empty(&cache->bg_list)); - btrfs_mark_bg_unused(cache); - } } list_for_each_entry_rcu(space_info, &info->space_info, list) { @@ -1860,7 +1855,7 @@ error: void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_root *extent_root = fs_info->extent_root; struct btrfs_block_group_item item; struct btrfs_key key; @@ -1871,14 +1866,19 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) while (!list_empty(&trans->new_bgs)) { block_group = list_first_entry(&trans->new_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, bg_list); if (ret) goto next; spin_lock(&block_group->lock); - memcpy(&item, &block_group->item, sizeof(item)); - memcpy(&key, &block_group->key, sizeof(key)); + btrfs_set_stack_block_group_used(&item, block_group->used); + btrfs_set_stack_block_group_chunk_objectid(&item, + BTRFS_FIRST_CHUNK_TREE_OBJECTID); + btrfs_set_stack_block_group_flags(&item, block_group->flags); + key.objectid = block_group->start; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + key.offset = block_group->length; spin_unlock(&block_group->lock); ret = btrfs_insert_item(trans, extent_root, &key, &item, @@ -1901,7 +1901,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; int ret; btrfs_set_log_full_commit(trans); @@ -1910,11 +1910,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, if (!cache) return -ENOMEM; - btrfs_set_block_group_used(&cache->item, bytes_used); - btrfs_set_block_group_chunk_objectid(&cache->item, - BTRFS_FIRST_CHUNK_TREE_OBJECTID); - btrfs_set_block_group_flags(&cache->item, type); - + cache->used = bytes_used; cache->flags = type; cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; @@ -2021,8 +2017,17 @@ static u64 
update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) return flags; } -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) - +/* + * Mark one block group RO, can be called several times for the same block + * group. + * + * @cache: the destination block group + * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to + * ensure we still have some free space after marking this + * block group RO. + */ +int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, + bool do_chunk_alloc) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_trans_handle *trans; @@ -2052,25 +2057,29 @@ again: goto again; } - /* - * if we are changing raid levels, try to allocate a corresponding - * block group with the new raid level. - */ - alloc_flags = update_block_group_flags(fs_info, cache->flags); - if (alloc_flags != cache->flags) { - ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); + if (do_chunk_alloc) { /* - * ENOSPC is allowed here, we may have enough space - * already allocated at the new raid level to - * carry on + * If we are changing raid levels, try to allocate a + * corresponding block group with the new raid level. */ - if (ret == -ENOSPC) - ret = 0; - if (ret < 0) - goto out; + alloc_flags = update_block_group_flags(fs_info, cache->flags); + if (alloc_flags != cache->flags) { + ret = btrfs_chunk_alloc(trans, alloc_flags, + CHUNK_ALLOC_FORCE); + /* + * ENOSPC is allowed here, we may have enough space + * already allocated at the new raid level to carry on + */ + if (ret == -ENOSPC) + ret = 0; + if (ret < 0) + goto out; + } } - ret = inc_block_group_ro(cache, 0); + ret = inc_block_group_ro(cache, !do_chunk_alloc); + if (!do_chunk_alloc) + goto unlock_out; if (!ret) goto out; alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); @@ -2085,13 +2094,14 @@ out: check_system_chunk(trans, alloc_flags); mutex_unlock(&fs_info->chunk_mutex); } +unlock_out: mutex_unlock(&fs_info->ro_block_group_mutex); btrfs_end_transaction(trans); return ret; } -void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) +void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; @@ -2101,9 +2111,8 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) spin_lock(&sinfo->lock); spin_lock(&cache->lock); if (!--cache->ro) { - num_bytes = cache->key.offset - cache->reserved - - cache->pinned - cache->bytes_super - - btrfs_block_group_used(&cache->item); + num_bytes = cache->length - cache->reserved - + cache->pinned - cache->bytes_super - cache->used; sinfo->bytes_readonly -= num_bytes; list_del_init(&cache->ro_list); } @@ -2113,15 +2122,21 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) static int write_one_cache_group(struct btrfs_trans_handle *trans, struct btrfs_path *path, - struct btrfs_block_group_cache *cache) + struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; struct btrfs_root *extent_root = fs_info->extent_root; unsigned long bi; struct extent_buffer *leaf; + struct btrfs_block_group_item bgi; + struct btrfs_key key; + + key.objectid = cache->start; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + key.offset = cache->length; - ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); + ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1); if (ret) { if (ret > 0) ret = -ENOENT; @@ -2130,7 +2145,11 @@ static int write_one_cache_group(struct 
btrfs_trans_handle *trans, leaf = path->nodes[0]; bi = btrfs_item_ptr_offset(leaf, path->slots[0]); - write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); + btrfs_set_stack_block_group_used(&bgi, cache->used); + btrfs_set_stack_block_group_chunk_objectid(&bgi, + BTRFS_FIRST_CHUNK_TREE_OBJECTID); + btrfs_set_stack_block_group_flags(&bgi, cache->flags); + write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); btrfs_mark_buffer_dirty(leaf); fail: btrfs_release_path(path); @@ -2138,7 +2157,7 @@ fail: } -static int cache_save_setup(struct btrfs_block_group_cache *block_group, +static int cache_save_setup(struct btrfs_block_group *block_group, struct btrfs_trans_handle *trans, struct btrfs_path *path) { @@ -2156,7 +2175,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, * If this block group is smaller than 100 megs don't bother caching the * block group. */ - if (block_group->key.offset < (100 * SZ_1M)) { + if (block_group->length < (100 * SZ_1M)) { spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_WRITTEN; spin_unlock(&block_group->lock); @@ -2257,7 +2276,7 @@ again: * taking up quite a bit since it's not folded into the other space * cache. */ - num_pages = div_u64(block_group->key.offset, SZ_256M); + num_pages = div_u64(block_group->length, SZ_256M); if (!num_pages) num_pages = 1; @@ -2302,7 +2321,7 @@ out: int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache, *tmp; + struct btrfs_block_group *cache, *tmp; struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_path *path; @@ -2340,7 +2359,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_transaction *cur_trans = trans->transaction; int ret = 0; int should_put; @@ -2377,8 +2396,7 @@ again: while (!list_empty(&dirty)) { bool drop_reserve = true; - cache = list_first_entry(&dirty, - struct btrfs_block_group_cache, + cache = list_first_entry(&dirty, struct btrfs_block_group, dirty_list); /* * This can happen if something re-dirties a block group that @@ -2503,7 +2521,7 @@ again: int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_transaction *cur_trans = trans->transaction; int ret = 0; int should_put; @@ -2533,7 +2551,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) spin_lock(&cur_trans->dirty_bgs_lock); while (!list_empty(&cur_trans->dirty_bgs)) { cache = list_first_entry(&cur_trans->dirty_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, dirty_list); /* @@ -2615,7 +2633,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) * to use it without any locking */ while (!list_empty(io)) { - cache = list_first_entry(io, struct btrfs_block_group_cache, + cache = list_first_entry(io, struct btrfs_block_group, io_list); list_del_init(&cache->io_list); btrfs_wait_cache_io(trans, cache, path); @@ -2630,7 +2648,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, int alloc) { struct btrfs_fs_info *info = trans->fs_info; - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; 
u64 total = num_bytes; u64 old_val; u64 byte_in_group; @@ -2661,11 +2679,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, * is because we need the unpinning stage to actually add the * space back to the block group, otherwise we will leak space. */ - if (!alloc && cache->cached == BTRFS_CACHE_NO) + if (!alloc && !btrfs_block_group_done(cache)) btrfs_cache_block_group(cache, 1); - byte_in_group = bytenr - cache->key.objectid; - WARN_ON(byte_in_group > cache->key.offset); + byte_in_group = bytenr - cache->start; + WARN_ON(byte_in_group > cache->length); spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); @@ -2674,11 +2692,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, cache->disk_cache_state < BTRFS_DC_CLEAR) cache->disk_cache_state = BTRFS_DC_CLEAR; - old_val = btrfs_block_group_used(&cache->item); - num_bytes = min(total, cache->key.offset - byte_in_group); + old_val = cache->used; + num_bytes = min(total, cache->length - byte_in_group); if (alloc) { old_val += num_bytes; - btrfs_set_block_group_used(&cache->item, old_val); + cache->used = old_val; cache->reserved -= num_bytes; cache->space_info->bytes_reserved -= num_bytes; cache->space_info->bytes_used += num_bytes; @@ -2687,7 +2705,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, spin_unlock(&cache->space_info->lock); } else { old_val -= num_bytes; - btrfs_set_block_group_used(&cache->item, old_val); + cache->used = old_val; cache->pinned += num_bytes; btrfs_space_info_update_bytes_pinned(info, cache->space_info, num_bytes); @@ -2745,7 +2763,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, * reservation and the block group has become read only we cannot make the * reservation and return -EAGAIN, otherwise this function always succeeds. */ -int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, +int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, u64 ram_bytes, u64 num_bytes, int delalloc) { struct btrfs_space_info *space_info = cache->space_info; @@ -2781,7 +2799,7 @@ int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, * A and before transaction A commits you free that leaf, you call this with * reserve set to 0 in order to clear the reservation. */ -void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, +void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes, int delalloc) { struct btrfs_space_info *space_info = cache->space_info; @@ -2987,9 +3005,7 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) } /* - * If @is_allocation is true, reserve space in the system space info necessary - * for allocating a chunk, otherwise if it's false, reserve space necessary for - * removing a chunk. 
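The btrfs_update_block_group() hunk above now derives the group-relative offset from cache->start and clamps the update with cache->length instead of going through the embedded key. The following is a minimal user-space sketch of just that arithmetic; struct bg and account_in_group() are invented names, not btrfs code, and the real function additionally adjusts pinned/reserved counters under the space-info and block-group locks.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the fields the patch switches to: start/length/used. */
struct bg {
        uint64_t start;
        uint64_t length;
        uint64_t used;
};

/*
 * Clamp an update of [bytenr, bytenr + total) to one block group, mirroring
 * the byte_in_group/min() arithmetic above.  Returns how many bytes of the
 * range actually fall inside this group.
 */
static uint64_t account_in_group(struct bg *cache, uint64_t bytenr,
                                 uint64_t total, int alloc)
{
        uint64_t byte_in_group = bytenr - cache->start;
        uint64_t num_bytes;

        if (byte_in_group > cache->length)
                return 0;               /* caller looked up the wrong group */
        num_bytes = total < cache->length - byte_in_group ?
                    total : cache->length - byte_in_group;
        if (alloc)
                cache->used += num_bytes;
        else
                cache->used -= num_bytes;
        return num_bytes;
}

int main(void)
{
        struct bg g = { .start = 1024 * 1024, .length = 256 * 1024, .used = 0 };

        /* Allocate 300K starting at the group start: only 256K lands here. */
        printf("%llu\n", (unsigned long long)
               account_in_group(&g, g.start, 300 * 1024, 1));
        return 0;
}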
+ * Reserve space in the system space for allocating or removing a chunk */ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) { @@ -3046,7 +3062,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) void btrfs_put_block_group_cache(struct btrfs_fs_info *info) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; u64 last = 0; while (1) { @@ -3074,7 +3090,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) spin_unlock(&block_group->lock); ASSERT(block_group->io_ctl.inode == NULL); iput(inode); - last = block_group->key.objectid + block_group->key.offset; + last = block_group->start + block_group->length; btrfs_put_block_group(block_group); } } @@ -3086,7 +3102,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) */ int btrfs_free_block_groups(struct btrfs_fs_info *info) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; struct btrfs_caching_control *caching_ctl; struct rb_node *n; @@ -3103,7 +3119,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) spin_lock(&info->unused_bgs_lock); while (!list_empty(&info->unused_bgs)) { block_group = list_first_entry(&info->unused_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, bg_list); list_del_init(&block_group->bg_list); btrfs_put_block_group(block_group); @@ -3112,7 +3128,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) spin_lock(&info->block_group_cache_lock); while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { - block_group = rb_entry(n, struct btrfs_block_group_cache, + block_group = rb_entry(n, struct btrfs_block_group, cache_node); rb_erase(&block_group->cache_node, &info->block_group_cache_tree); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index c391800388dd..9b409676c4b2 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -34,7 +34,7 @@ struct btrfs_caching_control { struct mutex mutex; wait_queue_head_t wait; struct btrfs_work work; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; u64 progress; refcount_t count; }; @@ -42,14 +42,15 @@ struct btrfs_caching_control { /* Once caching_thread() finds this much free space, it will wake up waiters. 
*/ #define CACHING_CTL_WAKE_UP SZ_2M -struct btrfs_block_group_cache { - struct btrfs_key key; - struct btrfs_block_group_item item; +struct btrfs_block_group { struct btrfs_fs_info *fs_info; struct inode *inode; spinlock_t lock; + u64 start; + u64 length; u64 pinned; u64 reserved; + u64 used; u64 delalloc_bytes; u64 bytes_super; u64 flags; @@ -159,7 +160,7 @@ struct btrfs_block_group_cache { #ifdef CONFIG_BTRFS_DEBUG static inline int btrfs_should_fragment_free_space( - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; @@ -170,29 +171,29 @@ static inline int btrfs_should_fragment_free_space( } #endif -struct btrfs_block_group_cache *btrfs_lookup_first_block_group( +struct btrfs_block_group *btrfs_lookup_first_block_group( struct btrfs_fs_info *info, u64 bytenr); -struct btrfs_block_group_cache *btrfs_lookup_block_group( +struct btrfs_block_group *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr); -struct btrfs_block_group_cache *btrfs_next_block_group( - struct btrfs_block_group_cache *cache); -void btrfs_get_block_group(struct btrfs_block_group_cache *cache); -void btrfs_put_block_group(struct btrfs_block_group_cache *cache); +struct btrfs_block_group *btrfs_next_block_group( + struct btrfs_block_group *cache); +void btrfs_get_block_group(struct btrfs_block_group *cache); +void btrfs_put_block_group(struct btrfs_block_group *cache); void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, const u64 start); -void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); +void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg); bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); -void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); -void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, +void btrfs_wait_nocow_writers(struct btrfs_block_group *bg); +void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, u64 num_bytes); -int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache); -int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, +int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache); +int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only); void btrfs_put_caching_control(struct btrfs_caching_control *ctl); struct btrfs_caching_control *btrfs_get_caching_control( - struct btrfs_block_group_cache *cache); -u64 add_new_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *cache); +u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end); struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( struct btrfs_fs_info *fs_info, @@ -200,21 +201,22 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( int btrfs_remove_block_group(struct btrfs_trans_handle *trans, u64 group_start, struct extent_map *em); void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); -void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg); +void btrfs_mark_bg_unused(struct btrfs_block_group *bg); int btrfs_read_block_groups(struct btrfs_fs_info *info); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); -int 
btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache); -void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); +int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, + bool do_chunk_alloc); +void btrfs_dec_block_group_ro(struct btrfs_block_group *cache); int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); int btrfs_update_block_group(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, int alloc); -int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, +int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, u64 ram_bytes, u64 num_bytes, int delalloc); -void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, +void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes, int delalloc); int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, enum btrfs_chunk_alloc_enum force); @@ -239,8 +241,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info) return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); } -static inline int btrfs_block_group_cache_done( - struct btrfs_block_group_cache *cache) +static inline int btrfs_block_group_done(struct btrfs_block_group *cache) { smp_mb(); return cache->cached == BTRFS_CACHE_FINISHED || diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index f853835c409c..4e12a477d32e 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -63,9 +63,6 @@ struct btrfs_inode { /* held while logging the inode in tree-log.c */ struct mutex log_mutex; - /* held while doing delalloc reservations */ - struct mutex delalloc_mutex; - /* used to order data wrt metadata */ struct btrfs_ordered_inode_tree ordered_tree; diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b05b361e2062..ee834ef7beb4 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -29,6 +29,41 @@ #include "extent_io.h" #include "extent_map.h" +int zlib_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out); +int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb); +int zlib_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen); +struct list_head *zlib_alloc_workspace(unsigned int level); +void zlib_free_workspace(struct list_head *ws); +struct list_head *zlib_get_workspace(unsigned int level); + +int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out); +int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb); +int lzo_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen); +struct list_head *lzo_alloc_workspace(unsigned int level); +void lzo_free_workspace(struct list_head *ws); + +int zstd_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out); +int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb); +int zstd_decompress(struct list_head *ws, unsigned char *data_in, + struct page 
*dest_page, unsigned long start_byte, size_t srclen, + size_t destlen); +void zstd_init_workspace_manager(void); +void zstd_cleanup_workspace_manager(void); +struct list_head *zstd_alloc_workspace(unsigned int level); +void zstd_free_workspace(struct list_head *ws); +struct list_head *zstd_get_workspace(unsigned int level); +void zstd_put_workspace(struct list_head *ws); + static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; const char* btrfs_compress_type2str(enum btrfs_compression_type type) @@ -39,6 +74,8 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type) case BTRFS_COMPRESS_ZSTD: case BTRFS_COMPRESS_NONE: return btrfs_compress_types[type]; + default: + break; } return NULL; @@ -60,6 +97,70 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len) return false; } +static int compression_compress_pages(int type, struct list_head *ws, + struct address_space *mapping, u64 start, struct page **pages, + unsigned long *out_pages, unsigned long *total_in, + unsigned long *total_out) +{ + switch (type) { + case BTRFS_COMPRESS_ZLIB: + return zlib_compress_pages(ws, mapping, start, pages, + out_pages, total_in, total_out); + case BTRFS_COMPRESS_LZO: + return lzo_compress_pages(ws, mapping, start, pages, + out_pages, total_in, total_out); + case BTRFS_COMPRESS_ZSTD: + return zstd_compress_pages(ws, mapping, start, pages, + out_pages, total_in, total_out); + case BTRFS_COMPRESS_NONE: + default: + /* + * This can't happen, the type is validated several times + * before we get here. As a sane fallback, return what the + * callers will understand as 'no compression happened'. + */ + return -E2BIG; + } +} + +static int compression_decompress_bio(int type, struct list_head *ws, + struct compressed_bio *cb) +{ + switch (type) { + case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb); + case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb); + case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb); + case BTRFS_COMPRESS_NONE: + default: + /* + * This can't happen, the type is validated several times + * before we get here. + */ + BUG(); + } +} + +static int compression_decompress(int type, struct list_head *ws, + unsigned char *data_in, struct page *dest_page, + unsigned long start_byte, size_t srclen, size_t destlen) +{ + switch (type) { + case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page, + start_byte, srclen, destlen); + case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page, + start_byte, srclen, destlen); + case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page, + start_byte, srclen, destlen); + case BTRFS_COMPRESS_NONE: + default: + /* + * This can't happen, the type is validated several times + * before we get here. 
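The compression_compress_pages()/compression_decompress*() helpers above replace the old per-type function pointers with a plain switch on the compression type. Below is a small, self-contained sketch of the same dispatch-by-enum pattern; comp_type, zlib_do(), lzo_do() and zstd_do() are hypothetical stand-ins for the real zlib_/lzo_/zstd_ entry points.

#include <stdio.h>

/* Compression types; the values mirror the on-disk enum. */
enum comp_type { COMP_NONE = 0, COMP_ZLIB = 1, COMP_LZO = 2, COMP_ZSTD = 3 };

/* Hypothetical per-type implementations. */
static int zlib_do(const char *in) { printf("zlib: %s\n", in); return 0; }
static int lzo_do(const char *in)  { printf("lzo: %s\n", in);  return 0; }
static int zstd_do(const char *in) { printf("zstd: %s\n", in); return 0; }

/*
 * One switch per operation replaces the ->compress_pages/->decompress
 * function pointers.
 */
static int compress_dispatch(enum comp_type type, const char *in)
{
        switch (type) {
        case COMP_ZLIB: return zlib_do(in);
        case COMP_LZO:  return lzo_do(in);
        case COMP_ZSTD: return zstd_do(in);
        case COMP_NONE:
        default:
                /* Type is validated earlier; signal "no compression done". */
                return -1;
        }
}

int main(void)
{
        return compress_dispatch(COMP_ZSTD, "hello") ? 1 : 0;
}

One plausible motivation, not stated in the patch itself, is that every callee is now visible to the compiler and the "cannot happen" default is handled in exactly one place per operation rather than behind a per-type ops table.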
+ */ + BUG(); + } +} + static int btrfs_decompress_bio(struct compressed_bio *cb); static inline int compressed_bio_size(struct btrfs_fs_info *fs_info, @@ -311,7 +412,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long compressed_len, struct page **compressed_pages, unsigned long nr_pages, - unsigned int write_flags) + unsigned int write_flags, + struct cgroup_subsys_state *blkcg_css) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct bio *bio = NULL; @@ -320,7 +422,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, int pg_index = 0; struct page *page; u64 first_byte = disk_start; - struct block_device *bdev; blk_status_t ret; int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; @@ -339,13 +440,15 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, cb->orig_bio = NULL; cb->nr_pages = nr_pages; - bdev = fs_info->fs_devices->latest_bdev; - bio = btrfs_bio_alloc(first_byte); - bio_set_dev(bio, bdev); bio->bi_opf = REQ_OP_WRITE | write_flags; bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; + + if (blkcg_css) { + bio->bi_opf |= REQ_CGROUP_PUNT; + bio_associate_blkg_from_css(bio, blkcg_css); + } refcount_set(&cb->pending_bios, 1); /* create and submit bios for the compressed pages */ @@ -378,14 +481,13 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, BUG_ON(ret); /* -ENOMEM */ } - ret = btrfs_map_bio(fs_info, bio, 0, 1); + ret = btrfs_map_bio(fs_info, bio, 0); if (ret) { bio->bi_status = ret; bio_endio(bio); } bio = btrfs_bio_alloc(first_byte); - bio_set_dev(bio, bdev); bio->bi_opf = REQ_OP_WRITE | write_flags; bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; @@ -409,7 +511,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, BUG_ON(ret); /* -ENOMEM */ } - ret = btrfs_map_bio(fs_info, bio, 0, 1); + ret = btrfs_map_bio(fs_info, bio, 0); if (ret) { bio->bi_status = ret; bio_endio(bio); @@ -553,7 +655,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, unsigned long nr_pages; unsigned long pg_index; struct page *page; - struct block_device *bdev; struct bio *comp_bio; u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; u64 em_len; @@ -604,8 +705,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, if (!cb->compressed_pages) goto fail1; - bdev = fs_info->fs_devices->latest_bdev; - for (pg_index = 0; pg_index < nr_pages; pg_index++) { cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); @@ -624,7 +723,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = bio->bi_iter.bi_size; comp_bio = btrfs_bio_alloc(cur_disk_byte); - bio_set_dev(comp_bio, bdev); comp_bio->bi_opf = REQ_OP_READ; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; @@ -668,14 +766,13 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, fs_info->sectorsize); sums += csum_size * nr_sectors; - ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, comp_bio, mirror_num); if (ret) { comp_bio->bi_status = ret; bio_endio(comp_bio); } comp_bio = btrfs_bio_alloc(cur_disk_byte); - bio_set_dev(comp_bio, bdev); comp_bio->bi_opf = REQ_OP_READ; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; @@ -693,7 +790,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, BUG_ON(ret); /* -ENOMEM 
*/ } - ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, comp_bio, mirror_num); if (ret) { comp_bio->bi_status = ret; bio_endio(comp_bio); @@ -764,26 +861,6 @@ struct heuristic_ws { static struct workspace_manager heuristic_wsm; -static void heuristic_init_workspace_manager(void) -{ - btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress); -} - -static void heuristic_cleanup_workspace_manager(void) -{ - btrfs_cleanup_workspace_manager(&heuristic_wsm); -} - -static struct list_head *heuristic_get_workspace(unsigned int level) -{ - return btrfs_get_workspace(&heuristic_wsm, level); -} - -static void heuristic_put_workspace(struct list_head *ws) -{ - btrfs_put_workspace(&heuristic_wsm, ws); -} - static void free_heuristic_ws(struct list_head *ws) { struct heuristic_ws *workspace; @@ -824,12 +901,7 @@ fail: } const struct btrfs_compress_op btrfs_heuristic_compress = { - .init_workspace_manager = heuristic_init_workspace_manager, - .cleanup_workspace_manager = heuristic_cleanup_workspace_manager, - .get_workspace = heuristic_get_workspace, - .put_workspace = heuristic_put_workspace, - .alloc_workspace = alloc_heuristic_ws, - .free_workspace = free_heuristic_ws, + .workspace_manager = &heuristic_wsm, }; static const struct btrfs_compress_op * const btrfs_compress_op[] = { @@ -840,13 +912,44 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = { &btrfs_zstd_compress, }; -void btrfs_init_workspace_manager(struct workspace_manager *wsm, - const struct btrfs_compress_op *ops) +static struct list_head *alloc_workspace(int type, unsigned int level) { - struct list_head *workspace; + switch (type) { + case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level); + case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level); + case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level); + case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level); + default: + /* + * This can't happen, the type is validated several times + * before we get here. + */ + BUG(); + } +} - wsm->ops = ops; +static void free_workspace(int type, struct list_head *ws) +{ + switch (type) { + case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws); + case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws); + case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws); + case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws); + default: + /* + * This can't happen, the type is validated several times + * before we get here. 
+ */ + BUG(); + } +} + +static void btrfs_init_workspace_manager(int type) +{ + struct workspace_manager *wsm; + struct list_head *workspace; + wsm = btrfs_compress_op[type]->workspace_manager; INIT_LIST_HEAD(&wsm->idle_ws); spin_lock_init(&wsm->ws_lock); atomic_set(&wsm->total_ws, 0); @@ -856,7 +959,7 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm, * Preallocate one workspace for each compression type so we can * guarantee forward progress in the worst case */ - workspace = wsm->ops->alloc_workspace(0); + workspace = alloc_workspace(type, 0); if (IS_ERR(workspace)) { pr_warn( "BTRFS: cannot preallocate compression workspace, will try later\n"); @@ -867,14 +970,16 @@ void btrfs_init_workspace_manager(struct workspace_manager *wsm, } } -void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman) +static void btrfs_cleanup_workspace_manager(int type) { + struct workspace_manager *wsman; struct list_head *ws; + wsman = btrfs_compress_op[type]->workspace_manager; while (!list_empty(&wsman->idle_ws)) { ws = wsman->idle_ws.next; list_del(ws); - wsman->ops->free_workspace(ws); + free_workspace(type, ws); atomic_dec(&wsman->total_ws); } } @@ -885,9 +990,9 @@ void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman) * Preallocation makes a forward progress guarantees and we do not return * errors. */ -struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, - unsigned int level) +struct list_head *btrfs_get_workspace(int type, unsigned int level) { + struct workspace_manager *wsm; struct list_head *workspace; int cpus = num_online_cpus(); unsigned nofs_flag; @@ -897,6 +1002,7 @@ struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, wait_queue_head_t *ws_wait; int *free_ws; + wsm = btrfs_compress_op[type]->workspace_manager; idle_ws = &wsm->idle_ws; ws_lock = &wsm->ws_lock; total_ws = &wsm->total_ws; @@ -932,7 +1038,7 @@ again: * context of btrfs_compress_bio/btrfs_compress_pages */ nofs_flag = memalloc_nofs_save(); - workspace = wsm->ops->alloc_workspace(level); + workspace = alloc_workspace(type, level); memalloc_nofs_restore(nofs_flag); if (IS_ERR(workspace)) { @@ -965,21 +1071,34 @@ again: static struct list_head *get_workspace(int type, int level) { - return btrfs_compress_op[type]->get_workspace(level); + switch (type) { + case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level); + case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level); + case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level); + case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level); + default: + /* + * This can't happen, the type is validated several times + * before we get here. 
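btrfs_init_workspace_manager() above now takes a type, reaches the per-type manager through btrfs_compress_op[type]->workspace_manager, and preallocates a single workspace so at least one request can always make progress even under memory pressure. The stand-alone sketch below shows the same shape of pool under simplifying assumptions: invented ws/ws_manager/manager_* names, a pthread mutex instead of the kernel spinlock, and no wait queue or retry path.

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

/* A trimmed-down workspace: just a buffer on an idle list. */
struct ws {
        struct ws *next;
        char buf[4096];
};

struct ws_manager {
        pthread_mutex_t lock;
        struct ws *idle;        /* analog of idle_ws */
        int total;              /* analog of total_ws */
};

/* One manager per compression type, looked up by index. */
static struct ws_manager managers[4];

static void manager_init(int type)
{
        struct ws_manager *m = &managers[type];
        struct ws *w;

        pthread_mutex_init(&m->lock, NULL);
        m->idle = NULL;
        m->total = 0;

        /* Preallocate one workspace so at least one request can always run. */
        w = calloc(1, sizeof(*w));
        if (w) {
                w->next = m->idle;
                m->idle = w;
                m->total = 1;
        } else {
                fprintf(stderr, "cannot preallocate workspace, will try later\n");
        }
}

static struct ws *manager_get(int type)
{
        struct ws_manager *m = &managers[type];
        struct ws *w;

        pthread_mutex_lock(&m->lock);
        w = m->idle;
        if (w)
                m->idle = w->next;
        pthread_mutex_unlock(&m->lock);
        if (w)
                return w;
        /* Fall back to a fresh allocation; the real code may wait instead. */
        return calloc(1, sizeof(*w));
}

static void manager_put(int type, struct ws *w)
{
        struct ws_manager *m = &managers[type];

        pthread_mutex_lock(&m->lock);
        w->next = m->idle;
        m->idle = w;
        pthread_mutex_unlock(&m->lock);
}

int main(void)
{
        manager_init(1);
        struct ws *w = manager_get(1);
        manager_put(1, w);
        return 0;
}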
+ */ + BUG(); + } } /* * put a workspace struct back on the list or free it if we have enough * idle ones sitting around */ -void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws) +void btrfs_put_workspace(int type, struct list_head *ws) { + struct workspace_manager *wsm; struct list_head *idle_ws; spinlock_t *ws_lock; atomic_t *total_ws; wait_queue_head_t *ws_wait; int *free_ws; + wsm = btrfs_compress_op[type]->workspace_manager; idle_ws = &wsm->idle_ws; ws_lock = &wsm->ws_lock; total_ws = &wsm->total_ws; @@ -995,7 +1114,7 @@ void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws) } spin_unlock(ws_lock); - wsm->ops->free_workspace(ws); + free_workspace(type, ws); atomic_dec(total_ws); wake: cond_wake_up(ws_wait); @@ -1003,7 +1122,18 @@ wake: static void put_workspace(int type, struct list_head *ws) { - return btrfs_compress_op[type]->put_workspace(ws); + switch (type) { + case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws); + case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws); + case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws); + case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws); + default: + /* + * This can't happen, the type is validated several times + * before we get here. + */ + BUG(); + } } /* @@ -1042,10 +1172,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, level = btrfs_compress_set_level(type, level); workspace = get_workspace(type, level); - ret = btrfs_compress_op[type]->compress_pages(workspace, mapping, - start, pages, - out_pages, - total_in, total_out); + ret = compression_compress_pages(type, workspace, mapping, start, pages, + out_pages, total_in, total_out); put_workspace(type, workspace); return ret; } @@ -1071,7 +1199,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb) int type = cb->compress_type; workspace = get_workspace(type, 0); - ret = btrfs_compress_op[type]->decompress_bio(workspace, cb); + ret = compression_decompress_bio(type, workspace, cb); put_workspace(type, workspace); return ret; @@ -1089,9 +1217,8 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, int ret; workspace = get_workspace(type, 0); - ret = btrfs_compress_op[type]->decompress(workspace, data_in, - dest_page, start_byte, - srclen, destlen); + ret = compression_decompress(type, workspace, data_in, dest_page, + start_byte, srclen, destlen); put_workspace(type, workspace); return ret; @@ -1099,18 +1226,18 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, void __init btrfs_init_compress(void) { - int i; - - for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) - btrfs_compress_op[i]->init_workspace_manager(); + btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE); + btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB); + btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO); + zstd_init_workspace_manager(); } void __cold btrfs_exit_compress(void) { - int i; - - for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) - btrfs_compress_op[i]->cleanup_workspace_manager(); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB); + btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO); + zstd_cleanup_workspace_manager(); } /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 4cb8be9ff88b..d253f7aa8ed5 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -93,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, 
unsigned long compressed_len, struct page **compressed_pages, unsigned long nr_pages, - unsigned int write_flags); + unsigned int write_flags, + struct cgroup_subsys_state *blkcg_css); blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags); @@ -104,11 +105,10 @@ enum btrfs_compression_type { BTRFS_COMPRESS_ZLIB = 1, BTRFS_COMPRESS_LZO = 2, BTRFS_COMPRESS_ZSTD = 3, - BTRFS_COMPRESS_TYPES = 3, + BTRFS_NR_COMPRESS_TYPES = 4, }; struct workspace_manager { - const struct btrfs_compress_op *ops; struct list_head idle_ws; spinlock_t ws_lock; /* Number of free workspaces */ @@ -119,50 +119,18 @@ struct workspace_manager { wait_queue_head_t ws_wait; }; -void btrfs_init_workspace_manager(struct workspace_manager *wsm, - const struct btrfs_compress_op *ops); -struct list_head *btrfs_get_workspace(struct workspace_manager *wsm, - unsigned int level); -void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws); -void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm); +struct list_head *btrfs_get_workspace(int type, unsigned int level); +void btrfs_put_workspace(int type, struct list_head *ws); struct btrfs_compress_op { - void (*init_workspace_manager)(void); - - void (*cleanup_workspace_manager)(void); - - struct list_head *(*get_workspace)(unsigned int level); - - void (*put_workspace)(struct list_head *ws); - - struct list_head *(*alloc_workspace)(unsigned int level); - - void (*free_workspace)(struct list_head *workspace); - - int (*compress_pages)(struct list_head *workspace, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out); - - int (*decompress_bio)(struct list_head *workspace, - struct compressed_bio *cb); - - int (*decompress)(struct list_head *workspace, - unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen); - + struct workspace_manager *workspace_manager; /* Maximum level supported by the compression algorithm */ unsigned int max_level; unsigned int default_level; }; /* The heuristic workspaces are managed via the 0th workspace manager */ -#define BTRFS_NR_WORKSPACE_MANAGERS (BTRFS_COMPRESS_TYPES + 1) +#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES extern const struct btrfs_compress_op btrfs_heuristic_compress; extern const struct btrfs_compress_op btrfs_zlib_compress; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index e59cde204b2f..5b6e86aaf2e1 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -32,8 +32,13 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, static const struct btrfs_csums { u16 size; const char *name; + const char *driver; } btrfs_csums[] = { [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, + [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, + [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" }, + [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b", + .driver = "blake2b-256" }, }; int btrfs_super_csum_size(const struct btrfs_super_block *s) @@ -51,34 +56,25 @@ const char *btrfs_super_csum_name(u16 csum_type) return btrfs_csums[csum_type].name; } -struct btrfs_path *btrfs_alloc_path(void) +/* + * Return driver name if defined, otherwise the name that's also a valid driver + * name + */ +const char *btrfs_super_csum_driver(u16 csum_type) { - return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); + /* csum type is validated at mount time */ + 
return btrfs_csums[csum_type].driver ?: + btrfs_csums[csum_type].name; } -/* - * set all locked nodes in the path to blocking locks. This should - * be done before scheduling - */ -noinline void btrfs_set_path_blocking(struct btrfs_path *p) +size_t __const btrfs_get_num_csums(void) { - int i; - for (i = 0; i < BTRFS_MAX_LEVEL; i++) { - if (!p->nodes[i] || !p->locks[i]) - continue; - /* - * If we currently have a spinning reader or writer lock this - * will bump the count of blocking holders and drop the - * spinlock. - */ - if (p->locks[i] == BTRFS_READ_LOCK) { - btrfs_set_lock_blocking_read(p->nodes[i]); - p->locks[i] = BTRFS_READ_LOCK_BLOCKING; - } else if (p->locks[i] == BTRFS_WRITE_LOCK) { - btrfs_set_lock_blocking_write(p->nodes[i]); - p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; - } - } + return ARRAY_SIZE(btrfs_csums); +} + +struct btrfs_path *btrfs_alloc_path(void) +{ + return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); } /* this also releases the path */ @@ -1125,7 +1121,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) parent_start = buf->start; - extent_buffer_get(cow); + atomic_inc(&cow->refs); ret = tree_mod_log_insert_root(root->node, cow, 1); BUG_ON(ret < 0); rcu_assign_pointer(root->node, cow); @@ -1563,7 +1559,7 @@ static int comp_keys(const struct btrfs_disk_key *disk, /* * same as comp_keys only with two btrfs_key's */ -int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) +int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2) { if (k1->objectid > k2->objectid) return 1; @@ -2036,7 +2032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, /* update the path */ if (left) { if (btrfs_header_nritems(left) > orig_slot) { - extent_buffer_get(left); + atomic_inc(&left->refs); /* left was locked after cow */ path->nodes[level] = left; path->slots[level + 1] -= 1; @@ -2379,32 +2375,6 @@ static noinline void unlock_up(struct btrfs_path *path, int level, } /* - * This releases any locks held in the path starting at level and - * going all the way up to the root. - * - * btrfs_search_slot will keep the lock held on higher nodes in a few - * corner cases, such as COW of the block at slot zero in the node. This - * ignores those rules, and it should only be called when there are no - * more updates to be done higher up in the tree. - */ -noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) -{ - int i; - - if (path->keep_locks) - return; - - for (i = level; i < BTRFS_MAX_LEVEL; i++) { - if (!path->nodes[i]) - continue; - if (!path->locks[i]) - continue; - btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); - path->locks[i] = 0; - } -} - -/* * helper function for btrfs_search_slot. The goal is to find a block * in cache without setting the path to blocking. If we find the block * we return zero and the path is unchanged. 
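btrfs_super_csum_driver() above falls back to the checksum name when no separate crypto driver name is recorded in the btrfs_csums[] table (only blake2b needs one, as "blake2b-256"). Here is a compilable sketch of that table-driven lookup; csum_desc and csum_driver() are illustrative names, and the GNU a ?: b shorthand used in the kernel is spelled out as a plain conditional.

#include <stdio.h>

/* Mirror of the expanded checksum table: size, human name, crypto driver. */
struct csum_desc {
        unsigned short size;
        const char *name;
        const char *driver;     /* NULL when the name is already a valid driver */
};

enum { CSUM_CRC32 = 0, CSUM_XXHASH, CSUM_SHA256, CSUM_BLAKE2, CSUM_NR };

static const struct csum_desc csums[CSUM_NR] = {
        [CSUM_CRC32]  = { .size = 4,  .name = "crc32c" },
        [CSUM_XXHASH] = { .size = 8,  .name = "xxhash64" },
        [CSUM_SHA256] = { .size = 32, .name = "sha256" },
        [CSUM_BLAKE2] = { .size = 32, .name = "blake2b", .driver = "blake2b-256" },
};

/* Driver name if one is set, otherwise the name doubles as the driver. */
static const char *csum_driver(int type)
{
        return csums[type].driver ? csums[type].driver : csums[type].name;
}

int main(void)
{
        for (int i = 0; i < CSUM_NR; i++)
                printf("%-8s -> %s (%u bytes)\n", csums[i].name,
                       csum_driver(i), csums[i].size);
        return 0;
}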
@@ -2652,7 +2622,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, } else { b = root->commit_root; - extent_buffer_get(b); + atomic_inc(&b->refs); } level = btrfs_header_level(b); /* @@ -2785,12 +2755,10 @@ again: } while (b) { + int dec = 0; + level = btrfs_header_level(b); - /* - * setup the path here so we can release it under lock - * contention with the cow code - */ if (cow) { bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); @@ -2861,73 +2829,7 @@ cow_done: if (ret < 0) goto done; - if (level != 0) { - int dec = 0; - if (ret && slot > 0) { - dec = 1; - slot -= 1; - } - p->slots[level] = slot; - err = setup_nodes_for_search(trans, root, p, b, level, - ins_len, &write_lock_level); - if (err == -EAGAIN) - goto again; - if (err) { - ret = err; - goto done; - } - b = p->nodes[level]; - slot = p->slots[level]; - - /* - * slot 0 is special, if we change the key - * we have to update the parent pointer - * which means we must have a write lock - * on the parent - */ - if (slot == 0 && ins_len && - write_lock_level < level + 1) { - write_lock_level = level + 1; - btrfs_release_path(p); - goto again; - } - - unlock_up(p, level, lowest_unlock, - min_write_lock_level, &write_lock_level); - - if (level == lowest_level) { - if (dec) - p->slots[level]++; - goto done; - } - - err = read_block_for_search(root, p, &b, level, - slot, key); - if (err == -EAGAIN) - goto again; - if (err) { - ret = err; - goto done; - } - - if (!p->skip_locking) { - level = btrfs_header_level(b); - if (level <= write_lock_level) { - if (!btrfs_try_tree_write_lock(b)) { - btrfs_set_path_blocking(p); - btrfs_tree_lock(b); - } - p->locks[level] = BTRFS_WRITE_LOCK; - } else { - if (!btrfs_tree_read_lock_atomic(b)) { - btrfs_set_path_blocking(p); - btrfs_tree_read_lock(b); - } - p->locks[level] = BTRFS_READ_LOCK; - } - p->nodes[level] = b; - } - } else { + if (level == 0) { p->slots[level] = slot; if (ins_len > 0 && btrfs_leaf_free_space(b) < ins_len) { @@ -2952,6 +2854,67 @@ cow_done: min_write_lock_level, NULL); goto done; } + if (ret && slot > 0) { + dec = 1; + slot--; + } + p->slots[level] = slot; + err = setup_nodes_for_search(trans, root, p, b, level, ins_len, + &write_lock_level); + if (err == -EAGAIN) + goto again; + if (err) { + ret = err; + goto done; + } + b = p->nodes[level]; + slot = p->slots[level]; + + /* + * Slot 0 is special, if we change the key we have to update + * the parent pointer which means we must have a write lock on + * the parent + */ + if (slot == 0 && ins_len && write_lock_level < level + 1) { + write_lock_level = level + 1; + btrfs_release_path(p); + goto again; + } + + unlock_up(p, level, lowest_unlock, min_write_lock_level, + &write_lock_level); + + if (level == lowest_level) { + if (dec) + p->slots[level]++; + goto done; + } + + err = read_block_for_search(root, p, &b, level, slot, key); + if (err == -EAGAIN) + goto again; + if (err) { + ret = err; + goto done; + } + + if (!p->skip_locking) { + level = btrfs_header_level(b); + if (level <= write_lock_level) { + if (!btrfs_try_tree_write_lock(b)) { + btrfs_set_path_blocking(p); + btrfs_tree_lock(b); + } + p->locks[level] = BTRFS_WRITE_LOCK; + } else { + if (!btrfs_tree_read_lock_atomic(b)) { + btrfs_set_path_blocking(p); + btrfs_tree_read_lock(b); + } + p->locks[level] = BTRFS_READ_LOCK; + } + p->nodes[level] = b; + } } ret = 1; done: @@ -3008,6 +2971,8 @@ again: p->locks[level] = BTRFS_READ_LOCK; while (b) { + int dec = 0; + level = btrfs_header_level(b); p->nodes[level] = b; @@ -3028,47 +2993,45 @@ 
again: if (ret < 0) goto done; - if (level != 0) { - int dec = 0; - if (ret && slot > 0) { - dec = 1; - slot -= 1; - } + if (level == 0) { p->slots[level] = slot; unlock_up(p, level, lowest_unlock, 0, NULL); + goto done; + } - if (level == lowest_level) { - if (dec) - p->slots[level]++; - goto done; - } + if (ret && slot > 0) { + dec = 1; + slot--; + } + p->slots[level] = slot; + unlock_up(p, level, lowest_unlock, 0, NULL); - err = read_block_for_search(root, p, &b, level, - slot, key); - if (err == -EAGAIN) - goto again; - if (err) { - ret = err; - goto done; - } + if (level == lowest_level) { + if (dec) + p->slots[level]++; + goto done; + } - level = btrfs_header_level(b); - if (!btrfs_tree_read_lock_atomic(b)) { - btrfs_set_path_blocking(p); - btrfs_tree_read_lock(b); - } - b = tree_mod_log_rewind(fs_info, p, b, time_seq); - if (!b) { - ret = -ENOMEM; - goto done; - } - p->locks[level] = BTRFS_READ_LOCK; - p->nodes[level] = b; - } else { - p->slots[level] = slot; - unlock_up(p, level, lowest_unlock, 0, NULL); + err = read_block_for_search(root, p, &b, level, slot, key); + if (err == -EAGAIN) + goto again; + if (err) { + ret = err; + goto done; + } + + level = btrfs_header_level(b); + if (!btrfs_tree_read_lock_atomic(b)) { + btrfs_set_path_blocking(p); + btrfs_tree_read_lock(b); + } + b = tree_mod_log_rewind(fs_info, p, b, time_seq); + if (!b) { + ret = -ENOMEM; goto done; } + p->locks[level] = BTRFS_READ_LOCK; + p->nodes[level] = b; } ret = 1; done: @@ -3433,7 +3396,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, free_extent_buffer(old); add_root_to_dirty_list(root); - extent_buffer_get(c); + atomic_inc(&c->refs); path->nodes[level] = c; path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; path->slots[level] = 0; @@ -4966,7 +4929,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, root_sub_used(root, leaf->len); - extent_buffer_get(leaf); + atomic_inc(&leaf->refs); btrfs_free_tree_block(trans, root, leaf, 0, 1); free_extent_buffer_stale(leaf); } @@ -5047,7 +5010,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, * for possible call to del_ptr below */ slot = path->slots[1]; - extent_buffer_get(leaf); + atomic_inc(&leaf->refs); btrfs_set_path_blocking(path); wret = push_leaf_left(trans, root, path, 1, 1, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 19d669d12ca1..b2e8fd8a8e59 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -28,6 +28,7 @@ #include <linux/dynamic_debug.h> #include <linux/refcount.h> #include <linux/crc32c.h> +#include "extent-io-tree.h" #include "extent_io.h" #include "extent_map.h" #include "async-thread.h" @@ -38,7 +39,7 @@ struct btrfs_transaction; struct btrfs_pending_snapshot; struct btrfs_delayed_ref_root; struct btrfs_space_info; -struct btrfs_block_group_cache; +struct btrfs_block_group; extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; @@ -56,9 +57,9 @@ struct btrfs_ref; * filesystem data as well that can be used to read data in order to repair * read errors on other disks. * - * Current value is derived from RAID1 with 2 copies. + * Current value is derived from RAID1C4 with 4 copies. 
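Both restructured search loops above keep the same "if (ret && slot > 0) slot--" step for interior nodes: node keys record the first key of each child, so when the binary search does not hit exactly, the child that can contain the wanted key is the previous slot. The sketch below illustrates that rule only; it is not the actual btrfs_bin_search() code.

#include <stdio.h>

/*
 * Find the child slot to descend into: keys[] holds the first key of each
 * child, so an inexact match means the wanted key lives in the previous
 * child -- which is why the search loop steps slot back by one.
 */
static int descend_slot(const unsigned long *keys, int nritems,
                        unsigned long want)
{
        int lo = 0, hi = nritems;

        while (lo < hi) {               /* lower_bound-style binary search */
                int mid = lo + (hi - lo) / 2;
                if (keys[mid] <= want)
                        lo = mid + 1;
                else
                        hi = mid;
        }
        /* lo is the first slot with key > want; step back unless at slot 0 */
        return lo > 0 ? lo - 1 : 0;
}

int main(void)
{
        unsigned long keys[] = { 10, 20, 30, 40 };

        printf("%d\n", descend_slot(keys, 4, 25));      /* -> 1, child [20,30) */
        printf("%d\n", descend_slot(keys, 4, 20));      /* -> 1, exact match   */
        printf("%d\n", descend_slot(keys, 4, 5));       /* -> 0, clamped       */
        return 0;
}

In btrfs_search_slot() the unified level != 0 path also keeps the dec flag so that, when the walk stops at lowest_level, p->slots[level] can be incremented back, exactly as in the hunks above.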
*/ -#define BTRFS_MAX_MIRRORS (2 + 1) +#define BTRFS_MAX_MIRRORS (4 + 1) #define BTRFS_MAX_LEVEL 8 @@ -291,7 +292,8 @@ struct btrfs_super_block { BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \ BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \ BTRFS_FEATURE_INCOMPAT_NO_HOLES | \ - BTRFS_FEATURE_INCOMPAT_METADATA_UUID) + BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \ + BTRFS_FEATURE_INCOMPAT_RAID1C34) #define BTRFS_FEATURE_INCOMPAT_SAFE_SET \ (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) @@ -413,7 +415,7 @@ struct btrfs_free_cluster { /* We did a full search and couldn't create a cluster */ bool fragmented; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; /* * when a cluster is allocated from a block group, we put the * cluster onto a list in the block group so that it can @@ -476,8 +478,8 @@ struct btrfs_swapfile_pin { void *ptr; struct inode *inode; /* - * If true, ptr points to a struct btrfs_block_group_cache. Otherwise, - * ptr points to a struct btrfs_device. + * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr + * points to a struct btrfs_device. */ bool is_block_group; }; @@ -722,7 +724,6 @@ struct btrfs_fs_info { struct btrfs_workqueue *endio_meta_write_workers; struct btrfs_workqueue *endio_write_workers; struct btrfs_workqueue *endio_freespace_worker; - struct btrfs_workqueue *submit_workers; struct btrfs_workqueue *caching_workers; struct btrfs_workqueue *readahead_workers; @@ -734,8 +735,6 @@ struct btrfs_fs_info { struct btrfs_workqueue *fixup_workers; struct btrfs_workqueue *delayed_workers; - /* the extent workers do delayed refs on the extent allocation tree */ - struct btrfs_workqueue *extent_workers; struct task_struct *transaction_kthread; struct task_struct *cleaner_kthread; u32 thread_pool_size; @@ -1521,18 +1520,18 @@ static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, } /* struct btrfs_block_group_item */ -BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, +BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item, used, 64); -BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, +BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item, used, 64); -BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, +BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid, struct btrfs_block_group_item, chunk_objectid, 64); -BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, +BTRFS_SETGET_FUNCS(block_group_chunk_objectid, struct btrfs_block_group_item, chunk_objectid, 64); -BTRFS_SETGET_FUNCS(disk_block_group_flags, +BTRFS_SETGET_FUNCS(block_group_flags, struct btrfs_block_group_item, flags, 64); -BTRFS_SETGET_STACK_FUNCS(block_group_flags, +BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags, struct btrfs_block_group_item, flags, 64); /* struct btrfs_free_space_info */ @@ -2165,6 +2164,9 @@ BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, int btrfs_super_csum_size(const struct btrfs_super_block *s); const char *btrfs_super_csum_name(u16 csum_type); +const char *btrfs_super_csum_driver(u16 csum_type); +size_t __const btrfs_get_num_csums(void); + /* * The leaf data grows from end-to-front in the node. 
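The accessor renames above separate the "stack" helpers, which operate on an in-memory struct btrfs_block_group_item and store its fields in the on-disk little-endian byte order, from the extent-buffer variants that read and write the item inside a leaf. The user-space sketch below shows building such an item before copying it into a buffer; it assumes the glibc htole64()/le64toh() helpers (the kernel uses cpu_to_le64/le64_to_cpu) and uses invented names (disk_bg_item, set_stack_bg_item).

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <endian.h>     /* htole64/le64toh; glibc-style helpers assumed */

/* The on-disk layout is little-endian regardless of host byte order. */
struct disk_bg_item {
        uint64_t used;
        uint64_t chunk_objectid;
        uint64_t flags;
} __attribute__((packed));

/* Analog of the btrfs_set_stack_block_group_*() helpers. */
static void set_stack_bg_item(struct disk_bg_item *item, uint64_t used,
                              uint64_t chunk_objectid, uint64_t flags)
{
        item->used = htole64(used);
        item->chunk_objectid = htole64(chunk_objectid);
        item->flags = htole64(flags);
}

int main(void)
{
        struct disk_bg_item item, check;
        unsigned char leaf[64];         /* pretend this is the leaf data area */

        /* Build the item from in-memory fields, then copy it into the "leaf". */
        set_stack_bg_item(&item, 123456, 256, 0x1);
        memcpy(leaf, &item, sizeof(item));

        memcpy(&check, leaf, sizeof(check));
        printf("used on disk: %llu\n", (unsigned long long)le64toh(check.used));
        return 0;
}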
@@ -2399,7 +2401,7 @@ static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info, int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, u64 start, u64 num_bytes); -void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache); +void btrfs_free_excluded_extents(struct btrfs_block_group *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count); void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, @@ -2455,8 +2457,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref); int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); -void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); -void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); +void btrfs_get_block_group_trimming(struct btrfs_block_group *cache); +void btrfs_put_block_group_trimming(struct btrfs_block_group *cache); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); enum btrfs_reserve_flush_enum { @@ -2489,8 +2491,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, int nitems, bool use_global_rsv); void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv); -void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, - bool qgroup_free); +void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes); int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); @@ -2510,7 +2511,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, int level, int *slot); -int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); +int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); int btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, int type); @@ -2570,8 +2571,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, void btrfs_release_path(struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); -void btrfs_set_path_blocking(struct btrfs_path *p); -void btrfs_unlock_up_safe(struct btrfs_path *p, int level); int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int slot, int nr); @@ -2873,10 +2872,9 @@ int btrfs_drop_inode(struct inode *inode); int __init btrfs_init_cachep(void); void __cold btrfs_destroy_cachep(void); struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root, int *new, - struct btrfs_path *path); + struct btrfs_root *root, struct btrfs_path *path); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root, int *was_new); + struct btrfs_root *root); struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 end, int create); @@ -2912,7 +2910,7 @@ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int btrfs_ioctl_get_supported_features(void __user *arg); void btrfs_sync_inode_flags_to_i_flags(struct inode *inode); -int btrfs_is_empty_uuid(u8 *uuid); +int __pure btrfs_is_empty_uuid(u8 *uuid); int btrfs_defrag_file(struct 
inode *inode, struct file *file, struct btrfs_ioctl_defrag_range_args *range, u64 newer_than, unsigned long max_pages); @@ -3146,7 +3144,7 @@ __cold void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function, unsigned int line, int errno, const char *fmt, ...); -const char *btrfs_decode_error(int errno); +const char * __attribute_const__ btrfs_decode_error(int errno); __cold void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index d949d7d2abed..4cdac4d834f5 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -307,7 +307,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) unsigned nr_extents; enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; int ret = 0; - bool delalloc_lock = true; /* * If we are a free space inode we need to not flush since we will be in @@ -320,7 +319,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) */ if (btrfs_is_free_space_inode(inode)) { flush = BTRFS_RESERVE_NO_FLUSH; - delalloc_lock = false; } else { if (current->journal_info) flush = BTRFS_RESERVE_FLUSH_LIMIT; @@ -329,9 +327,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) schedule_timeout(1); } - if (delalloc_lock) - mutex_lock(&inode->delalloc_mutex); - num_bytes = ALIGN(num_bytes, fs_info->sectorsize); /* @@ -348,10 +343,12 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) &qgroup_reserve); ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true); if (ret) - goto out_fail; + return ret; ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush); - if (ret) - goto out_qgroup; + if (ret) { + btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve); + return ret; + } /* * Now we need to update our outstanding extents and csum bytes _first_ @@ -375,16 +372,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) block_rsv->qgroup_rsv_reserved += qgroup_reserve; spin_unlock(&block_rsv->lock); - if (delalloc_lock) - mutex_unlock(&inode->delalloc_mutex); return 0; -out_qgroup: - btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve); -out_fail: - btrfs_inode_rsv_release(inode, true); - if (delalloc_lock) - mutex_unlock(&inode->delalloc_mutex); - return ret; } /** @@ -418,7 +406,6 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes, * btrfs_delalloc_release_extents - release our outstanding_extents * @inode: the inode to balance the reservation for. * @num_bytes: the number of bytes we originally reserved with - * @qgroup_free: do we need to free qgroup meta reservation or convert them. * * When we reserve space we increase outstanding_extents for the extents we may * add. Once we've set the range as delalloc or created our ordered extents we @@ -426,8 +413,7 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes, * temporarily tracked outstanding_extents. This _must_ be used in conjunction * with btrfs_delalloc_reserve_metadata. 
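With the delalloc mutex gone, btrfs_delalloc_reserve_metadata() above unwinds its two reservations directly: a failed qgroup reservation returns immediately, and a failed metadata reservation frees the qgroup prealloc before returning, with no error labels. A tiny sketch of that pattern follows, with invented reserve_qgroup()/reserve_metadata()/free_qgroup() stand-ins.

#include <stdio.h>

/* Hypothetical stand-ins for the two reservations made above. */
static int reserve_qgroup(long bytes)   { return bytes > 1000 ? -28 : 0; }  /* -ENOSPC */
static void free_qgroup(long bytes)     { printf("undo qgroup %ld\n", bytes); }
static int reserve_metadata(long bytes) { return bytes > 500 ? -28 : 0; }   /* -ENOSPC */

/*
 * Two-step reservation with direct unwinding: if the second step fails,
 * only the first needs to be rolled back, so no goto labels are needed.
 */
static int reserve_delalloc(long bytes)
{
        int ret;

        ret = reserve_qgroup(bytes);
        if (ret)
                return ret;                     /* nothing to undo yet */

        ret = reserve_metadata(bytes);
        if (ret) {
                free_qgroup(bytes);             /* undo step one, then bail */
                return ret;
        }
        return 0;
}

int main(void)
{
        printf("ok: %d\n", reserve_delalloc(100));      /* both succeed */
        printf("fail: %d\n", reserve_delalloc(800));    /* metadata fails -> -28 */
        return 0;
}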
*/ -void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, - bool qgroup_free) +void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes) { struct btrfs_fs_info *fs_info = inode->root->fs_info; unsigned num_extents; @@ -441,7 +427,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, if (btrfs_is_testing(fs_info)) return; - btrfs_inode_rsv_release(inode, qgroup_free); + btrfs_inode_rsv_release(inode, true); } /** diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 1f7f39b10bd0..d3e15e1d4a91 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -12,6 +12,7 @@ #include "transaction.h" #include "ctree.h" #include "qgroup.h" +#include "locking.h" #define BTRFS_DELAYED_WRITEBACK 512 #define BTRFS_DELAYED_BACKGROUND 128 @@ -1367,8 +1368,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, return -ENOMEM; async_work->delayed_root = delayed_root; - btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper, - btrfs_async_run_delayed_root, NULL, NULL); + btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL, + NULL); async_work->nr = nr; btrfs_queue_work(fs_info->delayed_workers, &async_work->work); @@ -1949,12 +1950,19 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) } inode_id = delayed_nodes[n - 1]->inode_id + 1; - - for (i = 0; i < n; i++) - refcount_inc(&delayed_nodes[i]->refs); + for (i = 0; i < n; i++) { + /* + * Don't increase refs in case the node is dead and + * about to be removed from the tree in the loop below + */ + if (!refcount_inc_not_zero(&delayed_nodes[i]->refs)) + delayed_nodes[i] = NULL; + } spin_unlock(&root->inode_lock); for (i = 0; i < n; i++) { + if (!delayed_nodes[i]) + continue; __btrfs_kill_delayed_node(delayed_nodes[i]); btrfs_release_delayed_node(delayed_nodes[i]); } diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 48890826b5e6..f639dde2a679 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -986,7 +986,7 @@ static int btrfs_dev_replace_kthread(void *data) return 0; } -int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) +int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) { if (!dev_replace->is_valid) return 0; diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index 78c5d8f1adda..60b70dacc299 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -17,6 +17,6 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info); void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info); int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info); -int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace); +int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace); #endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 044981cf6df9..e0edfdc9c82b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -205,7 +205,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) { - struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; int ret; @@ -213,7 +212,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode, read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); if (em) { - em->bdev = 
fs_info->fs_devices->latest_bdev; read_unlock(&em_tree->lock); goto out; } @@ -228,7 +226,6 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode, em->len = (u64)-1; em->block_len = (u64)-1; em->block_start = 0; - em->bdev = fs_info->fs_devices->latest_bdev; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); @@ -352,6 +349,9 @@ static bool btrfs_supported_super_csum(u16 csum_type) { switch (csum_type) { case BTRFS_CSUM_TYPE_CRC32: + case BTRFS_CSUM_TYPE_XXHASH: + case BTRFS_CSUM_TYPE_SHA256: + case BTRFS_CSUM_TYPE_BLAKE2: return true; default: return false; @@ -545,9 +545,11 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page) ret = btrfs_check_leaf_full(eb); if (ret < 0) { + btrfs_print_tree(eb, 0); btrfs_err(fs_info, "block=%llu write time tree block corruption detected", eb->start); + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); return ret; } write_extent_buffer(eb, result, 0, csum_size); @@ -608,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, /* the pending IO might have been the only thing that kept this buffer * in memory. Make sure we have a ref for all this other checks */ - extent_buffer_get(eb); + atomic_inc(&eb->refs); reads_done = atomic_dec_and_test(&eb->io_pages); if (!reads_done) @@ -706,43 +708,31 @@ static void end_workqueue_bio(struct bio *bio) struct btrfs_end_io_wq *end_io_wq = bio->bi_private; struct btrfs_fs_info *fs_info; struct btrfs_workqueue *wq; - btrfs_work_func_t func; fs_info = end_io_wq->info; end_io_wq->status = bio->bi_status; if (bio_op(bio) == REQ_OP_WRITE) { - if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) wq = fs_info->endio_meta_write_workers; - func = btrfs_endio_meta_write_helper; - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) wq = fs_info->endio_raid56_workers; - func = btrfs_endio_raid56_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } } else { - if (unlikely(end_io_wq->metadata == - BTRFS_WQ_ENDIO_DIO_REPAIR)) { + if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR)) wq = fs_info->endio_repair_workers; - func = btrfs_endio_repair_helper; - } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { + else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) wq = fs_info->endio_raid56_workers; - func = btrfs_endio_raid56_helper; - } else if (end_io_wq->metadata) { + else if (end_io_wq->metadata) wq = fs_info->endio_meta_workers; - func = btrfs_endio_meta_helper; - } else { + else wq = fs_info->endio_workers; - func = btrfs_endio_helper; - } } - btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); + btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); btrfs_queue_work(wq, &end_io_wq->work); } @@ -803,8 +793,13 @@ static void run_one_async_done(struct btrfs_work *work) return; } - ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, - async->mirror_num, 1); + /* + * All of the bios that pass through here are from async helpers. + * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context. + * This changes nothing when cgroups aren't in use. 
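
Since btrfs_init_work() no longer takes a per-type helper, end_workqueue_bio() above is reduced to picking the target workqueue for the completion. A small user-space sketch of that selection table; the enum and the queue-name strings only mirror the cases shown in the hunk:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative mirror of the BTRFS_WQ_ENDIO_* cases above. */
enum endio_type { ENDIO_DATA, ENDIO_METADATA, ENDIO_FREE_SPACE, ENDIO_RAID56, ENDIO_DIO_REPAIR };

static const char *pick_endio_queue(bool is_write, enum endio_type type)
{
	if (is_write) {
		if (type == ENDIO_METADATA)
			return "endio_meta_write_workers";
		if (type == ENDIO_FREE_SPACE)
			return "endio_freespace_worker";
		if (type == ENDIO_RAID56)
			return "endio_raid56_workers";
		return "endio_write_workers";
	}
	if (type == ENDIO_DIO_REPAIR)
		return "endio_repair_workers";
	if (type == ENDIO_RAID56)
		return "endio_raid56_workers";
	if (type != ENDIO_DATA)
		return "endio_meta_workers";
	return "endio_workers";
}

int main(void)
{
	printf("%s\n", pick_endio_queue(true, ENDIO_RAID56));
	printf("%s\n", pick_endio_queue(false, ENDIO_DATA));
	return 0;
}
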
+ */ + async->bio->bi_opf |= REQ_CGROUP_PUNT; + ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num); if (ret) { async->bio->bi_status = ret; bio_endio(async->bio); @@ -835,8 +830,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, async->mirror_num = mirror_num; async->submit_bio_start = submit_bio_start; - btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, - run_one_async_done, run_one_async_free); + btrfs_init_work(&async->work, run_one_async_start, run_one_async_done, + run_one_async_free); async->bio_offset = bio_offset; @@ -904,12 +899,12 @@ static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio, BTRFS_WQ_ENDIO_METADATA); if (ret) goto out_w_error; - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, bio, mirror_num); } else if (!async) { ret = btree_csum_one_bio(bio); if (ret) goto out_w_error; - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, bio, mirror_num); } else { /* * kthread helpers are used to submit writes so that @@ -1657,8 +1652,8 @@ static void end_workqueue_fn(struct btrfs_work *work) bio->bi_status = end_io_wq->status; bio->bi_private = end_io_wq->private; bio->bi_end_io = end_io_wq->end_io; - kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); bio_endio(bio); + kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); } static int cleaner_kthread(void *arg) @@ -1753,7 +1748,7 @@ static int transaction_kthread(void *arg) } now = ktime_get_seconds(); - if (cur->state < TRANS_STATE_BLOCKED && + if (cur->state < TRANS_STATE_COMMIT_START && !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && (now < cur->start_time || now - cur->start_time < fs_info->commit_interval)) { @@ -1792,18 +1787,18 @@ sleep: } /* - * this will find the highest generation in the array of - * root backups. The index of the highest array is returned, - * or -1 if we can't find anything. + * This will find the highest generation in the array of root backups. The + * index of the highest array is returned, or -EINVAL if we can't find + * anything. * * We check to make sure the array is valid by comparing the * generation of the latest root in the array with the generation * in the super block. If they don't match we pitch it. */ -static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) +static int find_newest_super_backup(struct btrfs_fs_info *info) { + const u64 newest_gen = btrfs_super_generation(info->super_copy); u64 cur; - int newest_index = -1; struct btrfs_root_backup *root_backup; int i; @@ -1811,37 +1806,10 @@ static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) root_backup = info->super_copy->super_roots + i; cur = btrfs_backup_tree_root_gen(root_backup); if (cur == newest_gen) - newest_index = i; + return i; } - /* check to see if we actually wrapped around */ - if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { - root_backup = info->super_copy->super_roots; - cur = btrfs_backup_tree_root_gen(root_backup); - if (cur == newest_gen) - newest_index = 0; - } - return newest_index; -} - - -/* - * find the oldest backup so we know where to store new entries - * in the backup array. 
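
The transaction_kthread hunk above now compares against TRANS_STATE_COMMIT_START when deciding whether to skip a wakeup. A toy predicate with the same shape, using illustrative field names rather than the real btrfs_transaction layout:

#include <stdbool.h>
#include <stdio.h>

struct toy_trans {
	int  state;            /* 0 = running, 1 = commit started */
	bool need_async_commit;
	long start_time;
};

/* Commit only if already committing, asked to, or old enough. */
static bool should_commit_now(const struct toy_trans *cur, long now, long commit_interval)
{
	if (cur->state >= 1)
		return true;
	if (cur->need_async_commit)
		return true;
	/* Too young (or clock went backwards): leave it for a later wakeup. */
	if (now < cur->start_time || now - cur->start_time < commit_interval)
		return false;
	return true;
}

int main(void)
{
	struct toy_trans young = { 0, false, 100 };
	struct toy_trans old   = { 0, false, 100 };

	printf("%d\n", should_commit_now(&young, 110, 30));  /* 0: wait */
	printf("%d\n", should_commit_now(&old,   200, 30));  /* 1: commit */
	return 0;
}
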
This will set the backup_root_index - * field in the fs_info struct - */ -static void find_oldest_super_backup(struct btrfs_fs_info *info, - u64 newest_gen) -{ - int newest_index = -1; - - newest_index = find_newest_super_backup(info, newest_gen); - /* if there was garbage in there, just move along */ - if (newest_index == -1) { - info->backup_root_index = 0; - } else { - info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; - } + return -EINVAL; } /* @@ -1851,22 +1819,8 @@ static void find_oldest_super_backup(struct btrfs_fs_info *info, */ static void backup_super_roots(struct btrfs_fs_info *info) { - int next_backup; + const int next_backup = info->backup_root_index; struct btrfs_root_backup *root_backup; - int last_backup; - - next_backup = info->backup_root_index; - last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % - BTRFS_NUM_BACKUP_ROOTS; - - /* - * just overwrite the last backup if we're at the same generation - * this happens only at umount - */ - root_backup = info->super_for_commit->super_roots + last_backup; - if (btrfs_backup_tree_root_gen(root_backup) == - btrfs_header_generation(info->tree_root->node)) - next_backup = last_backup; root_backup = info->super_for_commit->super_roots + next_backup; @@ -1939,40 +1893,31 @@ static void backup_super_roots(struct btrfs_fs_info *info) } /* - * this copies info out of the root backup array and back into - * the in-memory super block. It is meant to help iterate through - * the array, so you send it the number of backups you've already - * tried and the last backup index you used. + * read_backup_root - Reads a backup root based on the passed priority. Prio 0 + * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots * - * this returns -1 when it has tried all the backups + * fs_info - filesystem whose backup roots need to be read + * priority - priority of backup root required + * + * Returns backup root index on success and -EINVAL otherwise. 
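
Backup-root selection is now split into a plain scan (find_newest_super_backup) plus a priority-to-slot step (read_backup_root). A stand-alone sketch of both steps; the ring-size constant and the generation array are illustrative stand-ins for the super_roots array:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_BACKUP_ROOTS 4   /* stand-in for BTRFS_NUM_BACKUP_ROOTS */

/* Scan the ring for the slot whose generation matches the superblock. */
static int find_newest_backup(const uint64_t gens[NUM_BACKUP_ROOTS], uint64_t newest_gen)
{
	for (int i = 0; i < NUM_BACKUP_ROOTS; i++)
		if (gens[i] == newest_gen)
			return i;
	return -EINVAL;   /* garbage in the array: caller falls back */
}

/* Priority 0 is the newest slot, priority N steps N slots back (mod ring size). */
static int backup_slot_for_priority(const uint64_t gens[NUM_BACKUP_ROOTS],
				    uint64_t newest_gen, int priority)
{
	int newest = find_newest_backup(gens, newest_gen);

	if (newest < 0 || priority >= NUM_BACKUP_ROOTS)
		return -EINVAL;
	return (newest + NUM_BACKUP_ROOTS - priority) % NUM_BACKUP_ROOTS;
}

int main(void)
{
	uint64_t gens[NUM_BACKUP_ROOTS] = { 97, 98, 99, 96 };

	for (int prio = 0; prio < NUM_BACKUP_ROOTS; prio++)
		printf("priority %d -> slot %d\n", prio,
		       backup_slot_for_priority(gens, 99, prio));
	return 0;
}
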
*/ -static noinline int next_root_backup(struct btrfs_fs_info *info, - struct btrfs_super_block *super, - int *num_backups_tried, int *backup_index) +static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority) { + int backup_index = find_newest_super_backup(fs_info); + struct btrfs_super_block *super = fs_info->super_copy; struct btrfs_root_backup *root_backup; - int newest = *backup_index; - - if (*num_backups_tried == 0) { - u64 gen = btrfs_super_generation(super); - newest = find_newest_super_backup(info, gen); - if (newest == -1) - return -1; + if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) { + if (priority == 0) + return backup_index; - *backup_index = newest; - *num_backups_tried = 1; - } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { - /* we've tried all the backups, all done */ - return -1; + backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority; + backup_index %= BTRFS_NUM_BACKUP_ROOTS; } else { - /* jump to the next oldest backup */ - newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % - BTRFS_NUM_BACKUP_ROOTS; - *backup_index = newest; - *num_backups_tried += 1; + return -EINVAL; } - root_backup = super->super_roots + newest; + + root_backup = super->super_roots + backup_index; btrfs_set_super_generation(super, btrfs_backup_tree_root_gen(root_backup)); @@ -1982,12 +1927,13 @@ static noinline int next_root_backup(struct btrfs_fs_info *info, btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); /* - * fixme: the total bytes and num_devices need to match or we should + * Fixme: the total bytes and num_devices need to match or we should * need a fsck */ btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); - return 0; + + return backup_index; } /* helper to cleanup workers */ @@ -2002,13 +1948,11 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->rmw_workers); btrfs_destroy_workqueue(fs_info->endio_write_workers); btrfs_destroy_workqueue(fs_info->endio_freespace_worker); - btrfs_destroy_workqueue(fs_info->submit_workers); btrfs_destroy_workqueue(fs_info->delayed_workers); btrfs_destroy_workqueue(fs_info->caching_workers); btrfs_destroy_workqueue(fs_info->readahead_workers); btrfs_destroy_workqueue(fs_info->flush_workers); btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); - btrfs_destroy_workqueue(fs_info->extent_workers); /* * Now that all other work queues are destroyed, we can safely destroy * the queues used for metadata I/O, since tasks from those other work @@ -2029,7 +1973,7 @@ static void free_root_extent_buffers(struct btrfs_root *root) } /* helper to cleanup tree roots */ -static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) +static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) { free_root_extent_buffers(info->tree_root); @@ -2038,7 +1982,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) free_root_extent_buffers(info->csum_root); free_root_extent_buffers(info->quota_root); free_root_extent_buffers(info->uuid_root); - if (chunk_root) + if (free_chunk_root) free_root_extent_buffers(info->chunk_root); free_root_extent_buffers(info->free_space_root); } @@ -2168,16 +2112,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, fs_info->caching_workers = btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); - /* - * a higher idle thresh on the submit workers 
makes it much more - * likely that bios will be send down in a sane order to the - * devices - */ - fs_info->submit_workers = - btrfs_alloc_workqueue(fs_info, "submit", flags, - min_t(u64, fs_devices->num_devices, - max_active), 64); - fs_info->fixup_workers = btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); @@ -2214,13 +2148,9 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, max_active, 2); fs_info->qgroup_rescan_workers = btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); - fs_info->extent_workers = - btrfs_alloc_workqueue(fs_info, "extent-refs", flags, - min_t(u64, fs_devices->num_devices, - max_active), 8); if (!(fs_info->workers && fs_info->delalloc_workers && - fs_info->submit_workers && fs_info->flush_workers && + fs_info->flush_workers && fs_info->endio_workers && fs_info->endio_meta_workers && fs_info->endio_meta_write_workers && fs_info->endio_repair_workers && @@ -2228,7 +2158,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, fs_info->endio_freespace_worker && fs_info->rmw_workers && fs_info->caching_workers && fs_info->readahead_workers && fs_info->fixup_workers && fs_info->delayed_workers && - fs_info->extent_workers && fs_info->qgroup_rescan_workers)) { return -ENOMEM; } @@ -2239,13 +2168,13 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type) { struct crypto_shash *csum_shash; - const char *csum_name = btrfs_super_csum_name(csum_type); + const char *csum_driver = btrfs_super_csum_driver(csum_type); - csum_shash = crypto_alloc_shash(csum_name, 0, 0); + csum_shash = crypto_alloc_shash(csum_driver, 0, 0); if (IS_ERR(csum_shash)) { btrfs_err(fs_info, "error allocating %s hash for checksum", - csum_name); + csum_driver); return PTR_ERR(csum_shash); } @@ -2595,7 +2524,101 @@ out: return ret; } -int open_ctree(struct super_block *sb, +static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) +{ + int backup_index = find_newest_super_backup(fs_info); + struct btrfs_super_block *sb = fs_info->super_copy; + struct btrfs_root *tree_root = fs_info->tree_root; + bool handle_error = false; + int ret = 0; + int i; + + for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { + u64 generation; + int level; + + if (handle_error) { + if (!IS_ERR(tree_root->node)) + free_extent_buffer(tree_root->node); + tree_root->node = NULL; + + if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) + break; + + free_root_pointers(fs_info, 0); + + /* + * Don't use the log in recovery mode, it won't be + * valid + */ + btrfs_set_super_log_root(sb, 0); + + /* We can't trust the free space cache either */ + btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); + + ret = read_backup_root(fs_info, i); + backup_index = ret; + if (ret < 0) + return ret; + } + generation = btrfs_super_generation(sb); + level = btrfs_super_root_level(sb); + tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb), + generation, level, NULL); + if (IS_ERR(tree_root->node) || + !extent_buffer_uptodate(tree_root->node)) { + handle_error = true; + + if (IS_ERR(tree_root->node)) + ret = PTR_ERR(tree_root->node); + else if (!extent_buffer_uptodate(tree_root->node)) + ret = -EUCLEAN; + + btrfs_warn(fs_info, "failed to read tree root"); + continue; + } + + btrfs_set_root_node(&tree_root->root_item, tree_root->node); + tree_root->commit_root = btrfs_root_node(tree_root); + btrfs_set_root_refs(&tree_root->root_item, 1); + + /* + * No need to hold btrfs_root::objectid_mutex since the fs + * hasn't been fully 
initialised and we are the only user + */ + ret = btrfs_find_highest_objectid(tree_root, + &tree_root->highest_objectid); + if (ret < 0) { + handle_error = true; + continue; + } + + ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); + + ret = btrfs_read_roots(fs_info); + if (ret < 0) { + handle_error = true; + continue; + } + + /* All successful */ + fs_info->generation = generation; + fs_info->last_trans_committed = generation; + + /* Always begin writing backup roots after the one being used */ + if (backup_index < 0) { + fs_info->backup_root_index = 0; + } else { + fs_info->backup_root_index = backup_index + 1; + fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS; + } + break; + } + + return ret; +} + +int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options) { @@ -2613,8 +2636,6 @@ int open_ctree(struct super_block *sb, struct btrfs_root *chunk_root; int ret; int err = -EINVAL; - int num_backups_tried = 0; - int backup_index = 0; int clear_free_space_tree = 0; int level; @@ -2885,13 +2906,6 @@ int open_ctree(struct super_block *sb, set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); /* - * run through our array of backup supers and setup - * our ring pointer to the oldest one - */ - generation = btrfs_super_generation(disk_super); - find_oldest_super_backup(fs_info, generation); - - /* * In the long term, we'll store the compression type in the super * block, and it'll be used for per file compression control. */ @@ -3037,44 +3051,9 @@ int open_ctree(struct super_block *sb, goto fail_tree_roots; } -retry_root_backup: - generation = btrfs_super_generation(disk_super); - level = btrfs_super_root_level(disk_super); - - tree_root->node = read_tree_block(fs_info, - btrfs_super_root(disk_super), - generation, level, NULL); - if (IS_ERR(tree_root->node) || - !extent_buffer_uptodate(tree_root->node)) { - btrfs_warn(fs_info, "failed to read tree root"); - if (!IS_ERR(tree_root->node)) - free_extent_buffer(tree_root->node); - tree_root->node = NULL; - goto recovery_tree_root; - } - - btrfs_set_root_node(&tree_root->root_item, tree_root->node); - tree_root->commit_root = btrfs_root_node(tree_root); - btrfs_set_root_refs(&tree_root->root_item, 1); - - mutex_lock(&tree_root->objectid_mutex); - ret = btrfs_find_highest_objectid(tree_root, - &tree_root->highest_objectid); - if (ret) { - mutex_unlock(&tree_root->objectid_mutex); - goto recovery_tree_root; - } - - ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); - - mutex_unlock(&tree_root->objectid_mutex); - - ret = btrfs_read_roots(fs_info); + ret = init_tree_roots(fs_info); if (ret) - goto recovery_tree_root; - - fs_info->generation = generation; - fs_info->last_trans_committed = generation; + goto fail_tree_roots; ret = btrfs_verify_dev_extents(fs_info); if (ret) { @@ -3342,7 +3321,7 @@ fail_block_groups: btrfs_put_block_group_cache(fs_info); fail_tree_roots: - free_root_pointers(fs_info, 1); + free_root_pointers(fs_info, true); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); fail_sb_buffer: @@ -3369,24 +3348,6 @@ fail: btrfs_free_stripe_hash_table(fs_info); btrfs_close_devices(fs_info->fs_devices); return err; - -recovery_tree_root: - if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) - goto fail_tree_roots; - - free_root_pointers(fs_info, 0); - - /* don't use the log in recovery mode, it won't be valid */ - btrfs_set_super_log_root(disk_super, 0); - - /* we can't trust the free space cache either */ - btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); - - ret = 
next_root_backup(fs_info, fs_info->super_copy, - &num_backups_tried, &backup_index); - if (ret == -1) - goto fail_block_groups; - goto retry_root_backup; } ALLOW_ERROR_INJECTION(open_ctree, ERRNO); @@ -3980,7 +3941,7 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info) return btrfs_commit_transaction(trans); } -void close_ctree(struct btrfs_fs_info *fs_info) +void __cold close_ctree(struct btrfs_fs_info *fs_info) { int ret; @@ -4068,7 +4029,7 @@ void close_ctree(struct btrfs_fs_info *fs_info) btrfs_free_block_groups(fs_info); clear_bit(BTRFS_FS_OPEN, &fs_info->flags); - free_root_pointers(fs_info, 1); + free_root_pointers(fs_info, true); iput(fs_info->btree_inode); @@ -4445,7 +4406,7 @@ again: return 0; } -static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) +static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache) { struct inode *inode; @@ -4462,12 +4423,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, struct btrfs_fs_info *fs_info) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; spin_lock(&cur_trans->dirty_bgs_lock); while (!list_empty(&cur_trans->dirty_bgs)) { cache = list_first_entry(&cur_trans->dirty_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, dirty_list); if (!list_empty(&cache->io_list)) { @@ -4495,7 +4456,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, */ while (!list_empty(&cur_trans->io_bgs)) { cache = list_first_entry(&cur_trans->io_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, io_list); list_del_init(&cache->io_list); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index a6958103d87e..76f123ebb292 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -49,10 +49,10 @@ struct extent_buffer *btrfs_find_create_tree_block( struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_clean_tree_block(struct extent_buffer *buf); -int open_ctree(struct super_block *sb, +int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options); -void close_ctree(struct btrfs_fs_info *fs_info); +void __cold close_ctree(struct btrfs_fs_info *fs_info); int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors); struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index ddf28ecf17f9..72e312cae69d 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -87,7 +87,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(sb, &key, root, NULL); + inode = btrfs_iget(sb, &key, root); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto fail; @@ -214,7 +214,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child) key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL)); + return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root)); fail: btrfs_free_path(path); return ERR_PTR(ret); diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h new file mode 100644 index 000000000000..a3febe746c79 --- /dev/null +++ b/fs/btrfs/extent-io-tree.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef BTRFS_EXTENT_IO_TREE_H +#define BTRFS_EXTENT_IO_TREE_H + +struct extent_changeset; +struct io_failure_record; + +/* Bits for the extent state 
*/ +#define EXTENT_DIRTY (1U << 0) +#define EXTENT_UPTODATE (1U << 1) +#define EXTENT_LOCKED (1U << 2) +#define EXTENT_NEW (1U << 3) +#define EXTENT_DELALLOC (1U << 4) +#define EXTENT_DEFRAG (1U << 5) +#define EXTENT_BOUNDARY (1U << 6) +#define EXTENT_NODATASUM (1U << 7) +#define EXTENT_CLEAR_META_RESV (1U << 8) +#define EXTENT_NEED_WAIT (1U << 9) +#define EXTENT_DAMAGED (1U << 10) +#define EXTENT_NORESERVE (1U << 11) +#define EXTENT_QGROUP_RESERVED (1U << 12) +#define EXTENT_CLEAR_DATA_RESV (1U << 13) +#define EXTENT_DELALLOC_NEW (1U << 14) +#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \ + EXTENT_CLEAR_DATA_RESV) +#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING) + +/* + * Redefined bits above which are used only in the device allocation tree, + * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV + * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit + * manipulation functions + */ +#define CHUNK_ALLOCATED EXTENT_DIRTY +#define CHUNK_TRIMMED EXTENT_DEFRAG + +enum { + IO_TREE_FS_INFO_FREED_EXTENTS0, + IO_TREE_FS_INFO_FREED_EXTENTS1, + IO_TREE_INODE_IO, + IO_TREE_INODE_IO_FAILURE, + IO_TREE_RELOC_BLOCKS, + IO_TREE_TRANS_DIRTY_PAGES, + IO_TREE_ROOT_DIRTY_LOG_PAGES, + IO_TREE_SELFTEST, +}; + +struct extent_io_tree { + struct rb_root state; + struct btrfs_fs_info *fs_info; + void *private_data; + u64 dirty_bytes; + bool track_uptodate; + + /* Who owns this io tree, should be one of IO_TREE_* */ + u8 owner; + + spinlock_t lock; + const struct extent_io_ops *ops; +}; + +struct extent_state { + u64 start; + u64 end; /* inclusive */ + struct rb_node rb_node; + + /* ADD NEW ELEMENTS AFTER THIS */ + wait_queue_head_t wq; + refcount_t refs; + unsigned state; + + struct io_failure_record *failrec; + +#ifdef CONFIG_BTRFS_DEBUG + struct list_head leak_list; +#endif +}; + +int __init extent_state_cache_init(void); +void __cold extent_state_cache_exit(void); + +void extent_io_tree_init(struct btrfs_fs_info *fs_info, + struct extent_io_tree *tree, unsigned int owner, + void *private_data); +void extent_io_tree_release(struct extent_io_tree *tree); + +int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + struct extent_state **cached); + +static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) +{ + return lock_extent_bits(tree, start, end, NULL); +} + +int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end); + +int __init extent_io_init(void); +void __cold extent_io_exit(void); + +u64 count_range_bits(struct extent_io_tree *tree, + u64 *start, u64 search_end, + u64 max_bytes, unsigned bits, int contig); + +void free_extent_state(struct extent_state *state); +int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, int filled, + struct extent_state *cached_state); +int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, struct extent_changeset *changeset); +int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, int wake, int delete, + struct extent_state **cached); +int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, int wake, int delete, + struct extent_state **cached, gfp_t mask, + struct extent_changeset *changeset); + +static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) +{ + return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL); +} + +static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start, + u64 end, 
struct extent_state **cached) +{ + return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, + GFP_NOFS, NULL); +} + +static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree, + u64 start, u64 end, struct extent_state **cached) +{ + return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, + GFP_ATOMIC, NULL); +} + +static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, + u64 end, unsigned bits) +{ + int wake = 0; + + if (bits & EXTENT_LOCKED) + wake = 1; + + return clear_extent_bit(tree, start, end, bits, wake, 0, NULL); +} + +int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, struct extent_changeset *changeset); +int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, u64 *failed_start, + struct extent_state **cached_state, gfp_t mask); +int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits); + +static inline int set_extent_bits(struct extent_io_tree *tree, u64 start, + u64 end, unsigned bits) +{ + return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS); +} + +static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached_state) +{ + return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, + cached_state, GFP_NOFS, NULL); +} + +static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start, + u64 end, gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, + NULL, mask); +} + +static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached) +{ + return clear_extent_bit(tree, start, end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, cached); +} + +int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + unsigned bits, unsigned clear_bits, + struct extent_state **cached_state); + +static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start, + u64 end, unsigned int extra_bits, + struct extent_state **cached_state) +{ + return set_extent_bit(tree, start, end, + EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits, + NULL, cached_state, GFP_NOFS); +} + +static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached_state) +{ + return set_extent_bit(tree, start, end, + EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, + NULL, cached_state, GFP_NOFS); +} + +static inline int set_extent_new(struct extent_io_tree *tree, u64 start, + u64 end) +{ + return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, + GFP_NOFS); +} + +static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached_state, gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, + cached_state, mask); +} + +int find_first_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, unsigned bits, + struct extent_state **cached_state); +void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, unsigned bits); +int extent_invalidatepage(struct extent_io_tree *tree, + struct page *page, unsigned long offset); +bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, + u64 *end, u64 max_bytes, + struct extent_state **cached_state); + +/* This should be reworked in the future and put elsewhere. 
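
Most of the helpers in the new extent-io-tree.h are one-line wrappers that pass a fixed bit mask down to set_extent_bit()/clear_extent_bit(). A toy mirror of that pattern, with a stub set_extent_bit() that only prints the bits it would set; the real signatures carry additional failed_start/cached_state/gfp arguments, omitted here:

#include <stdint.h>
#include <stdio.h>

/* Bit values copied from the new extent-io-tree.h hunk above. */
#define EXTENT_DIRTY      (1U << 0)
#define EXTENT_UPTODATE   (1U << 1)
#define EXTENT_DELALLOC   (1U << 4)
#define EXTENT_DEFRAG     (1U << 5)

struct toy_io_tree { const char *name; };

/* Stub: the real function mutates the rb-tree of extent_state records. */
static int set_extent_bit(struct toy_io_tree *tree, uint64_t start, uint64_t end,
			  unsigned bits)
{
	printf("%s: [%llu, %llu] bits=0x%x\n", tree->name,
	       (unsigned long long)start, (unsigned long long)end, bits);
	return 0;
}

/* Mirrors of two of the one-line wrappers in the header. */
static inline int set_extent_dirty(struct toy_io_tree *tree, uint64_t start, uint64_t end)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY);
}

static inline int set_extent_defrag(struct toy_io_tree *tree, uint64_t start, uint64_t end)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG);
}

int main(void)
{
	struct toy_io_tree tree = { "inode-io" };

	set_extent_dirty(&tree, 0, 4095);
	set_extent_defrag(&tree, 4096, 8191);
	return 0;
}
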
*/ +int get_state_failrec(struct extent_io_tree *tree, u64 start, + struct io_failure_record **failrec); +int set_state_failrec(struct extent_io_tree *tree, u64 start, + struct io_failure_record *failrec); +void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, + u64 end); +int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, + struct io_failure_record **failrec_ret); +int free_io_failure(struct extent_io_tree *failure_tree, + struct extent_io_tree *io_tree, + struct io_failure_record *rec); +int clean_io_failure(struct btrfs_fs_info *fs_info, + struct extent_io_tree *failure_tree, + struct extent_io_tree *io_tree, u64 start, + struct page *page, u64 ino, unsigned int pg_offset); + +#endif /* BTRFS_EXTENT_IO_TREE_H */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 49cb26fa7c63..153f71a5bba9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -54,7 +54,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, static int find_next_key(struct btrfs_path *path, int level, struct btrfs_key *key); -static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) +static int block_group_bits(struct btrfs_block_group *cache, u64 bits) { return (cache->flags & bits) == bits; } @@ -70,13 +70,13 @@ int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, return 0; } -void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache) +void btrfs_free_excluded_extents(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; u64 start, end; - start = cache->key.objectid; - end = start + cache->key.offset - 1; + start = cache->start; + end = start + cache->length - 1; clear_extent_bits(&fs_info->freed_extents[0], start, end, EXTENT_UPTODATE); @@ -1306,8 +1306,10 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len, int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, u64 *actual_bytes) { - int ret; + int ret = 0; u64 discarded_bytes = 0; + u64 end = bytenr + num_bytes; + u64 cur = bytenr; struct btrfs_bio *bbio = NULL; @@ -1316,15 +1318,23 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, * associated to its stripes that don't go away while we are discarding. */ btrfs_bio_counter_inc_blocked(fs_info); - /* Tell the block device(s) that the sectors can be discarded */ - ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes, - &bbio, 0); - /* Error condition is -ENOMEM */ - if (!ret) { - struct btrfs_bio_stripe *stripe = bbio->stripes; + while (cur < end) { + struct btrfs_bio_stripe *stripe; int i; + num_bytes = end - cur; + /* Tell the block device(s) that the sectors can be discarded */ + ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur, + &num_bytes, &bbio, 0); + /* + * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or + * -EOPNOTSUPP. For any such error, @num_bytes is not updated, + * thus we can't continue anyway. 
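
btrfs_discard_extent() above now walks the range one chunk mapping at a time: each mapping call may shrink the length to what a single chunk covers, and the cursor advances by the amount actually mapped. A user-space sketch of that cursor loop, with a fake map_chunk() that just clamps at a fixed chunk boundary:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE (256ULL * 1024 * 1024)   /* illustrative chunk size */

/* Stand-in for btrfs_map_block(): never let [start, start + *len) cross a chunk. */
static int map_chunk(uint64_t start, uint64_t *len)
{
	uint64_t chunk_end = (start / CHUNK_SIZE + 1) * CHUNK_SIZE;

	if (*len > chunk_end - start)
		*len = chunk_end - start;
	return 0;
}

static void discard_range(uint64_t bytenr, uint64_t num_bytes)
{
	uint64_t end = bytenr + num_bytes;
	uint64_t cur = bytenr;

	while (cur < end) {
		uint64_t len = end - cur;

		if (map_chunk(cur, &len))
			break;                /* mapping failed, cannot continue */
		printf("discard [%llu, %llu)\n",
		       (unsigned long long)cur, (unsigned long long)(cur + len));
		cur += len;                   /* advance by what was mapped */
	}
}

int main(void)
{
	/* A range straddling a chunk boundary produces two discards. */
	discard_range(CHUNK_SIZE - 4096, 8192);
	return 0;
}
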
+ */ + if (ret < 0) + goto out; + stripe = bbio->stripes; for (i = 0; i < bbio->num_stripes; i++, stripe++) { u64 bytes; struct request_queue *req_q; @@ -1341,10 +1351,19 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, stripe->physical, stripe->length, &bytes); - if (!ret) + if (!ret) { discarded_bytes += bytes; - else if (ret != -EOPNOTSUPP) - break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */ + } else if (ret != -EOPNOTSUPP) { + /* + * Logic errors or -ENOMEM, or -EIO, but + * unlikely to happen. + * + * And since there are two loops, explicitly + * go to out to avoid confusion. + */ + btrfs_put_bbio(bbio); + goto out; + } /* * Just in case we get back EOPNOTSUPP for some reason, @@ -1354,7 +1373,9 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, ret = 0; } btrfs_put_bbio(bbio); + cur += num_bytes; } +out: btrfs_bio_counter_dec(fs_info); if (actual_bytes) @@ -2516,7 +2537,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; int readonly = 0; block_group = btrfs_lookup_block_group(fs_info, bytenr); @@ -2546,7 +2567,7 @@ static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; u64 bytenr; spin_lock(&fs_info->block_group_cache_lock); @@ -2560,13 +2581,13 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) if (!cache) return 0; - bytenr = cache->key.objectid; + bytenr = cache->start; btrfs_put_block_group(cache); return bytenr; } -static int pin_down_extent(struct btrfs_block_group_cache *cache, +static int pin_down_extent(struct btrfs_block_group *cache, u64 bytenr, u64 num_bytes, int reserved) { struct btrfs_fs_info *fs_info = cache->fs_info; @@ -2590,13 +2611,12 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache, return 0; } -/* - * this function must be called within transaction - */ int btrfs_pin_extent(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, int reserved) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; + + ASSERT(fs_info->running_transaction); cache = btrfs_lookup_block_group(fs_info, bytenr); BUG_ON(!cache); /* Logic error */ @@ -2613,7 +2633,7 @@ int btrfs_pin_extent(struct btrfs_fs_info *fs_info, int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; int ret; cache = btrfs_lookup_block_group(fs_info, bytenr); @@ -2640,7 +2660,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, u64 start, u64 num_bytes) { int ret; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_caching_control *caching_ctl; block_group = btrfs_lookup_block_group(fs_info, start); @@ -2652,7 +2672,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, if (!caching_ctl) { /* Logic error */ - BUG_ON(!btrfs_block_group_cache_done(block_group)); + BUG_ON(!btrfs_block_group_done(block_group)); ret = btrfs_remove_free_space(block_group, start, num_bytes); } else { mutex_lock(&caching_ctl->mutex); @@ -2717,7 +2737,7 @@ int btrfs_exclude_logged_extents(struct extent_buffer *eb) } static void 
-btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg) +btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) { atomic_inc(&bg->reservations); } @@ -2726,14 +2746,14 @@ void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info) { struct btrfs_caching_control *next; struct btrfs_caching_control *caching_ctl; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; down_write(&fs_info->commit_root_sem); list_for_each_entry_safe(caching_ctl, next, &fs_info->caching_block_groups, list) { cache = caching_ctl->block_group; - if (btrfs_block_group_cache_done(cache)) { + if (btrfs_block_group_done(cache)) { cache->last_byte_to_unpin = (u64)-1; list_del_init(&caching_ctl->list); btrfs_put_caching_control(caching_ctl); @@ -2785,7 +2805,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end, const bool return_free_space) { - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; struct btrfs_space_info *space_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; struct btrfs_free_cluster *cluster = NULL; @@ -2797,7 +2817,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, while (start <= end) { readonly = false; if (!cache || - start >= cache->key.objectid + cache->key.offset) { + start >= cache->start + cache->length) { if (cache) btrfs_put_block_group(cache); total_unpinned = 0; @@ -2810,7 +2830,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, empty_cluster <<= 1; } - len = cache->key.objectid + cache->key.offset - start; + len = cache->start + cache->length - start; len = min(len, end + 1 - start); if (start < cache->last_byte_to_unpin) { @@ -2880,7 +2900,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *block_group, *tmp; + struct btrfs_block_group *block_group, *tmp; struct list_head *deleted_bgs; struct extent_io_tree *unpin; u64 start; @@ -2926,8 +2946,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) ret = -EROFS; if (!trans->aborted) ret = btrfs_discard_extent(fs_info, - block_group->key.objectid, - block_group->key.offset, + block_group->start, + block_group->length, &trimmed); list_del_init(&block_group->bg_list); @@ -3262,7 +3282,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, } if (last_ref && btrfs_header_generation(buf) == trans->transid) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { ret = check_ref_cleanup(trans, buf->start); @@ -3349,15 +3369,14 @@ enum btrfs_loop_type { }; static inline void -btrfs_lock_block_group(struct btrfs_block_group_cache *cache, +btrfs_lock_block_group(struct btrfs_block_group *cache, int delalloc) { if (delalloc) down_read(&cache->data_rwsem); } -static inline void -btrfs_grab_block_group(struct btrfs_block_group_cache *cache, +static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, int delalloc) { btrfs_get_block_group(cache); @@ -3365,12 +3384,12 @@ btrfs_grab_block_group(struct btrfs_block_group_cache *cache, down_read(&cache->data_rwsem); } -static struct btrfs_block_group_cache * -btrfs_lock_cluster(struct btrfs_block_group_cache *block_group, +static struct btrfs_block_group *btrfs_lock_cluster( + struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, int delalloc) 
{ - struct btrfs_block_group_cache *used_bg = NULL; + struct btrfs_block_group *used_bg = NULL; spin_lock(&cluster->refill_lock); while (1) { @@ -3404,7 +3423,7 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group, } static inline void -btrfs_release_block_group(struct btrfs_block_group_cache *cache, +btrfs_release_block_group(struct btrfs_block_group *cache, int delalloc) { if (delalloc) @@ -3475,12 +3494,12 @@ struct find_free_extent_ctl { * Return >0 to inform caller that we find nothing * Return 0 means we have found a location and set ffe_ctl->found_offset. */ -static int find_free_extent_clustered(struct btrfs_block_group_cache *bg, +static int find_free_extent_clustered(struct btrfs_block_group *bg, struct btrfs_free_cluster *last_ptr, struct find_free_extent_ctl *ffe_ctl, - struct btrfs_block_group_cache **cluster_bg_ret) + struct btrfs_block_group **cluster_bg_ret) { - struct btrfs_block_group_cache *cluster_bg; + struct btrfs_block_group *cluster_bg; u64 aligned_cluster; u64 offset; int ret; @@ -3493,7 +3512,7 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg, goto release_cluster; offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, - ffe_ctl->num_bytes, cluster_bg->key.objectid, + ffe_ctl->num_bytes, cluster_bg->start, &ffe_ctl->max_extent_size); if (offset) { /* We have a block, we're done */ @@ -3579,7 +3598,7 @@ refill_cluster: * Return 0 when we found an free extent and set ffe_ctrl->found_offset * Return -EAGAIN to inform caller that we need to re-search this block group */ -static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg, +static int find_free_extent_unclustered(struct btrfs_block_group *bg, struct btrfs_free_cluster *last_ptr, struct find_free_extent_ctl *ffe_ctl) { @@ -3781,7 +3800,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, { int ret = 0; struct btrfs_free_cluster *last_ptr = NULL; - struct btrfs_block_group_cache *block_group = NULL; + struct btrfs_block_group *block_group = NULL; struct find_free_extent_ctl ffe_ctl = {0}; struct btrfs_space_info *space_info; bool use_cluster = true; @@ -3904,7 +3923,7 @@ search: continue; btrfs_grab_block_group(block_group, delalloc); - ffe_ctl.search_start = block_group->key.objectid; + ffe_ctl.search_start = block_group->start; /* * this can happen if we end up cycling through all the @@ -3935,7 +3954,7 @@ search: } have_block_group: - ffe_ctl.cached = btrfs_block_group_cache_done(block_group); + ffe_ctl.cached = btrfs_block_group_done(block_group); if (unlikely(!ffe_ctl.cached)) { ffe_ctl.have_caching_bg = true; ret = btrfs_cache_block_group(block_group, 0); @@ -3951,7 +3970,7 @@ have_block_group: * lets look there */ if (last_ptr && use_cluster) { - struct btrfs_block_group_cache *cluster_bg = NULL; + struct btrfs_block_group *cluster_bg = NULL; ret = find_free_extent_clustered(block_group, last_ptr, &ffe_ctl, &cluster_bg); @@ -3984,7 +4003,7 @@ checks: /* move on to the next group */ if (ffe_ctl.search_start + num_bytes > - block_group->key.objectid + block_group->key.offset) { + block_group->start + block_group->length) { btrfs_add_free_space(block_group, ffe_ctl.found_offset, num_bytes); goto loop; @@ -4133,7 +4152,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, int pin, int delalloc) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; int ret = 0; cache = btrfs_lookup_block_group(fs_info, start); @@ -4366,7 +4385,7 @@ int 
btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; /* @@ -5436,7 +5455,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, btrfs_assert_tree_locked(parent); parent_level = btrfs_header_level(parent); - extent_buffer_get(parent); + atomic_inc(&parent->refs); path->nodes[parent_level] = parent; path->slots[parent_level] = btrfs_header_nritems(parent); @@ -5480,7 +5499,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, */ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; u64 free_bytes = 0; int factor; @@ -5498,9 +5517,8 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) } factor = btrfs_bg_type_to_factor(block_group->flags); - free_bytes += (block_group->key.offset - - btrfs_block_group_used(&block_group->item)) * - factor; + free_bytes += (block_group->length - + block_group->used) * factor; spin_unlock(&block_group->lock); } @@ -5623,7 +5641,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) */ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) { - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; struct btrfs_device *device; struct list_head *devices; u64 group_trimmed; @@ -5647,16 +5665,16 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) cache = btrfs_lookup_first_block_group(fs_info, range->start); for (; cache; cache = btrfs_next_block_group(cache)) { - if (cache->key.objectid >= range_end) { + if (cache->start >= range_end) { btrfs_put_block_group(cache); break; } - start = max(range->start, cache->key.objectid); - end = min(range_end, cache->key.objectid + cache->key.offset); + start = max(range->start, cache->start); + end = min(range_end, cache->start + cache->length); if (end - start >= range->minlen) { - if (!btrfs_block_group_cache_done(cache)) { + if (!btrfs_block_group_done(cache)) { ret = btrfs_cache_block_group(cache, 0); if (ret) { bg_failed++; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index cceaf05aada2..eb8bd0258360 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -14,6 +14,7 @@ #include <linux/prefetch.h> #include <linux/cleancache.h> #include "extent_io.h" +#include "extent-io-tree.h" #include "extent_map.h" #include "ctree.h" #include "btrfs_inode.h" @@ -59,12 +60,23 @@ void btrfs_leak_debug_del(struct list_head *entry) spin_unlock_irqrestore(&leak_lock, flags); } -static inline -void btrfs_leak_debug_check(void) +static inline void btrfs_extent_buffer_leak_debug_check(void) { - struct extent_state *state; struct extent_buffer *eb; + while (!list_empty(&buffers)) { + eb = list_entry(buffers.next, struct extent_buffer, leak_list); + pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n", + eb->start, eb->len, atomic_read(&eb->refs), eb->bflags); + list_del(&eb->leak_list); + kmem_cache_free(extent_buffer_cache, eb); + } +} + +static inline void btrfs_extent_state_leak_debug_check(void) +{ + struct extent_state *state; + while (!list_empty(&states)) { state = list_entry(states.next, struct extent_state, leak_list); pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", @@ -74,14 +86,6 @@ void btrfs_leak_debug_check(void) 
list_del(&state->leak_list); kmem_cache_free(extent_state_cache, state); } - - while (!list_empty(&buffers)) { - eb = list_entry(buffers.next, struct extent_buffer, leak_list); - pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n", - eb->start, eb->len, atomic_read(&eb->refs), eb->bflags); - list_del(&eb->leak_list); - kmem_cache_free(extent_buffer_cache, eb); - } } #define btrfs_debug_check_extent_io_range(tree, start, end) \ @@ -105,7 +109,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller, #else #define btrfs_leak_debug_add(new, head) do {} while (0) #define btrfs_leak_debug_del(entry) do {} while (0) -#define btrfs_leak_debug_check() do {} while (0) +#define btrfs_extent_buffer_leak_debug_check() do {} while (0) +#define btrfs_extent_state_leak_debug_check() do {} while (0) #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) #endif @@ -196,19 +201,23 @@ static int __must_check flush_write_bio(struct extent_page_data *epd) return ret; } -int __init extent_io_init(void) +int __init extent_state_cache_init(void) { extent_state_cache = kmem_cache_create("btrfs_extent_state", sizeof(struct extent_state), 0, SLAB_MEM_SPREAD, NULL); if (!extent_state_cache) return -ENOMEM; + return 0; +} +int __init extent_io_init(void) +{ extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer", sizeof(struct extent_buffer), 0, SLAB_MEM_SPREAD, NULL); if (!extent_buffer_cache) - goto free_state_cache; + return -ENOMEM; if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE, offsetof(struct btrfs_io_bio, bio), @@ -226,23 +235,24 @@ free_bioset: free_buffer_cache: kmem_cache_destroy(extent_buffer_cache); extent_buffer_cache = NULL; + return -ENOMEM; +} -free_state_cache: +void __cold extent_state_cache_exit(void) +{ + btrfs_extent_state_leak_debug_check(); kmem_cache_destroy(extent_state_cache); - extent_state_cache = NULL; - return -ENOMEM; } void __cold extent_io_exit(void) { - btrfs_leak_debug_check(); + btrfs_extent_buffer_leak_debug_check(); /* * Make sure all delayed rcu free are flushed before we * destroy caches. */ rcu_barrier(); - kmem_cache_destroy(extent_state_cache); kmem_cache_destroy(extent_buffer_cache); bioset_exit(&btrfs_bioset); } @@ -1676,9 +1686,9 @@ out: * * true is returned if we find something, false if nothing was in the tree */ -static noinline bool find_delalloc_range(struct extent_io_tree *tree, - u64 *start, u64 *end, u64 max_bytes, - struct extent_state **cached_state) +bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, + u64 *end, u64 max_bytes, + struct extent_state **cached_state) { struct rb_node *node; struct extent_state *state; @@ -1796,8 +1806,8 @@ again: /* step one, find a bunch of delalloc bytes starting at start */ delalloc_start = *start; delalloc_end = 0; - found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, - max_bytes, &cached_state); + found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end, + max_bytes, &cached_state); if (!found || delalloc_end <= *start) { *start = delalloc_start; *end = delalloc_end; @@ -1899,7 +1909,7 @@ static int __process_pages_contig(struct address_space *mapping, if (page_ops & PAGE_SET_PRIVATE2) SetPagePrivate2(pages[i]); - if (pages[i] == locked_page) { + if (locked_page && pages[i] == locked_page) { put_page(pages[i]); pages_locked++; continue; @@ -2014,8 +2024,8 @@ out: * set the private field for a given byte offset in the tree. If there isn't * an extent_state there already, this does nothing. 
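
set_state_failrec()/get_state_failrec() are now exported through the new header; as the comment above says, attaching a failure record is a no-op when no extent_state covers that offset. A toy version over a flat array instead of the rb-tree; reporting -ENOENT for the miss case is an illustrative choice here:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_state {
	uint64_t start;
	void *failrec;
};

/* Attach @failrec to the state starting at @start, or do nothing. */
static int toy_set_state_failrec(struct toy_state *states, size_t n,
				 uint64_t start, void *failrec)
{
	for (size_t i = 0; i < n; i++) {
		if (states[i].start == start) {
			states[i].failrec = failrec;
			return 0;
		}
	}
	return -ENOENT;
}

int main(void)
{
	struct toy_state states[] = { { 0, NULL }, { 4096, NULL } };
	int dummy_rec = 1;

	printf("%d\n", toy_set_state_failrec(states, 2, 4096, &dummy_rec)); /* 0 */
	printf("%d\n", toy_set_state_failrec(states, 2, 8192, &dummy_rec)); /* miss */
	return 0;
}
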
*/ -static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start, - struct io_failure_record *failrec) +int set_state_failrec(struct extent_io_tree *tree, u64 start, + struct io_failure_record *failrec) { struct rb_node *node; struct extent_state *state; @@ -2042,8 +2052,8 @@ out: return ret; } -static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start, - struct io_failure_record **failrec) +int get_state_failrec(struct extent_io_tree *tree, u64 start, + struct io_failure_record **failrec) { struct rb_node *node; struct extent_state *state; @@ -2534,7 +2544,6 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, bio = btrfs_io_bio_alloc(1); bio->bi_end_io = endio_func; bio->bi_iter.bi_sector = failrec->logical >> 9; - bio_set_dev(bio, fs_info->fs_devices->latest_bdev); bio->bi_iter.bi_size = 0; bio->bi_private = data; @@ -2920,7 +2929,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size) * a contiguous page to the previous one * @size: portion of page that we want to write * @offset: starting offset in the page - * @bdev: attach newly created bios to this bdev * @bio_ret: must be valid pointer, newly allocated bio will be stored there * @end_io_func: end_io callback for new bio * @mirror_num: desired mirror to read/write @@ -2931,7 +2939,6 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree, struct writeback_control *wbc, struct page *page, u64 offset, size_t size, unsigned long pg_offset, - struct block_device *bdev, struct bio **bio_ret, bio_end_io_t end_io_func, int mirror_num, @@ -2977,13 +2984,16 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree, } bio = btrfs_bio_alloc(offset); - bio_set_dev(bio, bdev); bio_add_page(bio, page, page_size, pg_offset); bio->bi_end_io = end_io_func; bio->bi_private = tree; bio->bi_write_hint = page->mapping->host->i_write_hint; bio->bi_opf = opf; if (wbc) { + struct block_device *bdev; + + bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev; + bio_set_dev(bio, bdev); wbc_init_bio(wbc, bio); wbc_account_cgroup_owner(wbc, page, page_size); } @@ -3065,7 +3075,6 @@ static int __do_readpage(struct extent_io_tree *tree, u64 block_start; u64 cur_end; struct extent_map *em; - struct block_device *bdev; int ret = 0; int nr = 0; size_t pg_offset = 0; @@ -3142,7 +3151,6 @@ static int __do_readpage(struct extent_io_tree *tree, offset = em->block_start + extent_offset; disk_io_size = iosize; } - bdev = em->bdev; block_start = em->block_start; if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) block_start = EXTENT_MAP_HOLE; @@ -3232,7 +3240,7 @@ static int __do_readpage(struct extent_io_tree *tree, ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL, page, offset, disk_io_size, - pg_offset, bdev, bio, + pg_offset, bio, end_bio_extent_readpage, mirror_num, *bio_flags, this_bio_flag, @@ -3409,7 +3417,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, struct extent_page_data *epd, loff_t i_size, unsigned long nr_written, - unsigned int write_flags, int *nr_ret) + int *nr_ret) { struct extent_io_tree *tree = epd->tree; u64 start = page_offset(page); @@ -3420,11 +3428,11 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, u64 block_start; u64 iosize; struct extent_map *em; - struct block_device *bdev; size_t pg_offset = 0; size_t blocksize; int ret = 0; int nr = 0; + const unsigned int write_flags = wbc_to_write_flags(wbc); bool compressed; ret = 
btrfs_writepage_cow_fixup(page, start, page_end); @@ -3478,7 +3486,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, iosize = min(em_end - cur, end - cur + 1); iosize = ALIGN(iosize, blocksize); offset = em->block_start + extent_offset; - bdev = em->bdev; block_start = em->block_start; compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); free_extent_map(em); @@ -3520,7 +3527,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc, page, offset, iosize, pg_offset, - bdev, &epd->bio, + &epd->bio, end_bio_extent_writepage, 0, 0, 0, false); if (ret) { @@ -3558,11 +3565,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, size_t pg_offset = 0; loff_t i_size = i_size_read(inode); unsigned long end_index = i_size >> PAGE_SHIFT; - unsigned int write_flags = 0; unsigned long nr_written = 0; - write_flags = wbc_to_write_flags(wbc); - trace___extent_writepage(page, inode, wbc); WARN_ON(!PageLocked(page)); @@ -3600,7 +3604,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, } ret = __extent_writepage_io(inode, page, wbc, epd, - i_size, nr_written, write_flags, &nr); + i_size, nr_written, &nr); if (ret == 1) goto done_unlocked; @@ -3849,7 +3853,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, struct extent_page_data *epd) { struct btrfs_fs_info *fs_info = eb->fs_info; - struct block_device *bdev = fs_info->fs_devices->latest_bdev; struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree; u64 offset = eb->start; u32 nritems; @@ -3884,7 +3887,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, clear_page_dirty_for_io(p); set_page_writeback(p); ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc, - p, offset, PAGE_SIZE, 0, bdev, + p, offset, PAGE_SIZE, 0, &epd->bio, end_bio_extent_buffer_writepage, 0, 0, 0, false); @@ -4121,7 +4124,7 @@ retry: for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; - done_index = page->index; + done_index = page->index + 1; /* * At this point we hold neither the i_pages lock nor * the page lock: the page may be truncated or @@ -4156,16 +4159,6 @@ retry: ret = __extent_writepage(page, wbc, epd); if (ret < 0) { - /* - * done_index is set past this page, - * so media errors will not choke - * background writeout for the entire - * file. This has consequences for - * range_cyclic semantics (ie. it may - * not be suitable for data integrity - * writeout). 
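
The writepages hunk above moves the done_index update to the moment a page is picked up (page->index + 1), so a write error no longer needs the special-case advance that the deleted comment described. A toy loop showing the effect; the page numbers and the injected error are made up:

#include <stdio.h>

/* done_index is advanced past each page as soon as it is taken. */
static unsigned long writepages_pass(unsigned long start, unsigned long nr_pages,
				     unsigned long fail_at)
{
	unsigned long done_index = start;

	for (unsigned long index = start; index < start + nr_pages; index++) {
		done_index = index + 1;       /* matches the hunk above */
		if (index == fail_at) {
			printf("write error on page %lu\n", index);
			break;
		}
		printf("wrote page %lu\n", index);
	}
	return done_index;                    /* where the next pass resumes */
}

int main(void)
{
	unsigned long next = writepages_pass(0, 8, 3);

	printf("next pass starts at %lu\n", next);   /* 4 */
	return 0;
}
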
- */ - done_index = page->index + 1; done = 1; break; } @@ -4240,8 +4233,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end, .nr_to_write = nr_pages * 2, .range_start = start, .range_end = end + 1, + /* We're called from an async helper function */ + .punt_to_cgroup = 1, + .no_cgroup_owner = 1, }; + wbc_attach_fdatawrite_inode(&wbc_writepages, inode); while (start <= end) { page = find_get_page(mapping, start >> PAGE_SHIFT); if (clear_page_dirty_for_io(page)) @@ -4256,11 +4253,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end, } ASSERT(ret <= 0); - if (ret < 0) { + if (ret == 0) + ret = flush_write_bio(&epd); + else end_write_bio(&epd, ret); - return ret; - } - ret = flush_write_bio(&epd); + + wbc_detach_inode(&wbc_writepages); return ret; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index cf3424d58fec..a8551a1f56e2 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -7,35 +7,6 @@ #include <linux/refcount.h> #include "ulist.h" -/* bits for the extent state */ -#define EXTENT_DIRTY (1U << 0) -#define EXTENT_UPTODATE (1U << 1) -#define EXTENT_LOCKED (1U << 2) -#define EXTENT_NEW (1U << 3) -#define EXTENT_DELALLOC (1U << 4) -#define EXTENT_DEFRAG (1U << 5) -#define EXTENT_BOUNDARY (1U << 6) -#define EXTENT_NODATASUM (1U << 7) -#define EXTENT_CLEAR_META_RESV (1U << 8) -#define EXTENT_NEED_WAIT (1U << 9) -#define EXTENT_DAMAGED (1U << 10) -#define EXTENT_NORESERVE (1U << 11) -#define EXTENT_QGROUP_RESERVED (1U << 12) -#define EXTENT_CLEAR_DATA_RESV (1U << 13) -#define EXTENT_DELALLOC_NEW (1U << 14) -#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \ - EXTENT_CLEAR_DATA_RESV) -#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING) - -/* - * Redefined bits above which are used only in the device allocation tree, - * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV - * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit - * manipulation functions - */ -#define CHUNK_ALLOCATED EXTENT_DIRTY -#define CHUNK_TRIMMED EXTENT_DEFRAG - /* * flags for bio submission. 
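
extent_write_locked_range() above now attaches its private writeback_control (with cgroup punting enabled), then finishes through a single path that flushes the pending bio on success or ends it with the error. A toy outline of that control flow; every helper here is an illustrative stand-in, not a kernel function:

#include <stdio.h>

static void attach_wbc(void)     { printf("attach wbc (punt to cgroup)\n"); }
static void detach_wbc(void)     { printf("detach wbc\n"); }
static int  write_one(int page)  { return page == 3 ? -5 : 0; }  /* inject -EIO on page 3 */
static int  flush_pending(void)  { printf("flush pending bio\n"); return 0; }
static void end_pending(int err) { printf("end pending bio, error %d\n", err); }

static int write_locked_range(int first, int last)
{
	int ret = 0;

	attach_wbc();
	for (int page = first; page <= last && ret == 0; page++)
		ret = write_one(page);

	if (ret == 0)
		ret = flush_pending();   /* success: submit what is queued */
	else
		end_pending(ret);        /* error: complete the bio with it */

	detach_wbc();
	return ret;
}

int main(void)
{
	printf("result: %d\n", write_locked_range(0, 2));
	printf("result: %d\n", write_locked_range(0, 5));
	return 0;
}
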
The high bits indicate the compression * type for this bio @@ -89,12 +60,11 @@ enum { #define BITMAP_LAST_BYTE_MASK(nbits) \ (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1))) -struct extent_state; struct btrfs_root; struct btrfs_inode; struct btrfs_io_bio; struct io_failure_record; - +struct extent_io_tree; typedef blk_status_t (extent_submit_bio_start_t)(void *private_data, struct bio *bio, u64 bio_offset); @@ -111,47 +81,6 @@ struct extent_io_ops { int mirror); }; -enum { - IO_TREE_FS_INFO_FREED_EXTENTS0, - IO_TREE_FS_INFO_FREED_EXTENTS1, - IO_TREE_INODE_IO, - IO_TREE_INODE_IO_FAILURE, - IO_TREE_RELOC_BLOCKS, - IO_TREE_TRANS_DIRTY_PAGES, - IO_TREE_ROOT_DIRTY_LOG_PAGES, - IO_TREE_SELFTEST, -}; - -struct extent_io_tree { - struct rb_root state; - struct btrfs_fs_info *fs_info; - void *private_data; - u64 dirty_bytes; - bool track_uptodate; - - /* Who owns this io tree, should be one of IO_TREE_* */ - u8 owner; - - spinlock_t lock; - const struct extent_io_ops *ops; -}; - -struct extent_state { - u64 start; - u64 end; /* inclusive */ - struct rb_node rb_node; - - /* ADD NEW ELEMENTS AFTER THIS */ - wait_queue_head_t wq; - refcount_t refs; - unsigned state; - - struct io_failure_record *failrec; - -#ifdef CONFIG_BTRFS_DEBUG - struct list_head leak_list; -#endif -}; #define INLINE_EXTENT_BUFFER_PAGES 16 #define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE) @@ -259,152 +188,11 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode, u64 start, u64 len, int create); -void extent_io_tree_init(struct btrfs_fs_info *fs_info, - struct extent_io_tree *tree, unsigned int owner, - void *private_data); -void extent_io_tree_release(struct extent_io_tree *tree); int try_release_extent_mapping(struct page *page, gfp_t mask); int try_release_extent_buffer(struct page *page); -int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - struct extent_state **cached); -static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) -{ - return lock_extent_bits(tree, start, end, NULL); -} - -int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end); int extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, int mirror_num); -int __init extent_io_init(void); -void __cold extent_io_exit(void); - -u64 count_range_bits(struct extent_io_tree *tree, - u64 *start, u64 search_end, - u64 max_bytes, unsigned bits, int contig); - -void free_extent_state(struct extent_state *state); -int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, int filled, - struct extent_state *cached_state); -int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, struct extent_changeset *changeset); -int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, int wake, int delete, - struct extent_state **cached); -int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, int wake, int delete, - struct extent_state **cached, gfp_t mask, - struct extent_changeset *changeset); - -static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) -{ - return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL); -} - -static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached) -{ - return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, - GFP_NOFS, NULL); -} - -static inline int 
unlock_extent_cached_atomic(struct extent_io_tree *tree, - u64 start, u64 end, struct extent_state **cached) -{ - return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, - GFP_ATOMIC, NULL); -} - -static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, - u64 end, unsigned bits) -{ - int wake = 0; - - if (bits & EXTENT_LOCKED) - wake = 1; - - return clear_extent_bit(tree, start, end, bits, wake, 0, NULL); -} - -int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, struct extent_changeset *changeset); -int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, u64 *failed_start, - struct extent_state **cached_state, gfp_t mask); -int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits); - -static inline int set_extent_bits(struct extent_io_tree *tree, u64 start, - u64 end, unsigned bits) -{ - return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS); -} - -static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached_state) -{ - return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, - cached_state, GFP_NOFS, NULL); -} - -static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start, - u64 end, gfp_t mask) -{ - return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, - NULL, mask); -} - -static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached) -{ - return clear_extent_bit(tree, start, end, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, cached); -} - -int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned bits, unsigned clear_bits, - struct extent_state **cached_state); - -static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start, - u64 end, unsigned int extra_bits, - struct extent_state **cached_state) -{ - return set_extent_bit(tree, start, end, - EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits, - NULL, cached_state, GFP_NOFS); -} - -static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached_state) -{ - return set_extent_bit(tree, start, end, - EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG, - NULL, cached_state, GFP_NOFS); -} - -static inline int set_extent_new(struct extent_io_tree *tree, u64 start, - u64 end) -{ - return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, - GFP_NOFS); -} - -static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached_state, gfp_t mask) -{ - return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL, - cached_state, mask); -} - -int find_first_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, unsigned bits, - struct extent_state **cached_state); -void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, unsigned bits); -int extent_invalidatepage(struct extent_io_tree *tree, - struct page *page, unsigned long offset); int extent_write_full_page(struct page *page, struct writeback_control *wbc); int extent_write_locked_range(struct inode *inode, u64 start, u64 end, int mode); @@ -442,11 +230,6 @@ static inline int num_extent_pages(const struct extent_buffer *eb) (eb->start >> PAGE_SHIFT); } -static inline void extent_buffer_get(struct extent_buffer *eb) -{ - atomic_inc(&eb->refs); -} - static inline int 
extent_buffer_uptodate(struct extent_buffer *eb) { return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); @@ -508,10 +291,6 @@ struct btrfs_inode; int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, u64 length, u64 logical, struct page *page, unsigned int pg_offset, int mirror_num); -int clean_io_failure(struct btrfs_fs_info *fs_info, - struct extent_io_tree *failure_tree, - struct extent_io_tree *io_tree, u64 start, - struct page *page, u64 ino, unsigned int pg_offset); void end_extent_writepage(struct page *page, int err, u64 start, u64 end); int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num); @@ -535,19 +314,12 @@ struct io_failure_record { }; -void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, - u64 end); -int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, - struct io_failure_record **failrec_ret); bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages, struct io_failure_record *failrec, int fail_mirror); struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, struct io_failure_record *failrec, struct page *page, int pg_offset, int icsum, bio_end_io_t *endio_func, void *data); -int free_io_failure(struct extent_io_tree *failure_tree, - struct extent_io_tree *io_tree, - struct io_failure_record *rec); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS bool find_lock_delalloc_range(struct inode *inode, struct page *locked_page, u64 *start, @@ -555,5 +327,4 @@ bool find_lock_delalloc_range(struct inode *inode, #endif struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); - #endif diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 9d30acca55e1..6f417ff68980 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -214,9 +214,13 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next) ASSERT(next->block_start != EXTENT_MAP_DELALLOC && prev->block_start != EXTENT_MAP_DELALLOC); + if (prev->map_lookup || next->map_lookup) + ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) && + test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags)); + if (extent_map_end(prev) == next->start && prev->flags == next->flags && - prev->bdev == next->bdev && + prev->map_lookup == next->map_lookup && ((next->block_start == EXTENT_MAP_HOLE && prev->block_start == EXTENT_MAP_HOLE) || (next->block_start == EXTENT_MAP_INLINE && diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 473f039fcd7c..8e217337dff9 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -42,15 +42,8 @@ struct extent_map { u64 block_len; u64 generation; unsigned long flags; - union { - struct block_device *bdev; - - /* - * used for chunk mappings - * flags & EXTENT_FLAG_FS_MAPPING must be set - */ - struct map_lookup *map_lookup; - }; + /* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */ + struct map_lookup *map_lookup; refcount_t refs; unsigned int compress_type; struct list_head list; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 1a599f50837b..3270a40b0777 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -945,7 +945,6 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, u8 type = btrfs_file_extent_type(leaf, fi); int compress_type = btrfs_file_extent_compression(leaf, fi); - em->bdev = fs_info->fs_devices->latest_bdev; btrfs_item_key_to_cpu(leaf, &key, slot); extent_start = key.offset; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 27e5b269e729..0cb43b682789 
100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -296,7 +296,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, key.objectid = defrag->ino; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); + inode = btrfs_iget(fs_info->sb, &key, inode_root); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto cleanup; @@ -667,7 +667,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, } split->generation = gen; - split->bdev = em->bdev; split->flags = flags; split->compress_type = em->compress_type; replace_extent_mapping(em_tree, em, split, modified); @@ -680,7 +679,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, split->start = start + len; split->len = em->start + em->len - (start + len); - split->bdev = em->bdev; split->flags = flags; split->compress_type = em->compress_type; split->generation = gen; @@ -1636,6 +1634,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, break; } + only_release_metadata = false; sector_offset = pos & (fs_info->sectorsize - 1); reserve_bytes = round_up(write_bytes + sector_offset, fs_info->sectorsize); @@ -1692,7 +1691,7 @@ again: force_page_uptodate); if (ret) { btrfs_delalloc_release_extents(BTRFS_I(inode), - reserve_bytes, true); + reserve_bytes); break; } @@ -1704,7 +1703,7 @@ again: if (extents_locked == -EAGAIN) goto again; btrfs_delalloc_release_extents(BTRFS_I(inode), - reserve_bytes, true); + reserve_bytes); ret = extents_locked; break; } @@ -1772,8 +1771,7 @@ again: else free_extent_state(cached_state); - btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, - true); + btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes); if (ret) { btrfs_drop_pages(pages, num_pages); break; @@ -1792,7 +1790,6 @@ again: set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, EXTENT_NORESERVE, NULL, NULL, GFP_NOFS); - only_release_metadata = false; } btrfs_drop_pages(pages, num_pages); @@ -1904,9 +1901,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, (iocb->ki_flags & IOCB_NOWAIT)) return -EOPNOTSUPP; - if (!inode_trylock(inode)) { - if (iocb->ki_flags & IOCB_NOWAIT) + if (iocb->ki_flags & IOCB_NOWAIT) { + if (!inode_trylock(inode)) return -EAGAIN; + } else { inode_lock(inode); } @@ -2068,25 +2066,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) struct btrfs_trans_handle *trans; struct btrfs_log_ctx ctx; int ret = 0, err; - u64 len; - - /* - * If the inode needs a full sync, make sure we use a full range to - * avoid log tree corruption, due to hole detection racing with ordered - * extent completion for adjacent ranges, and assertion failures during - * hole detection. - */ - if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags)) { - start = 0; - end = LLONG_MAX; - } - /* - * The range length can be represented by u64, we have to do the typecasts - * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync() - */ - len = (u64)end - (u64)start + 1; trace_btrfs_sync_file(file, datasync); btrfs_init_log_ctx(&ctx, inode); @@ -2113,6 +2093,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) atomic_inc(&root->log_batch); /* + * If the inode needs a full sync, make sure we use a full range to + * avoid log tree corruption, due to hole detection racing with ordered + * extent completion for adjacent ranges, and assertion failures during + * hole detection. 
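The btrfs_file_write_iter() hunk above inverts the lock check so that IOCB_NOWAIT is the only case that may bail out with -EAGAIN, while the normal path blocks on the inode lock as before. The convention, sketched with generic VFS helpers (illustrative only, error handling trimmed):

	static ssize_t nowait_aware_write_iter(struct kiocb *iocb,
					       struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			/* RWF_NOWAIT: never sleep on the inode lock. */
			if (!inode_trylock(inode))
				return -EAGAIN;
		} else {
			inode_lock(inode);
		}

		ret = generic_write_checks(iocb, from);
		if (ret > 0)
			ret = __generic_file_write_iter(iocb, from);
		inode_unlock(inode);

		if (ret > 0)
			ret = generic_write_sync(iocb, ret);
		return ret;
	}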
Do this while holding the inode lock, to avoid races + * with other tasks. + */ + if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &BTRFS_I(inode)->runtime_flags)) { + start = 0; + end = LLONG_MAX; + } + + /* * Before we acquired the inode's lock, someone may have dirtied more * pages in the target range. We need to make sure that writeback for * any such pages does not start while we are logging the inode, because @@ -2139,8 +2132,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) /* * We have to do this here to avoid the priority inversion of waiting on * IO of a lower priority task while holding a transaction open. + * + * Also, the range length can be represented by u64, we have to do the + * typecasts to avoid signed overflow if it's [0, LLONG_MAX]. */ - ret = btrfs_wait_ordered_range(inode, start, len); + ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1); if (ret) { up_write(&BTRFS_I(inode)->dio_sem); inode_unlock(inode); @@ -2362,7 +2358,6 @@ out: hole_em->block_start = EXTENT_MAP_HOLE; hole_em->block_len = 0; hole_em->orig_block_len = 0; - hole_em->bdev = fs_info->fs_devices->latest_bdev; hole_em->compress_type = BTRFS_COMPRESS_NONE; hole_em->generation = trans->transid; @@ -3353,29 +3348,30 @@ out: return ret; } -static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) +static loff_t find_desired_extent(struct inode *inode, loff_t offset, + int whence) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map *em = NULL; struct extent_state *cached_state = NULL; + loff_t i_size = inode->i_size; u64 lockstart; u64 lockend; u64 start; u64 len; int ret = 0; - if (inode->i_size == 0) + if (i_size == 0 || offset >= i_size) return -ENXIO; /* - * *offset can be negative, in this case we start finding DATA/HOLE from + * offset can be negative, in this case we start finding DATA/HOLE from * the very start of the file. 
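The btrfs_wait_ordered_range() call in the fsync hunk above computes the length in u64 on purpose: with signed loff_t arithmetic, a [0, LLONG_MAX] range would overflow. A one-line helper makes the reasoning explicit (the helper name is illustrative):

	/* (u64)LLONG_MAX - 0 + 1 == 1ULL << 63; the same expression in
	 * signed loff_t arithmetic would be undefined behaviour. */
	static inline u64 fsync_range_len(loff_t start, loff_t end)
	{
		return (u64)end - (u64)start + 1;
	}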
*/ - start = max_t(loff_t, 0, *offset); + start = max_t(loff_t, 0, offset); lockstart = round_down(start, fs_info->sectorsize); - lockend = round_up(i_size_read(inode), - fs_info->sectorsize); + lockend = round_up(i_size, fs_info->sectorsize); if (lockend <= lockstart) lockend = lockstart + fs_info->sectorsize; lockend--; @@ -3384,7 +3380,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state); - while (start < inode->i_size) { + while (start < i_size) { em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len); if (IS_ERR(em)) { ret = PTR_ERR(em); @@ -3407,46 +3403,39 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) cond_resched(); } free_extent_map(em); - if (!ret) { - if (whence == SEEK_DATA && start >= inode->i_size) - ret = -ENXIO; - else - *offset = min_t(loff_t, start, inode->i_size); - } unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state); - return ret; + if (ret) { + offset = ret; + } else { + if (whence == SEEK_DATA && start >= i_size) + offset = -ENXIO; + else + offset = min_t(loff_t, start, i_size); + } + + return offset; } static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; - int ret; - inode_lock(inode); switch (whence) { - case SEEK_END: - case SEEK_CUR: - offset = generic_file_llseek(file, offset, whence); - goto out; + default: + return generic_file_llseek(file, offset, whence); case SEEK_DATA: case SEEK_HOLE: - if (offset >= i_size_read(inode)) { - inode_unlock(inode); - return -ENXIO; - } - - ret = find_desired_extent(inode, &offset, whence); - if (ret) { - inode_unlock(inode); - return ret; - } + inode_lock_shared(inode); + offset = find_desired_extent(inode, offset, whence); + inode_unlock_shared(inode); + break; } - offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); -out: - inode_unlock(inode); - return offset; + if (offset < 0) + return offset; + + return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); } static int btrfs_file_open(struct inode *inode, struct file *filp) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d54dcd0ab230..3283da419200 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -78,7 +78,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, * sure NOFS is set to keep us from deadlocking. 
*/ nofs_flag = memalloc_nofs_save(); - inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path); + inode = btrfs_iget_path(fs_info->sb, &location, root, path); btrfs_release_path(path); memalloc_nofs_restore(nofs_flag); if (IS_ERR(inode)) @@ -91,8 +91,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, return inode; } -struct inode *lookup_free_space_inode( - struct btrfs_block_group_cache *block_group, +struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = block_group->fs_info; @@ -107,7 +106,7 @@ struct inode *lookup_free_space_inode( return inode; inode = __lookup_free_space_inode(fs_info->tree_root, path, - block_group->key.objectid); + block_group->start); if (IS_ERR(inode)) return inode; @@ -190,7 +189,7 @@ static int __create_free_space_inode(struct btrfs_root *root, } int create_free_space_inode(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { int ret; @@ -201,7 +200,7 @@ int create_free_space_inode(struct btrfs_trans_handle *trans, return ret; return __create_free_space_inode(trans->fs_info->tree_root, trans, path, - ino, block_group->key.objectid); + ino, block_group->start); } int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, @@ -224,7 +223,7 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, } int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -385,6 +384,12 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode if (uptodate && !PageUptodate(page)) { btrfs_readpage(NULL, page); lock_page(page); + if (page->mapping != inode->i_mapping) { + btrfs_err(BTRFS_I(inode)->root->fs_info, + "free space cache page truncated"); + io_ctl_drop_pages(io_ctl); + return -EIO; + } if (!PageUptodate(page)) { btrfs_err(BTRFS_I(inode)->root->fs_info, "error reading free space cache"); @@ -814,7 +819,7 @@ free_cache: goto out; } -int load_free_space_cache(struct btrfs_block_group_cache *block_group) +int load_free_space_cache(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -822,7 +827,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group) struct btrfs_path *path; int ret = 0; bool matched; - u64 used = btrfs_block_group_used(&block_group->item); + u64 used = block_group->used; /* * If this block group has been marked to be cleared for one reason or @@ -876,13 +881,13 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group) spin_unlock(&block_group->lock); ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, - path, block_group->key.objectid); + path, block_group->start); btrfs_free_path(path); if (ret <= 0) goto out; spin_lock(&ctl->tree_lock); - matched = (ctl->free_space == (block_group->key.offset - used - + matched = (ctl->free_space == (block_group->length - used - block_group->bytes_super)); spin_unlock(&ctl->tree_lock); @@ -890,7 +895,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group) __btrfs_remove_free_space_cache(ctl); btrfs_warn(fs_info, "block group %llu has wrong amount of free space", - block_group->key.objectid); + block_group->start); ret = 
-1; } out: @@ -903,7 +908,7 @@ out: btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now", - block_group->key.objectid); + block_group->start); } iput(inode); @@ -913,7 +918,7 @@ out: static noinline_for_stack int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl, struct btrfs_free_space_ctl *ctl, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, int *entries, int *bitmaps, struct list_head *bitmap_list) { @@ -1041,7 +1046,7 @@ fail: } static noinline_for_stack int write_pinned_extent_entries( - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_io_ctl *io_ctl, int *entries) { @@ -1061,9 +1066,9 @@ static noinline_for_stack int write_pinned_extent_entries( */ unpin = block_group->fs_info->pinned_extents; - start = block_group->key.objectid; + start = block_group->start; - while (start < block_group->key.objectid + block_group->key.offset) { + while (start < block_group->start + block_group->length) { ret = find_first_extent_bit(unpin, start, &extent_start, &extent_end, EXTENT_DIRTY, NULL); @@ -1071,13 +1076,12 @@ static noinline_for_stack int write_pinned_extent_entries( return 0; /* This pinned extent is out of our range */ - if (extent_start >= block_group->key.objectid + - block_group->key.offset) + if (extent_start >= block_group->start + block_group->length) return 0; extent_start = max(extent_start, start); - extent_end = min(block_group->key.objectid + - block_group->key.offset, extent_end + 1); + extent_end = min(block_group->start + block_group->length, + extent_end + 1); len = extent_end - extent_start; *entries += 1; @@ -1141,7 +1145,7 @@ cleanup_write_cache_enospc(struct inode *inode, static int __btrfs_wait_cache_io(struct btrfs_root *root, struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_io_ctl *io_ctl, struct btrfs_path *path, u64 offset) { @@ -1168,7 +1172,7 @@ out: #ifdef DEBUG btrfs_err(root->fs_info, "failed to write free space cache for block group %llu", - block_group->key.objectid); + block_group->start); #endif } } @@ -1210,12 +1214,12 @@ static int btrfs_wait_cache_io_root(struct btrfs_root *root, } int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, block_group, &block_group->io_ctl, - path, block_group->key.objectid); + path, block_group->start); } /** @@ -1231,7 +1235,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, */ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, struct btrfs_free_space_ctl *ctl, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_io_ctl *io_ctl, struct btrfs_trans_handle *trans) { @@ -1369,7 +1373,7 @@ out_unlock: } int btrfs_write_out_cache(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -1394,7 +1398,7 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans, #ifdef DEBUG btrfs_err(fs_info, "failed to write free space cache for block group %llu", - block_group->key.objectid); + block_group->start); #endif spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_ERROR; @@ 
-1647,11 +1651,11 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl, static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) { - struct btrfs_block_group_cache *block_group = ctl->private; + struct btrfs_block_group *block_group = ctl->private; u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; - u64 size = block_group->key.offset; + u64 size = block_group->length; u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); @@ -1991,7 +1995,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, static bool use_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - struct btrfs_block_group_cache *block_group = ctl->private; + struct btrfs_block_group *block_group = ctl->private; struct btrfs_fs_info *fs_info = block_group->fs_info; bool forced = false; @@ -2028,7 +2032,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl, * so allow those block groups to still be allowed to have a bitmap * entry. */ - if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset) + if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) return false; return true; @@ -2043,7 +2047,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { struct btrfs_free_space *bitmap_info; - struct btrfs_block_group_cache *block_group = NULL; + struct btrfs_block_group *block_group = NULL; int added = 0; u64 bytes, offset, bytes_added; int ret; @@ -2380,7 +2384,7 @@ out: return ret; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, +int btrfs_add_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size) { return __btrfs_add_free_space(block_group->fs_info, @@ -2388,7 +2392,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, bytenr, size); } -int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, +int btrfs_remove_free_space(struct btrfs_block_group *block_group, u64 offset, u64 bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2478,7 +2482,7 @@ out: return ret; } -void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, +void btrfs_dump_free_space(struct btrfs_block_group *block_group, u64 bytes) { struct btrfs_fs_info *fs_info = block_group->fs_info; @@ -2503,14 +2507,14 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "%d blocks of free space at or bigger than bytes is", count); } -void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) +void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; spin_lock_init(&ctl->tree_lock); ctl->unit = fs_info->sectorsize; - ctl->start = block_group->key.objectid; + ctl->start = block_group->start; ctl->private = block_group; ctl->op = &free_space_op; INIT_LIST_HEAD(&ctl->trimming_ranges); @@ -2532,7 +2536,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) */ static int __btrfs_return_cluster_to_free_space( - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2598,7 +2602,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) spin_unlock(&ctl->tree_lock); } -void btrfs_remove_free_space_cache(struct btrfs_block_group_cache 
*block_group) +void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_cluster *cluster; @@ -2620,7 +2624,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) } -u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, +u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, u64 offset, u64 bytes, u64 empty_size, u64 *max_extent_size) { @@ -2674,7 +2678,7 @@ out: * cluster and remove the cluster from it. */ int btrfs_return_cluster_to_free_space( - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster) { struct btrfs_free_space_ctl *ctl; @@ -2708,7 +2712,7 @@ int btrfs_return_cluster_to_free_space( return ret; } -static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, +static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, struct btrfs_free_space *entry, u64 bytes, u64 min_start, @@ -2741,7 +2745,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, * if it couldn't find anything suitably large, or a logical disk offset * if things worked out */ -u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, +u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start, u64 *max_extent_size) { @@ -2827,7 +2831,7 @@ out: return ret; } -static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, +static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group, struct btrfs_free_space *entry, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, @@ -2909,7 +2913,7 @@ again: * extent of cont1_bytes, and other clusters of at least min_bytes. */ static noinline int -setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, +setup_cluster_no_bitmap(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, struct list_head *bitmaps, u64 offset, u64 bytes, u64 cont1_bytes, u64 min_bytes) @@ -3000,7 +3004,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, * that we have already failed to find extents that will work. 
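The pervasive type change in this file follows a single mapping from the old embedded key to plain fields on struct btrfs_block_group: key.objectid becomes start, key.offset becomes length, and btrfs_block_group_used(&bg->item) becomes bg->used. For illustration, the end-of-group bound that keeps appearing as start + length can be captured in a trivial helper (the helper itself is not part of this series):

	static inline u64 block_group_end(const struct btrfs_block_group *bg)
	{
		/* Old spelling: bg->key.objectid + bg->key.offset */
		return bg->start + bg->length;
	}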
*/ static noinline int -setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, +setup_cluster_bitmap(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, struct list_head *bitmaps, u64 offset, u64 bytes, u64 cont1_bytes, u64 min_bytes) @@ -3050,7 +3054,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, * returns zero and sets up cluster if things worked out, otherwise * it returns -enospc */ -int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group, +int btrfs_find_space_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size) { @@ -3141,7 +3145,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) cluster->block_group = NULL; } -static int do_trimming(struct btrfs_block_group_cache *block_group, +static int do_trimming(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 bytes, u64 reserved_start, u64 reserved_bytes, struct btrfs_trim_range *trim_entry) @@ -3186,7 +3190,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group, return ret; } -static int trim_no_bitmap(struct btrfs_block_group_cache *block_group, +static int trim_no_bitmap(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 end, u64 minlen) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -3271,7 +3275,7 @@ out: return ret; } -static int trim_bitmaps(struct btrfs_block_group_cache *block_group, +static int trim_bitmaps(struct btrfs_block_group *block_group, u64 *total_trimmed, u64 start, u64 end, u64 minlen) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -3352,12 +3356,12 @@ next: return ret; } -void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache) +void btrfs_get_block_group_trimming(struct btrfs_block_group *cache) { atomic_inc(&cache->trimming); } -void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group) +void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct extent_map_tree *em_tree; @@ -3373,7 +3377,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group) mutex_lock(&fs_info->chunk_mutex); em_tree = &fs_info->mapping_tree; write_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, block_group->key.objectid, + em = lookup_extent_mapping(em_tree, block_group->start, 1); BUG_ON(!em); /* logic error, can't happen */ remove_extent_mapping(em_tree, em); @@ -3392,7 +3396,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group) } } -int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, +int btrfs_trim_block_group(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen) { int ret; @@ -3590,7 +3594,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, * how the free space cache loading stuff works, so you can get really weird * configurations. */ -int test_add_free_space_entry(struct btrfs_block_group_cache *cache, +int test_add_free_space_entry(struct btrfs_block_group *cache, u64 offset, u64 bytes, bool bitmap) { struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; @@ -3658,7 +3662,7 @@ again: * just used to check the absence of space, so if there is free space in the * range at all we will return 1. 
*/ -int test_check_exists(struct btrfs_block_group_cache *cache, +int test_check_exists(struct btrfs_block_group *cache, u64 offset, u64 bytes) { struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 39c32c8fc24f..ba9a23241101 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -50,24 +50,23 @@ struct btrfs_io_ctl { unsigned check_crcs:1; }; -struct inode *lookup_free_space_inode( - struct btrfs_block_group_cache *block_group, +struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group, struct btrfs_path *path); int create_free_space_inode(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv); int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct inode *inode); -int load_free_space_cache(struct btrfs_block_group_cache *block_group); +int load_free_space_cache(struct btrfs_block_group *block_group); int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); int btrfs_write_out_cache(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); struct inode *lookup_free_ino_inode(struct btrfs_root *root, struct btrfs_path *path); @@ -81,42 +80,40 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, struct btrfs_path *path, struct inode *inode); -void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); +void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group); int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, struct btrfs_free_space_ctl *ctl, u64 bytenr, u64 size); -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, +int btrfs_add_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size); -int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, +int btrfs_remove_free_space(struct btrfs_block_group *block_group, u64 bytenr, u64 size); void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); -void btrfs_remove_free_space_cache(struct btrfs_block_group_cache - *block_group); -u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, +void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group); +u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, u64 offset, u64 bytes, u64 empty_size, u64 *max_extent_size); u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); -void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, +void btrfs_dump_free_space(struct btrfs_block_group *block_group, u64 bytes); -int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group, +int btrfs_find_space_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size); void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster); -u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, +u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start, u64 *max_extent_size); int 
btrfs_return_cluster_to_free_space( - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster); -int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, +int btrfs_trim_block_group(struct btrfs_block_group *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen); /* Support functions for running our sanity tests */ #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS -int test_add_free_space_entry(struct btrfs_block_group_cache *cache, +int test_add_free_space_entry(struct btrfs_block_group *cache, u64 offset, u64 bytes, bool bitmap); -int test_check_exists(struct btrfs_block_group_cache *cache, - u64 offset, u64 bytes); +int test_check_exists(struct btrfs_block_group *cache, u64 offset, u64 bytes); #endif #endif diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 48a03f5240f5..258cb3fae17a 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -13,10 +13,10 @@ #include "block-group.h" static int __add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); -void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache) +void set_free_space_tree_thresholds(struct btrfs_block_group *cache) { u32 bitmap_range; size_t bitmap_size; @@ -27,8 +27,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache) * exceeds that required for using bitmaps. */ bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; - num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1, - bitmap_range); + num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range); bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE; total_bitmap_size = num_bitmaps * bitmap_size; cache->bitmap_high_thresh = div_u64(total_bitmap_size, @@ -45,7 +44,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache) } static int add_new_free_space_info(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_root *root = trans->fs_info->free_space_root; @@ -54,9 +53,9 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; int ret; - key.objectid = block_group->key.objectid; + key.objectid = block_group->start; key.type = BTRFS_FREE_SPACE_INFO_KEY; - key.offset = block_group->key.offset; + key.offset = block_group->length; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info)); if (ret) @@ -78,7 +77,7 @@ out: EXPORT_FOR_TESTS struct btrfs_free_space_info *search_free_space_info( struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, int cow) { struct btrfs_fs_info *fs_info = block_group->fs_info; @@ -86,16 +85,16 @@ struct btrfs_free_space_info *search_free_space_info( struct btrfs_key key; int ret; - key.objectid = block_group->key.objectid; + key.objectid = block_group->start; key.type = BTRFS_FREE_SPACE_INFO_KEY; - key.offset = block_group->key.offset; + key.offset = block_group->length; ret = btrfs_search_slot(trans, root, &key, path, 0, cow); if (ret < 0) return ERR_PTR(ret); if (ret != 0) { btrfs_warn(fs_info, "missing free space info for %llu", - block_group->key.objectid); + block_group->start); ASSERT(0); return ERR_PTR(-ENOENT); } @@ -180,7 +179,7 @@ static 
void le_bitmap_set(unsigned long *map, unsigned int start, int len) EXPORT_FOR_TESTS int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -197,7 +196,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, int done = 0, nr; int ret; - bitmap_size = free_space_bitmap_size(block_group->key.offset, + bitmap_size = free_space_bitmap_size(block_group->length, fs_info->sectorsize); bitmap = alloc_bitmap(bitmap_size); if (!bitmap) { @@ -205,8 +204,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, goto out; } - start = block_group->key.objectid; - end = block_group->key.objectid + block_group->key.offset; + start = block_group->start; + end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; @@ -224,8 +223,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { - ASSERT(found_key.objectid == block_group->key.objectid); - ASSERT(found_key.offset == block_group->key.offset); + ASSERT(found_key.objectid == block_group->start); + ASSERT(found_key.offset == block_group->length); done = 1; break; } else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) { @@ -271,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", - block_group->key.objectid, extent_count, + block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; @@ -320,7 +319,7 @@ out: EXPORT_FOR_TESTS int convert_free_space_to_extents(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -336,7 +335,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, int done = 0, nr; int ret; - bitmap_size = free_space_bitmap_size(block_group->key.offset, + bitmap_size = free_space_bitmap_size(block_group->length, fs_info->sectorsize); bitmap = alloc_bitmap(bitmap_size); if (!bitmap) { @@ -344,8 +343,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, goto out; } - start = block_group->key.objectid; - end = block_group->key.objectid + block_group->key.offset; + start = block_group->start; + end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; @@ -363,8 +362,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { - ASSERT(found_key.objectid == block_group->key.objectid); - ASSERT(found_key.offset == block_group->key.offset); + ASSERT(found_key.objectid == block_group->start); + ASSERT(found_key.offset == block_group->length); done = 1; break; } else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) { @@ -413,7 +412,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); - nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize); + nrbits = div_u64(block_group->length, block_group->fs_info->sectorsize); start_bit = find_next_bit_le(bitmap, nrbits, 0); while (start_bit < 
nrbits) { @@ -437,7 +436,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", - block_group->key.objectid, extent_count, + block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; @@ -453,7 +452,7 @@ out: } static int update_free_space_extent_count(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, int new_extents) { @@ -491,7 +490,7 @@ out: } EXPORT_FOR_TESTS -int free_space_test_bit(struct btrfs_block_group_cache *block_group, +int free_space_test_bit(struct btrfs_block_group *block_group, struct btrfs_path *path, u64 offset) { struct extent_buffer *leaf; @@ -513,7 +512,7 @@ int free_space_test_bit(struct btrfs_block_group_cache *block_group, return !!extent_buffer_test_bit(leaf, ptr, i); } -static void free_space_set_bits(struct btrfs_block_group_cache *block_group, +static void free_space_set_bits(struct btrfs_block_group *block_group, struct btrfs_path *path, u64 *start, u64 *size, int bit) { @@ -581,7 +580,7 @@ static int free_space_next_bitmap(struct btrfs_trans_handle *trans, * the bitmap. */ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size, int remove) { @@ -597,7 +596,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, * Read the bit for the block immediately before the extent of space if * that block is within the block group. */ - if (start > block_group->key.objectid) { + if (start > block_group->start) { u64 prev_block = start - block_group->fs_info->sectorsize; key.objectid = prev_block; @@ -649,7 +648,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, * Read the bit for the block immediately after the extent of space if * that block is within the block group. */ - if (end < block_group->key.objectid + block_group->key.offset) { + if (end < block_group->start + block_group->length) { /* The next block may be in the next bitmap. */ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (end >= key.objectid + key.offset) { @@ -694,7 +693,7 @@ out: } static int remove_free_space_extent(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { @@ -781,7 +780,7 @@ out: EXPORT_FOR_TESTS int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { struct btrfs_free_space_info *info; @@ -812,7 +811,7 @@ int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, int remove_from_free_space_tree(struct btrfs_trans_handle *trans, u64 start, u64 size) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_path *path; int ret; @@ -846,7 +845,7 @@ out: } static int add_free_space_extent(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { @@ -880,7 +879,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans, new_key.offset = size; /* Search for a neighbor on the left. 
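convert_free_space_to_extents() above rebuilds extent items by scanning the little-endian bitmap for runs of set bits: find_next_bit_le() finds the start of a run, find_next_zero_bit_le() finds its end, and each run becomes one free-space extent. A condensed sketch of that walk, with emit() standing in for the btrfs_insert_empty_item() call in the real code:

	static u32 bitmap_runs_to_extents(const unsigned long *bitmap,
					  unsigned long nrbits,
					  u64 base, u32 unit,
					  void (*emit)(u64 start, u64 len))
	{
		unsigned long start_bit, end_bit;
		u32 extent_count = 0;

		start_bit = find_next_bit_le(bitmap, nrbits, 0);
		while (start_bit < nrbits) {
			end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
			emit(base + (u64)start_bit * unit,
			     (u64)(end_bit - start_bit) * unit);
			extent_count++;
			start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
		}
		return extent_count;	/* compared against expected_extent_count */
	}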
*/ - if (start == block_group->key.objectid) + if (start == block_group->start) goto right; key.objectid = start - 1; key.type = (u8)-1; @@ -900,8 +899,8 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans, found_start = key.objectid; found_end = key.objectid + key.offset; - ASSERT(found_start >= block_group->key.objectid && - found_end > block_group->key.objectid); + ASSERT(found_start >= block_group->start && + found_end > block_group->start); ASSERT(found_start < start && found_end <= start); /* @@ -920,7 +919,7 @@ static int add_free_space_extent(struct btrfs_trans_handle *trans, right: /* Search for a neighbor on the right. */ - if (end == block_group->key.objectid + block_group->key.offset) + if (end == block_group->start + block_group->length) goto insert; key.objectid = end; key.type = (u8)-1; @@ -940,8 +939,8 @@ right: found_start = key.objectid; found_end = key.objectid + key.offset; - ASSERT(found_start >= block_group->key.objectid && - found_end > block_group->key.objectid); + ASSERT(found_start >= block_group->start && + found_end > block_group->start); ASSERT((found_start < start && found_end <= start) || (found_start >= end && found_end > end)); @@ -974,7 +973,7 @@ out: EXPORT_FOR_TESTS int __add_to_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { struct btrfs_free_space_info *info; @@ -1005,7 +1004,7 @@ int __add_to_free_space_tree(struct btrfs_trans_handle *trans, int add_to_free_space_tree(struct btrfs_trans_handle *trans, u64 start, u64 size) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_path *path; int ret; @@ -1043,7 +1042,7 @@ out: * through the normal add/remove hooks. */ static int populate_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct btrfs_root *extent_root = trans->fs_info->extent_root; struct btrfs_path *path, *path2; @@ -1075,7 +1074,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's * contained in. 
*/ - key.objectid = block_group->key.objectid; + key.objectid = block_group->start; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = 0; @@ -1084,8 +1083,8 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, goto out_locked; ASSERT(ret == 0); - start = block_group->key.objectid; - end = block_group->key.objectid + block_group->key.offset; + start = block_group->start; + end = block_group->start + block_group->length; while (1) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); @@ -1109,7 +1108,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, else start += key.offset; } else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { - if (key.objectid != block_group->key.objectid) + if (key.objectid != block_group->start) break; } @@ -1140,7 +1139,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) struct btrfs_trans_handle *trans; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *free_space_root; - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct rb_node *node; int ret; @@ -1159,7 +1158,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) node = rb_first(&fs_info->block_group_cache_tree); while (node) { - block_group = rb_entry(node, struct btrfs_block_group_cache, + block_group = rb_entry(node, struct btrfs_block_group, cache_node); ret = populate_free_space_tree(trans, block_group); if (ret) @@ -1265,7 +1264,7 @@ abort: } static int __add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path) { int ret; @@ -1277,12 +1276,12 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans, return ret; return __add_to_free_space_tree(trans, block_group, path, - block_group->key.objectid, - block_group->key.offset); + block_group->start, + block_group->length); } int add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_path *path = NULL; @@ -1312,7 +1311,7 @@ out: } int remove_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { struct btrfs_root *root = trans->fs_info->free_space_root; struct btrfs_path *path; @@ -1336,8 +1335,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans, goto out; } - start = block_group->key.objectid; - end = block_group->key.objectid + block_group->key.offset; + start = block_group->start; + end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; @@ -1355,8 +1354,8 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { - ASSERT(found_key.objectid == block_group->key.objectid); - ASSERT(found_key.offset == block_group->key.offset); + ASSERT(found_key.objectid == block_group->start); + ASSERT(found_key.offset == block_group->length); done = 1; nr++; path->slots[0]--; @@ -1391,7 +1390,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, struct btrfs_path *path, u32 expected_extent_count) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_fs_info *fs_info; struct btrfs_root *root; struct btrfs_key 
key; @@ -1407,7 +1406,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, fs_info = block_group->fs_info; root = fs_info->free_space_root; - end = block_group->key.objectid + block_group->key.offset; + end = block_group->start + block_group->length; while (1) { ret = btrfs_next_item(root, path); @@ -1454,7 +1453,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", - block_group->key.objectid, extent_count, + block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; @@ -1472,7 +1471,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, struct btrfs_path *path, u32 expected_extent_count) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_fs_info *fs_info; struct btrfs_root *root; struct btrfs_key key; @@ -1485,7 +1484,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, fs_info = block_group->fs_info; root = fs_info->free_space_root; - end = block_group->key.objectid + block_group->key.offset; + end = block_group->start + block_group->length; while (1) { ret = btrfs_next_item(root, path); @@ -1516,7 +1515,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", - block_group->key.objectid, extent_count, + block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; @@ -1532,7 +1531,7 @@ out: int load_free_space_tree(struct btrfs_caching_control *caching_ctl) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; struct btrfs_free_space_info *info; struct btrfs_path *path; u32 extent_count, flags; diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h index 360d50e1cdea..dc2463e4cfe3 100644 --- a/fs/btrfs/free-space-tree.h +++ b/fs/btrfs/free-space-tree.h @@ -16,14 +16,14 @@ struct btrfs_caching_control; #define BTRFS_FREE_SPACE_BITMAP_SIZE 256 #define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE) -void set_free_space_tree_thresholds(struct btrfs_block_group_cache *block_group); +void set_free_space_tree_thresholds(struct btrfs_block_group *block_group); int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info); int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info); int load_free_space_tree(struct btrfs_caching_control *caching_ctl); int add_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group); + struct btrfs_block_group *block_group); int remove_block_group_free_space(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group); + struct btrfs_block_group *block_group); int add_to_free_space_tree(struct btrfs_trans_handle *trans, u64 start, u64 size); int remove_from_free_space_tree(struct btrfs_trans_handle *trans, @@ -32,21 +32,21 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans, #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct btrfs_free_space_info * search_free_space_info(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, int cow); int __add_to_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache 
*block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size); int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size); int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); int convert_free_space_to_extents(struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct btrfs_path *path); -int free_space_test_bit(struct btrfs_block_group_cache *block_group, +int free_space_test_bit(struct btrfs_block_group *block_group, struct btrfs_path *path, u64 offset); #endif diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 63cad7865d75..37345fb6191d 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -501,13 +501,13 @@ again: ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, prealloc, prealloc, &alloc_hint); if (ret) { - btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc); btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true); goto out_put; } ret = btrfs_write_out_ino_cache(root, trans, path, inode); - btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false); + btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc); out_put: iput(inode); out_release: diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0f2754eaa05b..56032c518b26 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -368,6 +368,7 @@ struct async_chunk { u64 end; unsigned int write_flags; struct list_head extents; + struct cgroup_subsys_state *blkcg_css; struct btrfs_work work; atomic_t *pending; }; @@ -474,6 +475,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk) u64 start = async_chunk->start; u64 end = async_chunk->end; u64 actual_end; + u64 i_size; int ret = 0; struct page **pages = NULL; unsigned long nr_pages; @@ -488,7 +490,19 @@ static noinline int compress_file_range(struct async_chunk *async_chunk) inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, SZ_16K); - actual_end = min_t(u64, i_size_read(inode), end + 1); + /* + * We need to save i_size before now because it could change in between + * us evaluating the size and assigning it. This is because we lock and + * unlock the page in truncate and fallocate, and then modify the i_size + * later on. + * + * The barriers are to emulate READ_ONCE, remove that once i_size_read + * does that for us. + */ + barrier(); + i_size = i_size_read(inode); + barrier(); + actual_end = min_t(u64, i_size, end + 1); again: will_compress = 0; nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; @@ -699,10 +713,12 @@ cleanup_and_bail_uncompressed: * to our extent and set things up for the async work queue to run * cow_file_range to do the normal delalloc dance. 
*/ - if (page_offset(async_chunk->locked_page) >= start && - page_offset(async_chunk->locked_page) <= end) + if (async_chunk->locked_page && + (page_offset(async_chunk->locked_page) >= start && + page_offset(async_chunk->locked_page)) <= end) { __set_page_dirty_nobuffers(async_chunk->locked_page); /* unlocked later on in the async handlers */ + } if (redirty) extent_range_redirty_for_io(inode, start, end); @@ -782,7 +798,7 @@ retry: async_extent->start + async_extent->ram_size - 1, WB_SYNC_ALL); - else if (ret) + else if (ret && async_chunk->locked_page) unlock_page(async_chunk->locked_page); kfree(async_extent); cond_resched(); @@ -865,7 +881,8 @@ retry: ins.objectid, ins.offset, async_extent->pages, async_extent->nr_pages, - async_chunk->write_flags)) { + async_chunk->write_flags, + async_chunk->blkcg_css)) { struct page *p = async_extent->pages[0]; const u64 start = async_extent->start; const u64 end = start + async_extent->ram_size - 1; @@ -1183,6 +1200,8 @@ static noinline void async_cow_free(struct btrfs_work *work) async_chunk = container_of(work, struct async_chunk, work); if (async_chunk->inode) btrfs_add_delayed_iput(async_chunk->inode); + if (async_chunk->blkcg_css) + css_put(async_chunk->blkcg_css); /* * Since the pointer to 'pending' is at the beginning of the array of * async_chunk's, freeing it ensures the whole array has been freed. @@ -1191,12 +1210,14 @@ static noinline void async_cow_free(struct btrfs_work *work) kvfree(async_chunk->pending); } -static int cow_file_range_async(struct inode *inode, struct page *locked_page, +static int cow_file_range_async(struct inode *inode, + struct writeback_control *wbc, + struct page *locked_page, u64 start, u64 end, int *page_started, - unsigned long *nr_written, - unsigned int write_flags) + unsigned long *nr_written) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc); struct async_cow *ctx; struct async_chunk *async_chunk; unsigned long nr_pages; @@ -1205,6 +1226,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, int i; bool should_compress; unsigned nofs_flag; + const unsigned int write_flags = wbc_to_write_flags(wbc); unlock_extent(&BTRFS_I(inode)->io_tree, start, end); @@ -1251,14 +1273,45 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, async_chunk[i].inode = inode; async_chunk[i].start = start; async_chunk[i].end = cur_end; - async_chunk[i].locked_page = locked_page; async_chunk[i].write_flags = write_flags; INIT_LIST_HEAD(&async_chunk[i].extents); - btrfs_init_work(&async_chunk[i].work, - btrfs_delalloc_helper, - async_cow_start, async_cow_submit, - async_cow_free); + /* + * The locked_page comes all the way from writepage and its + * the original page we were actually given. As we spread + * this large delalloc region across multiple async_chunk + * structs, only the first struct needs a pointer to locked_page + * + * This way we don't need racey decisions about who is supposed + * to unlock it. + */ + if (locked_page) { + /* + * Depending on the compressibility, the pages might or + * might not go through async. We want all of them to + * be accounted against wbc once. Let's do it here + * before the paths diverge. wbc accounting is used + * only for foreign writeback detection and doesn't + * need full accuracy. Just account the whole thing + * against the first page. 
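A note on the blkcg handling introduced in this hunk: the async_chunk structs outlive cow_file_range_async(), so the writeback cgroup state taken from the wbc has to be pinned before it is handed to the worker and dropped again in async_cow_free(). A condensed, non-compilable sketch of that hand-off, reusing the names from the patch (chunk stands in for &async_chunk[i]):

	struct cgroup_subsys_state *css = wbc_blkcg_css(wbc);

	if (css != blkcg_root_css) {
		css_get(css);			/* pin it for the async worker */
		chunk->blkcg_css = css;
	} else {
		chunk->blkcg_css = NULL;	/* no need to pin the root css */
	}

	/* ... and in async_cow_free(), once the worker is done ... */
	if (chunk->blkcg_css)
		css_put(chunk->blkcg_css);
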
+ */ + wbc_account_cgroup_owner(wbc, locked_page, + cur_end - start); + async_chunk[i].locked_page = locked_page; + locked_page = NULL; + } else { + async_chunk[i].locked_page = NULL; + } + + if (blkcg_css != blkcg_root_css) { + css_get(blkcg_css); + async_chunk[i].blkcg_css = blkcg_css; + } else { + async_chunk[i].blkcg_css = NULL; + } + + btrfs_init_work(&async_chunk[i].work, async_cow_start, + async_cow_submit, async_cow_free); nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); atomic_add(nr_pages, &fs_info->async_delalloc_pages); @@ -1684,7 +1737,6 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, { int ret; int force_cow = need_force_cow(inode, start, end); - unsigned int write_flags = wbc_to_write_flags(wbc); if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) { ret = run_delalloc_nocow(inode, locked_page, start, end, @@ -1699,9 +1751,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, } else { set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &BTRFS_I(inode)->runtime_flags); - ret = cow_file_range_async(inode, locked_page, start, end, - page_started, nr_written, - write_flags); + ret = cow_file_range_async(inode, wbc, locked_page, start, end, + page_started, nr_written); } if (ret) btrfs_cleanup_ordered_extents(inode, locked_page, start, @@ -2097,7 +2148,7 @@ static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, } mapit: - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, bio, mirror_num); out: if (ret) { @@ -2201,12 +2252,16 @@ again: mapping_set_error(page->mapping, ret); end_extent_writepage(page, ret, page_start, page_end); ClearPageChecked(page); - goto out; + goto out_reserved; } ClearPageChecked(page); set_page_dirty(page); - btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false); +out_reserved: + btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + if (ret) + btrfs_delalloc_release_space(inode, data_reserved, page_start, + PAGE_SIZE, true); out: unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, &cached_state); @@ -2247,8 +2302,7 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end) SetPageChecked(page); get_page(page); - btrfs_init_work(&fixup->work, btrfs_fixup_helper, - btrfs_writepage_fixup_worker, NULL, NULL); + btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); fixup->page = page; btrfs_queue_work(fs_info->fixup_workers, &fixup->work); return -EBUSY; @@ -2662,7 +2716,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path, key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); if (IS_ERR(inode)) { srcu_read_unlock(&fs_info->subvol_srcu, index); return 0; @@ -2986,7 +3040,7 @@ out_kfree: static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, u64 start, u64 len) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; cache = btrfs_lookup_block_group(fs_info, start); ASSERT(cache); @@ -3014,7 +3068,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) int compress_type = 0; int ret = 0; u64 logical_len = ordered_extent->len; - bool nolock; + bool freespace_inode; bool truncated = false; bool range_locked = false; bool clear_new_delalloc_bytes = false; @@ -3025,7 +3079,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) !test_bit(BTRFS_ORDERED_DIRECT, 
&ordered_extent->flags)) clear_new_delalloc_bytes = true; - nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); + freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode)); if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { ret = -EIO; @@ -3056,8 +3110,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset, ordered_extent->len); btrfs_ordered_update_i_size(inode, 0, ordered_extent); - if (nolock) - trans = btrfs_join_transaction_nolock(root); + if (freespace_inode) + trans = btrfs_join_transaction_spacecache(root); else trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { @@ -3091,8 +3145,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) EXTENT_DEFRAG, 0, 0, &cached_state); } - if (nolock) - trans = btrfs_join_transaction_nolock(root); + if (freespace_inode) + trans = btrfs_join_transaction_spacecache(root); else trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { @@ -3241,7 +3295,6 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ordered_extent *ordered_extent = NULL; struct btrfs_workqueue *wq; - btrfs_work_func_t func; trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); @@ -3250,16 +3303,12 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, end - start + 1, uptodate)) return; - if (btrfs_is_free_space_inode(BTRFS_I(inode))) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } - btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, - NULL); + btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); btrfs_queue_work(wq, &ordered_extent->work); } @@ -3518,7 +3567,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) found_key.objectid = found_key.offset; found_key.type = BTRFS_INODE_ITEM_KEY; found_key.offset = 0; - inode = btrfs_iget(fs_info->sb, &found_key, root, NULL); + inode = btrfs_iget(fs_info->sb, &found_key, root); ret = PTR_ERR_OR_ZERO(inode); if (ret && ret != -ENOENT) goto out; @@ -4951,7 +5000,7 @@ again: if (!page) { btrfs_delalloc_release_space(inode, data_reserved, block_start, blocksize, true); - btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); ret = -ENOMEM; goto out; } @@ -5018,7 +5067,7 @@ out_unlock: if (ret) btrfs_delalloc_release_space(inode, data_reserved, block_start, blocksize, true); - btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0)); + btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); unlock_page(page); put_page(page); out: @@ -5140,7 +5189,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) hole_em->block_len = 0; hole_em->orig_block_len = 0; hole_em->ram_bytes = hole_size; - hole_em->bdev = fs_info->fs_devices->latest_bdev; hole_em->compress_type = BTRFS_COMPRESS_NONE; hole_em->generation = fs_info->generation; @@ -5737,12 +5785,14 @@ static struct inode *btrfs_iget_locked(struct super_block *s, return inode; } -/* Get an inode object given its location and corresponding root. - * Returns in *is_new if the inode was read from disk +/* + * Get an inode object given its location and corresponding root. 
+ * Path can be preallocated to prevent recursing back to iget through + * allocator. NULL is also valid but may require an additional allocation + * later. */ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root, int *new, - struct btrfs_path *path) + struct btrfs_root *root, struct btrfs_path *path) { struct inode *inode; @@ -5757,8 +5807,6 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, if (!ret) { inode_tree_add(inode); unlock_new_inode(inode); - if (new) - *new = 1; } else { iget_failed(inode); /* @@ -5776,9 +5824,9 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, } struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root, int *new) + struct btrfs_root *root) { - return btrfs_iget_path(s, location, root, new, NULL); + return btrfs_iget_path(s, location, root, NULL); } static struct inode *new_simple_dir(struct super_block *s, @@ -5844,7 +5892,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) return ERR_PTR(ret); if (location.type == BTRFS_INODE_ITEM_KEY) { - inode = btrfs_iget(dir->i_sb, &location, root, NULL); + inode = btrfs_iget(dir->i_sb, &location, root); if (IS_ERR(inode)) return inode; @@ -5869,7 +5917,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) else inode = new_simple_dir(dir->i_sb, &location, sub_root); } else { - inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); + inode = btrfs_iget(dir->i_sb, &location, sub_root); } srcu_read_unlock(&fs_info->subvol_srcu, index); @@ -6918,8 +6966,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); - if (em) - em->bdev = fs_info->fs_devices->latest_bdev; read_unlock(&em_tree->lock); if (em) { @@ -6935,7 +6981,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, err = -ENOMEM; goto out; } - em->bdev = fs_info->fs_devices->latest_bdev; em->start = EXTENT_MAP_HOLE; em->orig_start = EXTENT_MAP_HOLE; em->len = (u64)-1; @@ -7194,7 +7239,6 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, err = -ENOMEM; goto out; } - em->bdev = NULL; ASSERT(hole_em); /* @@ -7554,7 +7598,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, { struct extent_map_tree *em_tree; struct extent_map *em; - struct btrfs_root *root = BTRFS_I(inode)->root; int ret; ASSERT(type == BTRFS_ORDERED_PREALLOC || @@ -7572,7 +7615,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, em->len = len; em->block_len = block_len; em->block_start = block_start; - em->bdev = root->fs_info->fs_devices->latest_bdev; em->orig_block_len = orig_block_len; em->ram_bytes = ram_bytes; em->generation = -1; @@ -7611,6 +7653,8 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em, struct inode *inode, u64 start, u64 len) { + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + if (em->block_start == EXTENT_MAP_HOLE || test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) return -ENOENT; @@ -7620,7 +7664,7 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em, bh_result->b_blocknr = (em->block_start + (start - em->start)) >> inode->i_blkbits; bh_result->b_size = len; - bh_result->b_bdev = em->bdev; + bh_result->b_bdev = fs_info->fs_devices->latest_bdev; set_buffer_mapped(bh_result); return 0; @@ -7703,7 +7747,7 @@ skip_cow: bh_result->b_blocknr = 
(em->block_start + (start - em->start)) >> inode->i_blkbits; bh_result->b_size = len; - bh_result->b_bdev = em->bdev; + bh_result->b_bdev = fs_info->fs_devices->latest_bdev; set_buffer_mapped(bh_result); if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) @@ -7845,7 +7889,7 @@ static inline blk_status_t submit_dio_repair_bio(struct inode *inode, if (ret) return ret; - ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); + ret = btrfs_map_bio(fs_info, bio, mirror_num); return ret; } @@ -8198,18 +8242,14 @@ static void __endio_write_update_ordered(struct inode *inode, struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ordered_extent *ordered = NULL; struct btrfs_workqueue *wq; - btrfs_work_func_t func; u64 ordered_offset = offset; u64 ordered_bytes = bytes; u64 last_offset; - if (btrfs_is_free_space_inode(BTRFS_I(inode))) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) wq = fs_info->endio_freespace_worker; - func = btrfs_freespace_write_helper; - } else { + else wq = fs_info->endio_write_workers; - func = btrfs_endio_write_helper; - } while (ordered_offset < offset + bytes) { last_offset = ordered_offset; @@ -8217,9 +8257,8 @@ static void __endio_write_update_ordered(struct inode *inode, &ordered_offset, ordered_bytes, uptodate)) { - btrfs_init_work(&ordered->work, func, - finish_ordered_fn, - NULL, NULL); + btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, + NULL); btrfs_queue_work(wq, &ordered->work); } /* @@ -8376,7 +8415,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, goto err; } map: - ret = btrfs_map_bio(fs_info, bio, 0, 0); + ret = btrfs_map_bio(fs_info, bio, 0); err: return ret; } @@ -8709,7 +8748,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } else if (ret >= 0 && (size_t)ret < count) btrfs_delalloc_release_space(inode, data_reserved, offset, count - (size_t)ret, true); - btrfs_delalloc_release_extents(BTRFS_I(inode), count, false); + btrfs_delalloc_release_extents(BTRFS_I(inode), count); } out: if (wakeup) @@ -9059,7 +9098,7 @@ again: unlock_extent_cached(io_tree, page_start, page_end, &cached_state); if (!ret2) { - btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); sb_end_pagefault(inode->i_sb); extent_changeset_free(data_reserved); return VM_FAULT_LOCKED; @@ -9068,7 +9107,7 @@ again: out_unlock: unlock_page(page); out: - btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0)); + btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); btrfs_delalloc_release_space(inode, data_reserved, page_start, reserved_space, (ret != 0)); out_noreserve: @@ -9308,7 +9347,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->io_failure_tree.track_uptodate = true; atomic_set(&ei->sync_writers, 0); mutex_init(&ei->log_mutex); - mutex_init(&ei->delalloc_mutex); btrfs_ordered_inode_tree_init(&ei->ordered_tree); INIT_LIST_HEAD(&ei->delalloc_inodes); INIT_LIST_HEAD(&ei->delayed_iput); @@ -9537,6 +9575,9 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_notrans; } + if (dest != root) + btrfs_record_root_in_trans(trans, dest); + /* * We need to find a free sequence number both in the source and * in the destination directory for the exchange. @@ -9731,6 +9772,18 @@ out_fail: commit_transaction = true; } if (commit_transaction) { + /* + * We may have set commit_transaction when logging the new name + * in the destination root, in which case we left the source + * root context in the list of log contextes. 
So make sure we + * remove it to avoid invalid memory accesses, since the context + * was allocated in our stack frame. + */ + if (sync_log_root) { + mutex_lock(&root->log_mutex); + list_del_init(&ctx_root.list); + mutex_unlock(&root->log_mutex); + } ret = btrfs_commit_transaction(trans); } else { int ret2; @@ -9744,6 +9797,9 @@ out_notrans: if (old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); + ASSERT(list_empty(&ctx_root.list)); + ASSERT(list_empty(&ctx_dest.list)); + return ret; } @@ -10088,8 +10144,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode init_completion(&work->completion); INIT_LIST_HEAD(&work->list); work->inode = inode; - btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, - btrfs_run_delalloc_work, NULL, NULL); + btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); return work; } @@ -10422,7 +10477,6 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, em->block_len = ins.offset; em->orig_block_len = ins.offset; em->ram_bytes = ins.offset; - em->bdev = fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PREALLOC, &em->flags); em->generation = trans->transid; @@ -10778,7 +10832,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, start = 0; while (start < isize) { u64 logical_block_start, physical_block_start; - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; u64 len = isize - start; em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index de730e56d3f5..a1ee0b775e65 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -479,10 +479,9 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg) return put_user(inode->i_generation, arg); } -static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) +static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_device *device; struct request_queue *q; struct fstrim_range range; @@ -541,7 +540,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) return 0; } -int btrfs_is_empty_uuid(u8 *uuid) +int __pure btrfs_is_empty_uuid(u8 *uuid) { int i; @@ -1360,8 +1359,7 @@ again: unlock_page(pages[i]); put_page(pages[i]); } - btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT, - false); + btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); extent_changeset_free(data_reserved); return i_done; out: @@ -1372,8 +1370,7 @@ out: btrfs_delalloc_release_space(inode, data_reserved, start_index << PAGE_SHIFT, page_cnt << PAGE_SHIFT, true); - btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT, - true); + btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); extent_changeset_free(data_reserved); return ret; @@ -1411,7 +1408,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, return -EINVAL; if (do_compress) { - if (range->compress_type > BTRFS_COMPRESS_TYPES) + if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES) return -EINVAL; if (range->compress_type) compress_type = range->compress_type; @@ -2464,7 +2461,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, goto out; } - temp_inode = btrfs_iget(sb, &key2, root, NULL); + temp_inode = btrfs_iget(sb, &key2, root); if (IS_ERR(temp_inode)) { ret = PTR_ERR(temp_inode); goto out; @@ -4034,16 
+4031,15 @@ out: static void get_block_group_info(struct list_head *groups_list, struct btrfs_ioctl_space_info *space) { - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; space->total_bytes = 0; space->used_bytes = 0; space->flags = 0; list_for_each_entry(block_group, groups_list, list) { space->flags = block_group->flags; - space->total_bytes += block_group->key.offset; - space->used_bytes += - btrfs_block_group_used(&block_group->item); + space->total_bytes += block_group->length; + space->used_bytes += block_group->used; } } @@ -4197,9 +4193,6 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root, u64 transid; int ret; - btrfs_warn(root->fs_info, - "START_SYNC ioctl is deprecated and will be removed in kernel 5.7"); - trans = btrfs_attach_transaction_barrier(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT) @@ -4227,9 +4220,6 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info, { u64 transid; - btrfs_warn(fs_info, - "WAIT_SYNC ioctl is deprecated and will be removed in kernel 5.7"); - if (argp) { if (copy_from_user(&transid, argp, sizeof(transid))) return -EFAULT; @@ -4960,10 +4950,9 @@ drop_write: return ret; } -static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg) +static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_quota_rescan_args *qsa; int ret = 0; @@ -4986,11 +4975,9 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg) return ret; } -static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg) +static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -5162,10 +5149,9 @@ out: return ret; } -static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg) +static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); size_t len; int ret; char label[BTRFS_LABEL_SIZE]; @@ -5249,10 +5235,9 @@ int btrfs_ioctl_get_supported_features(void __user *arg) return 0; } -static int btrfs_ioctl_get_features(struct file *file, void __user *arg) +static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info, + void __user *arg) { - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_ioctl_feature_flags features; @@ -5453,11 +5438,11 @@ long btrfs_ioctl(struct file *file, unsigned int case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); case FS_IOC_GETFSLABEL: - return btrfs_ioctl_get_fslabel(file, argp); + return btrfs_ioctl_get_fslabel(fs_info, argp); case FS_IOC_SETFSLABEL: return btrfs_ioctl_set_fslabel(file, argp); case FITRIM: - return btrfs_ioctl_fitrim(file, argp); + return btrfs_ioctl_fitrim(fs_info, argp); case BTRFS_IOC_SNAP_CREATE: return btrfs_ioctl_snap_create(file, argp, 0); case BTRFS_IOC_SNAP_CREATE_V2: @@ -5562,15 +5547,15 @@ long btrfs_ioctl(struct file *file, unsigned int case BTRFS_IOC_QUOTA_RESCAN: return btrfs_ioctl_quota_rescan(file, argp); case BTRFS_IOC_QUOTA_RESCAN_STATUS: - return 
btrfs_ioctl_quota_rescan_status(file, argp); + return btrfs_ioctl_quota_rescan_status(fs_info, argp); case BTRFS_IOC_QUOTA_RESCAN_WAIT: - return btrfs_ioctl_quota_rescan_wait(file, argp); + return btrfs_ioctl_quota_rescan_wait(fs_info, argp); case BTRFS_IOC_DEV_REPLACE: return btrfs_ioctl_dev_replace(fs_info, argp); case BTRFS_IOC_GET_SUPPORTED_FEATURES: return btrfs_ioctl_get_supported_features(argp); case BTRFS_IOC_GET_FEATURES: - return btrfs_ioctl_get_features(file, argp); + return btrfs_ioctl_get_features(fs_info, argp); case BTRFS_IOC_SET_FEATURES: return btrfs_ioctl_set_features(file, argp); case FS_IOC_FSGETXATTR: diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 7f9a578a1a20..571c4826c428 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -13,65 +13,164 @@ #include "extent_io.h" #include "locking.h" +/* + * Extent buffer locking + * ===================== + * + * The locks use a custom scheme that allows to do more operations than are + * available fromt current locking primitives. The building blocks are still + * rwlock and wait queues. + * + * Required semantics: + * + * - reader/writer exclusion + * - writer/writer exclusion + * - reader/reader sharing + * - spinning lock semantics + * - blocking lock semantics + * - try-lock semantics for readers and writers + * - one level nesting, allowing read lock to be taken by the same thread that + * already has write lock + * + * The extent buffer locks (also called tree locks) manage access to eb data + * related to the storage in the b-tree (keys, items, but not the individual + * members of eb). + * We want concurrency of many readers and safe updates. The underlying locking + * is done by read-write spinlock and the blocking part is implemented using + * counters and wait queues. + * + * spinning semantics - the low-level rwlock is held so all other threads that + * want to take it are spinning on it. + * + * blocking semantics - the low-level rwlock is not held but the counter + * denotes how many times the blocking lock was held; + * sleeping is possible + * + * Write lock always allows only one thread to access the data. + * + * + * Debugging + * --------- + * + * There are additional state counters that are asserted in various contexts, + * removed from non-debug build to reduce extent_buffer size and for + * performance reasons. + * + * + * Lock nesting + * ------------ + * + * A write operation on a tree might indirectly start a look up on the same + * tree. This can happen when btrfs_cow_block locks the tree and needs to + * lookup free extents. + * + * btrfs_cow_block + * .. + * alloc_tree_block_no_bg_flush + * btrfs_alloc_tree_block + * btrfs_reserve_extent + * .. + * load_free_space_cache + * .. + * btrfs_lookup_file_extent + * btrfs_search_slot + * + * + * Locking pattern - spinning + * -------------------------- + * + * The simple locking scenario, the +--+ denotes the spinning section. + * + * +- btrfs_tree_lock + * | - extent_buffer::rwlock is held + * | - no heavy operations should happen, eg. IO, memory allocations, large + * | structure traversals + * +- btrfs_tree_unock +* +* + * Locking pattern - blocking + * -------------------------- + * + * The blocking write uses the following scheme. The +--+ denotes the spinning + * section. + * + * +- btrfs_tree_lock + * | + * +- btrfs_set_lock_blocking_write + * + * - allowed: IO, memory allocations, etc. + * + * -- btrfs_tree_unlock - note, no explicit unblocking necessary + * + * + * Blocking read is similar. 
+ * + * +- btrfs_tree_read_lock + * | + * +- btrfs_set_lock_blocking_read + * + * - heavy operations allowed + * + * +- btrfs_tree_read_unlock_blocking + * | + * +- btrfs_tree_read_unlock + * + */ + #ifdef CONFIG_BTRFS_DEBUG -static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) +static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { WARN_ON(eb->spinning_writers); eb->spinning_writers++; } -static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) +static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { WARN_ON(eb->spinning_writers != 1); eb->spinning_writers--; } -static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) +static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { WARN_ON(eb->spinning_writers); } -static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) +static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { atomic_inc(&eb->spinning_readers); } -static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) +static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { WARN_ON(atomic_read(&eb->spinning_readers) == 0); atomic_dec(&eb->spinning_readers); } -static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) +static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { atomic_inc(&eb->read_locks); } -static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) +static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { atomic_dec(&eb->read_locks); } -static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) +static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { BUG_ON(!atomic_read(&eb->read_locks)); } -static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) +static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { eb->write_locks++; } -static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) +static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { eb->write_locks--; } -void btrfs_assert_tree_locked(struct extent_buffer *eb) -{ - BUG_ON(!eb->write_locks); -} - #else static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { } static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { } @@ -81,11 +180,19 @@ static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { } static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { } static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { } static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { } -void btrfs_assert_tree_locked(struct extent_buffer *eb) { } static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { } static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { } #endif +/* + * Mark already held read lock as blocking. Can be nested in write lock by the + * same thread. + * + * Use when there are potentially long operations ahead so other thread waiting + * on the lock will not actively spin but sleep instead. + * + * The rwlock is released and blocking reader counter is increased. + */ void btrfs_set_lock_blocking_read(struct extent_buffer *eb) { trace_btrfs_set_lock_blocking_read(eb); @@ -102,6 +209,14 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb) read_unlock(&eb->lock); } +/* + * Mark already held write lock as blocking. 
+ * + * Use when there are potentially long operations ahead so other threads + * waiting on the lock will not actively spin but sleep instead. + * + * The rwlock is released and blocking writers is set. + */ void btrfs_set_lock_blocking_write(struct extent_buffer *eb) { trace_btrfs_set_lock_blocking_write(eb); @@ -115,14 +230,19 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb) if (eb->blocking_writers == 0) { btrfs_assert_spinning_writers_put(eb); btrfs_assert_tree_locked(eb); - eb->blocking_writers++; + WRITE_ONCE(eb->blocking_writers, 1); write_unlock(&eb->lock); } } /* - * take a spinning read lock. This will wait for any blocking - * writers + * Lock the extent buffer for read. Wait for any writers (spinning or blocking). + * Can be nested in write lock by the same thread. + * + * Use when the locked section does only lightweight actions and busy waiting + * would be cheaper than making other threads do the wait/wake loop. + * + * The rwlock is held upon exit. */ void btrfs_tree_read_lock(struct extent_buffer *eb) { @@ -134,23 +254,24 @@ again: read_lock(&eb->lock); BUG_ON(eb->blocking_writers == 0 && current->pid == eb->lock_owner); - if (eb->blocking_writers && current->pid == eb->lock_owner) { - /* - * This extent is already write-locked by our thread. We allow - * an additional read lock to be added because it's for the same - * thread. btrfs_find_all_roots() depends on this as it may be - * called on a partly (write-)locked tree. - */ - BUG_ON(eb->lock_nested); - eb->lock_nested = true; - read_unlock(&eb->lock); - trace_btrfs_tree_read_lock(eb, start_ns); - return; - } if (eb->blocking_writers) { + if (current->pid == eb->lock_owner) { + /* + * This extent is already write-locked by our thread. + * We allow an additional read lock to be added because + * it's for the same thread. btrfs_find_all_roots() + * depends on this as it may be called on a partly + * (write-)locked tree. + */ + BUG_ON(eb->lock_nested); + eb->lock_nested = true; + read_unlock(&eb->lock); + trace_btrfs_tree_read_lock(eb, start_ns); + return; + } read_unlock(&eb->lock); wait_event(eb->write_lock_wq, - eb->blocking_writers == 0); + READ_ONCE(eb->blocking_writers) == 0); goto again; } btrfs_assert_tree_read_locks_get(eb); @@ -159,17 +280,19 @@ again: } /* - * take a spinning read lock. - * returns 1 if we get the read lock and 0 if we don't - * this won't wait for blocking writers + * Lock extent buffer for read, optimistically expecting that there are no + * contending blocking writers. If there are, don't wait. + * + * Return 1 if the rwlock has been taken, 0 otherwise */ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) { - if (eb->blocking_writers) + if (READ_ONCE(eb->blocking_writers)) return 0; read_lock(&eb->lock); - if (eb->blocking_writers) { + /* Refetch value after lock */ + if (READ_ONCE(eb->blocking_writers)) { read_unlock(&eb->lock); return 0; } @@ -180,18 +303,20 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) } /* - * returns 1 if we get the read lock and 0 if we don't - * this won't wait for blocking writers + * Try-lock for read. Don't block or wait for contending writers. 
+ * + * Retrun 1 if the rwlock has been taken, 0 otherwise */ int btrfs_try_tree_read_lock(struct extent_buffer *eb) { - if (eb->blocking_writers) + if (READ_ONCE(eb->blocking_writers)) return 0; if (!read_trylock(&eb->lock)) return 0; - if (eb->blocking_writers) { + /* Refetch value after lock */ + if (READ_ONCE(eb->blocking_writers)) { read_unlock(&eb->lock); return 0; } @@ -202,16 +327,19 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb) } /* - * returns 1 if we get the read lock and 0 if we don't - * this won't wait for blocking writers or readers + * Try-lock for write. May block until the lock is uncontended, but does not + * wait until it is free. + * + * Retrun 1 if the rwlock has been taken, 0 otherwise */ int btrfs_try_tree_write_lock(struct extent_buffer *eb) { - if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) + if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) return 0; write_lock(&eb->lock); - if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) { + /* Refetch value after lock */ + if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) { write_unlock(&eb->lock); return 0; } @@ -223,7 +351,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb) } /* - * drop a spinning read lock + * Release read lock. Must be used only if the lock is in spinning mode. If + * the read lock is nested, must pair with read lock before the write unlock. + * + * The rwlock is not held upon exit. */ void btrfs_tree_read_unlock(struct extent_buffer *eb) { @@ -245,7 +376,11 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb) } /* - * drop a blocking read lock + * Release read lock, previously set to blocking by a pairing call to + * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same + * thread. + * + * State of rwlock is unchanged, last reader wakes waiting threads. */ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) { @@ -269,8 +404,10 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb) } /* - * take a spinning write lock. This will wait for both - * blocking readers or writers + * Lock for write. Wait for all blocking and spinning readers and writers. This + * starts context where reader lock could be nested by the same thread. + * + * The rwlock is held for write upon exit. */ void btrfs_tree_lock(struct extent_buffer *eb) { @@ -282,9 +419,11 @@ void btrfs_tree_lock(struct extent_buffer *eb) WARN_ON(eb->lock_owner == current->pid); again: wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); - wait_event(eb->write_lock_wq, eb->blocking_writers == 0); + wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0); write_lock(&eb->lock); - if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) { + /* Refetch value after lock */ + if (atomic_read(&eb->blocking_readers) || + READ_ONCE(eb->blocking_writers)) { write_unlock(&eb->lock); goto again; } @@ -295,10 +434,19 @@ again: } /* - * drop a spinning or a blocking write lock. + * Release the write lock, either blocking or spinning (ie. there's no need + * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking). + * This also ends the context for nesting, the read lock must have been + * released already. + * + * Tasks blocked and waiting are woken, rwlock is not held upon exit. 
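The try-lock and lock-atomic paths above all follow the same lockless shape: peek at blocking_writers without holding eb->lock, bail out early if a blocking writer is active, then re-check after taking the rwlock because the first read can race with a writer going blocking. The READ_ONCE() loads pair with the WRITE_ONCE() stores done in btrfs_set_lock_blocking_write() and btrfs_tree_unlock(). A condensed sketch of that shape (field names as in this file, not compilable on its own):

	if (READ_ONCE(eb->blocking_writers))	/* unlocked peek, may race */
		return 0;
	read_lock(&eb->lock);
	if (READ_ONCE(eb->blocking_writers)) {	/* refetch under the lock */
		read_unlock(&eb->lock);
		return 0;
	}
	/* success: rwlock held, caller continues as a spinning reader */
	return 1;
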
*/ void btrfs_tree_unlock(struct extent_buffer *eb) { + /* + * This is read both locked and unlocked but always by the same thread + * that already owns the lock so we don't need to use READ_ONCE + */ int blockers = eb->blocking_writers; BUG_ON(blockers > 1); @@ -310,7 +458,8 @@ void btrfs_tree_unlock(struct extent_buffer *eb) if (blockers) { btrfs_assert_no_spinning_writers(eb); - eb->blocking_writers--; + /* Unlocked write */ + WRITE_ONCE(eb->blocking_writers, 0); /* * We need to order modifying blocking_writers above with * actually waking up the sleepers to ensure they see the @@ -322,3 +471,55 @@ void btrfs_tree_unlock(struct extent_buffer *eb) write_unlock(&eb->lock); } } + +/* + * Set all locked nodes in the path to blocking locks. This should be done + * before scheduling + */ +void btrfs_set_path_blocking(struct btrfs_path *p) +{ + int i; + + for (i = 0; i < BTRFS_MAX_LEVEL; i++) { + if (!p->nodes[i] || !p->locks[i]) + continue; + /* + * If we currently have a spinning reader or writer lock this + * will bump the count of blocking holders and drop the + * spinlock. + */ + if (p->locks[i] == BTRFS_READ_LOCK) { + btrfs_set_lock_blocking_read(p->nodes[i]); + p->locks[i] = BTRFS_READ_LOCK_BLOCKING; + } else if (p->locks[i] == BTRFS_WRITE_LOCK) { + btrfs_set_lock_blocking_write(p->nodes[i]); + p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; + } + } +} + +/* + * This releases any locks held in the path starting at level and going all the + * way up to the root. + * + * btrfs_search_slot will keep the lock held on higher nodes in a few corner + * cases, such as COW of the block at slot zero in the node. This ignores + * those rules, and it should only be called when there are no more updates to + * be done higher up in the tree. + */ +void btrfs_unlock_up_safe(struct btrfs_path *path, int level) +{ + int i; + + if (path->keep_locks) + return; + + for (i = level; i < BTRFS_MAX_LEVEL; i++) { + if (!path->nodes[i]) + continue; + if (!path->locks[i]) + continue; + btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); + path->locks[i] = 0; + } +} diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index b775a4207ed9..21a285883e89 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -6,6 +6,8 @@ #ifndef BTRFS_LOCKING_H #define BTRFS_LOCKING_H +#include "extent_io.h" + #define BTRFS_WRITE_LOCK 1 #define BTRFS_READ_LOCK 2 #define BTRFS_WRITE_LOCK_BLOCKING 3 @@ -19,11 +21,20 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb); void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb); void btrfs_set_lock_blocking_read(struct extent_buffer *eb); void btrfs_set_lock_blocking_write(struct extent_buffer *eb); -void btrfs_assert_tree_locked(struct extent_buffer *eb); int btrfs_try_tree_read_lock(struct extent_buffer *eb); int btrfs_try_tree_write_lock(struct extent_buffer *eb); int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); +#ifdef CONFIG_BTRFS_DEBUG +static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { + BUG_ON(!eb->write_locks); +} +#else +static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { } +#endif + +void btrfs_set_path_blocking(struct btrfs_path *p); +void btrfs_unlock_up_safe(struct btrfs_path *path, int level); static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) { diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index acad4174f68d..aa9cd11f4b78 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -63,27 +63,7 @@ struct workspace { static struct workspace_manager wsm; -static void lzo_init_workspace_manager(void) -{ 
- btrfs_init_workspace_manager(&wsm, &btrfs_lzo_compress); -} - -static void lzo_cleanup_workspace_manager(void) -{ - btrfs_cleanup_workspace_manager(&wsm); -} - -static struct list_head *lzo_get_workspace(unsigned int level) -{ - return btrfs_get_workspace(&wsm, level); -} - -static void lzo_put_workspace(struct list_head *ws) -{ - btrfs_put_workspace(&wsm, ws); -} - -static void lzo_free_workspace(struct list_head *ws) +void lzo_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -93,7 +73,7 @@ static void lzo_free_workspace(struct list_head *ws) kfree(workspace); } -static struct list_head *lzo_alloc_workspace(unsigned int level) +struct list_head *lzo_alloc_workspace(unsigned int level) { struct workspace *workspace; @@ -131,13 +111,9 @@ static inline size_t read_compress_length(const char *buf) return le32_to_cpu(dlen); } -static int lzo_compress_pages(struct list_head *ws, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out) +int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; @@ -303,7 +279,7 @@ out: return ret; } -static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) +int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0, ret2; @@ -444,10 +420,9 @@ done: return ret; } -static int lzo_decompress(struct list_head *ws, unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) +int lzo_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen) { struct workspace *workspace = list_entry(ws, struct workspace, list); size_t in_len; @@ -508,15 +483,7 @@ out: } const struct btrfs_compress_op btrfs_lzo_compress = { - .init_workspace_manager = lzo_init_workspace_manager, - .cleanup_workspace_manager = lzo_cleanup_workspace_manager, - .get_workspace = lzo_get_workspace, - .put_workspace = lzo_put_workspace, - .alloc_workspace = lzo_alloc_workspace, - .free_workspace = lzo_free_workspace, - .compress_pages = lzo_compress_pages, - .decompress_bio = lzo_decompress_bio, - .decompress = lzo_decompress, + .workspace_manager = &wsm, .max_level = 1, .default_level = 1, }; diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h index 7d564924dfeb..72bab64ecf60 100644 --- a/fs/btrfs/misc.h +++ b/fs/btrfs/misc.h @@ -47,4 +47,15 @@ static inline u64 div_factor_fine(u64 num, int factor) return div_u64(num, 100); } +/* Copy of is_power_of_two that is 64bit safe */ +static inline bool is_power_of_two_u64(u64 n) +{ + return n != 0 && (n & (n - 1)) == 0; +} + +static inline bool has_single_bit_set(u64 n) +{ + return is_power_of_two_u64(n); +} + #endif diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 24b6c72b9a59..fb09bc2f8e4d 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -547,7 +547,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, spin_unlock(&root->ordered_extent_lock); btrfs_init_work(&ordered->flush_work, - btrfs_flush_delalloc_helper, btrfs_run_ordered_extent_work, NULL, NULL); list_add_tail(&ordered->work_list, &works); 
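The btrfs_init_work() change seen here repeats across inode.c, qgroup.c, raid56.c and reada.c in this series: the per-caller *_helper argument is dropped, and callers pass only the work function plus the optional ordered-completion and free hooks. Assuming the post-change prototype is btrfs_init_work(work, func, ordered_func, ordered_free), which matches every call site in this diff, a caller with a hypothetical work function (my_flush_worker is illustrative, not from the patch) ends up as:

	static void my_flush_worker(struct btrfs_work *work)
	{
		/* deferred processing; recover the container via container_of() */
	}

	btrfs_init_work(&ordered->flush_work, my_flush_worker, NULL, NULL);
	btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
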
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work); @@ -573,12 +572,11 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, return count; } -u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, +void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, const u64 range_start, const u64 range_len) { struct btrfs_root *root; struct list_head splice; - u64 total_done = 0; u64 done; INIT_LIST_HEAD(&splice); @@ -598,7 +596,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, done = btrfs_wait_ordered_extents(root, nr, range_start, range_len); btrfs_put_fs_root(root); - total_done += done; spin_lock(&fs_info->ordered_root_lock); if (nr != U64_MAX) { @@ -608,8 +605,6 @@ u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, list_splice_tail(&splice, &fs_info->ordered_roots); spin_unlock(&fs_info->ordered_root_lock); mutex_unlock(&fs_info->ordered_operations_mutex); - - return total_done; } /* diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 5204171ea962..4eb0319a86d7 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -186,7 +186,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u8 *sum, int len); u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, const u64 range_start, const u64 range_len); -u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, +void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, const u64 range_start, const u64 range_len); void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree, struct btrfs_inode *inode, u64 start, diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 9cb50577d982..873b6b694107 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -266,9 +266,9 @@ void btrfs_print_leaf(struct extent_buffer *l) struct btrfs_block_group_item); pr_info( "\t\tblock group used %llu chunk_objectid %llu flags %llu\n", - btrfs_disk_block_group_used(l, bi), - btrfs_disk_block_group_chunk_objectid(l, bi), - btrfs_disk_block_group_flags(l, bi)); + btrfs_block_group_used(l, bi), + btrfs_block_group_chunk_objectid(l, bi), + btrfs_block_group_flags(l, bi)); break; case BTRFS_CHUNK_ITEM_KEY: print_chunk(l, btrfs_item_ptr(l, i, diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index 1e664e0b59b8..deb59e7cfcac 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c @@ -416,11 +416,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - parent_inode = btrfs_iget(sb, &key, parent_root, NULL); + parent_inode = btrfs_iget(sb, &key, parent_root); if (IS_ERR(parent_inode)) return PTR_ERR(parent_inode); - child_inode = btrfs_iget(sb, &key, root, NULL); + child_inode = btrfs_iget(sb, &key, root); if (IS_ERR(child_inode)) { iput(parent_inode); return PTR_ERR(child_inode); @@ -437,8 +437,6 @@ void __init btrfs_props_init(void) { int i; - hash_init(prop_handlers_ht); - for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) { struct prop_handler *p = &prop_handlers[i]; u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name)); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index c4bb69941c77..93aeb2e539a4 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1811,7 +1811,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0); /* For src_path */ - extent_buffer_get(src_eb); + atomic_inc(&src_eb->refs); src_path->nodes[root_level] = 
src_eb; src_path->slots[root_level] = dst_path->slots[root_level]; src_path->locks[root_level] = 0; @@ -2067,7 +2067,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, goto out; } /* For dst_path */ - extent_buffer_get(dst_eb); + atomic_inc(&dst_eb->refs); dst_path->nodes[level] = dst_eb; dst_path->slots[level] = 0; dst_path->locks[level] = 0; @@ -2126,7 +2126,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, * walk back up the tree (adjusting slot pointers as we go) * and restart the search process. */ - extent_buffer_get(root_eb); /* For path */ + atomic_inc(&root_eb->refs); /* For path */ path->nodes[root_level] = root_eb; path->slots[root_level] = 0; path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ @@ -3277,10 +3277,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, spin_unlock(&fs_info->qgroup_lock); mutex_unlock(&fs_info->qgroup_rescan_lock); - memset(&fs_info->qgroup_rescan_work, 0, - sizeof(fs_info->qgroup_rescan_work)); btrfs_init_work(&fs_info->qgroup_rescan_work, - btrfs_qgroup_rescan_helper, btrfs_qgroup_rescan_worker, NULL, NULL); return 0; } @@ -3629,7 +3626,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, return 0; BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); - trace_qgroup_meta_reserve(root, type, (s64)num_bytes); + trace_qgroup_meta_reserve(root, (s64)num_bytes, type); ret = qgroup_reserve(root, num_bytes, enforce, type); if (ret < 0) return ret; @@ -3676,7 +3673,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, */ num_bytes = sub_root_meta_rsv(root, num_bytes, type); BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); - trace_qgroup_meta_reserve(root, type, -(s64)num_bytes); + trace_qgroup_meta_reserve(root, -(s64)num_bytes, type); btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, num_bytes, type); } @@ -3826,7 +3823,7 @@ out: */ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *subvol_root, - struct btrfs_block_group_cache *bg, + struct btrfs_block_group *bg, struct extent_buffer *subvol_parent, int subvol_slot, struct extent_buffer *reloc_parent, int reloc_slot, u64 last_snapshot) diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 46ba7bd2961c..236f12224d52 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -408,7 +408,7 @@ void btrfs_qgroup_init_swapped_blocks( void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root); int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *subvol_root, - struct btrfs_block_group_cache *bg, + struct btrfs_block_group *bg, struct extent_buffer *subvol_parent, int subvol_slot, struct extent_buffer *reloc_parent, int reloc_slot, u64 last_snapshot); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 57a2ac721985..a8e53c8e7b01 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -190,7 +190,7 @@ static void scrub_parity_work(struct btrfs_work *work); static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) { - btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL); + btrfs_init_work(&rbio->work, work_func, NULL, NULL); btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); } @@ -671,8 +671,7 @@ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) */ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) { - int bucket = rbio_bucket(rbio); - struct btrfs_stripe_hash *h = 
rbio->fs_info->stripe_hash_table->table + bucket; + struct btrfs_stripe_hash *h; struct btrfs_raid_bio *cur; struct btrfs_raid_bio *pending; unsigned long flags; @@ -680,64 +679,63 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) struct btrfs_raid_bio *cache_drop = NULL; int ret = 0; + h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio); + spin_lock_irqsave(&h->lock, flags); list_for_each_entry(cur, &h->hash_list, hash_list) { - if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { - spin_lock(&cur->bio_list_lock); - - /* can we steal this cached rbio's pages? */ - if (bio_list_empty(&cur->bio_list) && - list_empty(&cur->plug_list) && - test_bit(RBIO_CACHE_BIT, &cur->flags) && - !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { - list_del_init(&cur->hash_list); - refcount_dec(&cur->refs); - - steal_rbio(cur, rbio); - cache_drop = cur; - spin_unlock(&cur->bio_list_lock); + if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0]) + continue; - goto lockit; - } + spin_lock(&cur->bio_list_lock); - /* can we merge into the lock owner? */ - if (rbio_can_merge(cur, rbio)) { - merge_rbio(cur, rbio); - spin_unlock(&cur->bio_list_lock); - freeit = rbio; - ret = 1; - goto out; - } + /* Can we steal this cached rbio's pages? */ + if (bio_list_empty(&cur->bio_list) && + list_empty(&cur->plug_list) && + test_bit(RBIO_CACHE_BIT, &cur->flags) && + !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { + list_del_init(&cur->hash_list); + refcount_dec(&cur->refs); + steal_rbio(cur, rbio); + cache_drop = cur; + spin_unlock(&cur->bio_list_lock); - /* - * we couldn't merge with the running - * rbio, see if we can merge with the - * pending ones. We don't have to - * check for rmw_locked because there - * is no way they are inside finish_rmw - * right now - */ - list_for_each_entry(pending, &cur->plug_list, - plug_list) { - if (rbio_can_merge(pending, rbio)) { - merge_rbio(pending, rbio); - spin_unlock(&cur->bio_list_lock); - freeit = rbio; - ret = 1; - goto out; - } - } + goto lockit; + } - /* no merging, put us on the tail of the plug list, - * our rbio will be started with the currently - * running rbio unlocks - */ - list_add_tail(&rbio->plug_list, &cur->plug_list); + /* Can we merge into the lock owner? */ + if (rbio_can_merge(cur, rbio)) { + merge_rbio(cur, rbio); spin_unlock(&cur->bio_list_lock); + freeit = rbio; ret = 1; goto out; } + + + /* + * We couldn't merge with the running rbio, see if we can merge + * with the pending ones. 
We don't have to check for rmw_locked + * because there is no way they are inside finish_rmw right now + */ + list_for_each_entry(pending, &cur->plug_list, plug_list) { + if (rbio_can_merge(pending, rbio)) { + merge_rbio(pending, rbio); + spin_unlock(&cur->bio_list_lock); + freeit = rbio; + ret = 1; + goto out; + } + } + + /* + * No merging, put us on the tail of the plug list, our rbio + * will be started with the currently running rbio unlocks + */ + list_add_tail(&rbio->plug_list, &cur->plug_list); + spin_unlock(&cur->bio_list_lock); + ret = 1; + goto out; } lockit: refcount_inc(&rbio->refs); @@ -1743,8 +1741,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) plug = container_of(cb, struct btrfs_plug_cb, cb); if (from_schedule) { - btrfs_init_work(&plug->work, btrfs_rmw_helper, - unplug_work, NULL, NULL); + btrfs_init_work(&plug->work, unplug_work, NULL, NULL); btrfs_queue_work(plug->info->rmw_workers, &plug->work); return; diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index ee6f60547a8d..243a2e44526e 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -227,7 +227,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical, struct btrfs_fs_info *fs_info = dev->fs_info; int ret; struct reada_zone *zone; - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; u64 start; u64 end; int i; @@ -248,8 +248,8 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical, if (!cache) return NULL; - start = cache->key.objectid; - end = start + cache->key.offset - 1; + start = cache->start; + end = start + cache->length - 1; btrfs_put_block_group(cache); zone = kzalloc(sizeof(*zone), GFP_KERNEL); @@ -752,21 +752,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev) static void reada_start_machine_worker(struct btrfs_work *work) { struct reada_machine_work *rmw; - struct btrfs_fs_info *fs_info; int old_ioprio; rmw = container_of(work, struct reada_machine_work, work); - fs_info = rmw->fs_info; - - kfree(rmw); old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current), task_nice_ioprio(current)); set_task_ioprio(current, BTRFS_IOPRIO_READA); - __reada_start_machine(fs_info); + __reada_start_machine(rmw->fs_info); set_task_ioprio(current, old_ioprio); - atomic_dec(&fs_info->reada_works_cnt); + atomic_dec(&rmw->fs_info->reada_works_cnt); + + kfree(rmw); } static void __reada_start_machine(struct btrfs_fs_info *fs_info) @@ -821,8 +819,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info) /* FIXME we cannot handle this properly right now */ BUG(); } - btrfs_init_work(&rmw->work, btrfs_readahead_helper, - reada_start_machine_worker, NULL, NULL); + btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL); rmw->fs_info = fs_info; btrfs_queue_work(fs_info->readahead_workers, &rmw->work); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 00504657b602..d897a8e5e430 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -147,7 +147,7 @@ struct file_extent_cluster { struct reloc_control { /* block group to relocate */ - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; /* extent tree */ struct btrfs_root *extent_root; /* inode for moving data */ @@ -1560,11 +1560,10 @@ again: return NULL; } -static int in_block_group(u64 bytenr, - struct btrfs_block_group_cache *block_group) +static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group) { - if (bytenr >= block_group->key.objectid && - 
bytenr < block_group->key.objectid + block_group->key.offset) + if (bytenr >= block_group->start && + bytenr < block_group->start + block_group->length) return 1; return 0; } @@ -2246,7 +2245,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { level = btrfs_root_level(root_item); - extent_buffer_get(reloc_root->node); + atomic_inc(&reloc_root->node->refs); path->nodes[level] = reloc_root->node; path->slots[level] = 0; } else { @@ -3195,7 +3194,6 @@ static noinline_for_stack int setup_extent_mapping(struct inode *inode, u64 start, u64 end, u64 block_start) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; int ret = 0; @@ -3208,7 +3206,6 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, em->len = end + 1 - start; em->block_len = em->len; em->block_start = block_start; - em->bdev = fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); lock_extent(&BTRFS_I(inode)->io_tree, start, end); @@ -3277,6 +3274,8 @@ static int relocate_file_extent_cluster(struct inode *inode, if (!page) { btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), + PAGE_SIZE); ret = -ENOMEM; goto out; } @@ -3297,7 +3296,7 @@ static int relocate_file_extent_cluster(struct inode *inode, btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE, true); btrfs_delalloc_release_extents(BTRFS_I(inode), - PAGE_SIZE, true); + PAGE_SIZE); ret = -EIO; goto out; } @@ -3326,7 +3325,7 @@ static int relocate_file_extent_cluster(struct inode *inode, btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE, true); btrfs_delalloc_release_extents(BTRFS_I(inode), - PAGE_SIZE, true); + PAGE_SIZE); clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, @@ -3342,8 +3341,7 @@ static int relocate_file_extent_cluster(struct inode *inode, put_page(page); index++; - btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, - false); + btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); balance_dirty_pages_ratelimited(inode->i_mapping); btrfs_throttle(fs_info); } @@ -3543,7 +3541,7 @@ static int block_use_full_backref(struct reloc_control *rc, } static int delete_block_group_cache(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *block_group, + struct btrfs_block_group *block_group, struct inode *inode, u64 ino) { @@ -3559,7 +3557,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); if (IS_ERR(inode)) return -ENOENT; @@ -3862,7 +3860,7 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, u64 start, end, last; int ret; - last = rc->block_group->key.objectid + rc->block_group->key.offset; + last = rc->block_group->start + rc->block_group->length; while (1) { cond_resched(); if (rc->search_start >= last) { @@ -3979,7 +3977,7 @@ int prepare_to_relocate(struct reloc_control *rc) return -ENOMEM; memset(&rc->cluster, 0, sizeof(rc->cluster)); - rc->search_start = rc->block_group->key.objectid; + rc->search_start = rc->block_group->start; rc->extents_found = 0; rc->nodes_relocated = 0; rc->merging_rsv_size = 0; @@ -4218,7 +4216,7 @@ out: */ static noinline_for_stack struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache 
*group) + struct btrfs_block_group *group) { struct inode *inode = NULL; struct btrfs_trans_handle *trans; @@ -4245,9 +4243,9 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, key.objectid = objectid; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); BUG_ON(IS_ERR(inode)); - BTRFS_I(inode)->index_cnt = group->key.objectid; + BTRFS_I(inode)->index_cnt = group->start; err = btrfs_orphan_add(trans, BTRFS_I(inode)); out: @@ -4282,7 +4280,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) * Print the block group being relocated */ static void describe_relocation(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *block_group) + struct btrfs_block_group *block_group) { char buf[128] = {'\0'}; @@ -4290,7 +4288,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info, btrfs_info(fs_info, "relocating block group %llu flags %s", - block_group->key.objectid, buf); + block_group->start, buf); } /* @@ -4298,7 +4296,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info, */ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) { - struct btrfs_block_group_cache *bg; + struct btrfs_block_group *bg; struct btrfs_root *extent_root = fs_info->extent_root; struct reloc_control *rc; struct inode *inode; @@ -4325,7 +4323,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) rc->extent_root = extent_root; rc->block_group = bg; - ret = btrfs_inc_block_group_ro(rc->block_group); + ret = btrfs_inc_block_group_ro(rc->block_group, true); if (ret) { err = ret; goto out; @@ -4363,8 +4361,8 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) btrfs_wait_block_group_reservations(rc->block_group); btrfs_wait_nocow_writers(rc->block_group); btrfs_wait_ordered_roots(fs_info, U64_MAX, - rc->block_group->key.objectid, - rc->block_group->key.offset); + rc->block_group->start, + rc->block_group->length); while (1) { mutex_lock(&fs_info->cleaner_mutex); @@ -4404,7 +4402,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) WARN_ON(rc->block_group->pinned > 0); WARN_ON(rc->block_group->reserved > 0); - WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); + WARN_ON(rc->block_group->used > 0); out: if (err && rw) btrfs_dec_block_group_ro(rc->block_group); @@ -4687,7 +4685,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, node->new_bytenr != buf->start); drop_node_buffer(node); - extent_buffer_get(cow); + atomic_inc(&cow->refs); node->eb = cow; node->new_bytenr = cow->start; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f7d4e03f4c5d..21de630b0730 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -389,8 +389,7 @@ static struct full_stripe_lock *search_full_stripe_lock( * * Caller must ensure @cache is a RAID56 block group. */ -static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, - u64 bytenr) +static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr) { u64 ret; @@ -404,8 +403,8 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, * round_down() can only handle power of 2, while RAID56 full * stripe length can be 64KiB * n, so we need to manually round down. 
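 *
 * (Editorial aside, not part of the patch; a worked example with assumed
 *  numbers. With cache->start == 1M and full_stripe_len == 192K (64K stripe
 *  length times 3 data stripes), a bytenr of 1M + 500K maps to
 *      div64_u64(500K, 192K) * 192K + 1M == 2 * 192K + 1M == 1M + 384K,
 *  i.e. the start of the full stripe containing bytenr; round_down() could
 *  not compute this because 192K is not a power of two.)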
*/ - ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) * - cache->full_stripe_len + cache->key.objectid; + ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) * + cache->full_stripe_len + cache->start; return ret; } @@ -423,7 +422,7 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool *locked_ret) { - struct btrfs_block_group_cache *bg_cache; + struct btrfs_block_group *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *existing; u64 fstripe_start; @@ -470,7 +469,7 @@ out: static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, bool locked) { - struct btrfs_block_group_cache *bg_cache; + struct btrfs_block_group *bg_cache; struct btrfs_full_stripe_locks_tree *locks_root; struct full_stripe_lock *fstripe_lock; u64 fstripe_start; @@ -598,8 +597,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( sbio->index = i; sbio->sctx = sctx; sbio->page_count = 0; - btrfs_init_work(&sbio->work, btrfs_scrub_helper, - scrub_bio_end_io_worker, NULL, NULL); + btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, + NULL); if (i != SCRUB_BIOS_PER_SCTX - 1) sctx->bios[i]->next_free = i + 1; @@ -1720,8 +1719,7 @@ static void scrub_wr_bio_end_io(struct bio *bio) sbio->status = bio->bi_status; sbio->bio = bio; - btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, - scrub_wr_bio_end_io_worker, NULL, NULL); + btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); } @@ -2149,14 +2147,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work) scrub_write_block_to_dev_replace(sblock); } - scrub_block_put(sblock); - if (sctx->is_dev_replace && sctx->flush_all_writes) { mutex_lock(&sctx->wr_lock); scrub_wr_submit(sctx); mutex_unlock(&sctx->wr_lock); } + scrub_block_put(sblock); scrub_pending_bio_dec(sctx); } @@ -2204,8 +2201,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock) raid56_add_scrub_pages(rbio, spage->page, spage->logical); } - btrfs_init_work(&sblock->work, btrfs_scrub_helper, - scrub_missing_raid56_worker, NULL, NULL); + btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL); scrub_block_get(sblock); scrub_pending_bio_inc(sctx); raid56_submit_missing_rbio(rbio); @@ -2743,8 +2739,8 @@ static void scrub_parity_bio_endio(struct bio *bio) bio_put(bio); - btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, - scrub_parity_bio_endio_worker, NULL, NULL); + btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL, + NULL); btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); } @@ -3420,7 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 chunk_offset, u64 length, u64 dev_offset, - struct btrfs_block_group_cache *cache) + struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct extent_map_tree *map_tree = &fs_info->mapping_tree; @@ -3484,7 +3480,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, struct extent_buffer *l; struct btrfs_key key; struct btrfs_key found_key; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; path = btrfs_alloc_path(); @@ -3563,46 +3559,26 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); - ret = 
btrfs_inc_block_group_ro(cache); - if (!ret && sctx->is_dev_replace) { - /* - * If we are doing a device replace wait for any tasks - * that started delalloc right before we set the block - * group to RO mode, as they might have just allocated - * an extent from it or decided they could do a nocow - * write. And if any such tasks did that, wait for their - * ordered extents to complete and then commit the - * current transaction, so that we can later see the new - * extent items in the extent tree - the ordered extents - * create delayed data references (for cow writes) when - * they complete, which will be run and insert the - * corresponding extent items into the extent tree when - * we commit the transaction they used when running - * inode.c:btrfs_finish_ordered_io(). We later use - * the commit root of the extent tree to find extents - * to copy from the srcdev into the tgtdev, and we don't - * want to miss any new extents. - */ - btrfs_wait_block_group_reservations(cache); - btrfs_wait_nocow_writers(cache); - ret = btrfs_wait_ordered_roots(fs_info, U64_MAX, - cache->key.objectid, - cache->key.offset); - if (ret > 0) { - struct btrfs_trans_handle *trans; - - trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) - ret = PTR_ERR(trans); - else - ret = btrfs_commit_transaction(trans); - if (ret) { - scrub_pause_off(fs_info); - btrfs_put_block_group(cache); - break; - } - } - } + + /* + * Don't do chunk preallocation for scrub. + * + * This is especially important for SYSTEM bgs, or we can hit + * -EFBIG from btrfs_finish_chunk_alloc() like: + * 1. The only SYSTEM bg is marked RO. + * Since SYSTEM bg is small, that's pretty common. + * 2. New SYSTEM bg will be allocated + * Because the regular version will allocate a new chunk. + * 3. New SYSTEM bg is empty and will get cleaned up + * Before cleanup really happens, it's marked RO again. + * 4. Empty SYSTEM bg gets scrubbed + * We go back to 2. + * + * This can easily boost the number of SYSTEM chunks if the cleaner + * thread can't be triggered fast enough, and use up all the space + * of btrfs_super_block::sys_chunk_array + */ + ret = btrfs_inc_block_group_ro(cache, false); scrub_pause_off(fs_info); if (ret == 0) { @@ -3623,7 +3599,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, break; } - down_write(&fs_info->dev_replace.rwsem); + down_write(&dev_replace->rwsem); dev_replace->cursor_right = found_key.offset + length; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; @@ -3664,10 +3640,10 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_pause_off(fs_info); - down_write(&fs_info->dev_replace.rwsem); + down_write(&dev_replace->rwsem); dev_replace->cursor_left = dev_replace->cursor_right; dev_replace->item_needs_writeback = 1; - up_write(&fs_info->dev_replace.rwsem); + up_write(&dev_replace->rwsem); if (ro_set) btrfs_dec_block_group_ro(cache); @@ -3681,7 +3657,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, */ spin_lock(&cache->lock); if (!cache->removed && !cache->ro && cache->reserved == 0 && - btrfs_block_group_used(&cache->item) == 0) { + cache->used == 0) { spin_unlock(&cache->lock); btrfs_mark_bg_unused(cache); } else { diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 123ac54af071..ae2db5eb1549 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -25,6 +25,14 @@ #include "compression.h" /* + * Maximum number of references an extent can have in order for us to attempt to + * issue clone operations instead of write operations.
This currently exists to + * avoid hitting limitations of the backreference walking code (taking a lot of + * time and using too much memory for extents with large number of references). + */ +#define SEND_MAX_EXTENT_REFS 64 + +/* * A fs_path is a helper to dynamically build path names with unknown size. * It reallocates the internal buffer on demand. * It allows fast adding of path elements on the right side (normal path) and @@ -1248,12 +1256,20 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) */ if (found->root == bctx->sctx->send_root) { /* - * TODO for the moment we don't accept clones from the inode - * that is currently send. We may change this when - * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same - * file. + * If the source inode was not yet processed we can't issue a + * clone operation, as the source extent does not exist yet at + * the destination of the stream. */ - if (ino >= bctx->cur_objectid) + if (ino > bctx->cur_objectid) + return 0; + /* + * We clone from the inode currently being sent as long as the + * source extent is already processed, otherwise we could try + * to clone from an extent that does not exist yet at the + * destination of the stream. + */ + if (ino == bctx->cur_objectid && + offset >= bctx->sctx->cur_inode_next_write_offset) return 0; } @@ -1302,6 +1318,7 @@ static int find_extent_clone(struct send_ctx *sctx, struct clone_root *cur_clone_root; struct btrfs_key found_key; struct btrfs_path *tmp_path; + struct btrfs_extent_item *ei; int compressed; u32 i; @@ -1349,7 +1366,6 @@ static int find_extent_clone(struct send_ctx *sctx, ret = extent_from_logical(fs_info, disk_byte, tmp_path, &found_key, &flags); up_read(&fs_info->commit_root_sem); - btrfs_release_path(tmp_path); if (ret < 0) goto out; @@ -1358,6 +1374,21 @@ static int find_extent_clone(struct send_ctx *sctx, goto out; } + ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0], + struct btrfs_extent_item); + /* + * Backreference walking (iterate_extent_inodes() below) is currently + * too expensive when an extent has a large number of references, both + * in time spent and used memory. So for now just fallback to write + * operations instead of clone operations when an extent has more than + * a certain amount of references. + */ + if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) { + ret = -ENOENT; + goto out; + } + btrfs_release_path(tmp_path); + /* * Setup the clone roots. 
*/ @@ -4779,7 +4810,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); if (IS_ERR(inode)) return PTR_ERR(inode); diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 98dc092a905e..f09aa6ee9113 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -10,7 +10,7 @@ #include "transaction.h" #include "block-group.h" -u64 btrfs_space_info_used(struct btrfs_space_info *s_info, +u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, bool may_use_included) { ASSERT(s_info); @@ -58,7 +58,6 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags) spin_lock_init(&space_info->lock); space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; - init_waitqueue_head(&space_info->wait); INIT_LIST_HEAD(&space_info->ro_bgs); INIT_LIST_HEAD(&space_info->tickets); INIT_LIST_HEAD(&space_info->priority_tickets); @@ -285,7 +284,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, struct btrfs_space_info *info, u64 bytes, int dump_block_groups) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; int index = 0; spin_lock(&info->lock); @@ -301,8 +300,7 @@ again: spin_lock(&cache->lock); btrfs_info(fs_info, "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s", - cache->key.objectid, cache->key.offset, - btrfs_block_group_used(&cache->item), cache->pinned, + cache->start, cache->length, cache->used, cache->pinned, cache->reserved, cache->ro ? "[readonly]" : ""); btrfs_dump_free_space(cache, bytes); spin_unlock(&cache->lock); @@ -893,6 +891,15 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, while (ticket->bytes > 0 && ticket->error == 0) { ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); if (ret) { + /* + * Delete us from the list. After we unlock the space + * info, we don't want the async reclaim job to reserve + * space for this ticket. If that would happen, then the + * ticket's task would not know that space was reserved + * despite getting an error, resulting in a space leak + * (bytes_may_use counter of our space_info). + */ + list_del_init(&ticket->list); ticket->error = -EINTR; break; } @@ -945,12 +952,24 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, spin_lock(&space_info->lock); ret = ticket->error; if (ticket->bytes || ticket->error) { + /* + * Need to delete here for priority tickets. For regular tickets + * either the async reclaim job deletes the ticket from the list + * or we delete it ourselves at wait_reserve_ticket(). + */ list_del_init(&ticket->list); if (!ret) ret = -ENOSPC; } spin_unlock(&space_info->lock); ASSERT(list_empty(&ticket->list)); + /* + * Check that we can't have an error set if the reservation succeeded, + * as that would confuse tasks and lead them to error out without + * releasing reserved space (if an error happens the expectation is that + * space wasn't reserved at all).
+ */ + ASSERT(!(ticket->bytes == 0 && ticket->error)); return ret; } diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 8867e84aa33d..1a349e3f9cc1 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -63,7 +63,6 @@ struct btrfs_space_info { struct rw_semaphore groups_sem; /* for block groups in our same type */ struct list_head block_groups[BTRFS_NR_RAID_TYPES]; - wait_queue_head_t wait; struct kobject kobj; struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES]; @@ -116,7 +115,7 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags, struct btrfs_space_info **space_info); struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, u64 flags); -u64 btrfs_space_info_used(struct btrfs_space_info *s_info, +u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, bool may_use_included); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 1b151af25772..a98c3c71fc54 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -66,7 +66,7 @@ static struct file_system_type btrfs_root_fs_type; static int btrfs_remount(struct super_block *sb, int *flags, char *data); -const char *btrfs_decode_error(int errno) +const char * __attribute_const__ btrfs_decode_error(int errno) { char *errstr = "unknown"; @@ -187,7 +187,7 @@ static struct ratelimit_state printk_limits[] = { RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100), }; -void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) +void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) { char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0"; struct va_format vaf; @@ -1219,7 +1219,7 @@ static int btrfs_fill_super(struct super_block *sb, key.objectid = BTRFS_FIRST_FREE_OBJECTID; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL); + inode = btrfs_iget(sb, &key, fs_info->fs_root); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto fail_close; @@ -1669,7 +1669,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, btrfs_workqueue_set_max(fs_info->workers, new_pool_size); btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size); - btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size); @@ -1936,6 +1935,10 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, num_stripes = nr_devices; else if (type & BTRFS_BLOCK_GROUP_RAID1) num_stripes = 2; + else if (type & BTRFS_BLOCK_GROUP_RAID1C3) + num_stripes = 3; + else if (type & BTRFS_BLOCK_GROUP_RAID1C4) + num_stripes = 4; else if (type & BTRFS_BLOCK_GROUP_RAID10) num_stripes = 4; @@ -2022,7 +2025,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); struct btrfs_super_block *disk_super = fs_info->super_copy; - struct list_head *head = &fs_info->space_info; struct btrfs_space_info *found; u64 total_used = 0; u64 total_free_data = 0; @@ -2036,7 +2038,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) int mixed = 0; rcu_read_lock(); - list_for_each_entry_rcu(found, head, list) { + list_for_each_entry_rcu(found, &fs_info->space_info, list) { if (found->flags & 
BTRFS_BLOCK_GROUP_DATA) { int i; @@ -2360,10 +2362,14 @@ static int __init init_btrfs_fs(void) if (err) goto free_cachep; - err = extent_map_init(); + err = extent_state_cache_init(); if (err) goto free_extent_io; + err = extent_map_init(); + if (err) + goto free_extent_state_cache; + err = ordered_data_init(); if (err) goto free_extent_map; @@ -2422,6 +2428,8 @@ free_ordered_data: ordered_data_exit(); free_extent_map: extent_map_exit(); +free_extent_state_cache: + extent_state_cache_exit(); free_extent_io: extent_io_exit(); free_cachep: @@ -2442,6 +2450,7 @@ static void __exit exit_btrfs_fs(void) btrfs_prelim_ref_exit(); ordered_data_exit(); extent_map_exit(); + extent_state_cache_exit(); extent_io_exit(); btrfs_interface_exit(); btrfs_end_io_wq_exit(); @@ -2456,3 +2465,6 @@ module_exit(exit_btrfs_fs) MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crc32c"); +MODULE_SOFTDEP("pre: xxhash64"); +MODULE_SOFTDEP("pre: sha256"); +MODULE_SOFTDEP("pre: blake2b-256"); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index f6d3c80f2e28..5ebbe8a5ee76 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -9,6 +9,7 @@ #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/bug.h> +#include <crypto/hash.h> #include "ctree.h" #include "disk-io.h" @@ -258,6 +259,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA); BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES); BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID); BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE); +BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34); static struct attribute *btrfs_supported_feature_attrs[] = { BTRFS_FEAT_ATTR_PTR(mixed_backref), @@ -272,6 +274,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = { BTRFS_FEAT_ATTR_PTR(no_holes), BTRFS_FEAT_ATTR_PTR(metadata_uuid), BTRFS_FEAT_ATTR_PTR(free_space_tree), + BTRFS_FEAT_ATTR_PTR(raid1c34), NULL }; @@ -295,8 +298,30 @@ static ssize_t rmdir_subvol_show(struct kobject *kobj, } BTRFS_ATTR(static_feature, rmdir_subvol, rmdir_subvol_show); +static ssize_t supported_checksums_show(struct kobject *kobj, + struct kobj_attribute *a, char *buf) +{ + ssize_t ret = 0; + int i; + + for (i = 0; i < btrfs_get_num_csums(); i++) { + /* + * This "trick" only works as long as 'enum btrfs_csum_type' has + * no holes in it + */ + ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s%s", + (i == 0 ? 
"" : " "), btrfs_super_csum_name(i)); + + } + + ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); + return ret; +} +BTRFS_ATTR(static_feature, supported_checksums, supported_checksums_show); + static struct attribute *btrfs_supported_static_feature_attrs[] = { BTRFS_ATTR_PTR(static_feature, rmdir_subvol), + BTRFS_ATTR_PTR(static_feature, supported_checksums), NULL }; @@ -372,16 +397,16 @@ static ssize_t raid_bytes_show(struct kobject *kobj, { struct btrfs_space_info *sinfo = to_space_info(kobj->parent); - struct btrfs_block_group_cache *block_group; + struct btrfs_block_group *block_group; int index = btrfs_bg_flags_to_raid_index(to_raid_kobj(kobj)->flags); u64 val = 0; down_read(&sinfo->groups_sem); list_for_each_entry(block_group, &sinfo->block_groups[index], list) { if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes)) - val += block_group->key.offset; + val += block_group->length; else - val += btrfs_block_group_used(&block_group->item); + val += block_group->used; } up_read(&sinfo->groups_sem); return snprintf(buf, PAGE_SIZE, "%llu\n", val); @@ -604,6 +629,19 @@ static ssize_t btrfs_metadata_uuid_show(struct kobject *kobj, BTRFS_ATTR(, metadata_uuid, btrfs_metadata_uuid_show); +static ssize_t btrfs_checksum_show(struct kobject *kobj, + struct kobj_attribute *a, char *buf) +{ + struct btrfs_fs_info *fs_info = to_fs_info(kobj); + u16 csum_type = btrfs_super_csum_type(fs_info->super_copy); + + return snprintf(buf, PAGE_SIZE, "%s (%s)\n", + btrfs_super_csum_name(csum_type), + crypto_shash_driver_name(fs_info->csum_shash)); +} + +BTRFS_ATTR(, checksum, btrfs_checksum_show); + static const struct attribute *btrfs_attrs[] = { BTRFS_ATTR_PTR(, label), BTRFS_ATTR_PTR(, nodesize), @@ -611,6 +649,7 @@ static const struct attribute *btrfs_attrs[] = { BTRFS_ATTR_PTR(, clone_alignment), BTRFS_ATTR_PTR(, quota_override), BTRFS_ATTR_PTR(, metadata_uuid), + BTRFS_ATTR_PTR(, checksum), NULL, }; @@ -822,7 +861,7 @@ static void init_feature_attrs(void) * Create a sysfs entry for a given block group type at path * /sys/fs/btrfs/UUID/allocation/data/TYPE */ -void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache) +void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_space_info *space_info = cache->space_info; diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 610e9c36a94c..e10c3adfc30f 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -32,7 +32,7 @@ int __init btrfs_init_sysfs(void); void __cold btrfs_exit_sysfs(void); int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info); void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); -void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache); +void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache); int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info); void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info); diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 99fe9bf3fdac..a7aca4141788 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -202,11 +202,11 @@ void btrfs_free_dummy_root(struct btrfs_root *root) kfree(root); } -struct btrfs_block_group_cache * +struct btrfs_block_group * btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; cache = kzalloc(sizeof(*cache), GFP_KERNEL); 
if (!cache) @@ -218,9 +218,8 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, return NULL; } - cache->key.objectid = 0; - cache->key.offset = length; - cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + cache->start = 0; + cache->length = length; cache->full_stripe_len = fs_info->sectorsize; cache->fs_info = fs_info; @@ -233,7 +232,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, return cache; } -void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache) +void btrfs_free_dummy_block_group(struct btrfs_block_group *cache) { if (!cache) return; diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h index ee277bbd939b..9e52527357d8 100644 --- a/fs/btrfs/tests/btrfs-tests.h +++ b/fs/btrfs/tests/btrfs-tests.h @@ -41,9 +41,9 @@ struct inode *btrfs_new_test_inode(void); struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize); void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info); void btrfs_free_dummy_root(struct btrfs_root *root); -struct btrfs_block_group_cache * +struct btrfs_block_group * btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length); -void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache); +void btrfs_free_dummy_block_group(struct btrfs_block_group *cache); void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); #else diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index 43ec7060fcd2..aebdf23f0cdd 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c @@ -17,7 +17,7 @@ * entry and remove space from either end and the middle, and make sure we can * remove space that covers adjacent extent entries. */ -static int test_extents(struct btrfs_block_group_cache *cache) +static int test_extents(struct btrfs_block_group *cache) { int ret = 0; @@ -87,8 +87,7 @@ static int test_extents(struct btrfs_block_group_cache *cache) return 0; } -static int test_bitmaps(struct btrfs_block_group_cache *cache, - u32 sectorsize) +static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize) { u64 next_bitmap_offset; int ret; @@ -156,7 +155,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache, } /* This is the high grade jackassery */ -static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache, +static int test_bitmaps_and_extents(struct btrfs_block_group *cache, u32 sectorsize) { u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); @@ -331,7 +330,7 @@ static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl, /* Used by test_steal_space_from_bitmap_to_extent(). */ static int -check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache, +check_num_extents_and_bitmaps(const struct btrfs_block_group *cache, const int num_extents, const int num_bitmaps) { @@ -351,7 +350,7 @@ check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache, } /* Used by test_steal_space_from_bitmap_to_extent(). */ -static int check_cache_empty(struct btrfs_block_group_cache *cache) +static int check_cache_empty(struct btrfs_block_group *cache) { u64 offset; u64 max_extent_size; @@ -393,7 +392,7 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache) * requests. 
*/ static int -test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache, +test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache, u32 sectorsize) { int ret; @@ -829,7 +828,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache, int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info; - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; struct btrfs_root *root = NULL; int ret = -ENOMEM; diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index bc92df977630..1a846bf6e197 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -18,7 +18,7 @@ struct free_space_extent { static int __check_free_space_extents(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, const struct free_space_extent * const extents, unsigned int num_extents) @@ -48,7 +48,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans, if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { if (path->slots[0] != 0) goto invalid; - end = cache->key.objectid + cache->key.offset; + end = cache->start + cache->length; i = 0; while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); @@ -107,7 +107,7 @@ invalid: static int check_free_space_extents(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, const struct free_space_extent * const extents, unsigned int num_extents) @@ -150,12 +150,12 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans, static int test_empty_block_group(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, cache->key.offset}, + {cache->start, cache->length}, }; return check_free_space_extents(trans, fs_info, cache, path, @@ -164,7 +164,7 @@ static int test_empty_block_group(struct btrfs_trans_handle *trans, static int test_remove_all(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { @@ -172,8 +172,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans, int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, + cache->length); if (ret) { test_err("could not remove free space"); return ret; @@ -185,18 +185,17 @@ static int test_remove_all(struct btrfs_trans_handle *trans, static int test_remove_beginning(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid + alignment, - cache->key.offset - alignment}, + {cache->start + alignment, cache->length - alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, alignment); + cache->start, alignment); if (ret) { test_err("could not remove free space"); return ret; @@ -209,19 +208,18 @@ static int 
test_remove_beginning(struct btrfs_trans_handle *trans, static int test_remove_end(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, cache->key.offset - alignment}, + {cache->start, cache->length - alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid + - cache->key.offset - alignment, - alignment); + cache->start + cache->length - alignment, + alignment); if (ret) { test_err("could not remove free space"); return ret; @@ -233,19 +231,18 @@ static int test_remove_end(struct btrfs_trans_handle *trans, static int test_remove_middle(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, alignment}, - {cache->key.objectid + 2 * alignment, - cache->key.offset - 2 * alignment}, + {cache->start, alignment}, + {cache->start + 2 * alignment, cache->length - 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid + alignment, + cache->start + alignment, alignment); if (ret) { test_err("could not remove free space"); @@ -258,24 +255,23 @@ static int test_remove_middle(struct btrfs_trans_handle *trans, static int test_merge_left(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, 2 * alignment}, + {cache->start, 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, + ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); @@ -283,7 +279,7 @@ static int test_merge_left(struct btrfs_trans_handle *trans, } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + alignment, + cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); @@ -296,25 +292,24 @@ static int test_merge_left(struct btrfs_trans_handle *trans, static int test_merge_right(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid + alignment, 2 * alignment}, + {cache->start + alignment, 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + 2 * alignment, + cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); @@ -322,7 +317,7 @@ static int test_merge_right(struct btrfs_trans_handle *trans, } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + alignment, + cache->start + alignment, alignment); if (ret) { 
test_err("could not add free space"); @@ -335,24 +330,23 @@ static int test_merge_right(struct btrfs_trans_handle *trans, static int test_merge_both(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, 3 * alignment}, + {cache->start, 3 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, + ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); @@ -360,16 +354,14 @@ static int test_merge_both(struct btrfs_trans_handle *trans, } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + 2 * alignment, - alignment); + cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + alignment, - alignment); + cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); return ret; @@ -381,26 +373,25 @@ static int test_merge_both(struct btrfs_trans_handle *trans, static int test_merge_none(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache, + struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { - {cache->key.objectid, alignment}, - {cache->key.objectid + 2 * alignment, alignment}, - {cache->key.objectid + 4 * alignment, alignment}, + {cache->start, alignment}, + {cache->start + 2 * alignment, alignment}, + {cache->start + 4 * alignment, alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, - cache->key.objectid, - cache->key.offset); + cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } - ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, + ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); @@ -408,16 +399,14 @@ static int test_merge_none(struct btrfs_trans_handle *trans, } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + 4 * alignment, - alignment); + cache->start + 4 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, - cache->key.objectid + 2 * alignment, - alignment); + cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; @@ -429,7 +418,7 @@ static int test_merge_none(struct btrfs_trans_handle *trans, typedef int (*test_func_t)(struct btrfs_trans_handle *, struct btrfs_fs_info *, - struct btrfs_block_group_cache *, + struct btrfs_block_group *, struct btrfs_path *, u32 alignment); @@ -438,7 +427,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, { struct btrfs_fs_info *fs_info; struct btrfs_root *root = NULL; - struct btrfs_block_group_cache *cache = NULL; + struct btrfs_block_group *cache = NULL; struct btrfs_trans_handle trans; struct btrfs_path *path = NULL; int ret; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 8624bdee8c5b..cfc08ef9b876 
100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -24,9 +24,79 @@ #define BTRFS_ROOT_TRANS_TAG 0 +/* + * Transaction states and transitions + * + * No running transaction (fs tree blocks are not modified) + * | + * | To next stage: + * | Call start_transaction() variants. Except btrfs_join_transaction_nostart(). + * V + * Transaction N [[TRANS_STATE_RUNNING]] + * | + * | New trans handles can be attached to transaction N by calling all + * | start_transaction() variants. + * | + * | To next stage: + * | Call btrfs_commit_transaction() on any trans handle attached to + * | transaction N + * V + * Transaction N [[TRANS_STATE_COMMIT_START]] + * | + * | Will wait for previous running transaction to completely finish if there + * | is one + * | + * | Then one of the following happens: + * | - Wait for all other trans handle holders to release. + * | The btrfs_commit_transaction() caller will do the commit work. + * | - Wait for current transaction to be committed by others. + * | Other btrfs_commit_transaction() caller will do the commit work. + * | + * | At this stage, only btrfs_join_transaction*() variants can attach + * | to this running transaction. + * | All other variants will wait for current one to finish and attach to + * | transaction N+1. + * | + * | To next stage: + * | Caller is chosen to commit transaction N, and all other trans handles + * | have been released. + * V + * Transaction N [[TRANS_STATE_COMMIT_DOING]] + * | + * | The heavy lifting transaction work is started. + * | From running delayed refs (modifying extent tree) to creating pending + * | snapshots, running qgroups. + * | In short, modify supporting trees to reflect modifications of subvolume + * | trees. + * | + * | At this stage, all start_transaction() calls will wait for this + * | transaction to finish and attach to transaction N+1. + * | + * | To next stage: + * | Until all supporting trees are updated. + * V + * Transaction N [[TRANS_STATE_UNBLOCKED]] + * | Transaction N+1 + * | All needed trees are modified, thus we only [[TRANS_STATE_RUNNING]] + * | need to write them back to disk and update | + * | super blocks. | + * | | + * | At this stage, new transaction is allowed to | + * | start. | + * | All new start_transaction() calls will be | + * | attached to transid N+1. | + * | | + * | To next stage: | + * | Until all tree blocks and super blocks are | + * | written to block devices | + * V | + * Transaction N [[TRANS_STATE_COMPLETED]] V + * All tree blocks and super blocks are written. Transaction N+1 + * This transaction is finished and all its [[TRANS_STATE_COMMIT_START]] + * data structures will be cleaned up. | Life goes on + */ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { [TRANS_STATE_RUNNING] = 0U, - [TRANS_STATE_BLOCKED] = __TRANS_START, [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | __TRANS_ATTACH | @@ -63,10 +133,10 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction) * discard the physical locations of the block groups.
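 *
 * (Editorial aside, not part of the patch: with TRANS_STATE_BLOCKED removed,
 *  "blocked" is no longer a distinct state but a condition derived from the
 *  remaining states, roughly
 *
 *      return trans->state >= TRANS_STATE_COMMIT_START &&
 *             trans->state < TRANS_STATE_UNBLOCKED && !trans->aborted;
 *
 *  which matches the is_transaction_blocked() hunk further down.)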
*/ while (!list_empty(&transaction->deleted_bgs)) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; cache = list_first_entry(&transaction->deleted_bgs, - struct btrfs_block_group_cache, + struct btrfs_block_group, bg_list); list_del_init(&cache->bg_list); btrfs_put_block_group_trimming(cache); @@ -383,7 +453,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, static inline int is_transaction_blocked(struct btrfs_transaction *trans) { - return (trans->state >= TRANS_STATE_BLOCKED && + return (trans->state >= TRANS_STATE_COMMIT_START && trans->state < TRANS_STATE_UNBLOCKED && !trans->aborted); } @@ -570,7 +640,7 @@ again: INIT_LIST_HEAD(&h->new_bgs); smp_mb(); - if (cur_trans->state >= TRANS_STATE_BLOCKED && + if (cur_trans->state >= TRANS_STATE_COMMIT_START && may_wait_transaction(fs_info, type)) { current->journal_info = h; btrfs_commit_transaction(h); @@ -659,7 +729,7 @@ struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) true); } -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root) +struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN_NOLOCK, BTRFS_RESERVE_NO_FLUSH, true); @@ -798,7 +868,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans) struct btrfs_transaction *cur_trans = trans->transaction; smp_mb(); - if (cur_trans->state >= TRANS_STATE_BLOCKED || + if (cur_trans->state >= TRANS_STATE_COMMIT_START || cur_trans->delayed_refs.flushing) return 1; @@ -831,7 +901,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_transaction *cur_trans = trans->transaction; - int lock = (trans->type != TRANS_JOIN_NOLOCK); int err = 0; if (refcount_read(&trans->use_count) > 1) { @@ -847,13 +916,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_chunk_metadata(trans); - if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { - if (throttle) - return btrfs_commit_transaction(trans); - else - wake_up_process(info->transaction_kthread); - } - if (trans->type & __TRANS_FREEZABLE) sb_end_intwrite(info->sb); @@ -990,7 +1052,7 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info, return werr; } -int btrfs_wait_extents(struct btrfs_fs_info *fs_info, +static int btrfs_wait_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages) { bool errors = false; @@ -1875,7 +1937,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err) static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *block_group, *tmp; + struct btrfs_block_group *block_group, *tmp; list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { btrfs_delayed_refs_rsv_release(fs_info, 1); @@ -1949,6 +2011,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) struct btrfs_transaction *prev_trans = NULL; int ret; + ASSERT(refcount_read(&trans->use_count) == 1); + /* Stop the commit early if ->aborted is set */ if (unlikely(READ_ONCE(cur_trans->aborted))) { ret = cur_trans->aborted; diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 2c5a6f6e5bb0..49f7196368f5 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -13,7 +13,6 @@ enum btrfs_trans_state { TRANS_STATE_RUNNING, - TRANS_STATE_BLOCKED, 
TRANS_STATE_COMMIT_START, TRANS_STATE_COMMIT_DOING, TRANS_STATE_UNBLOCKED, @@ -184,7 +183,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( unsigned int num_items, int min_factor); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction_barrier( @@ -218,8 +217,6 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages, int mark); -int btrfs_wait_extents(struct btrfs_fs_info *fs_info, - struct extent_io_tree *dirty_pages); int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark); int btrfs_transaction_blocked(struct btrfs_fs_info *info); int btrfs_transaction_in_commit(struct btrfs_fs_info *info); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 43e488f5d063..493d4d9e0f79 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -23,6 +23,7 @@ #include "disk-io.h" #include "compression.h" #include "volumes.h" +#include "misc.h" /* * Error message should follow the following format: @@ -124,6 +125,74 @@ static u64 file_extent_end(struct extent_buffer *leaf, return end; } +/* + * Customized report for dir_item, the only important new information is + * key->objectid, which represents the inode number + */ +__printf(3, 4) +__cold +static void dir_item_err(const struct extent_buffer *eb, int slot, + const char *fmt, ...) +{ + const struct btrfs_fs_info *fs_info = eb->fs_info; + struct btrfs_key key; + struct va_format vaf; + va_list args; + + btrfs_item_key_to_cpu(eb, &key, slot); + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + btrfs_crit(fs_info, + "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV", + btrfs_header_level(eb) == 0 ? "leaf" : "node", + btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, + key.objectid, &vaf); + va_end(args); +} + +/* + * This function checks prev_key->objectid, to ensure the current key and + * prev_key share the same objectid (the inode number). + * + * This is to detect missing INODE_ITEM in subvolume trees. + * + * Return true if everything is OK or we don't need to check. + * Return false if anything is wrong. + */ +static bool check_prev_ino(struct extent_buffer *leaf, + struct btrfs_key *key, int slot, + struct btrfs_key *prev_key) +{ + /* No prev key, skip check */ + if (slot == 0) + return true; + + /* Only these key->types need to be checked */ + ASSERT(key->type == BTRFS_XATTR_ITEM_KEY || + key->type == BTRFS_INODE_REF_KEY || + key->type == BTRFS_DIR_INDEX_KEY || + key->type == BTRFS_DIR_ITEM_KEY || + key->type == BTRFS_EXTENT_DATA_KEY); + + /* + * Only subvolume trees along with their reloc trees need this check. + * Things like the log tree don't follow this ino requirement.
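 *
 * (Editorial example, not part of the patch: items for one inode share an
 *  objectid, e.g. (257 INODE_ITEM 0) (257 INODE_REF 256) (257 EXTENT_DATA 0).
 *  If a leaf holds (258 EXTENT_DATA 0) at slot > 0 and the previous item is
 *  (257 EXTENT_DATA 4096), the objectids differ even though key ordering
 *  would have placed (258 INODE_ITEM 0) in between, so that INODE_ITEM must
 *  be missing, which is the corruption this check reports.)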
+ */ + if (!is_fstree(btrfs_header_owner(leaf))) + return true; + + if (key->objectid == prev_key->objectid) + return true; + + /* Error found */ + dir_item_err(leaf, slot, + "invalid previous key objectid, have %llu expect %llu", + prev_key->objectid, key->objectid); + return false; +} static int check_extent_data_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot, struct btrfs_key *prev_key) @@ -141,13 +210,33 @@ static int check_extent_data_item(struct extent_buffer *leaf, return -EUCLEAN; } + /* + * Previous key must have the same key->objectid (ino). + * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA. + * But if objectids mismatch, it means we have a missing + * INODE_ITEM. + */ + if (!check_prev_ino(leaf, key, slot, prev_key)) + return -EUCLEAN; + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) { + /* + * Make sure the item contains at least inline header, so the file + * extent type is not some garbage. + */ + if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) { + file_extent_err(leaf, slot, + "invalid item size, have %u expect [%lu, %u)", + item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START, + SZ_4K); + return -EUCLEAN; + } + if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) { file_extent_err(leaf, slot, "invalid type for file extent, have %u expect range [0, %u]", btrfs_file_extent_type(leaf, fi), - BTRFS_FILE_EXTENT_TYPES); + BTRFS_NR_FILE_EXTENT_TYPES - 1); return -EUCLEAN; } @@ -155,11 +244,11 @@ static int check_extent_data_item(struct extent_buffer *leaf, * Support for new compression/encryption must introduce incompat flag, * and must be caught in open_ctree(). */ - if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) { + if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) { file_extent_err(leaf, slot, "invalid compression for file extent, have %u expect range [0, %u]", btrfs_file_extent_compression(leaf, fi), - BTRFS_COMPRESS_TYPES); + BTRFS_NR_COMPRESS_TYPES - 1); return -EUCLEAN; } if (btrfs_file_extent_encryption(leaf, fi)) { @@ -270,42 +359,17 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key, return 0; } -/* - * Customized reported for dir_item, only important new info is key->objectid, - * which represents inode number - */ -__printf(3, 4) -__cold -static void dir_item_err(const struct extent_buffer *eb, int slot, - const char *fmt, ...) -{ - const struct btrfs_fs_info *fs_info = eb->fs_info; - struct btrfs_key key; - struct va_format vaf; - va_list args; - - btrfs_item_key_to_cpu(eb, &key, slot); - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - btrfs_crit(fs_info, - "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV", - btrfs_header_level(eb) == 0 ? 
"leaf" : "node", - btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, - key.objectid, &vaf); - va_end(args); -} - static int check_dir_item(struct extent_buffer *leaf, - struct btrfs_key *key, int slot) + struct btrfs_key *key, struct btrfs_key *prev_key, + int slot) { struct btrfs_fs_info *fs_info = leaf->fs_info; struct btrfs_dir_item *di; u32 item_size = btrfs_item_size_nr(leaf, slot); u32 cur = 0; + if (!check_prev_ino(leaf, key, slot, prev_key)) + return -EUCLEAN; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); while (cur < item_size) { u32 name_len; @@ -459,23 +523,23 @@ static int check_block_group_item(struct extent_buffer *leaf, read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), sizeof(bgi)); - if (btrfs_block_group_chunk_objectid(&bgi) != + if (btrfs_stack_block_group_chunk_objectid(&bgi) != BTRFS_FIRST_CHUNK_TREE_OBJECTID) { block_group_err(leaf, slot, "invalid block group chunk objectid, have %llu expect %llu", - btrfs_block_group_chunk_objectid(&bgi), + btrfs_stack_block_group_chunk_objectid(&bgi), BTRFS_FIRST_CHUNK_TREE_OBJECTID); return -EUCLEAN; } - if (btrfs_block_group_used(&bgi) > key->offset) { + if (btrfs_stack_block_group_used(&bgi) > key->offset) { block_group_err(leaf, slot, "invalid block group used, have %llu expect [0, %llu)", - btrfs_block_group_used(&bgi), key->offset); + btrfs_stack_block_group_used(&bgi), key->offset); return -EUCLEAN; } - flags = btrfs_block_group_flags(&bgi); + flags = btrfs_stack_block_group_flags(&bgi); if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) { block_group_err(leaf, slot, "invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set", @@ -609,7 +673,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, return -EUCLEAN; } - if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && + if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) { chunk_err(leaf, chunk, logical, "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set", @@ -686,9 +750,7 @@ static void dev_item_err(const struct extent_buffer *eb, int slot, static int check_dev_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot) { - struct btrfs_fs_info *fs_info = leaf->fs_info; struct btrfs_dev_item *ditem; - u64 max_devid = max(BTRFS_MAX_DEVS(fs_info), BTRFS_MAX_DEVS_SYS_CHUNK); if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) { dev_item_err(leaf, slot, @@ -696,12 +758,6 @@ static int check_dev_item(struct extent_buffer *leaf, key->objectid, BTRFS_DEV_ITEMS_OBJECTID); return -EUCLEAN; } - if (key->offset > max_devid) { - dev_item_err(leaf, slot, - "invalid devid: has=%llu expect=[0, %llu]", - key->offset, max_devid); - return -EUCLEAN; - } ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); if (btrfs_device_id(leaf, ditem) != key->offset) { dev_item_err(leaf, slot, @@ -793,11 +849,11 @@ static int check_inode_item(struct extent_buffer *leaf, } /* - * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2, - * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG. - * Only needs to check BLK, LNK and SOCKS + * S_IFMT is not bit mapped so we can't completely rely on + * is_power_of_2/has_single_bit_set, but it can save us from checking + * FIFO/CHR/DIR/REG. 
Only needs to check BLK, LNK and SOCKS */ - if (!is_power_of_2(mode & S_IFMT)) { + if (!has_single_bit_set(mode & S_IFMT)) { if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) { inode_item_err(fs_info, leaf, slot, "invalid mode: has 0%o expect valid S_IF* bit(s)", @@ -1018,8 +1074,8 @@ static int check_extent_item(struct extent_buffer *leaf, btrfs_super_generation(fs_info->super_copy) + 1); return -EUCLEAN; } - if (!is_power_of_2(flags & (BTRFS_EXTENT_FLAG_DATA | - BTRFS_EXTENT_FLAG_TREE_BLOCK))) { + if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA | + BTRFS_EXTENT_FLAG_TREE_BLOCK))) { extent_err(leaf, slot, "invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx", flags, BTRFS_EXTENT_FLAG_DATA | @@ -1232,6 +1288,58 @@ static int check_extent_data_ref(struct extent_buffer *leaf, return 0; } +#define inode_ref_err(fs_info, eb, slot, fmt, args...) \ + inode_item_err(fs_info, eb, slot, fmt, ##args) +static int check_inode_ref(struct extent_buffer *leaf, + struct btrfs_key *key, struct btrfs_key *prev_key, + int slot) +{ + struct btrfs_inode_ref *iref; + unsigned long ptr; + unsigned long end; + + if (!check_prev_ino(leaf, key, slot, prev_key)) + return -EUCLEAN; + /* namelen can't be 0, so item_size == sizeof() is also invalid */ + if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) { + inode_ref_err(fs_info, leaf, slot, + "invalid item size, have %u expect (%zu, %u)", + btrfs_item_size_nr(leaf, slot), + sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); + return -EUCLEAN; + } + + ptr = btrfs_item_ptr_offset(leaf, slot); + end = ptr + btrfs_item_size_nr(leaf, slot); + while (ptr < end) { + u16 namelen; + + if (ptr + sizeof(iref) > end) { + inode_ref_err(fs_info, leaf, slot, + "inode ref overflow, ptr %lu end %lu inode_ref_size %zu", + ptr, end, sizeof(iref)); + return -EUCLEAN; + } + + iref = (struct btrfs_inode_ref *)ptr; + namelen = btrfs_inode_ref_name_len(leaf, iref); + if (ptr + sizeof(*iref) + namelen > end) { + inode_ref_err(fs_info, leaf, slot, + "inode ref overflow, ptr %lu end %lu namelen %u", + ptr, end, namelen); + return -EUCLEAN; + } + + /* + * NOTE: In theory we should record all found index numbers + * to find any duplicated indexes, but that will be too time + * consuming for inodes with too many hard links. + */ + ptr += sizeof(*iref) + namelen; + } + return 0; +} + /* * Common point to switch the item-specific validation. 
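The new check_inode_ref() above walks a variable-length sequence of (header, name) records and verifies that both the header and the name fit inside the item before advancing. A self-contained sketch of that bounds-checked walk, with the record simplified to a 2-byte length followed by the name bytes (little-endian host assumed for the literal) rather than the real struct btrfs_inode_ref:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool walk_refs(const uint8_t *item, size_t size)
{
        size_t ptr = 0;

        while (ptr < size) {
                uint16_t namelen;

                if (ptr + sizeof(namelen) > size)       /* header would overflow item */
                        return false;
                memcpy(&namelen, item + ptr, sizeof(namelen));
                if (ptr + sizeof(namelen) + namelen > size)     /* name would overflow */
                        return false;
                printf("name: %.*s\n", (int)namelen,
                       (const char *)(item + ptr + sizeof(namelen)));
                ptr += sizeof(namelen) + namelen;
        }
        return true;
}

int main(void)
{
        const uint8_t item[] = { 3, 0, 'f', 'o', 'o', 4, 0, 'f', 'i', 'l', 'e' };

        printf("%s\n", walk_refs(item, sizeof(item)) ? "valid" : "corrupt");
        return 0;
}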
*/ @@ -1252,7 +1360,10 @@ static int check_leaf_item(struct extent_buffer *leaf, case BTRFS_DIR_ITEM_KEY: case BTRFS_DIR_INDEX_KEY: case BTRFS_XATTR_ITEM_KEY: - ret = check_dir_item(leaf, key, slot); + ret = check_dir_item(leaf, key, prev_key, slot); + break; + case BTRFS_INODE_REF_KEY: + ret = check_inode_ref(leaf, key, prev_key, slot); break; case BTRFS_BLOCK_GROUP_ITEM_KEY: ret = check_block_group_item(leaf, key, slot); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 8a6cc600bf18..6f757361db53 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -559,7 +559,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root, key.objectid = objectid; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); + inode = btrfs_iget(root->fs_info->sb, &key, root); if (IS_ERR(inode)) inode = NULL; return inode; @@ -945,54 +945,32 @@ static noinline int backref_in_log(struct btrfs_root *log, const char *name, int namelen) { struct btrfs_path *path; - struct btrfs_inode_ref *ref; - unsigned long ptr; - unsigned long ptr_end; - unsigned long name_ptr; - int found_name_len; - int item_size; int ret; - int match = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(NULL, log, key, path, 0, 0); - if (ret != 0) + if (ret < 0) { goto out; - - ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); - - if (key->type == BTRFS_INODE_EXTREF_KEY) { - if (btrfs_find_name_in_ext_backref(path->nodes[0], - path->slots[0], - ref_objectid, - name, namelen)) - match = 1; - + } else if (ret == 1) { + ret = 0; goto out; } - item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); - ptr_end = ptr + item_size; - while (ptr < ptr_end) { - ref = (struct btrfs_inode_ref *)ptr; - found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); - if (found_name_len == namelen) { - name_ptr = (unsigned long)(ref + 1); - ret = memcmp_extent_buffer(path->nodes[0], name, - name_ptr, namelen); - if (ret == 0) { - match = 1; - goto out; - } - } - ptr = (unsigned long)(ref + 1) + found_name_len; - } + if (key->type == BTRFS_INODE_EXTREF_KEY) + ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], + path->slots[0], + ref_objectid, + name, namelen); + else + ret = !!btrfs_find_name_in_backref(path->nodes[0], + path->slots[0], + name, namelen); out: btrfs_free_path(path); - return match; + return ret; } static inline int __add_inode_ref(struct btrfs_trans_handle *trans, @@ -1050,10 +1028,13 @@ again: (unsigned long)(victim_ref + 1), victim_name_len); - if (!backref_in_log(log_root, &search_key, - parent_objectid, - victim_name, - victim_name_len)) { + ret = backref_in_log(log_root, &search_key, + parent_objectid, victim_name, + victim_name_len); + if (ret < 0) { + kfree(victim_name); + return ret; + } else if (!ret) { inc_nlink(&inode->vfs_inode); btrfs_release_path(path); @@ -1115,10 +1096,12 @@ again: search_key.offset = btrfs_extref_hash(parent_objectid, victim_name, victim_name_len); - ret = 0; - if (!backref_in_log(log_root, &search_key, - parent_objectid, victim_name, - victim_name_len)) { + ret = backref_in_log(log_root, &search_key, + parent_objectid, victim_name, + victim_name_len); + if (ret < 0) { + return ret; + } else if (!ret) { ret = -ENOENT; victim_parent = read_one_inode(root, parent_objectid); @@ -1885,30 +1868,6 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, } /* - * Return true if an inode reference exists in the log for the given name, - * inode and parent inode. 
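The backref_in_log() rewrite above changes its contract from "1 on match, 0 otherwise" to a tri-state result that the callers in the following hunks now handle explicitly: a negative errno when the search itself fails, 0 when the name is not in the log, 1 when it is, with the double negation squashing the find helpers' results into exactly 0 or 1. A hypothetical userspace illustration of that convention:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static const char *log_names[] = { "foo", "bar" };

/* stand-in for the find-name helpers: nonzero when the name is present */
static int find_name(const char *name)
{
        for (size_t i = 0; i < sizeof(log_names) / sizeof(log_names[0]); i++)
                if (!strcmp(log_names[i], name))
                        return 1;
        return 0;
}

/* convention of the rewritten backref_in_log():
 *   < 0  the search itself failed (errno style)
 *     0  the name is not referenced by the log
 *     1  the name is referenced by the log
 */
static int backref_in_log_demo(const char *name, int simulate_error)
{
        if (simulate_error)
                return -ENOMEM;
        return !!find_name(name);       /* squash "found" into exactly 0 or 1 */
}

int main(void)
{
        int ret = backref_in_log_demo("bar", 0);

        if (ret < 0)
                fprintf(stderr, "lookup failed: %d\n", ret);
        else if (!ret)
                puts("not in log: safe to drop the stale reference");
        else
                puts("still in log: keep the reference");
        return 0;
}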
- */ -static bool name_in_log_ref(struct btrfs_root *log_root, - const char *name, const int name_len, - const u64 dirid, const u64 ino) -{ - struct btrfs_key search_key; - - search_key.objectid = ino; - search_key.type = BTRFS_INODE_REF_KEY; - search_key.offset = dirid; - if (backref_in_log(log_root, &search_key, dirid, name, name_len)) - return true; - - search_key.type = BTRFS_INODE_EXTREF_KEY; - search_key.offset = btrfs_extref_hash(dirid, name, name_len); - if (backref_in_log(log_root, &search_key, dirid, name, name_len)) - return true; - - return false; -} - -/* * take a single entry in a log directory item and replay it into * the subvolume. * @@ -2024,8 +1983,31 @@ out: return ret; insert: - if (name_in_log_ref(root->log_root, name, name_len, - key->objectid, log_key.objectid)) { + /* + * Check if the inode reference exists in the log for the given name, + * inode and parent inode + */ + found_key.objectid = log_key.objectid; + found_key.type = BTRFS_INODE_REF_KEY; + found_key.offset = key->objectid; + ret = backref_in_log(root->log_root, &found_key, 0, name, name_len); + if (ret < 0) { + goto out; + } else if (ret) { + /* The dentry will be added later. */ + ret = 0; + update_size = false; + goto out; + } + + found_key.objectid = log_key.objectid; + found_key.type = BTRFS_INODE_EXTREF_KEY; + found_key.offset = key->objectid; + ret = backref_in_log(root->log_root, &found_key, key->objectid, name, + name_len); + if (ret < 0) { + goto out; + } else if (ret) { /* The dentry will be added later. */ ret = 0; update_size = false; @@ -2869,7 +2851,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, level = btrfs_header_level(log->node); orig_level = level; path->nodes[level] = log->node; - extent_buffer_get(log->node); + atomic_inc(&log->node->refs); path->slots[level] = 0; while (1) { @@ -4983,7 +4965,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, key.objectid = ino; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(fs_info->sb, &key, root, NULL); + inode = btrfs_iget(fs_info->sb, &key, root); /* * If the other inode that had a conflicting dir entry was * deleted in the current transaction, we need to log its parent @@ -4993,8 +4975,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, ret = PTR_ERR(inode); if (ret == -ENOENT) { key.objectid = parent; - inode = btrfs_iget(fs_info->sb, &key, root, - NULL); + inode = btrfs_iget(fs_info->sb, &key, root); if (IS_ERR(inode)) { ret = PTR_ERR(inode); } else { @@ -5699,7 +5680,7 @@ process_leaf: continue; btrfs_release_path(path); - di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL); + di_inode = btrfs_iget(fs_info->sb, &di_key, root); if (IS_ERR(di_inode)) { ret = PTR_ERR(di_inode); goto next_dir_inode; @@ -5825,8 +5806,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, cur_offset = item_size; } - dir_inode = btrfs_iget(fs_info->sb, &inode_key, - root, NULL); + dir_inode = btrfs_iget(fs_info->sb, &inode_key, root); /* * If the parent inode was deleted, return an error to * fallback to a transaction commit. 
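The walk_log_tree() hunk above drops the extent_buffer_get() wrapper in favour of a bare atomic_inc() on the buffer's reference count. As a reminder of what that get/put pattern amounts to, here is a tiny userspace refcount using C11 atomics; it is purely illustrative and not the kernel's extent buffer code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer {
        atomic_int refs;
        int data;
};

static struct buffer *buffer_alloc(int data)
{
        struct buffer *b = malloc(sizeof(*b));

        if (!b)
                abort();
        atomic_init(&b->refs, 1);       /* the creator owns one reference */
        b->data = data;
        return b;
}

static void buffer_get(struct buffer *b)
{
        atomic_fetch_add(&b->refs, 1);  /* what the removed wrapper boiled down to */
}

static void buffer_put(struct buffer *b)
{
        if (atomic_fetch_sub(&b->refs, 1) == 1)
                free(b);                /* last reference dropped */
}

int main(void)
{
        struct buffer *b = buffer_alloc(42);

        buffer_get(b);                  /* e.g. parking the buffer in path->nodes[] */
        printf("data=%d refs=%d\n", b->data, atomic_load(&b->refs));
        buffer_put(b);
        buffer_put(b);
        return 0;
}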
This is to prevent @@ -5900,7 +5880,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans, search_key.objectid = found_key.offset; search_key.type = BTRFS_INODE_ITEM_KEY; search_key.offset = 0; - inode = btrfs_iget(fs_info->sb, &search_key, root, NULL); + inode = btrfs_iget(fs_info->sb, &search_key, root); if (IS_ERR(inode)) return PTR_ERR(inode); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index bdfe4493e43a..d8e5560db285 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -58,6 +58,30 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { .bg_flag = BTRFS_BLOCK_GROUP_RAID1, .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, }, + [BTRFS_RAID_RAID1C3] = { + .sub_stripes = 1, + .dev_stripes = 1, + .devs_max = 0, + .devs_min = 3, + .tolerated_failures = 2, + .devs_increment = 3, + .ncopies = 3, + .raid_name = "raid1c3", + .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3, + .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET, + }, + [BTRFS_RAID_RAID1C4] = { + .sub_stripes = 1, + .dev_stripes = 1, + .devs_max = 0, + .devs_min = 4, + .tolerated_failures = 3, + .devs_increment = 4, + .ncopies = 4, + .raid_name = "raid1c4", + .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4, + .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET, + }, [BTRFS_RAID_DUP] = { .sub_stripes = 1, .dev_stripes = 2, @@ -297,7 +321,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); -struct list_head *btrfs_get_fs_uuids(void) +struct list_head * __attribute_const__ btrfs_get_fs_uuids(void) { return &fs_uuids; } @@ -397,8 +421,6 @@ static struct btrfs_device *__alloc_device(void) INIT_LIST_HEAD(&dev->dev_alloc_list); INIT_LIST_HEAD(&dev->post_commit_list); - spin_lock_init(&dev->io_lock); - atomic_set(&dev->reada_in_flight, 0); atomic_set(&dev->dev_stats_ccnt, 0); btrfs_device_data_ordered_init(dev); @@ -501,212 +523,6 @@ error: return ret; } -static void requeue_list(struct btrfs_pending_bios *pending_bios, - struct bio *head, struct bio *tail) -{ - - struct bio *old_head; - - old_head = pending_bios->head; - pending_bios->head = head; - if (pending_bios->tail) - tail->bi_next = old_head; - else - pending_bios->tail = tail; -} - -/* - * we try to collect pending bios for a device so we don't get a large - * number of procs sending bios down to the same device. This greatly - * improves the schedulers ability to collect and merge the bios. - * - * But, it also turns into a long list of bios to process and that is sure - * to eventually make the worker thread block. The solution here is to - * make some progress and then put this work struct back at the end of - * the list if the block device is congested. This way, multiple devices - * can make progress from a single worker thread. - */ -static noinline void run_scheduled_bios(struct btrfs_device *device) -{ - struct btrfs_fs_info *fs_info = device->fs_info; - struct bio *pending; - struct backing_dev_info *bdi; - struct btrfs_pending_bios *pending_bios; - struct bio *tail; - struct bio *cur; - int again = 0; - unsigned long num_run; - unsigned long batch_run = 0; - unsigned long last_waited = 0; - int force_reg = 0; - int sync_pending = 0; - struct blk_plug plug; - - /* - * this function runs all the bios we've collected for - * a particular device. We don't want to wander off to - * another device without first sending all of these down. 
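For context on the large deletion in this hunk: the removed scheduler kept per-device pending lists, drained them under a spinlock, submitted the bios without the lock held and requeued on congestion; btrfs now simply submits bios directly. A toy userspace model of that drain-under-lock pattern (a sketch of the deleted design, not of any current kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item { int id; struct item *next; };

static struct item *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue(int id)
{
        struct item *it = malloc(sizeof(*it));

        if (!it)
                return;
        it->id = id;
        pthread_mutex_lock(&list_lock);
        it->next = head;                /* LIFO for brevity; the kernel list kept order */
        head = it;
        pthread_mutex_unlock(&list_lock);
}

static void drain(void)
{
        struct item *pending;

        pthread_mutex_lock(&list_lock);
        pending = head;                 /* take the whole list at once ... */
        head = NULL;
        pthread_mutex_unlock(&list_lock);

        while (pending) {               /* ... and process it without the lock held */
                struct item *cur = pending;

                pending = pending->next;
                printf("submit %d\n", cur->id);
                free(cur);
        }
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                enqueue(i);
        drain();
        return 0;
}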
- * So, setup a plug here and finish it off before we return - */ - blk_start_plug(&plug); - - bdi = device->bdev->bd_bdi; - -loop: - spin_lock(&device->io_lock); - -loop_lock: - num_run = 0; - - /* take all the bios off the list at once and process them - * later on (without the lock held). But, remember the - * tail and other pointers so the bios can be properly reinserted - * into the list if we hit congestion - */ - if (!force_reg && device->pending_sync_bios.head) { - pending_bios = &device->pending_sync_bios; - force_reg = 1; - } else { - pending_bios = &device->pending_bios; - force_reg = 0; - } - - pending = pending_bios->head; - tail = pending_bios->tail; - WARN_ON(pending && !tail); - - /* - * if pending was null this time around, no bios need processing - * at all and we can stop. Otherwise it'll loop back up again - * and do an additional check so no bios are missed. - * - * device->running_pending is used to synchronize with the - * schedule_bio code. - */ - if (device->pending_sync_bios.head == NULL && - device->pending_bios.head == NULL) { - again = 0; - device->running_pending = 0; - } else { - again = 1; - device->running_pending = 1; - } - - pending_bios->head = NULL; - pending_bios->tail = NULL; - - spin_unlock(&device->io_lock); - - while (pending) { - - rmb(); - /* we want to work on both lists, but do more bios on the - * sync list than the regular list - */ - if ((num_run > 32 && - pending_bios != &device->pending_sync_bios && - device->pending_sync_bios.head) || - (num_run > 64 && pending_bios == &device->pending_sync_bios && - device->pending_bios.head)) { - spin_lock(&device->io_lock); - requeue_list(pending_bios, pending, tail); - goto loop_lock; - } - - cur = pending; - pending = pending->bi_next; - cur->bi_next = NULL; - - BUG_ON(atomic_read(&cur->__bi_cnt) == 0); - - /* - * if we're doing the sync list, record that our - * plug has some sync requests on it - * - * If we're doing the regular list and there are - * sync requests sitting around, unplug before - * we add more - */ - if (pending_bios == &device->pending_sync_bios) { - sync_pending = 1; - } else if (sync_pending) { - blk_finish_plug(&plug); - blk_start_plug(&plug); - sync_pending = 0; - } - - btrfsic_submit_bio(cur); - num_run++; - batch_run++; - - cond_resched(); - - /* - * we made progress, there is more work to do and the bdi - * is now congested. Back off and let other work structs - * run instead - */ - if (pending && bdi_write_congested(bdi) && batch_run > 8 && - fs_info->fs_devices->open_devices > 1) { - struct io_context *ioc; - - ioc = current->io_context; - - /* - * the main goal here is that we don't want to - * block if we're going to be able to submit - * more requests without blocking. - * - * This code does two great things, it pokes into - * the elevator code from a filesystem _and_ - * it makes assumptions about how batching works. - */ - if (ioc && ioc->nr_batch_requests > 0 && - time_before(jiffies, ioc->last_waited + HZ/50UL) && - (last_waited == 0 || - ioc->last_waited == last_waited)) { - /* - * we want to go through our batch of - * requests and stop. 
So, we copy out - * the ioc->last_waited time and test - * against it before looping - */ - last_waited = ioc->last_waited; - cond_resched(); - continue; - } - spin_lock(&device->io_lock); - requeue_list(pending_bios, pending, tail); - device->running_pending = 1; - - spin_unlock(&device->io_lock); - btrfs_queue_work(fs_info->submit_workers, - &device->work); - goto done; - } - } - - cond_resched(); - if (again) - goto loop; - - spin_lock(&device->io_lock); - if (device->pending_bios.head || device->pending_sync_bios.head) - goto loop_lock; - spin_unlock(&device->io_lock); - -done: - blk_finish_plug(&plug); -} - -static void pending_bios_fn(struct btrfs_work *work) -{ - struct btrfs_device *device; - - device = container_of(work, struct btrfs_device, work); - run_scheduled_bios(device); -} - static bool device_path_matched(const char *path, struct btrfs_device *device) { int found; @@ -818,7 +634,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, } clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); - fs_devices->seeding = 1; + fs_devices->seeding = true; } else { if (bdev_read_only(bdev)) clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); @@ -828,7 +644,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, q = bdev_get_queue(bdev); if (!blk_queue_nonrot(q)) - fs_devices->rotating = 1; + fs_devices->rotating = true; device->bdev = bdev; clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); @@ -1005,11 +821,15 @@ static noinline struct btrfs_device *device_list_add(const char *path, *new_device_added = true; if (disk_super->label[0]) - pr_info("BTRFS: device label %s devid %llu transid %llu %s\n", - disk_super->label, devid, found_transid, path); + pr_info( + "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n", + disk_super->label, devid, found_transid, path, + current->comm, task_pid_nr(current)); else - pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n", - disk_super->fsid, devid, found_transid, path); + pr_info( + "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n", + disk_super->fsid, devid, found_transid, path, + current->comm, task_pid_nr(current)); } else if (!device->name || strcmp(device->name->str, path)) { /* @@ -1295,7 +1115,7 @@ static int close_fs_devices(struct btrfs_fs_devices *fs_devices) WARN_ON(fs_devices->open_devices); WARN_ON(fs_devices->rw_devices); fs_devices->opened = 0; - fs_devices->seeding = 0; + fs_devices->seeding = false; return 0; } @@ -2048,7 +1868,7 @@ static struct btrfs_device * btrfs_find_next_active_device( * where this function called, there should be always be another device (or * this_dev) which is active. 
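The next hunk marks btrfs_assign_next_active_device() as __cold, and an earlier volumes.c hunk adds __attribute_const__ to btrfs_get_fs_uuids(). For readers unfamiliar with these annotations, a small standalone illustration; the demo_* macro names are local stand-ins, not the kernel's:

#include <stdio.h>

#define demo_cold       __attribute__((cold))
#define demo_const      __attribute__((const))

static int table[4] = { 1, 2, 3, 4 };

/* like btrfs_get_fs_uuids(): always returns the same address, so the compiler
 * is allowed to fold repeated calls into one */
static const int * demo_const get_table(void)
{
        return table;
}

/* like the __cold error/teardown helpers: rarely executed, so optimize for
 * size and keep it out of the hot text */
static void demo_cold report_rare_event(const char *what)
{
        fprintf(stderr, "rare path: %s\n", what);
}

int main(void)
{
        printf("%d\n", get_table()[0]);
        report_rare_event("demo");
        return 0;
}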
*/ -void btrfs_assign_next_active_device(struct btrfs_device *device, +void __cold btrfs_assign_next_active_device(struct btrfs_device *device, struct btrfs_device *this_dev) { struct btrfs_fs_info *fs_info = device->fs_info; @@ -2450,11 +2270,11 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info) list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); mutex_unlock(&fs_info->chunk_mutex); - fs_devices->seeding = 0; + fs_devices->seeding = false; fs_devices->num_devices = 0; fs_devices->open_devices = 0; fs_devices->missing_devices = 0; - fs_devices->rotating = 0; + fs_devices->rotating = false; fs_devices->seed = seed_devices; generate_random_uuid(fs_devices->fsid); @@ -2649,7 +2469,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path atomic64_add(device->total_bytes, &fs_info->free_chunk_space); if (!blk_queue_nonrot(q)) - fs_devices->rotating = 1; + fs_devices->rotating = true; orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); btrfs_set_super_total_bytes(fs_info->super_copy, @@ -3177,7 +2997,7 @@ error: static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; u64 bytes_used; u64 chunk_type; @@ -3186,27 +3006,28 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, chunk_type = cache->flags; btrfs_put_block_group(cache); - if (chunk_type & BTRFS_BLOCK_GROUP_DATA) { - spin_lock(&fs_info->data_sinfo->lock); - bytes_used = fs_info->data_sinfo->bytes_used; - spin_unlock(&fs_info->data_sinfo->lock); + if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA)) + return 0; + + spin_lock(&fs_info->data_sinfo->lock); + bytes_used = fs_info->data_sinfo->bytes_used; + spin_unlock(&fs_info->data_sinfo->lock); - if (!bytes_used) { - struct btrfs_trans_handle *trans; - int ret; + if (!bytes_used) { + struct btrfs_trans_handle *trans; + int ret; - trans = btrfs_join_transaction(fs_info->tree_root); - if (IS_ERR(trans)) - return PTR_ERR(trans); + trans = btrfs_join_transaction(fs_info->tree_root); + if (IS_ERR(trans)) + return PTR_ERR(trans); - ret = btrfs_force_chunk_alloc(trans, - BTRFS_BLOCK_GROUP_DATA); - btrfs_end_transaction(trans); - if (ret < 0) - return ret; - return 1; - } + ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); + btrfs_end_transaction(trans); + if (ret < 0) + return ret; + return 1; } + return 0; } @@ -3385,28 +3206,28 @@ static int chunk_profiles_filter(u64 chunk_type, static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, struct btrfs_balance_args *bargs) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; u64 chunk_used; u64 user_thresh_min; u64 user_thresh_max; int ret = 1; cache = btrfs_lookup_block_group(fs_info, chunk_offset); - chunk_used = btrfs_block_group_used(&cache->item); + chunk_used = cache->used; if (bargs->usage_min == 0) user_thresh_min = 0; else - user_thresh_min = div_factor_fine(cache->key.offset, - bargs->usage_min); + user_thresh_min = div_factor_fine(cache->length, + bargs->usage_min); if (bargs->usage_max == 0) user_thresh_max = 1; else if (bargs->usage_max > 100) - user_thresh_max = cache->key.offset; + user_thresh_max = cache->length; else - user_thresh_max = div_factor_fine(cache->key.offset, - bargs->usage_max); + user_thresh_max = div_factor_fine(cache->length, + bargs->usage_max); if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) ret = 0; @@ -3418,20 +3239,19 @@ static int 
chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, struct btrfs_balance_args *bargs) { - struct btrfs_block_group_cache *cache; + struct btrfs_block_group *cache; u64 chunk_used, user_thresh; int ret = 1; cache = btrfs_lookup_block_group(fs_info, chunk_offset); - chunk_used = btrfs_block_group_used(&cache->item); + chunk_used = cache->used; if (bargs->usage_min == 0) user_thresh = 1; else if (bargs->usage > 100) - user_thresh = cache->key.offset; + user_thresh = cache->length; else - user_thresh = div_factor_fine(cache->key.offset, - bargs->usage); + user_thresh = div_factor_fine(cache->length, bargs->usage); if (chunk_used < user_thresh) ret = 0; @@ -3844,12 +3664,7 @@ static int alloc_profile_is_valid(u64 flags, int extended) if (flags == 0) return !extended; /* "0" is valid for usual profiles */ - /* true if exactly one bit set */ - /* - * Don't use is_power_of_2(unsigned long) because it won't work - * for the single profile (1ULL << 48) on 32-bit CPUs. - */ - return flags != 0 && (flags & (flags - 1)) == 0; + return has_single_bit_set(flags); } static inline int balance_need_close(struct btrfs_fs_info *fs_info) @@ -4036,7 +3851,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, int ret; u64 num_devices; unsigned seq; - bool reducing_integrity; + bool reducing_redundancy; int i; if (btrfs_fs_closing(fs_info) || @@ -4119,9 +3934,9 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && (fs_info->avail_metadata_alloc_bits & allowed) && !(bctl->meta.target & allowed))) - reducing_integrity = true; + reducing_redundancy = true; else - reducing_integrity = false; + reducing_redundancy = false; /* if we're not converting, the target field is uninitialized */ meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? 
@@ -4130,13 +3945,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, bctl->data.target : fs_info->avail_data_alloc_bits; } while (read_seqretry(&fs_info->profiles_lock, seq)); - if (reducing_integrity) { + if (reducing_redundancy) { if (bctl->flags & BTRFS_BALANCE_FORCE) { btrfs_info(fs_info, - "balance: force reducing metadata integrity"); + "balance: force reducing metadata redundancy"); } else { btrfs_err(fs_info, - "balance: reduces metadata integrity, use --force if you want this"); + "balance: reduces metadata redundancy, use --force if you want this"); ret = -EINVAL; goto out; } @@ -4902,6 +4717,14 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) btrfs_set_fs_incompat(info, RAID56); } +static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type) +{ + if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4))) + return; + + btrfs_set_fs_incompat(info, RAID1C34); +} + static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 start, u64 type) { @@ -4967,6 +4790,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { max_stripe_size = SZ_32M; max_chunk_size = 2 * max_stripe_size; + devs_max = min_t(int, devs_max, BTRFS_MAX_DEVS_SYS_CHUNK); } else { btrfs_err(info, "invalid chunk type 0x%llx requested", type); @@ -5047,8 +4871,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, sort(devices_info, ndevs, sizeof(struct btrfs_device_info), btrfs_cmp_device_info, NULL); - /* round down to number of usable stripes */ - ndevs = round_down(ndevs, devs_increment); + /* + * Round down to number of usable stripes, devs_increment can be any + * number so we can't use round_down() + */ + ndevs -= ndevs % devs_increment; if (ndevs < devs_min) { ret = -ENOSPC; @@ -5164,6 +4991,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, free_extent_map(em); check_raid56_incompat_flag(info, type); + check_raid1c34_incompat_flag(info, type); kfree(devices_info); return 0; @@ -5582,12 +5410,13 @@ void btrfs_put_bbio(struct btrfs_bio *bbio) * replace. */ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, - u64 logical, u64 length, + u64 logical, u64 *length_ret, struct btrfs_bio **bbio_ret) { struct extent_map *em; struct map_lookup *map; struct btrfs_bio *bbio; + u64 length = *length_ret; u64 offset; u64 stripe_nr; u64 stripe_nr_end; @@ -5620,7 +5449,8 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, } offset = logical - em->start; - length = min_t(u64, em->len - offset, length); + length = min_t(u64, em->start + em->len - logical, length); + *length_ret = length; stripe_len = map->stripe_len; /* @@ -6035,7 +5865,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, if (op == BTRFS_MAP_DISCARD) return __btrfs_map_block_for_discard(fs_info, logical, - *length, bbio_ret); + length, bbio_ret); ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom); if (ret < 0) @@ -6415,52 +6245,8 @@ static void btrfs_end_bio(struct bio *bio) } } -/* - * see run_scheduled_bios for a description of why bios are collected for - * async submit. - * - * This will add one bio to the pending list for a device and make sure - * the work struct is scheduled. 
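Two arithmetic details from the hunks above are worth spelling out. alloc_profile_is_valid() now relies on has_single_bit_set(), which (as the removed comment explains) must operate on the full 64-bit value so the 1ULL << 48 profile bit survives 32-bit builds, and the chunk allocator rounds ndevs down with a plain modulo because round_down() only handles power-of-two increments while devs_increment can now be 3 or 4 for raid1c3/raid1c4. A standalone sketch of both; the has_single_bit_set() body is an assumption, since misc.h is not shown in this diff:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* assumed shape of the misc.h helper: exactly one bit set in a 64-bit value */
static inline bool has_single_bit_set(uint64_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* the kernel's round_down() is essentially "n & ~(align - 1)" and therefore
 * only correct when align is a power of two */
static unsigned int round_down_pow2(unsigned int n, unsigned int align)
{
        return n & ~(align - 1);
}

int main(void)
{
        /* profile masks: exactly one bit set is valid, zero or several are not */
        assert(has_single_bit_set(1ULL << 48)); /* still works where long is 32-bit */
        assert(!has_single_bit_set(0));
        assert(!has_single_bit_set(0x5));

        /* rounding a device count down to a multiple of devs_increment */
        assert(round_down_pow2(10, 4) == 8);    /* fine for powers of two */
        assert(round_down_pow2(10, 3) == 8);    /* wrong answer, should be 9 */
        assert(10 - 10 % 3 == 9);               /* the plain modulo used instead */
        return 0;
}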
- */ -static noinline void btrfs_schedule_bio(struct btrfs_device *device, - struct bio *bio) -{ - struct btrfs_fs_info *fs_info = device->fs_info; - int should_queue = 1; - struct btrfs_pending_bios *pending_bios; - - /* don't bother with additional async steps for reads, right now */ - if (bio_op(bio) == REQ_OP_READ) { - btrfsic_submit_bio(bio); - return; - } - - WARN_ON(bio->bi_next); - bio->bi_next = NULL; - - spin_lock(&device->io_lock); - if (op_is_sync(bio->bi_opf)) - pending_bios = &device->pending_sync_bios; - else - pending_bios = &device->pending_bios; - - if (pending_bios->tail) - pending_bios->tail->bi_next = bio; - - pending_bios->tail = bio; - if (!pending_bios->head) - pending_bios->head = bio; - if (device->running_pending) - should_queue = 0; - - spin_unlock(&device->io_lock); - - if (should_queue) - btrfs_queue_work(fs_info->submit_workers, &device->work); -} - static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, - u64 physical, int dev_nr, int async) + u64 physical, int dev_nr) { struct btrfs_device *dev = bbio->stripes[dev_nr].dev; struct btrfs_fs_info *fs_info = bbio->fs_info; @@ -6478,10 +6264,7 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, btrfs_bio_counter_inc_noblocked(fs_info); - if (async) - btrfs_schedule_bio(dev, bio); - else - btrfsic_submit_bio(bio); + btrfsic_submit_bio(bio); } static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) @@ -6502,7 +6285,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) } blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, - int mirror_num, int async_submit) + int mirror_num) { struct btrfs_device *dev; struct bio *first_bio = bio; @@ -6571,7 +6354,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, bio = first_bio; submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, - dev_nr, async_submit); + dev_nr); } btrfs_bio_counter_dec(fs_info); return BLK_STS_OK; @@ -6675,9 +6458,6 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, else generate_random_uuid(dev->uuid); - btrfs_init_work(&dev->work, btrfs_submit_helper, - pending_bios_fn, NULL, NULL); - return dev; } @@ -6874,7 +6654,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, if (IS_ERR(fs_devices)) return fs_devices; - fs_devices->seeding = 1; + fs_devices->seeding = true; fs_devices->opened = 1; return fs_devices; } @@ -7063,48 +6843,49 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) sb_array_offset += len; cur_offset += len; - if (key.type == BTRFS_CHUNK_ITEM_KEY) { - chunk = (struct btrfs_chunk *)sb_array_offset; - /* - * At least one btrfs_chunk with one stripe must be - * present, exact stripe count check comes afterwards - */ - len = btrfs_chunk_item_size(1); - if (cur_offset + len > array_size) - goto out_short_read; - - num_stripes = btrfs_chunk_num_stripes(sb, chunk); - if (!num_stripes) { - btrfs_err(fs_info, - "invalid number of stripes %u in sys_array at offset %u", - num_stripes, cur_offset); - ret = -EIO; - break; - } + if (key.type != BTRFS_CHUNK_ITEM_KEY) { + btrfs_err(fs_info, + "unexpected item type %u in sys_array at offset %u", + (u32)key.type, cur_offset); + ret = -EIO; + break; + } - type = btrfs_chunk_type(sb, chunk); - if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { - btrfs_err(fs_info, - "invalid chunk type %llu in sys_array at offset %u", - type, cur_offset); - ret = -EIO; - break; - } + chunk = (struct btrfs_chunk 
*)sb_array_offset; + /* + * At least one btrfs_chunk with one stripe must be present, + * exact stripe count check comes afterwards + */ + len = btrfs_chunk_item_size(1); + if (cur_offset + len > array_size) + goto out_short_read; - len = btrfs_chunk_item_size(num_stripes); - if (cur_offset + len > array_size) - goto out_short_read; + num_stripes = btrfs_chunk_num_stripes(sb, chunk); + if (!num_stripes) { + btrfs_err(fs_info, + "invalid number of stripes %u in sys_array at offset %u", + num_stripes, cur_offset); + ret = -EIO; + break; + } - ret = read_one_chunk(&key, sb, chunk); - if (ret) - break; - } else { + type = btrfs_chunk_type(sb, chunk); + if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { btrfs_err(fs_info, - "unexpected item type %u in sys_array at offset %u", - (u32)key.type, cur_offset); + "invalid chunk type %llu in sys_array at offset %u", + type, cur_offset); ret = -EIO; break; } + + len = btrfs_chunk_item_size(num_stripes); + if (cur_offset + len > array_size) + goto out_short_read; + + ret = read_one_chunk(&key, sb, chunk); + if (ret) + break; + array_ptr += len; sb_array_offset += len; cur_offset += len; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index a7da1f3e3627..fc1b564b9cfe 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -18,10 +18,6 @@ extern struct mutex uuid_mutex; #define BTRFS_STRIPE_LEN SZ_64K struct buffer_head; -struct btrfs_pending_bios { - struct bio *head; - struct bio *tail; -}; struct btrfs_io_geometry { /* remaining bytes before crossing a stripe */ @@ -68,13 +64,6 @@ struct btrfs_device { u64 generation; - spinlock_t io_lock ____cacheline_aligned; - int running_pending; - /* regular prio bios */ - struct btrfs_pending_bios pending_bios; - /* sync bios */ - struct btrfs_pending_bios pending_sync_bios; - struct block_device *bdev; /* the mode sent to blkdev_get */ @@ -254,14 +243,14 @@ struct btrfs_fs_devices { struct list_head alloc_list; struct btrfs_fs_devices *seed; - int seeding; + bool seeding; int opened; /* set when we find or add a device that doesn't have the * nonrot flag set */ - int rotating; + bool rotating; struct btrfs_fs_info *fs_info; /* sysfs kobjects */ @@ -330,7 +319,6 @@ struct btrfs_bio { u64 map_type; /* get from map_lookup->type */ bio_end_io_t *end_io; struct bio *orig_bio; - unsigned long flags; void *private; atomic_t error; int max_errors; @@ -436,7 +424,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info); int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type); void btrfs_mapping_tree_free(struct extent_map_tree *tree); blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, - int mirror_num, int async_submit); + int mirror_num); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder); struct btrfs_device *btrfs_scan_one_device(const char *path, @@ -557,6 +545,10 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags) return BTRFS_RAID_RAID10; else if (flags & BTRFS_BLOCK_GROUP_RAID1) return BTRFS_RAID_RAID1; + else if (flags & BTRFS_BLOCK_GROUP_RAID1C3) + return BTRFS_RAID_RAID1C3; + else if (flags & BTRFS_BLOCK_GROUP_RAID1C4) + return BTRFS_RAID_RAID1C4; else if (flags & BTRFS_BLOCK_GROUP_DUP) return BTRFS_RAID_DUP; else if (flags & BTRFS_BLOCK_GROUP_RAID0) @@ -571,7 +563,7 @@ static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags) void btrfs_commit_device_sizes(struct btrfs_transaction *trans); -struct list_head *btrfs_get_fs_uuids(void); +struct list_head * __attribute_const__ 
btrfs_get_fs_uuids(void); void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info); void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info); bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index df1aace5df50..a6c90a003c12 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -29,19 +29,9 @@ struct workspace { static struct workspace_manager wsm; -static void zlib_init_workspace_manager(void) +struct list_head *zlib_get_workspace(unsigned int level) { - btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress); -} - -static void zlib_cleanup_workspace_manager(void) -{ - btrfs_cleanup_workspace_manager(&wsm); -} - -static struct list_head *zlib_get_workspace(unsigned int level) -{ - struct list_head *ws = btrfs_get_workspace(&wsm, level); + struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level); struct workspace *workspace = list_entry(ws, struct workspace, list); workspace->level = level; @@ -49,12 +39,7 @@ static struct list_head *zlib_get_workspace(unsigned int level) return ws; } -static void zlib_put_workspace(struct list_head *ws) -{ - btrfs_put_workspace(&wsm, ws); -} - -static void zlib_free_workspace(struct list_head *ws) +void zlib_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -63,7 +48,7 @@ static void zlib_free_workspace(struct list_head *ws) kfree(workspace); } -static struct list_head *zlib_alloc_workspace(unsigned int level) +struct list_head *zlib_alloc_workspace(unsigned int level) { struct workspace *workspace; int workspacesize; @@ -88,13 +73,9 @@ fail: return ERR_PTR(-ENOMEM); } -static int zlib_compress_pages(struct list_head *ws, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out) +int zlib_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret; @@ -228,7 +209,7 @@ out: return ret; } -static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb) +int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0, ret2; @@ -319,10 +300,9 @@ done: return ret; } -static int zlib_decompress(struct list_head *ws, unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) +int zlib_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; @@ -419,15 +399,7 @@ next: } const struct btrfs_compress_op btrfs_zlib_compress = { - .init_workspace_manager = zlib_init_workspace_manager, - .cleanup_workspace_manager = zlib_cleanup_workspace_manager, - .get_workspace = zlib_get_workspace, - .put_workspace = zlib_put_workspace, - .alloc_workspace = zlib_alloc_workspace, - .free_workspace = zlib_free_workspace, - .compress_pages = zlib_compress_pages, - .decompress_bio = zlib_decompress_bio, - .decompress = zlib_decompress, + .workspace_manager = &wsm, .max_level = 9, .default_level = BTRFS_ZLIB_DEFAULT_LEVEL, }; diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 764d47b107e5..9a4871636c6c 100644 --- a/fs/btrfs/zstd.c 
+++ b/fs/btrfs/zstd.c @@ -91,9 +91,8 @@ static inline struct workspace *list_to_workspace(struct list_head *list) return container_of(list, struct workspace, list); } -static void zstd_free_workspace(struct list_head *ws); -static struct list_head *zstd_alloc_workspace(unsigned int level); - +void zstd_free_workspace(struct list_head *ws); +struct list_head *zstd_alloc_workspace(unsigned int level); /* * zstd_reclaim_timer_fn - reclaim timer * @t: timer @@ -168,7 +167,7 @@ static void zstd_calc_ws_mem_sizes(void) } } -static void zstd_init_workspace_manager(void) +void zstd_init_workspace_manager(void) { struct list_head *ws; int i; @@ -194,7 +193,7 @@ static void zstd_init_workspace_manager(void) } } -static void zstd_cleanup_workspace_manager(void) +void zstd_cleanup_workspace_manager(void) { struct workspace *workspace; int i; @@ -261,7 +260,7 @@ static struct list_head *zstd_find_workspace(unsigned int level) * attempt to allocate a new workspace. If we fail to allocate one due to * memory pressure, go to sleep waiting for the max level workspace to free up. */ -static struct list_head *zstd_get_workspace(unsigned int level) +struct list_head *zstd_get_workspace(unsigned int level) { struct list_head *ws; unsigned int nofs_flag; @@ -302,7 +301,7 @@ again: * isn't set, it is also set here. Only the max level workspace tries and wakes * up waiting workspaces. */ -static void zstd_put_workspace(struct list_head *ws) +void zstd_put_workspace(struct list_head *ws) { struct workspace *workspace = list_to_workspace(ws); @@ -332,7 +331,7 @@ static void zstd_put_workspace(struct list_head *ws) cond_wake_up(&wsm.wait); } -static void zstd_free_workspace(struct list_head *ws) +void zstd_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); @@ -341,7 +340,7 @@ static void zstd_free_workspace(struct list_head *ws) kfree(workspace); } -static struct list_head *zstd_alloc_workspace(unsigned int level) +struct list_head *zstd_alloc_workspace(unsigned int level) { struct workspace *workspace; @@ -367,13 +366,9 @@ fail: return ERR_PTR(-ENOMEM); } -static int zstd_compress_pages(struct list_head *ws, - struct address_space *mapping, - u64 start, - struct page **pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out) +int zstd_compress_pages(struct list_head *ws, struct address_space *mapping, + u64 start, struct page **pages, unsigned long *out_pages, + unsigned long *total_in, unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); ZSTD_CStream *stream; @@ -548,7 +543,7 @@ out: return ret; } -static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) +int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) { struct workspace *workspace = list_entry(ws, struct workspace, list); struct page **pages_in = cb->compressed_pages; @@ -626,10 +621,9 @@ done: return ret; } -static int zstd_decompress(struct list_head *ws, unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) +int zstd_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, unsigned long start_byte, size_t srclen, + size_t destlen) { struct workspace *workspace = list_entry(ws, struct workspace, list); ZSTD_DStream *stream; @@ -712,15 +706,8 @@ finish: } const struct btrfs_compress_op btrfs_zstd_compress = { - .init_workspace_manager = zstd_init_workspace_manager, - .cleanup_workspace_manager = 
zstd_cleanup_workspace_manager, - .get_workspace = zstd_get_workspace, - .put_workspace = zstd_put_workspace, - .alloc_workspace = zstd_alloc_workspace, - .free_workspace = zstd_free_workspace, - .compress_pages = zstd_compress_pages, - .decompress_bio = zstd_decompress_bio, - .decompress = zstd_decompress, + /* ZSTD uses own workspace manager */ + .workspace_manager = NULL, .max_level = ZSTD_BTRFS_MAX_LEVEL, .default_level = ZSTD_BTRFS_DEFAULT_LEVEL, }; diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index d3b9c9d5c1bd..f5a38910a82b 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1058,6 +1058,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + /* remove from inode's cap rbtree, and clear auth cap */ + rb_erase(&cap->ci_node, &ci->i_caps); + if (ci->i_auth_cap == cap) + ci->i_auth_cap = NULL; + /* remove from session list */ spin_lock(&session->s_cap_lock); if (session->s_cap_iterator == cap) { @@ -1091,11 +1096,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) spin_unlock(&session->s_cap_lock); - /* remove from inode list */ - rb_erase(&cap->ci_node, &ci->i_caps); - if (ci->i_auth_cap == cap) - ci->i_auth_cap = NULL; - if (removed) ceph_put_cap(mdsc, cap); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 4ca0b8ff9a72..d17a789fd856 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1553,36 +1553,37 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) { int valid = 0; struct dentry *parent; - struct inode *dir; + struct inode *dir, *inode; if (flags & LOOKUP_RCU) { parent = READ_ONCE(dentry->d_parent); dir = d_inode_rcu(parent); if (!dir) return -ECHILD; + inode = d_inode_rcu(dentry); } else { parent = dget_parent(dentry); dir = d_inode(parent); + inode = d_inode(dentry); } dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry, - dentry, d_inode(dentry), ceph_dentry(dentry)->offset); + dentry, inode, ceph_dentry(dentry)->offset); /* always trust cached snapped dentries, snapdir dentry */ if (ceph_snap(dir) != CEPH_NOSNAP) { dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry, - dentry, d_inode(dentry)); + dentry, inode); valid = 1; - } else if (d_really_is_positive(dentry) && - ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) { + } else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { valid = 1; } else { valid = dentry_lease_is_valid(dentry, flags); if (valid == -ECHILD) return valid; if (valid || dir_lease_is_valid(dir, dentry)) { - if (d_really_is_positive(dentry)) - valid = ceph_is_any_caps(d_inode(dentry)); + if (inode) + valid = ceph_is_any_caps(inode); else valid = 1; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index d277f71abe0b..8de633964dc3 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -462,6 +462,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, err = ceph_security_init_secctx(dentry, mode, &as_ctx); if (err < 0) goto out_ctx; + } else if (!d_in_lookup(dentry)) { + /* If it's not being looked up, it's negative */ + return -ENOENT; } /* do the open */ @@ -750,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode, if (!atomic_dec_and_test(&aio_req->pending_reqs)) return; + if (aio_req->iocb->ki_flags & IOCB_DIRECT) + inode_dio_end(inode); + ret = aio_req->error; if (!ret) ret = aio_req->total_len; @@ -1088,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, CEPH_CAP_FILE_RD); list_splice(&aio_req->osd_reqs, &osd_reqs); + inode_dio_begin(inode); while (!list_empty(&osd_reqs)) { req = 
list_first_entry(&osd_reqs, struct ceph_osd_request, @@ -1261,14 +1268,24 @@ again: dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); + if (iocb->ki_flags & IOCB_DIRECT) + ceph_start_io_direct(inode); + else + ceph_start_io_read(inode); + if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_CACHE; ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); - if (ret < 0) + if (ret < 0) { + if (iocb->ki_flags & IOCB_DIRECT) + ceph_end_io_direct(inode); + else + ceph_end_io_read(inode); return ret; + } if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || (iocb->ki_flags & IOCB_DIRECT) || @@ -1280,16 +1297,12 @@ again: if (ci->i_inline_version == CEPH_INLINE_NONE) { if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { - ceph_start_io_direct(inode); ret = ceph_direct_read_write(iocb, to, NULL, NULL); - ceph_end_io_direct(inode); if (ret >= 0 && ret < len) retry_op = CHECK_EOF; } else { - ceph_start_io_read(inode); ret = ceph_sync_read(iocb, to, &retry_op); - ceph_end_io_read(inode); } } else { retry_op = READ_INLINE; @@ -1300,11 +1313,10 @@ again: inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, ceph_cap_string(got)); ceph_add_rw_context(fi, &rw_ctx); - ceph_start_io_read(inode); ret = generic_file_read_iter(iocb, to); - ceph_end_io_read(inode); ceph_del_rw_context(fi, &rw_ctx); } + dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); if (pinned_page) { @@ -1312,6 +1324,12 @@ again: pinned_page = NULL; } ceph_put_cap_refs(ci, got); + + if (iocb->ki_flags & IOCB_DIRECT) + ceph_end_io_direct(inode); + else + ceph_end_io_read(inode); + if (retry_op > HAVE_RETRIED && ret >= 0) { int statret; struct page *page = NULL; @@ -1956,10 +1974,18 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off, if (ceph_test_mount_opt(src_fsc, NOCOPYFROM)) return -EOPNOTSUPP; + /* + * Striped file layouts require that we copy partial objects, but the + * OSD copy-from operation only supports full-object copies. Limit + * this to non-striped file layouts for now. 
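The restriction being introduced here limits remote object copies to simple layouts. A rough userspace model of the checks that follow (field names mirror the ceph layout fields, but the function and the sample values are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct layout { uint32_t stripe_unit, stripe_count, object_size; };

/* the OSD copy-from operation copies whole objects, so both files need a
 * simple single-stripe layout with the same object size and the range must
 * cover at least one full object */
static bool can_remote_copy(const struct layout *src, const struct layout *dst,
                            uint64_t len)
{
        if (src->stripe_unit != dst->stripe_unit)
                return false;
        if (src->stripe_count != 1 || dst->stripe_count != 1)
                return false;
        if (src->object_size != dst->object_size)
                return false;
        return len >= src->object_size;         /* otherwise nothing to copy remotely */
}

int main(void)
{
        const struct layout simple  = { 4 << 20, 1, 4 << 20 };
        const struct layout striped = { 1 << 20, 4, 4 << 20 };

        printf("simple  -> simple : %d\n", can_remote_copy(&simple, &simple, 8 << 20));
        printf("striped -> simple : %d\n", can_remote_copy(&striped, &simple, 8 << 20));
        return 0;
}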
+ */ if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) || - (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) || - (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) + (src_ci->i_layout.stripe_count != 1) || + (dst_ci->i_layout.stripe_count != 1) || + (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) { + dout("Invalid src/dst files layout\n"); return -EOPNOTSUPP; + } if (len < src_ci->i_layout.object_size) return -EOPNOTSUPP; /* no remote copy will be done */ diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9f135624ae47..c07407586ce8 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1434,6 +1434,7 @@ retry_lookup: dout(" final dn %p\n", dn); } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || req->r_op == CEPH_MDS_OP_MKSNAP) && + test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) && !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { struct inode *dir = req->r_parent; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a8a8f84f3bbf..a5163296d9d9 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -384,8 +384,8 @@ static int parse_reply_info_readdir(void **p, void *end, } done: - if (*p != end) - goto bad; + /* Skip over any unrecognized fields */ + *p = end; return 0; bad: @@ -406,12 +406,10 @@ static int parse_reply_info_filelock(void **p, void *end, goto bad; info->filelock_reply = *p; - *p += sizeof(*info->filelock_reply); - if (unlikely(*p != end)) - goto bad; + /* Skip over any unrecognized fields */ + *p = end; return 0; - bad: return -EIO; } @@ -425,18 +423,21 @@ static int parse_reply_info_create(void **p, void *end, { if (features == (u64)-1 || (features & CEPH_FEATURE_REPLY_CREATE_INODE)) { + /* Malformed reply? */ if (*p == end) { info->has_create_ino = false; } else { info->has_create_ino = true; - info->ino = ceph_decode_64(p); + ceph_decode_64_safe(p, end, info->ino, bad); } + } else { + if (*p != end) + goto bad; } - if (unlikely(*p != end)) - goto bad; + /* Skip over any unrecognized fields */ + *p = end; return 0; - bad: return -EIO; } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index edfd643a8205..b47f43fc2d68 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -268,6 +268,7 @@ static int parse_fsopt_token(char *c, void *private) } break; case Opt_fscache_uniq: +#ifdef CONFIG_CEPH_FSCACHE kfree(fsopt->fscache_uniq); fsopt->fscache_uniq = kstrndup(argstr[0].from, argstr[0].to-argstr[0].from, @@ -276,7 +277,10 @@ static int parse_fsopt_token(char *c, void *private) return -ENOMEM; fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE; break; - /* misc */ +#else + pr_err("fscache support is disabled\n"); + return -EINVAL; +#endif case Opt_wsize: if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE) return -EINVAL; @@ -353,10 +357,15 @@ static int parse_fsopt_token(char *c, void *private) fsopt->flags &= ~CEPH_MOUNT_OPT_INO32; break; case Opt_fscache: +#ifdef CONFIG_CEPH_FSCACHE fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE; kfree(fsopt->fscache_uniq); fsopt->fscache_uniq = NULL; break; +#else + pr_err("fscache support is disabled\n"); + return -EINVAL; +#endif case Opt_nofscache: fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; kfree(fsopt->fscache_uniq); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index c049c7b3aa87..1a135d1b85bd 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -169,7 +169,13 @@ cifs_read_super(struct super_block *sb) else sb->s_maxbytes = MAX_NON_LFS; - /* Some very old servers like DOS and OS/2 used 2 second granularity */ + /* + * Some very old servers like DOS and 
OS/2 used 2 second granularity + * (while all current servers use 100ns granularity - see MS-DTYP) + * but 1 second is the maximum allowed granularity for the VFS + * so for old servers set time granularity to 1 second while for + * everything else (current servers) set it to 100ns. + */ if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) && ((tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find) == 0) && diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 50dfd9049370..d78bfcc19156 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1391,6 +1391,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file); struct cifsInodeInfo { bool can_cache_brlcks; struct list_head llist; /* locks helb by this inode */ + /* + * NOTE: Some code paths call down_read(lock_sem) twice, so + * we must always use use cifs_down_write() instead of down_write() + * for this semaphore to avoid deadlocks. + */ struct rw_semaphore lock_sem; /* protect the fields above */ /* BB add in lists for dirty pages i.e. write caching info for oplock */ struct list_head openFileList; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index e53e9f62b87b..fe597d3d5208 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -170,6 +170,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, const unsigned int xid); extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile); +extern void cifs_down_write(struct rw_semaphore *sem); extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a64dfa95a925..ccaa8bad336f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -564,9 +564,11 @@ cifs_reconnect(struct TCP_Server_Info *server) spin_lock(&GlobalMid_Lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); + kref_get(&mid_entry->refcount); if (mid_entry->mid_state == MID_REQUEST_SUBMITTED) mid_entry->mid_state = MID_RETRY_NEEDED; list_move(&mid_entry->qhead, &retry_list); + mid_entry->mid_flags |= MID_DELETED; } spin_unlock(&GlobalMid_Lock); mutex_unlock(&server->srv_mutex); @@ -576,6 +578,7 @@ cifs_reconnect(struct TCP_Server_Info *server) mid_entry = list_entry(tmp, struct mid_q_entry, qhead); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); + cifs_mid_q_entry_release(mid_entry); } if (cifs_rdma_enabled(server)) { @@ -895,8 +898,10 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed) if (mid->mid_flags & MID_DELETED) printk_once(KERN_WARNING "trying to dequeue a deleted mid\n"); - else + else { list_del_init(&mid->qhead); + mid->mid_flags |= MID_DELETED; + } spin_unlock(&GlobalMid_Lock); } @@ -966,8 +971,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid); + kref_get(&mid_entry->refcount); mid_entry->mid_state = MID_SHUTDOWN; list_move(&mid_entry->qhead, &dispose_list); + mid_entry->mid_flags |= MID_DELETED; } spin_unlock(&GlobalMid_Lock); @@ -977,6 +984,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); + cifs_mid_q_entry_release(mid_entry); } /* 1/8th of sec is more than enough time for them to exit */ msleep(125); @@ -3882,8 
+3890,12 @@ generic_ip_connect(struct TCP_Server_Info *server) rc = socket->ops->connect(socket, saddr, slen, server->noblockcnt ? O_NONBLOCK : 0); - - if (rc == -EINPROGRESS) + /* + * When mounting SMB root file systems, we do not want to block in + * connect. Otherwise bail out and then let cifs_reconnect() perform + * reconnect failover - if possible. + */ + if (server->noblockcnt && rc == -EINPROGRESS) rc = 0; if (rc < 0) { cifs_dbg(FYI, "Error %d connecting to server\n", rc); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 5ad15de2bb4f..fa7b0fa72bb3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -281,6 +281,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode) return has_locks; } +void +cifs_down_write(struct rw_semaphore *sem) +{ + while (!down_write_trylock(sem)) + msleep(10); +} + struct cifsFileInfo * cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, __u32 oplock) @@ -306,7 +313,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, INIT_LIST_HEAD(&fdlocks->locks); fdlocks->cfile = cfile; cfile->llist = fdlocks; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); list_add(&fdlocks->llist, &cinode->llist); up_write(&cinode->lock_sem); @@ -405,10 +412,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) bool oplock_break_cancelled; spin_lock(&tcon->open_file_lock); - + spin_lock(&cifsi->open_file_lock); spin_lock(&cifs_file->file_info_lock); if (--cifs_file->count > 0) { spin_unlock(&cifs_file->file_info_lock); + spin_unlock(&cifsi->open_file_lock); spin_unlock(&tcon->open_file_lock); return; } @@ -421,9 +429,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open); /* remove it from the lists */ - spin_lock(&cifsi->open_file_lock); list_del(&cifs_file->flist); - spin_unlock(&cifsi->open_file_lock); list_del(&cifs_file->tlist); atomic_dec(&tcon->num_local_opens); @@ -440,6 +446,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) cifs_set_oplock_level(cifsi, 0); } + spin_unlock(&cifsi->open_file_lock); spin_unlock(&tcon->open_file_lock); oplock_break_cancelled = wait_oplock_handler ? @@ -464,7 +471,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) * Delete any outstanding lock records. We'll lose them when the file * is closed anyway. 
*/ - down_write(&cifsi->lock_sem); + cifs_down_write(&cifsi->lock_sem); list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { list_del(&li->llist); cifs_del_lock_waiters(li); @@ -1027,7 +1034,7 @@ static void cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock) { struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); list_add_tail(&lock->llist, &cfile->llist->locks); up_write(&cinode->lock_sem); } @@ -1049,7 +1056,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, try_again: exist = false; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length, lock->type, lock->flags, &conf_lock, @@ -1072,7 +1079,7 @@ try_again: (lock->blist.next == &lock->blist)); if (!rc) goto try_again; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); list_del_init(&lock->blist); } @@ -1125,7 +1132,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock) return rc; try_again: - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; @@ -1331,7 +1338,7 @@ cifs_push_locks(struct cifsFileInfo *cfile) int rc = 0; /* we are going to update can_cache_brlcks here - need a write access */ - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; @@ -1522,7 +1529,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, if (!buf) return -ENOMEM; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); for (i = 0; i < 2; i++) { cur = buf; num = 0; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 5dcc95b38310..df9377828e2f 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2475,9 +2475,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid); cifsFileInfo_put(wfile); if (rc) - return rc; + goto cifs_setattr_exit; } else if (rc != -EBADF) - return rc; + goto cifs_setattr_exit; else rc = 0; } diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index b7421a096319..514810694c0f 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -171,6 +171,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server) /* we do not want to loop forever */ last_mid = cur_mid; cur_mid++; + /* avoid 0xFFFF MID */ + if (cur_mid == 0xffff) + cur_mid++; /* * This nested loop looks more expensive than it is. 
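The cifs hunks above convert the lock_sem writers from down_write() to the new cifs_down_write() helper, which never queues as a blocked writer: it polls down_write_trylock() and sleeps briefly between attempts, so a task that already holds lock_sem for read and takes it for read again (as the cifsglob.h note warns some paths do) cannot block behind a queued writer that is itself waiting on that first read lock. A minimal kernel-style sketch of the polling idiom, under an illustrative name (example_down_write_polled is not part of the patch):

	#include <linux/rwsem.h>
	#include <linux/delay.h>

	/* Illustrative stand-in for cifs_down_write(): take a rw_semaphore for
	 * write without ever sitting in the writer wait queue. */
	static void example_down_write_polled(struct rw_semaphore *sem)
	{
		while (!down_write_trylock(sem))	/* fails while any holder remains */
			msleep(10);			/* back off briefly, then retry */
	}
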
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index e6a1fc72018f..8b0b512c5792 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -145,7 +145,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, cur = buf; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { if (flock->fl_start > li->offset || (flock->fl_start + length) < diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 4c0922596467..cd55af9b7cc5 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -4084,6 +4084,7 @@ free_pages: kfree(dw->ppages); cifs_small_buf_release(dw->buf); + kfree(dw); } @@ -4157,7 +4158,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid, dw->server = server; dw->ppages = pages; dw->len = len; - queue_work(cifsiod_wq, &dw->decrypt); + queue_work(decrypt_wq, &dw->decrypt); *num_mids = 0; /* worker thread takes care of finding mid */ return -1; } diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index ea735d59c36e..0abfde6d0b05 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -838,6 +838,7 @@ struct create_durable_handle_reconnect_v2 { struct create_context ccontext; __u8 Name[8]; struct durable_reconnect_context_v2 dcontext; + __u8 Pad[4]; } __packed; /* See MS-SMB2 2.2.13.2.5 */ diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 308ad0f495e1..ca3de62688d6 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -86,22 +86,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) static void _cifs_mid_q_entry_release(struct kref *refcount) { - struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry, - refcount); - - mempool_free(mid, cifs_mid_poolp); -} - -void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) -{ - spin_lock(&GlobalMid_Lock); - kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); - spin_unlock(&GlobalMid_Lock); -} - -void -DeleteMidQEntry(struct mid_q_entry *midEntry) -{ + struct mid_q_entry *midEntry = + container_of(refcount, struct mid_q_entry, refcount); #ifdef CONFIG_CIFS_STATS2 __le16 command = midEntry->server->vals->lock_cmd; __u16 smb_cmd = le16_to_cpu(midEntry->command); @@ -166,6 +152,19 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) } } #endif + + mempool_free(midEntry, cifs_mid_poolp); +} + +void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) +{ + spin_lock(&GlobalMid_Lock); + kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); + spin_unlock(&GlobalMid_Lock); +} + +void DeleteMidQEntry(struct mid_q_entry *midEntry) +{ cifs_mid_q_entry_release(midEntry); } @@ -173,8 +172,10 @@ void cifs_delete_mid(struct mid_q_entry *mid) { spin_lock(&GlobalMid_Lock); - list_del_init(&mid->qhead); - mid->mid_flags |= MID_DELETED; + if (!(mid->mid_flags & MID_DELETED)) { + list_del_init(&mid->qhead); + mid->mid_flags |= MID_DELETED; + } spin_unlock(&GlobalMid_Lock); DeleteMidQEntry(mid); @@ -872,7 +873,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) rc = -EHOSTDOWN; break; default: - list_del_init(&mid->qhead); + if (!(mid->mid_flags & MID_DELETED)) { + list_del_init(&mid->qhead); + mid->mid_flags |= MID_DELETED; + } cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n", __func__, mid->mid, mid->mid_state); rc = -EIO; diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index dc5dbf6a81d7..cb61467478ca 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c @@ -101,7 +101,7 @@ static int 
create_link(struct config_item *parent_item, } target_sd->s_links++; spin_unlock(&configfs_dirent_lock); - ret = configfs_get_target_path(item, item, body); + ret = configfs_get_target_path(parent_item, item, body); if (!ret) ret = configfs_create_link(target_sd, parent_item->ci_dentry, dentry, body); diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index d12ea28836a5..2f04024c3588 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -958,8 +958,8 @@ static int cramfs_get_tree(struct fs_context *fc) if (IS_ENABLED(CONFIG_CRAMFS_MTD)) { ret = get_tree_mtd(fc, cramfs_mtd_fill_super); - if (ret < 0) - return ret; + if (!ret) + return 0; } if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV)) ret = get_tree_bdev(fc, cramfs_blkdev_fill_super); diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 82da2510721f..1f4b8a277060 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -26,7 +26,7 @@ #include <linux/namei.h> #include "fscrypt_private.h" -static void __fscrypt_decrypt_bio(struct bio *bio, bool done) +void fscrypt_decrypt_bio(struct bio *bio) { struct bio_vec *bv; struct bvec_iter_all iter_all; @@ -37,37 +37,10 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done) bv->bv_offset); if (ret) SetPageError(page); - else if (done) - SetPageUptodate(page); - if (done) - unlock_page(page); } } - -void fscrypt_decrypt_bio(struct bio *bio) -{ - __fscrypt_decrypt_bio(bio, false); -} EXPORT_SYMBOL(fscrypt_decrypt_bio); -static void completion_pages(struct work_struct *work) -{ - struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work); - struct bio *bio = ctx->bio; - - __fscrypt_decrypt_bio(bio, true); - fscrypt_release_ctx(ctx); - bio_put(bio); -} - -void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio) -{ - INIT_WORK(&ctx->work, completion_pages); - ctx->bio = bio; - fscrypt_enqueue_decrypt_work(&ctx->work); -} -EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio); - int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, sector_t pblk, unsigned int len) { diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 32a7ad0098cc..3719efa546c6 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -27,29 +27,20 @@ #include <linux/ratelimit.h> #include <linux/dcache.h> #include <linux/namei.h> -#include <crypto/aes.h> #include <crypto/skcipher.h> #include "fscrypt_private.h" static unsigned int num_prealloc_crypto_pages = 32; -static unsigned int num_prealloc_crypto_ctxs = 128; module_param(num_prealloc_crypto_pages, uint, 0444); MODULE_PARM_DESC(num_prealloc_crypto_pages, "Number of crypto pages to preallocate"); -module_param(num_prealloc_crypto_ctxs, uint, 0444); -MODULE_PARM_DESC(num_prealloc_crypto_ctxs, - "Number of crypto contexts to preallocate"); static mempool_t *fscrypt_bounce_page_pool = NULL; -static LIST_HEAD(fscrypt_free_ctxs); -static DEFINE_SPINLOCK(fscrypt_ctx_lock); - static struct workqueue_struct *fscrypt_read_workqueue; static DEFINE_MUTEX(fscrypt_init_mutex); -static struct kmem_cache *fscrypt_ctx_cachep; struct kmem_cache *fscrypt_info_cachep; void fscrypt_enqueue_decrypt_work(struct work_struct *work) @@ -58,62 +49,6 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work) } EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work); -/** - * fscrypt_release_ctx() - Release a decryption context - * @ctx: The decryption context to release. - * - * If the decryption context was allocated from the pre-allocated pool, return - * it to that pool. Else, free it. 
- */ -void fscrypt_release_ctx(struct fscrypt_ctx *ctx) -{ - unsigned long flags; - - if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) { - kmem_cache_free(fscrypt_ctx_cachep, ctx); - } else { - spin_lock_irqsave(&fscrypt_ctx_lock, flags); - list_add(&ctx->free_list, &fscrypt_free_ctxs); - spin_unlock_irqrestore(&fscrypt_ctx_lock, flags); - } -} -EXPORT_SYMBOL(fscrypt_release_ctx); - -/** - * fscrypt_get_ctx() - Get a decryption context - * @gfp_flags: The gfp flag for memory allocation - * - * Allocate and initialize a decryption context. - * - * Return: A new decryption context on success; an ERR_PTR() otherwise. - */ -struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags) -{ - struct fscrypt_ctx *ctx; - unsigned long flags; - - /* - * First try getting a ctx from the free list so that we don't have to - * call into the slab allocator. - */ - spin_lock_irqsave(&fscrypt_ctx_lock, flags); - ctx = list_first_entry_or_null(&fscrypt_free_ctxs, - struct fscrypt_ctx, free_list); - if (ctx) - list_del(&ctx->free_list); - spin_unlock_irqrestore(&fscrypt_ctx_lock, flags); - if (!ctx) { - ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags); - if (!ctx) - return ERR_PTR(-ENOMEM); - ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL; - } else { - ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL; - } - return ctx; -} -EXPORT_SYMBOL(fscrypt_get_ctx); - struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags) { return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags); @@ -138,14 +73,17 @@ EXPORT_SYMBOL(fscrypt_free_bounce_page); void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci) { + u8 flags = fscrypt_policy_flags(&ci->ci_policy); + memset(iv, 0, ci->ci_mode->ivsize); - iv->lblk_num = cpu_to_le64(lblk_num); - if (fscrypt_is_direct_key_policy(&ci->ci_policy)) + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { + WARN_ON_ONCE((u32)lblk_num != lblk_num); + lblk_num |= (u64)ci->ci_inode->i_ino << 32; + } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); - - if (ci->ci_essiv_tfm != NULL) - crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw); + } + iv->lblk_num = cpu_to_le64(lblk_num); } /* Encrypt or decrypt a single filesystem block of file contents */ @@ -396,17 +334,6 @@ const struct dentry_operations fscrypt_d_ops = { .d_revalidate = fscrypt_d_revalidate, }; -static void fscrypt_destroy(void) -{ - struct fscrypt_ctx *pos, *n; - - list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list) - kmem_cache_free(fscrypt_ctx_cachep, pos); - INIT_LIST_HEAD(&fscrypt_free_ctxs); - mempool_destroy(fscrypt_bounce_page_pool); - fscrypt_bounce_page_pool = NULL; -} - /** * fscrypt_initialize() - allocate major buffers for fs encryption. * @cop_flags: fscrypt operations flags @@ -414,11 +341,11 @@ static void fscrypt_destroy(void) * We only call this when we start accessing encrypted files, since it * results in memory getting allocated that wouldn't otherwise be used. * - * Return: Zero on success, non-zero otherwise. + * Return: 0 on success; -errno on failure */ int fscrypt_initialize(unsigned int cop_flags) { - int i, res = -ENOMEM; + int err = 0; /* No need to allocate a bounce page pool if this FS won't use it. 
*/ if (cop_flags & FS_CFLG_OWN_PAGES) @@ -426,29 +353,18 @@ int fscrypt_initialize(unsigned int cop_flags) mutex_lock(&fscrypt_init_mutex); if (fscrypt_bounce_page_pool) - goto already_initialized; - - for (i = 0; i < num_prealloc_crypto_ctxs; i++) { - struct fscrypt_ctx *ctx; - - ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS); - if (!ctx) - goto fail; - list_add(&ctx->free_list, &fscrypt_free_ctxs); - } + goto out_unlock; + err = -ENOMEM; fscrypt_bounce_page_pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0); if (!fscrypt_bounce_page_pool) - goto fail; + goto out_unlock; -already_initialized: - mutex_unlock(&fscrypt_init_mutex); - return 0; -fail: - fscrypt_destroy(); + err = 0; +out_unlock: mutex_unlock(&fscrypt_init_mutex); - return res; + return err; } void fscrypt_msg(const struct inode *inode, const char *level, @@ -494,13 +410,9 @@ static int __init fscrypt_init(void) if (!fscrypt_read_workqueue) goto fail; - fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT); - if (!fscrypt_ctx_cachep) - goto fail_free_queue; - fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT); if (!fscrypt_info_cachep) - goto fail_free_ctx; + goto fail_free_queue; err = fscrypt_init_keyring(); if (err) @@ -510,8 +422,6 @@ static int __init fscrypt_init(void) fail_free_info: kmem_cache_destroy(fscrypt_info_cachep); -fail_free_ctx: - kmem_cache_destroy(fscrypt_ctx_cachep); fail_free_queue: destroy_workqueue(fscrypt_read_workqueue); fail: diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index e84efc01512e..130b50e5a011 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -163,11 +163,8 @@ struct fscrypt_info { /* The actual crypto transform used for encryption and decryption */ struct crypto_skcipher *ci_ctfm; - /* - * Cipher for ESSIV IV generation. Only set for CBC contents - * encryption, otherwise is NULL. - */ - struct crypto_cipher *ci_essiv_tfm; + /* True if the key should be freed when this fscrypt_info is freed */ + bool ci_owns_key; /* * Encryption mode used for this inode. It corresponds to either the @@ -209,8 +206,6 @@ typedef enum { FS_ENCRYPT, } fscrypt_direction_t; -#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 - static inline bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) { @@ -289,7 +284,8 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, */ #define HKDF_CONTEXT_KEY_IDENTIFIER 1 #define HKDF_CONTEXT_PER_FILE_KEY 2 -#define HKDF_CONTEXT_PER_MODE_KEY 3 +#define HKDF_CONTEXT_DIRECT_KEY 3 +#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, @@ -386,8 +382,14 @@ struct fscrypt_master_key { struct list_head mk_decrypted_inodes; spinlock_t mk_decrypted_inodes_lock; - /* Per-mode tfms for DIRECT_KEY policies, allocated on-demand */ - struct crypto_skcipher *mk_mode_keys[__FSCRYPT_MODE_MAX + 1]; + /* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */ + struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1]; + + /* + * Crypto API transforms for filesystem-layer implementation of + * IV_INO_LBLK_64 policies, allocated on-demand. 
+ */ + struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1]; } __randomize_layout; @@ -443,8 +445,7 @@ struct fscrypt_mode { const char *cipher_str; int keysize; int ivsize; - bool logged_impl_name; - bool needs_essiv; + int logged_impl_name; }; static inline bool diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index c34fa7c61b43..040df1f5e1c8 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -43,8 +43,10 @@ static void free_master_key(struct fscrypt_master_key *mk) wipe_master_key_secret(&mk->mk_secret); - for (i = 0; i < ARRAY_SIZE(mk->mk_mode_keys); i++) - crypto_free_skcipher(mk->mk_mode_keys[i]); + for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { + crypto_free_skcipher(mk->mk_direct_tfms[i]); + crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]); + } key_put(mk->mk_users); kzfree(mk); diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index d71c2d6dd162..f577bb6613f9 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -8,15 +8,11 @@ * Heavily modified since then. */ -#include <crypto/aes.h> -#include <crypto/sha.h> #include <crypto/skcipher.h> #include <linux/key.h> #include "fscrypt_private.h" -static struct crypto_shash *essiv_hash_tfm; - static struct fscrypt_mode available_modes[] = { [FSCRYPT_MODE_AES_256_XTS] = { .friendly_name = "AES-256-XTS", @@ -31,11 +27,10 @@ static struct fscrypt_mode available_modes[] = { .ivsize = 16, }, [FSCRYPT_MODE_AES_128_CBC] = { - .friendly_name = "AES-128-CBC", - .cipher_str = "cbc(aes)", + .friendly_name = "AES-128-CBC-ESSIV", + .cipher_str = "essiv(cbc(aes),sha256)", .keysize = 16, .ivsize = 16, - .needs_essiv = true, }, [FSCRYPT_MODE_AES_128_CTS] = { .friendly_name = "AES-128-CTS-CBC", @@ -86,15 +81,13 @@ struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, mode->cipher_str, PTR_ERR(tfm)); return tfm; } - if (unlikely(!mode->logged_impl_name)) { + if (!xchg(&mode->logged_impl_name, 1)) { /* * fscrypt performance can vary greatly depending on which * crypto algorithm implementation is used. Help people debug * performance problems by logging the ->cra_driver_name the - * first time a mode is used. Note that multiple threads can - * race here, but it doesn't really matter. + * first time a mode is used. 
*/ - mode->logged_impl_name = true; pr_info("fscrypt: %s using implementation \"%s\"\n", mode->friendly_name, crypto_skcipher_alg(tfm)->base.cra_driver_name); @@ -111,131 +104,64 @@ err_free_tfm: return ERR_PTR(err); } -static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt) -{ - struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm); - - /* init hash transform on demand */ - if (unlikely(!tfm)) { - struct crypto_shash *prev_tfm; - - tfm = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(tfm)) { - if (PTR_ERR(tfm) == -ENOENT) { - fscrypt_warn(NULL, - "Missing crypto API support for SHA-256"); - return -ENOPKG; - } - fscrypt_err(NULL, - "Error allocating SHA-256 transform: %ld", - PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm); - if (prev_tfm) { - crypto_free_shash(tfm); - tfm = prev_tfm; - } - } - - { - SHASH_DESC_ON_STACK(desc, tfm); - desc->tfm = tfm; - - return crypto_shash_digest(desc, key, keysize, salt); - } -} - -static int init_essiv_generator(struct fscrypt_info *ci, const u8 *raw_key, - int keysize) -{ - int err; - struct crypto_cipher *essiv_tfm; - u8 salt[SHA256_DIGEST_SIZE]; - - if (WARN_ON(ci->ci_mode->ivsize != AES_BLOCK_SIZE)) - return -EINVAL; - - essiv_tfm = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(essiv_tfm)) - return PTR_ERR(essiv_tfm); - - ci->ci_essiv_tfm = essiv_tfm; - - err = derive_essiv_salt(raw_key, keysize, salt); - if (err) - goto out; - - /* - * Using SHA256 to derive the salt/key will result in AES-256 being - * used for IV generation. File contents encryption will still use the - * configured keysize (AES-128) nevertheless. - */ - err = crypto_cipher_setkey(essiv_tfm, salt, sizeof(salt)); - if (err) - goto out; - -out: - memzero_explicit(salt, sizeof(salt)); - return err; -} - -/* Given the per-file key, set up the file's crypto transform object(s) */ +/* Given the per-file key, set up the file's crypto transform object */ int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) { - struct fscrypt_mode *mode = ci->ci_mode; - struct crypto_skcipher *ctfm; - int err; - - ctfm = fscrypt_allocate_skcipher(mode, derived_key, ci->ci_inode); - if (IS_ERR(ctfm)) - return PTR_ERR(ctfm); + struct crypto_skcipher *tfm; - ci->ci_ctfm = ctfm; + tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); - if (mode->needs_essiv) { - err = init_essiv_generator(ci, derived_key, mode->keysize); - if (err) { - fscrypt_warn(ci->ci_inode, - "Error initializing ESSIV generator: %d", - err); - return err; - } - } + ci->ci_ctfm = tfm; + ci->ci_owns_key = true; return 0; } static int setup_per_mode_key(struct fscrypt_info *ci, - struct fscrypt_master_key *mk) + struct fscrypt_master_key *mk, + struct crypto_skcipher **tfms, + u8 hkdf_context, bool include_fs_uuid) { + const struct inode *inode = ci->ci_inode; + const struct super_block *sb = inode->i_sb; struct fscrypt_mode *mode = ci->ci_mode; u8 mode_num = mode - available_modes; struct crypto_skcipher *tfm, *prev_tfm; u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; + u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; + unsigned int hkdf_infolen = 0; int err; - if (WARN_ON(mode_num >= ARRAY_SIZE(mk->mk_mode_keys))) + if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX)) return -EINVAL; /* pairs with cmpxchg() below */ - tfm = READ_ONCE(mk->mk_mode_keys[mode_num]); + tfm = READ_ONCE(tfms[mode_num]); if (likely(tfm != NULL)) goto done; BUILD_BUG_ON(sizeof(mode_num) != 1); + BUILD_BUG_ON(sizeof(sb->s_uuid) != 16); + 
BUILD_BUG_ON(sizeof(hkdf_info) != 17); + hkdf_info[hkdf_infolen++] = mode_num; + if (include_fs_uuid) { + memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid, + sizeof(sb->s_uuid)); + hkdf_infolen += sizeof(sb->s_uuid); + } err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, - HKDF_CONTEXT_PER_MODE_KEY, - &mode_num, sizeof(mode_num), + hkdf_context, hkdf_info, hkdf_infolen, mode_key, mode->keysize); if (err) return err; - tfm = fscrypt_allocate_skcipher(mode, mode_key, ci->ci_inode); + tfm = fscrypt_allocate_skcipher(mode, mode_key, inode); memzero_explicit(mode_key, mode->keysize); if (IS_ERR(tfm)) return PTR_ERR(tfm); /* pairs with READ_ONCE() above */ - prev_tfm = cmpxchg(&mk->mk_mode_keys[mode_num], NULL, tfm); + prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm); if (prev_tfm != NULL) { crypto_free_skcipher(tfm); tfm = prev_tfm; @@ -266,7 +192,19 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, ci->ci_mode->friendly_name); return -EINVAL; } - return setup_per_mode_key(ci, mk); + return setup_per_mode_key(ci, mk, mk->mk_direct_tfms, + HKDF_CONTEXT_DIRECT_KEY, false); + } else if (ci->ci_policy.v2.flags & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { + /* + * IV_INO_LBLK_64: encryption keys are derived from (master_key, + * mode_num, filesystem_uuid), and inode number is included in + * the IVs. This format is optimized for use with inline + * encryption hardware compliant with the UFS or eMMC standards. + */ + return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms, + HKDF_CONTEXT_IV_INO_LBLK_64_KEY, + true); } err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, @@ -388,13 +326,10 @@ static void put_crypt_info(struct fscrypt_info *ci) if (!ci) return; - if (ci->ci_direct_key) { + if (ci->ci_direct_key) fscrypt_put_direct_key(ci->ci_direct_key); - } else if ((ci->ci_ctfm != NULL || ci->ci_essiv_tfm != NULL) && - !fscrypt_is_direct_key_policy(&ci->ci_policy)) { + else if (ci->ci_owns_key) crypto_free_skcipher(ci->ci_ctfm); - crypto_free_cipher(ci->ci_essiv_tfm); - } key = ci->ci_master_key; if (key) { @@ -415,6 +350,7 @@ static void put_crypt_info(struct fscrypt_info *ci) key_invalidate(key); key_put(key); } + memzero_explicit(ci, sizeof(*ci)); kmem_cache_free(fscrypt_info_cachep, ci); } diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index ad1a36c370c3..5298ef22aa85 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -270,10 +270,6 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci, return -EINVAL; } - /* ESSIV implies 16-byte IVs which implies !DIRECT_KEY */ - if (WARN_ON(mode->needs_essiv)) - return -EINVAL; - dk = fscrypt_get_direct_key(ci, raw_master_key); if (IS_ERR(dk)) return PTR_ERR(dk); diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 4072ba644595..96f528071bed 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -29,6 +29,40 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1, return !memcmp(policy1, policy2, fscrypt_policy_size(policy1)); } +static bool supported_iv_ino_lblk_64_policy( + const struct fscrypt_policy_v2 *policy, + const struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + int ino_bits = 64, lblk_bits = 64; + + if (policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { + fscrypt_warn(inode, + "The DIRECT_KEY and IV_INO_LBLK_64 flags are mutually exclusive"); + return false; + } + /* + * It's unsafe to include inode numbers in the IVs if the filesystem can + * potentially renumber inodes, e.g. via filesystem shrinking. 
+ */ + if (!sb->s_cop->has_stable_inodes || + !sb->s_cop->has_stable_inodes(sb)) { + fscrypt_warn(inode, + "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't have stable inode numbers", + sb->s_id); + return false; + } + if (sb->s_cop->get_ino_and_lblk_bits) + sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); + if (ino_bits > 32 || lblk_bits > 32) { + fscrypt_warn(inode, + "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't use 32-bit inode and block numbers", + sb->s_id); + return false; + } + return true; +} + /** * fscrypt_supported_policy - check whether an encryption policy is supported * @@ -55,7 +89,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, return false; } - if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | + FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", policy->flags); @@ -83,6 +118,10 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, return false; } + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && + !supported_iv_ino_lblk_64_policy(policy, inode)) + return false; + if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) { fscrypt_warn(inode, @@ -220,10 +220,11 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) for (;;) { entry = xas_find_conflict(xas); + if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) + return entry; if (dax_entry_order(entry) < order) return XA_RETRY_ENTRY; - if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) || - !dax_is_locked(entry)) + if (!dax_is_locked(entry)) return entry; wq = dax_entry_waitqueue(xas, entry, &ewait.key); diff --git a/fs/dcache.c b/fs/dcache.c index e88cf0554e65..f7931b682a0d 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1319,7 +1319,7 @@ resume: if (!list_empty(&dentry->d_subdirs)) { spin_unlock(&this_parent->d_lock); - spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); + spin_release(&dentry->d_lock.dep_map, _RET_IP_); this_parent = dentry; spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); goto repeat; diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 87846aad594b..dede25247b81 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -420,20 +420,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n"); * This function creates a file in debugfs with the given name that * contains the value of the variable @value. If the @mode variable is so * set, it can be read from, and written to. - * - * This function will return a pointer to a dentry if it succeeds. This - * pointer must be passed to the debugfs_remove() function when the file is - * to be removed (no automatic cleanup happens if your module is unloaded, - * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be - * returned. - * - * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will - * be returned. 
*/ -struct dentry *debugfs_create_u8(const char *name, umode_t mode, - struct dentry *parent, u8 *value) +void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent, + u8 *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8, + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8, &fops_u8_ro, &fops_u8_wo); } EXPORT_SYMBOL_GPL(debugfs_create_u8); @@ -465,20 +456,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n"); * This function creates a file in debugfs with the given name that * contains the value of the variable @value. If the @mode variable is so * set, it can be read from, and written to. - * - * This function will return a pointer to a dentry if it succeeds. This - * pointer must be passed to the debugfs_remove() function when the file is - * to be removed (no automatic cleanup happens if your module is unloaded, - * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be - * returned. - * - * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will - * be returned. */ -struct dentry *debugfs_create_u16(const char *name, umode_t mode, - struct dentry *parent, u16 *value) +void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent, + u16 *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16, + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16, &fops_u16_ro, &fops_u16_wo); } EXPORT_SYMBOL_GPL(debugfs_create_u16); @@ -556,20 +538,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); * This function creates a file in debugfs with the given name that * contains the value of the variable @value. If the @mode variable is so * set, it can be read from, and written to. - * - * This function will return a pointer to a dentry if it succeeds. This - * pointer must be passed to the debugfs_remove() function when the file is - * to be removed (no automatic cleanup happens if your module is unloaded, - * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be - * returned. - * - * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will - * be returned. */ -struct dentry *debugfs_create_u64(const char *name, umode_t mode, - struct dentry *parent, u64 *value) +void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, + u64 *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64, + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64, &fops_u64_ro, &fops_u64_wo); } EXPORT_SYMBOL_GPL(debugfs_create_u64); @@ -660,10 +633,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_wo, NULL, debugfs_u64_set, "0x%016llx\n"); * @value: a pointer to the variable that the file should read to and write * from. */ -struct dentry *debugfs_create_x8(const char *name, umode_t mode, - struct dentry *parent, u8 *value) +void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, + u8 *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8, + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8, &fops_x8_ro, &fops_x8_wo); } EXPORT_SYMBOL_GPL(debugfs_create_x8); @@ -678,10 +651,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x8); * @value: a pointer to the variable that the file should read to and write * from. 
*/ -struct dentry *debugfs_create_x16(const char *name, umode_t mode, - struct dentry *parent, u16 *value) +void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, + u16 *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16, + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16, &fops_x16_ro, &fops_x16_wo); } EXPORT_SYMBOL_GPL(debugfs_create_x16); @@ -696,10 +669,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x16); * @value: a pointer to the variable that the file should read to and write * from. */ -struct dentry *debugfs_create_x32(const char *name, umode_t mode, - struct dentry *parent, u32 *value) +void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent, + u32 *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32, + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32, &fops_x32_ro, &fops_x32_wo); } EXPORT_SYMBOL_GPL(debugfs_create_x32); @@ -714,10 +687,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x32); * @value: a pointer to the variable that the file should read to and write * from. */ -struct dentry *debugfs_create_x64(const char *name, umode_t mode, - struct dentry *parent, u64 *value) +void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent, + u64 *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64, + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64, &fops_x64_ro, &fops_x64_wo); } EXPORT_SYMBOL_GPL(debugfs_create_x64); @@ -748,12 +721,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_wo, NULL, debugfs_size_t_set, "%llu\n"); * @value: a pointer to the variable that the file should read to and write * from. */ -struct dentry *debugfs_create_size_t(const char *name, umode_t mode, - struct dentry *parent, size_t *value) +void debugfs_create_size_t(const char *name, umode_t mode, + struct dentry *parent, size_t *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, - &fops_size_t, &fops_size_t_ro, - &fops_size_t_wo); + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_size_t, + &fops_size_t_ro, &fops_size_t_wo); } EXPORT_SYMBOL_GPL(debugfs_create_size_t); @@ -785,12 +757,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, * @value: a pointer to the variable that the file should read to and write * from. */ -struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, - struct dentry *parent, atomic_t *value) +void debugfs_create_atomic_t(const char *name, umode_t mode, + struct dentry *parent, atomic_t *value) { - return debugfs_create_mode_unsafe(name, mode, parent, value, - &fops_atomic_t, &fops_atomic_t_ro, - &fops_atomic_t_wo); + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_atomic_t, + &fops_atomic_t_ro, &fops_atomic_t_wo); } EXPORT_SYMBOL_GPL(debugfs_create_atomic_t); diff --git a/fs/direct-io.c b/fs/direct-io.c index ae196784f487..9329ced91f1d 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -241,9 +241,8 @@ void dio_warn_stale_pagecache(struct file *filp) } } -/** +/* * dio_complete() - called when all DIO BIO I/O has been completed - * @offset: the byte offset in the file of the completed operation * * This drops i_dio_count, lets interested parties know that a DIO operation * has completed, and calculates the resulting return code for the operation. 
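The debugfs conversions above drop the struct dentry * return value from the simple integer helpers (debugfs_create_u8/u16/u64, the x* hex variants, size_t and atomic_t), so callers of these variants no longer have anything to check or store. A short usage sketch under assumed names ("example", example_hits and example_mask are illustrative, not from the patch):

	#include <linux/debugfs.h>

	static u64 example_hits;	/* hypothetical counters for illustration */
	static u32 example_mask;
	static struct dentry *example_dir;

	static void example_debugfs_setup(void)
	{
		example_dir = debugfs_create_dir("example", NULL);
		/* With this series the value helpers return void; debugfs errors
		 * are not propagated, so there is nothing to check here. */
		debugfs_create_u64("hits", 0444, example_dir, &example_hits);
		debugfs_create_x32("mask", 0644, example_dir, &example_mask);
	}
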
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 18426f4855f1..e23752d9a79f 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -128,13 +128,20 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry, struct inode *inode) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); - struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir); struct dentry *lower_dir_dentry; + struct inode *lower_dir_inode; int rc; - dget(lower_dentry); - lower_dir_dentry = lock_parent(lower_dentry); - rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL); + lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent); + lower_dir_inode = d_inode(lower_dir_dentry); + inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT); + dget(lower_dentry); // don't even try to make the lower negative + if (lower_dentry->d_parent != lower_dir_dentry) + rc = -EINVAL; + else if (d_unhashed(lower_dentry)) + rc = -EINVAL; + else + rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL); if (rc) { printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); goto out_unlock; @@ -142,10 +149,11 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry, fsstack_copy_attr_times(dir, lower_dir_inode); set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink); inode->i_ctime = dir->i_ctime; - d_drop(dentry); out_unlock: - unlock_dir(lower_dir_dentry); dput(lower_dentry); + inode_unlock(lower_dir_inode); + if (!rc) + d_drop(dentry); return rc; } @@ -311,9 +319,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode) static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry, struct dentry *lower_dentry) { - struct inode *inode, *lower_inode = d_inode(lower_dentry); + struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent); + struct inode *inode, *lower_inode; struct ecryptfs_dentry_info *dentry_info; - struct vfsmount *lower_mnt; int rc = 0; dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL); @@ -322,16 +330,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry, return ERR_PTR(-ENOMEM); } - lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); fsstack_copy_attr_atime(d_inode(dentry->d_parent), - d_inode(lower_dentry->d_parent)); + d_inode(path->dentry)); BUG_ON(!d_count(lower_dentry)); ecryptfs_set_dentry_private(dentry, dentry_info); - dentry_info->lower_path.mnt = lower_mnt; + dentry_info->lower_path.mnt = mntget(path->mnt); dentry_info->lower_path.dentry = lower_dentry; - if (d_really_is_negative(lower_dentry)) { + /* + * negative dentry can go positive under us here - its parent is not + * locked. That's OK and that could happen just as we return from + * ecryptfs_lookup() anyway. Just need to be careful and fetch + * ->d_inode only once - it's not stable here. 
+ */ + lower_inode = READ_ONCE(lower_dentry->d_inode); + + if (!lower_inode) { /* We want to add because we couldn't find in lower */ d_add(dentry, NULL); return NULL; @@ -512,22 +527,30 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry) { struct dentry *lower_dentry; struct dentry *lower_dir_dentry; + struct inode *lower_dir_inode; int rc; lower_dentry = ecryptfs_dentry_to_lower(dentry); - dget(dentry); - lower_dir_dentry = lock_parent(lower_dentry); - dget(lower_dentry); - rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry); - dput(lower_dentry); - if (!rc && d_really_is_positive(dentry)) + lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent); + lower_dir_inode = d_inode(lower_dir_dentry); + + inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT); + dget(lower_dentry); // don't even try to make the lower negative + if (lower_dentry->d_parent != lower_dir_dentry) + rc = -EINVAL; + else if (d_unhashed(lower_dentry)) + rc = -EINVAL; + else + rc = vfs_rmdir(lower_dir_inode, lower_dentry); + if (!rc) { clear_nlink(d_inode(dentry)); - fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry)); - set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink); - unlock_dir(lower_dir_dentry); + fsstack_copy_attr_times(dir, lower_dir_inode); + set_nlink(dir, lower_dir_inode->i_nlink); + } + dput(lower_dentry); + inode_unlock(lower_dir_inode); if (!rc) d_drop(dentry); - dput(dentry); return rc; } @@ -565,20 +588,22 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct dentry *lower_new_dentry; struct dentry *lower_old_dir_dentry; struct dentry *lower_new_dir_dentry; - struct dentry *trap = NULL; + struct dentry *trap; struct inode *target_inode; if (flags) return -EINVAL; + lower_old_dir_dentry = ecryptfs_dentry_to_lower(old_dentry->d_parent); + lower_new_dir_dentry = ecryptfs_dentry_to_lower(new_dentry->d_parent); + lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); - dget(lower_old_dentry); - dget(lower_new_dentry); - lower_old_dir_dentry = dget_parent(lower_old_dentry); - lower_new_dir_dentry = dget_parent(lower_new_dentry); + target_inode = d_inode(new_dentry); + trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); + dget(lower_new_dentry); rc = -EINVAL; if (lower_old_dentry->d_parent != lower_old_dir_dentry) goto out_lock; @@ -606,11 +631,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir != old_dir) fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry)); out_lock: - unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); - dput(lower_new_dir_dentry); - dput(lower_old_dir_dentry); dput(lower_new_dentry); - dput(lower_old_dentry); + unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); return rc; } diff --git a/fs/exec.c b/fs/exec.c index 555e93c7dec8..c27231234764 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1015,7 +1015,7 @@ static int exec_mmap(struct mm_struct *mm) /* Notify parent that we're no longer interested in the old VM */ tsk = current; old_mm = current->mm; - mm_release(tsk, old_mm); + exec_mm_release(tsk, old_mm); if (old_mm) { sync_mm_rss(old_mm); diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 09bc68708d28..2dd55b172d57 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -519,26 +519,33 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, * inode is actually connected to the parent. 
*/ err = exportfs_get_name(mnt, target_dir, nbuf, result); - if (!err) { - inode_lock(target_dir->d_inode); - nresult = lookup_one_len(nbuf, target_dir, - strlen(nbuf)); - inode_unlock(target_dir->d_inode); - if (!IS_ERR(nresult)) { - if (nresult->d_inode) { - dput(result); - result = nresult; - } else - dput(nresult); - } + if (err) { + dput(target_dir); + goto err_result; } + inode_lock(target_dir->d_inode); + nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf)); + if (!IS_ERR(nresult)) { + if (unlikely(nresult->d_inode != result->d_inode)) { + dput(nresult); + nresult = ERR_PTR(-ESTALE); + } + } + inode_unlock(target_dir->d_inode); /* * At this point we are done with the parent, but it's pinned * by the child dentry anyway. */ dput(target_dir); + if (IS_ERR(nresult)) { + err = PTR_ERR(nresult); + goto err_result; + } + dput(result); + result = nresult; + /* * And finally make sure the dentry is actually acceptable * to NFSD. diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index cbb5ca830e57..ef42ab040905 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -106,3 +106,20 @@ config EXT4_DEBUG If you select Y here, then you will be able to turn on debugging with a command such as: echo 1 > /sys/module/ext4/parameters/mballoc_debug + +config EXT4_KUNIT_TESTS + bool "KUnit tests for ext4" + select EXT4_FS + depends on KUNIT + help + This builds the ext4 KUnit tests. + + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running KUnit test harness and are not for inclusion into a production + build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index b17ddc229ac5..840b91d040f1 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile @@ -13,4 +13,5 @@ ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \ ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o +ext4-$(CONFIG_EXT4_KUNIT_TESTS) += inode-test.o ext4-$(CONFIG_FS_VERITY) += verity.o diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 61987c106511..f8578caba40d 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1682,6 +1682,7 @@ static inline bool ext4_verity_in_progress(struct inode *inode) #define EXT4_FEATURE_COMPAT_RESIZE_INODE 0x0010 #define EXT4_FEATURE_COMPAT_DIR_INDEX 0x0020 #define EXT4_FEATURE_COMPAT_SPARSE_SUPER2 0x0200 +#define EXT4_FEATURE_COMPAT_STABLE_INODES 0x0800 #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 @@ -1783,6 +1784,7 @@ EXT4_FEATURE_COMPAT_FUNCS(xattr, EXT_ATTR) EXT4_FEATURE_COMPAT_FUNCS(resize_inode, RESIZE_INODE) EXT4_FEATURE_COMPAT_FUNCS(dir_index, DIR_INDEX) EXT4_FEATURE_COMPAT_FUNCS(sparse_super2, SPARSE_SUPER2) +EXT4_FEATURE_COMPAT_FUNCS(stable_inodes, STABLE_INODES) EXT4_FEATURE_RO_COMPAT_FUNCS(sparse_super, SPARSE_SUPER) EXT4_FEATURE_RO_COMPAT_FUNCS(large_file, LARGE_FILE) diff --git a/fs/ext4/inode-test.c b/fs/ext4/inode-test.c new file mode 100644 index 000000000000..92a9da1774aa --- /dev/null +++ b/fs/ext4/inode-test.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test of ext4 inode that verify the seconds part of [a/c/m] + * timestamps in ext4 inode structs are decoded correctly. 
+ */ + +#include <kunit/test.h> +#include <linux/kernel.h> +#include <linux/time64.h> + +#include "ext4.h" + +/* + * For constructing the nonnegative timestamp lower bound value. + * binary: 00000000 00000000 00000000 00000000 + */ +#define LOWER_MSB_0 0L +/* + * For constructing the nonnegative timestamp upper bound value. + * binary: 01111111 11111111 11111111 11111111 + * + */ +#define UPPER_MSB_0 0x7fffffffL +/* + * For constructing the negative timestamp lower bound value. + * binary: 10000000 00000000 00000000 00000000 + */ +#define LOWER_MSB_1 (-0x80000000L) +/* + * For constructing the negative timestamp upper bound value. + * binary: 11111111 11111111 11111111 11111111 + */ +#define UPPER_MSB_1 (-1L) +/* + * Upper bound for nanoseconds value supported by the encoding. + * binary: 00111111 11111111 11111111 11111111 + */ +#define MAX_NANOSECONDS ((1L << 30) - 1) + +#define CASE_NAME_FORMAT "%s: msb:%x lower_bound:%x extra_bits: %x" + +#define LOWER_BOUND_NEG_NO_EXTRA_BITS_CASE\ + "1901-12-13 Lower bound of 32bit < 0 timestamp, no extra bits" +#define UPPER_BOUND_NEG_NO_EXTRA_BITS_CASE\ + "1969-12-31 Upper bound of 32bit < 0 timestamp, no extra bits" +#define LOWER_BOUND_NONNEG_NO_EXTRA_BITS_CASE\ + "1970-01-01 Lower bound of 32bit >=0 timestamp, no extra bits" +#define UPPER_BOUND_NONNEG_NO_EXTRA_BITS_CASE\ + "2038-01-19 Upper bound of 32bit >=0 timestamp, no extra bits" +#define LOWER_BOUND_NEG_LO_1_CASE\ + "2038-01-19 Lower bound of 32bit <0 timestamp, lo extra sec bit on" +#define UPPER_BOUND_NEG_LO_1_CASE\ + "2106-02-07 Upper bound of 32bit <0 timestamp, lo extra sec bit on" +#define LOWER_BOUND_NONNEG_LO_1_CASE\ + "2106-02-07 Lower bound of 32bit >=0 timestamp, lo extra sec bit on" +#define UPPER_BOUND_NONNEG_LO_1_CASE\ + "2174-02-25 Upper bound of 32bit >=0 timestamp, lo extra sec bit on" +#define LOWER_BOUND_NEG_HI_1_CASE\ + "2174-02-25 Lower bound of 32bit <0 timestamp, hi extra sec bit on" +#define UPPER_BOUND_NEG_HI_1_CASE\ + "2242-03-16 Upper bound of 32bit <0 timestamp, hi extra sec bit on" +#define LOWER_BOUND_NONNEG_HI_1_CASE\ + "2242-03-16 Lower bound of 32bit >=0 timestamp, hi extra sec bit on" +#define UPPER_BOUND_NONNEG_HI_1_CASE\ + "2310-04-04 Upper bound of 32bit >=0 timestamp, hi extra sec bit on" +#define UPPER_BOUND_NONNEG_HI_1_NS_1_CASE\ + "2310-04-04 Upper bound of 32bit>=0 timestamp, hi extra sec bit 1. 1 ns" +#define LOWER_BOUND_NONNEG_HI_1_NS_MAX_CASE\ + "2378-04-22 Lower bound of 32bit>= timestamp. Extra sec bits 1. Max ns" +#define LOWER_BOUND_NONNEG_EXTRA_BITS_1_CASE\ + "2378-04-22 Lower bound of 32bit >=0 timestamp. All extra sec bits on" +#define UPPER_BOUND_NONNEG_EXTRA_BITS_1_CASE\ + "2446-05-10 Upper bound of 32bit >=0 timestamp. All extra sec bits on" + +struct timestamp_expectation { + const char *test_case_name; + struct timespec64 expected; + u32 extra_bits; + bool msb_set; + bool lower_bound; +}; + +static time64_t get_32bit_time(const struct timestamp_expectation * const test) +{ + if (test->msb_set) { + if (test->lower_bound) + return LOWER_MSB_1; + + return UPPER_MSB_1; + } + + if (test->lower_bound) + return LOWER_MSB_0; + return UPPER_MSB_0; +} + + +/* + * Test data is derived from the table in the Inode Timestamps section of + * Documentation/filesystems/ext4/inodes.rst. 
+ */ +static void inode_test_xtimestamp_decoding(struct kunit *test) +{ + const struct timestamp_expectation test_data[] = { + { + .test_case_name = LOWER_BOUND_NEG_NO_EXTRA_BITS_CASE, + .msb_set = true, + .lower_bound = true, + .extra_bits = 0, + .expected = {.tv_sec = -0x80000000LL, .tv_nsec = 0L}, + }, + + { + .test_case_name = UPPER_BOUND_NEG_NO_EXTRA_BITS_CASE, + .msb_set = true, + .lower_bound = false, + .extra_bits = 0, + .expected = {.tv_sec = -1LL, .tv_nsec = 0L}, + }, + + { + .test_case_name = LOWER_BOUND_NONNEG_NO_EXTRA_BITS_CASE, + .msb_set = false, + .lower_bound = true, + .extra_bits = 0, + .expected = {0LL, 0L}, + }, + + { + .test_case_name = UPPER_BOUND_NONNEG_NO_EXTRA_BITS_CASE, + .msb_set = false, + .lower_bound = false, + .extra_bits = 0, + .expected = {.tv_sec = 0x7fffffffLL, .tv_nsec = 0L}, + }, + + { + .test_case_name = LOWER_BOUND_NEG_LO_1_CASE, + .msb_set = true, + .lower_bound = true, + .extra_bits = 1, + .expected = {.tv_sec = 0x80000000LL, .tv_nsec = 0L}, + }, + + { + .test_case_name = UPPER_BOUND_NEG_LO_1_CASE, + .msb_set = true, + .lower_bound = false, + .extra_bits = 1, + .expected = {.tv_sec = 0xffffffffLL, .tv_nsec = 0L}, + }, + + { + .test_case_name = LOWER_BOUND_NONNEG_LO_1_CASE, + .msb_set = false, + .lower_bound = true, + .extra_bits = 1, + .expected = {.tv_sec = 0x100000000LL, .tv_nsec = 0L}, + }, + + { + .test_case_name = UPPER_BOUND_NONNEG_LO_1_CASE, + .msb_set = false, + .lower_bound = false, + .extra_bits = 1, + .expected = {.tv_sec = 0x17fffffffLL, .tv_nsec = 0L}, + }, + + { + .test_case_name = LOWER_BOUND_NEG_HI_1_CASE, + .msb_set = true, + .lower_bound = true, + .extra_bits = 2, + .expected = {.tv_sec = 0x180000000LL, .tv_nsec = 0L}, + }, + + { + .test_case_name = UPPER_BOUND_NEG_HI_1_CASE, + .msb_set = true, + .lower_bound = false, + .extra_bits = 2, + .expected = {.tv_sec = 0x1ffffffffLL, .tv_nsec = 0L}, + }, + + { + .test_case_name = LOWER_BOUND_NONNEG_HI_1_CASE, + .msb_set = false, + .lower_bound = true, + .extra_bits = 2, + .expected = {.tv_sec = 0x200000000LL, .tv_nsec = 0L}, + }, + + { + .test_case_name = UPPER_BOUND_NONNEG_HI_1_CASE, + .msb_set = false, + .lower_bound = false, + .extra_bits = 2, + .expected = {.tv_sec = 0x27fffffffLL, .tv_nsec = 0L}, + }, + + { + .test_case_name = UPPER_BOUND_NONNEG_HI_1_NS_1_CASE, + .msb_set = false, + .lower_bound = false, + .extra_bits = 6, + .expected = {.tv_sec = 0x27fffffffLL, .tv_nsec = 1L}, + }, + + { + .test_case_name = LOWER_BOUND_NONNEG_HI_1_NS_MAX_CASE, + .msb_set = false, + .lower_bound = true, + .extra_bits = 0xFFFFFFFF, + .expected = {.tv_sec = 0x300000000LL, + .tv_nsec = MAX_NANOSECONDS}, + }, + + { + .test_case_name = LOWER_BOUND_NONNEG_EXTRA_BITS_1_CASE, + .msb_set = false, + .lower_bound = true, + .extra_bits = 3, + .expected = {.tv_sec = 0x300000000LL, .tv_nsec = 0L}, + }, + + { + .test_case_name = UPPER_BOUND_NONNEG_EXTRA_BITS_1_CASE, + .msb_set = false, + .lower_bound = false, + .extra_bits = 3, + .expected = {.tv_sec = 0x37fffffffLL, .tv_nsec = 0L}, + } + }; + + struct timespec64 timestamp; + int i; + + for (i = 0; i < ARRAY_SIZE(test_data); ++i) { + timestamp.tv_sec = get_32bit_time(&test_data[i]); + ext4_decode_extra_time(×tamp, + cpu_to_le32(test_data[i].extra_bits)); + + KUNIT_EXPECT_EQ_MSG(test, + test_data[i].expected.tv_sec, + timestamp.tv_sec, + CASE_NAME_FORMAT, + test_data[i].test_case_name, + test_data[i].msb_set, + test_data[i].lower_bound, + test_data[i].extra_bits); + KUNIT_EXPECT_EQ_MSG(test, + test_data[i].expected.tv_nsec, + timestamp.tv_nsec, + CASE_NAME_FORMAT, + 
test_data[i].test_case_name, + test_data[i].msb_set, + test_data[i].lower_bound, + test_data[i].extra_bits); + } +} + +static struct kunit_case ext4_inode_test_cases[] = { + KUNIT_CASE(inode_test_xtimestamp_decoding), + {} +}; + +static struct kunit_suite ext4_inode_test_suite = { + .name = "ext4_inode_test", + .test_cases = ext4_inode_test_cases, +}; + +kunit_test_suite(ext4_inode_test_suite); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 310e4abd9aca..28f28de0c1b6 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5385,12 +5385,15 @@ int ext4_getattr(const struct path *path, struct kstat *stat, stat->attributes |= STATX_ATTR_IMMUTABLE; if (flags & EXT4_NODUMP_FL) stat->attributes |= STATX_ATTR_NODUMP; + if (flags & EXT4_VERITY_FL) + stat->attributes |= STATX_ATTR_VERITY; stat->attributes_mask |= (STATX_ATTR_APPEND | STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED | STATX_ATTR_IMMUTABLE | - STATX_ATTR_NODUMP); + STATX_ATTR_NODUMP | + STATX_ATTR_VERITY); generic_fillattr(inode, stat); return 0; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b205112ca051..c54019979e80 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1345,6 +1345,18 @@ static bool ext4_dummy_context(struct inode *inode) return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb)); } +static bool ext4_has_stable_inodes(struct super_block *sb) +{ + return ext4_has_feature_stable_inodes(sb); +} + +static void ext4_get_ino_and_lblk_bits(struct super_block *sb, + int *ino_bits_ret, int *lblk_bits_ret) +{ + *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count); + *lblk_bits_ret = 8 * sizeof(ext4_lblk_t); +} + static const struct fscrypt_operations ext4_cryptops = { .key_prefix = "ext4:", .get_context = ext4_get_context, @@ -1352,6 +1364,8 @@ static const struct fscrypt_operations ext4_cryptops = { .dummy_context = ext4_dummy_context, .empty_dir = ext4_empty_dir, .max_namelen = EXT4_NAME_LEN, + .has_stable_inodes = ext4_has_stable_inodes, + .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits, }; #endif diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 29bc0a542759..6a2e5b7d8fc7 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -726,11 +726,14 @@ int f2fs_getattr(const struct path *path, struct kstat *stat, stat->attributes |= STATX_ATTR_IMMUTABLE; if (flags & F2FS_NODUMP_FL) stat->attributes |= STATX_ATTR_NODUMP; + if (IS_VERITY(inode)) + stat->attributes |= STATX_ATTR_VERITY; stat->attributes_mask |= (STATX_ATTR_APPEND | STATX_ATTR_ENCRYPTED | STATX_ATTR_IMMUTABLE | - STATX_ATTR_NODUMP); + STATX_ATTR_NODUMP | + STATX_ATTR_VERITY); generic_fillattr(inode, stat); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 808709581481..2c997f94a3b2 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1771,7 +1771,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, return -EIO; } trace_f2fs_issue_reset_zone(bdev, blkstart); - return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS); + return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, + sector, nr_sects, GFP_NOFS); } /* For conventional zones, use regular discard if supported */ diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 1443cee15863..197ad6b314de 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2308,13 +2308,27 @@ static bool f2fs_dummy_context(struct inode *inode) return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode)); } +static bool f2fs_has_stable_inodes(struct super_block *sb) +{ + return true; +} + +static void f2fs_get_ino_and_lblk_bits(struct super_block *sb, + int *ino_bits_ret, int *lblk_bits_ret) +{ + *ino_bits_ret = 8 
* sizeof(nid_t); + *lblk_bits_ret = 8 * sizeof(block_t); +} + static const struct fscrypt_operations f2fs_cryptops = { - .key_prefix = "f2fs:", - .get_context = f2fs_get_context, - .set_context = f2fs_set_context, - .dummy_context = f2fs_dummy_context, - .empty_dir = f2fs_empty_dir, - .max_namelen = F2FS_NAME_LEN, + .key_prefix = "f2fs:", + .get_context = f2fs_get_context, + .set_context = f2fs_set_context, + .dummy_context = f2fs_dummy_context, + .empty_dir = f2fs_empty_dir, + .max_namelen = F2FS_NAME_LEN, + .has_stable_inodes = f2fs_has_stable_inodes, + .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits, }; #endif @@ -2857,15 +2871,21 @@ static int init_percpu_info(struct f2fs_sb_info *sbi) } #ifdef CONFIG_BLK_DEV_ZONED +static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx, + void *data) +{ + struct f2fs_dev_info *dev = data; + + if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) + set_bit(idx, dev->blkz_seq); + return 0; +} + static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) { struct block_device *bdev = FDEV(devi).bdev; sector_t nr_sectors = bdev->bd_part->nr_sects; - sector_t sector = 0; - struct blk_zone *zones; - unsigned int i, nr_zones; - unsigned int n = 0; - int err = -EIO; + int ret; if (!f2fs_sb_has_blkzoned(sbi)) return 0; @@ -2890,38 +2910,13 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) if (!FDEV(devi).blkz_seq) return -ENOMEM; -#define F2FS_REPORT_NR_ZONES 4096 - - zones = f2fs_kzalloc(sbi, - array_size(F2FS_REPORT_NR_ZONES, - sizeof(struct blk_zone)), - GFP_KERNEL); - if (!zones) - return -ENOMEM; - /* Get block zones type */ - while (zones && sector < nr_sectors) { - - nr_zones = F2FS_REPORT_NR_ZONES; - err = blkdev_report_zones(bdev, sector, zones, &nr_zones); - if (err) - break; - if (!nr_zones) { - err = -EIO; - break; - } - - for (i = 0; i < nr_zones; i++) { - if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL) - set_bit(n, FDEV(devi).blkz_seq); - sector += zones[i].len; - n++; - } - } - - kvfree(zones); + ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb, + &FDEV(devi)); + if (ret < 0) + return ret; - return err; + return 0; } #endif diff --git a/fs/fcntl.c b/fs/fcntl.c index 3d40771e8e7c..41b6438bd2d9 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -261,7 +261,7 @@ static int f_getowner_uids(struct file *filp, unsigned long arg) static bool rw_hint_valid(enum rw_hint hint) { switch (hint) { - case RWF_WRITE_LIFE_NOT_SET: + case RWH_WRITE_LIFE_NOT_SET: case RWH_WRITE_LIFE_NONE: case RWH_WRITE_LIFE_SHORT: case RWH_WRITE_LIFE_MEDIUM: diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index e88421d9a48d..335607b8c5c0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -576,10 +576,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc, spin_unlock(&inode->i_lock); /* - * A dying wb indicates that the memcg-blkcg mapping has changed - * and a new wb is already serving the memcg. Switch immediately. + * A dying wb indicates that either the blkcg associated with the + * memcg changed or the associated memcg is dying. In the first + * case, a replacement wb should already be available and we should + * refresh the wb immediately. In the second case, trying to + * refresh will keep failing. 
*/ - if (unlikely(wb_dying(wbc->wb))) + if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css))) inode_switch_wbs(inode, wbc->wb_id); } EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode); @@ -905,7 +908,7 @@ restart: * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs * @bdi_id: target bdi id * @memcg_id: target memcg css id - * @nr_pages: number of pages to write, 0 for best-effort dirty flushing + * @nr: number of pages to write, 0 for best-effort dirty flushing * @reason: reason why some writeback work initiated * @done: target wb_completion * diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 6419a2b3510d..3e8cebfb59b7 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_FUSE_FS) += fuse.o obj-$(CONFIG_CUSE) += cuse.o -obj-$(CONFIG_VIRTIO_FS) += virtio_fs.o +obj-$(CONFIG_VIRTIO_FS) += virtiofs.o fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o +virtiofs-y += virtio_fs.o diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index dadd617d826c..ed1abc9e33cf 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -276,10 +276,12 @@ static void flush_bg_queue(struct fuse_conn *fc) void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_iqueue *fiq = &fc->iq; - bool async = req->args->end; + bool async; if (test_and_set_bit(FR_FINISHED, &req->flags)) goto put_request; + + async = req->args->end; /* * test_and_set_bit() implies smp_mb() between bit * changing and below intr_entry check. Pairs with diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index d572c900bb0f..54d638f9ba1c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -405,7 +405,8 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, else fuse_invalidate_entry_cache(entry); - fuse_advise_use_readdirplus(dir); + if (inode) + fuse_advise_use_readdirplus(dir); return newent; out_iput: @@ -1521,6 +1522,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, is_truncate = true; } + /* Flush dirty data/metadata before non-truncate SETATTR */ + if (is_wb && S_ISREG(inode->i_mode) && + attr->ia_valid & + (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET | + ATTR_TIMES_SET)) { + err = write_inode_now(inode, true); + if (err) + return err; + + fuse_set_nowrite(inode); + fuse_release_nowrite(inode); + } + if (is_truncate) { fuse_set_nowrite(inode); set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 0f0225686aee..db48a5cf8620 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -217,7 +217,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) { struct fuse_conn *fc = get_fuse_conn(inode); int err; - bool lock_inode = (file->f_flags & O_TRUNC) && + bool is_wb_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc && fc->writeback_cache; @@ -225,16 +225,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (err) return err; - if (lock_inode) + if (is_wb_truncate) { inode_lock(inode); + fuse_set_nowrite(inode); + } err = fuse_do_open(fc, get_node_id(inode), file, isdir); if (!err) fuse_finish_open(inode, file); - if (lock_inode) + if (is_wb_truncate) { + fuse_release_nowrite(inode); inode_unlock(inode); + } return err; } @@ -1997,7 +2001,7 @@ static int fuse_writepages_fill(struct page *page, if (!data->ff) { err = -EIO; - data->ff = fuse_write_file_get(fc, get_fuse_inode(inode)); + data->ff = fuse_write_file_get(fc, fi); if (!data->ff) goto out_unlock; } @@ -2042,8 +2046,6 @@ static int fuse_writepages_fill(struct page 
*page, * under writeback, so we can release the page lock. */ if (data->wpa == NULL) { - struct fuse_inode *fi = get_fuse_inode(inode); - err = -ENOMEM; wpa = fuse_writepage_args_alloc(); if (!wpa) { diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 956aeaf961ae..d148188cfca4 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -479,6 +479,7 @@ struct fuse_fs_context { bool destroy:1; bool no_control:1; bool no_force_umount:1; + bool no_mount_options:1; unsigned int max_read; unsigned int blksize; const char *subtype; @@ -713,6 +714,9 @@ struct fuse_conn { /** Do not allow MNT_FORCE umount */ unsigned int no_force_umount:1; + /* Do not show mount options */ + unsigned int no_mount_options:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index e040e2a2b621..16aec32f7f3d 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -558,6 +558,9 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root) struct super_block *sb = root->d_sb; struct fuse_conn *fc = get_fuse_conn_super(sb); + if (fc->no_mount_options) + return 0; + seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id)); seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id)); if (fc->default_permissions) @@ -1180,6 +1183,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx) fc->destroy = ctx->destroy; fc->no_control = ctx->no_control; fc->no_force_umount = ctx->no_force_umount; + fc->no_mount_options = ctx->no_mount_options; err = -ENOMEM; root = fuse_get_root_inode(sb, ctx->rootmode); diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 6af3f131e468..a5c86048b96e 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -30,6 +30,7 @@ struct virtio_fs_vq { struct virtqueue *vq; /* protected by ->lock */ struct work_struct done_work; struct list_head queued_reqs; + struct list_head end_reqs; /* End these requests */ struct delayed_work dispatch_work; struct fuse_dev *fud; bool connected; @@ -54,6 +55,9 @@ struct virtio_fs_forget { struct list_head list; }; +static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, + struct fuse_req *req, bool in_flight); + static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq) { struct virtio_fs *fs = vq->vdev->priv; @@ -66,6 +70,19 @@ static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq) return &vq_to_fsvq(vq)->fud->pq; } +/* Should be called with fsvq->lock held. */ +static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq) +{ + fsvq->in_flight++; +} + +/* Should be called with fsvq->lock held. 
*/ +static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq) +{ + WARN_ON(fsvq->in_flight <= 0); + fsvq->in_flight--; +} + static void release_virtio_fs_obj(struct kref *ref) { struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount); @@ -109,22 +126,6 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq) flush_delayed_work(&fsvq->dispatch_work); } -static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq) -{ - struct virtio_fs_forget *forget; - - spin_lock(&fsvq->lock); - while (1) { - forget = list_first_entry_or_null(&fsvq->queued_reqs, - struct virtio_fs_forget, list); - if (!forget) - break; - list_del(&forget->list); - kfree(forget); - } - spin_unlock(&fsvq->lock); -} - static void virtio_fs_drain_all_queues(struct virtio_fs *fs) { struct virtio_fs_vq *fsvq; @@ -132,9 +133,6 @@ static void virtio_fs_drain_all_queues(struct virtio_fs *fs) for (i = 0; i < fs->nvqs; i++) { fsvq = &fs->vqs[i]; - if (i == VQ_HIPRIO) - drain_hiprio_queued_reqs(fsvq); - virtio_fs_drain_queue(fsvq); } } @@ -253,14 +251,66 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work) while ((req = virtqueue_get_buf(vq, &len)) != NULL) { kfree(req); - fsvq->in_flight--; + dec_in_flight_req(fsvq); } } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq))); spin_unlock(&fsvq->lock); } -static void virtio_fs_dummy_dispatch_work(struct work_struct *work) +static void virtio_fs_request_dispatch_work(struct work_struct *work) { + struct fuse_req *req; + struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, + dispatch_work.work); + struct fuse_conn *fc = fsvq->fud->fc; + int ret; + + pr_debug("virtio-fs: worker %s called.\n", __func__); + while (1) { + spin_lock(&fsvq->lock); + req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req, + list); + if (!req) { + spin_unlock(&fsvq->lock); + break; + } + + list_del_init(&req->list); + spin_unlock(&fsvq->lock); + fuse_request_end(fc, req); + } + + /* Dispatch pending requests */ + while (1) { + spin_lock(&fsvq->lock); + req = list_first_entry_or_null(&fsvq->queued_reqs, + struct fuse_req, list); + if (!req) { + spin_unlock(&fsvq->lock); + return; + } + list_del_init(&req->list); + spin_unlock(&fsvq->lock); + + ret = virtio_fs_enqueue_req(fsvq, req, true); + if (ret < 0) { + if (ret == -ENOMEM || ret == -ENOSPC) { + spin_lock(&fsvq->lock); + list_add_tail(&req->list, &fsvq->queued_reqs); + schedule_delayed_work(&fsvq->dispatch_work, + msecs_to_jiffies(1)); + spin_unlock(&fsvq->lock); + return; + } + req->out.h.error = ret; + spin_lock(&fsvq->lock); + dec_in_flight_req(fsvq); + spin_unlock(&fsvq->lock); + pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", + ret); + fuse_request_end(fc, req); + } + } } static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) @@ -286,6 +336,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) list_del(&forget->list); if (!fsvq->connected) { + dec_in_flight_req(fsvq); spin_unlock(&fsvq->lock); kfree(forget); continue; @@ -307,13 +358,13 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) } else { pr_debug("virtio-fs: Could not queue FORGET: err=%d. 
Dropping it.\n", ret); + dec_in_flight_req(fsvq); kfree(forget); } spin_unlock(&fsvq->lock); return; } - fsvq->in_flight++; notify = virtqueue_kick_prepare(vq); spin_unlock(&fsvq->lock); @@ -452,7 +503,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work) fuse_request_end(fc, req); spin_lock(&fsvq->lock); - fsvq->in_flight--; + dec_in_flight_req(fsvq); spin_unlock(&fsvq->lock); } } @@ -502,6 +553,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name; INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work); INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs); + INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs); INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work, virtio_fs_hiprio_dispatch_work); spin_lock_init(&fs->vqs[VQ_HIPRIO].lock); @@ -511,8 +563,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, spin_lock_init(&fs->vqs[i].lock); INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work); INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work, - virtio_fs_dummy_dispatch_work); + virtio_fs_request_dispatch_work); INIT_LIST_HEAD(&fs->vqs[i].queued_reqs); + INIT_LIST_HEAD(&fs->vqs[i].end_reqs); snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name), "requests.%u", i - VQ_REQUEST); callbacks[i] = virtio_fs_vq_done; @@ -708,6 +761,7 @@ __releases(fiq->lock) list_add_tail(&forget->list, &fsvq->queued_reqs); schedule_delayed_work(&fsvq->dispatch_work, msecs_to_jiffies(1)); + inc_in_flight_req(fsvq); } else { pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n", ret); @@ -717,7 +771,7 @@ __releases(fiq->lock) goto out; } - fsvq->in_flight++; + inc_in_flight_req(fsvq); notify = virtqueue_kick_prepare(vq); spin_unlock(&fsvq->lock); @@ -819,7 +873,7 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg, /* Add a request to a virtqueue and kick the device */ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, - struct fuse_req *req) + struct fuse_req *req, bool in_flight) { /* requests need at least 4 elements */ struct scatterlist *stack_sgs[6]; @@ -835,6 +889,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, unsigned int i; int ret; bool notify; + struct fuse_pqueue *fpq; /* Does the sglist fit on the stack? */ total_sgs = sg_count_fuse_req(req); @@ -889,7 +944,17 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, goto out; } - fsvq->in_flight++; + /* Request successfully sent. 
*/ + fpq = &fsvq->fud->pq; + spin_lock(&fpq->lock); + list_add_tail(&req->list, fpq->processing); + spin_unlock(&fpq->lock); + set_bit(FR_SENT, &req->flags); + /* matches barrier in request_wait_answer() */ + smp_mb__after_atomic(); + + if (!in_flight) + inc_in_flight_req(fsvq); notify = virtqueue_kick_prepare(vq); spin_unlock(&fsvq->lock); @@ -915,9 +980,8 @@ __releases(fiq->lock) { unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */ struct virtio_fs *fs; - struct fuse_conn *fc; struct fuse_req *req; - struct fuse_pqueue *fpq; + struct virtio_fs_vq *fsvq; int ret; WARN_ON(list_empty(&fiq->pending)); @@ -928,44 +992,36 @@ __releases(fiq->lock) spin_unlock(&fiq->lock); fs = fiq->priv; - fc = fs->vqs[queue_id].fud->fc; pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n", __func__, req->in.h.opcode, req->in.h.unique, req->in.h.nodeid, req->in.h.len, fuse_len_args(req->args->out_numargs, req->args->out_args)); - fpq = &fs->vqs[queue_id].fud->pq; - spin_lock(&fpq->lock); - if (!fpq->connected) { - spin_unlock(&fpq->lock); - req->out.h.error = -ENODEV; - pr_err("virtio-fs: %s disconnected\n", __func__); - fuse_request_end(fc, req); - return; - } - list_add_tail(&req->list, fpq->processing); - spin_unlock(&fpq->lock); - set_bit(FR_SENT, &req->flags); - /* matches barrier in request_wait_answer() */ - smp_mb__after_atomic(); - -retry: - ret = virtio_fs_enqueue_req(&fs->vqs[queue_id], req); + fsvq = &fs->vqs[queue_id]; + ret = virtio_fs_enqueue_req(fsvq, req, false); if (ret < 0) { if (ret == -ENOMEM || ret == -ENOSPC) { - /* Virtqueue full. Retry submission */ - /* TODO use completion instead of timeout */ - usleep_range(20, 30); - goto retry; + /* + * Virtqueue full. Retry submission from worker + * context as we might be holding fc->bg_lock. + */ + spin_lock(&fsvq->lock); + list_add_tail(&req->list, &fsvq->queued_reqs); + inc_in_flight_req(fsvq); + schedule_delayed_work(&fsvq->dispatch_work, + msecs_to_jiffies(1)); + spin_unlock(&fsvq->lock); + return; } req->out.h.error = ret; pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret); - spin_lock(&fpq->lock); - clear_bit(FR_SENT, &req->flags); - list_del_init(&req->list); - spin_unlock(&fpq->lock); - fuse_request_end(fc, req); + + /* Can't end request in submission context. 
Use a worker */ + spin_lock(&fsvq->lock); + list_add_tail(&req->list, &fsvq->end_reqs); + schedule_delayed_work(&fsvq->dispatch_work, 0); + spin_unlock(&fsvq->lock); return; } } @@ -992,6 +1048,7 @@ static int virtio_fs_fill_super(struct super_block *sb) .destroy = true, .no_control = true, .no_force_umount = true, + .no_mount_options = true, }; mutex_lock(&virtio_fs_mutex); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 681b44682b0d..18daf494abab 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -1540,17 +1540,23 @@ static int gfs2_init_fs_context(struct fs_context *fc) { struct gfs2_args *args; - args = kzalloc(sizeof(*args), GFP_KERNEL); + args = kmalloc(sizeof(*args), GFP_KERNEL); if (args == NULL) return -ENOMEM; - args->ar_quota = GFS2_QUOTA_DEFAULT; - args->ar_data = GFS2_DATA_DEFAULT; - args->ar_commit = 30; - args->ar_statfs_quantum = 30; - args->ar_quota_quantum = 60; - args->ar_errors = GFS2_ERRORS_DEFAULT; + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { + struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info; + *args = sdp->sd_args; + } else { + memset(args, 0, sizeof(*args)); + args->ar_quota = GFS2_QUOTA_DEFAULT; + args->ar_data = GFS2_DATA_DEFAULT; + args->ar_commit = 30; + args->ar_statfs_quantum = 30; + args->ar_quota_quantum = 60; + args->ar_errors = GFS2_ERRORS_DEFAULT; + } fc->fs_private = args; fc->ops = &gfs2_context_ops; return 0; @@ -1600,6 +1606,7 @@ static int gfs2_meta_get_tree(struct fs_context *fc) } static const struct fs_context_operations gfs2_meta_context_ops = { + .free = gfs2_fc_free, .get_tree = gfs2_meta_get_tree, }; diff --git a/fs/io-wq.c b/fs/io-wq.c new file mode 100644 index 000000000000..91b85df0861e --- /dev/null +++ b/fs/io-wq.c @@ -0,0 +1,1094 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Basic worker thread pool for io_uring + * + * Copyright (C) 2019 Jens Axboe + * + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/sched/signal.h> +#include <linux/mm.h> +#include <linux/mmu_context.h> +#include <linux/sched/mm.h> +#include <linux/percpu.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/rculist_nulls.h> + +#include "io-wq.h" + +#define WORKER_IDLE_TIMEOUT (5 * HZ) + +enum { + IO_WORKER_F_UP = 1, /* up and active */ + IO_WORKER_F_RUNNING = 2, /* account as running */ + IO_WORKER_F_FREE = 4, /* worker on free list */ + IO_WORKER_F_EXITING = 8, /* worker exiting */ + IO_WORKER_F_FIXED = 16, /* static idle worker */ + IO_WORKER_F_BOUND = 32, /* is doing bounded work */ +}; + +enum { + IO_WQ_BIT_EXIT = 0, /* wq exiting */ + IO_WQ_BIT_CANCEL = 1, /* cancel work on list */ + IO_WQ_BIT_ERROR = 2, /* error on setup */ +}; + +enum { + IO_WQE_FLAG_STALLED = 1, /* stalled on hash */ +}; + +/* + * One for each thread in a wqe pool + */ +struct io_worker { + refcount_t ref; + unsigned flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + wait_queue_head_t wait; + struct io_wqe *wqe; + + struct io_wq_work *cur_work; + spinlock_t lock; + + struct rcu_head rcu; + struct mm_struct *mm; + const struct cred *creds; + struct files_struct *restore_files; +}; + +#if BITS_PER_LONG == 64 +#define IO_WQ_HASH_ORDER 6 +#else +#define IO_WQ_HASH_ORDER 5 +#endif + +struct io_wqe_acct { + unsigned nr_workers; + unsigned max_workers; + atomic_t nr_running; +}; + +enum { + IO_WQ_ACCT_BOUND, + IO_WQ_ACCT_UNBOUND, +}; + +/* + * Per-node worker thread pool + */ +struct io_wqe { + struct { + spinlock_t lock; + struct 
io_wq_work_list work_list; + unsigned long hash_map; + unsigned flags; + } ____cacheline_aligned_in_smp; + + int node; + struct io_wqe_acct acct[2]; + + struct hlist_nulls_head free_list; + struct hlist_nulls_head busy_list; + struct list_head all_list; + + struct io_wq *wq; +}; + +/* + * Per io_wq state + */ +struct io_wq { + struct io_wqe **wqes; + unsigned long state; + + get_work_fn *get_work; + put_work_fn *put_work; + + struct task_struct *manager; + struct user_struct *user; + struct cred *creds; + struct mm_struct *mm; + refcount_t refs; + struct completion done; +}; + +static bool io_worker_get(struct io_worker *worker) +{ + return refcount_inc_not_zero(&worker->ref); +} + +static void io_worker_release(struct io_worker *worker) +{ + if (refcount_dec_and_test(&worker->ref)) + wake_up_process(worker->task); +} + +/* + * Note: drops the wqe->lock if returning true! The caller must re-acquire + * the lock in that case. Some callers need to restart handling if this + * happens, so we can't just re-acquire the lock on behalf of the caller. + */ +static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker) +{ + bool dropped_lock = false; + + if (worker->creds) { + revert_creds(worker->creds); + worker->creds = NULL; + } + + if (current->files != worker->restore_files) { + __acquire(&wqe->lock); + spin_unlock_irq(&wqe->lock); + dropped_lock = true; + + task_lock(current); + current->files = worker->restore_files; + task_unlock(current); + } + + /* + * If we have an active mm, we need to drop the wq lock before unusing + * it. If we do, return true and let the caller retry the idle loop. + */ + if (worker->mm) { + if (!dropped_lock) { + __acquire(&wqe->lock); + spin_unlock_irq(&wqe->lock); + dropped_lock = true; + } + __set_current_state(TASK_RUNNING); + set_fs(KERNEL_DS); + unuse_mm(worker->mm); + mmput(worker->mm); + worker->mm = NULL; + } + + return dropped_lock; +} + +static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, + struct io_wq_work *work) +{ + if (work->flags & IO_WQ_WORK_UNBOUND) + return &wqe->acct[IO_WQ_ACCT_UNBOUND]; + + return &wqe->acct[IO_WQ_ACCT_BOUND]; +} + +static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe, + struct io_worker *worker) +{ + if (worker->flags & IO_WORKER_F_BOUND) + return &wqe->acct[IO_WQ_ACCT_BOUND]; + + return &wqe->acct[IO_WQ_ACCT_UNBOUND]; +} + +static void io_worker_exit(struct io_worker *worker) +{ + struct io_wqe *wqe = worker->wqe; + struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); + unsigned nr_workers; + + /* + * If we're not at zero, someone else is holding a brief reference + * to the worker. Wait for that to go away. 
+ */ + set_current_state(TASK_INTERRUPTIBLE); + if (!refcount_dec_and_test(&worker->ref)) + schedule(); + __set_current_state(TASK_RUNNING); + + preempt_disable(); + current->flags &= ~PF_IO_WORKER; + if (worker->flags & IO_WORKER_F_RUNNING) + atomic_dec(&acct->nr_running); + if (!(worker->flags & IO_WORKER_F_BOUND)) + atomic_dec(&wqe->wq->user->processes); + worker->flags = 0; + preempt_enable(); + + spin_lock_irq(&wqe->lock); + hlist_nulls_del_rcu(&worker->nulls_node); + list_del_rcu(&worker->all_list); + if (__io_worker_unuse(wqe, worker)) { + __release(&wqe->lock); + spin_lock_irq(&wqe->lock); + } + acct->nr_workers--; + nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers + + wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers; + spin_unlock_irq(&wqe->lock); + + /* all workers gone, wq exit can proceed */ + if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs)) + complete(&wqe->wq->done); + + kfree_rcu(worker, rcu); +} + +static inline bool io_wqe_run_queue(struct io_wqe *wqe) + __must_hold(wqe->lock) +{ + if (!wq_list_empty(&wqe->work_list) && + !(wqe->flags & IO_WQE_FLAG_STALLED)) + return true; + return false; +} + +/* + * Check head of free list for an available worker. If one isn't available, + * caller must wake up the wq manager to create one. + */ +static bool io_wqe_activate_free_worker(struct io_wqe *wqe) + __must_hold(RCU) +{ + struct hlist_nulls_node *n; + struct io_worker *worker; + + n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list)); + if (is_a_nulls(n)) + return false; + + worker = hlist_nulls_entry(n, struct io_worker, nulls_node); + if (io_worker_get(worker)) { + wake_up(&worker->wait); + io_worker_release(worker); + return true; + } + + return false; +} + +/* + * We need a worker. If we find a free one, we're good. If not, and we're + * below the max number of workers, wake up the manager to create one. + */ +static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) +{ + bool ret; + + /* + * Most likely an attempt to queue unbounded work on an io_wq that + * wasn't setup with any unbounded workers. + */ + WARN_ON_ONCE(!acct->max_workers); + + rcu_read_lock(); + ret = io_wqe_activate_free_worker(wqe); + rcu_read_unlock(); + + if (!ret && acct->nr_workers < acct->max_workers) + wake_up_process(wqe->wq->manager); +} + +static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); + + atomic_inc(&acct->nr_running); +} + +static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker) + __must_hold(wqe->lock) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); + + if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) + io_wqe_wake_worker(wqe, acct); +} + +static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker) +{ + allow_kernel_signal(SIGINT); + + current->flags |= PF_IO_WORKER; + + worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); + worker->restore_files = current->files; + io_wqe_inc_running(wqe, worker); +} + +/* + * Worker will start processing some work. 
Move it to the busy list, if + * it's currently on the freelist + */ +static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, + struct io_wq_work *work) + __must_hold(wqe->lock) +{ + bool worker_bound, work_bound; + + if (worker->flags & IO_WORKER_F_FREE) { + worker->flags &= ~IO_WORKER_F_FREE; + hlist_nulls_del_init_rcu(&worker->nulls_node); + hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list); + } + + /* + * If worker is moving from bound to unbound (or vice versa), then + * ensure we update the running accounting. + */ + worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0; + work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0; + if (worker_bound != work_bound) { + io_wqe_dec_running(wqe, worker); + if (work_bound) { + worker->flags |= IO_WORKER_F_BOUND; + wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--; + wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++; + atomic_dec(&wqe->wq->user->processes); + } else { + worker->flags &= ~IO_WORKER_F_BOUND; + wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++; + wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--; + atomic_inc(&wqe->wq->user->processes); + } + io_wqe_inc_running(wqe, worker); + } +} + +/* + * No work, worker going to sleep. Move to freelist, and unuse mm if we + * have one attached. Dropping the mm may potentially sleep, so we drop + * the lock in that case and return success. Since the caller has to + * retry the loop in that case (we changed task state), we don't regrab + * the lock if we return success. + */ +static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) + __must_hold(wqe->lock) +{ + if (!(worker->flags & IO_WORKER_F_FREE)) { + worker->flags |= IO_WORKER_F_FREE; + hlist_nulls_del_init_rcu(&worker->nulls_node); + hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); + } + + return __io_worker_unuse(wqe, worker); +} + +static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash) + __must_hold(wqe->lock) +{ + struct io_wq_work_node *node, *prev; + struct io_wq_work *work; + + wq_list_for_each(node, prev, &wqe->work_list) { + work = container_of(node, struct io_wq_work, list); + + /* not hashed, can run anytime */ + if (!(work->flags & IO_WQ_WORK_HASHED)) { + wq_node_del(&wqe->work_list, node, prev); + return work; + } + + /* hashed, can run if not already running */ + *hash = work->flags >> IO_WQ_HASH_SHIFT; + if (!(wqe->hash_map & BIT_ULL(*hash))) { + wqe->hash_map |= BIT_ULL(*hash); + wq_node_del(&wqe->work_list, node, prev); + return work; + } + } + + return NULL; +} + +static void io_worker_handle_work(struct io_worker *worker) + __releases(wqe->lock) +{ + struct io_wq_work *work, *old_work = NULL, *put_work = NULL; + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + do { + unsigned hash = -1U; + + /* + * If we got some work, mark us as busy. If we didn't, but + * the list isn't empty, it means we stalled on hashed work. + * Mark us stalled so we don't keep looking for work when we + * can't make progress, any work completion or insertion will + * clear the stalled flag. 
+ */ + work = io_get_next_work(wqe, &hash); + if (work) + __io_worker_busy(wqe, worker, work); + else if (!wq_list_empty(&wqe->work_list)) + wqe->flags |= IO_WQE_FLAG_STALLED; + + spin_unlock_irq(&wqe->lock); + if (put_work && wq->put_work) + wq->put_work(old_work); + if (!work) + break; +next: + /* flush any pending signals before assigning new work */ + if (signal_pending(current)) + flush_signals(current); + + spin_lock_irq(&worker->lock); + worker->cur_work = work; + spin_unlock_irq(&worker->lock); + + if (work->flags & IO_WQ_WORK_CB) + work->func(&work); + + if ((work->flags & IO_WQ_WORK_NEEDS_FILES) && + current->files != work->files) { + task_lock(current); + current->files = work->files; + task_unlock(current); + } + if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm && + wq->mm && mmget_not_zero(wq->mm)) { + use_mm(wq->mm); + set_fs(USER_DS); + worker->mm = wq->mm; + } + if (!worker->creds) + worker->creds = override_creds(wq->creds); + if (test_bit(IO_WQ_BIT_CANCEL, &wq->state)) + work->flags |= IO_WQ_WORK_CANCEL; + if (worker->mm) + work->flags |= IO_WQ_WORK_HAS_MM; + + if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) { + put_work = work; + wq->get_work(work); + } + + old_work = work; + work->func(&work); + + spin_lock_irq(&worker->lock); + worker->cur_work = NULL; + spin_unlock_irq(&worker->lock); + + spin_lock_irq(&wqe->lock); + + if (hash != -1U) { + wqe->hash_map &= ~BIT_ULL(hash); + wqe->flags &= ~IO_WQE_FLAG_STALLED; + } + if (work && work != old_work) { + spin_unlock_irq(&wqe->lock); + + if (put_work && wq->put_work) { + wq->put_work(put_work); + put_work = NULL; + } + + /* dependent work not hashed */ + hash = -1U; + goto next; + } + } while (1); +} + +static int io_wqe_worker(void *data) +{ + struct io_worker *worker = data; + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + DEFINE_WAIT(wait); + + io_worker_start(wqe, worker); + + while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { + prepare_to_wait(&worker->wait, &wait, TASK_INTERRUPTIBLE); + + spin_lock_irq(&wqe->lock); + if (io_wqe_run_queue(wqe)) { + __set_current_state(TASK_RUNNING); + io_worker_handle_work(worker); + continue; + } + /* drops the lock on success, retry */ + if (__io_worker_idle(wqe, worker)) { + __release(&wqe->lock); + continue; + } + spin_unlock_irq(&wqe->lock); + if (signal_pending(current)) + flush_signals(current); + if (schedule_timeout(WORKER_IDLE_TIMEOUT)) + continue; + /* timed out, exit unless we're the fixed worker */ + if (test_bit(IO_WQ_BIT_EXIT, &wq->state) || + !(worker->flags & IO_WORKER_F_FIXED)) + break; + } + + finish_wait(&worker->wait, &wait); + + if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) { + spin_lock_irq(&wqe->lock); + if (!wq_list_empty(&wqe->work_list)) + io_worker_handle_work(worker); + else + spin_unlock_irq(&wqe->lock); + } + + io_worker_exit(worker); + return 0; +} + +/* + * Called when a worker is scheduled in. Mark us as currently running. + */ +void io_wq_worker_running(struct task_struct *tsk) +{ + struct io_worker *worker = kthread_data(tsk); + struct io_wqe *wqe = worker->wqe; + + if (!(worker->flags & IO_WORKER_F_UP)) + return; + if (worker->flags & IO_WORKER_F_RUNNING) + return; + worker->flags |= IO_WORKER_F_RUNNING; + io_wqe_inc_running(wqe, worker); +} + +/* + * Called when worker is going to sleep. If there are no workers currently + * running and we have work pending, wake up a free one or have the manager + * set one up. 
+ */ +void io_wq_worker_sleeping(struct task_struct *tsk) +{ + struct io_worker *worker = kthread_data(tsk); + struct io_wqe *wqe = worker->wqe; + + if (!(worker->flags & IO_WORKER_F_UP)) + return; + if (!(worker->flags & IO_WORKER_F_RUNNING)) + return; + + worker->flags &= ~IO_WORKER_F_RUNNING; + + spin_lock_irq(&wqe->lock); + io_wqe_dec_running(wqe, worker); + spin_unlock_irq(&wqe->lock); +} + +static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) +{ + struct io_wqe_acct *acct =&wqe->acct[index]; + struct io_worker *worker; + + worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); + if (!worker) + return false; + + refcount_set(&worker->ref, 1); + worker->nulls_node.pprev = NULL; + init_waitqueue_head(&worker->wait); + worker->wqe = wqe; + spin_lock_init(&worker->lock); + + worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node, + "io_wqe_worker-%d/%d", index, wqe->node); + if (IS_ERR(worker->task)) { + kfree(worker); + return false; + } + + spin_lock_irq(&wqe->lock); + hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); + list_add_tail_rcu(&worker->all_list, &wqe->all_list); + worker->flags |= IO_WORKER_F_FREE; + if (index == IO_WQ_ACCT_BOUND) + worker->flags |= IO_WORKER_F_BOUND; + if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND)) + worker->flags |= IO_WORKER_F_FIXED; + acct->nr_workers++; + spin_unlock_irq(&wqe->lock); + + if (index == IO_WQ_ACCT_UNBOUND) + atomic_inc(&wq->user->processes); + + wake_up_process(worker->task); + return true; +} + +static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index) + __must_hold(wqe->lock) +{ + struct io_wqe_acct *acct = &wqe->acct[index]; + + /* if we have available workers or no work, no need */ + if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe)) + return false; + return acct->nr_workers < acct->max_workers; +} + +/* + * Manager thread. Tasked with creating new workers, if we need them. 
+ */ +static int io_wq_manager(void *data) +{ + struct io_wq *wq = data; + int workers_to_create = num_possible_nodes(); + int node; + + /* create fixed workers */ + refcount_set(&wq->refs, workers_to_create); + for_each_node(node) { + if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND)) + goto err; + workers_to_create--; + } + + complete(&wq->done); + + while (!kthread_should_stop()) { + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + bool fork_worker[2] = { false, false }; + + spin_lock_irq(&wqe->lock); + if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND)) + fork_worker[IO_WQ_ACCT_BOUND] = true; + if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND)) + fork_worker[IO_WQ_ACCT_UNBOUND] = true; + spin_unlock_irq(&wqe->lock); + if (fork_worker[IO_WQ_ACCT_BOUND]) + create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND); + if (fork_worker[IO_WQ_ACCT_UNBOUND]) + create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND); + } + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + } + + return 0; +err: + set_bit(IO_WQ_BIT_ERROR, &wq->state); + set_bit(IO_WQ_BIT_EXIT, &wq->state); + if (refcount_sub_and_test(workers_to_create, &wq->refs)) + complete(&wq->done); + return 0; +} + +static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct, + struct io_wq_work *work) +{ + bool free_worker; + + if (!(work->flags & IO_WQ_WORK_UNBOUND)) + return true; + if (atomic_read(&acct->nr_running)) + return true; + + rcu_read_lock(); + free_worker = !hlist_nulls_empty(&wqe->free_list); + rcu_read_unlock(); + if (free_worker) + return true; + + if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers && + !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN))) + return false; + + return true; +} + +static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) +{ + struct io_wqe_acct *acct = io_work_get_acct(wqe, work); + unsigned long flags; + + /* + * Do early check to see if we need a new unbound worker, and if we do, + * if we're allowed to do so. This isn't 100% accurate as there's a + * gap between this check and incrementing the value, but that's OK. + * It's close enough to not be an issue, fork() has the same delay. + */ + if (unlikely(!io_wq_can_queue(wqe, acct, work))) { + work->flags |= IO_WQ_WORK_CANCEL; + work->func(&work); + return; + } + + spin_lock_irqsave(&wqe->lock, flags); + wq_list_add_tail(&work->list, &wqe->work_list); + wqe->flags &= ~IO_WQE_FLAG_STALLED; + spin_unlock_irqrestore(&wqe->lock, flags); + + if (!atomic_read(&acct->nr_running)) + io_wqe_wake_worker(wqe, acct); +} + +void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) +{ + struct io_wqe *wqe = wq->wqes[numa_node_id()]; + + io_wqe_enqueue(wqe, work); +} + +/* + * Enqueue work, hashed by some key. Work items that hash to the same value + * will not be done in parallel. Used to limit concurrent writes, generally + * hashed by inode. 
+ */ +void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val) +{ + struct io_wqe *wqe = wq->wqes[numa_node_id()]; + unsigned bit; + + + bit = hash_ptr(val, IO_WQ_HASH_ORDER); + work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)); + io_wqe_enqueue(wqe, work); +} + +static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data) +{ + send_sig(SIGINT, worker->task, 1); + return false; +} + +/* + * Iterate the passed in list and call the specific function for each + * worker that isn't exiting + */ +static bool io_wq_for_each_worker(struct io_wqe *wqe, + bool (*func)(struct io_worker *, void *), + void *data) +{ + struct io_worker *worker; + bool ret = false; + + list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { + if (io_worker_get(worker)) { + ret = func(worker, data); + io_worker_release(worker); + if (ret) + break; + } + } + + return ret; +} + +void io_wq_cancel_all(struct io_wq *wq) +{ + int node; + + set_bit(IO_WQ_BIT_CANCEL, &wq->state); + + /* + * Browse both lists, as there's a gap between handing work off + * to a worker and the worker putting itself on the busy_list + */ + rcu_read_lock(); + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL); + } + rcu_read_unlock(); +} + +struct io_cb_cancel_data { + struct io_wqe *wqe; + work_cancel_fn *cancel; + void *caller_data; +}; + +static bool io_work_cancel(struct io_worker *worker, void *cancel_data) +{ + struct io_cb_cancel_data *data = cancel_data; + unsigned long flags; + bool ret = false; + + /* + * Hold the lock to avoid ->cur_work going out of scope, caller + * may dereference the passed in work. + */ + spin_lock_irqsave(&worker->lock, flags); + if (worker->cur_work && + data->cancel(worker->cur_work, data->caller_data)) { + send_sig(SIGINT, worker->task, 1); + ret = true; + } + spin_unlock_irqrestore(&worker->lock, flags); + + return ret; +} + +static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe, + work_cancel_fn *cancel, + void *cancel_data) +{ + struct io_cb_cancel_data data = { + .wqe = wqe, + .cancel = cancel, + .caller_data = cancel_data, + }; + struct io_wq_work_node *node, *prev; + struct io_wq_work *work; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&wqe->lock, flags); + wq_list_for_each(node, prev, &wqe->work_list) { + work = container_of(node, struct io_wq_work, list); + + if (cancel(work, cancel_data)) { + wq_node_del(&wqe->work_list, node, prev); + found = true; + break; + } + } + spin_unlock_irqrestore(&wqe->lock, flags); + + if (found) { + work->flags |= IO_WQ_WORK_CANCEL; + work->func(&work); + return IO_WQ_CANCEL_OK; + } + + rcu_read_lock(); + found = io_wq_for_each_worker(wqe, io_work_cancel, &data); + rcu_read_unlock(); + return found ? 
IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND; +} + +enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, + void *data) +{ + enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND; + int node; + + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + ret = io_wqe_cancel_cb_work(wqe, cancel, data); + if (ret != IO_WQ_CANCEL_NOTFOUND) + break; + } + + return ret; +} + +static bool io_wq_worker_cancel(struct io_worker *worker, void *data) +{ + struct io_wq_work *work = data; + unsigned long flags; + bool ret = false; + + if (worker->cur_work != work) + return false; + + spin_lock_irqsave(&worker->lock, flags); + if (worker->cur_work == work) { + send_sig(SIGINT, worker->task, 1); + ret = true; + } + spin_unlock_irqrestore(&worker->lock, flags); + + return ret; +} + +static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe, + struct io_wq_work *cwork) +{ + struct io_wq_work_node *node, *prev; + struct io_wq_work *work; + unsigned long flags; + bool found = false; + + cwork->flags |= IO_WQ_WORK_CANCEL; + + /* + * First check pending list, if we're lucky we can just remove it + * from there. CANCEL_OK means that the work is returned as-new, + * no completion will be posted for it. + */ + spin_lock_irqsave(&wqe->lock, flags); + wq_list_for_each(node, prev, &wqe->work_list) { + work = container_of(node, struct io_wq_work, list); + + if (work == cwork) { + wq_node_del(&wqe->work_list, node, prev); + found = true; + break; + } + } + spin_unlock_irqrestore(&wqe->lock, flags); + + if (found) { + work->flags |= IO_WQ_WORK_CANCEL; + work->func(&work); + return IO_WQ_CANCEL_OK; + } + + /* + * Now check if a free (going busy) or busy worker has the work + * currently running. If we find it there, we'll return CANCEL_RUNNING + * as an indication that we attempted to signal cancellation. The + * completion will run normally in this case.
+ */ +void io_wq_flush(struct io_wq *wq) +{ + struct io_wq_flush_data data; + int node; + + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + init_completion(&data.done); + INIT_IO_WORK(&data.work, io_wq_flush_func); + data.work.flags |= IO_WQ_WORK_INTERNAL; + io_wqe_enqueue(wqe, &data.work); + wait_for_completion(&data.done); + } +} + +struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) +{ + int ret = -ENOMEM, node; + struct io_wq *wq; + + wq = kzalloc(sizeof(*wq), GFP_KERNEL); + if (!wq) + return ERR_PTR(-ENOMEM); + + wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL); + if (!wq->wqes) { + kfree(wq); + return ERR_PTR(-ENOMEM); + } + + wq->get_work = data->get_work; + wq->put_work = data->put_work; + + /* caller must already hold a reference to this */ + wq->user = data->user; + wq->creds = data->creds; + + for_each_node(node) { + struct io_wqe *wqe; + + wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, node); + if (!wqe) + goto err; + wq->wqes[node] = wqe; + wqe->node = node; + wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; + atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0); + if (wq->user) { + wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = + task_rlimit(current, RLIMIT_NPROC); + } + atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0); + wqe->node = node; + wqe->wq = wq; + spin_lock_init(&wqe->lock); + INIT_WQ_LIST(&wqe->work_list); + INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0); + INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1); + INIT_LIST_HEAD(&wqe->all_list); + } + + init_completion(&wq->done); + + /* caller must have already done mmgrab() on this mm */ + wq->mm = data->mm; + + wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager"); + if (!IS_ERR(wq->manager)) { + wake_up_process(wq->manager); + wait_for_completion(&wq->done); + if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) { + ret = -ENOMEM; + goto err; + } + reinit_completion(&wq->done); + return wq; + } + + ret = PTR_ERR(wq->manager); + complete(&wq->done); +err: + for_each_node(node) + kfree(wq->wqes[node]); + kfree(wq->wqes); + kfree(wq); + return ERR_PTR(ret); +} + +static bool io_wq_worker_wake(struct io_worker *worker, void *data) +{ + wake_up_process(worker->task); + return false; +} + +void io_wq_destroy(struct io_wq *wq) +{ + int node; + + set_bit(IO_WQ_BIT_EXIT, &wq->state); + if (wq->manager) + kthread_stop(wq->manager); + + rcu_read_lock(); + for_each_node(node) + io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL); + rcu_read_unlock(); + + wait_for_completion(&wq->done); + + for_each_node(node) + kfree(wq->wqes[node]); + kfree(wq->wqes); + kfree(wq); +} diff --git a/fs/io-wq.h b/fs/io-wq.h new file mode 100644 index 000000000000..600e0158cba7 --- /dev/null +++ b/fs/io-wq.h @@ -0,0 +1,127 @@ +#ifndef INTERNAL_IO_WQ_H +#define INTERNAL_IO_WQ_H + +struct io_wq; + +enum { + IO_WQ_WORK_CANCEL = 1, + IO_WQ_WORK_HAS_MM = 2, + IO_WQ_WORK_HASHED = 4, + IO_WQ_WORK_NEEDS_USER = 8, + IO_WQ_WORK_NEEDS_FILES = 16, + IO_WQ_WORK_UNBOUND = 32, + IO_WQ_WORK_INTERNAL = 64, + IO_WQ_WORK_CB = 128, + + IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */ +}; + +enum io_wq_cancel { + IO_WQ_CANCEL_OK, /* cancelled before started */ + IO_WQ_CANCEL_RUNNING, /* found, running, and attempted cancelled */ + IO_WQ_CANCEL_NOTFOUND, /* work not found */ +}; + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +static inline void wq_list_add_tail(struct 
io_wq_work_node *node, + struct io_wq_work_list *list) +{ + if (!list->first) { + list->first = list->last = node; + } else { + list->last->next = node; + list->last = node; + } +} + +static inline void wq_node_del(struct io_wq_work_list *list, + struct io_wq_work_node *node, + struct io_wq_work_node *prev) +{ + if (node == list->first) + list->first = node->next; + if (node == list->last) + list->last = prev; + if (prev) + prev->next = node->next; +} + +#define wq_list_for_each(pos, prv, head) \ + for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next) + +#define wq_list_empty(list) ((list)->first == NULL) +#define INIT_WQ_LIST(list) do { \ + (list)->first = NULL; \ + (list)->last = NULL; \ +} while (0) + +struct io_wq_work { + union { + struct io_wq_work_node list; + void *data; + }; + void (*func)(struct io_wq_work **); + struct files_struct *files; + unsigned flags; +}; + +#define INIT_IO_WORK(work, _func) \ + do { \ + (work)->list.next = NULL; \ + (work)->func = _func; \ + (work)->flags = 0; \ + (work)->files = NULL; \ + } while (0) \ + +typedef void (get_work_fn)(struct io_wq_work *); +typedef void (put_work_fn)(struct io_wq_work *); + +struct io_wq_data { + struct mm_struct *mm; + struct user_struct *user; + struct cred *creds; + + get_work_fn *get_work; + put_work_fn *put_work; +}; + +struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data); +void io_wq_destroy(struct io_wq *wq); + +void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work); +void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val); +void io_wq_flush(struct io_wq *wq); + +void io_wq_cancel_all(struct io_wq *wq); +enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork); + +typedef bool (work_cancel_fn)(struct io_wq_work *, void *); + +enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, + void *data); + +#if defined(CONFIG_IO_WQ) +extern void io_wq_worker_sleeping(struct task_struct *); +extern void io_wq_worker_running(struct task_struct *); +#else +static inline void io_wq_worker_sleeping(struct task_struct *tsk) +{ +} +static inline void io_wq_worker_running(struct task_struct *tsk) +{ +} +#endif + +static inline bool io_wq_current_is_worker(void) +{ + return in_task() && (current->flags & PF_IO_WORKER); +} +#endif diff --git a/fs/io_uring.c b/fs/io_uring.c index 76fdbe84aff5..2c2e8c25da01 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -56,7 +56,6 @@ #include <linux/mmu_context.h> #include <linux/percpu.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/blkdev.h> #include <linux/bvec.h> @@ -71,12 +70,24 @@ #include <linux/sizes.h> #include <linux/hugetlb.h> +#define CREATE_TRACE_POINTS +#include <trace/events/io_uring.h> + #include <uapi/linux/io_uring.h> #include "internal.h" +#include "io-wq.h" #define IORING_MAX_ENTRIES 32768 -#define IORING_MAX_FIXED_FILES 1024 +#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES) + +/* + * Shift of 9 is 512 entries, or exactly one page on 64-bit archs + */ +#define IORING_FILE_TABLE_SHIFT 9 +#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT) +#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1) +#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE) struct io_uring { u32 head ____cacheline_aligned_in_smp; @@ -161,14 +172,8 @@ struct io_mapped_ubuf { unsigned int nr_bvecs; }; -struct async_list { - spinlock_t lock; - atomic_t cnt; - struct list_head list; - - struct file *file; - off_t io_start; - 
size_t io_len; +struct fixed_file_table { + struct file **files; }; struct io_ring_ctx { @@ -180,6 +185,8 @@ struct io_ring_ctx { unsigned int flags; bool compat; bool account_mem; + bool cq_overflow_flushed; + bool drain_next; /* * Ring buffer of indices into array of io_uring_sqe, which is @@ -197,37 +204,31 @@ struct io_ring_ctx { unsigned sq_entries; unsigned sq_mask; unsigned sq_thread_idle; + unsigned cached_sq_dropped; + atomic_t cached_cq_overflow; struct io_uring_sqe *sq_sqes; struct list_head defer_list; struct list_head timeout_list; + struct list_head cq_overflow_list; + + wait_queue_head_t inflight_wait; } ____cacheline_aligned_in_smp; + struct io_rings *rings; + /* IO offload */ - struct workqueue_struct *sqo_wq[2]; + struct io_wq *io_wq; struct task_struct *sqo_thread; /* if using sq thread polling */ struct mm_struct *sqo_mm; wait_queue_head_t sqo_wait; - struct completion sqo_thread_started; - - struct { - unsigned cached_cq_tail; - unsigned cq_entries; - unsigned cq_mask; - struct wait_queue_head cq_wait; - struct fasync_struct *cq_fasync; - struct eventfd_ctx *cq_ev_fd; - atomic_t cq_timeouts; - } ____cacheline_aligned_in_smp; - - struct io_rings *rings; /* * If used, fixed file set. Writers must ensure that ->refs is dead, * readers must ensure that ->refs is alive as long as the file* is * used. Only updated through io_uring_register(2). */ - struct file **user_files; + struct fixed_file_table *file_table; unsigned nr_user_files; /* if used, fixed mapped user buffers */ @@ -236,7 +237,27 @@ struct io_ring_ctx { struct user_struct *user; - struct completion ctx_done; + struct cred *creds; + + /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */ + struct completion *completions; + + /* if all else fails... */ + struct io_kiocb *fallback_req; + +#if defined(CONFIG_UNIX) + struct socket *ring_sock; +#endif + + struct { + unsigned cached_cq_tail; + unsigned cq_entries; + unsigned cq_mask; + atomic_t cq_timeouts; + struct wait_queue_head cq_wait; + struct fasync_struct *cq_fasync; + struct eventfd_ctx *cq_ev_fd; + } ____cacheline_aligned_in_smp; struct { struct mutex uring_lock; @@ -253,23 +274,11 @@ struct io_ring_ctx { * manipulate the list, hence no extra locking is needed there. 
*/ struct list_head poll_list; - struct list_head cancel_list; - } ____cacheline_aligned_in_smp; - - struct async_list pending_async[2]; - -#if defined(CONFIG_UNIX) - struct socket *ring_sock; -#endif -}; + struct rb_root cancel_tree; -struct sqe_submit { - const struct io_uring_sqe *sqe; - unsigned short index; - u32 sequence; - bool has_user; - bool needs_lock; - bool needs_fixed_file; + spinlock_t inflight_lock; + struct list_head inflight_list; + } ____cacheline_aligned_in_smp; }; /* @@ -282,12 +291,20 @@ struct io_poll_iocb { __poll_t events; bool done; bool canceled; - struct wait_queue_entry wait; + struct wait_queue_entry *wait; +}; + +struct io_timeout_data { + struct io_kiocb *req; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; + u32 seq_offset; }; struct io_timeout { struct file *file; - struct hrtimer timer; + struct io_timeout_data *data; }; /* @@ -304,29 +321,45 @@ struct io_kiocb { struct io_timeout timeout; }; - struct sqe_submit submit; + const struct io_uring_sqe *sqe; + struct file *ring_file; + int ring_fd; + bool has_user; + bool in_async; + bool needs_fixed_file; struct io_ring_ctx *ctx; - struct list_head list; + union { + struct list_head list; + struct rb_node rb_node; + }; struct list_head link_list; unsigned int flags; refcount_t refs; #define REQ_F_NOWAIT 1 /* must not punt to workers */ #define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */ #define REQ_F_FIXED_FILE 4 /* ctx owns file */ -#define REQ_F_SEQ_PREV 8 /* sequential with previous */ +#define REQ_F_LINK_NEXT 8 /* already grabbed next link */ #define REQ_F_IO_DRAIN 16 /* drain existing IO first */ #define REQ_F_IO_DRAINED 32 /* drain done */ #define REQ_F_LINK 64 /* linked sqes */ -#define REQ_F_LINK_DONE 128 /* linked sqes done */ +#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */ #define REQ_F_FAIL_LINK 256 /* fail rest of links */ -#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */ +#define REQ_F_DRAIN_LINK 512 /* link should be fully drained */ #define REQ_F_TIMEOUT 1024 /* timeout request */ +#define REQ_F_ISREG 2048 /* regular file */ +#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */ +#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */ +#define REQ_F_INFLIGHT 16384 /* on inflight list */ +#define REQ_F_COMP_LOCKED 32768 /* completion under lock */ +#define REQ_F_FREE_SQE 65536 /* free sqe if not async queued */ u64 user_data; u32 result; u32 sequence; - struct work_struct work; + struct list_head inflight_entry; + + struct io_wq_work work; }; #define IO_PLUG_THRESHOLD 2 @@ -352,10 +385,14 @@ struct io_submit_state { unsigned int ios_left; }; -static void io_sq_wq_submit_work(struct work_struct *work); -static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data, - long res); +static void io_wq_submit_work(struct io_wq_work **workptr); +static void io_cqring_fill_event(struct io_kiocb *req, long res); static void __io_free_req(struct io_kiocb *req); +static void io_put_req(struct io_kiocb *req); +static void io_double_put_req(struct io_kiocb *req); +static void __io_double_put_req(struct io_kiocb *req); +static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req); +static void io_queue_linked_timeout(struct io_kiocb *req); static struct kmem_cache *req_cachep; @@ -378,56 +415,67 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref) { struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); - complete(&ctx->ctx_done); + complete(&ctx->completions[0]); } static struct io_ring_ctx 
*io_ring_ctx_alloc(struct io_uring_params *p) { struct io_ring_ctx *ctx; - int i; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; + ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL); + if (!ctx->fallback_req) + goto err; + + ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL); + if (!ctx->completions) + goto err; + if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, - PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) { - kfree(ctx); - return NULL; - } + PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) + goto err; ctx->flags = p->flags; init_waitqueue_head(&ctx->cq_wait); - init_completion(&ctx->ctx_done); - init_completion(&ctx->sqo_thread_started); + INIT_LIST_HEAD(&ctx->cq_overflow_list); + init_completion(&ctx->completions[0]); + init_completion(&ctx->completions[1]); mutex_init(&ctx->uring_lock); init_waitqueue_head(&ctx->wait); - for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) { - spin_lock_init(&ctx->pending_async[i].lock); - INIT_LIST_HEAD(&ctx->pending_async[i].list); - atomic_set(&ctx->pending_async[i].cnt, 0); - } spin_lock_init(&ctx->completion_lock); INIT_LIST_HEAD(&ctx->poll_list); - INIT_LIST_HEAD(&ctx->cancel_list); + ctx->cancel_tree = RB_ROOT; INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); + init_waitqueue_head(&ctx->inflight_wait); + spin_lock_init(&ctx->inflight_lock); + INIT_LIST_HEAD(&ctx->inflight_list); return ctx; +err: + if (ctx->fallback_req) + kmem_cache_free(req_cachep, ctx->fallback_req); + kfree(ctx->completions); + kfree(ctx); + return NULL; } -static inline bool __io_sequence_defer(struct io_ring_ctx *ctx, - struct io_kiocb *req) +static inline bool __req_need_defer(struct io_kiocb *req) { - return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped; + struct io_ring_ctx *ctx = req->ctx; + + return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped + + atomic_read(&ctx->cached_cq_overflow); } -static inline bool io_sequence_defer(struct io_ring_ctx *ctx, - struct io_kiocb *req) +static inline bool req_need_defer(struct io_kiocb *req) { - if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) - return false; + if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN) + return __req_need_defer(req); - return __io_sequence_defer(ctx, req); + return false; } static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) @@ -435,7 +483,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) struct io_kiocb *req; req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list); - if (req && !io_sequence_defer(ctx, req)) { + if (req && !req_need_defer(req)) { list_del_init(&req->list); return req; } @@ -448,9 +496,13 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) struct io_kiocb *req; req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list); - if (req && !__io_sequence_defer(ctx, req)) { - list_del_init(&req->list); - return req; + if (req) { + if (req->flags & REQ_F_TIMEOUT_NOSEQ) + return NULL; + if (!__req_need_defer(req)) { + list_del_init(&req->list); + return req; + } } return NULL; @@ -471,33 +523,80 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx) } } -static inline void io_queue_async_work(struct io_ring_ctx *ctx, - struct io_kiocb *req) +static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) +{ + u8 opcode = READ_ONCE(sqe->opcode); + + return !(opcode == IORING_OP_READ_FIXED || + opcode == IORING_OP_WRITE_FIXED); +} + +static inline bool 
io_prep_async_work(struct io_kiocb *req, + struct io_kiocb **link) { - int rw = 0; + bool do_hashed = false; - if (req->submit.sqe) { - switch (req->submit.sqe->opcode) { + if (req->sqe) { + switch (req->sqe->opcode) { case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: - rw = !(req->rw.ki_flags & IOCB_DIRECT); + do_hashed = true; + /* fall-through */ + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_SENDMSG: + case IORING_OP_RECVMSG: + case IORING_OP_ACCEPT: + case IORING_OP_POLL_ADD: + case IORING_OP_CONNECT: + /* + * We know REQ_F_ISREG is not set on some of these + * opcodes, but this enables us to keep the check in + * just one place. + */ + if (!(req->flags & REQ_F_ISREG)) + req->work.flags |= IO_WQ_WORK_UNBOUND; break; } + if (io_sqe_needs_user(req->sqe)) + req->work.flags |= IO_WQ_WORK_NEEDS_USER; + } + + *link = io_prep_linked_timeout(req); + return do_hashed; +} + +static inline void io_queue_async_work(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *link; + bool do_hashed; + + do_hashed = io_prep_async_work(req, &link); + + trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work, + req->flags); + if (!do_hashed) { + io_wq_enqueue(ctx->io_wq, &req->work); + } else { + io_wq_enqueue_hashed(ctx->io_wq, &req->work, + file_inode(req->file)); } - queue_work(ctx->sqo_wq[rw], &req->work); + if (link) + io_queue_linked_timeout(link); } static void io_kill_timeout(struct io_kiocb *req) { int ret; - ret = hrtimer_try_to_cancel(&req->timeout.timer); + ret = hrtimer_try_to_cancel(&req->timeout.data->timer); if (ret != -1) { atomic_inc(&req->ctx->cq_timeouts); - list_del(&req->list); - io_cqring_fill_event(req->ctx, req->user_data, 0); - __io_free_req(req); + list_del_init(&req->list); + io_cqring_fill_event(req, 0); + io_put_req(req); } } @@ -521,13 +620,8 @@ static void io_commit_cqring(struct io_ring_ctx *ctx) __io_commit_cqring(ctx); while ((req = io_get_deferred_req(ctx)) != NULL) { - if (req->flags & REQ_F_SHADOW_DRAIN) { - /* Just for drain, free it. */ - __io_free_req(req); - continue; - } req->flags |= REQ_F_IO_DRAINED; - io_queue_async_work(ctx, req); + io_queue_async_work(req); } } @@ -549,51 +643,128 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) return &rings->cqes[tail & ctx->cq_mask]; } -static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data, - long res) +static void io_cqring_ev_posted(struct io_ring_ctx *ctx) +{ + if (waitqueue_active(&ctx->wait)) + wake_up(&ctx->wait); + if (waitqueue_active(&ctx->sqo_wait)) + wake_up(&ctx->sqo_wait); + if (ctx->cq_ev_fd) + eventfd_signal(ctx->cq_ev_fd, 1); +} + +/* Returns true if there are no backlogged entries after the flush */ +static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) +{ + struct io_rings *rings = ctx->rings; + struct io_uring_cqe *cqe; + struct io_kiocb *req; + unsigned long flags; + LIST_HEAD(list); + + if (!force) { + if (list_empty_careful(&ctx->cq_overflow_list)) + return true; + if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) == + rings->cq_ring_entries)) + return false; + } + + spin_lock_irqsave(&ctx->completion_lock, flags); + + /* if force is set, the ring is going away. 
always drop after that */ + if (force) + ctx->cq_overflow_flushed = true; + + cqe = NULL; + while (!list_empty(&ctx->cq_overflow_list)) { + cqe = io_get_cqring(ctx); + if (!cqe && !force) + break; + + req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb, + list); + list_move(&req->list, &list); + if (cqe) { + WRITE_ONCE(cqe->user_data, req->user_data); + WRITE_ONCE(cqe->res, req->result); + WRITE_ONCE(cqe->flags, 0); + } else { + WRITE_ONCE(ctx->rings->cq_overflow, + atomic_inc_return(&ctx->cached_cq_overflow)); + } + } + + io_commit_cqring(ctx); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + io_cqring_ev_posted(ctx); + + while (!list_empty(&list)) { + req = list_first_entry(&list, struct io_kiocb, list); + list_del(&req->list); + io_put_req(req); + } + + return cqe != NULL; +} + +static void io_cqring_fill_event(struct io_kiocb *req, long res) { + struct io_ring_ctx *ctx = req->ctx; struct io_uring_cqe *cqe; + trace_io_uring_complete(ctx, req->user_data, res); + /* * If we can't get a cq entry, userspace overflowed the * submission (by quite a lot). Increment the overflow count in * the ring. */ cqe = io_get_cqring(ctx); - if (cqe) { - WRITE_ONCE(cqe->user_data, ki_user_data); + if (likely(cqe)) { + WRITE_ONCE(cqe->user_data, req->user_data); WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->flags, 0); + } else if (ctx->cq_overflow_flushed) { + WRITE_ONCE(ctx->rings->cq_overflow, + atomic_inc_return(&ctx->cached_cq_overflow)); } else { - unsigned overflow = READ_ONCE(ctx->rings->cq_overflow); - - WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1); + refcount_inc(&req->refs); + req->result = res; + list_add_tail(&req->list, &ctx->cq_overflow_list); } } -static void io_cqring_ev_posted(struct io_ring_ctx *ctx) -{ - if (waitqueue_active(&ctx->wait)) - wake_up(&ctx->wait); - if (waitqueue_active(&ctx->sqo_wait)) - wake_up(&ctx->sqo_wait); - if (ctx->cq_ev_fd) - eventfd_signal(ctx->cq_ev_fd, 1); -} - -static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data, - long res) +static void io_cqring_add_event(struct io_kiocb *req, long res) { + struct io_ring_ctx *ctx = req->ctx; unsigned long flags; spin_lock_irqsave(&ctx->completion_lock, flags); - io_cqring_fill_event(ctx, user_data, res); + io_cqring_fill_event(req, res); io_commit_cqring(ctx); spin_unlock_irqrestore(&ctx->completion_lock, flags); io_cqring_ev_posted(ctx); } +static inline bool io_is_fallback_req(struct io_kiocb *req) +{ + return req == (struct io_kiocb *) + ((unsigned long) req->ctx->fallback_req & ~1UL); +} + +static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx) +{ + struct io_kiocb *req; + + req = ctx->fallback_req; + if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req)) + return req; + + return NULL; +} + static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, struct io_submit_state *state) { @@ -606,7 +777,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, if (!state) { req = kmem_cache_alloc(req_cachep, gfp); if (unlikely(!req)) - goto out; + goto fallback; } else if (!state->free_reqs) { size_t sz; int ret; @@ -621,7 +792,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, if (unlikely(ret <= 0)) { state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); if (!state->reqs[0]) - goto out; + goto fallback; ret = 1; } state->free_reqs = ret - 1; @@ -633,14 +804,20 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, state->cur_req++; } +got_it: + req->ring_file = NULL; req->file = NULL; req->ctx = ctx; req->flags = 0; /* one is dropped 
after submission, the other at completion */ refcount_set(&req->refs, 2); req->result = 0; + INIT_IO_WORK(&req->work, io_wq_submit_work); return req; -out: +fallback: + req = io_get_fallback_req(ctx); + if (req) + goto got_it; percpu_ref_put(&ctx->refs); return NULL; } @@ -656,15 +833,56 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr) static void __io_free_req(struct io_kiocb *req) { + struct io_ring_ctx *ctx = req->ctx; + + if (req->flags & REQ_F_FREE_SQE) + kfree(req->sqe); if (req->file && !(req->flags & REQ_F_FIXED_FILE)) fput(req->file); - percpu_ref_put(&req->ctx->refs); - kmem_cache_free(req_cachep, req); + if (req->flags & REQ_F_INFLIGHT) { + unsigned long flags; + + spin_lock_irqsave(&ctx->inflight_lock, flags); + list_del(&req->inflight_entry); + if (waitqueue_active(&ctx->inflight_wait)) + wake_up(&ctx->inflight_wait); + spin_unlock_irqrestore(&ctx->inflight_lock, flags); + } + if (req->flags & REQ_F_TIMEOUT) + kfree(req->timeout.data); + percpu_ref_put(&ctx->refs); + if (likely(!io_is_fallback_req(req))) + kmem_cache_free(req_cachep, req); + else + clear_bit_unlock(0, (unsigned long *) ctx->fallback_req); +} + +static bool io_link_cancel_timeout(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + ret = hrtimer_try_to_cancel(&req->timeout.data->timer); + if (ret != -1) { + io_cqring_fill_event(req, -ECANCELED); + io_commit_cqring(ctx); + req->flags &= ~REQ_F_LINK; + io_put_req(req); + return true; + } + + return false; } -static void io_req_link_next(struct io_kiocb *req) +static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr) { + struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *nxt; + bool wake_ev = false; + + /* Already got next link */ + if (req->flags & REQ_F_LINK_NEXT) + return; /* * The list should never be empty when we are called here. But could @@ -672,18 +890,30 @@ static void io_req_link_next(struct io_kiocb *req) * safe side. 
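For illustration only (not part of the patch; struct request, get_req and put_req are invented names): io_get_req() above keeps one pre-allocated fallback request per ring and claims it with test_and_set_bit_lock() when the slab allocation fails, so forward progress does not depend on the allocator succeeding. A minimal userspace sketch of the same claim/release pattern with C11 atomics:

#include <stdatomic.h>
#include <stdlib.h>

struct request { int data; };

static struct request fallback_req;              /* single pre-allocated emergency object */
static atomic_flag fallback_in_use = ATOMIC_FLAG_INIT;

/* Try the normal allocator first; fall back to the emergency object,
 * but only if nobody else currently owns it. */
static struct request *get_req(void)
{
    struct request *req = malloc(sizeof(*req));

    if (req)
        return req;
    if (!atomic_flag_test_and_set_explicit(&fallback_in_use, memory_order_acquire))
        return &fallback_req;
    return NULL;                                 /* allocator and fallback both exhausted */
}

static void put_req(struct request *req)
{
    if (req == &fallback_req)
        atomic_flag_clear_explicit(&fallback_in_use, memory_order_release);
    else
        free(req);
}

int main(void)
{
    struct request *req = get_req();

    if (req)
        put_req(req);
    return 0;
}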
*/ nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list); - if (nxt) { - list_del(&nxt->list); + while (nxt) { + list_del_init(&nxt->list); + + if ((req->flags & REQ_F_LINK_TIMEOUT) && + (nxt->flags & REQ_F_TIMEOUT)) { + wake_ev |= io_link_cancel_timeout(nxt); + nxt = list_first_entry_or_null(&req->link_list, + struct io_kiocb, list); + req->flags &= ~REQ_F_LINK_TIMEOUT; + continue; + } if (!list_empty(&req->link_list)) { INIT_LIST_HEAD(&nxt->link_list); list_splice(&req->link_list, &nxt->link_list); nxt->flags |= REQ_F_LINK; } - nxt->flags |= REQ_F_LINK_DONE; - INIT_WORK(&nxt->work, io_sq_wq_submit_work); - io_queue_async_work(req->ctx, nxt); + *nxtptr = nxt; + break; } + + req->flags |= REQ_F_LINK_NEXT; + if (wake_ev) + io_cqring_ev_posted(ctx); } /* @@ -691,33 +921,86 @@ static void io_req_link_next(struct io_kiocb *req) */ static void io_fail_links(struct io_kiocb *req) { + struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *link; + unsigned long flags; + + spin_lock_irqsave(&ctx->completion_lock, flags); while (!list_empty(&req->link_list)) { link = list_first_entry(&req->link_list, struct io_kiocb, list); - list_del(&link->list); + list_del_init(&link->list); - io_cqring_add_event(req->ctx, link->user_data, -ECANCELED); - __io_free_req(link); + trace_io_uring_fail_link(req, link); + + if ((req->flags & REQ_F_LINK_TIMEOUT) && + link->sqe->opcode == IORING_OP_LINK_TIMEOUT) { + io_link_cancel_timeout(link); + } else { + io_cqring_fill_event(link, -ECANCELED); + __io_double_put_req(link); + } + req->flags &= ~REQ_F_LINK_TIMEOUT; } + + io_commit_cqring(ctx); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + io_cqring_ev_posted(ctx); } -static void io_free_req(struct io_kiocb *req) +static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt) { + if (likely(!(req->flags & REQ_F_LINK))) + return; + /* * If LINK is set, we have dependent requests in this chain. If we * didn't fail this request, queue the first one up, moving any other * dependencies to the next request. In case of failure, fail the rest * of the chain. */ - if (req->flags & REQ_F_LINK) { - if (req->flags & REQ_F_FAIL_LINK) - io_fail_links(req); - else - io_req_link_next(req); + if (req->flags & REQ_F_FAIL_LINK) { + io_fail_links(req); + } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) == + REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = req->ctx; + unsigned long flags; + + /* + * If this is a timeout link, we could be racing with the + * timeout timer. Grab the completion lock for this case to + * protect against that. + */ + spin_lock_irqsave(&ctx->completion_lock, flags); + io_req_link_next(req, nxt); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else { + io_req_link_next(req, nxt); } +} + +static void io_free_req(struct io_kiocb *req) +{ + struct io_kiocb *nxt = NULL; + io_req_find_next(req, &nxt); __io_free_req(req); + + if (nxt) + io_queue_async_work(nxt); +} + +/* + * Drop reference to request, return next in chain (if there is one) if this + * was the last reference to this request. 
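As a rough standalone illustration of the reference counting used here (not the kernel code; struct req and req_put are invented names): each io_kiocb starts with two references, one dropped once submission is done with it and one at completion, and the object is only freed when both are gone. In portable C11:

#include <stdatomic.h>
#include <stdlib.h>

struct req {
    atomic_int refs;        /* starts at 2: submission reference + completion reference */
    int result;
};

static struct req *req_alloc(void)
{
    struct req *r = calloc(1, sizeof(*r));

    if (r)
        atomic_init(&r->refs, 2);
    return r;
}

/* Drop one reference; free the request when the last one goes away. */
static void req_put(struct req *r)
{
    if (atomic_fetch_sub_explicit(&r->refs, 1, memory_order_acq_rel) == 1)
        free(r);
}

int main(void)
{
    struct req *r = req_alloc();

    if (!r)
        return 1;
    r->result = 42;
    req_put(r);     /* submission path done with it */
    req_put(r);     /* completion path done with it; freed here */
    return 0;
}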
+ */ +__attribute__((nonnull)) +static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr) +{ + io_req_find_next(req, nxtptr); + + if (refcount_dec_and_test(&req->refs)) + __io_free_req(req); } static void io_put_req(struct io_kiocb *req) @@ -726,13 +1009,51 @@ static void io_put_req(struct io_kiocb *req) io_free_req(req); } -static unsigned io_cqring_events(struct io_rings *rings) +/* + * Must only be used if we don't need to care about links, usually from + * within the completion handling itself. + */ +static void __io_double_put_req(struct io_kiocb *req) +{ + /* drop both submit and complete references */ + if (refcount_sub_and_test(2, &req->refs)) + __io_free_req(req); +} + +static void io_double_put_req(struct io_kiocb *req) +{ + /* drop both submit and complete references */ + if (refcount_sub_and_test(2, &req->refs)) + io_free_req(req); +} + +static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush) { + struct io_rings *rings = ctx->rings; + + /* + * noflush == true is from the waitqueue handler, just ensure we wake + * up the task, and the next invocation will flush the entries. We + * cannot safely to it from here. + */ + if (noflush && !list_empty(&ctx->cq_overflow_list)) + return -1U; + + io_cqring_overflow_flush(ctx, false); + /* See comment at the top of this file */ smp_rmb(); return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head); } +static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) +{ + struct io_rings *rings = ctx->rings; + + /* make sure SQ entry isn't read before tail */ + return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; +} + /* * Find and free completed poll iocbs */ @@ -748,7 +1069,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, req = list_first_entry(done, struct io_kiocb, list); list_del(&req->list); - io_cqring_fill_event(ctx, req->user_data, req->result); + io_cqring_fill_event(req, req->result); (*nr_events)++; if (refcount_dec_and_test(&req->refs)) { @@ -757,8 +1078,9 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, * completions for those, only batch free for fixed * file and non-linked commands. */ - if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) == - REQ_F_FIXED_FILE) { + if (((req->flags & + (REQ_F_FIXED_FILE|REQ_F_LINK|REQ_F_FREE_SQE)) == + REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) { reqs[to_free++] = req; if (to_free == ARRAY_SIZE(reqs)) io_free_req_many(ctx, reqs, &to_free); @@ -862,19 +1184,11 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx) mutex_unlock(&ctx->uring_lock); } -static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, - long min) +static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, + long min) { - int iters, ret = 0; - - /* - * We disallow the app entering submit/complete with polling, but we - * still need to lock the ring to prevent racing with polled issue - * that got punted to a workqueue. - */ - mutex_lock(&ctx->uring_lock); + int iters = 0, ret = 0; - iters = 0; do { int tmin = 0; @@ -883,7 +1197,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, * If we do, we can potentially be spinning for commands that * already triggered a CQE (eg in error). 
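A small self-contained example of the unsigned ring arithmetic used by io_cqring_events() and io_sqring_entries() above (ring_entries is an invented name): head and tail are free-running 32-bit counters, and their unsigned difference gives the occupancy correctly even after the counters wrap, as long as the ring never holds more than UINT32_MAX entries; indices are reduced modulo the ring size elsewhere (the & ctx->cq_mask step).

#include <assert.h>
#include <stdint.h>

/* Number of entries currently in a ring whose head/tail are
 * free-running 32-bit counters. */
static uint32_t ring_entries(uint32_t head, uint32_t tail)
{
    return tail - head;     /* well-defined modulo 2^32 */
}

int main(void)
{
    /* Tail has wrapped past zero while head has not wrapped yet. */
    uint32_t head = UINT32_MAX - 1;     /* 0xfffffffe */
    uint32_t tail = 2;                  /* wrapped */

    assert(ring_entries(head, tail) == 4);
    assert(ring_entries(7, 7) == 0);    /* empty ring */
    return 0;
}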
*/ - if (io_cqring_events(ctx->rings)) + if (io_cqring_events(ctx, false)) break; /* @@ -910,42 +1224,76 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, ret = 0; } while (min && !*nr_events && !need_resched()); + return ret; +} + +static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, + long min) +{ + int ret; + + /* + * We disallow the app entering submit/complete with polling, but we + * still need to lock the ring to prevent racing with polled issue + * that got punted to a workqueue. + */ + mutex_lock(&ctx->uring_lock); + ret = __io_iopoll_check(ctx, nr_events, min); mutex_unlock(&ctx->uring_lock); return ret; } -static void kiocb_end_write(struct kiocb *kiocb) +static void kiocb_end_write(struct io_kiocb *req) { - if (kiocb->ki_flags & IOCB_WRITE) { - struct inode *inode = file_inode(kiocb->ki_filp); + /* + * Tell lockdep we inherited freeze protection from submission + * thread. + */ + if (req->flags & REQ_F_ISREG) { + struct inode *inode = file_inode(req->file); - /* - * Tell lockdep we inherited freeze protection from submission - * thread. - */ - if (S_ISREG(inode->i_mode)) - __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); - file_end_write(kiocb->ki_filp); + __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); } + file_end_write(req->file); } -static void io_complete_rw(struct kiocb *kiocb, long res, long res2) +static void io_complete_rw_common(struct kiocb *kiocb, long res) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); - kiocb_end_write(kiocb); + if (kiocb->ki_flags & IOCB_WRITE) + kiocb_end_write(req); if ((req->flags & REQ_F_LINK) && res != req->result) req->flags |= REQ_F_FAIL_LINK; - io_cqring_add_event(req->ctx, req->user_data, res); + io_cqring_add_event(req, res); +} + +static void io_complete_rw(struct kiocb *kiocb, long res, long res2) +{ + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); + + io_complete_rw_common(kiocb, res); io_put_req(req); } +static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res) +{ + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); + struct io_kiocb *nxt = NULL; + + io_complete_rw_common(kiocb, res); + io_put_req_find_next(req, &nxt); + + return nxt; +} + static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); - kiocb_end_write(kiocb); + if (kiocb->ki_flags & IOCB_WRITE) + kiocb_end_write(req); if ((req->flags & REQ_F_LINK) && res != req->result) req->flags |= REQ_F_FAIL_LINK; @@ -1047,10 +1395,9 @@ static bool io_file_supports_async(struct file *file) return false; } -static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, - bool force_nonblock) +static int io_prep_rw(struct io_kiocb *req, bool force_nonblock) { - const struct io_uring_sqe *sqe = s->sqe; + const struct io_uring_sqe *sqe = req->sqe; struct io_ring_ctx *ctx = req->ctx; struct kiocb *kiocb = &req->rw; unsigned ioprio; @@ -1059,8 +1406,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, if (!req->file) return -EBADF; - if (force_nonblock && !io_file_supports_async(req->file)) - force_nonblock = false; + if (S_ISREG(file_inode(req->file)->i_mode)) + req->flags |= REQ_F_ISREG; + + /* + * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so + * we know to async punt it even if it was opened O_NONBLOCK + */ + if (force_nonblock && !io_file_supports_async(req->file)) { + req->flags |= REQ_F_MUST_PUNT; + return -EAGAIN; + } 
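The REQ_F_MUST_PUNT handling above follows a general pattern: try the operation without blocking first, and only hand it to a worker that is allowed to wait when the fast path reports it would block. A loose userspace analogy (not the io_uring implementation; blocking_reader is an invented helper) using a non-blocking pipe and a thread:

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int pipefd[2];

/* The "punted" path: this worker is allowed to wait for readiness. */
static void *blocking_reader(void *arg)
{
    struct pollfd pfd = { .fd = pipefd[0], .events = POLLIN };
    char buf[16];
    ssize_t n;

    (void)arg;
    poll(&pfd, 1, -1);                   /* may sleep, unlike the fast path */
    n = read(pipefd[0], buf, sizeof(buf));
    if (n > 0)
        printf("worker read %zd bytes\n", n);
    return NULL;
}

int main(void)
{
    pthread_t worker;
    char buf[16];
    ssize_t n;

    if (pipe(pipefd))
        return 1;
    fcntl(pipefd[0], F_SETFL, O_NONBLOCK);

    /* Fast path: attempt the read without blocking. */
    n = read(pipefd[0], buf, sizeof(buf));
    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
        /* It would have blocked: punt to a worker instead. */
        if (pthread_create(&worker, NULL, blocking_reader, NULL))
            return 1;
        write(pipefd[1], "hello", 5);    /* data shows up later */
        pthread_join(worker, NULL);
    }
    return 0;
}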
kiocb->ki_pos = READ_ONCE(sqe->off); kiocb->ki_flags = iocb_flags(kiocb->ki_filp); @@ -1081,7 +1437,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, return ret; /* don't allow async punt if RWF_NOWAIT was requested */ - if (kiocb->ki_flags & IOCB_NOWAIT) + if ((kiocb->ki_flags & IOCB_NOWAIT) || + (req->file->f_flags & O_NONBLOCK)) req->flags |= REQ_F_NOWAIT; if (force_nonblock) @@ -1094,6 +1451,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, kiocb->ki_flags |= IOCB_HIPRI; kiocb->ki_complete = io_complete_rw_iopoll; + req->result = 0; } else { if (kiocb->ki_flags & IOCB_HIPRI) return -EINVAL; @@ -1123,9 +1481,18 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) } } -static int io_import_fixed(struct io_ring_ctx *ctx, int rw, - const struct io_uring_sqe *sqe, - struct iov_iter *iter) +static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt, + bool in_async) +{ + if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw) + *nxt = __io_complete_rw(kiocb, ret); + else + io_rw_done(kiocb, ret); +} + +static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw, + const struct io_uring_sqe *sqe, + struct iov_iter *iter) { size_t len = READ_ONCE(sqe->len); struct io_mapped_ubuf *imu; @@ -1194,14 +1561,13 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw, } } - return 0; + return len; } -static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, - const struct sqe_submit *s, struct iovec **iovec, - struct iov_iter *iter) +static ssize_t io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct iov_iter *iter) { - const struct io_uring_sqe *sqe = s->sqe; + const struct io_uring_sqe *sqe = req->sqe; void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); size_t sqe_len = READ_ONCE(sqe->len); u8 opcode; @@ -1215,18 +1581,16 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, * flag. */ opcode = READ_ONCE(sqe->opcode); - if (opcode == IORING_OP_READ_FIXED || - opcode == IORING_OP_WRITE_FIXED) { - ssize_t ret = io_import_fixed(ctx, rw, sqe, iter); + if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { *iovec = NULL; - return ret; + return io_import_fixed(req->ctx, rw, sqe, iter); } - if (!s->has_user) + if (!req->has_user) return -EFAULT; #ifdef CONFIG_COMPAT - if (ctx->compat) + if (req->ctx->compat) return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter); #endif @@ -1234,65 +1598,6 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter); } -static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb) -{ - if (al->file == kiocb->ki_filp) { - off_t start, end; - - /* - * Allow merging if we're anywhere in the range of the same - * page. Generally this happens for sub-page reads or writes, - * and it's beneficial to allow the first worker to bring the - * page in and the piggy backed work can then work on the - * cached page. - */ - start = al->io_start & PAGE_MASK; - end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK; - if (kiocb->ki_pos >= start && kiocb->ki_pos <= end) - return true; - } - - al->file = NULL; - return false; -} - -/* - * Make a note of the last file/offset/direction we punted to async - * context. We'll use this information to see if we can piggy back a - * sequential request onto the previous one, if it's still hasn't been - * completed by the async worker. 
- */ -static void io_async_list_note(int rw, struct io_kiocb *req, size_t len) -{ - struct async_list *async_list = &req->ctx->pending_async[rw]; - struct kiocb *kiocb = &req->rw; - struct file *filp = kiocb->ki_filp; - - if (io_should_merge(async_list, kiocb)) { - unsigned long max_bytes; - - /* Use 8x RA size as a decent limiter for both reads/writes */ - max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3); - if (!max_bytes) - max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3); - - /* If max len are exceeded, reset the state */ - if (async_list->io_len + len <= max_bytes) { - req->flags |= REQ_F_SEQ_PREV; - async_list->io_len += len; - } else { - async_list->file = NULL; - } - } - - /* New file? Reset state. */ - if (async_list->file != filp) { - async_list->io_start = kiocb->ki_pos; - async_list->io_len = len; - async_list->file = filp; - } -} - /* * For files that don't have ->read_iter() and ->write_iter(), handle them * by looping over ->read() or ->write() manually. @@ -1313,9 +1618,19 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, return -EAGAIN; while (iov_iter_count(iter)) { - struct iovec iovec = iov_iter_iovec(iter); + struct iovec iovec; ssize_t nr; + if (!iov_iter_is_bvec(iter)) { + iovec = iov_iter_iovec(iter); + } else { + /* fixed buffers import bvec */ + iovec.iov_base = kmap(iter->bvec->bv_page) + + iter->iov_offset; + iovec.iov_len = min(iter->count, + iter->bvec->bv_len - iter->iov_offset); + } + if (rw == READ) { nr = file->f_op->read(file, iovec.iov_base, iovec.iov_len, &kiocb->ki_pos); @@ -1324,6 +1639,9 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, iovec.iov_len, &kiocb->ki_pos); } + if (iov_iter_is_bvec(iter)) + kunmap(iter->bvec->bv_page); + if (nr < 0) { if (!ret) ret = nr; @@ -1338,7 +1656,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, return ret; } -static int io_read(struct io_kiocb *req, const struct sqe_submit *s, +static int io_read(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; @@ -1348,7 +1666,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, size_t iov_count; ssize_t read_size, ret; - ret = io_prep_rw(req, s, force_nonblock); + ret = io_prep_rw(req, force_nonblock); if (ret) return ret; file = kiocb->ki_filp; @@ -1356,7 +1674,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, if (unlikely(!(file->f_mode & FMODE_READ))) return -EBADF; - ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter); + ret = io_import_iovec(READ, req, &iovec, &iter); if (ret < 0) return ret; @@ -1382,26 +1700,21 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, * need async punt anyway, so it's more efficient to do it * here. */ - if (force_nonblock && ret2 > 0 && ret2 < read_size) + if (force_nonblock && !(req->flags & REQ_F_NOWAIT) && + (req->flags & REQ_F_ISREG) && + ret2 > 0 && ret2 < read_size) ret2 = -EAGAIN; /* Catch -EAGAIN return for forced non-blocking submission */ - if (!force_nonblock || ret2 != -EAGAIN) { - io_rw_done(kiocb, ret2); - } else { - /* - * If ->needs_lock is true, we're already in async - * context. 
- */ - if (!s->needs_lock) - io_async_list_note(READ, req, iov_count); + if (!force_nonblock || ret2 != -EAGAIN) + kiocb_done(kiocb, ret2, nxt, req->in_async); + else ret = -EAGAIN; - } } kfree(iovec); return ret; } -static int io_write(struct io_kiocb *req, const struct sqe_submit *s, +static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; @@ -1411,7 +1724,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, size_t iov_count; ssize_t ret; - ret = io_prep_rw(req, s, force_nonblock); + ret = io_prep_rw(req, force_nonblock); if (ret) return ret; @@ -1419,7 +1732,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, if (unlikely(!(file->f_mode & FMODE_WRITE))) return -EBADF; - ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter); + ret = io_import_iovec(WRITE, req, &iovec, &iter); if (ret < 0) return ret; @@ -1429,12 +1742,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, iov_count = iov_iter_count(&iter); ret = -EAGAIN; - if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) { - /* If ->needs_lock is true, we're already in async context. */ - if (!s->needs_lock) - io_async_list_note(WRITE, req, iov_count); + if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) goto out_free; - } ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); if (!ret) { @@ -1447,7 +1756,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, * released so that it doesn't complain about the held lock when * we return to userspace. */ - if (S_ISREG(file_inode(file)->i_mode)) { + if (req->flags & REQ_F_ISREG) { __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true); __sb_writers_release(file_inode(file)->i_sb, @@ -1459,17 +1768,10 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, ret2 = call_write_iter(file, kiocb, &iter); else ret2 = loop_rw_iter(WRITE, file, kiocb, &iter); - if (!force_nonblock || ret2 != -EAGAIN) { - io_rw_done(kiocb, ret2); - } else { - /* - * If ->needs_lock is true, we're already in async - * context. - */ - if (!s->needs_lock) - io_async_list_note(WRITE, req, iov_count); + if (!force_nonblock || ret2 != -EAGAIN) + kiocb_done(kiocb, ret2, nxt, req->in_async); + else ret = -EAGAIN; - } } out_free: kfree(iovec); @@ -1479,15 +1781,14 @@ out_free: /* * IORING_OP_NOP just posts a completion event, nothing else. 
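loop_rw_iter() above services vectored I/O for files that only provide plain ->read()/->write() by walking the segments one at a time. A userspace sketch of the same idea, emulating readv() with repeated read() calls (loop_read_iovec is an invented name; a short read simply ends the loop):

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

/* Emulate readv(): one read() per segment; stop on error, short read,
 * or end of file, and report the total number of bytes transferred. */
static ssize_t loop_read_iovec(int fd, const struct iovec *iov, int iovcnt)
{
    ssize_t total = 0;

    for (int i = 0; i < iovcnt; i++) {
        ssize_t n = read(fd, iov[i].iov_base, iov[i].iov_len);

        if (n < 0)
            return total ? total : -1;   /* report partial progress if any */
        total += n;
        if ((size_t)n < iov[i].iov_len)
            break;                       /* short read: later segments stay untouched */
    }
    return total;
}

int main(void)
{
    char a[5], b[5];
    struct iovec iov[2] = {
        { .iov_base = a, .iov_len = sizeof(a) },
        { .iov_base = b, .iov_len = sizeof(b) },
    };
    int fds[2];

    if (pipe(fds) || write(fds[1], "0123456789", 10) != 10)
        return 1;
    printf("read %zd bytes\n", loop_read_iovec(fds[0], iov, 2));
    return 0;
}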
*/ -static int io_nop(struct io_kiocb *req, u64 user_data) +static int io_nop(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - long err = 0; if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - io_cqring_add_event(ctx, user_data, err); + io_cqring_add_event(req, 0); io_put_req(req); return 0; } @@ -1508,7 +1809,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) } static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, - bool force_nonblock) + struct io_kiocb **nxt, bool force_nonblock) { loff_t sqe_off = READ_ONCE(sqe->off); loff_t sqe_len = READ_ONCE(sqe->len); @@ -1534,8 +1835,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret < 0 && (req->flags & REQ_F_LINK)) req->flags |= REQ_F_FAIL_LINK; - io_cqring_add_event(req->ctx, sqe->user_data, ret); - io_put_req(req); + io_cqring_add_event(req, ret); + io_put_req_find_next(req, nxt); return 0; } @@ -1557,6 +1858,7 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_sync_file_range(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **nxt, bool force_nonblock) { loff_t sqe_off; @@ -1580,14 +1882,14 @@ static int io_sync_file_range(struct io_kiocb *req, if (ret < 0 && (req->flags & REQ_F_LINK)) req->flags |= REQ_F_FAIL_LINK; - io_cqring_add_event(req->ctx, sqe->user_data, ret); - io_put_req(req); + io_cqring_add_event(req, ret); + io_put_req_find_next(req, nxt); return 0; } #if defined(CONFIG_NET) static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - bool force_nonblock, + struct io_kiocb **nxt, bool force_nonblock, long (*fn)(struct socket *, struct user_msghdr __user *, unsigned int)) { @@ -1616,59 +1918,161 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, return ret; } - io_cqring_add_event(req->ctx, sqe->user_data, ret); - io_put_req(req); + io_cqring_add_event(req, ret); + if (ret < 0 && (req->flags & REQ_F_LINK)) + req->flags |= REQ_F_FAIL_LINK; + io_put_req_find_next(req, nxt); return 0; } #endif static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - bool force_nonblock) + struct io_kiocb **nxt, bool force_nonblock) { #if defined(CONFIG_NET) - return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock); + return io_send_recvmsg(req, sqe, nxt, force_nonblock, + __sys_sendmsg_sock); #else return -EOPNOTSUPP; #endif } static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - bool force_nonblock) + struct io_kiocb **nxt, bool force_nonblock) +{ +#if defined(CONFIG_NET) + return io_send_recvmsg(req, sqe, nxt, force_nonblock, + __sys_recvmsg_sock); +#else + return -EOPNOTSUPP; +#endif +} + +static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **nxt, bool force_nonblock) +{ +#if defined(CONFIG_NET) + struct sockaddr __user *addr; + int __user *addr_len; + unsigned file_flags; + int flags, ret; + + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + return -EINVAL; + if (sqe->ioprio || sqe->len || sqe->buf_index) + return -EINVAL; + + addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr); + addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2); + flags = READ_ONCE(sqe->accept_flags); + file_flags = force_nonblock ? 
O_NONBLOCK : 0; + + ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags); + if (ret == -EAGAIN && force_nonblock) { + req->work.flags |= IO_WQ_WORK_NEEDS_FILES; + return -EAGAIN; + } + if (ret == -ERESTARTSYS) + ret = -EINTR; + if (ret < 0 && (req->flags & REQ_F_LINK)) + req->flags |= REQ_F_FAIL_LINK; + io_cqring_add_event(req, ret); + io_put_req_find_next(req, nxt); + return 0; +#else + return -EOPNOTSUPP; +#endif +} + +static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **nxt, bool force_nonblock) { #if defined(CONFIG_NET) - return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock); + struct sockaddr __user *addr; + unsigned file_flags; + int addr_len, ret; + + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + return -EINVAL; + if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags) + return -EINVAL; + + addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr); + addr_len = READ_ONCE(sqe->addr2); + file_flags = force_nonblock ? O_NONBLOCK : 0; + + ret = __sys_connect_file(req->file, addr, addr_len, file_flags); + if (ret == -EAGAIN && force_nonblock) + return -EAGAIN; + if (ret == -ERESTARTSYS) + ret = -EINTR; + if (ret < 0 && (req->flags & REQ_F_LINK)) + req->flags |= REQ_F_FAIL_LINK; + io_cqring_add_event(req, ret); + io_put_req_find_next(req, nxt); + return 0; #else return -EOPNOTSUPP; #endif } +static inline void io_poll_remove_req(struct io_kiocb *req) +{ + if (!RB_EMPTY_NODE(&req->rb_node)) { + rb_erase(&req->rb_node, &req->ctx->cancel_tree); + RB_CLEAR_NODE(&req->rb_node); + } +} + static void io_poll_remove_one(struct io_kiocb *req) { struct io_poll_iocb *poll = &req->poll; spin_lock(&poll->head->lock); WRITE_ONCE(poll->canceled, true); - if (!list_empty(&poll->wait.entry)) { - list_del_init(&poll->wait.entry); - io_queue_async_work(req->ctx, req); + if (!list_empty(&poll->wait->entry)) { + list_del_init(&poll->wait->entry); + io_queue_async_work(req); } spin_unlock(&poll->head->lock); - - list_del_init(&req->list); + io_poll_remove_req(req); } static void io_poll_remove_all(struct io_ring_ctx *ctx) { + struct rb_node *node; struct io_kiocb *req; spin_lock_irq(&ctx->completion_lock); - while (!list_empty(&ctx->cancel_list)) { - req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list); + while ((node = rb_first(&ctx->cancel_tree)) != NULL) { + req = rb_entry(node, struct io_kiocb, rb_node); io_poll_remove_one(req); } spin_unlock_irq(&ctx->completion_lock); } +static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) +{ + struct rb_node *p, *parent = NULL; + struct io_kiocb *req; + + p = ctx->cancel_tree.rb_node; + while (p) { + parent = p; + req = rb_entry(parent, struct io_kiocb, rb_node); + if (sqe_addr < req->user_data) { + p = p->rb_left; + } else if (sqe_addr > req->user_data) { + p = p->rb_right; + } else { + io_poll_remove_one(req); + return 0; + } + } + + return -ENOENT; +} + /* * Find a running poll command that matches one specified in sqe->addr, * and remove it if found. 
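io_poll_cancel() above locates a pending poll request by descending a tree keyed on the sqe's user_data, and io_poll_req_insert() further down adds entries with the same comparison. The kernel keeps a self-balancing rbtree; the standalone sketch below uses a plain, unbalanced binary search tree (struct node, tree_insert and tree_find are invented names) purely to show the keyed descend-and-compare pattern:

#include <stdint.h>
#include <stdio.h>

struct node {
    uint64_t user_data;                 /* lookup key, as supplied in the sqe */
    struct node *left, *right;
};

/* Insert a node, going left for smaller keys and right for larger ones. */
static void tree_insert(struct node **root, struct node *n)
{
    while (*root) {
        if (n->user_data < (*root)->user_data)
            root = &(*root)->left;
        else
            root = &(*root)->right;
    }
    n->left = n->right = NULL;
    *root = n;
}

/* Return the node carrying user_data, or NULL if no such request is pending. */
static struct node *tree_find(struct node *root, uint64_t user_data)
{
    while (root) {
        if (user_data < root->user_data)
            root = root->left;
        else if (user_data > root->user_data)
            root = root->right;
        else
            return root;
    }
    return NULL;
}

int main(void)
{
    struct node a = { .user_data = 7 }, b = { .user_data = 3 };
    struct node *root = NULL;

    tree_insert(&root, &a);
    tree_insert(&root, &b);
    printf("key 3 pending: %s\n", tree_find(root, 3) ? "yes" : "no");
    printf("key 9 pending: %s\n", tree_find(root, 9) ? "yes" : "no");
    return 0;
}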
@@ -1676,8 +2080,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx) static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *poll_req, *next; - int ret = -ENOENT; + int ret; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; @@ -1686,37 +2089,48 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EINVAL; spin_lock_irq(&ctx->completion_lock); - list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) { - if (READ_ONCE(sqe->addr) == poll_req->user_data) { - io_poll_remove_one(poll_req); - ret = 0; - break; - } - } + ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr)); spin_unlock_irq(&ctx->completion_lock); - io_cqring_add_event(req->ctx, sqe->user_data, ret); + io_cqring_add_event(req, ret); + if (ret < 0 && (req->flags & REQ_F_LINK)) + req->flags |= REQ_F_FAIL_LINK; io_put_req(req); return 0; } -static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req, - __poll_t mask) +static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) { + struct io_ring_ctx *ctx = req->ctx; + req->poll.done = true; - io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask)); + kfree(req->poll.wait); + if (error) + io_cqring_fill_event(req, error); + else + io_cqring_fill_event(req, mangle_poll(mask)); io_commit_cqring(ctx); } -static void io_poll_complete_work(struct work_struct *work) +static void io_poll_complete_work(struct io_wq_work **workptr) { + struct io_wq_work *work = *workptr; struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_poll_iocb *poll = &req->poll; struct poll_table_struct pt = { ._key = poll->events }; struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *nxt = NULL; __poll_t mask = 0; + int ret = 0; + + if (work->flags & IO_WQ_WORK_CANCEL) { + WRITE_ONCE(poll->canceled, true); + ret = -ECANCELED; + } else if (READ_ONCE(poll->canceled)) { + ret = -ECANCELED; + } - if (!READ_ONCE(poll->canceled)) + if (ret != -ECANCELED) mask = vfs_poll(poll->file, &pt) & poll->events; /* @@ -1727,24 +2141,28 @@ static void io_poll_complete_work(struct work_struct *work) * avoid further branches in the fast path. */ spin_lock_irq(&ctx->completion_lock); - if (!mask && !READ_ONCE(poll->canceled)) { - add_wait_queue(poll->head, &poll->wait); + if (!mask && ret != -ECANCELED) { + add_wait_queue(poll->head, poll->wait); spin_unlock_irq(&ctx->completion_lock); return; } - list_del_init(&req->list); - io_poll_complete(ctx, req, mask); + io_poll_remove_req(req); + io_poll_complete(req, mask, ret); spin_unlock_irq(&ctx->completion_lock); io_cqring_ev_posted(ctx); - io_put_req(req); + + if (ret < 0 && req->flags & REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; + io_put_req_find_next(req, &nxt); + if (nxt) + *workptr = &nxt->work; } static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, void *key) { - struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb, - wait); + struct io_poll_iocb *poll = wait->private; struct io_kiocb *req = container_of(poll, struct io_kiocb, poll); struct io_ring_ctx *ctx = req->ctx; __poll_t mask = key_to_poll(key); @@ -1754,17 +2172,24 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, if (mask && !(mask & poll->events)) return 0; - list_del_init(&poll->wait.entry); + list_del_init(&poll->wait->entry); + /* + * Run completion inline if we can. 
We're using trylock here because + * we are violating the completion_lock -> poll wq lock ordering. + * If we have a link timeout we're going to need the completion_lock + * for finalizing the request, mark us as having grabbed that already. + */ if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) { - list_del(&req->list); - io_poll_complete(ctx, req, mask); + io_poll_remove_req(req); + io_poll_complete(req, mask, 0); + req->flags |= REQ_F_COMP_LOCKED; + io_put_req(req); spin_unlock_irqrestore(&ctx->completion_lock, flags); io_cqring_ev_posted(ctx); - io_put_req(req); } else { - io_queue_async_work(ctx, req); + io_queue_async_work(req); } return 1; @@ -1788,10 +2213,30 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, pt->error = 0; pt->req->poll.head = head; - add_wait_queue(head, &pt->req->poll.wait); + add_wait_queue(head, pt->req->poll.wait); +} + +static void io_poll_req_insert(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct rb_node **p = &ctx->cancel_tree.rb_node; + struct rb_node *parent = NULL; + struct io_kiocb *tmp; + + while (*p) { + parent = *p; + tmp = rb_entry(parent, struct io_kiocb, rb_node); + if (req->user_data < tmp->user_data) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&req->rb_node, parent, p); + rb_insert_color(&req->rb_node, &ctx->cancel_tree); } -static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **nxt) { struct io_poll_iocb *poll = &req->poll; struct io_ring_ctx *ctx = req->ctx; @@ -1807,10 +2252,15 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (!poll->file) return -EBADF; - req->submit.sqe = NULL; - INIT_WORK(&req->work, io_poll_complete_work); + poll->wait = kmalloc(sizeof(*poll->wait), GFP_KERNEL); + if (!poll->wait) + return -ENOMEM; + + req->sqe = NULL; + INIT_IO_WORK(&req->work, io_poll_complete_work); events = READ_ONCE(sqe->poll_events); poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; + RB_CLEAR_NODE(&req->rb_node); poll->head = NULL; poll->done = false; @@ -1822,8 +2272,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ /* initialized the list so that we can do list_empty checks */ - INIT_LIST_HEAD(&poll->wait.entry); - init_waitqueue_func_entry(&poll->wait, io_poll_wake); + INIT_LIST_HEAD(&poll->wait->entry); + init_waitqueue_func_entry(poll->wait, io_poll_wake); + poll->wait->private = poll; INIT_LIST_HEAD(&req->list); @@ -1832,114 +2283,327 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) spin_lock_irq(&ctx->completion_lock); if (likely(poll->head)) { spin_lock(&poll->head->lock); - if (unlikely(list_empty(&poll->wait.entry))) { + if (unlikely(list_empty(&poll->wait->entry))) { if (ipt.error) cancel = true; ipt.error = 0; mask = 0; } if (mask || ipt.error) - list_del_init(&poll->wait.entry); + list_del_init(&poll->wait->entry); else if (cancel) WRITE_ONCE(poll->canceled, true); else if (!poll->done) /* actually waiting for an event */ - list_add_tail(&req->list, &ctx->cancel_list); + io_poll_req_insert(req); spin_unlock(&poll->head->lock); } if (mask) { /* no async, we'd stolen it */ ipt.error = 0; - io_poll_complete(ctx, req, mask); + io_poll_complete(req, mask, 0); } spin_unlock_irq(&ctx->completion_lock); if (mask) { io_cqring_ev_posted(ctx); - io_put_req(req); + 
io_put_req_find_next(req, nxt); } return ipt.error; } static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) { - struct io_ring_ctx *ctx; - struct io_kiocb *req; + struct io_timeout_data *data = container_of(timer, + struct io_timeout_data, timer); + struct io_kiocb *req = data->req; + struct io_ring_ctx *ctx = req->ctx; unsigned long flags; - req = container_of(timer, struct io_kiocb, timeout.timer); - ctx = req->ctx; atomic_inc(&ctx->cq_timeouts); spin_lock_irqsave(&ctx->completion_lock, flags); - list_del(&req->list); + /* + * We could be racing with timeout deletion. If the list is empty, + * then timeout lookup already found it and will be handling it. + */ + if (!list_empty(&req->list)) { + struct io_kiocb *prev; + + /* + * Adjust the reqs sequence before the current one because it + * will consume a slot in the cq_ring and the the cq_tail + * pointer will be increased, otherwise other timeout reqs may + * return in advance without waiting for enough wait_nr. + */ + prev = req; + list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list) + prev->sequence++; + list_del_init(&req->list); + } - io_cqring_fill_event(ctx, req->user_data, -ETIME); + io_cqring_fill_event(req, -ETIME); io_commit_cqring(ctx); spin_unlock_irqrestore(&ctx->completion_lock, flags); io_cqring_ev_posted(ctx); - + if (req->flags & REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; io_put_req(req); return HRTIMER_NORESTART; } -static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) +{ + struct io_kiocb *req; + int ret = -ENOENT; + + list_for_each_entry(req, &ctx->timeout_list, list) { + if (user_data == req->user_data) { + list_del_init(&req->list); + ret = 0; + break; + } + } + + if (ret == -ENOENT) + return ret; + + ret = hrtimer_try_to_cancel(&req->timeout.data->timer); + if (ret == -1) + return -EALREADY; + + if (req->flags & REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; + io_cqring_fill_event(req, -ECANCELED); + io_put_req(req); + return 0; +} + +/* + * Remove or update an existing timeout command + */ +static int io_timeout_remove(struct io_kiocb *req, + const struct io_uring_sqe *sqe) { - unsigned count, req_dist, tail_index; struct io_ring_ctx *ctx = req->ctx; - struct list_head *entry; - struct timespec64 ts; + unsigned flags; + int ret; if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags || - sqe->len != 1) + if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) + return -EINVAL; + flags = READ_ONCE(sqe->timeout_flags); + if (flags) + return -EINVAL; + + spin_lock_irq(&ctx->completion_lock); + ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr)); + + io_cqring_fill_event(req, ret); + io_commit_cqring(ctx); + spin_unlock_irq(&ctx->completion_lock); + io_cqring_ev_posted(ctx); + if (ret < 0 && req->flags & REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; + io_put_req(req); + return 0; +} + +static int io_timeout_setup(struct io_kiocb *req) +{ + const struct io_uring_sqe *sqe = req->sqe; + struct io_timeout_data *data; + unsigned flags; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->buf_index || sqe->len != 1) + return -EINVAL; + flags = READ_ONCE(sqe->timeout_flags); + if (flags & ~IORING_TIMEOUT_ABS) return -EINVAL; - if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr))) + data = kzalloc(sizeof(struct io_timeout_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + 
data->req = req; + req->timeout.data = data; + req->flags |= REQ_F_TIMEOUT; + + if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) return -EFAULT; + if (flags & IORING_TIMEOUT_ABS) + data->mode = HRTIMER_MODE_ABS; + else + data->mode = HRTIMER_MODE_REL; + + hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); + return 0; +} + +static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + unsigned count; + struct io_ring_ctx *ctx = req->ctx; + struct io_timeout_data *data; + struct list_head *entry; + unsigned span = 0; + int ret; + + ret = io_timeout_setup(req); + /* common setup allows flags (like links) set, we don't */ + if (!ret && sqe->flags) + ret = -EINVAL; + if (ret) + return ret; + /* * sqe->off holds how many events that need to occur for this - * timeout event to be satisfied. + * timeout event to be satisfied. If it isn't set, then this is + * a pure timeout request, sequence isn't used. */ count = READ_ONCE(sqe->off); - if (!count) - count = 1; + if (!count) { + req->flags |= REQ_F_TIMEOUT_NOSEQ; + spin_lock_irq(&ctx->completion_lock); + entry = ctx->timeout_list.prev; + goto add; + } req->sequence = ctx->cached_sq_head + count - 1; - req->flags |= REQ_F_TIMEOUT; + req->timeout.data->seq_offset = count; /* * Insertion sort, ensuring the first entry in the list is always * the one we need first. */ - tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped; - req_dist = req->sequence - tail_index; spin_lock_irq(&ctx->completion_lock); list_for_each_prev(entry, &ctx->timeout_list) { struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); - unsigned dist; + unsigned nxt_sq_head; + long long tmp, tmp_nxt; + u32 nxt_offset = nxt->timeout.data->seq_offset; + + if (nxt->flags & REQ_F_TIMEOUT_NOSEQ) + continue; - dist = nxt->sequence - tail_index; - if (req_dist >= dist) + /* + * Since cached_sq_head + count - 1 can overflow, use type long + * long to store it. + */ + tmp = (long long)ctx->cached_sq_head + count - 1; + nxt_sq_head = nxt->sequence - nxt_offset + 1; + tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1; + + /* + * cached_sq_head may overflow, and it will never overflow twice + * once there is some timeout req still be valid. + */ + if (ctx->cached_sq_head < nxt_sq_head) + tmp += UINT_MAX; + + if (tmp > tmp_nxt) break; + + /* + * Sequence of reqs after the insert one and itself should + * be adjusted because each timeout req consumes a slot. 
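The comparison above widens the 32-bit sequence targets to long long and adds UINT_MAX once cached_sq_head has already wrapped, so timeout ordering stays correct across a u32 overflow. The generic form of that problem, deciding which of two free-running 32-bit sequence numbers comes first, is commonly written with a signed difference; the sketch below shows that generic technique (seq_before is an invented name), not the kernel's exact expression:

#include <assert.h>
#include <stdint.h>

/*
 * True if sequence number a was issued before b. Valid as long as the two
 * are never more than 2^31 - 1 apart; relies on the usual two's-complement
 * conversion of the unsigned difference.
 */
static int seq_before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

int main(void)
{
    assert(seq_before(5, 10));
    /* Ordering survives the 32-bit counter wrapping past zero: */
    assert(seq_before(UINT32_MAX - 1, 3));
    assert(!seq_before(3, UINT32_MAX - 1));
    return 0;
}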
+ */ + span++; + nxt->sequence++; } + req->sequence -= span; +add: list_add(&req->list, entry); + data = req->timeout.data; + data->timer.function = io_timeout_fn; + hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); spin_unlock_irq(&ctx->completion_lock); + return 0; +} - hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - req->timeout.timer.function = io_timeout_fn; - hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), - HRTIMER_MODE_REL); +static bool io_cancel_cb(struct io_wq_work *work, void *data) +{ + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + + return req->user_data == (unsigned long) data; +} + +static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) +{ + enum io_wq_cancel cancel_ret; + int ret = 0; + + cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr); + switch (cancel_ret) { + case IO_WQ_CANCEL_OK: + ret = 0; + break; + case IO_WQ_CANCEL_RUNNING: + ret = -EALREADY; + break; + case IO_WQ_CANCEL_NOTFOUND: + ret = -ENOENT; + break; + } + + return ret; +} + +static void io_async_find_and_cancel(struct io_ring_ctx *ctx, + struct io_kiocb *req, __u64 sqe_addr, + struct io_kiocb **nxt, int success_ret) +{ + unsigned long flags; + int ret; + + ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr); + if (ret != -ENOENT) { + spin_lock_irqsave(&ctx->completion_lock, flags); + goto done; + } + + spin_lock_irqsave(&ctx->completion_lock, flags); + ret = io_timeout_cancel(ctx, sqe_addr); + if (ret != -ENOENT) + goto done; + ret = io_poll_cancel(ctx, sqe_addr); +done: + if (!ret) + ret = success_ret; + io_cqring_fill_event(req, ret); + io_commit_cqring(ctx); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + io_cqring_ev_posted(ctx); + + if (ret < 0 && (req->flags & REQ_F_LINK)) + req->flags |= REQ_F_FAIL_LINK; + io_put_req_find_next(req, nxt); +} + +static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **nxt) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->flags || sqe->ioprio || sqe->off || sqe->len || + sqe->cancel_flags) + return -EINVAL; + + io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0); return 0; } -static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static int io_req_defer(struct io_kiocb *req) { struct io_uring_sqe *sqe_copy; + struct io_ring_ctx *ctx = req->ctx; - if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) + /* Still need defer if there is pending req in defer list. 
*/ + if (!req_need_defer(req) && list_empty(&ctx->defer_list)) return 0; sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL); @@ -1947,72 +2611,82 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, return -EAGAIN; spin_lock_irq(&ctx->completion_lock); - if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) { + if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { spin_unlock_irq(&ctx->completion_lock); kfree(sqe_copy); return 0; } - memcpy(sqe_copy, sqe, sizeof(*sqe_copy)); - req->submit.sqe = sqe_copy; + memcpy(sqe_copy, req->sqe, sizeof(*sqe_copy)); + req->flags |= REQ_F_FREE_SQE; + req->sqe = sqe_copy; - INIT_WORK(&req->work, io_sq_wq_submit_work); + trace_io_uring_defer(ctx, req, req->user_data); list_add_tail(&req->list, &ctx->defer_list); spin_unlock_irq(&ctx->completion_lock); return -EIOCBQUEUED; } -static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, - const struct sqe_submit *s, bool force_nonblock) +__attribute__((nonnull)) +static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt, + bool force_nonblock) { int ret, opcode; + struct io_ring_ctx *ctx = req->ctx; - req->user_data = READ_ONCE(s->sqe->user_data); - - if (unlikely(s->index >= ctx->sq_entries)) - return -EINVAL; - - opcode = READ_ONCE(s->sqe->opcode); + opcode = READ_ONCE(req->sqe->opcode); switch (opcode) { case IORING_OP_NOP: - ret = io_nop(req, req->user_data); + ret = io_nop(req); break; case IORING_OP_READV: - if (unlikely(s->sqe->buf_index)) + if (unlikely(req->sqe->buf_index)) return -EINVAL; - ret = io_read(req, s, force_nonblock); + ret = io_read(req, nxt, force_nonblock); break; case IORING_OP_WRITEV: - if (unlikely(s->sqe->buf_index)) + if (unlikely(req->sqe->buf_index)) return -EINVAL; - ret = io_write(req, s, force_nonblock); + ret = io_write(req, nxt, force_nonblock); break; case IORING_OP_READ_FIXED: - ret = io_read(req, s, force_nonblock); + ret = io_read(req, nxt, force_nonblock); break; case IORING_OP_WRITE_FIXED: - ret = io_write(req, s, force_nonblock); + ret = io_write(req, nxt, force_nonblock); break; case IORING_OP_FSYNC: - ret = io_fsync(req, s->sqe, force_nonblock); + ret = io_fsync(req, req->sqe, nxt, force_nonblock); break; case IORING_OP_POLL_ADD: - ret = io_poll_add(req, s->sqe); + ret = io_poll_add(req, req->sqe, nxt); break; case IORING_OP_POLL_REMOVE: - ret = io_poll_remove(req, s->sqe); + ret = io_poll_remove(req, req->sqe); break; case IORING_OP_SYNC_FILE_RANGE: - ret = io_sync_file_range(req, s->sqe, force_nonblock); + ret = io_sync_file_range(req, req->sqe, nxt, force_nonblock); break; case IORING_OP_SENDMSG: - ret = io_sendmsg(req, s->sqe, force_nonblock); + ret = io_sendmsg(req, req->sqe, nxt, force_nonblock); break; case IORING_OP_RECVMSG: - ret = io_recvmsg(req, s->sqe, force_nonblock); + ret = io_recvmsg(req, req->sqe, nxt, force_nonblock); break; case IORING_OP_TIMEOUT: - ret = io_timeout(req, s->sqe); + ret = io_timeout(req, req->sqe); + break; + case IORING_OP_TIMEOUT_REMOVE: + ret = io_timeout_remove(req, req->sqe); + break; + case IORING_OP_ACCEPT: + ret = io_accept(req, req->sqe, nxt, force_nonblock); + break; + case IORING_OP_CONNECT: + ret = io_connect(req, req->sqe, nxt, force_nonblock); + break; + case IORING_OP_ASYNC_CANCEL: + ret = io_async_cancel(req, req->sqe, nxt); break; default: ret = -EINVAL; @@ -2027,187 +2701,76 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, return -EAGAIN; /* workqueue context doesn't hold uring_lock, grab it now */ - if (s->needs_lock) + if 
(req->in_async) mutex_lock(&ctx->uring_lock); io_iopoll_req_issued(req); - if (s->needs_lock) + if (req->in_async) mutex_unlock(&ctx->uring_lock); } return 0; } -static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx, - const struct io_uring_sqe *sqe) +static void io_link_work_cb(struct io_wq_work **workptr) { - switch (sqe->opcode) { - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - return &ctx->pending_async[READ]; - case IORING_OP_WRITEV: - case IORING_OP_WRITE_FIXED: - return &ctx->pending_async[WRITE]; - default: - return NULL; - } -} + struct io_wq_work *work = *workptr; + struct io_kiocb *link = work->data; -static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) -{ - u8 opcode = READ_ONCE(sqe->opcode); - - return !(opcode == IORING_OP_READ_FIXED || - opcode == IORING_OP_WRITE_FIXED); + io_queue_linked_timeout(link); + work->func = io_wq_submit_work; } -static void io_sq_wq_submit_work(struct work_struct *work) +static void io_wq_submit_work(struct io_wq_work **workptr) { + struct io_wq_work *work = *workptr; struct io_kiocb *req = container_of(work, struct io_kiocb, work); - struct io_ring_ctx *ctx = req->ctx; - struct mm_struct *cur_mm = NULL; - struct async_list *async_list; - LIST_HEAD(req_list); - mm_segment_t old_fs; - int ret; + struct io_kiocb *nxt = NULL; + int ret = 0; - async_list = io_async_list_from_sqe(ctx, req->submit.sqe); -restart: - do { - struct sqe_submit *s = &req->submit; - const struct io_uring_sqe *sqe = s->sqe; - unsigned int flags = req->flags; + /* Ensure we clear previously set non-block flag */ + req->rw.ki_flags &= ~IOCB_NOWAIT; - /* Ensure we clear previously set non-block flag */ - req->rw.ki_flags &= ~IOCB_NOWAIT; + if (work->flags & IO_WQ_WORK_CANCEL) + ret = -ECANCELED; - ret = 0; - if (io_sqe_needs_user(sqe) && !cur_mm) { - if (!mmget_not_zero(ctx->sqo_mm)) { - ret = -EFAULT; - } else { - cur_mm = ctx->sqo_mm; - use_mm(cur_mm); - old_fs = get_fs(); - set_fs(USER_DS); - } - } + if (!ret) { + req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0; + req->in_async = true; + do { + ret = io_issue_sqe(req, &nxt, false); + /* + * We can get EAGAIN for polled IO even though we're + * forcing a sync submission from here, since we can't + * wait for request slots on the block side. + */ + if (ret != -EAGAIN) + break; + cond_resched(); + } while (1); + } - if (!ret) { - s->has_user = cur_mm != NULL; - s->needs_lock = true; - do { - ret = __io_submit_sqe(ctx, req, s, false); - /* - * We can get EAGAIN for polled IO even though - * we're forcing a sync submission from here, - * since we can't wait for request slots on the - * block side. 
- */ - if (ret != -EAGAIN) - break; - cond_resched(); - } while (1); - } + /* drop submission reference */ + io_put_req(req); - /* drop submission reference */ + if (ret) { + if (req->flags & REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; + io_cqring_add_event(req, ret); io_put_req(req); - - if (ret) { - io_cqring_add_event(ctx, sqe->user_data, ret); - io_put_req(req); - } - - /* async context always use a copy of the sqe */ - kfree(sqe); - - /* req from defer and link list needn't decrease async cnt */ - if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE)) - goto out; - - if (!async_list) - break; - if (!list_empty(&req_list)) { - req = list_first_entry(&req_list, struct io_kiocb, - list); - list_del(&req->list); - continue; - } - if (list_empty(&async_list->list)) - break; - - req = NULL; - spin_lock(&async_list->lock); - if (list_empty(&async_list->list)) { - spin_unlock(&async_list->lock); - break; - } - list_splice_init(&async_list->list, &req_list); - spin_unlock(&async_list->lock); - - req = list_first_entry(&req_list, struct io_kiocb, list); - list_del(&req->list); - } while (req); - - /* - * Rare case of racing with a submitter. If we find the count has - * dropped to zero AND we have pending work items, then restart - * the processing. This is a tiny race window. - */ - if (async_list) { - ret = atomic_dec_return(&async_list->cnt); - while (!ret && !list_empty(&async_list->list)) { - spin_lock(&async_list->lock); - atomic_inc(&async_list->cnt); - list_splice_init(&async_list->list, &req_list); - spin_unlock(&async_list->lock); - - if (!list_empty(&req_list)) { - req = list_first_entry(&req_list, - struct io_kiocb, list); - list_del(&req->list); - goto restart; - } - ret = atomic_dec_return(&async_list->cnt); - } - } - -out: - if (cur_mm) { - set_fs(old_fs); - unuse_mm(cur_mm); - mmput(cur_mm); } -} - -/* - * See if we can piggy back onto previously submitted work, that is still - * running. We currently only allow this if the new request is sequential - * to the previous one we punted. 
- */ -static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req) -{ - bool ret; - if (!list) - return false; - if (!(req->flags & REQ_F_SEQ_PREV)) - return false; - if (!atomic_read(&list->cnt)) - return false; + /* if a dependent link is ready, pass it back */ + if (!ret && nxt) { + struct io_kiocb *link; - ret = true; - spin_lock(&list->lock); - list_add_tail(&req->list, &list->list); - /* - * Ensure we see a simultaneous modification from io_sq_wq_submit_work() - */ - smp_mb(); - if (!atomic_read(&list->cnt)) { - list_del_init(&req->list); - ret = false; + io_prep_async_work(nxt, &link); + *workptr = &nxt->work; + if (link) { + nxt->work.flags |= IO_WQ_WORK_CB; + nxt->work.func = io_link_work_cb; + nxt->work.data = link; + } } - spin_unlock(&list->lock); - return ret; } static bool io_op_needs_file(const struct io_uring_sqe *sqe) @@ -2217,42 +2780,53 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe) switch (op) { case IORING_OP_NOP: case IORING_OP_POLL_REMOVE: + case IORING_OP_TIMEOUT: + case IORING_OP_TIMEOUT_REMOVE: + case IORING_OP_ASYNC_CANCEL: + case IORING_OP_LINK_TIMEOUT: return false; default: return true; } } -static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, - struct io_submit_state *state, struct io_kiocb *req) +static inline struct file *io_file_from_index(struct io_ring_ctx *ctx, + int index) { + struct fixed_file_table *table; + + table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT]; + return table->files[index & IORING_FILE_TABLE_MASK]; +} + +static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; unsigned flags; int fd; - flags = READ_ONCE(s->sqe->flags); - fd = READ_ONCE(s->sqe->fd); + flags = READ_ONCE(req->sqe->flags); + fd = READ_ONCE(req->sqe->fd); if (flags & IOSQE_IO_DRAIN) req->flags |= REQ_F_IO_DRAIN; - /* - * All io need record the previous position, if LINK vs DARIN, - * it can be used to mark the position of the first IO in the - * link list. - */ - req->sequence = s->sequence; - if (!io_op_needs_file(s->sqe)) + if (!io_op_needs_file(req->sqe)) return 0; if (flags & IOSQE_FIXED_FILE) { - if (unlikely(!ctx->user_files || + if (unlikely(!ctx->file_table || (unsigned) fd >= ctx->nr_user_files)) return -EBADF; - req->file = ctx->user_files[fd]; + fd = array_index_nospec(fd, ctx->nr_user_files); + req->file = io_file_from_index(ctx, fd); + if (!req->file) + return -EBADF; req->flags |= REQ_F_FIXED_FILE; } else { - if (s->needs_fixed_file) + if (req->needs_fixed_file) return -EBADF; + trace_io_uring_file_get(ctx, fd); req->file = io_file_get(state, fd); if (unlikely(!req->file)) return -EBADF; @@ -2261,138 +2835,219 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, return 0; } -static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, - struct sqe_submit *s, bool force_nonblock) +static int io_grab_files(struct io_kiocb *req) { - int ret; + int ret = -EBADF; + struct io_ring_ctx *ctx = req->ctx; - ret = __io_submit_sqe(ctx, req, s, force_nonblock); - if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { - struct io_uring_sqe *sqe_copy; + rcu_read_lock(); + spin_lock_irq(&ctx->inflight_lock); + /* + * We use the f_ops->flush() handler to ensure that we can flush + * out work accessing these files if the fd is closed. Check if + * the fd has changed since we started down this path, and disallow + * this operation if it has. 
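io_file_from_index() above splits a fixed-file index across a two-level table: the high bits select a fixed_file_table page, the low bits select the slot within it. A self-contained sketch of the same lookup; the shift of 9 (512 slots per table) is an assumption for illustration, the real constants are IORING_FILE_TABLE_SHIFT/MASK from io_uring.c:

    #include <stdio.h>
    #include <stdlib.h>

    #define TABLE_SHIFT 9                   /* assumed: 512 slots per table */
    #define TABLE_SIZE  (1U << TABLE_SHIFT)
    #define TABLE_MASK  (TABLE_SIZE - 1)

    struct file_table {
        void **files;                       /* stand-in for struct file * slots */
    };

    static void *file_from_index(struct file_table *tables, unsigned index)
    {
        struct file_table *table = &tables[index >> TABLE_SHIFT];

        return table->files[index & TABLE_MASK];
    }

    int main(void)
    {
        unsigned nr_files = 1300;
        unsigned nr_tables = (nr_files + TABLE_SIZE - 1) / TABLE_SIZE;
        struct file_table *tables = calloc(nr_tables, sizeof(*tables));

        for (unsigned i = 0; i < nr_tables; i++)
            tables[i].files = calloc(TABLE_SIZE, sizeof(void *));

        tables[1299 >> TABLE_SHIFT].files[1299 & TABLE_MASK] = (void *)0xabc;
        printf("%p\n", file_from_index(tables, 1299));
        return 0;
    }
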
+ */ + if (fcheck(req->ring_fd) == req->ring_file) { + list_add(&req->inflight_entry, &ctx->inflight_list); + req->flags |= REQ_F_INFLIGHT; + req->work.files = current->files; + ret = 0; + } + spin_unlock_irq(&ctx->inflight_lock); + rcu_read_unlock(); - sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); - if (sqe_copy) { - struct async_list *list; - - s->sqe = sqe_copy; - memcpy(&req->submit, s, sizeof(*s)); - list = io_async_list_from_sqe(ctx, s->sqe); - if (!io_add_to_prev_work(list, req)) { - if (list) - atomic_inc(&list->cnt); - INIT_WORK(&req->work, io_sq_wq_submit_work); - io_queue_async_work(ctx, req); - } + return ret; +} - /* - * Queued up for async execution, worker will release - * submit reference when the iocb is actually submitted. - */ - return 0; - } +static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) +{ + struct io_timeout_data *data = container_of(timer, + struct io_timeout_data, timer); + struct io_kiocb *req = data->req; + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *prev = NULL; + unsigned long flags; + + spin_lock_irqsave(&ctx->completion_lock, flags); + + /* + * We don't expect the list to be empty, that will only happen if we + * race with the completion of the linked work. + */ + if (!list_empty(&req->list)) { + prev = list_entry(req->list.prev, struct io_kiocb, link_list); + if (refcount_inc_not_zero(&prev->refs)) { + list_del_init(&req->list); + prev->flags &= ~REQ_F_LINK_TIMEOUT; + } else + prev = NULL; } - /* drop submission reference */ - io_put_req(req); + spin_unlock_irqrestore(&ctx->completion_lock, flags); - /* and drop final reference, if we failed */ - if (ret) { - io_cqring_add_event(ctx, req->user_data, ret); - if (req->flags & REQ_F_LINK) - req->flags |= REQ_F_FAIL_LINK; + if (prev) { + if (prev->flags & REQ_F_LINK) + prev->flags |= REQ_F_FAIL_LINK; + io_async_find_and_cancel(ctx, req, prev->user_data, NULL, + -ETIME); + io_put_req(prev); + } else { + io_cqring_add_event(req, -ETIME); io_put_req(req); } - - return ret; + return HRTIMER_NORESTART; } -static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, - struct sqe_submit *s, bool force_nonblock) +static void io_queue_linked_timeout(struct io_kiocb *req) { - int ret; + struct io_ring_ctx *ctx = req->ctx; - ret = io_req_defer(ctx, req, s->sqe); - if (ret) { - if (ret != -EIOCBQUEUED) { - io_free_req(req); - io_cqring_add_event(ctx, s->sqe->user_data, ret); - } - return 0; + /* + * If the list is now empty, then our linked request finished before + * we got a chance to setup the timer + */ + spin_lock_irq(&ctx->completion_lock); + if (!list_empty(&req->list)) { + struct io_timeout_data *data = req->timeout.data; + + data->timer.function = io_link_timeout_fn; + hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), + data->mode); } + spin_unlock_irq(&ctx->completion_lock); - return __io_queue_sqe(ctx, req, s, force_nonblock); + /* drop submission reference */ + io_put_req(req); } -static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req, - struct sqe_submit *s, struct io_kiocb *shadow, - bool force_nonblock) +static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) { + struct io_kiocb *nxt; + + if (!(req->flags & REQ_F_LINK)) + return NULL; + + nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list); + if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT) + return NULL; + + req->flags |= REQ_F_LINK_TIMEOUT; + return nxt; +} + +static void __io_queue_sqe(struct io_kiocb *req) +{ + struct io_kiocb 
*linked_timeout = io_prep_linked_timeout(req); + struct io_kiocb *nxt = NULL; int ret; - int need_submit = false; - if (!shadow) - return io_queue_sqe(ctx, req, s, force_nonblock); + ret = io_issue_sqe(req, &nxt, true); + if (nxt) + io_queue_async_work(nxt); /* - * Mark the first IO in link list as DRAIN, let all the following - * IOs enter the defer list. all IO needs to be completed before link - * list. + * We async punt it if the file wasn't marked NOWAIT, or if the file + * doesn't support non-blocking read/write attempts */ - req->flags |= REQ_F_IO_DRAIN; - ret = io_req_defer(ctx, req, s->sqe); - if (ret) { - if (ret != -EIOCBQUEUED) { - io_free_req(req); - io_cqring_add_event(ctx, s->sqe->user_data, ret); - return 0; + if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || + (req->flags & REQ_F_MUST_PUNT))) { + struct io_uring_sqe *sqe_copy; + + sqe_copy = kmemdup(req->sqe, sizeof(*sqe_copy), GFP_KERNEL); + if (!sqe_copy) + goto err; + + req->sqe = sqe_copy; + req->flags |= REQ_F_FREE_SQE; + + if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) { + ret = io_grab_files(req); + if (ret) + goto err; } - } else { + /* - * If ret == 0 means that all IOs in front of link io are - * running done. let's queue link head. + * Queued up for async execution, worker will release + * submit reference when the iocb is actually submitted. */ - need_submit = true; + io_queue_async_work(req); + return; } - /* Insert shadow req to defer_list, blocking next IOs */ - spin_lock_irq(&ctx->completion_lock); - list_add_tail(&shadow->list, &ctx->defer_list); - spin_unlock_irq(&ctx->completion_lock); +err: + /* drop submission reference */ + io_put_req(req); - if (need_submit) - return __io_queue_sqe(ctx, req, s, force_nonblock); + if (linked_timeout) { + if (!ret) + io_queue_linked_timeout(linked_timeout); + else + io_put_req(linked_timeout); + } - return 0; + /* and drop final reference, if we failed */ + if (ret) { + io_cqring_add_event(req, ret); + if (req->flags & REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; + io_put_req(req); + } +} + +static void io_queue_sqe(struct io_kiocb *req) +{ + int ret; + + if (unlikely(req->ctx->drain_next)) { + req->flags |= REQ_F_IO_DRAIN; + req->ctx->drain_next = false; + } + req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK); + + ret = io_req_defer(req); + if (ret) { + if (ret != -EIOCBQUEUED) { + io_cqring_add_event(req, ret); + if (req->flags & REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; + io_double_put_req(req); + } + } else + __io_queue_sqe(req); +} + +static inline void io_queue_link_head(struct io_kiocb *req) +{ + if (unlikely(req->flags & REQ_F_FAIL_LINK)) { + io_cqring_add_event(req, -ECANCELED); + io_double_put_req(req); + } else + io_queue_sqe(req); } + #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK) -static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s, - struct io_submit_state *state, struct io_kiocb **link, - bool force_nonblock) +static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state, + struct io_kiocb **link) { - struct io_uring_sqe *sqe_copy; - struct io_kiocb *req; + struct io_ring_ctx *ctx = req->ctx; int ret; + req->user_data = req->sqe->user_data; + /* enforce forwards compatibility on users */ - if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) { + if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) { ret = -EINVAL; - goto err; - } - - req = io_get_req(ctx, state); - if (unlikely(!req)) { - ret = -EAGAIN; - goto err; + goto err_req; } - ret = io_req_set_file(ctx, s, state, req); + ret = 
io_req_set_file(state, req); if (unlikely(ret)) { err_req: - io_free_req(req); -err: - io_cqring_add_event(ctx, s->sqe->user_data, ret); + io_cqring_add_event(req, ret); + io_double_put_req(req); return; } @@ -2405,24 +3060,39 @@ err: */ if (*link) { struct io_kiocb *prev = *link; + struct io_uring_sqe *sqe_copy; - sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); + if (req->sqe->flags & IOSQE_IO_DRAIN) + (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN; + + if (READ_ONCE(req->sqe->opcode) == IORING_OP_LINK_TIMEOUT) { + ret = io_timeout_setup(req); + /* common setup allows offset being set, we don't */ + if (!ret && req->sqe->off) + ret = -EINVAL; + if (ret) { + prev->flags |= REQ_F_FAIL_LINK; + goto err_req; + } + } + + sqe_copy = kmemdup(req->sqe, sizeof(*sqe_copy), GFP_KERNEL); if (!sqe_copy) { ret = -EAGAIN; goto err_req; } - s->sqe = sqe_copy; - memcpy(&req->submit, s, sizeof(*s)); + req->sqe = sqe_copy; + req->flags |= REQ_F_FREE_SQE; + trace_io_uring_link(ctx, req, prev); list_add_tail(&req->list, &prev->link_list); - } else if (s->sqe->flags & IOSQE_IO_LINK) { + } else if (req->sqe->flags & IOSQE_IO_LINK) { req->flags |= REQ_F_LINK; - memcpy(&req->submit, s, sizeof(*s)); INIT_LIST_HEAD(&req->link_list); *link = req; } else { - io_queue_sqe(ctx, req, s, force_nonblock); + io_queue_sqe(req); } } @@ -2472,7 +3142,7 @@ static void io_commit_sqring(struct io_ring_ctx *ctx) * used, it's important that those reads are done through READ_ONCE() to * prevent a re-load down the line. */ -static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s) +static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req) { struct io_rings *rings = ctx->rings; u32 *sq_array = ctx->sq_array; @@ -2488,32 +3158,42 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s) */ head = ctx->cached_sq_head; /* make sure SQ entry isn't read before tail */ - if (head == smp_load_acquire(&rings->sq.tail)) + if (unlikely(head == smp_load_acquire(&rings->sq.tail))) return false; head = READ_ONCE(sq_array[head & ctx->sq_mask]); - if (head < ctx->sq_entries) { - s->index = head; - s->sqe = &ctx->sq_sqes[head]; - s->sequence = ctx->cached_sq_head; + if (likely(head < ctx->sq_entries)) { + /* + * All io need record the previous position, if LINK vs DARIN, + * it can be used to mark the position of the first IO in the + * link list. 
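io_get_sqring() consumes one SQ slot by comparing its cached head against an acquire-load of the ring tail, indexing sq_array with the ring mask, and counting dropped entries when the index is out of range. A userspace analogue of that single-consumer read side, using C11 atomics where the kernel uses smp_load_acquire()/READ_ONCE() (sketch only, range validation omitted):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ring {
        _Atomic unsigned tail;      /* advanced by the producer */
        unsigned cached_head;       /* private to the consumer */
        unsigned mask;              /* ring size - 1, power of two */
        unsigned *array;            /* indirection array, like sq_array */
    };

    /* returns false when the ring is empty, else *idx is the next entry */
    static bool ring_get(struct ring *r, unsigned *idx)
    {
        unsigned head = r->cached_head;

        /* pairs with the producer's release store of tail */
        if (head == atomic_load_explicit(&r->tail, memory_order_acquire))
            return false;

        *idx = r->array[head & r->mask];
        r->cached_head++;
        return true;
    }

    int main(void)
    {
        unsigned entries[8] = { 42 };
        struct ring r = { .cached_head = 0, .mask = 7, .array = entries };
        unsigned idx;

        atomic_store_explicit(&r.tail, 1, memory_order_release);
        return ring_get(&r, &idx) && idx == 42 ? 0 : 1;
    }
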
+ */ + req->sequence = ctx->cached_sq_head; + req->sqe = &ctx->sq_sqes[head]; ctx->cached_sq_head++; return true; } /* drop invalid entries */ ctx->cached_sq_head++; - rings->sq_dropped++; + ctx->cached_sq_dropped++; + WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped); return false; } -static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes, - unsigned int nr, bool has_user, bool mm_fault) +static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, + struct file *ring_file, int ring_fd, + struct mm_struct **mm, bool async) { struct io_submit_state state, *statep = NULL; struct io_kiocb *link = NULL; - struct io_kiocb *shadow_req = NULL; - bool prev_was_link = false; int i, submitted = 0; + bool mm_fault = false; + + /* if we have a backlog and couldn't flush it all, return BUSY */ + if (!list_empty(&ctx->cq_overflow_list) && + !io_cqring_overflow_flush(ctx, false)) + return -EBUSY; if (nr > IO_PLUG_THRESHOLD) { io_submit_state_start(&state, ctx, nr); @@ -2521,75 +3201,100 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes, } for (i = 0; i < nr; i++) { + struct io_kiocb *req; + unsigned int sqe_flags; + + req = io_get_req(ctx, statep); + if (unlikely(!req)) { + if (!submitted) + submitted = -EAGAIN; + break; + } + if (!io_get_sqring(ctx, req)) { + __io_free_req(req); + break; + } + + if (io_sqe_needs_user(req->sqe) && !*mm) { + mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm); + if (!mm_fault) { + use_mm(ctx->sqo_mm); + *mm = ctx->sqo_mm; + } + } + + sqe_flags = req->sqe->flags; + + req->ring_file = ring_file; + req->ring_fd = ring_fd; + req->has_user = *mm != NULL; + req->in_async = async; + req->needs_fixed_file = async; + trace_io_uring_submit_sqe(ctx, req->sqe->user_data, + true, async); + io_submit_sqe(req, statep, &link); + submitted++; + /* * If previous wasn't linked and we have a linked command, * that's the end of the chain. Submit the previous link. 
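On the submission side a chain is expressed purely through SQE flags: every SQE except the last carries IOSQE_IO_LINK, and the loop above queues the accumulated link head as soon as it sees one without the flag. A hedged userspace sketch of preparing such a chain of readv SQEs (ring setup, the mmap of the SQE array, and error handling are omitted; in a real program the SQEs live in the mmap'ed ring):

    #include <string.h>
    #include <sys/uio.h>
    #include <linux/io_uring.h>

    /* chain n SQEs so they execute in order; the last one ends the chain */
    static void prep_linked_chain(struct io_uring_sqe *sqes, unsigned n,
                                  int fd, struct iovec *iovs)
    {
        for (unsigned i = 0; i < n; i++) {
            struct io_uring_sqe *sqe = &sqes[i];

            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_READV;
            sqe->fd = fd;
            sqe->addr = (unsigned long)&iovs[i];
            sqe->len = 1;
            sqe->user_data = i;
            if (i + 1 < n)              /* all but the last link forward */
                sqe->flags |= IOSQE_IO_LINK;
        }
    }

    int main(void)
    {
        struct io_uring_sqe sqes[3];
        char buf[3][16];
        struct iovec iovs[3] = {
            { buf[0], sizeof(buf[0]) },
            { buf[1], sizeof(buf[1]) },
            { buf[2], sizeof(buf[2]) },
        };

        prep_linked_chain(sqes, 3, 0 /* illustrative fd */, iovs);
        return 0;
    }
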
*/ - if (!prev_was_link && link) { - io_queue_link_head(ctx, link, &link->submit, shadow_req, - true); + if (!(sqe_flags & IOSQE_IO_LINK) && link) { + io_queue_link_head(link); link = NULL; - shadow_req = NULL; - } - prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0; - - if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) { - if (!shadow_req) { - shadow_req = io_get_req(ctx, NULL); - if (unlikely(!shadow_req)) - goto out; - shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN); - refcount_dec(&shadow_req->refs); - } - shadow_req->sequence = sqes[i].sequence; - } - -out: - if (unlikely(mm_fault)) { - io_cqring_add_event(ctx, sqes[i].sqe->user_data, - -EFAULT); - } else { - sqes[i].has_user = has_user; - sqes[i].needs_lock = true; - sqes[i].needs_fixed_file = true; - io_submit_sqe(ctx, &sqes[i], statep, &link, true); - submitted++; } } if (link) - io_queue_link_head(ctx, link, &link->submit, shadow_req, true); + io_queue_link_head(link); if (statep) io_submit_state_end(&state); + /* Commit SQ ring head once we've consumed and submitted all SQEs */ + io_commit_sqring(ctx); + return submitted; } static int io_sq_thread(void *data) { - struct sqe_submit sqes[IO_IOPOLL_BATCH]; struct io_ring_ctx *ctx = data; struct mm_struct *cur_mm = NULL; + const struct cred *old_cred; mm_segment_t old_fs; DEFINE_WAIT(wait); unsigned inflight; unsigned long timeout; + int ret; - complete(&ctx->sqo_thread_started); + complete(&ctx->completions[1]); old_fs = get_fs(); set_fs(USER_DS); + old_cred = override_creds(ctx->creds); - timeout = inflight = 0; + ret = timeout = inflight = 0; while (!kthread_should_park()) { - bool all_fixed, mm_fault = false; - int i; + unsigned int to_submit; if (inflight) { unsigned nr_events = 0; if (ctx->flags & IORING_SETUP_IOPOLL) { - io_iopoll_check(ctx, &nr_events, 0); + /* + * inflight is the count of the maximum possible + * entries we submitted, but it can be smaller + * if we dropped some of them. If we don't have + * poll entries available, then we know that we + * have nothing left to poll for. Reset the + * inflight count to zero in that case. + */ + mutex_lock(&ctx->uring_lock); + if (!list_empty(&ctx->poll_list)) + __io_iopoll_check(ctx, &nr_events, 0); + else + inflight = 0; + mutex_unlock(&ctx->uring_lock); } else { /* * Normal IO, just pretend everything completed. @@ -2603,13 +3308,22 @@ static int io_sq_thread(void *data) timeout = jiffies + ctx->sq_thread_idle; } - if (!io_get_sqring(ctx, &sqes[0])) { + to_submit = io_sqring_entries(ctx); + + /* + * If submit got -EBUSY, flag us as needing the application + * to enter the kernel to reap and flush events. + */ + if (!to_submit || ret == -EBUSY) { /* * We're polling. If we're within the defined idle * period, then let us spin without work before going - * to sleep. + * to sleep. The exception is if we got EBUSY doing + * more IO, we should wait for the application to + * reap events and wake us up. 
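From userspace, the overflow backlog shows up as io_uring_enter() failing with EBUSY (once IORING_FEAT_NODROP is advertised, see the setup changes later in this patch): the application is expected to reap CQEs and retry rather than treat the submission as dropped. An illustrative retry loop using the raw syscall; the reap_cqes callback and the availability of __NR_io_uring_enter in the headers are assumptions of the sketch:

    #include <errno.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    /* submit, draining completions first whenever the kernel reports EBUSY */
    int submit_with_backpressure(int ring_fd, unsigned to_submit,
                                 void (*reap_cqes)(void))
    {
        for (;;) {
            int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit,
                              0, 0, NULL, 0);

            if (ret >= 0 || errno != EBUSY)
                return ret;
            reap_cqes();        /* empty the CQ ring, then try again */
        }
    }
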
*/ - if (inflight || !time_after(jiffies, timeout)) { + if (inflight || + (!time_after(jiffies, timeout) && ret != -EBUSY)) { cond_resched(); continue; } @@ -2634,7 +3348,8 @@ static int io_sq_thread(void *data) /* make sure to read SQ tail after writing flags */ smp_mb(); - if (!io_get_sqring(ctx, &sqes[0])) { + to_submit = io_sqring_entries(ctx); + if (!to_submit || ret == -EBUSY) { if (kthread_should_park()) { finish_wait(&ctx->sqo_wait, &wait); break; @@ -2652,31 +3367,10 @@ static int io_sq_thread(void *data) ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; } - i = 0; - all_fixed = true; - do { - if (all_fixed && io_sqe_needs_user(sqes[i].sqe)) - all_fixed = false; - - i++; - if (i == ARRAY_SIZE(sqes)) - break; - } while (io_get_sqring(ctx, &sqes[i])); - - /* Unless all new commands are FIXED regions, grab mm */ - if (!all_fixed && !cur_mm) { - mm_fault = !mmget_not_zero(ctx->sqo_mm); - if (!mm_fault) { - use_mm(ctx->sqo_mm); - cur_mm = ctx->sqo_mm; - } - } - - inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL, - mm_fault); - - /* Commit SQ ring head once we've consumed all SQEs */ - io_commit_sqring(ctx); + to_submit = min(to_submit, ctx->sq_entries); + ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true); + if (ret > 0) + inflight += ret; } set_fs(old_fs); @@ -2684,85 +3378,13 @@ static int io_sq_thread(void *data) unuse_mm(cur_mm); mmput(cur_mm); } + revert_creds(old_cred); kthread_parkme(); return 0; } -static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit, - bool block_for_last) -{ - struct io_submit_state state, *statep = NULL; - struct io_kiocb *link = NULL; - struct io_kiocb *shadow_req = NULL; - bool prev_was_link = false; - int i, submit = 0; - - if (to_submit > IO_PLUG_THRESHOLD) { - io_submit_state_start(&state, ctx, to_submit); - statep = &state; - } - - for (i = 0; i < to_submit; i++) { - bool force_nonblock = true; - struct sqe_submit s; - - if (!io_get_sqring(ctx, &s)) - break; - - /* - * If previous wasn't linked and we have a linked command, - * that's the end of the chain. Submit the previous link. - */ - if (!prev_was_link && link) { - io_queue_link_head(ctx, link, &link->submit, shadow_req, - force_nonblock); - link = NULL; - shadow_req = NULL; - } - prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0; - - if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) { - if (!shadow_req) { - shadow_req = io_get_req(ctx, NULL); - if (unlikely(!shadow_req)) - goto out; - shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN); - refcount_dec(&shadow_req->refs); - } - shadow_req->sequence = s.sequence; - } - -out: - s.has_user = true; - s.needs_lock = false; - s.needs_fixed_file = false; - submit++; - - /* - * The caller will block for events after submit, submit the - * last IO non-blocking. This is either the only IO it's - * submitting, or it already submitted the previous ones. This - * improves performance by avoiding an async punt that we don't - * need to do. 
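The removed block_for_last heuristic tried to avoid an async punt on the final SQE when the caller was about to wait; with it gone, the caller-visible pattern is simply submit-and-wait in a single io_uring_enter() call, combining to_submit with IORING_ENTER_GETEVENTS and a min_complete count. A raw-syscall sketch of that call (assumes headers that define __NR_io_uring_enter):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    /* submit 'to_submit' SQEs and block until at least 'wait_nr' CQEs exist */
    int submit_and_wait(int ring_fd, unsigned to_submit, unsigned wait_nr)
    {
        return syscall(__NR_io_uring_enter, ring_fd, to_submit, wait_nr,
                       IORING_ENTER_GETEVENTS, NULL, 0);
    }
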
- */ - if (block_for_last && submit == to_submit) - force_nonblock = false; - - io_submit_sqe(ctx, &s, statep, &link, force_nonblock); - } - io_commit_sqring(ctx); - - if (link) - io_queue_link_head(ctx, link, &link->submit, shadow_req, - !block_for_last); - if (statep) - io_submit_state_end(statep); - - return submit; -} - struct io_wait_queue { struct wait_queue_entry wq; struct io_ring_ctx *ctx; @@ -2770,7 +3392,7 @@ struct io_wait_queue { unsigned nr_timeouts; }; -static inline bool io_should_wake(struct io_wait_queue *iowq) +static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush) { struct io_ring_ctx *ctx = iowq->ctx; @@ -2779,7 +3401,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq) * started waiting. For timeouts, we always want to return to userspace, * regardless of event count. */ - return io_cqring_events(ctx->rings) >= iowq->to_wait || + return io_cqring_events(ctx, noflush) >= iowq->to_wait || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; } @@ -2789,7 +3411,8 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq); - if (!io_should_wake(iowq)) + /* use noflush == true, as we can't safely rely on locking context */ + if (!io_should_wake(iowq, true)) return -1; return autoremove_wake_function(curr, mode, wake_flags, key); @@ -2812,9 +3435,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, .to_wait = min_events, }; struct io_rings *rings = ctx->rings; - int ret; + int ret = 0; - if (io_cqring_events(rings) >= min_events) + if (io_cqring_events(ctx, false) >= min_events) return 0; if (sig) { @@ -2830,24 +3453,22 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, return ret; } - ret = 0; iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); + trace_io_uring_cqring_wait(ctx, min_events); do { prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, TASK_INTERRUPTIBLE); - if (io_should_wake(&iowq)) + if (io_should_wake(&iowq, false)) break; schedule(); if (signal_pending(current)) { - ret = -ERESTARTSYS; + ret = -EINTR; break; } } while (1); finish_wait(&ctx->wait, &iowq.wq); - restore_saved_sigmask_unless(ret == -ERESTARTSYS); - if (ret == -ERESTARTSYS) - ret = -EINTR; + restore_saved_sigmask_unless(ret == -EINTR); return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? 
ret : 0; } @@ -2865,19 +3486,29 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) #else int i; - for (i = 0; i < ctx->nr_user_files; i++) - fput(ctx->user_files[i]); + for (i = 0; i < ctx->nr_user_files; i++) { + struct file *file; + + file = io_file_from_index(ctx, i); + if (file) + fput(file); + } #endif } static int io_sqe_files_unregister(struct io_ring_ctx *ctx) { - if (!ctx->user_files) + unsigned nr_tables, i; + + if (!ctx->file_table) return -ENXIO; __io_sqe_files_unregister(ctx); - kfree(ctx->user_files); - ctx->user_files = NULL; + nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE); + for (i = 0; i < nr_tables; i++) + kfree(ctx->file_table[i].files); + kfree(ctx->file_table); + ctx->file_table = NULL; ctx->nr_user_files = 0; return 0; } @@ -2885,7 +3516,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) static void io_sq_thread_stop(struct io_ring_ctx *ctx) { if (ctx->sqo_thread) { - wait_for_completion(&ctx->sqo_thread_started); + wait_for_completion(&ctx->completions[1]); /* * The park is a bit of a work-around, without it we get * warning spews on shutdown with SQPOLL set and affinity @@ -2899,15 +3530,11 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx) static void io_finish_async(struct io_ring_ctx *ctx) { - int i; - io_sq_thread_stop(ctx); - for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) { - if (ctx->sqo_wq[i]) { - destroy_workqueue(ctx->sqo_wq[i]); - ctx->sqo_wq[i] = NULL; - } + if (ctx->io_wq) { + io_wq_destroy(ctx->io_wq); + ctx->io_wq = NULL; } } @@ -2915,11 +3542,9 @@ static void io_finish_async(struct io_ring_ctx *ctx) static void io_destruct_skb(struct sk_buff *skb) { struct io_ring_ctx *ctx = skb->sk->sk_user_data; - int i; - for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) - if (ctx->sqo_wq[i]) - flush_workqueue(ctx->sqo_wq[i]); + if (ctx->io_wq) + io_wq_flush(ctx->io_wq); unix_destruct_scm(skb); } @@ -2934,7 +3559,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) struct sock *sk = ctx->ring_sock->sk; struct scm_fp_list *fpl; struct sk_buff *skb; - int i; + int i, nr_files; if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { unsigned long inflight = ctx->user->unix_inflight + nr; @@ -2954,21 +3579,33 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) } skb->sk = sk; - skb->destructor = io_destruct_skb; + nr_files = 0; fpl->user = get_uid(ctx->user); for (i = 0; i < nr; i++) { - fpl->fp[i] = get_file(ctx->user_files[i + offset]); - unix_inflight(fpl->user, fpl->fp[i]); + struct file *file = io_file_from_index(ctx, i + offset); + + if (!file) + continue; + fpl->fp[nr_files] = get_file(file); + unix_inflight(fpl->user, fpl->fp[nr_files]); + nr_files++; } - fpl->max = fpl->count = nr; - UNIXCB(skb).fp = fpl; - refcount_add(skb->truesize, &sk->sk_wmem_alloc); - skb_queue_head(&sk->sk_receive_queue, skb); + if (nr_files) { + fpl->max = SCM_MAX_FD; + fpl->count = nr_files; + UNIXCB(skb).fp = fpl; + skb->destructor = io_destruct_skb; + refcount_add(skb->truesize, &sk->sk_wmem_alloc); + skb_queue_head(&sk->sk_receive_queue, skb); - for (i = 0; i < nr; i++) - fput(fpl->fp[i]); + for (i = 0; i < nr_files; i++) + fput(fpl->fp[i]); + } else { + kfree_skb(skb); + kfree(fpl); + } return 0; } @@ -2999,7 +3636,10 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx) return 0; while (total < ctx->nr_user_files) { - fput(ctx->user_files[total]); + struct file *file = io_file_from_index(ctx, total); + + if (file) + fput(file); total++; } @@ -3012,33 +3652,79 @@ 
static int io_sqe_files_scm(struct io_ring_ctx *ctx) } #endif +static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables, + unsigned nr_files) +{ + int i; + + for (i = 0; i < nr_tables; i++) { + struct fixed_file_table *table = &ctx->file_table[i]; + unsigned this_files; + + this_files = min(nr_files, IORING_MAX_FILES_TABLE); + table->files = kcalloc(this_files, sizeof(struct file *), + GFP_KERNEL); + if (!table->files) + break; + nr_files -= this_files; + } + + if (i == nr_tables) + return 0; + + for (i = 0; i < nr_tables; i++) { + struct fixed_file_table *table = &ctx->file_table[i]; + kfree(table->files); + } + return 1; +} + static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args) { __s32 __user *fds = (__s32 __user *) arg; + unsigned nr_tables; int fd, ret = 0; unsigned i; - if (ctx->user_files) + if (ctx->file_table) return -EBUSY; if (!nr_args) return -EINVAL; if (nr_args > IORING_MAX_FIXED_FILES) return -EMFILE; - ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL); - if (!ctx->user_files) + nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE); + ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table), + GFP_KERNEL); + if (!ctx->file_table) + return -ENOMEM; + + if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) { + kfree(ctx->file_table); + ctx->file_table = NULL; return -ENOMEM; + } + + for (i = 0; i < nr_args; i++, ctx->nr_user_files++) { + struct fixed_file_table *table; + unsigned index; - for (i = 0; i < nr_args; i++) { ret = -EFAULT; if (copy_from_user(&fd, &fds[i], sizeof(fd))) break; + /* allow sparse sets */ + if (fd == -1) { + ret = 0; + continue; + } - ctx->user_files[i] = fget(fd); + table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT]; + index = i & IORING_FILE_TABLE_MASK; + table->files[index] = fget(fd); ret = -EBADF; - if (!ctx->user_files[i]) + if (!table->files[index]) break; /* * Don't allow io_uring instances to be registered. If UNIX @@ -3047,20 +3733,26 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, * handle it just fine, but there's still no point in allowing * a ring fd as it doesn't support regular read/write anyway. */ - if (ctx->user_files[i]->f_op == &io_uring_fops) { - fput(ctx->user_files[i]); + if (table->files[index]->f_op == &io_uring_fops) { + fput(table->files[index]); break; } - ctx->nr_user_files++; ret = 0; } if (ret) { - for (i = 0; i < ctx->nr_user_files; i++) - fput(ctx->user_files[i]); + for (i = 0; i < ctx->nr_user_files; i++) { + struct file *file; - kfree(ctx->user_files); - ctx->user_files = NULL; + file = io_file_from_index(ctx, i); + if (file) + fput(file); + } + for (i = 0; i < nr_tables; i++) + kfree(ctx->file_table[i].files); + + kfree(ctx->file_table); + ctx->file_table = NULL; ctx->nr_user_files = 0; return ret; } @@ -3072,9 +3764,202 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, return ret; } +static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index) +{ +#if defined(CONFIG_UNIX) + struct file *file = io_file_from_index(ctx, index); + struct sock *sock = ctx->ring_sock->sk; + struct sk_buff_head list, *head = &sock->sk_receive_queue; + struct sk_buff *skb; + int i; + + __skb_queue_head_init(&list); + + /* + * Find the skb that holds this file in its SCM_RIGHTS. When found, + * remove this entry and rearrange the file array. 
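io_sqe_files_register() now accepts sparse sets: an fd of -1 leaves the slot empty so it can be filled in later through IORING_REGISTER_FILES_UPDATE (added further down). A userspace sketch of registering a mostly-empty fixed-file table with the raw io_uring_register() syscall (assumes headers that define __NR_io_uring_register):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    /* register nr fixed-file slots with only slot 0 populated; -1 = empty */
    int register_sparse_files(int ring_fd, int first_fd, unsigned nr)
    {
        int fds[64];

        if (nr == 0 || nr > 64)
            return -1;
        for (unsigned i = 0; i < nr; i++)
            fds[i] = -1;
        fds[0] = first_fd;

        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_FILES, fds, nr);
    }
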
+ */ + skb = skb_dequeue(head); + while (skb) { + struct scm_fp_list *fp; + + fp = UNIXCB(skb).fp; + for (i = 0; i < fp->count; i++) { + int left; + + if (fp->fp[i] != file) + continue; + + unix_notinflight(fp->user, fp->fp[i]); + left = fp->count - 1 - i; + if (left) { + memmove(&fp->fp[i], &fp->fp[i + 1], + left * sizeof(struct file *)); + } + fp->count--; + if (!fp->count) { + kfree_skb(skb); + skb = NULL; + } else { + __skb_queue_tail(&list, skb); + } + fput(file); + file = NULL; + break; + } + + if (!file) + break; + + __skb_queue_tail(&list, skb); + + skb = skb_dequeue(head); + } + + if (skb_peek(&list)) { + spin_lock_irq(&head->lock); + while ((skb = __skb_dequeue(&list)) != NULL) + __skb_queue_tail(head, skb); + spin_unlock_irq(&head->lock); + } +#else + fput(io_file_from_index(ctx, index)); +#endif +} + +static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file, + int index) +{ +#if defined(CONFIG_UNIX) + struct sock *sock = ctx->ring_sock->sk; + struct sk_buff_head *head = &sock->sk_receive_queue; + struct sk_buff *skb; + + /* + * See if we can merge this file into an existing skb SCM_RIGHTS + * file set. If there's no room, fall back to allocating a new skb + * and filling it in. + */ + spin_lock_irq(&head->lock); + skb = skb_peek(head); + if (skb) { + struct scm_fp_list *fpl = UNIXCB(skb).fp; + + if (fpl->count < SCM_MAX_FD) { + __skb_unlink(skb, head); + spin_unlock_irq(&head->lock); + fpl->fp[fpl->count] = get_file(file); + unix_inflight(fpl->user, fpl->fp[fpl->count]); + fpl->count++; + spin_lock_irq(&head->lock); + __skb_queue_head(head, skb); + } else { + skb = NULL; + } + } + spin_unlock_irq(&head->lock); + + if (skb) { + fput(file); + return 0; + } + + return __io_sqe_files_scm(ctx, 1, index); +#else + return 0; +#endif +} + +static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg, + unsigned nr_args) +{ + struct io_uring_files_update up; + __s32 __user *fds; + int fd, i, err; + __u32 done; + + if (!ctx->file_table) + return -ENXIO; + if (!nr_args) + return -EINVAL; + if (copy_from_user(&up, arg, sizeof(up))) + return -EFAULT; + if (check_add_overflow(up.offset, nr_args, &done)) + return -EOVERFLOW; + if (done > ctx->nr_user_files) + return -EINVAL; + + done = 0; + fds = (__s32 __user *) up.fds; + while (nr_args) { + struct fixed_file_table *table; + unsigned index; + + err = 0; + if (copy_from_user(&fd, &fds[done], sizeof(fd))) { + err = -EFAULT; + break; + } + i = array_index_nospec(up.offset, ctx->nr_user_files); + table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT]; + index = i & IORING_FILE_TABLE_MASK; + if (table->files[index]) { + io_sqe_file_unregister(ctx, i); + table->files[index] = NULL; + } + if (fd != -1) { + struct file *file; + + file = fget(fd); + if (!file) { + err = -EBADF; + break; + } + /* + * Don't allow io_uring instances to be registered. If + * UNIX isn't enabled, then this causes a reference + * cycle and this instance can never get freed. If UNIX + * is enabled we'll handle it just fine, but there's + * still no point in allowing a ring fd as it doesn't + * support regular read/write anyway. + */ + if (file->f_op == &io_uring_fops) { + fput(file); + err = -EBADF; + break; + } + table->files[index] = file; + err = io_sqe_file_register(ctx, file, i); + if (err) + break; + } + nr_args--; + done++; + up.offset++; + } + + return done ? 
done : err; +} + +static void io_put_work(struct io_wq_work *work) +{ + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + + io_put_req(req); +} + +static void io_get_work(struct io_wq_work *work) +{ + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + + refcount_inc(&req->refs); +} + static int io_sq_offload_start(struct io_ring_ctx *ctx, struct io_uring_params *p) { + struct io_wq_data data; + unsigned concurrency; int ret; init_waitqueue_head(&ctx->sqo_wait); @@ -3118,26 +4003,18 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, goto err; } - /* Do QD, or 2 * CPUS, whatever is smallest */ - ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq", - WQ_UNBOUND | WQ_FREEZABLE, - min(ctx->sq_entries - 1, 2 * num_online_cpus())); - if (!ctx->sqo_wq[0]) { - ret = -ENOMEM; - goto err; - } - - /* - * This is for buffered writes, where we want to limit the parallelism - * due to file locking in file systems. As "normal" buffered writes - * should parellelize on writeout quite nicely, limit us to having 2 - * pending. This avoids massive contention on the inode when doing - * buffered async writes. - */ - ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq", - WQ_UNBOUND | WQ_FREEZABLE, 2); - if (!ctx->sqo_wq[1]) { - ret = -ENOMEM; + data.mm = ctx->sqo_mm; + data.user = ctx->user; + data.creds = ctx->creds; + data.get_work = io_get_work; + data.put_work = io_put_work; + + /* Do QD, or 4 * CPUS, whatever is smallest */ + concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); + ctx->io_wq = io_wq_create(concurrency, &data); + if (IS_ERR(ctx->io_wq)) { + ret = PTR_ERR(ctx->io_wq); + ctx->io_wq = NULL; goto err; } @@ -3483,6 +4360,9 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) io_unaccount_mem(ctx->user, ring_pages(ctx->sq_entries, ctx->cq_entries)); free_uid(ctx->user); + put_cred(ctx->creds); + kfree(ctx->completions); + kmem_cache_free(req_cachep, ctx->fallback_req); kfree(ctx); } @@ -3521,8 +4401,15 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) io_kill_timeouts(ctx); io_poll_remove_all(ctx); + + if (ctx->io_wq) + io_wq_cancel_all(ctx->io_wq); + io_iopoll_reap_events(ctx); - wait_for_completion(&ctx->ctx_done); + /* if we failed setting up the ctx, we might not have any rings */ + if (ctx->rings) + io_cqring_overflow_flush(ctx, true); + wait_for_completion(&ctx->completions[0]); io_ring_ctx_free(ctx); } @@ -3535,6 +4422,53 @@ static int io_uring_release(struct inode *inode, struct file *file) return 0; } +static void io_uring_cancel_files(struct io_ring_ctx *ctx, + struct files_struct *files) +{ + struct io_kiocb *req; + DEFINE_WAIT(wait); + + while (!list_empty_careful(&ctx->inflight_list)) { + struct io_kiocb *cancel_req = NULL; + + spin_lock_irq(&ctx->inflight_lock); + list_for_each_entry(req, &ctx->inflight_list, inflight_entry) { + if (req->work.files != files) + continue; + /* req is being completed, ignore */ + if (!refcount_inc_not_zero(&req->refs)) + continue; + cancel_req = req; + break; + } + if (cancel_req) + prepare_to_wait(&ctx->inflight_wait, &wait, + TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&ctx->inflight_lock); + + /* We need to keep going until we don't find a matching req */ + if (!cancel_req) + break; + + io_wq_cancel_work(ctx->io_wq, &cancel_req->work); + io_put_req(cancel_req); + schedule(); + } + finish_wait(&ctx->inflight_wait, &wait); +} + +static int io_uring_flush(struct file *file, void *data) +{ + struct io_ring_ctx *ctx = file->private_data; + + io_uring_cancel_files(ctx, data); + if 
(fatal_signal_pending(current) || (current->flags & PF_EXITING)) { + io_cqring_overflow_flush(ctx, true); + io_wq_cancel_all(ctx->io_wq); + } + return 0; +} + static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) { loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT; @@ -3596,25 +4530,20 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, */ ret = 0; if (ctx->flags & IORING_SETUP_SQPOLL) { + if (!list_empty_careful(&ctx->cq_overflow_list)) + io_cqring_overflow_flush(ctx, false); if (flags & IORING_ENTER_SQ_WAKEUP) wake_up(&ctx->sqo_wait); submitted = to_submit; } else if (to_submit) { - bool block_for_last = false; + struct mm_struct *cur_mm; to_submit = min(to_submit, ctx->sq_entries); - - /* - * Allow last submission to block in a series, IFF the caller - * asked to wait for events and we don't currently have - * enough. This potentially avoids an async punt. - */ - if (to_submit == min_complete && - io_cqring_events(ctx->rings) < min_complete) - block_for_last = true; - mutex_lock(&ctx->uring_lock); - submitted = io_ring_submit(ctx, to_submit, block_for_last); + /* already have mm, so io_submit_sqes() won't try to grab it */ + cur_mm = ctx->sqo_mm; + submitted = io_submit_sqes(ctx, to_submit, f.file, fd, + &cur_mm, false); mutex_unlock(&ctx->uring_lock); } if (flags & IORING_ENTER_GETEVENTS) { @@ -3637,6 +4566,7 @@ out_fput: static const struct file_operations io_uring_fops = { .release = io_uring_release, + .flush = io_uring_flush, .mmap = io_uring_mmap, .poll = io_uring_poll, .fasync = io_uring_fasync, @@ -3668,12 +4598,18 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, ctx->cq_entries = rings->cq_ring_entries; size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); - if (size == SIZE_MAX) + if (size == SIZE_MAX) { + io_mem_free(ctx->rings); + ctx->rings = NULL; return -EOVERFLOW; + } ctx->sq_sqes = io_mem_alloc(size); - if (!ctx->sq_sqes) + if (!ctx->sq_sqes) { + io_mem_free(ctx->rings); + ctx->rings = NULL; return -ENOMEM; + } return 0; } @@ -3736,10 +4672,23 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) * Use twice as many entries for the CQ ring. It's possible for the * application to drive a higher depth than the size of the SQ ring, * since the sqes are only used at submission time. This allows for - * some flexibility in overcommitting a bit. + * some flexibility in overcommitting a bit. If the application has + * set IORING_SETUP_CQSIZE, it will have passed in the desired number + * of CQ ring entries manually. */ p->sq_entries = roundup_pow_of_two(entries); - p->cq_entries = 2 * p->sq_entries; + if (p->flags & IORING_SETUP_CQSIZE) { + /* + * If IORING_SETUP_CQSIZE is set, we do the same roundup + * to a power-of-two, if it isn't already. We do NOT impose + * any cq vs sq ring sizing. 
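With IORING_SETUP_SQPOLL the kernel-side thread parks itself after the idle period and sets IORING_SQ_NEED_WAKEUP in the SQ ring flags; userspace then only needs the io_uring_enter(IORING_ENTER_SQ_WAKEUP) call shown in this hunk when that flag is visible. A sketch of the check after publishing a new SQ tail (sq_flags is assumed to point at the mmap'ed flags word; a real implementation would pair this with the appropriate memory barrier):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    static int ring_enter(int ring_fd, unsigned to_submit, unsigned flags)
    {
        return syscall(__NR_io_uring_enter, ring_fd, to_submit, 0, flags,
                       NULL, 0);
    }

    /* call after bumping the SQ tail; only syscall if the sq thread is asleep */
    void kick_sqpoll(int ring_fd, const unsigned *sq_flags)
    {
        if (*(volatile const unsigned *)sq_flags & IORING_SQ_NEED_WAKEUP)
            ring_enter(ring_fd, 0, IORING_ENTER_SQ_WAKEUP);
    }
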
+ */ + if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES) + return -EINVAL; + p->cq_entries = roundup_pow_of_two(p->cq_entries); + } else { + p->cq_entries = 2 * p->sq_entries; + } user = get_uid(current_user()); account_mem = !capable(CAP_IPC_LOCK); @@ -3764,6 +4713,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) ctx->compat = in_compat_syscall(); ctx->account_mem = account_mem; ctx->user = user; + ctx->creds = prepare_creds(); ret = io_allocate_scq_urings(ctx, p); if (ret) @@ -3773,10 +4723,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) if (ret) goto err; - ret = io_uring_get_fd(ctx); - if (ret < 0) - goto err; - memset(&p->sq_off, 0, sizeof(p->sq_off)); p->sq_off.head = offsetof(struct io_rings, sq.head); p->sq_off.tail = offsetof(struct io_rings, sq.tail); @@ -3794,7 +4740,16 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); p->cq_off.cqes = offsetof(struct io_rings, cqes); - p->features = IORING_FEAT_SINGLE_MMAP; + /* + * Install ring fd as the very last thing, so we don't risk someone + * having closed it before we finish setup + */ + ret = io_uring_get_fd(ctx); + if (ret < 0) + goto err; + + p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP; + trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); return ret; err: io_ring_ctx_wait_and_kill(ctx); @@ -3820,7 +4775,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) } if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | - IORING_SETUP_SQ_AFF)) + IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE)) return -EINVAL; ret = io_uring_create(entries, &p); @@ -3864,7 +4819,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, * no new references will come in after we've killed the percpu ref. 
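IORING_SETUP_CQSIZE (handled just above) lets the application size the CQ ring independently of the SQ ring; the value is rounded up to a power of two and must be at least the rounded-up SQ size. A setup sketch requesting a deeper CQ ring and noting the new IORING_FEAT_NODROP bit (raw syscall, assumes headers that define __NR_io_uring_setup):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    /* returns the ring fd, or -1 with errno set (e.g. EINVAL on old kernels) */
    int setup_big_cq(unsigned sq_entries, unsigned cq_entries)
    {
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_CQSIZE;
        p.cq_entries = cq_entries;          /* e.g. 4 * sq_entries */

        int fd = syscall(__NR_io_uring_setup, sq_entries, &p);

        /* on success, p.features & IORING_FEAT_NODROP indicates that CQ
         * overflow is backlogged rather than dropped */
        return fd;
    }
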
*/ mutex_unlock(&ctx->uring_lock); - wait_for_completion(&ctx->ctx_done); + wait_for_completion(&ctx->completions[0]); mutex_lock(&ctx->uring_lock); switch (opcode) { @@ -3886,6 +4841,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, break; ret = io_sqe_files_unregister(ctx); break; + case IORING_REGISTER_FILES_UPDATE: + ret = io_sqe_files_update(ctx, arg, nr_args); + break; case IORING_REGISTER_EVENTFD: ret = -EINVAL; if (nr_args != 1) @@ -3904,7 +4862,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, } /* bring the ctx back to life */ - reinit_completion(&ctx->ctx_done); + reinit_completion(&ctx->completions[0]); percpu_ref_reinit(&ctx->refs); return ret; } @@ -3929,6 +4887,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, mutex_lock(&ctx->uring_lock); ret = __io_uring_register(ctx, opcode, arg, nr_args); mutex_unlock(&ctx->uring_lock); + trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, + ctx->cq_ev_fd != NULL, ret); out_fput: fdput(f); return ret; diff --git a/fs/iomap/apply.c b/fs/iomap/apply.c index 484dd8eda861..76925b40b5fd 100644 --- a/fs/iomap/apply.c +++ b/fs/iomap/apply.c @@ -7,6 +7,7 @@ #include <linux/compiler.h> #include <linux/fs.h> #include <linux/iomap.h> +#include "trace.h" /* * Execute a iomap write on a segment of the mapping that spans a @@ -28,6 +29,8 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, loff_t written = 0, ret; u64 end; + trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_); + /* * Need to map a range from start position for length bytes. This can * span multiple pages - it is only guaranteed to return a range of a @@ -48,6 +51,10 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, if (WARN_ON(iomap.length == 0)) return -EIO; + trace_iomap_apply_dstmap(inode, &iomap); + if (srcmap.type != IOMAP_HOLE) + trace_iomap_apply_srcmap(inode, &srcmap); + /* * Cut down the length to the one actually provided by the filesystem, * as it might not be able to give us the whole size that we requested. diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index c62e807956b6..d33c7bc5ee92 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1067,20 +1067,19 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) lock_page(page); size = i_size_read(inode); - if ((page->mapping != inode->i_mapping) || - (page_offset(page) > size)) { + offset = page_offset(page); + if (page->mapping != inode->i_mapping || offset > size) { /* We overload EFAULT to mean page got truncated */ ret = -EFAULT; goto out_unlock; } /* page is wholly or partially inside EOF */ - if (((page->index + 1) << PAGE_SHIFT) > size) + if (offset > size - PAGE_SIZE) length = offset_in_page(size); else length = PAGE_SIZE; - offset = page_offset(page); while (length > 0) { ret = iomap_apply(inode, offset, length, IOMAP_WRITE | IOMAP_FAULT, ops, page, diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 2f88d64c2a4d..420c0c82f0ac 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -318,7 +318,9 @@ zero_tail: if (pad) iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); } - return copied ? 
copied : ret; + if (copied) + return copied; + return ret; } static loff_t @@ -500,8 +502,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, } pos += ret; - if (iov_iter_rw(iter) == READ && pos >= dio->i_size) + if (iov_iter_rw(iter) == READ && pos >= dio->i_size) { + /* + * We only report that we've read data up to i_size. + * Revert iter to a state corresponding to that as + * some callers (such as splice code) rely on it. + */ + iov_iter_revert(iter, pos - dio->i_size); break; + } } while ((count = iov_iter_count(iter)) > 0); blk_finish_plug(&plug); diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c index 690ef2d7c6c8..bccf305ea9ce 100644 --- a/fs/iomap/fiemap.c +++ b/fs/iomap/fiemap.c @@ -133,12 +133,16 @@ iomap_bmap(struct address_space *mapping, sector_t bno, struct inode *inode = mapping->host; loff_t pos = bno << inode->i_blkbits; unsigned blocksize = i_blocksize(inode); + int ret; if (filemap_write_and_wait(mapping)) return 0; bno = 0; - iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor); + ret = iomap_apply(inode, pos, blocksize, 0, ops, &bno, + iomap_bmap_actor); + if (ret) + return 0; return bno; } EXPORT_SYMBOL_GPL(iomap_bmap); diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h index 4ca1aa2f3f6e..6dc227b8c47e 100644 --- a/fs/iomap/trace.h +++ b/fs/iomap/trace.h @@ -80,6 +80,109 @@ DEFINE_PAGE_EVENT(iomap_writepage); DEFINE_PAGE_EVENT(iomap_releasepage); DEFINE_PAGE_EVENT(iomap_invalidatepage); +#define IOMAP_TYPE_STRINGS \ + { IOMAP_HOLE, "HOLE" }, \ + { IOMAP_DELALLOC, "DELALLOC" }, \ + { IOMAP_MAPPED, "MAPPED" }, \ + { IOMAP_UNWRITTEN, "UNWRITTEN" }, \ + { IOMAP_INLINE, "INLINE" } + +#define IOMAP_FLAGS_STRINGS \ + { IOMAP_WRITE, "WRITE" }, \ + { IOMAP_ZERO, "ZERO" }, \ + { IOMAP_REPORT, "REPORT" }, \ + { IOMAP_FAULT, "FAULT" }, \ + { IOMAP_DIRECT, "DIRECT" }, \ + { IOMAP_NOWAIT, "NOWAIT" } + +#define IOMAP_F_FLAGS_STRINGS \ + { IOMAP_F_NEW, "NEW" }, \ + { IOMAP_F_DIRTY, "DIRTY" }, \ + { IOMAP_F_SHARED, "SHARED" }, \ + { IOMAP_F_MERGED, "MERGED" }, \ + { IOMAP_F_BUFFER_HEAD, "BH" }, \ + { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" } + +DECLARE_EVENT_CLASS(iomap_class, + TP_PROTO(struct inode *inode, struct iomap *iomap), + TP_ARGS(inode, iomap), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(u64, ino) + __field(u64, addr) + __field(loff_t, offset) + __field(u64, length) + __field(u16, type) + __field(u16, flags) + __field(dev_t, bdev) + ), + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->addr = iomap->addr; + __entry->offset = iomap->offset; + __entry->length = iomap->length; + __entry->type = iomap->type; + __entry->flags = iomap->flags; + __entry->bdev = iomap->bdev ? 
iomap->bdev->bd_dev : 0; + ), + TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr %lld offset %lld " + "length %llu type %s flags %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + MAJOR(__entry->bdev), MINOR(__entry->bdev), + __entry->addr, + __entry->offset, + __entry->length, + __print_symbolic(__entry->type, IOMAP_TYPE_STRINGS), + __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS)) +) + +#define DEFINE_IOMAP_EVENT(name) \ +DEFINE_EVENT(iomap_class, name, \ + TP_PROTO(struct inode *inode, struct iomap *iomap), \ + TP_ARGS(inode, iomap)) +DEFINE_IOMAP_EVENT(iomap_apply_dstmap); +DEFINE_IOMAP_EVENT(iomap_apply_srcmap); + +TRACE_EVENT(iomap_apply, + TP_PROTO(struct inode *inode, loff_t pos, loff_t length, + unsigned int flags, const void *ops, void *actor, + unsigned long caller), + TP_ARGS(inode, pos, length, flags, ops, actor, caller), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(u64, ino) + __field(loff_t, pos) + __field(loff_t, length) + __field(unsigned int, flags) + __field(const void *, ops) + __field(void *, actor) + __field(unsigned long, caller) + ), + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pos = pos; + __entry->length = length; + __entry->flags = flags; + __entry->ops = ops; + __entry->actor = actor; + __entry->caller = caller; + ), + TP_printk("dev %d:%d ino 0x%llx pos %lld length %lld flags %s (0x%x) " + "ops %ps caller %pS actor %ps", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->pos, + __entry->length, + __print_flags(__entry->flags, "|", IOMAP_FLAGS_STRINGS), + __entry->flags, + __entry->ops, + (void *)__entry->caller, + __entry->actor) +); + #endif /* _IOMAP_TRACE_H */ #undef TRACE_INCLUDE_PATH diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index c068912408dd..27b9f9dee434 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -726,7 +726,7 @@ static void stop_this_handle(handle_t *handle) if (atomic_dec_and_test(&transaction->t_updates)) wake_up(&journal->j_wait_updates); - rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); + rwsem_release(&journal->j_trans_commit_map, _THIS_IP_); /* * Scope of the GFP_NOFS context is over here and so we can restore the * original alloc context. 
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 6ebae6bbe6a5..9d96e6871e1a 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -438,7 +438,7 @@ void kernfs_put_active(struct kernfs_node *kn) return; if (kernfs_lockdep(kn)) - rwsem_release(&kn->dep_map, 1, _RET_IP_); + rwsem_release(&kn->dep_map, _RET_IP_); v = atomic_dec_return(&kn->active); if (likely(v != KN_DEACTIVATED_BIAS)) return; @@ -476,7 +476,7 @@ static void kernfs_drain(struct kernfs_node *kn) if (kernfs_lockdep(kn)) { lock_acquired(&kn->dep_map, _RET_IP_); - rwsem_release(&kn->dep_map, 1, _RET_IP_); + rwsem_release(&kn->dep_map, _RET_IP_); } kernfs_drain_open_files(kn); @@ -508,10 +508,6 @@ void kernfs_put(struct kernfs_node *kn) struct kernfs_node *parent; struct kernfs_root *root; - /* - * kernfs_node is freed with ->count 0, kernfs_find_and_get_node_by_ino - * depends on this to filter reused stale node - */ if (!kn || !atomic_dec_and_test(&kn->count)) return; root = kernfs_root(kn); @@ -536,7 +532,7 @@ void kernfs_put(struct kernfs_node *kn) kmem_cache_free(kernfs_iattrs_cache, kn->iattr); } spin_lock(&kernfs_idr_lock); - idr_remove(&root->ino_idr, kn->id.ino); + idr_remove(&root->ino_idr, (u32)kernfs_ino(kn)); spin_unlock(&kernfs_idr_lock); kmem_cache_free(kernfs_node_cache, kn); @@ -621,8 +617,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, unsigned flags) { struct kernfs_node *kn; - u32 gen; - int cursor; + u32 id_highbits; int ret; name = kstrdup_const(name, GFP_KERNEL); @@ -635,23 +630,19 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, idr_preload(GFP_KERNEL); spin_lock(&kernfs_idr_lock); - cursor = idr_get_cursor(&root->ino_idr); ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC); - if (ret >= 0 && ret < cursor) - root->next_generation++; - gen = root->next_generation; + if (ret >= 0 && ret < root->last_id_lowbits) + root->id_highbits++; + id_highbits = root->id_highbits; + root->last_id_lowbits = ret; spin_unlock(&kernfs_idr_lock); idr_preload_end(); if (ret < 0) goto err_out2; - kn->id.ino = ret; - kn->id.generation = gen; - /* - * set ino first. This RELEASE is paired with atomic_inc_not_zero in - * kernfs_find_and_get_node_by_ino - */ - atomic_set_release(&kn->count, 1); + kn->id = (u64)id_highbits << 32 | ret; + + atomic_set(&kn->count, 1); atomic_set(&kn->active, KN_DEACTIVATED_BIAS); RB_CLEAR_NODE(&kn->rb); @@ -680,7 +671,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, return kn; err_out3: - idr_remove(&root->ino_idr, kn->id.ino); + idr_remove(&root->ino_idr, (u32)kernfs_ino(kn)); err_out2: kmem_cache_free(kernfs_node_cache, kn); err_out1: @@ -705,50 +696,52 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, } /* - * kernfs_find_and_get_node_by_ino - get kernfs_node from inode number + * kernfs_find_and_get_node_by_id - get kernfs_node from node id * @root: the kernfs root - * @ino: inode number + * @id: the target node id + * + * @id's lower 32bits encode ino and upper gen. If the gen portion is + * zero, all generations are matched. * * RETURNS: * NULL on failure. 
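The kernfs node id is now a single u64: when ino_t is 32 bits wide the low word is the idr-allocated inode number and the high word a generation that is bumped each time the idr wraps; on 64-bit ino configurations the whole value is the ino and the generation reads as 0. A small sketch of that packing (the accessor names here are illustrative; the real kernfs_ino()/kernfs_gen() helpers live in the kernfs header, not in this hunk):

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t make_node_id(uint32_t highbits, uint32_t lowbits)
    {
        return (uint64_t)highbits << 32 | lowbits;
    }

    static inline uint32_t id_lowbits(uint64_t id)   /* ino on 32-bit ino setups */
    {
        return (uint32_t)id;
    }

    static inline uint32_t id_highbits(uint64_t id)  /* generation */
    {
        return (uint32_t)(id >> 32);
    }

    int main(void)
    {
        uint64_t id = make_node_id(1, 42);  /* generation 1, ino 42 */

        assert(id_lowbits(id) == 42);
        assert(id_highbits(id) == 1);
        return 0;
    }
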
Return a kernfs node with reference counter incremented */ -struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root, - unsigned int ino) +struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root, + u64 id) { struct kernfs_node *kn; + ino_t ino = kernfs_id_ino(id); + u32 gen = kernfs_id_gen(id); - rcu_read_lock(); - kn = idr_find(&root->ino_idr, ino); + spin_lock(&kernfs_idr_lock); + + kn = idr_find(&root->ino_idr, (u32)ino); if (!kn) - goto out; + goto err_unlock; - /* - * Since kernfs_node is freed in RCU, it's possible an old node for ino - * is freed, but reused before RCU grace period. But a freed node (see - * kernfs_put) or an incompletedly initialized node (see - * __kernfs_new_node) should have 'count' 0. We can use this fact to - * filter out such node. - */ - if (!atomic_inc_not_zero(&kn->count)) { - kn = NULL; - goto out; + if (sizeof(ino_t) >= sizeof(u64)) { + /* we looked up with the low 32bits, compare the whole */ + if (kernfs_ino(kn) != ino) + goto err_unlock; + } else { + /* 0 matches all generations */ + if (unlikely(gen && kernfs_gen(kn) != gen)) + goto err_unlock; } /* - * The node could be a new node or a reused node. If it's a new node, - * we are ok. If it's reused because of RCU (because of - * SLAB_TYPESAFE_BY_RCU), the __kernfs_new_node always sets its 'ino' - * before 'count'. So if 'count' is uptodate, 'ino' should be uptodate, - * hence we can use 'ino' to filter stale node. + * ACTIVATED is protected with kernfs_mutex but it was clear when + * @kn was added to idr and we just wanna see it set. No need to + * grab kernfs_mutex. */ - if (kn->id.ino != ino) - goto out; - rcu_read_unlock(); + if (unlikely(!(kn->flags & KERNFS_ACTIVATED) || + !atomic_inc_not_zero(&kn->count))) + goto err_unlock; + spin_unlock(&kernfs_idr_lock); return kn; -out: - rcu_read_unlock(); - kernfs_put(kn); +err_unlock: + spin_unlock(&kernfs_idr_lock); return NULL; } @@ -962,7 +955,17 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, idr_init(&root->ino_idr); INIT_LIST_HEAD(&root->supers); - root->next_generation = 1; + + /* + * On 64bit ino setups, id is ino. On 32bit, low 32bits are ino. + * High bits generation. The starting value for both ino and + * genenration is 1. Initialize upper 32bit allocation + * accordingly. + */ + if (sizeof(ino_t) >= sizeof(u64)) + root->id_highbits = 0; + else + root->id_highbits = 1; kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, @@ -1678,7 +1681,7 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx) const char *name = pos->name; unsigned int type = dt_type(pos); int len = strlen(name); - ino_t ino = pos->id.ino; + ino_t ino = kernfs_ino(pos); ctx->pos = pos->hash; file->private_data = pos; diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index e8c792b49616..34366db3620d 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -892,7 +892,7 @@ repeat: * have the matching @file available. Look up the inodes * and generate the events manually. 
*/ - inode = ilookup(info->sb, kn->id.ino); + inode = ilookup(info->sb, kernfs_ino(kn)); if (!inode) continue; @@ -901,7 +901,7 @@ repeat: if (parent) { struct inode *p_inode; - p_inode = ilookup(info->sb, parent->id.ino); + p_inode = ilookup(info->sb, kernfs_ino(parent)); if (p_inode) { fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD, inode, FSNOTIFY_EVENT_INODE, &name, 0); diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index f3eaa8869f42..eac277c63d42 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -201,7 +201,7 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode) inode->i_private = kn; inode->i_mapping->a_ops = &kernfs_aops; inode->i_op = &kernfs_iops; - inode->i_generation = kn->id.generation; + inode->i_generation = kernfs_gen(kn); set_default_inode_attr(inode, kn->mode); kernfs_refresh_inode(kn, inode); @@ -247,7 +247,7 @@ struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn) { struct inode *inode; - inode = iget_locked(sb, kn->id.ino); + inode = iget_locked(sb, kernfs_ino(kn)); if (inode && (inode->i_state & I_NEW)) kernfs_init_inode(kn, inode); diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index 02ce570a9a3c..2f3c51d55261 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -109,8 +109,6 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, const char *name, umode_t mode, kuid_t uid, kgid_t gid, unsigned flags); -struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root, - unsigned int ino); /* * file.c diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index 6c12fac2c287..4d31503abaee 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -53,63 +53,85 @@ const struct super_operations kernfs_sops = { .show_path = kernfs_sop_show_path, }; -/* - * Similar to kernfs_fh_get_inode, this one gets kernfs node from inode - * number and generation - */ -struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root, - const union kernfs_node_id *id) +static int kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len, + struct inode *parent) { - struct kernfs_node *kn; + struct kernfs_node *kn = inode->i_private; - kn = kernfs_find_and_get_node_by_ino(root, id->ino); - if (!kn) - return NULL; - if (kn->id.generation != id->generation) { - kernfs_put(kn); - return NULL; + if (*max_len < 2) { + *max_len = 2; + return FILEID_INVALID; } - return kn; + + *max_len = 2; + *(u64 *)fh = kn->id; + return FILEID_KERNFS; } -static struct inode *kernfs_fh_get_inode(struct super_block *sb, - u64 ino, u32 generation) +static struct dentry *__kernfs_fh_to_dentry(struct super_block *sb, + struct fid *fid, int fh_len, + int fh_type, bool get_parent) { struct kernfs_super_info *info = kernfs_info(sb); - struct inode *inode; struct kernfs_node *kn; + struct inode *inode; + u64 id; - if (ino == 0) - return ERR_PTR(-ESTALE); + if (fh_len < 2) + return NULL; + + switch (fh_type) { + case FILEID_KERNFS: + id = *(u64 *)fid; + break; + case FILEID_INO32_GEN: + case FILEID_INO32_GEN_PARENT: + /* + * blk_log_action() exposes "LOW32,HIGH32" pair without + * type and userland can call us with generic fid + * constructed from them. Combine it back to ID. See + * blk_log_action(). 
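kernfs_encode_fh() above stores the 64-bit node id directly in a two-word file handle (FILEID_KERNFS), while the decode path just below also accepts a generic FILEID_INO32_GEN pair and recombines it with the generation in the high word. A small round-trip sketch of both layouts (pure C, illustrative names; the ino-then-gen ordering mirrors struct fid's i32 layout):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* FILEID_KERNFS: the raw u64 id occupies the two 32-bit handle words */
    static uint64_t decode_kernfs_fh(const uint32_t fh[2])
    {
        uint64_t id;

        memcpy(&id, fh, sizeof(id));
        return id;
    }

    /* FILEID_INO32_GEN: recombine as the decode path below does */
    static uint64_t decode_ino32_gen(uint32_t ino, uint32_t gen)
    {
        return (uint64_t)gen << 32 | ino;
    }

    int main(void)
    {
        uint32_t fh[2];
        uint64_t id = (uint64_t)7 << 32 | 1234;     /* gen 7, ino 1234 */

        memcpy(fh, &id, sizeof(id));                /* encode_fh equivalent */
        assert(decode_kernfs_fh(fh) == id);
        assert(decode_ino32_gen(1234, 7) == id);
        return 0;
    }
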
+ */ + id = ((u64)fid->i32.gen << 32) | fid->i32.ino; + break; + default: + return NULL; + } - kn = kernfs_find_and_get_node_by_ino(info->root, ino); + kn = kernfs_find_and_get_node_by_id(info->root, id); if (!kn) return ERR_PTR(-ESTALE); + + if (get_parent) { + struct kernfs_node *parent; + + parent = kernfs_get_parent(kn); + kernfs_put(kn); + kn = parent; + if (!kn) + return ERR_PTR(-ESTALE); + } + inode = kernfs_get_inode(sb, kn); kernfs_put(kn); if (!inode) return ERR_PTR(-ESTALE); - if (generation && inode->i_generation != generation) { - /* we didn't find the right inode.. */ - iput(inode); - return ERR_PTR(-ESTALE); - } - return inode; + return d_obtain_alias(inode); } -static struct dentry *kernfs_fh_to_dentry(struct super_block *sb, struct fid *fid, - int fh_len, int fh_type) +static struct dentry *kernfs_fh_to_dentry(struct super_block *sb, + struct fid *fid, int fh_len, + int fh_type) { - return generic_fh_to_dentry(sb, fid, fh_len, fh_type, - kernfs_fh_get_inode); + return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, false); } -static struct dentry *kernfs_fh_to_parent(struct super_block *sb, struct fid *fid, - int fh_len, int fh_type) +static struct dentry *kernfs_fh_to_parent(struct super_block *sb, + struct fid *fid, int fh_len, + int fh_type) { - return generic_fh_to_parent(sb, fid, fh_len, fh_type, - kernfs_fh_get_inode); + return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, true); } static struct dentry *kernfs_get_parent_dentry(struct dentry *child) @@ -120,6 +142,7 @@ static struct dentry *kernfs_get_parent_dentry(struct dentry *child) } static const struct export_operations kernfs_export_ops = { + .encode_fh = kernfs_encode_fh, .fh_to_dentry = kernfs_fh_to_dentry, .fh_to_parent = kernfs_fh_to_parent, .get_parent = kernfs_get_parent_dentry, @@ -363,18 +386,9 @@ void kernfs_kill_sb(struct super_block *sb) void __init kernfs_init(void) { - - /* - * the slab is freed in RCU context, so kernfs_find_and_get_node_by_ino - * can access the slab lock free. This could introduce stale nodes, - * please see how kernfs_find_and_get_node_by_ino filters out stale - * nodes. - */ kernfs_node_cache = kmem_cache_create("kernfs_node_cache", sizeof(struct kernfs_node), - 0, - SLAB_PANIC | SLAB_TYPESAFE_BY_RCU, - NULL); + 0, SLAB_PANIC, NULL); /* Creates slab cache for kernfs inode attributes */ kernfs_iattrs_cache = kmem_cache_create("kernfs_iattrs_cache", diff --git a/fs/libfs.c b/fs/libfs.c index 540611b99b9a..1463b038ffc4 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -473,8 +473,7 @@ EXPORT_SYMBOL(simple_write_begin); /** * simple_write_end - .write_end helper for non-block-device FSes - * @available: See .write_end of address_space_operations - * @file: " + * @file: See .write_end of address_space_operations * @mapping: " * @pos: " * @len: " diff --git a/fs/namespace.c b/fs/namespace.c index fe0e9e1410fe..2adfe7b166a3 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2478,8 +2478,10 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * time64_to_tm(sb->s_time_max, 0, &tm); - pr_warn("Mounted %s file system at %s supports timestamps until %04ld (0x%llx)\n", - sb->s_type->name, mntpath, + pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n", + sb->s_type->name, + is_mounted(mnt) ? 
"remounted" : "mounted", + mntpath, tm.tm_year+1900, (unsigned long long)sb->s_time_max); free_page((unsigned long)buf); @@ -2764,14 +2766,11 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint, if (IS_ERR(mnt)) return PTR_ERR(mnt); - error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags); - if (error < 0) { - mntput(mnt); - return error; - } - mnt_warn_timestamp_expiry(mountpoint, mnt); + error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags); + if (error < 0) + mntput(mnt); return error; } diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 071b90a45933..af549d70ec50 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -53,6 +53,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation, return false; } +struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode) +{ + struct nfs_delegation *delegation; + + delegation = rcu_dereference(NFS_I(inode)->delegation); + if (nfs4_is_valid_delegation(delegation, 0)) + return delegation; + return NULL; +} + static int nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark) { @@ -1181,7 +1191,7 @@ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode) if (delegation != NULL && nfs4_stateid_match_other(dst, &delegation->stateid)) { dst->seqid = delegation->stateid.seqid; - return ret; + ret = true; } rcu_read_unlock(); out: diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 9eb87ae4c982..8b14d441e699 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -68,6 +68,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred); bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode); +struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode); void nfs_mark_delegation_referenced(struct nfs_delegation *delegation); int nfs4_have_delegation(struct inode *inode, fmode_t flags); int nfs4_check_delegation(struct inode *inode, fmode_t flags); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ab8ca20fd579..caacf5e7f5e1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1440,8 +1440,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, return 0; if ((delegation->type & fmode) != fmode) return 0; - if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) - return 0; switch (claim) { case NFS4_OPEN_CLAIM_NULL: case NFS4_OPEN_CLAIM_FH: @@ -1810,7 +1808,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) { struct nfs4_state *state = opendata->state; - struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *delegation; int open_mode = opendata->o_arg.open_flags; fmode_t fmode = opendata->o_arg.fmode; @@ -1827,7 +1824,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) } spin_unlock(&state->owner->so_lock); rcu_read_lock(); - delegation = rcu_dereference(nfsi->delegation); + delegation = nfs4_get_valid_delegation(state->inode); if (!can_open_delegated(delegation, fmode, claim)) { rcu_read_unlock(); break; @@ -2371,7 +2368,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) data->o_arg.open_flags, claim)) goto out_no_action; rcu_read_lock(); - delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); + delegation = 
nfs4_get_valid_delegation(data->state->inode); if (can_open_delegated(delegation, data->o_arg.fmode, claim)) goto unlock_no_action; rcu_read_unlock(); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 6e774c5ea13b..1c4c51f3df60 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -1687,7 +1687,7 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb, spin_unlock_irqrestore(&lockres->l_lock, flags); #ifdef CONFIG_DEBUG_LOCK_ALLOC if (lockres->l_lockdep_map.key != NULL) - rwsem_release(&lockres->l_lockdep_map, 1, caller_ip); + rwsem_release(&lockres->l_lockdep_map, caller_ip); #endif } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 2e982db3e1ae..9876db52913a 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1230,6 +1230,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid)); if (IS_ERR(transfer_to[USRQUOTA])) { status = PTR_ERR(transfer_to[USRQUOTA]); + transfer_to[USRQUOTA] = NULL; goto bail_unlock; } } @@ -1239,6 +1240,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid)); if (IS_ERR(transfer_to[GRPQUOTA])) { status = PTR_ERR(transfer_to[GRPQUOTA]); + transfer_to[GRPQUOTA] = NULL; goto bail_unlock; } } @@ -2096,53 +2098,89 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos) return 0; } -static int ocfs2_prepare_inode_for_refcount(struct inode *inode, - struct file *file, - loff_t pos, size_t count, - int *meta_level) +static int ocfs2_inode_lock_for_extent_tree(struct inode *inode, + struct buffer_head **di_bh, + int meta_level, + int overwrite_io, + int write_sem, + int wait) { - int ret; - struct buffer_head *di_bh = NULL; - u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; - u32 clusters = - ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos; + int ret = 0; - ret = ocfs2_inode_lock(inode, &di_bh, 1); - if (ret) { - mlog_errno(ret); + if (wait) + ret = ocfs2_inode_lock(inode, NULL, meta_level); + else + ret = ocfs2_try_inode_lock(inode, + overwrite_io ? NULL : di_bh, meta_level); + if (ret < 0) goto out; + + if (wait) { + if (write_sem) + down_write(&OCFS2_I(inode)->ip_alloc_sem); + else + down_read(&OCFS2_I(inode)->ip_alloc_sem); + } else { + if (write_sem) + ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem); + else + ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem); + + if (!ret) { + ret = -EAGAIN; + goto out_unlock; + } } - *meta_level = 1; + return ret; - ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); - if (ret) - mlog_errno(ret); +out_unlock: + brelse(*di_bh); + ocfs2_inode_unlock(inode, meta_level); out: - brelse(di_bh); return ret; } +static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode, + struct buffer_head **di_bh, + int meta_level, + int write_sem) +{ + if (write_sem) + up_write(&OCFS2_I(inode)->ip_alloc_sem); + else + up_read(&OCFS2_I(inode)->ip_alloc_sem); + + brelse(*di_bh); + *di_bh = NULL; + + if (meta_level >= 0) + ocfs2_inode_unlock(inode, meta_level); +} + static int ocfs2_prepare_inode_for_write(struct file *file, loff_t pos, size_t count, int wait) { int ret = 0, meta_level = 0, overwrite_io = 0; + int write_sem = 0; struct dentry *dentry = file->f_path.dentry; struct inode *inode = d_inode(dentry); struct buffer_head *di_bh = NULL; + u32 cpos; + u32 clusters; /* * We start with a read level meta lock and only jump to an ex * if we need to make modifications here. 
*/ for(;;) { - if (wait) - ret = ocfs2_inode_lock(inode, NULL, meta_level); - else - ret = ocfs2_try_inode_lock(inode, - overwrite_io ? NULL : &di_bh, meta_level); + ret = ocfs2_inode_lock_for_extent_tree(inode, + &di_bh, + meta_level, + overwrite_io, + write_sem, + wait); if (ret < 0) { - meta_level = -1; if (ret != -EAGAIN) mlog_errno(ret); goto out; @@ -2154,15 +2192,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file, */ if (!wait && !overwrite_io) { overwrite_io = 1; - if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) { - ret = -EAGAIN; - goto out_unlock; - } ret = ocfs2_overwrite_io(inode, di_bh, pos, count); - brelse(di_bh); - di_bh = NULL; - up_read(&OCFS2_I(inode)->ip_alloc_sem); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); @@ -2181,7 +2212,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file, * set inode->i_size at the end of a write. */ if (should_remove_suid(dentry)) { if (meta_level == 0) { - ocfs2_inode_unlock(inode, meta_level); + ocfs2_inode_unlock_for_extent_tree(inode, + &di_bh, + meta_level, + write_sem); meta_level = 1; continue; } @@ -2195,18 +2229,32 @@ static int ocfs2_prepare_inode_for_write(struct file *file, ret = ocfs2_check_range_for_refcount(inode, pos, count); if (ret == 1) { - ocfs2_inode_unlock(inode, meta_level); - meta_level = -1; - - ret = ocfs2_prepare_inode_for_refcount(inode, - file, - pos, - count, - &meta_level); + ocfs2_inode_unlock_for_extent_tree(inode, + &di_bh, + meta_level, + write_sem); + ret = ocfs2_inode_lock_for_extent_tree(inode, + &di_bh, + meta_level, + overwrite_io, + 1, + wait); + write_sem = 1; + if (ret < 0) { + if (ret != -EAGAIN) + mlog_errno(ret); + goto out; + } + + cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; + clusters = + ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos; + ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); } if (ret < 0) { - mlog_errno(ret); + if (ret != -EAGAIN) + mlog_errno(ret); goto out_unlock; } @@ -2217,10 +2265,10 @@ out_unlock: trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, pos, count, wait); - brelse(di_bh); - - if (meta_level >= 0) - ocfs2_inode_unlock(inode, meta_level); + ocfs2_inode_unlock_for_extent_tree(inode, + &di_bh, + meta_level, + write_sem); out: return ret; diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index a032f0297dad..1afe57f425a0 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -217,7 +217,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb) /* At this point, we know that no more recovery threads can be * launched, so wait for any recovery completion work to * complete. 
*/ - flush_workqueue(osb->ocfs2_wq); + if (osb->ocfs2_wq) + flush_workqueue(osb->ocfs2_wq); /* * Now that recovery is shut down, and the osb is about to be diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 158e5af767fd..720e9f94957e 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -377,7 +377,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) struct ocfs2_dinode *alloc = NULL; cancel_delayed_work(&osb->la_enable_wq); - flush_workqueue(osb->ocfs2_wq); + if (osb->ocfs2_wq) + flush_workqueue(osb->ocfs2_wq); if (osb->local_alloc_state == OCFS2_LA_UNUSED) goto out; diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d8507972ee13..90c830e3758e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc, return loc->xl_ops->xlo_check_space(loc, xi); } +static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +{ + loc->xl_ops->xlo_add_entry(loc, name_hash); + loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash); + /* + * We can't leave the new entry's xe_name_offset at zero or + * add_namevalue() will go nuts. We set it to the size of our + * storage so that it can never be less than any other entry. + */ + loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size); +} + static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { @@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, if (rc) goto out; - if (!loc->xl_entry) { - rc = -EINVAL; - goto out; - } - - if (ocfs2_xa_can_reuse_entry(loc, xi)) { - orig_value_size = loc->xl_entry->xe_value_size; - rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); - if (rc) - goto out; - goto alloc_value; - } + if (loc->xl_entry) { + if (ocfs2_xa_can_reuse_entry(loc, xi)) { + orig_value_size = loc->xl_entry->xe_value_size; + rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); + if (rc) + goto out; + goto alloc_value; + } - if (!ocfs2_xattr_is_local(loc->xl_entry)) { - orig_clusters = ocfs2_xa_value_clusters(loc); - rc = ocfs2_xa_value_truncate(loc, 0, ctxt); - if (rc) { - mlog_errno(rc); - ocfs2_xa_cleanup_value_truncate(loc, - "overwriting", - orig_clusters); - goto out; + if (!ocfs2_xattr_is_local(loc->xl_entry)) { + orig_clusters = ocfs2_xa_value_clusters(loc); + rc = ocfs2_xa_value_truncate(loc, 0, ctxt); + if (rc) { + mlog_errno(rc); + ocfs2_xa_cleanup_value_truncate(loc, + "overwriting", + orig_clusters); + goto out; + } } - } - ocfs2_xa_wipe_namevalue(loc); + ocfs2_xa_wipe_namevalue(loc); + } else + ocfs2_xa_add_entry(loc, name_hash); /* * If we get here, we have a blank entry. Fill it. 
We grow our diff --git a/fs/pipe.c b/fs/pipe.c index 8a2ab2f974bd..a9149199e0e7 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -793,6 +793,8 @@ int create_pipe_files(struct file **res, int flags) } res[0]->private_data = inode->i_pipe; res[1] = f; + stream_open(inode, res[0]); + stream_open(inode, res[1]); return 0; } @@ -931,9 +933,9 @@ static int fifo_open(struct inode *inode, struct file *filp) __pipe_lock(pipe); /* We can only do regular read/write on fifos */ - filp->f_mode &= (FMODE_READ | FMODE_WRITE); + stream_open(inode, filp); - switch (filp->f_mode) { + switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) { case FMODE_READ: /* * O_RDONLY diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index ac9247371871..8c1f1bb1a5ce 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -132,9 +132,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR); show_val_kb(m, "ShmemPmdMapped: ", global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR); - show_val_kb(m, "FileHugePages: ", + show_val_kb(m, "FileHugePages: ", global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR); - show_val_kb(m, "FilePmdMapped: ", + show_val_kb(m, "FilePmdMapped: ", global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR); #endif diff --git a/fs/proc/page.c b/fs/proc/page.c index 544d1ee15aee..7c952ee732e6 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf, return -EINVAL; while (count > 0) { - if (pfn_valid(pfn)) - ppage = pfn_to_page(pfn); - else - ppage = NULL; + /* + * TODO: ZONE_DEVICE support requires identifying + * memmaps that were actually initialized. + */ + ppage = pfn_to_online_page(pfn); + if (!ppage || PageSlab(ppage) || page_has_type(ppage)) pcount = 0; else @@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf, return -EINVAL; while (count > 0) { - if (pfn_valid(pfn)) - ppage = pfn_to_page(pfn); - else - ppage = NULL; + /* + * TODO: ZONE_DEVICE support requires identifying + * memmaps that were actually initialized. + */ + ppage = pfn_to_online_page(pfn); if (put_user(stable_page_flags(ppage), out)) { ret = -EFAULT; @@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf, return -EINVAL; while (count > 0) { - if (pfn_valid(pfn)) - ppage = pfn_to_page(pfn); - else - ppage = NULL; + /* + * TODO: ZONE_DEVICE support requires identifying + * memmaps that were actually initialized.
+ */ + ppage = pfn_to_online_page(pfn); if (ppage) ino = page_cgroup_ino(ppage); diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 80c305f206bb..37bdbec5b402 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -120,20 +120,23 @@ static int show_stat(struct seq_file *p, void *v) getboottime64(&boottime); for_each_possible_cpu(i) { - struct kernel_cpustat *kcs = &kcpustat_cpu(i); - - user += kcs->cpustat[CPUTIME_USER]; - nice += kcs->cpustat[CPUTIME_NICE]; - system += kcs->cpustat[CPUTIME_SYSTEM]; - idle += get_idle_time(kcs, i); - iowait += get_iowait_time(kcs, i); - irq += kcs->cpustat[CPUTIME_IRQ]; - softirq += kcs->cpustat[CPUTIME_SOFTIRQ]; - steal += kcs->cpustat[CPUTIME_STEAL]; - guest += kcs->cpustat[CPUTIME_GUEST]; - guest_nice += kcs->cpustat[CPUTIME_GUEST_NICE]; - sum += kstat_cpu_irqs_sum(i); - sum += arch_irq_stat_cpu(i); + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + user += cpustat[CPUTIME_USER]; + nice += cpustat[CPUTIME_NICE]; + system += cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(&kcpustat, i); + iowait += get_iowait_time(&kcpustat, i); + irq += cpustat[CPUTIME_IRQ]; + softirq += cpustat[CPUTIME_SOFTIRQ]; + steal += cpustat[CPUTIME_STEAL]; + guest += cpustat[CPUTIME_GUEST]; + guest_nice += cpustat[CPUTIME_GUEST_NICE]; + sum += kstat_cpu_irqs_sum(i); + sum += arch_irq_stat_cpu(i); for (j = 0; j < NR_SOFTIRQS; j++) { unsigned int softirq_stat = kstat_softirqs_cpu(j, i); @@ -157,19 +160,22 @@ static int show_stat(struct seq_file *p, void *v) seq_putc(p, '\n'); for_each_online_cpu(i) { - struct kernel_cpustat *kcs = &kcpustat_cpu(i); + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = kcs->cpustat[CPUTIME_USER]; - nice = kcs->cpustat[CPUTIME_NICE]; - system = kcs->cpustat[CPUTIME_SYSTEM]; - idle = get_idle_time(kcs, i); - iowait = get_iowait_time(kcs, i); - irq = kcs->cpustat[CPUTIME_IRQ]; - softirq = kcs->cpustat[CPUTIME_SOFTIRQ]; - steal = kcs->cpustat[CPUTIME_STEAL]; - guest = kcs->cpustat[CPUTIME_GUEST]; - guest_nice = kcs->cpustat[CPUTIME_GUEST_NICE]; + user = cpustat[CPUTIME_USER]; + nice = cpustat[CPUTIME_NICE]; + system = cpustat[CPUTIME_SYSTEM]; + idle = get_idle_time(&kcpustat, i); + iowait = get_iowait_time(&kcpustat, i); + irq = cpustat[CPUTIME_IRQ]; + softirq = cpustat[CPUTIME_SOFTIRQ]; + steal = cpustat[CPUTIME_STEAL]; + guest = cpustat[CPUTIME_GUEST]; + guest_nice = cpustat[CPUTIME_GUEST_NICE]; seq_printf(p, "cpu%d", i); seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); diff --git a/fs/readdir.c b/fs/readdir.c index 6e2623e57b2e..d26d5ea4de7b 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -105,9 +105,9 @@ EXPORT_SYMBOL(iterate_dir); */ static int verify_dirent_name(const char *name, int len) { - if (WARN_ON_ONCE(!len)) + if (!len) return -EIO; - if (WARN_ON_ONCE(memchr(name, '/', len))) + if (memchr(name, '/', len)) return -EIO; return 0; } diff --git a/fs/splice.c b/fs/splice.c index 98412721f056..e509239d7e06 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -945,12 +945,13 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, WARN_ON_ONCE(pipe->nrbufs != 0); while (len) { + unsigned int pipe_pages; size_t read_len; loff_t pos = sd->pos, prev_pos = pos; /* Don't try to read more than the pipe has space for.
*/ - read_len = min_t(size_t, len, - (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT); + pipe_pages = pipe->buffers - pipe->nrbufs; + read_len = min(len, (size_t)pipe_pages << PAGE_SHIFT); ret = do_splice_to(in, &pos, pipe, read_len, flags); if (unlikely(ret <= 0)) goto out_release; @@ -1180,8 +1181,15 @@ static long do_splice(struct file *in, loff_t __user *off_in, pipe_lock(opipe); ret = wait_for_space(opipe, flags); - if (!ret) + if (!ret) { + unsigned int pipe_pages; + + /* Don't try to read more than the pipe has space for. */ + pipe_pages = opipe->buffers - opipe->nrbufs; + len = min(len, (size_t)pipe_pages << PAGE_SHIFT); + ret = do_splice_to(in, &offset, opipe, len, flags); + } pipe_unlock(opipe); if (ret > 0) wakeup_pipe_readers(opipe); diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h index 39dd2b908106..e9371a8e0e26 100644 --- a/fs/xfs/libxfs/xfs_fs.h +++ b/fs/xfs/libxfs/xfs_fs.h @@ -366,11 +366,11 @@ struct xfs_bulkstat { uint64_t bs_blocks; /* number of blocks */ uint64_t bs_xflags; /* extended flags */ - uint64_t bs_atime; /* access time, seconds */ - uint64_t bs_mtime; /* modify time, seconds */ + int64_t bs_atime; /* access time, seconds */ + int64_t bs_mtime; /* modify time, seconds */ - uint64_t bs_ctime; /* inode change time, seconds */ - uint64_t bs_btime; /* creation time, seconds */ + int64_t bs_ctime; /* inode change time, seconds */ + int64_t bs_btime; /* creation time, seconds */ uint32_t bs_gen; /* generation count */ uint32_t bs_uid; /* user id */ |