Diffstat (limited to 'fs')
-rw-r--r-- fs/9p/acl.c | 2
-rw-r--r-- fs/9p/v9fs.c | 21
-rw-r--r-- fs/9p/v9fs.h | 1
-rw-r--r-- fs/9p/vfs_dir.c | 19
-rw-r--r-- fs/9p/vfs_file.c | 24
-rw-r--r-- fs/aio.c | 8
-rw-r--r-- fs/binfmt_elf.c | 6
-rw-r--r-- fs/btrfs/backref.c | 39
-rw-r--r-- fs/btrfs/btrfs_inode.h | 8
-rw-r--r-- fs/btrfs/check-integrity.c | 6
-rw-r--r-- fs/btrfs/compression.c | 8
-rw-r--r-- fs/btrfs/ctree.c | 85
-rw-r--r-- fs/btrfs/ctree.h | 56
-rw-r--r-- fs/btrfs/delayed-inode.c | 41
-rw-r--r-- fs/btrfs/delayed-inode.h | 4
-rw-r--r-- fs/btrfs/delayed-ref.c | 107
-rw-r--r-- fs/btrfs/delayed-ref.h | 10
-rw-r--r-- fs/btrfs/dev-replace.c | 64
-rw-r--r-- fs/btrfs/dev-replace.h | 8
-rw-r--r-- fs/btrfs/dir-item.c | 8
-rw-r--r-- fs/btrfs/disk-io.c | 24
-rw-r--r-- fs/btrfs/export.c | 4
-rw-r--r-- fs/btrfs/extent-tree.c | 461
-rw-r--r-- fs/btrfs/extent_io.c | 45
-rw-r--r-- fs/btrfs/extent_io.h | 4
-rw-r--r-- fs/btrfs/extent_map.c | 32
-rw-r--r-- fs/btrfs/extent_map.h | 4
-rw-r--r-- fs/btrfs/file.c | 45
-rw-r--r-- fs/btrfs/free-space-cache.c | 48
-rw-r--r-- fs/btrfs/inode.c | 147
-rw-r--r-- fs/btrfs/ioctl.c | 18
-rw-r--r-- fs/btrfs/qgroup.c | 455
-rw-r--r-- fs/btrfs/qgroup.h | 8
-rw-r--r-- fs/btrfs/ref-verify.c | 8
-rw-r--r-- fs/btrfs/relocation.c | 74
-rw-r--r-- fs/btrfs/scrub.c | 34
-rw-r--r-- fs/btrfs/send.c | 24
-rw-r--r-- fs/btrfs/super.c | 6
-rw-r--r-- fs/btrfs/tests/extent-io-tests.c | 10
-rw-r--r-- fs/btrfs/tests/extent-map-tests.c | 4
-rw-r--r-- fs/btrfs/transaction.c | 40
-rw-r--r-- fs/btrfs/tree-checker.c | 14
-rw-r--r-- fs/btrfs/tree-log.c | 91
-rw-r--r-- fs/btrfs/tree-log.h | 2
-rw-r--r-- fs/btrfs/volumes.c | 117
-rw-r--r-- fs/btrfs/volumes.h | 9
-rw-r--r-- fs/buffer.c | 24
-rw-r--r-- fs/ceph/acl.c | 13
-rw-r--r-- fs/ceph/addr.c | 2
-rw-r--r-- fs/ceph/caps.c | 21
-rw-r--r-- fs/ceph/file.c | 573
-rw-r--r-- fs/ceph/inode.c | 13
-rw-r--r-- fs/ceph/mds_client.c | 9
-rw-r--r-- fs/ceph/super.c | 13
-rw-r--r-- fs/ceph/super.h | 3
-rw-r--r-- fs/ceph/xattr.c | 3
-rw-r--r-- fs/cifs/cifs_debug.c | 17
-rw-r--r-- fs/cifs/cifs_debug.h | 28
-rw-r--r-- fs/cifs/cifs_dfs_ref.c | 7
-rw-r--r-- fs/cifs/cifs_fs_sb.h | 1
-rw-r--r-- fs/cifs/cifs_ioctl.h | 11
-rw-r--r-- fs/cifs/cifsfs.c | 30
-rw-r--r-- fs/cifs/cifsfs.h | 2
-rw-r--r-- fs/cifs/cifsglob.h | 14
-rw-r--r-- fs/cifs/cifsproto.h | 2
-rw-r--r-- fs/cifs/cifssmb.c | 23
-rw-r--r-- fs/cifs/connect.c | 13
-rw-r--r-- fs/cifs/file.c | 56
-rw-r--r-- fs/cifs/inode.c | 73
-rw-r--r-- fs/cifs/ioctl.c | 48
-rw-r--r-- fs/cifs/misc.c | 2
-rw-r--r-- fs/cifs/smb2glob.h | 2
-rw-r--r-- fs/cifs/smb2inode.c | 332
-rw-r--r-- fs/cifs/smb2maperror.c | 2
-rw-r--r-- fs/cifs/smb2ops.c | 212
-rw-r--r-- fs/cifs/smb2pdu.c | 260
-rw-r--r-- fs/cifs/smb2pdu.h | 13
-rw-r--r-- fs/cifs/smb2proto.h | 28
-rw-r--r-- fs/cifs/smbdirect.c | 38
-rw-r--r-- fs/cifs/trace.h | 109
-rw-r--r-- fs/cifs/transport.c | 78
-rw-r--r-- fs/compat_binfmt_elf.c | 2
-rw-r--r-- fs/compat_ioctl.c | 369
-rw-r--r-- fs/coredump.c | 2
-rw-r--r-- fs/cramfs/inode.c | 12
-rw-r--r-- fs/crypto/fscrypt_private.h | 4
-rw-r--r-- fs/crypto/keyinfo.c | 10
-rw-r--r-- fs/dax.c | 917
-rw-r--r-- fs/dcache.c | 40
-rw-r--r-- fs/ecryptfs/inode.c | 11
-rw-r--r-- fs/exec.c | 8
-rw-r--r-- fs/ext2/acl.c | 4
-rw-r--r-- fs/ext2/ext2.h | 4
-rw-r--r-- fs/ext2/super.c | 5
-rw-r--r-- fs/ext4/acl.c | 4
-rw-r--r-- fs/ext4/ext4.h | 26
-rw-r--r-- fs/ext4/ext4_extents.h | 13
-rw-r--r-- fs/ext4/extents.c | 595
-rw-r--r-- fs/ext4/extents_status.c | 654
-rw-r--r-- fs/ext4/extents_status.h | 80
-rw-r--r-- fs/ext4/inline.c | 2
-rw-r--r-- fs/ext4/inode.c | 144
-rw-r--r-- fs/ext4/ioctl.c | 97
-rw-r--r-- fs/ext4/mballoc.c | 14
-rw-r--r-- fs/ext4/move_extent.c | 8
-rw-r--r-- fs/ext4/namei.c | 2
-rw-r--r-- fs/ext4/page-io.c | 2
-rw-r--r-- fs/ext4/super.c | 81
-rw-r--r-- fs/f2fs/acl.c | 12
-rw-r--r-- fs/f2fs/acl.h | 5
-rw-r--r-- fs/f2fs/checkpoint.c | 94
-rw-r--r-- fs/f2fs/data.c | 226
-rw-r--r-- fs/f2fs/debug.c | 35
-rw-r--r-- fs/f2fs/dir.c | 32
-rw-r--r-- fs/f2fs/extent_cache.c | 134
-rw-r--r-- fs/f2fs/f2fs.h | 255
-rw-r--r-- fs/f2fs/file.c | 196
-rw-r--r-- fs/f2fs/gc.c | 115
-rw-r--r-- fs/f2fs/gc.h | 5
-rw-r--r-- fs/f2fs/hash.c | 5
-rw-r--r-- fs/f2fs/inline.c | 10
-rw-r--r-- fs/f2fs/inode.c | 28
-rw-r--r-- fs/f2fs/namei.c | 57
-rw-r--r-- fs/f2fs/node.c | 89
-rw-r--r-- fs/f2fs/node.h | 5
-rw-r--r-- fs/f2fs/recovery.c | 125
-rw-r--r-- fs/f2fs/segment.c | 240
-rw-r--r-- fs/f2fs/segment.h | 20
-rw-r--r-- fs/f2fs/shrinker.c | 5
-rw-r--r-- fs/f2fs/super.c | 415
-rw-r--r-- fs/f2fs/sysfs.c | 17
-rw-r--r-- fs/f2fs/trace.c | 5
-rw-r--r-- fs/f2fs/trace.h | 5
-rw-r--r-- fs/f2fs/xattr.c | 5
-rw-r--r-- fs/f2fs/xattr.h | 5
-rw-r--r-- fs/fat/dir.c | 6
-rw-r--r-- fs/fat/fat.h | 4
-rw-r--r-- fs/fat/file.c | 17
-rw-r--r-- fs/fat/inode.c | 9
-rw-r--r-- fs/fat/misc.c | 91
-rw-r--r-- fs/fat/namei_msdos.c | 17
-rw-r--r-- fs/fat/namei_vfat.c | 15
-rw-r--r-- fs/fcntl.c | 2
-rw-r--r-- fs/fs-writeback.c | 25
-rw-r--r-- fs/fuse/Makefile | 2
-rw-r--r-- fs/fuse/control.c | 34
-rw-r--r-- fs/fuse/dev.c | 221
-rw-r--r-- fs/fuse/dir.c | 381
-rw-r--r-- fs/fuse/file.c | 158
-rw-r--r-- fs/fuse/fuse_i.h | 124
-rw-r--r-- fs/fuse/inode.c | 53
-rw-r--r-- fs/fuse/readdir.c | 569
-rw-r--r-- fs/gfs2/aops.c | 2
-rw-r--r-- fs/gfs2/bmap.c | 4
-rw-r--r-- fs/gfs2/dir.c | 28
-rw-r--r-- fs/gfs2/file.c | 18
-rw-r--r-- fs/gfs2/glock.c | 17
-rw-r--r-- fs/gfs2/incore.h | 9
-rw-r--r-- fs/gfs2/lock_dlm.c | 10
-rw-r--r-- fs/gfs2/log.c | 11
-rw-r--r-- fs/gfs2/lops.c | 2
-rw-r--r-- fs/gfs2/main.c | 2
-rw-r--r-- fs/gfs2/ops_fstype.c | 5
-rw-r--r-- fs/gfs2/quota.c | 2
-rw-r--r-- fs/gfs2/rgrp.c | 201
-rw-r--r-- fs/gfs2/rgrp.h | 11
-rw-r--r-- fs/gfs2/super.c | 4
-rw-r--r-- fs/gfs2/trans.c | 15
-rw-r--r-- fs/gfs2/util.c | 16
-rw-r--r-- fs/gfs2/util.h | 2
-rw-r--r-- fs/gfs2/xattr.c | 18
-rw-r--r-- fs/hfs/brec.c | 5
-rw-r--r-- fs/hfs/btree.c | 41
-rw-r--r-- fs/hfs/btree.h | 1
-rw-r--r-- fs/hfs/catalog.c | 16
-rw-r--r-- fs/hfs/extent.c | 10
-rw-r--r-- fs/hfs/inode.c | 2
-rw-r--r-- fs/hfsplus/attributes.c | 10
-rw-r--r-- fs/hfsplus/brec.c | 5
-rw-r--r-- fs/hfsplus/btree.c | 44
-rw-r--r-- fs/hfsplus/catalog.c | 24
-rw-r--r-- fs/hfsplus/extents.c | 8
-rw-r--r-- fs/hfsplus/hfsplus_fs.h | 2
-rw-r--r-- fs/hfsplus/inode.c | 1
-rw-r--r-- fs/inode.c | 4
-rw-r--r-- fs/ioctl.c | 3
-rw-r--r-- fs/iomap.c | 2
-rw-r--r-- fs/isofs/dir.c | 2
-rw-r--r-- fs/jbd2/checkpoint.c | 4
-rw-r--r-- fs/jffs2/background.c | 2
-rw-r--r-- fs/jffs2/super.c | 4
-rw-r--r-- fs/jfs/acl.c | 4
-rw-r--r-- fs/jfs/inode.c | 1
-rw-r--r-- fs/jfs/super.c | 2
-rw-r--r-- fs/kernfs/mount.c | 3
-rw-r--r-- fs/kernfs/symlink.c | 5
-rw-r--r-- fs/lockd/host.c | 2
-rw-r--r-- fs/namespace.c | 4
-rw-r--r-- fs/nfs/blocklayout/blocklayout.c | 2
-rw-r--r-- fs/nfs/dns_resolve.c | 15
-rw-r--r-- fs/nfsd/cache.h | 20
-rw-r--r-- fs/nfsd/export.c | 14
-rw-r--r-- fs/nfsd/export.h | 2
-rw-r--r-- fs/nfsd/netns.h | 8
-rw-r--r-- fs/nfsd/nfs4callback.c | 98
-rw-r--r-- fs/nfsd/nfs4idmap.c | 11
-rw-r--r-- fs/nfsd/nfs4proc.c | 289
-rw-r--r-- fs/nfsd/nfs4state.c | 41
-rw-r--r-- fs/nfsd/nfs4xdr.c | 50
-rw-r--r-- fs/nfsd/nfscache.c | 142
-rw-r--r-- fs/nfsd/nfsctl.c | 1
-rw-r--r-- fs/nfsd/state.h | 10
-rw-r--r-- fs/nfsd/vfs.c | 5
-rw-r--r-- fs/nfsd/xdr4.h | 28
-rw-r--r-- fs/nfsd/xdr4cb.h | 10
-rw-r--r-- fs/nilfs2/btnode.c | 26
-rw-r--r-- fs/nilfs2/page.c | 29
-rw-r--r-- fs/notify/fanotify/fanotify.c | 17
-rw-r--r-- fs/notify/fanotify/fanotify.h | 4
-rw-r--r-- fs/notify/fanotify/fanotify_user.c | 103
-rw-r--r-- fs/notify/fdinfo.c | 29
-rw-r--r-- fs/notify/fsnotify.c | 42
-rw-r--r-- fs/notify/fsnotify.h | 11
-rw-r--r-- fs/notify/inotify/inotify_user.c | 2
-rw-r--r-- fs/notify/mark.c | 43
-rw-r--r-- fs/ocfs2/alloc.c | 4
-rw-r--r-- fs/ocfs2/aops.c | 3
-rw-r--r-- fs/ocfs2/dlm/dlmdebug.c | 2
-rw-r--r-- fs/ocfs2/dlm/dlmthread.c | 2
-rw-r--r-- fs/ocfs2/refcounttree.c | 2
-rw-r--r-- fs/orangefs/acl.c | 4
-rw-r--r-- fs/orangefs/inode.c | 8
-rw-r--r-- fs/orangefs/namei.c | 8
-rw-r--r-- fs/orangefs/orangefs-sysfs.c | 2
-rw-r--r-- fs/overlayfs/copy_up.c | 213
-rw-r--r-- fs/overlayfs/dir.c | 34
-rw-r--r-- fs/overlayfs/inode.c | 17
-rw-r--r-- fs/overlayfs/namei.c | 4
-rw-r--r-- fs/overlayfs/overlayfs.h | 14
-rw-r--r-- fs/overlayfs/super.c | 68
-rw-r--r-- fs/overlayfs/util.c | 46
-rw-r--r-- fs/proc/base.c | 18
-rw-r--r-- fs/proc/inode.c | 3
-rw-r--r-- fs/proc/kcore.c | 2
-rw-r--r-- fs/proc/loadavg.c | 3
-rw-r--r-- fs/proc/meminfo.c | 16
-rw-r--r-- fs/proc/page.c | 2
-rw-r--r-- fs/proc/task_mmu.c | 6
-rw-r--r-- fs/proc/vmcore.c | 38
-rw-r--r-- fs/pstore/Kconfig | 1
-rw-r--r-- fs/pstore/inode.c | 11
-rw-r--r-- fs/pstore/internal.h | 5
-rw-r--r-- fs/pstore/platform.c | 75
-rw-r--r-- fs/pstore/ram.c | 18
-rw-r--r-- fs/pstore/ram_core.c | 11
-rw-r--r-- fs/read_write.c | 2
-rw-r--r-- fs/reiserfs/Makefile | 9
-rw-r--r-- fs/reiserfs/xattr.c | 7
-rw-r--r-- fs/select.c | 20
-rw-r--r-- fs/signalfd.c | 6
-rw-r--r-- fs/stat.c | 3
-rw-r--r-- fs/super.c | 2
-rw-r--r-- fs/timerfd.c | 12
-rw-r--r-- fs/udf/balloc.c | 30
-rw-r--r-- fs/udf/super.c | 139
-rw-r--r-- fs/udf/udf_sb.h | 10
-rw-r--r-- fs/userfaultfd.c | 8
-rw-r--r-- fs/utimes.c | 73
-rw-r--r-- fs/xfs/libxfs/xfs_attr.c | 236
-rw-r--r-- fs/xfs/libxfs/xfs_attr.h (renamed from fs/xfs/xfs_attr.h) | 2
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 70
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.h | 1
-rw-r--r-- fs/xfs/libxfs/xfs_format.h | 8
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c | 5
-rw-r--r-- fs/xfs/scrub/repair.c | 128
-rw-r--r-- fs/xfs/scrub/scrub.c | 13
-rw-r--r-- fs/xfs/xfs_aops.c | 4
-rw-r--r-- fs/xfs/xfs_aops.h | 14
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 61
-rw-r--r-- fs/xfs/xfs_buf.c | 109
-rw-r--r-- fs/xfs/xfs_buf.h | 2
-rw-r--r-- fs/xfs/xfs_fsops.c | 50
-rw-r--r-- fs/xfs/xfs_ioctl.c | 8
-rw-r--r-- fs/xfs/xfs_iomap.c | 53
-rw-r--r-- fs/xfs/xfs_reflink.c | 33
-rw-r--r-- fs/xfs/xfs_reflink.h | 4
-rw-r--r-- fs/xfs/xfs_stats.c | 52
-rw-r--r-- fs/xfs/xfs_stats.h | 28
-rw-r--r-- fs/xfs/xfs_super.c | 38
-rw-r--r-- fs/xfs/xfs_trans.h | 1
-rw-r--r-- fs/xfs/xfs_trans_ail.c | 28
-rw-r--r-- fs/xfs/xfs_trans_buf.c | 42
292 files changed, 9767 insertions, 5135 deletions
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 082d227fa56b..6261719f6f2a 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -276,7 +276,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
switch (handler->flags) {
case ACL_TYPE_ACCESS:
if (acl) {
- struct iattr iattr;
+ struct iattr iattr = { 0 };
struct posix_acl *old_acl = acl;
retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 89bac3d2f05b..619128b55837 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -61,6 +61,8 @@ enum {
Opt_cache_loose, Opt_fscache, Opt_mmap,
/* Access options */
Opt_access, Opt_posixacl,
+ /* Lock timeout option */
+ Opt_locktimeout,
/* Error token */
Opt_err
};
@@ -80,6 +82,7 @@ static const match_table_t tokens = {
{Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
{Opt_posixacl, "posixacl"},
+ {Opt_locktimeout, "locktimeout=%u"},
{Opt_err, NULL}
};
@@ -187,6 +190,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
#ifdef CONFIG_9P_FSCACHE
v9ses->cachetag = NULL;
#endif
+ v9ses->session_lock_timeout = P9_LOCK_TIMEOUT;
if (!opts)
return 0;
@@ -359,6 +363,23 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
#endif
break;
+ case Opt_locktimeout:
+ r = match_int(&args[0], &option);
+ if (r < 0) {
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
+ ret = r;
+ continue;
+ }
+ if (option < 1) {
+ p9_debug(P9_DEBUG_ERROR,
+ "locktimeout must be a greater than zero integer.\n");
+ ret = -EINVAL;
+ continue;
+ }
+ v9ses->session_lock_timeout = (long)option * HZ;
+ break;
+
default:
continue;
}
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 982e017acadb..129e5243a6bf 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -116,6 +116,7 @@ struct v9fs_session_info {
struct p9_client *clnt; /* 9p client */
struct list_head slist; /* list of sessions registered with v9fs */
struct rw_semaphore rename_sem;
+ long session_lock_timeout; /* retry interval for blocking locks */
};
/* cache_validity flags */
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index d5db3c968a03..00745147329d 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -76,15 +76,6 @@ static inline int dt_type(struct p9_wstat *mistat)
return rettype;
}
-static void p9stat_init(struct p9_wstat *stbuf)
-{
- stbuf->name = NULL;
- stbuf->uid = NULL;
- stbuf->gid = NULL;
- stbuf->muid = NULL;
- stbuf->extension = NULL;
-}
-
/**
* v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir
* @filp: opened file structure
@@ -114,7 +105,6 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
int err = 0;
struct p9_fid *fid;
int buflen;
- int reclen = 0;
struct p9_rdir *rdir;
struct kvec kvec;
@@ -145,15 +135,12 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
rdir->tail = n;
}
while (rdir->head < rdir->tail) {
- p9stat_init(&st);
err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
rdir->tail - rdir->head, &st);
- if (err) {
+ if (err <= 0) {
p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
- p9stat_free(&st);
return -EIO;
}
- reclen = st.size+2;
over = !dir_emit(ctx, st.name, strlen(st.name),
v9fs_qid2ino(&st.qid), dt_type(&st));
@@ -161,8 +148,8 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
if (over)
return 0;
- rdir->head += reclen;
- ctx->pos += reclen;
+ rdir->head += err;
+ ctx->pos += err;
}
}
}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 5f2e48d41d72..a25efa782fcc 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -154,6 +154,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
uint8_t status = P9_LOCK_ERROR;
int res = 0;
unsigned char fl_type;
+ struct v9fs_session_info *v9ses;
fid = filp->private_data;
BUG_ON(fid == NULL);
@@ -189,6 +190,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
if (IS_SETLKW(cmd))
flock.flags = P9_LOCK_FLAGS_BLOCK;
+ v9ses = v9fs_inode2v9ses(file_inode(filp));
+
/*
* if its a blocked request and we get P9_LOCK_BLOCKED as the status
* for lock request, keep on trying
@@ -202,8 +205,17 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
break;
if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
break;
- if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
+ if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
+ != 0)
break;
+ /*
+ * p9_client_lock_dotl overwrites flock.client_id with the
+ * server message, free and reuse the client name
+ */
+ if (flock.client_id != fid->clnt->name) {
+ kfree(flock.client_id);
+ flock.client_id = fid->clnt->name;
+ }
}
/* map 9p status to VFS status */
@@ -216,7 +228,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
break;
default:
WARN_ONCE(1, "unknown lock status code: %d\n", status);
- /* fallthough */
+ /* fall through */
case P9_LOCK_ERROR:
case P9_LOCK_GRACE:
res = -ENOLCK;
@@ -235,6 +247,8 @@ out_unlock:
locks_lock_file_wait(filp, fl);
fl->fl_type = fl_type;
}
+ if (flock.client_id != fid->clnt->name)
+ kfree(flock.client_id);
out:
return res;
}
@@ -269,7 +283,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
res = p9_client_getlock_dotl(fid, &glock);
if (res < 0)
- return res;
+ goto out;
/* map 9p lock type to os lock type */
switch (glock.type) {
case P9_LOCK_TYPE_RDLCK:
@@ -290,7 +304,9 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
fl->fl_end = glock.start + glock.length - 1;
fl->fl_pid = -glock.proc_id;
}
- kfree(glock.client_id);
+out:
+ if (glock.client_id != fid->clnt->name)
+ kfree(glock.client_id);
return res;
}
diff --git a/fs/aio.c b/fs/aio.c
index b9350f3360c6..301e6314183b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2135,12 +2135,12 @@ COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
compat_long_t, min_nr,
compat_long_t, nr,
struct io_event __user *, events,
- struct compat_timespec __user *, timeout)
+ struct old_timespec32 __user *, timeout)
{
struct timespec64 t;
int ret;
- if (timeout && compat_get_timespec64(&t, timeout))
+ if (timeout && get_old_timespec32(&t, timeout))
return -EFAULT;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
@@ -2160,7 +2160,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
compat_long_t, min_nr,
compat_long_t, nr,
struct io_event __user *, events,
- struct compat_timespec __user *, timeout,
+ struct old_timespec32 __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
struct __compat_aio_sigset ksig = { NULL, };
@@ -2168,7 +2168,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
struct timespec64 t;
int ret;
- if (timeout && compat_get_timespec64(&t, timeout))
+ if (timeout && get_old_timespec32(&t, timeout))
return -EFAULT;
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index efae2fb0930a..54207327f98f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1580,7 +1580,7 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
}
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
- const siginfo_t *siginfo)
+ const kernel_siginfo_t *siginfo)
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
@@ -1782,7 +1782,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
- const siginfo_t *siginfo, struct pt_regs *regs)
+ const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
struct task_struct *dump_task = current;
const struct user_regset_view *view = task_user_regset_view(dump_task);
@@ -2031,7 +2031,7 @@ static int elf_note_info_init(struct elf_note_info *info)
static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
- const siginfo_t *siginfo, struct pt_regs *regs)
+ const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
struct list_head *t;
struct core_thread *ct;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ae750b1574a2..68ebe188446a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -112,11 +112,11 @@ static int find_extent_in_eb(const struct extent_buffer *eb,
}
struct preftree {
- struct rb_root root;
+ struct rb_root_cached root;
unsigned int count;
};
-#define PREFTREE_INIT { .root = RB_ROOT, .count = 0 }
+#define PREFTREE_INIT { .root = RB_ROOT_CACHED, .count = 0 }
struct preftrees {
struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
@@ -225,14 +225,15 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
struct prelim_ref *newref,
struct share_check *sc)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node **p;
struct rb_node *parent = NULL;
struct prelim_ref *ref;
int result;
+ bool leftmost = true;
root = &preftree->root;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p) {
parent = *p;
@@ -242,6 +243,7 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
p = &(*p)->rb_left;
} else if (result > 0) {
p = &(*p)->rb_right;
+ leftmost = false;
} else {
/* Identical refs, merge them and free @newref */
struct extent_inode_elem *eie = ref->inode_list;
@@ -272,7 +274,7 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
preftree->count++;
trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
rb_link_node(&newref->rbnode, parent, p);
- rb_insert_color(&newref->rbnode, root);
+ rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
/*
@@ -283,11 +285,11 @@ static void prelim_release(struct preftree *preftree)
{
struct prelim_ref *ref, *next_ref;
- rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root,
- rbnode)
+ rbtree_postorder_for_each_entry_safe(ref, next_ref,
+ &preftree->root.rb_root, rbnode)
free_pref(ref);
- preftree->root = RB_ROOT;
+ preftree->root = RB_ROOT_CACHED;
preftree->count = 0;
}
@@ -627,7 +629,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
* freeing the entire indirect tree when we're done. In some test
* cases, the tree can grow quite large (~200k objects).
*/
- while ((rnode = rb_first(&preftrees->indirect.root))) {
+ while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
struct prelim_ref *ref;
ref = rb_entry(rnode, struct prelim_ref, rbnode);
@@ -637,7 +639,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
goto out;
}
- rb_erase(&ref->rbnode, &preftrees->indirect.root);
+ rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
preftrees->indirect.count--;
if (ref->count == 0) {
@@ -717,9 +719,9 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
struct preftree *tree = &preftrees->indirect_missing_keys;
struct rb_node *node;
- while ((node = rb_first(&tree->root))) {
+ while ((node = rb_first_cached(&tree->root))) {
ref = rb_entry(node, struct prelim_ref, rbnode);
- rb_erase(node, &tree->root);
+ rb_erase_cached(node, &tree->root);
BUG_ON(ref->parent); /* should not be a direct ref */
BUG_ON(ref->key_for_search.type);
@@ -769,7 +771,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
spin_lock(&head->lock);
- for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
+ for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
node = rb_entry(n, struct btrfs_delayed_ref_node,
ref_node);
if (node->seq > seq)
@@ -1229,14 +1231,14 @@ again:
if (ret)
goto out;
- WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));
+ WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
extent_item_pos, total_refs, sc, ignore_offset);
if (ret)
goto out;
- WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root));
+ WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
/*
* This walks the tree of merged and resolved refs. Tree blocks are
@@ -1245,7 +1247,7 @@ again:
*
* We release the entire tree in one go before returning.
*/
- node = rb_first(&preftrees.direct.root);
+ node = rb_first_cached(&preftrees.direct.root);
while (node) {
ref = rb_entry(node, struct prelim_ref, rbnode);
node = rb_next(&ref->rbnode);
@@ -1468,7 +1470,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
struct seq_list elem = SEQ_LIST_INIT(elem);
int ret = 0;
struct share_check shared = {
- .root_objectid = root->objectid,
+ .root_objectid = root->root_key.objectid,
.inum = inum,
.share_count = 0,
};
@@ -2031,7 +2033,8 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
/* path must be released before calling iterate()! */
btrfs_debug(fs_root->fs_info,
"following ref at offset %u for inode %llu in tree %llu",
- cur, found_key.objectid, fs_root->objectid);
+ cur, found_key.objectid,
+ fs_root->root_key.objectid);
ret = iterate(parent, name_len,
(unsigned long)(iref + 1), eb, ctx);
if (ret)
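
The rb_root to rb_root_cached conversions above (and the matching ones in delayed-inode.c and delayed-ref.c further down) all use the same kernel rbtree idiom: remember whether the descent ever went right, and feed that into rb_insert_color_cached() so that rb_first_cached() can return the leftmost node in O(1). A minimal standalone sketch of that idiom, using a hypothetical demo_item type rather than anything from this patch:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_item {                      /* hypothetical example type */
        struct rb_node node;
        u64 key;
};

/*
 * Insert @ins, keeping the cached-leftmost pointer valid. Returns an
 * existing item with the same key, or NULL if @ins was inserted.
 */
static struct demo_item *demo_insert(struct rb_root_cached *root,
                                     struct demo_item *ins)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        bool leftmost = true;

        while (*p) {
                struct demo_item *cur;

                parent = *p;
                cur = rb_entry(parent, struct demo_item, node);
                if (ins->key < cur->key) {
                        p = &(*p)->rb_left;
                } else if (ins->key > cur->key) {
                        p = &(*p)->rb_right;
                        leftmost = false;   /* went right at least once */
                } else {
                        return cur;         /* key already present */
                }
        }
        rb_link_node(&ins->node, parent, p);
        rb_insert_color_cached(&ins->node, root, leftmost);
        return NULL;
}
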
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 1343ac57b438..97d91e55b70a 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -206,7 +206,7 @@ static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
static inline unsigned long btrfs_inode_hash(u64 objectid,
const struct btrfs_root *root)
{
- u64 h = objectid ^ (root->objectid * GOLDEN_RATIO_PRIME);
+ u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);
#if BITS_PER_LONG == 32
h = (h >> 32) ^ (h & 0xffffffff);
@@ -339,15 +339,15 @@ static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
struct btrfs_root *root = inode->root;
/* Output minus objectid, which is more meaningful */
- if (root->objectid >= BTRFS_LAST_FREE_OBJECTID)
+ if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum 0x%08x expected csum 0x%08x mirror %d",
- root->objectid, btrfs_ino(inode),
+ root->root_key.objectid, btrfs_ino(inode),
logical_start, csum, csum_expected, mirror_num);
else
btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum 0x%08x expected csum 0x%08x mirror %d",
- root->objectid, btrfs_ino(inode),
+ root->root_key.objectid, btrfs_ino(inode),
logical_start, csum, csum_expected, mirror_num);
}
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 833cf3c35b4d..2e43fba44035 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1594,6 +1594,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
{
unsigned int num_pages;
unsigned int i;
+ size_t size;
u64 dev_bytenr;
int ret;
@@ -1608,9 +1609,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
PAGE_SHIFT;
- block_ctx->mem_to_free = kcalloc(sizeof(*block_ctx->datav) +
- sizeof(*block_ctx->pagev),
- num_pages, GFP_NOFS);
+ size = sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev);
+ block_ctx->mem_to_free = kcalloc(num_pages, size, GFP_NOFS);
if (!block_ctx->mem_to_free)
return -ENOMEM;
block_ctx->datav = block_ctx->mem_to_free;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9bfa66592aa7..2955a4ea2fa8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -437,10 +437,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (pg_index > end_index)
break;
- rcu_read_lock();
- page = radix_tree_lookup(&mapping->i_pages, pg_index);
- rcu_read_unlock();
- if (page && !radix_tree_exceptional_entry(page)) {
+ page = xa_load(&mapping->i_pages, pg_index);
+ if (page && !xa_is_value(page)) {
misses++;
if (misses > 4)
break;
@@ -528,7 +526,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_io_tree *tree;
struct extent_map_tree *em_tree;
struct compressed_bio *cb;
unsigned long compressed_len;
@@ -545,7 +542,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int faili = 0;
u32 *sums;
- tree = &BTRFS_I(inode)->io_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
/* we need the actual starting offset of this extent in the file */
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d436fb4c002e..539901fb5165 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -52,42 +52,6 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
}
}
-/*
- * reset all the locked nodes in the patch to spinning locks.
- *
- * held is used to keep lockdep happy, when lockdep is enabled
- * we set held to a blocking lock before we go around and
- * retake all the spinlocks in the path. You can safely use NULL
- * for held
- */
-noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held, int held_rw)
-{
- int i;
-
- if (held) {
- btrfs_set_lock_blocking_rw(held, held_rw);
- if (held_rw == BTRFS_WRITE_LOCK)
- held_rw = BTRFS_WRITE_LOCK_BLOCKING;
- else if (held_rw == BTRFS_READ_LOCK)
- held_rw = BTRFS_READ_LOCK_BLOCKING;
- }
- btrfs_set_path_blocking(p);
-
- for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
- if (p->nodes[i] && p->locks[i]) {
- btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
- if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
- p->locks[i] = BTRFS_WRITE_LOCK;
- else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
- p->locks[i] = BTRFS_READ_LOCK;
- }
- }
-
- if (held)
- btrfs_clear_lock_blocking_rw(held, held_rw);
-}
-
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
@@ -207,7 +171,7 @@ static void add_root_to_dirty_list(struct btrfs_root *root)
spin_lock(&fs_info->trans_lock);
if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
/* Want the extent tree to be the last on the list */
- if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
+ if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
list_move_tail(&root->dirty_list,
&fs_info->dirty_cowonly_roots);
else
@@ -1050,9 +1014,26 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
parent_start = parent->start;
+ /*
+ * If we are COWing a node/leaf from the extent, chunk or device trees,
+ * make sure that we do not finish block group creation of pending block
+ * groups. We do this to avoid a deadlock.
+ * COWing can result in allocation of a new chunk, and flushing pending
+ * block groups (btrfs_create_pending_block_groups()) can be triggered
+ * when finishing allocation of a new chunk. Creation of a pending block
+ * group modifies the extent, chunk and device trees, therefore we could
+ * deadlock with ourselves since we are holding a lock on an extent
+ * buffer that btrfs_create_pending_block_groups() may try to COW later.
+ */
+ if (root == fs_info->extent_root ||
+ root == fs_info->chunk_root ||
+ root == fs_info->dev_root)
+ trans->can_flush_pending_bgs = false;
+
cow = btrfs_alloc_tree_block(trans, root, parent_start,
root->root_key.objectid, &disk_key, level,
search_start, empty_size);
+ trans->can_flush_pending_bgs = true;
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -1306,7 +1287,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
}
}
- btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
@@ -1815,8 +1795,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
int orig_slot = path->slots[level];
u64 orig_ptr;
- if (level == 0)
- return 0;
+ ASSERT(level > 0);
mid = path->nodes[level];
@@ -2483,7 +2462,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
btrfs_set_path_blocking(p);
reada_for_balance(fs_info, p, level);
sret = split_node(trans, root, p, level);
- btrfs_clear_path_blocking(p, NULL, 0);
BUG_ON(sret > 0);
if (sret) {
@@ -2504,7 +2482,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
btrfs_set_path_blocking(p);
reada_for_balance(fs_info, p, level);
sret = balance_level(trans, root, p, level);
- btrfs_clear_path_blocking(p, NULL, 0);
if (sret) {
ret = sret;
@@ -2789,7 +2766,10 @@ again:
}
cow_done:
p->nodes[level] = b;
- btrfs_clear_path_blocking(p, NULL, 0);
+ /*
+ * Leave path with blocking locks to avoid massive
+ * lock context switch, this is made on purpose.
+ */
/*
* we have a lock on b and as long as we aren't changing
@@ -2871,8 +2851,6 @@ cow_done:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_WRITE_LOCK);
}
p->locks[level] = BTRFS_WRITE_LOCK;
} else {
@@ -2880,8 +2858,6 @@ cow_done:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_read_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_READ_LOCK);
}
p->locks[level] = BTRFS_READ_LOCK;
}
@@ -2900,7 +2876,6 @@ cow_done:
btrfs_set_path_blocking(p);
err = split_leaf(trans, root, key,
p, ins_len, ret == 0);
- btrfs_clear_path_blocking(p, NULL, 0);
BUG_ON(err > 0);
if (err) {
@@ -2910,7 +2885,7 @@ cow_done:
}
if (!p->search_for_split)
unlock_up(p, level, lowest_unlock,
- min_write_lock_level, &write_lock_level);
+ min_write_lock_level, NULL);
goto done;
}
}
@@ -2961,13 +2936,16 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
again:
b = get_old_root(root, time_seq);
+ if (!b) {
+ ret = -EIO;
+ goto done;
+ }
level = btrfs_header_level(b);
p->locks[level] = BTRFS_READ_LOCK;
while (b) {
level = btrfs_header_level(b);
p->nodes[level] = b;
- btrfs_clear_path_blocking(p, NULL, 0);
/*
* we have a lock on b and as long as we aren't changing
@@ -3013,8 +2991,6 @@ again:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_read_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_READ_LOCK);
}
b = tree_mod_log_rewind(fs_info, p, b, time_seq);
if (!b) {
@@ -5198,7 +5174,6 @@ find_next_key:
path->locks[level - 1] = BTRFS_READ_LOCK;
path->nodes[level - 1] = cur;
unlock_up(path, level, 1, 0, NULL);
- btrfs_clear_path_blocking(path, NULL, 0);
}
out:
path->keep_locks = keep_locks;
@@ -5783,8 +5758,6 @@ again:
if (!ret) {
btrfs_set_path_blocking(path);
btrfs_tree_read_lock(next);
- btrfs_clear_path_blocking(path, next,
- BTRFS_READ_LOCK);
}
next_rw_lock = BTRFS_READ_LOCK;
}
@@ -5820,8 +5793,6 @@ again:
if (!ret) {
btrfs_set_path_blocking(path);
btrfs_tree_read_lock(next);
- btrfs_clear_path_blocking(path, next,
- BTRFS_READ_LOCK);
}
next_rw_lock = BTRFS_READ_LOCK;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2cddfe7806a4..68ca41dbbef3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -41,12 +41,6 @@ extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-#define STATIC noinline
-#else
-#define STATIC static noinline
-#endif
-
#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
#define BTRFS_MAX_MIRRORS 3
@@ -367,11 +361,13 @@ struct btrfs_dev_replace {
struct mutex lock_finishing_cancel_unmount;
rwlock_t lock;
- atomic_t read_locks;
atomic_t blocking_readers;
wait_queue_head_t read_lock_wq;
struct btrfs_scrub_progress scrub_progress;
+
+ struct percpu_counter bio_counter;
+ wait_queue_head_t replace_wait;
};
/* For raid type sysfs entries */
@@ -1094,9 +1090,6 @@ struct btrfs_fs_info {
/* device replace state */
struct btrfs_dev_replace dev_replace;
- struct percpu_counter bio_counter;
- wait_queue_head_t replace_wait;
-
struct semaphore uuid_tree_rescan_sem;
/* Used to reclaim the metadata space in the background. */
@@ -1202,18 +1195,12 @@ struct btrfs_root {
int last_log_commit;
pid_t log_start_pid;
- u64 objectid;
u64 last_trans;
u32 type;
u64 highest_objectid;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- /* only used with CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */
- u64 alloc_bytenr;
-#endif
-
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
@@ -1286,6 +1273,10 @@ struct btrfs_root {
spinlock_t qgroup_meta_rsv_lock;
u64 qgroup_meta_rsv_pertrans;
u64 qgroup_meta_rsv_prealloc;
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ u64 alloc_bytenr;
+#endif
};
struct btrfs_file_private {
@@ -2607,10 +2598,8 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
+int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@@ -2771,7 +2760,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
- int update_size);
+ bool update_size);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *dest, u64 num_bytes,
int min_factor);
@@ -2877,8 +2866,6 @@ void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
-void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held, int held_rw);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -3021,8 +3008,7 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
/* dir-item.c */
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
const char *name, int name_len);
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
int name_len, struct btrfs_inode *dir,
struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
@@ -3180,8 +3166,8 @@ void __cold btrfs_destroy_cachep(void);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
- struct page *page, size_t pg_offset,
- u64 start, u64 end, int create);
+ struct page *page, size_t pg_offset,
+ u64 start, u64 end, int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
@@ -3201,9 +3187,6 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
extern const struct dentry_operations btrfs_dentry_operations;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-void btrfs_test_inode_set_ops(struct inode *inode);
-#endif
/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -3716,18 +3699,19 @@ static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+void btrfs_test_inode_set_ops(struct inode *inode);
void btrfs_test_destroy_inode(struct inode *inode);
-#endif
static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
- &fs_info->fs_state)))
- return 1;
-#endif
+ return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+}
+#else
+static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
+{
return 0;
}
+#endif
static inline void cond_wake_up(struct wait_queue_head *wq)
{
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index f51b509f2d9b..c669f250d4a0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -42,8 +42,8 @@ static inline void btrfs_init_delayed_node(
delayed_node->root = root;
delayed_node->inode_id = inode_id;
refcount_set(&delayed_node->refs, 0);
- delayed_node->ins_root = RB_ROOT;
- delayed_node->del_root = RB_ROOT;
+ delayed_node->ins_root = RB_ROOT_CACHED;
+ delayed_node->del_root = RB_ROOT_CACHED;
mutex_init(&delayed_node->mutex);
INIT_LIST_HEAD(&delayed_node->n_list);
INIT_LIST_HEAD(&delayed_node->p_list);
@@ -390,7 +390,7 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
struct btrfs_delayed_node *delayed_node,
struct btrfs_key *key)
{
- return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+ return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
NULL, NULL);
}
@@ -400,9 +400,10 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
{
struct rb_node **p, *node;
struct rb_node *parent_node = NULL;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct btrfs_delayed_item *item;
int cmp;
+ bool leftmost = true;
if (action == BTRFS_DELAYED_INSERTION_ITEM)
root = &delayed_node->ins_root;
@@ -410,7 +411,7 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
root = &delayed_node->del_root;
else
BUG();
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
node = &ins->rb_node;
while (*p) {
@@ -419,16 +420,18 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
rb_node);
cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
- if (cmp < 0)
+ if (cmp < 0) {
p = &(*p)->rb_right;
- else if (cmp > 0)
+ leftmost = false;
+ } else if (cmp > 0) {
p = &(*p)->rb_left;
- else
+ } else {
return -EEXIST;
+ }
}
rb_link_node(node, parent_node, p);
- rb_insert_color(node, root);
+ rb_insert_color_cached(node, root, leftmost);
ins->delayed_node = delayed_node;
ins->ins_or_del = action;
@@ -468,7 +471,7 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct btrfs_delayed_root *delayed_root;
delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
@@ -482,7 +485,7 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
else
root = &delayed_item->delayed_node->del_root;
- rb_erase(&delayed_item->rb_node, root);
+ rb_erase_cached(&delayed_item->rb_node, root);
delayed_item->delayed_node->count--;
finish_one_item(delayed_root);
@@ -503,7 +506,7 @@ static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
struct rb_node *p;
struct btrfs_delayed_item *item = NULL;
- p = rb_first(&delayed_node->ins_root);
+ p = rb_first_cached(&delayed_node->ins_root);
if (p)
item = rb_entry(p, struct btrfs_delayed_item, rb_node);
@@ -516,7 +519,7 @@ static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
struct rb_node *p;
struct btrfs_delayed_item *item = NULL;
- p = rb_first(&delayed_node->del_root);
+ p = rb_first_cached(&delayed_node->del_root);
if (p)
item = rb_entry(p, struct btrfs_delayed_item, rb_node);
@@ -559,7 +562,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
* reserved space when starting a transaction. So no need to reserve
* qgroup space here.
*/
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
if (!ret) {
trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid,
@@ -647,7 +650,7 @@ static int btrfs_delayed_inode_reserve_metadata(
return ret;
}
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
if (!ret) {
trace_btrfs_space_reservation(fs_info, "delayed_inode",
btrfs_ino(inode), num_bytes, 1);
@@ -762,9 +765,6 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
i++;
}
- /* reset all the locked nodes in the patch to spinning locks. */
- btrfs_clear_path_blocking(path, NULL, 0);
-
/* insert the keys of the items */
setup_items_for_insert(root, path, keys, data_size,
total_data_size, total_size, nitems);
@@ -1462,7 +1462,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- name_len, name, delayed_node->root->objectid,
+ name_len, name, delayed_node->root->root_key.objectid,
delayed_node->inode_id, ret);
BUG();
}
@@ -1533,7 +1533,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- index, node->root->objectid, node->inode_id, ret);
+ index, node->root->root_key.objectid,
+ node->inode_id, ret);
BUG();
}
mutex_unlock(&node->mutex);
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 33536cd681d4..74ae226ffaf0 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -50,8 +50,8 @@ struct btrfs_delayed_node {
* is waiting to be dealt with by the async worker.
*/
struct list_head p_list;
- struct rb_root ins_root;
- struct rb_root del_root;
+ struct rb_root_cached ins_root;
+ struct rb_root_cached del_root;
struct mutex mutex;
struct btrfs_inode_item inode_item;
refcount_t refs;
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 62ff545ba1f7..9301b3ad9217 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -101,14 +101,15 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
}
/* insert a new ref to head ref rbtree */
-static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
struct rb_node *node)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent_node = NULL;
struct btrfs_delayed_ref_head *entry;
struct btrfs_delayed_ref_head *ins;
u64 bytenr;
+ bool leftmost = true;
ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
bytenr = ins->bytenr;
@@ -117,26 +118,29 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
href_node);
- if (bytenr < entry->bytenr)
+ if (bytenr < entry->bytenr) {
p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
+ } else if (bytenr > entry->bytenr) {
p = &(*p)->rb_right;
- else
+ leftmost = false;
+ } else {
return entry;
+ }
}
rb_link_node(node, parent_node, p);
- rb_insert_color(node, root);
+ rb_insert_color_cached(node, root, leftmost);
return NULL;
}
-static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
struct btrfs_delayed_ref_node *ins)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *node = &ins->ref_node;
struct rb_node *parent_node = NULL;
struct btrfs_delayed_ref_node *entry;
+ bool leftmost = true;
while (*p) {
int comp;
@@ -145,29 +149,46 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
ref_node);
comp = comp_refs(ins, entry, true);
- if (comp < 0)
+ if (comp < 0) {
p = &(*p)->rb_left;
- else if (comp > 0)
+ } else if (comp > 0) {
p = &(*p)->rb_right;
- else
+ leftmost = false;
+ } else {
return entry;
+ }
}
rb_link_node(node, parent_node, p);
- rb_insert_color(node, root);
+ rb_insert_color_cached(node, root, leftmost);
return NULL;
}
+static struct btrfs_delayed_ref_head *find_first_ref_head(
+ struct btrfs_delayed_ref_root *dr)
+{
+ struct rb_node *n;
+ struct btrfs_delayed_ref_head *entry;
+
+ n = rb_first_cached(&dr->href_root);
+ if (!n)
+ return NULL;
+
+ entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
+
+ return entry;
+}
+
/*
- * find an head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot.
- * If return_bigger is given, the next bigger entry is returned if no exact
- * match is found.
+ * Find a head entry based on bytenr. This returns the delayed ref head if it
+ * was able to find one, or NULL if nothing was in that spot. If return_bigger
+ * is given, the next bigger entry is returned if no exact match is found.
*/
-static struct btrfs_delayed_ref_head *
-find_ref_head(struct rb_root *root, u64 bytenr,
- int return_bigger)
+static struct btrfs_delayed_ref_head *find_ref_head(
+ struct btrfs_delayed_ref_root *dr, u64 bytenr,
+ bool return_bigger)
{
+ struct rb_root *root = &dr->href_root.rb_root;
struct rb_node *n;
struct btrfs_delayed_ref_head *entry;
@@ -187,22 +208,18 @@ find_ref_head(struct rb_root *root, u64 bytenr,
if (bytenr > entry->bytenr) {
n = rb_next(&entry->href_node);
if (!n)
- n = rb_first(root);
+ return NULL;
entry = rb_entry(n, struct btrfs_delayed_ref_head,
href_node);
- return entry;
}
return entry;
}
return NULL;
}
-int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
+int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
- struct btrfs_delayed_ref_root *delayed_refs;
-
- delayed_refs = &trans->transaction->delayed_refs;
lockdep_assert_held(&delayed_refs->lock);
if (mutex_trylock(&head->mutex))
return 0;
@@ -227,7 +244,7 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref)
{
lockdep_assert_held(&head->lock);
- rb_erase(&ref->ref_node, &head->ref_tree);
+ rb_erase_cached(&ref->ref_node, &head->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
@@ -296,7 +313,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
lockdep_assert_held(&head->lock);
- if (RB_EMPTY_ROOT(&head->ref_tree))
+ if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
return;
/* We don't have too many refs to merge for data. */
@@ -314,7 +331,8 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
spin_unlock(&fs_info->tree_mod_seq_lock);
again:
- for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&head->ref_tree); node;
+ node = rb_next(node)) {
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
if (seq && ref->seq >= seq)
continue;
@@ -345,40 +363,29 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
return ret;
}
-struct btrfs_delayed_ref_head *
-btrfs_select_ref_head(struct btrfs_trans_handle *trans)
+struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+ struct btrfs_delayed_ref_root *delayed_refs)
{
- struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *head;
- u64 start;
- bool loop = false;
-
- delayed_refs = &trans->transaction->delayed_refs;
again:
- start = delayed_refs->run_delayed_start;
- head = find_ref_head(&delayed_refs->href_root, start, 1);
- if (!head && !loop) {
+ head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
+ true);
+ if (!head && delayed_refs->run_delayed_start != 0) {
delayed_refs->run_delayed_start = 0;
- start = 0;
- loop = true;
- head = find_ref_head(&delayed_refs->href_root, start, 1);
- if (!head)
- return NULL;
- } else if (!head && loop) {
- return NULL;
+ head = find_first_ref_head(delayed_refs);
}
+ if (!head)
+ return NULL;
while (head->processing) {
struct rb_node *node;
node = rb_next(&head->href_node);
if (!node) {
- if (loop)
+ if (delayed_refs->run_delayed_start == 0)
return NULL;
delayed_refs->run_delayed_start = 0;
- start = 0;
- loop = true;
goto again;
}
head = rb_entry(node, struct btrfs_delayed_ref_head,
@@ -569,7 +576,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
head_ref->is_system = is_system;
- head_ref->ref_tree = RB_ROOT;
+ head_ref->ref_tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&head_ref->ref_add_list);
RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
@@ -903,7 +910,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
- return find_ref_head(&delayed_refs->href_root, bytenr, 0);
+ return find_ref_head(delayed_refs, bytenr, false);
}
void __cold btrfs_delayed_ref_exit(void)
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index d9f2a4ebd5db..8e20c5cb5404 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -79,7 +79,7 @@ struct btrfs_delayed_ref_head {
struct mutex mutex;
spinlock_t lock;
- struct rb_root ref_tree;
+ struct rb_root_cached ref_tree;
/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
struct list_head ref_add_list;
@@ -148,7 +148,7 @@ struct btrfs_delayed_data_ref {
struct btrfs_delayed_ref_root {
/* head ref rbtree */
- struct rb_root href_root;
+ struct rb_root_cached href_root;
/* dirty extent records */
struct rb_root dirty_extent_root;
@@ -255,7 +255,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
u64 bytenr);
-int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
+int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
@@ -263,8 +263,8 @@ static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
}
-struct btrfs_delayed_ref_head *
-btrfs_select_ref_head(struct btrfs_trans_handle *trans);
+struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+ struct btrfs_delayed_ref_root *delayed_refs);
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index dec01970d8c5..2aa48aecc52b 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -382,14 +382,6 @@ out:
return ret;
}
-void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
-
- dev_replace->committed_cursor_left =
- dev_replace->cursor_left_last_write_of_item;
-}
-
static char* btrfs_dev_name(struct btrfs_device *device)
{
if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
@@ -408,11 +400,12 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
int ret;
struct btrfs_device *tgt_device = NULL;
struct btrfs_device *src_device = NULL;
+ bool need_unlock;
- ret = btrfs_find_device_by_devspec(fs_info, srcdevid,
- srcdev_name, &src_device);
- if (ret)
- return ret;
+ src_device = btrfs_find_device_by_devspec(fs_info, srcdevid,
+ srcdev_name);
+ if (IS_ERR(src_device))
+ return PTR_ERR(src_device);
ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
src_device, &tgt_device);
@@ -432,6 +425,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return PTR_ERR(trans);
}
+ need_unlock = true;
btrfs_dev_replace_write_lock(dev_replace);
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
@@ -440,6 +434,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ ASSERT(0);
ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
goto leave;
}
@@ -470,6 +465,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
atomic64_set(&dev_replace->num_write_errors, 0);
atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
btrfs_dev_replace_write_unlock(dev_replace);
+ need_unlock = false;
ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device);
if (ret)
@@ -481,7 +477,12 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ need_unlock = true;
btrfs_dev_replace_write_lock(dev_replace);
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
+ dev_replace->srcdev = NULL;
+ dev_replace->tgtdev = NULL;
goto leave;
}
@@ -503,9 +504,8 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return ret;
leave:
- dev_replace->srcdev = NULL;
- dev_replace->tgtdev = NULL;
- btrfs_dev_replace_write_unlock(dev_replace);
+ if (need_unlock)
+ btrfs_dev_replace_write_unlock(dev_replace);
btrfs_destroy_dev_replace_tgtdev(tgt_device);
return ret;
}
@@ -545,8 +545,8 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
- wait_event(fs_info->replace_wait, !percpu_counter_sum(
- &fs_info->bio_counter));
+ wait_event(fs_info->dev_replace.replace_wait, !percpu_counter_sum(
+ &fs_info->dev_replace.bio_counter));
}
/*
@@ -555,7 +555,7 @@ static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
{
clear_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
- wake_up(&fs_info->replace_wait);
+ wake_up(&fs_info->dev_replace.replace_wait);
}
static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
@@ -961,13 +961,10 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace)
{
read_lock(&dev_replace->lock);
- atomic_inc(&dev_replace->read_locks);
}
void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace)
{
- ASSERT(atomic_read(&dev_replace->read_locks) > 0);
- atomic_dec(&dev_replace->read_locks);
read_unlock(&dev_replace->lock);
}
@@ -985,7 +982,6 @@ again:
void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace)
{
- ASSERT(atomic_read(&dev_replace->blocking_readers) == 0);
write_unlock(&dev_replace->lock);
}
@@ -994,45 +990,31 @@ void btrfs_dev_replace_set_lock_blocking(
struct btrfs_dev_replace *dev_replace)
{
/* only set blocking for read lock */
- ASSERT(atomic_read(&dev_replace->read_locks) > 0);
atomic_inc(&dev_replace->blocking_readers);
read_unlock(&dev_replace->lock);
}
-/* acquire read lock and dec blocking cnt */
-void btrfs_dev_replace_clear_lock_blocking(
- struct btrfs_dev_replace *dev_replace)
-{
- /* only set blocking for read lock */
- ASSERT(atomic_read(&dev_replace->read_locks) > 0);
- ASSERT(atomic_read(&dev_replace->blocking_readers) > 0);
- read_lock(&dev_replace->lock);
- /* Barrier implied by atomic_dec_and_test */
- if (atomic_dec_and_test(&dev_replace->blocking_readers))
- cond_wake_up_nomb(&dev_replace->read_lock_wq);
-}
-
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
{
- percpu_counter_inc(&fs_info->bio_counter);
+ percpu_counter_inc(&fs_info->dev_replace.bio_counter);
}
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
- percpu_counter_sub(&fs_info->bio_counter, amount);
- cond_wake_up_nomb(&fs_info->replace_wait);
+ percpu_counter_sub(&fs_info->dev_replace.bio_counter, amount);
+ cond_wake_up_nomb(&fs_info->dev_replace.replace_wait);
}
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
{
while (1) {
- percpu_counter_inc(&fs_info->bio_counter);
+ percpu_counter_inc(&fs_info->dev_replace.bio_counter);
if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
&fs_info->fs_state)))
break;
btrfs_bio_counter_dec(fs_info);
- wait_event(fs_info->replace_wait,
+ wait_event(fs_info->dev_replace.replace_wait,
!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
&fs_info->fs_state));
}
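
The bio_counter/replace_wait pair moved into struct btrfs_dev_replace above implements a plain counter-plus-waitqueue drain: in-flight bios hold a reference on a percpu counter, and the replace code sets a "blocked" flag and then sleeps until the counter sums to zero. A rough sketch of the same pattern with made-up names (drain_gate is illustrative, not from the patch):

#include <linux/percpu_counter.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

struct drain_gate {                     /* hypothetical example type */
        struct percpu_counter inflight; /* operations currently in flight */
        wait_queue_head_t wait;         /* woken when inflight drops */
        unsigned long blocked;          /* bit 0 set while draining */
};

static int gate_init(struct drain_gate *g)
{
        init_waitqueue_head(&g->wait);
        g->blocked = 0;
        return percpu_counter_init(&g->inflight, 0, GFP_KERNEL);
}

/* fast path: take a reference for one in-flight operation */
static void gate_get(struct drain_gate *g)
{
        percpu_counter_inc(&g->inflight);
}

/* drop the reference and let a waiting drainer re-check the sum */
static void gate_put(struct drain_gate *g)
{
        percpu_counter_dec(&g->inflight);
        wake_up(&g->wait);
}

/* block new work (callers are expected to test the bit) and wait for zero */
static void gate_drain(struct drain_gate *g)
{
        set_bit(0, &g->blocked);
        wait_event(g->wait, percpu_counter_sum(&g->inflight) == 0);
}
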
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index b6d4206188bb..795c551f5b5e 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -11,7 +11,6 @@ struct btrfs_ioctl_dev_replace_args;
int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
@@ -28,12 +27,5 @@ void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace);
void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace);
void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace);
void btrfs_dev_replace_set_lock_blocking(struct btrfs_dev_replace *dev_replace);
-void btrfs_dev_replace_clear_lock_blocking(
- struct btrfs_dev_replace *dev_replace);
-
-static inline void btrfs_dev_replace_stats_inc(atomic64_t *stat_value)
-{
- atomic64_inc(stat_value);
-}
#endif
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index a678b07fcf01..8de74d835dba 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -105,13 +105,13 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
* to use for the second index (if one is created).
* Will return 0 or -ENOMEM
*/
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, const char *name, int name_len,
- struct btrfs_inode *dir, struct btrfs_key *location,
- u8 type, u64 index)
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
+ int name_len, struct btrfs_inode *dir,
+ struct btrfs_key *location, u8 type, u64 index)
{
int ret = 0;
int ret2 = 0;
+ struct btrfs_root *root = dir->root;
struct btrfs_path *path;
struct btrfs_dir_item *dir_item;
struct extent_buffer *leaf;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 05dc3c17cb62..b0ab41da91d1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -125,8 +125,8 @@ struct async_submit_bio {
* Different roots are used for different purposes and may nest inside each
* other and they require separate keysets. As lockdep keys should be
* static, assign keysets according to the purpose of the root as indicated
- * by btrfs_root->objectid. This ensures that all special purpose roots
- * have separate keysets.
+ * by btrfs_root->root_key.objectid. This ensures that all special purpose
+ * roots have separate keysets.
*
* Lock-nesting across peer nodes is always done with the immediate parent
* node locked thus preventing deadlock. As lockdep doesn't know this, use
@@ -1148,7 +1148,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->state = 0;
root->orphan_cleanup_state = 0;
- root->objectid = objectid;
root->last_trans = 0;
root->highest_objectid = 0;
root->nr_delalloc_inodes = 0;
@@ -2156,9 +2155,8 @@ static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
rwlock_init(&fs_info->dev_replace.lock);
- atomic_set(&fs_info->dev_replace.read_locks, 0);
atomic_set(&fs_info->dev_replace.blocking_readers, 0);
- init_waitqueue_head(&fs_info->replace_wait);
+ init_waitqueue_head(&fs_info->dev_replace.replace_wait);
init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
}
@@ -2648,7 +2646,8 @@ int open_ctree(struct super_block *sb,
goto fail_dirty_metadata_bytes;
}
- ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
+ ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
+ GFP_KERNEL);
if (ret) {
err = ret;
goto fail_delalloc_bytes;
@@ -3309,7 +3308,7 @@ fail_iput:
iput(fs_info->btree_inode);
fail_bio_counter:
- percpu_counter_destroy(&fs_info->bio_counter);
+ percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
fail_delalloc_bytes:
percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
@@ -3977,6 +3976,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
+ ASSERT(list_empty(&fs_info->delayed_iputs));
set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
btrfs_free_qgroup_config(fs_info);
@@ -4018,7 +4018,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
percpu_counter_destroy(&fs_info->delalloc_bytes);
- percpu_counter_destroy(&fs_info->bio_counter);
+ percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
cleanup_srcu_struct(&fs_info->subvol_srcu);
btrfs_free_stripe_hash_table(fs_info);
@@ -4204,7 +4204,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
return ret;
}
- while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+ while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
struct btrfs_delayed_ref_head *head;
struct rb_node *n;
bool pin_bytes = false;
@@ -4222,11 +4222,11 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
continue;
}
spin_lock(&head->lock);
- while ((n = rb_first(&head->ref_tree)) != NULL) {
+ while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
ref = rb_entry(n, struct btrfs_delayed_ref_node,
ref_node);
ref->in_tree = 0;
- rb_erase(&ref->ref_node, &head->ref_tree);
+ rb_erase_cached(&ref->ref_node, &head->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
@@ -4240,7 +4240,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
if (head->processing == 0)
delayed_refs->num_heads_ready--;
atomic_dec(&delayed_refs->num_entries);
- rb_erase(&head->href_node, &delayed_refs->href_root);
+ rb_erase_cached(&head->href_node, &delayed_refs->href_root);
RB_CLEAR_NODE(&head->href_node);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
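
The rb_first_cached()/rb_erase_cached() conversions above rely on struct rb_root_cached keeping a pointer to the leftmost node, so fetching the next head is O(1) instead of a walk down the tree. A small illustrative drain loop in the same shape as btrfs_destroy_delayed_refs(), on a made-up entry type rather than btrfs structures:

struct entry {
	struct rb_node node;
};

static void drain_tree(struct rb_root_cached *root)
{
	struct rb_node *n;

	while ((n = rb_first_cached(root)) != NULL) {
		struct entry *e = rb_entry(n, struct entry, node);

		rb_erase_cached(n, root);
		RB_CLEAR_NODE(n);
		kfree(e);
	}
}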
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 1f3755b3a37a..ddf28ecf17f9 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -33,7 +33,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
type = FILEID_BTRFS_WITHOUT_PARENT;
fid->objectid = btrfs_ino(BTRFS_I(inode));
- fid->root_objectid = BTRFS_I(inode)->root->objectid;
+ fid->root_objectid = BTRFS_I(inode)->root->root_key.objectid;
fid->gen = inode->i_generation;
if (parent) {
@@ -41,7 +41,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
fid->parent_objectid = BTRFS_I(parent)->location.objectid;
fid->parent_gen = parent->i_generation;
- parent_root_id = BTRFS_I(parent)->root->objectid;
+ parent_root_id = BTRFS_I(parent)->root->root_key.objectid;
if (parent_root_id != fid->root_objectid) {
fid->parent_root_objectid = parent_root_id;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2d9074295d7f..a1febf155747 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2366,6 +2366,9 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
insert_reserved);
else
BUG();
+ if (ret && insert_reserved)
+ btrfs_pin_extent(trans->fs_info, node->bytenr,
+ node->num_bytes, 1);
return ret;
}
@@ -2374,7 +2377,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_ref_node *ref;
- if (RB_EMPTY_ROOT(&head->ref_tree))
+ if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
return NULL;
/*
@@ -2387,7 +2390,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
return list_first_entry(&head->ref_add_list,
struct btrfs_delayed_ref_node, add_list);
- ref = rb_entry(rb_first(&head->ref_tree),
+ ref = rb_entry(rb_first_cached(&head->ref_tree),
struct btrfs_delayed_ref_node, ref_node);
ASSERT(list_empty(&ref->add_list));
return ref;
@@ -2448,13 +2451,13 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
spin_unlock(&head->lock);
spin_lock(&delayed_refs->lock);
spin_lock(&head->lock);
- if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+ if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
return 1;
}
delayed_refs->num_heads--;
- rb_erase(&head->href_node, &delayed_refs->href_root);
+ rb_erase_cached(&head->href_node, &delayed_refs->href_root);
RB_CLEAR_NODE(&head->href_node);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
@@ -2502,102 +2505,66 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
return 0;
}
-/*
- * Returns 0 on success or if called with an already aborted transaction.
- * Returns -ENOMEM or -EIO on failure and will abort the transaction.
- */
-static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- unsigned long nr)
+static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
+ struct btrfs_trans_handle *trans)
+{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_delayed_ref_head *head = NULL;
+ int ret;
+
+ spin_lock(&delayed_refs->lock);
+ head = btrfs_select_ref_head(delayed_refs);
+ if (!head) {
+ spin_unlock(&delayed_refs->lock);
+ return head;
+ }
+
+ /*
+ * Grab the lock that says we are going to process all the refs for
+ * this head
+ */
+ ret = btrfs_delayed_ref_lock(delayed_refs, head);
+ spin_unlock(&delayed_refs->lock);
+
+ /*
+ * We may have dropped the spin lock to get the head mutex lock, and
+ * that might have given someone else time to free the head. If that's
+ * true, it has been removed from our list and we can move on.
+ */
+ if (ret == -EAGAIN)
+ head = ERR_PTR(-EAGAIN);
+
+ return head;
+}
+
+static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *locked_ref,
+ unsigned long *run_refs)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_delayed_ref_node *ref;
- struct btrfs_delayed_ref_head *locked_ref = NULL;
struct btrfs_delayed_extent_op *extent_op;
- ktime_t start = ktime_get();
- int ret;
- unsigned long count = 0;
- unsigned long actual_count = 0;
+ struct btrfs_delayed_ref_node *ref;
int must_insert_reserved = 0;
+ int ret;
delayed_refs = &trans->transaction->delayed_refs;
- while (1) {
- if (!locked_ref) {
- if (count >= nr)
- break;
- spin_lock(&delayed_refs->lock);
- locked_ref = btrfs_select_ref_head(trans);
- if (!locked_ref) {
- spin_unlock(&delayed_refs->lock);
- break;
- }
-
- /* grab the lock that says we are going to process
- * all the refs for this head */
- ret = btrfs_delayed_ref_lock(trans, locked_ref);
- spin_unlock(&delayed_refs->lock);
- /*
- * we may have dropped the spin lock to get the head
- * mutex lock, and that might have given someone else
- * time to free the head. If that's true, it has been
- * removed from our list and we can move on.
- */
- if (ret == -EAGAIN) {
- locked_ref = NULL;
- count++;
- continue;
- }
- }
+ lockdep_assert_held(&locked_ref->mutex);
+ lockdep_assert_held(&locked_ref->lock);
- /*
- * We need to try and merge add/drops of the same ref since we
- * can run into issues with relocate dropping the implicit ref
- * and then it being added back again before the drop can
- * finish. If we merged anything we need to re-loop so we can
- * get a good ref.
- * Or we can get node references of the same type that weren't
- * merged when created due to bumps in the tree mod seq, and
- * we need to merge them to prevent adding an inline extent
- * backref before dropping it (triggering a BUG_ON at
- * insert_inline_extent_backref()).
- */
- spin_lock(&locked_ref->lock);
- btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
-
- ref = select_delayed_ref(locked_ref);
-
- if (ref && ref->seq &&
+ while ((ref = select_delayed_ref(locked_ref))) {
+ if (ref->seq &&
btrfs_check_delayed_seq(fs_info, ref->seq)) {
spin_unlock(&locked_ref->lock);
unselect_delayed_ref_head(delayed_refs, locked_ref);
- locked_ref = NULL;
- cond_resched();
- count++;
- continue;
- }
-
- /*
- * We're done processing refs in this ref_head, clean everything
- * up and move on to the next ref_head.
- */
- if (!ref) {
- ret = cleanup_ref_head(trans, locked_ref);
- if (ret > 0 ) {
- /* We dropped our lock, we need to loop. */
- ret = 0;
- continue;
- } else if (ret) {
- return ret;
- }
- locked_ref = NULL;
- count++;
- continue;
+ return -EAGAIN;
}
- actual_count++;
+ (*run_refs)++;
ref->in_tree = 0;
- rb_erase(&ref->ref_node, &locked_ref->ref_tree);
+ rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
@@ -2619,8 +2586,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
atomic_dec(&delayed_refs->num_entries);
/*
- * Record the must-insert_reserved flag before we drop the spin
- * lock.
+ * Record the must_insert_reserved flag before we drop the
+ * spin lock.
*/
must_insert_reserved = locked_ref->must_insert_reserved;
locked_ref->must_insert_reserved = 0;
@@ -2642,10 +2609,90 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
}
btrfs_put_delayed_ref(ref);
- count++;
cond_resched();
+
+ spin_lock(&locked_ref->lock);
+ btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
}
+ return 0;
+}
+
+/*
+ * Returns 0 on success or if called with an already aborted transaction.
+ * Returns -ENOMEM or -EIO on failure and will abort the transaction.
+ */
+static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+ unsigned long nr)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_head *locked_ref = NULL;
+ ktime_t start = ktime_get();
+ int ret;
+ unsigned long count = 0;
+ unsigned long actual_count = 0;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+ do {
+ if (!locked_ref) {
+ locked_ref = btrfs_obtain_ref_head(trans);
+ if (IS_ERR_OR_NULL(locked_ref)) {
+ if (PTR_ERR(locked_ref) == -EAGAIN) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ count++;
+ }
+ /*
+ * We need to try and merge add/drops of the same ref since we
+ * can run into issues with relocate dropping the implicit ref
+ * and then it being added back again before the drop can
+ * finish. If we merged anything we need to re-loop so we can
+ * get a good ref.
+ * Or we can get node references of the same type that weren't
+ * merged when created due to bumps in the tree mod seq, and
+ * we need to merge them to prevent adding an inline extent
+ * backref before dropping it (triggering a BUG_ON at
+ * insert_inline_extent_backref()).
+ */
+ spin_lock(&locked_ref->lock);
+ btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
+
+ ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
+ &actual_count);
+ if (ret < 0 && ret != -EAGAIN) {
+ /*
+ * Error, btrfs_run_delayed_refs_for_head already
+ * unlocked everything so just bail out
+ */
+ return ret;
+ } else if (!ret) {
+ /*
+ * Success, perform the usual cleanup of a processed
+ * head
+ */
+ ret = cleanup_ref_head(trans, locked_ref);
+ if (ret > 0 ) {
+ /* We dropped our lock, we need to loop. */
+ ret = 0;
+ continue;
+ } else if (ret) {
+ return ret;
+ }
+ }
+
+ /*
+ * Either success case or btrfs_run_delayed_refs_for_head
+ * returned -EAGAIN, meaning we need to select another head
+ */
+
+ locked_ref = NULL;
+ cond_resched();
+ } while ((nr != -1 && count < nr) || locked_ref);
+
/*
* We don't want to include ref heads since we can have empty ref heads
* and those will drastically skew our runtime down since we just do
@@ -2745,9 +2792,9 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
return num_csums;
}
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_rsv *global_rsv;
u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
@@ -2782,8 +2829,7 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
return ret;
}
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
u64 num_entries =
atomic_read(&trans->transaction->delayed_refs.num_entries);
@@ -2791,14 +2837,14 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
u64 val;
smp_mb();
- avg_runtime = fs_info->avg_delayed_ref_runtime;
+ avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
val = num_entries * avg_runtime;
if (val >= NSEC_PER_SEC)
return 1;
if (val >= NSEC_PER_SEC / 2)
return 2;
- return btrfs_check_space_for_delayed_refs(trans, fs_info);
+ return btrfs_check_space_for_delayed_refs(trans);
}
struct async_delayed_refs {
@@ -2911,7 +2957,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head;
int ret;
int run_all = count == (unsigned long)-1;
- bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
/* We'll clean this up in btrfs_cleanup_transaction */
if (trans->aborted)
@@ -2928,7 +2973,6 @@ again:
#ifdef SCRAMBLE_DELAYED_REFS
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
- trans->can_flush_pending_bgs = false;
ret = __btrfs_run_delayed_refs(trans, count);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
@@ -2940,7 +2984,7 @@ again:
btrfs_create_pending_block_groups(trans);
spin_lock(&delayed_refs->lock);
- node = rb_first(&delayed_refs->href_root);
+ node = rb_first_cached(&delayed_refs->href_root);
if (!node) {
spin_unlock(&delayed_refs->lock);
goto out;
@@ -2959,7 +3003,6 @@ again:
goto again;
}
out:
- trans->can_flush_pending_bgs = can_flush_pending_bgs;
return 0;
}
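
btrfs_obtain_ref_head(), added above, uses a three-way return: NULL when there is nothing left to process, ERR_PTR(-EAGAIN) when the head was freed while the spinlock was dropped, and otherwise a locked head. A generic sketch of how a caller consumes that convention; grab_next_item() and process_item() are assumed helpers, not btrfs functions (note that PTR_ERR(NULL) is 0, so the NULL case falls through to the break):

struct work_queue;
struct work_item;

/*
 * assumed helper: returns NULL when the queue is empty, ERR_PTR(-EAGAIN) on
 * a lost race, otherwise a locked item ready to process
 */
struct work_item *grab_next_item(struct work_queue *q);
void process_item(struct work_item *it);	/* assumed helper */

static void run_all(struct work_queue *q)
{
	struct work_item *it;

	while (1) {
		it = grab_next_item(q);
		if (IS_ERR_OR_NULL(it)) {
			if (PTR_ERR(it) == -EAGAIN)
				continue;	/* raced with another worker, retry */
			break;			/* NULL: queue is drained */
		}
		process_item(it);
	}
}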
@@ -3040,7 +3083,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
* XXX: We should replace this with a proper search function in the
* future.
*/
- for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&head->ref_tree); node;
+ node = rb_next(node)) {
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
/* If it's a shared ref we know a cross reference exists */
if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
@@ -3139,7 +3183,6 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
{
struct btrfs_path *path;
int ret;
- int ret2;
path = btrfs_alloc_path();
if (!path)
@@ -3151,17 +3194,9 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
if (ret && ret != -ENOENT)
goto out;
- ret2 = check_delayed_ref(root, path, objectid,
- offset, bytenr);
- } while (ret2 == -EAGAIN);
-
- if (ret2 && ret2 != -ENOENT) {
- ret = ret2;
- goto out;
- }
+ ret = check_delayed_ref(root, path, objectid, offset, bytenr);
+ } while (ret == -EAGAIN);
- if (ret != -ENOENT || ret2 != -ENOENT)
- ret = 0;
out:
btrfs_free_path(path);
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
@@ -4533,6 +4568,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
goto out;
} else {
ret = 1;
+ space_info->max_extent_size = 0;
}
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
@@ -4554,11 +4590,9 @@ out:
* the block groups that were made dirty during the lifetime of the
* transaction.
*/
- if (trans->can_flush_pending_bgs &&
- trans->chunk_bytes_reserved >= (u64)SZ_2M) {
+ if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
btrfs_create_pending_block_groups(trans);
- btrfs_trans_release_chunk_metadata(trans);
- }
+
return ret;
}
@@ -5284,7 +5318,7 @@ static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
}
static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
- u64 num_bytes, int update_size)
+ u64 num_bytes, bool update_size)
{
spin_lock(&block_rsv->lock);
block_rsv->reserved += num_bytes;
@@ -5316,7 +5350,7 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
global_rsv->full = 0;
spin_unlock(&global_rsv->lock);
- block_rsv_add_bytes(dest, num_bytes, 1);
+ block_rsv_add_bytes(dest, num_bytes, true);
return 0;
}
@@ -5479,7 +5513,7 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
struct btrfs_block_rsv *dst, u64 num_bytes,
- int update_size)
+ bool update_size)
{
int ret;
@@ -5539,10 +5573,8 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
return 0;
ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
- if (!ret) {
- block_rsv_add_bytes(block_rsv, num_bytes, 1);
- return 0;
- }
+ if (!ret)
+ block_rsv_add_bytes(block_rsv, num_bytes, true);
return ret;
}
@@ -5587,7 +5619,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
if (!ret) {
- block_rsv_add_bytes(block_rsv, num_bytes, 0);
+ block_rsv_add_bytes(block_rsv, num_bytes, false);
return 0;
}
@@ -5629,7 +5661,7 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
return ret;
ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
if (!ret) {
- block_rsv_add_bytes(block_rsv, num_bytes, 0);
+ block_rsv_add_bytes(block_rsv, num_bytes, false);
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), num_bytes, 1);
@@ -5835,7 +5867,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
BTRFS_RESERVE_FLUSH_ALL);
if (ret == -ENOSPC && use_global_rsv)
- ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
+ ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, true);
if (ret && qgroup_num_bytes)
btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
@@ -6399,10 +6431,6 @@ static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
} else {
cache->reserved += num_bytes;
space_info->bytes_reserved += num_bytes;
-
- trace_btrfs_space_reservation(cache->fs_info,
- "space_info", space_info->flags,
- ram_bytes, 0);
space_info->bytes_may_use -= ram_bytes;
if (delalloc)
cache->delalloc_bytes += num_bytes;
@@ -6424,11 +6452,10 @@ static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
* reserve set to 0 in order to clear the reservation.
*/
-static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int delalloc)
+static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
- int ret = 0;
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
@@ -6436,12 +6463,12 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
space_info->bytes_readonly += num_bytes;
cache->reserved -= num_bytes;
space_info->bytes_reserved -= num_bytes;
+ space_info->max_extent_size = 0;
if (delalloc)
cache->delalloc_bytes -= num_bytes;
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
- return ret;
}
void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
@@ -6925,7 +6952,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
goto out_delayed_unlock;
spin_lock(&head->lock);
- if (!RB_EMPTY_ROOT(&head->ref_tree))
+ if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
goto out;
if (head->extent_op) {
@@ -6946,7 +6973,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
* at this point we have a head with no other entries. Go
* ahead and process it.
*/
- rb_erase(&head->href_node, &delayed_refs->href_root);
+ rb_erase_cached(&head->href_node, &delayed_refs->href_root);
RB_CLEAR_NODE(&head->href_node);
atomic_dec(&delayed_refs->num_entries);
@@ -7233,6 +7260,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group = NULL;
u64 search_start = 0;
u64 max_extent_size = 0;
+ u64 max_free_space = 0;
u64 empty_cluster = 0;
struct btrfs_space_info *space_info;
int loop = 0;
@@ -7528,8 +7556,8 @@ unclustered_alloc:
spin_lock(&ctl->tree_lock);
if (ctl->free_space <
num_bytes + empty_cluster + empty_size) {
- if (ctl->free_space > max_extent_size)
- max_extent_size = ctl->free_space;
+ max_free_space = max(max_free_space,
+ ctl->free_space);
spin_unlock(&ctl->tree_lock);
goto loop;
}
@@ -7696,6 +7724,8 @@ loop:
}
out:
if (ret == -ENOSPC) {
+ if (!max_extent_size)
+ max_extent_size = max_free_space;
spin_lock(&space_info->lock);
space_info->max_extent_size = max_extent_size;
spin_unlock(&space_info->lock);
@@ -7977,21 +8007,14 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
}
path = btrfs_alloc_path();
- if (!path) {
- btrfs_free_and_pin_reserved_extent(fs_info,
- extent_key.objectid,
- fs_info->nodesize);
+ if (!path)
return -ENOMEM;
- }
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
&extent_key, size);
if (ret) {
btrfs_free_path(path);
- btrfs_free_and_pin_reserved_extent(fs_info,
- extent_key.objectid,
- fs_info->nodesize);
return ret;
}
@@ -8119,6 +8142,19 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (IS_ERR(buf))
return buf;
+ /*
+ * Extra safety check in case the extent tree is corrupted and extent
+ * allocator chooses to use a tree block which is already used and
+ * locked.
+ */
+ if (buf->lock_owner == current->pid) {
+ btrfs_err_rl(fs_info,
+"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
+ buf->start, btrfs_header_owner(buf), current->pid);
+ free_extent_buffer(buf);
+ return ERR_PTR(-EUCLEAN);
+ }
+
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
clean_tree_block(fs_info, buf);
@@ -8215,7 +8251,7 @@ try_reserve:
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
- block_rsv_add_bytes(block_rsv, blocksize, 0);
+ block_rsv_add_bytes(block_rsv, blocksize, false);
block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL);
}
@@ -8642,7 +8678,13 @@ skip:
parent = 0;
}
- if (need_account) {
+ /*
+ * Reloc tree doesn't contribute to qgroup numbers, and we have
+ * already accounted them at merge time (replace_path),
+ * thus we could skip expensive subtree trace here.
+ */
+ if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
+ need_account) {
ret = btrfs_qgroup_trace_subtree(trans, next,
generation, level - 1);
if (ret) {
@@ -8763,15 +8805,14 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (eb == root->node) {
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = eb->start;
- else
- BUG_ON(root->root_key.objectid !=
- btrfs_header_owner(eb));
+ else if (root->root_key.objectid != btrfs_header_owner(eb))
+ goto owner_mismatch;
} else {
if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = path->nodes[level + 1]->start;
- else
- BUG_ON(root->root_key.objectid !=
- btrfs_header_owner(path->nodes[level + 1]));
+ else if (root->root_key.objectid !=
+ btrfs_header_owner(path->nodes[level + 1]))
+ goto owner_mismatch;
}
btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
@@ -8779,6 +8820,11 @@ out:
wc->refs[level] = 0;
wc->flags[level] = 0;
return 0;
+
+owner_mismatch:
+ btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
+ btrfs_header_owner(eb), root->root_key.objectid);
+ return -EUCLEAN;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
@@ -8832,6 +8878,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
ret = walk_up_proc(trans, root, path, wc);
if (ret > 0)
return 0;
+ if (ret < 0)
+ return ret;
if (path->locks[level]) {
btrfs_tree_unlock_rw(path->nodes[level],
@@ -8875,7 +8923,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level;
bool root_dropped = false;
- btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
+ btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
path = btrfs_alloc_path();
if (!path) {
@@ -9613,6 +9661,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
block_group = btrfs_lookup_first_block_group(info, last);
while (block_group) {
+ wait_block_group_cache_done(block_group);
spin_lock(&block_group->lock);
if (block_group->iref)
break;
@@ -10074,15 +10123,19 @@ error:
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *block_group, *tmp;
+ struct btrfs_block_group_cache *block_group;
struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_block_group_item item;
struct btrfs_key key;
int ret = 0;
- bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
- trans->can_flush_pending_bgs = false;
- list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+ if (!trans->can_flush_pending_bgs)
+ return;
+
+ while (!list_empty(&trans->new_bgs)) {
+ block_group = list_first_entry(&trans->new_bgs,
+ struct btrfs_block_group_cache,
+ bg_list);
if (ret)
goto next;
@@ -10103,7 +10156,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
next:
list_del_init(&block_group->bg_list);
}
- trans->can_flush_pending_bgs = can_flush_pending_bgs;
+ btrfs_trans_release_chunk_metadata(trans);
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
@@ -10753,14 +10806,16 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
* We don't want a transaction for this since the discard may take a
* substantial amount of time. We don't require that a transaction be
* running, but we do need to take a running transaction into account
- * to ensure that we're not discarding chunks that were released in
- * the current transaction.
+ * to ensure that we're not discarding chunks that were released or
+ * allocated in the current transaction.
*
* Holding the chunks lock will prevent other threads from allocating
* or releasing chunks, but it won't prevent a running transaction
* from committing and releasing the memory that the pending chunks
* list head uses. For that, we need to take a reference to the
- * transaction.
+ * transaction and hold the commit root sem. We only need to hold
+ * it while performing the free space search since we have already
+ * held back allocations.
*/
static int btrfs_trim_free_extents(struct btrfs_device *device,
u64 minlen, u64 *trimmed)
@@ -10770,6 +10825,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
*trimmed = 0;
+ /* Discard not supported = nothing to do. */
+ if (!blk_queue_discard(bdev_get_queue(device->bdev)))
+ return 0;
+
/* Not writeable = nothing to do. */
if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
return 0;
@@ -10787,9 +10846,13 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
if (ret)
- return ret;
+ break;
- down_read(&fs_info->commit_root_sem);
+ ret = down_read_killable(&fs_info->commit_root_sem);
+ if (ret) {
+ mutex_unlock(&fs_info->chunk_mutex);
+ break;
+ }
spin_lock(&fs_info->trans_lock);
trans = fs_info->running_transaction;
@@ -10797,13 +10860,17 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
refcount_inc(&trans->use_count);
spin_unlock(&fs_info->trans_lock);
+ if (!trans)
+ up_read(&fs_info->commit_root_sem);
+
ret = find_free_dev_extent_start(trans, device, minlen, start,
&start, &len);
- if (trans)
+ if (trans) {
+ up_read(&fs_info->commit_root_sem);
btrfs_put_transaction(trans);
+ }
if (ret) {
- up_read(&fs_info->commit_root_sem);
mutex_unlock(&fs_info->chunk_mutex);
if (ret == -ENOSPC)
ret = 0;
@@ -10811,7 +10878,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
}
ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
- up_read(&fs_info->commit_root_sem);
mutex_unlock(&fs_info->chunk_mutex);
if (ret)
@@ -10831,6 +10897,15 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
return ret;
}
+/*
+ * Trim the whole filesystem by:
+ * 1) trimming the free space in each block group
+ * 2) trimming the unallocated space on each device
+ *
+ * This will also continue trimming even if a block group or device encounters
+ * an error. The return value will be the last error, or 0 if nothing bad
+ * happens.
+ */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
struct btrfs_block_group_cache *cache = NULL;
@@ -10840,18 +10915,14 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
u64 start;
u64 end;
u64 trimmed = 0;
- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
+ u64 bg_failed = 0;
+ u64 dev_failed = 0;
+ int bg_ret = 0;
+ int dev_ret = 0;
int ret = 0;
- /*
- * try to trim all FS space, our block group may start from non-zero.
- */
- if (range->len == total_bytes)
- cache = btrfs_lookup_first_block_group(fs_info, range->start);
- else
- cache = btrfs_lookup_block_group(fs_info, range->start);
-
- while (cache) {
+ cache = btrfs_lookup_first_block_group(fs_info, range->start);
+ for (; cache; cache = next_block_group(fs_info, cache)) {
if (cache->key.objectid >= (range->start + range->len)) {
btrfs_put_block_group(cache);
break;
@@ -10865,13 +10936,15 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
if (!block_group_cache_done(cache)) {
ret = cache_block_group(cache, 0);
if (ret) {
- btrfs_put_block_group(cache);
- break;
+ bg_failed++;
+ bg_ret = ret;
+ continue;
}
ret = wait_block_group_cache_done(cache);
if (ret) {
- btrfs_put_block_group(cache);
- break;
+ bg_failed++;
+ bg_ret = ret;
+ continue;
}
}
ret = btrfs_trim_block_group(cache,
@@ -10882,28 +10955,40 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
trimmed += group_trimmed;
if (ret) {
- btrfs_put_block_group(cache);
- break;
+ bg_failed++;
+ bg_ret = ret;
+ continue;
}
}
-
- cache = next_block_group(fs_info, cache);
}
+ if (bg_failed)
+ btrfs_warn(fs_info,
+ "failed to trim %llu block group(s), last error %d",
+ bg_failed, bg_ret);
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- devices = &fs_info->fs_devices->alloc_list;
- list_for_each_entry(device, devices, dev_alloc_list) {
+ devices = &fs_info->fs_devices->devices;
+ list_for_each_entry(device, devices, dev_list) {
ret = btrfs_trim_free_extents(device, range->minlen,
&group_trimmed);
- if (ret)
+ if (ret) {
+ dev_failed++;
+ dev_ret = ret;
break;
+ }
trimmed += group_trimmed;
}
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ if (dev_failed)
+ btrfs_warn(fs_info,
+ "failed to trim %llu device(s), last error %d",
+ dev_failed, dev_ret);
range->len = trimmed;
- return ret;
+ if (bg_ret)
+ return bg_ret;
+ return dev_ret;
}
/*
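
btrfs_trim_fs(), reworked above to keep going past per-block-group and per-device errors, is the btrfs backend for the generic FITRIM ioctl. A small userspace caller, assuming a mounted filesystem path is passed as the first argument:

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	/* on return, range.len holds the number of bytes actually trimmed */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}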
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4dd6faab02bb..d228f706ff3e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1424,20 +1424,15 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
struct extent_state **cached_state)
{
struct extent_state *state;
- struct rb_node *n;
int ret = 1;
spin_lock(&tree->lock);
if (cached_state && *cached_state) {
state = *cached_state;
if (state->end == start - 1 && extent_state_in_tree(state)) {
- n = rb_next(&state->rb_node);
- while (n) {
- state = rb_entry(n, struct extent_state,
- rb_node);
+ while ((state = next_state(state)) != NULL) {
if (state->state & bits)
goto got_it;
- n = rb_next(n);
}
free_extent_state(*cached_state);
*cached_state = NULL;
@@ -1568,7 +1563,7 @@ static noinline int lock_delalloc_pages(struct inode *inode,
*
* 1 is returned if we find something, 0 if nothing was in the tree
*/
-STATIC u64 find_lock_delalloc_range(struct inode *inode,
+static noinline_for_stack u64 find_lock_delalloc_range(struct inode *inode,
struct extent_io_tree *tree,
struct page *locked_page, u64 *start,
u64 *end, u64 max_bytes)
@@ -1648,6 +1643,17 @@ out_failed:
return found;
}
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+u64 btrfs_find_lock_delalloc_range(struct inode *inode,
+ struct extent_io_tree *tree,
+ struct page *locked_page, u64 *start,
+ u64 *end, u64 max_bytes)
+{
+ return find_lock_delalloc_range(inode, tree, locked_page, start, end,
+ max_bytes);
+}
+#endif
+
static int __process_pages_contig(struct address_space *mapping,
struct page *locked_page,
pgoff_t start_index, pgoff_t end_index,
@@ -3778,7 +3784,7 @@ int btree_write_cache_pages(struct address_space *mapping,
pgoff_t index;
pgoff_t end; /* Inclusive */
int scanned = 0;
- int tag;
+ xa_mark_t tag;
pagevec_init(&pvec);
if (wbc->range_cyclic) {
@@ -3903,7 +3909,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
pgoff_t done_index;
int range_whole = 0;
int scanned = 0;
- int tag;
+ xa_mark_t tag;
/*
* We have to hold onto the inode so that ordered extents can do their
@@ -5153,11 +5159,9 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
clear_page_dirty_for_io(page);
xa_lock_irq(&page->mapping->i_pages);
- if (!PageDirty(page)) {
- radix_tree_tag_clear(&page->mapping->i_pages,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
- }
+ if (!PageDirty(page))
+ __xa_clear_mark(&page->mapping->i_pages,
+ page_index(page), PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&page->mapping->i_pages);
ClearPageError(page);
unlock_page(page);
@@ -5165,11 +5169,11 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
WARN_ON(atomic_read(&eb->refs) == 0);
}
-int set_extent_buffer_dirty(struct extent_buffer *eb)
+bool set_extent_buffer_dirty(struct extent_buffer *eb)
{
int i;
int num_pages;
- int was_dirty = 0;
+ bool was_dirty;
check_buffer_tree_ref(eb);
@@ -5179,8 +5183,15 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
WARN_ON(atomic_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
+ if (!was_dirty)
+ for (i = 0; i < num_pages; i++)
+ set_page_dirty(eb->pages[i]);
+
+#ifdef CONFIG_BTRFS_DEBUG
for (i = 0; i < num_pages; i++)
- set_page_dirty(eb->pages[i]);
+ ASSERT(PageDirty(eb->pages[i]));
+#endif
+
return was_dirty;
}
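
The STATIC macro on find_lock_delalloc_range() is replaced above by a plain static function plus a thin btrfs_find_lock_delalloc_range() shim that is only compiled under CONFIG_BTRFS_FS_RUN_SANITY_TESTS. The general shape of that test-only export pattern, with illustrative names and a hypothetical config option:

/* the real routine stays static in normal builds */
static noinline_for_stack u64 find_range(u64 *start, u64 *end)
{
	/* ... real work elided ... */
	return *end - *start;
}

#ifdef CONFIG_EXAMPLE_FS_RUN_SANITY_TESTS
/* thin shim exported only for the built-in self tests */
u64 example_find_range(u64 *start, u64 *end)
{
	return find_range(start, end);
}
#endif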
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index b4d03e677e1d..369daa5d4f73 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -479,7 +479,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
-int set_extent_buffer_dirty(struct extent_buffer *eb);
+bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
@@ -546,7 +546,7 @@ int free_io_failure(struct extent_io_tree *failure_tree,
struct extent_io_tree *io_tree,
struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-noinline u64 find_lock_delalloc_range(struct inode *inode,
+u64 btrfs_find_lock_delalloc_range(struct inode *inode,
struct extent_io_tree *tree,
struct page *locked_page, u64 *start,
u64 *end, u64 max_bytes);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 6648d55e5339..7eea8b6e2cd3 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -34,7 +34,7 @@ void __cold extent_map_exit(void)
*/
void extent_map_tree_init(struct extent_map_tree *tree)
{
- tree->map = RB_ROOT;
+ tree->map = RB_ROOT_CACHED;
INIT_LIST_HEAD(&tree->modified_extents);
rwlock_init(&tree->lock);
}
@@ -90,24 +90,27 @@ static u64 range_end(u64 start, u64 len)
return start + len;
}
-static int tree_insert(struct rb_root *root, struct extent_map *em)
+static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct extent_map *entry = NULL;
struct rb_node *orig_parent = NULL;
u64 end = range_end(em->start, em->len);
+ bool leftmost = true;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct extent_map, rb_node);
- if (em->start < entry->start)
+ if (em->start < entry->start) {
p = &(*p)->rb_left;
- else if (em->start >= extent_map_end(entry))
+ } else if (em->start >= extent_map_end(entry)) {
p = &(*p)->rb_right;
- else
+ leftmost = false;
+ } else {
return -EEXIST;
+ }
}
orig_parent = parent;
@@ -130,7 +133,7 @@ static int tree_insert(struct rb_root *root, struct extent_map *em)
return -EEXIST;
rb_link_node(&em->rb_node, orig_parent, p);
- rb_insert_color(&em->rb_node, root);
+ rb_insert_color_cached(&em->rb_node, root, leftmost);
return 0;
}
@@ -242,7 +245,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->mod_start = merge->mod_start;
em->generation = max(em->generation, merge->generation);
- rb_erase(&merge->rb_node, &tree->map);
+ rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);
free_extent_map(merge);
}
@@ -254,7 +257,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
if (rb && mergable_maps(em, merge)) {
em->len += merge->len;
em->block_len += merge->block_len;
- rb_erase(&merge->rb_node, &tree->map);
+ rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);
em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
em->generation = max(em->generation, merge->generation);
@@ -367,7 +370,7 @@ __lookup_extent_mapping(struct extent_map_tree *tree,
struct rb_node *next = NULL;
u64 end = range_end(start, len);
- rb_node = __tree_search(&tree->map, start, &prev, &next);
+ rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
if (!rb_node) {
if (prev)
rb_node = prev;
@@ -428,16 +431,13 @@ struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
* Removes @em from @tree. No reference counts are dropped, and no checks
* are done to see if the range is in use
*/
-int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
+void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
- int ret = 0;
-
WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
- rb_erase(&em->rb_node, &tree->map);
+ rb_erase_cached(&em->rb_node, &tree->map);
if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
list_del_init(&em->list);
RB_CLEAR_NODE(&em->rb_node);
- return ret;
}
void replace_extent_mapping(struct extent_map_tree *tree,
@@ -449,7 +449,7 @@ void replace_extent_mapping(struct extent_map_tree *tree,
ASSERT(extent_map_in_tree(cur));
if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
list_del_init(&cur->list);
- rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map);
+ rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
RB_CLEAR_NODE(&cur->rb_node);
setup_extent_mapping(tree, new, modified);
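
tree_insert() above now tracks whether the new node ever descended to the right; passing that leftmost flag to rb_insert_color_cached() is what keeps the cached leftmost pointer of rb_root_cached valid, so rb_first_cached() stays O(1). The same insertion idiom on a made-up structure, not btrfs code:

struct item {
	struct rb_node node;
	u64 start;
};

static int item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, node);

		parent = *p;
		if (new->start < cur->start) {
			p = &(*p)->rb_left;
		} else if (new->start > cur->start) {
			p = &(*p)->rb_right;
			/* went right at least once: not the new leftmost */
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color_cached(&new->node, root, leftmost);
	return 0;
}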
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 25d985e7532a..31977ffd6190 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -49,7 +49,7 @@ struct extent_map {
};
struct extent_map_tree {
- struct rb_root map;
+ struct rb_root_cached map;
struct list_head modified_extents;
rwlock_t lock;
};
@@ -78,7 +78,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em, int modified);
-int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
+void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
void replace_extent_mapping(struct extent_map_tree *tree,
struct extent_map *cur,
struct extent_map *new,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 2be00e873e92..97c7a086f7bd 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -531,6 +531,14 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
end_of_last_block = start_pos + num_bytes - 1;
+ /*
+ * The pages may have already been dirty, clear out old accounting so
+ * we can set things up properly
+ */
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached);
+
if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
if (start_pos >= isize &&
!(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
@@ -1500,18 +1508,27 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
}
if (ordered)
btrfs_put_ordered_extent(ordered);
- clear_extent_bit(&inode->io_tree, start_pos, last_pos,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, cached_state);
+
*lockstart = start_pos;
*lockend = last_pos;
ret = 1;
}
+ /*
+ * It's possible the pages are dirty right now, but we don't want
+ * to clean them yet because copy_from_user may catch a page fault
+ * and we might have to fall back to one page at a time. If that
+ * happens, we'll unlock these pages and we'd have a window where
+ * reclaim could sneak in and drop the once-dirty page on the floor
+ * without writing it.
+ *
+ * We have the pages locked and the extent range locked, so there's
+ * no way someone can start IO on any dirty pages in this range.
+ *
+ * We'll call btrfs_dirty_pages() later on, and that will flip around
+ * delalloc bits and dirty the pages as required.
+ */
for (i = 0; i < num_pages; i++) {
- if (clear_page_dirty_for_io(pages[i]))
- account_page_redirty(pages[i]);
set_page_extent_mapped(pages[i]);
WARN_ON(!PageLocked(pages[i]));
}
@@ -2061,6 +2078,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
inode_lock(inode);
+
+ /*
+ * We take the dio_sem here because the tree log stuff can race with
+ * lockless dio writes and get an extent map logged for an extent we
+ * never waited on. We need it this high up for lockdep reasons.
+ */
+ down_write(&BTRFS_I(inode)->dio_sem);
+
atomic_inc(&root->log_batch);
/*
@@ -2069,6 +2094,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = btrfs_wait_ordered_range(inode, start, len);
if (ret) {
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2092,6 +2118,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* checked called fsync.
*/
ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2110,6 +2137,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2131,6 +2159,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* file again, but that will end up using the synchronization
* inside btrfs_sync_log to keep things safe.
*/
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
/*
@@ -2544,7 +2573,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
- min_size, 0);
+ min_size, false);
BUG_ON(ret);
trans->block_rsv = rsv;
@@ -2594,7 +2623,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, 0);
+ rsv, min_size, false);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
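
The btrfs_sync_file() hunks above take dio_sem right after inode_lock() and release it explicitly on every early return, as well as before the inode lock is dropped for the log sync. For reference, the usual alternative shape for this kind of paired acquire/release is a goto unwind ladder; a generic sketch with assumed helpers, not the real fsync path:

struct sync_ctx {
	struct inode		*inode;
	struct rw_semaphore	dio_sem;
};

int wait_for_ordered(struct sync_ctx *c);	/* assumed helper */
int log_and_commit(struct sync_ctx *c);		/* assumed helper */

static int example_sync(struct sync_ctx *c)
{
	int ret;

	inode_lock(c->inode);
	down_write(&c->dio_sem);

	ret = wait_for_ordered(c);
	if (ret)
		goto out_unlock;
	ret = log_and_commit(c);

out_unlock:
	/* release in the reverse order of acquisition on every path */
	up_write(&c->dio_sem);
	inode_unlock(c->inode);
	return ret;
}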
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0adf38b00fa0..4ba0aedc878b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -10,6 +10,7 @@
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
@@ -47,6 +48,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct inode *inode = NULL;
+ unsigned nofs_flag;
int ret;
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -68,7 +70,13 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
btrfs_disk_key_to_cpu(&location, &disk_key);
btrfs_release_path(path);
+ /*
+ * We are often under a trans handle at this point, so we need to make
+ * sure NOFS is set to keep us from deadlocking.
+ */
+ nofs_flag = memalloc_nofs_save();
inode = btrfs_iget(fs_info->sb, &location, root, NULL);
+ memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
return inode;
@@ -1679,6 +1687,8 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
bitmap_clear(info->bitmap, start, count);
info->bytes -= bytes;
+ if (info->max_extent_size > ctl->unit)
+ info->max_extent_size = 0;
}
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
@@ -1762,6 +1772,13 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
return -1;
}
+static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
+{
+ if (entry->bitmap)
+ return entry->max_extent_size;
+ return entry->bytes;
+}
+
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
@@ -1783,8 +1800,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
for (node = &entry->offset_index; node; node = rb_next(node)) {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
if (entry->bytes < *bytes) {
- if (entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
continue;
}
@@ -1802,8 +1819,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
}
if (entry->bytes < *bytes + align_off) {
- if (entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
continue;
}
@@ -1815,8 +1832,10 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
*offset = tmp;
*bytes = size;
return entry;
- } else if (size > *max_extent_size) {
- *max_extent_size = size;
+ } else {
+ *max_extent_size =
+ max(get_max_extent_size(entry),
+ *max_extent_size);
}
continue;
}
@@ -2110,8 +2129,7 @@ new_bitmap:
out:
if (info) {
- if (info->bitmap)
- kfree(info->bitmap);
+ kfree(info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, info);
}
@@ -2440,6 +2458,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
struct rb_node *n;
int count = 0;
+ spin_lock(&ctl->tree_lock);
for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes && !block_group->ro)
@@ -2448,6 +2467,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info->offset, info->bytes,
(info->bitmap) ? "yes" : "no");
}
+ spin_unlock(&ctl->tree_lock);
btrfs_info(fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes");
btrfs_info(fs_info,
@@ -2676,8 +2696,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
if (err) {
- if (search_bytes > *max_extent_size)
- *max_extent_size = search_bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
return 0;
}
@@ -2714,8 +2734,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
entry = rb_entry(node, struct btrfs_free_space, offset_index);
while (1) {
- if (entry->bytes < bytes && entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ if (entry->bytes < bytes)
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
if (entry->bytes < bytes ||
(!entry->bitmap && entry->offset < min_start)) {
@@ -3601,8 +3622,7 @@ again:
if (info)
kmem_cache_free(btrfs_free_space_cachep, info);
- if (map)
- kfree(map);
+ kfree(map);
return 0;
}
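
__lookup_free_space_inode() above wraps btrfs_iget() in memalloc_nofs_save()/memalloc_nofs_restore() because it can run under a transaction handle; inside that scope every allocation behaves as GFP_NOFS, so memory reclaim cannot recurse back into the filesystem. A minimal illustration of the scoping, not tied to btrfs:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_under_transaction(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();
	/* any allocation inside this scope is implicitly GFP_NOFS */
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	return p;
}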
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3ea5339603cf..d3df5b52278c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -64,7 +64,6 @@ static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
-static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static const struct extent_io_ops btrfs_extent_io_ops;
@@ -503,6 +502,7 @@ again:
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
+ nr_pages = 0;
goto cont;
}
@@ -2750,12 +2750,9 @@ static void relink_file_extents(struct new_sa_defrag_extent *new)
struct btrfs_path *path;
struct sa_defrag_extent_backref *backref;
struct sa_defrag_extent_backref *prev = NULL;
- struct inode *inode;
struct rb_node *node;
int ret;
- inode = new->inode;
-
path = btrfs_alloc_path();
if (!path)
return;
@@ -2944,6 +2941,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
bool truncated = false;
bool range_locked = false;
bool clear_new_delalloc_bytes = false;
+ bool clear_reserved_extent = true;
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
@@ -3047,10 +3045,12 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
logical_len, logical_len,
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
- if (!ret)
+ if (!ret) {
+ clear_reserved_extent = false;
btrfs_release_delalloc_bytes(fs_info,
ordered_extent->start,
ordered_extent->disk_len);
+ }
}
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered_extent->file_offset, ordered_extent->len,
@@ -3111,8 +3111,13 @@ out:
* wrong we need to return the space for this ordered extent
* back to the allocator. We only free the extent in the
* truncated case if we didn't write out the extent at all.
+ *
+ * If we made it past insert_reserved_file_extent before we
+ * errored out then we don't need to do this as the accounting
+ * has already been done.
*/
if ((ret || !logical_len) &&
+ clear_reserved_extent &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
btrfs_free_reserved_extent(fs_info,
@@ -3471,8 +3476,6 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
/* this will do delete_inode and everything for us */
iput(inode);
- if (ret)
- goto out;
}
/* release the path since we're done with it */
btrfs_release_path(path);
@@ -3738,7 +3741,7 @@ cache_acl:
case S_IFLNK:
inode->i_op = &btrfs_symlink_inode_operations;
inode_nohighmem(inode);
- inode->i_mapping->a_ops = &btrfs_symlink_aops;
+ inode->i_mapping->a_ops = &btrfs_aops;
break;
default:
inode->i_op = &btrfs_special_inode_operations;
@@ -3910,12 +3913,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
path->leave_spinning = 1;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto err;
- }
- if (!di) {
- ret = -ENOENT;
+ if (IS_ERR_OR_NULL(di)) {
+ ret = di ? PTR_ERR(di) : -ENOENT;
goto err;
}
leaf = path->nodes[0];
@@ -4075,10 +4074,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR_OR_NULL(di)) {
- if (!di)
- ret = -ENOENT;
- else
- ret = PTR_ERR(di);
+ ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
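
The __btrfs_unlink_inode() and btrfs_unlink_subvol() hunks above collapse separate NULL and IS_ERR() checks into the single "ret = di ? PTR_ERR(di) : -ENOENT;" idiom. The same pattern in generic form, with an assumed lookup helper that can return NULL, an ERR_PTR() value, or a valid item:

struct dir_handle;
struct dir_item;

/* assumed helper: NULL means not found, ERR_PTR() means hard error */
struct dir_item *lookup_item(struct dir_handle *dir);

static int find_item(struct dir_handle *dir, struct dir_item **out)
{
	struct dir_item *di = lookup_item(dir);

	if (IS_ERR_OR_NULL(di))
		return di ? PTR_ERR(di) : -ENOENT;
	*out = di;
	return 0;
}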
@@ -4270,18 +4266,17 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
* again is not run concurrently.
*/
spin_lock(&dest->root_item_lock);
- root_flags = btrfs_root_flags(&dest->root_item);
- if (dest->send_in_progress == 0) {
- btrfs_set_root_flags(&dest->root_item,
- root_flags | BTRFS_ROOT_SUBVOL_DEAD);
- spin_unlock(&dest->root_item_lock);
- } else {
+ if (dest->send_in_progress) {
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu during send",
dest->root_key.objectid);
return -EPERM;
}
+ root_flags = btrfs_root_flags(&dest->root_item);
+ btrfs_set_root_flags(&dest->root_item,
+ root_flags | BTRFS_ROOT_SUBVOL_DEAD);
+ spin_unlock(&dest->root_item_lock);
down_write(&fs_info->subvol_sem);
@@ -4727,7 +4722,7 @@ delete:
btrfs_abort_transaction(trans, ret);
break;
}
- if (btrfs_should_throttle_delayed_refs(trans, fs_info))
+ if (btrfs_should_throttle_delayed_refs(trans))
btrfs_async_run_delayed_refs(fs_info,
trans->delayed_ref_updates * 2,
trans->transid, 0);
@@ -4736,8 +4731,7 @@ delete:
extent_num_bytes)) {
should_end = true;
}
- if (btrfs_should_throttle_delayed_refs(trans,
- fs_info))
+ if (btrfs_should_throttle_delayed_refs(trans))
should_throttle = true;
}
}
@@ -5235,10 +5229,10 @@ static void evict_inode_truncate_pages(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
write_lock(&map_tree->lock);
- while (!RB_EMPTY_ROOT(&map_tree->map)) {
+ while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
struct extent_map *em;
- node = rb_first(&map_tree->map);
+ node = rb_first_cached(&map_tree->map);
em = rb_entry(node, struct extent_map, rb_node);
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
@@ -5274,11 +5268,13 @@ static void evict_inode_truncate_pages(struct inode *inode)
struct extent_state *cached_state = NULL;
u64 start;
u64 end;
+ unsigned state_flags;
node = rb_first(&io_tree->state);
state = rb_entry(node, struct extent_state, rb_node);
start = state->start;
end = state->end;
+ state_flags = state->state;
spin_unlock(&io_tree->lock);
lock_extent_bits(io_tree, start, end, &cached_state);
@@ -5291,7 +5287,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
*
* Note, end is the bytenr of last byte, so we need + 1 here.
*/
- if (state->state & EXTENT_DELALLOC)
+ if (state_flags & EXTENT_DELALLOC)
btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
clear_extent_bit(io_tree, start, end,
@@ -5306,8 +5302,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
}
static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv,
- u64 min_size)
+ struct btrfs_block_rsv *rsv)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
@@ -5317,7 +5312,7 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
struct btrfs_trans_handle *trans;
int ret;
- ret = btrfs_block_rsv_refill(root, rsv, min_size,
+ ret = btrfs_block_rsv_refill(root, rsv, rsv->size,
BTRFS_RESERVE_FLUSH_LIMIT);
if (ret && ++failures > 2) {
@@ -5334,8 +5329,8 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
* Try to steal from the global reserve if there is space for
* it.
*/
- if (!btrfs_check_space_for_delayed_refs(trans, fs_info) &&
- !btrfs_block_rsv_migrate(global_rsv, rsv, min_size, 0))
+ if (!btrfs_check_space_for_delayed_refs(trans) &&
+ !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, false))
return trans;
/* If not, commit and try again. */
@@ -5351,7 +5346,6 @@ void btrfs_evict_inode(struct inode *inode)
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv;
- u64 min_size;
int ret;
trace_btrfs_inode_evict(inode);
@@ -5361,8 +5355,6 @@ void btrfs_evict_inode(struct inode *inode)
return;
}
- min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
-
evict_inode_truncate_pages(inode);
if (inode->i_nlink &&
@@ -5373,9 +5365,6 @@ void btrfs_evict_inode(struct inode *inode)
if (is_bad_inode(inode))
goto no_delete;
- /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
- if (!special_file(inode->i_mode))
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
@@ -5395,13 +5384,13 @@ void btrfs_evict_inode(struct inode *inode)
rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv)
goto no_delete;
- rsv->size = min_size;
+ rsv->size = btrfs_calc_trunc_metadata_size(fs_info, 1);
rsv->failfast = 1;
btrfs_i_size_write(BTRFS_I(inode), 0);
while (1) {
- trans = evict_refill_and_join(root, rsv, min_size);
+ trans = evict_refill_and_join(root, rsv);
if (IS_ERR(trans))
goto free_rsv;
@@ -5426,7 +5415,7 @@ void btrfs_evict_inode(struct inode *inode)
* If it turns out that we are dropping too many of these, we might want
* to add a mechanism for retrying these after a commit.
*/
- trans = evict_refill_and_join(root, rsv, min_size);
+ trans = evict_refill_and_join(root, rsv);
if (!IS_ERR(trans)) {
trans->block_rsv = rsv;
btrfs_orphan_del(trans, BTRFS_I(inode));
@@ -5471,12 +5460,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
name, namelen, 0);
- if (!di) {
- ret = -ENOENT;
- goto out;
- }
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
+ if (IS_ERR_OR_NULL(di)) {
+ ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
@@ -5790,16 +5775,10 @@ static int btrfs_dentry_delete(const struct dentry *dentry)
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct inode *inode;
-
- inode = btrfs_lookup_dentry(dir, dentry);
- if (IS_ERR(inode)) {
- if (PTR_ERR(inode) == -ENOENT)
- inode = NULL;
- else
- return ERR_CAST(inode);
- }
+ struct inode *inode = btrfs_lookup_dentry(dir, dentry);
+ if (inode == ERR_PTR(-ENOENT))
+ inode = NULL;
return d_splice_alias(inode, dentry);
}
@@ -6390,8 +6369,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = btrfs_insert_dir_item(trans, root, name, name_len,
- parent_inode, &key,
+ ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
btrfs_inode_type(&inode->vfs_inode), index);
if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
@@ -6584,7 +6562,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
int drop_inode = 0;
/* do not allow sys_link's with other subvols of the same device */
- if (root->objectid != BTRFS_I(inode)->root->objectid)
+ if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
return -EXDEV;
if (inode->i_nlink >= BTRFS_LINK_MAX)
@@ -6777,9 +6755,9 @@ static noinline int uncompress_inline(struct btrfs_path *path,
* This also copies inline extents directly into the page.
*/
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
- struct page *page,
- size_t pg_offset, u64 start, u64 len,
- int create)
+ struct page *page,
+ size_t pg_offset, u64 start, u64 len,
+ int create)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
@@ -6823,19 +6801,21 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
em->len = (u64)-1;
em->block_len = (u64)-1;
+ path = btrfs_alloc_path();
if (!path) {
- path = btrfs_alloc_path();
- if (!path) {
- err = -ENOMEM;
- goto out;
- }
- /*
- * Chances are we'll be called again, so go ahead and do
- * readahead
- */
- path->reada = READA_FORWARD;
+ err = -ENOMEM;
+ goto out;
}
+ /* Chances are we'll be called again, so go ahead and do readahead */
+ path->reada = READA_FORWARD;
+
+ /*
+ * Unless we're going to uncompress the inline extent, no sleep would
+ * happen.
+ */
+ path->leave_spinning = 1;
+
ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
if (ret < 0) {
err = ret;
@@ -6938,6 +6918,8 @@ next:
em->orig_block_len = em->len;
em->orig_start = em->start;
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
+
+ btrfs_set_path_blocking(path);
if (!PageUptodate(page)) {
if (btrfs_file_extent_compression(leaf, item) !=
BTRFS_COMPRESS_NONE) {
@@ -6985,10 +6967,10 @@ insert:
err = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
write_unlock(&em_tree->lock);
out:
+ btrfs_free_path(path);
trace_btrfs_get_extent(root, inode, em);
- btrfs_free_path(path);
if (err) {
free_extent_map(em);
return ERR_PTR(err);
@@ -9021,7 +9003,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
/* Migrate the slack space for the truncate to our reserve */
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
- min_size, 0);
+ min_size, false);
BUG_ON(ret);
/*
@@ -9058,7 +9040,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
btrfs_block_rsv_release(fs_info, rsv, -1);
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, 0);
+ rsv, min_size, false);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
}
@@ -10191,7 +10173,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
inode->i_op = &btrfs_symlink_inode_operations;
inode_nohighmem(inode);
- inode->i_mapping->a_ops = &btrfs_symlink_aops;
+ inode->i_mapping->a_ops = &btrfs_aops;
inode_set_bytes(inode, name_len);
btrfs_i_size_write(BTRFS_I(inode), name_len);
err = btrfs_update_inode(trans, root, inode);
@@ -10567,13 +10549,6 @@ static const struct address_space_operations btrfs_aops = {
.error_remove_page = generic_error_remove_page,
};
-static const struct address_space_operations btrfs_symlink_aops = {
- .readpage = btrfs_readpage,
- .writepage = btrfs_writepage,
- .invalidatepage = btrfs_invalidatepage,
- .releasepage = btrfs_releasepage,
-};
-
static const struct inode_operations btrfs_file_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d60b6caf09e8..a990a9045139 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -491,7 +491,6 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
struct fstrim_range range;
u64 minlen = ULLONG_MAX;
u64 num_devices = 0;
- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -515,11 +514,15 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return -EOPNOTSUPP;
if (copy_from_user(&range, arg, sizeof(range)))
return -EFAULT;
- if (range.start > total_bytes ||
- range.len < fs_info->sb->s_blocksize)
+
+ /*
+ * NOTE: Don't truncate the range using super->total_bytes. The bytenr of
+ * a block group is in the logical address space, which can be any
+ * sectorsize-aligned bytenr in the range [0, U64_MAX].
+ */
+ if (range.len < fs_info->sb->s_blocksize)
return -EINVAL;
- range.len = min(range.len, total_bytes - range.start);
range.minlen = max(range.minlen, minlen);
ret = btrfs_trim_fs(fs_info, &range);
if (ret < 0)
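
With the clamp against super->total_bytes gone, only range.len is validated here and the trimmed length is whatever btrfs_trim_fs() reports back. A hedged userspace sketch of driving this path through the FITRIM ioctl, assuming the standard struct fstrim_range ABI (the mount point is an assumption):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		struct fstrim_range range;
		int fd = open("/mnt/btrfs", O_RDONLY);

		if (fd < 0)
			return 1;
		memset(&range, 0, sizeof(range));
		range.len = (__u64)-1;	/* trim the whole logical address space */
		if (ioctl(fd, FITRIM, &range) < 0)
			perror("FITRIM");
		else
			printf("trimmed %llu bytes\n", (unsigned long long)range.len);
		return 0;
	}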
@@ -686,8 +689,7 @@ static noinline int create_subvol(struct inode *dir,
goto fail;
}
- ret = btrfs_insert_dir_item(trans, root,
- name, namelen, BTRFS_I(dir), &key,
+ ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
BTRFS_FT_DIR, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -1324,7 +1326,7 @@ again:
if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents++;
+ btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode, data_reserved,
start_index << PAGE_SHIFT,
@@ -4393,7 +4395,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
ret = PTR_ERR(new_root);
goto out;
}
- if (!is_fstree(new_root->objectid)) {
+ if (!is_fstree(new_root->root_key.objectid)) {
ret = -ENOENT;
goto out;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index d4917c0cddf5..45868fd76209 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1416,13 +1416,14 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
if (!qgroup) {
ret = -ENOENT;
goto out;
- } else {
- /* check if there are no children of this qgroup */
- if (!list_empty(&qgroup->members)) {
- ret = -EBUSY;
- goto out;
- }
}
+
+ /* Check if there are no children of this qgroup */
+ if (!list_empty(&qgroup->members)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
ret = del_qgroup_item(trans, qgroupid);
if (ret && ret != -ENOENT)
goto out;
@@ -1712,6 +1713,416 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
return 0;
}
+/*
+ * Helper function to trace a subtree tree block swap.
+ *
+ * The swap will happen in highest tree block, but there may be a lot of
+ * tree blocks involved.
+ *
+ * For example:
+ * OO = Old tree blocks
+ * NN = New tree blocks allocated during balance
+ *
+ * File tree (257) Reloc tree for 257
+ * L2 OO NN
+ * / \ / \
+ * L1 OO OO (a) OO NN (a)
+ * / \ / \ / \ / \
+ * L0 OO OO OO OO OO OO NN NN
+ * (b) (c) (b) (c)
+ *
+ * When calling qgroup_trace_extent_swap(), we will pass:
+ * @src_eb = OO(a)
+ * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
+ * @dst_level = 0
+ * @root_level = 1
+ *
+ * In that case, qgroup_trace_extent_swap() will search from OO(a) to
+ * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
+ *
+ * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
+ *
+ * 1) Tree search from @src_eb
+ * It should act as a simplified btrfs_search_slot().
+ * The key for search can be extracted from @dst_path->nodes[dst_level]
+ * (first key).
+ *
+ * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
+ * NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
+ * They should have been marked during the previous (@dst_level = 1) iteration.
+ *
+ * 3) Mark file extents in leaves dirty
+ * We don't have a good way to pick out new file extents only,
+ * so we still follow the old method of scanning all file extents
+ * in the leaf.
+ *
+ * This function can free us from keeping two paths, thus later we only need
+ * to care about how to iterate all new tree blocks in reloc tree.
+ */
+static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
+ struct extent_buffer *src_eb,
+ struct btrfs_path *dst_path,
+ int dst_level, int root_level,
+ bool trace_leaf)
+{
+ struct btrfs_key key;
+ struct btrfs_path *src_path;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ u32 nodesize = fs_info->nodesize;
+ int cur_level = root_level;
+ int ret;
+
+ BUG_ON(dst_level > root_level);
+ /* Level mismatch */
+ if (btrfs_header_level(src_eb) != root_level)
+ return -EINVAL;
+
+ src_path = btrfs_alloc_path();
+ if (!src_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (dst_level)
+ btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
+ else
+ btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
+
+ /* For src_path */
+ extent_buffer_get(src_eb);
+ src_path->nodes[root_level] = src_eb;
+ src_path->slots[root_level] = dst_path->slots[root_level];
+ src_path->locks[root_level] = 0;
+
+ /* A simplified version of btrfs_search_slot() */
+ while (cur_level >= dst_level) {
+ struct btrfs_key src_key;
+ struct btrfs_key dst_key;
+
+ if (src_path->nodes[cur_level] == NULL) {
+ struct btrfs_key first_key;
+ struct extent_buffer *eb;
+ int parent_slot;
+ u64 child_gen;
+ u64 child_bytenr;
+
+ eb = src_path->nodes[cur_level + 1];
+ parent_slot = src_path->slots[cur_level + 1];
+ child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+ child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+ btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
+
+ eb = read_tree_block(fs_info, child_bytenr, child_gen,
+ cur_level, &first_key);
+ if (IS_ERR(eb)) {
+ ret = PTR_ERR(eb);
+ goto out;
+ } else if (!extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ ret = -EIO;
+ goto out;
+ }
+
+ src_path->nodes[cur_level] = eb;
+
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+ }
+
+ src_path->slots[cur_level] = dst_path->slots[cur_level];
+ if (cur_level) {
+ btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
+ &dst_key, dst_path->slots[cur_level]);
+ btrfs_node_key_to_cpu(src_path->nodes[cur_level],
+ &src_key, src_path->slots[cur_level]);
+ } else {
+ btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
+ &dst_key, dst_path->slots[cur_level]);
+ btrfs_item_key_to_cpu(src_path->nodes[cur_level],
+ &src_key, src_path->slots[cur_level]);
+ }
+ /* Content mismatch, something went wrong */
+ if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
+ ret = -ENOENT;
+ goto out;
+ }
+ cur_level--;
+ }
+
+ /*
+ * Now both @dst_path and @src_path have been populated, record the tree
+ * blocks for qgroup accounting.
+ */
+ ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
+ nodesize, GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ ret = btrfs_qgroup_trace_extent(trans,
+ dst_path->nodes[dst_level]->start,
+ nodesize, GFP_NOFS);
+ if (ret < 0)
+ goto out;
+
+ /* Record leaf file extents */
+ if (dst_level == 0 && trace_leaf) {
+ ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
+ if (ret < 0)
+ goto out;
+ ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
+ }
+out:
+ btrfs_free_path(src_path);
+ return ret;
+}
+
+/*
+ * Helper function to do recursive generation-aware depth-first search, to
+ * locate all new tree blocks in a subtree of reloc tree.
+ *
+ * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
+ * reloc tree
+ * L2 NN (a)
+ * / \
+ * L1 OO NN (b)
+ * / \ / \
+ * L0 OO OO OO NN
+ * (c) (d)
+ * If we pass:
+ * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
+ * @cur_level = 1
+ * @root_level = 1
+ *
+ * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to
+ * trace the above tree blocks along with their counterparts in the file
+ * tree. During the search, old tree blocks OO(c) will be skipped as the
+ * tree block swap won't affect OO(c).
+ */
+static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
+ struct extent_buffer *src_eb,
+ struct btrfs_path *dst_path,
+ int cur_level, int root_level,
+ u64 last_snapshot, bool trace_leaf)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct extent_buffer *eb;
+ bool need_cleanup = false;
+ int ret = 0;
+ int i;
+
+ /* Level sanity check */
+ if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
+ root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
+ root_level < cur_level) {
+ btrfs_err_rl(fs_info,
+ "%s: bad levels, cur_level=%d root_level=%d",
+ __func__, cur_level, root_level);
+ return -EUCLEAN;
+ }
+
+ /* Read the tree block if needed */
+ if (dst_path->nodes[cur_level] == NULL) {
+ struct btrfs_key first_key;
+ int parent_slot;
+ u64 child_gen;
+ u64 child_bytenr;
+
+ /*
+ * dst_path->nodes[root_level] must be initialized before
+ * calling this function.
+ */
+ if (cur_level == root_level) {
+ btrfs_err_rl(fs_info,
+ "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
+ __func__, root_level, root_level, cur_level);
+ return -EUCLEAN;
+ }
+
+ /*
+ * We need to get child blockptr/gen from parent before we can
+ * read it.
+ */
+ eb = dst_path->nodes[cur_level + 1];
+ parent_slot = dst_path->slots[cur_level + 1];
+ child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+ child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+ btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
+
+ /* This node is old, no need to trace */
+ if (child_gen < last_snapshot)
+ goto out;
+
+ eb = read_tree_block(fs_info, child_bytenr, child_gen,
+ cur_level, &first_key);
+ if (IS_ERR(eb)) {
+ ret = PTR_ERR(eb);
+ goto out;
+ } else if (!extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ ret = -EIO;
+ goto out;
+ }
+
+ dst_path->nodes[cur_level] = eb;
+ dst_path->slots[cur_level] = 0;
+
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+ need_cleanup = true;
+ }
+
+ /* Now record this tree block and its counterpart for qgroups */
+ ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
+ root_level, trace_leaf);
+ if (ret < 0)
+ goto cleanup;
+
+ eb = dst_path->nodes[cur_level];
+
+ if (cur_level > 0) {
+ /* Iterate all child tree blocks */
+ for (i = 0; i < btrfs_header_nritems(eb); i++) {
+ /* Skip old tree blocks as they won't be swapped */
+ if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
+ continue;
+ dst_path->slots[cur_level] = i;
+
+ /* Recursive call (at most 7 times) */
+ ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
+ dst_path, cur_level - 1, root_level,
+ last_snapshot, trace_leaf);
+ if (ret < 0)
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ if (need_cleanup) {
+ /* Clean up */
+ btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
+ dst_path->locks[cur_level]);
+ free_extent_buffer(dst_path->nodes[cur_level]);
+ dst_path->nodes[cur_level] = NULL;
+ dst_path->slots[cur_level] = 0;
+ dst_path->locks[cur_level] = 0;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Inform qgroup to trace subtree swap used in balance.
+ *
+ * Unlike btrfs_qgroup_trace_subtree(), this function will only trace
+ * new tree blocks whose generation is equal to (or larger than) @last_snapshot.
+ *
+ * Will go down the tree block pointed to by @dst_eb (pointed to by @dst_parent
+ * and @dst_slot), find any tree blocks whose generation is at or above
+ * @last_snapshot, and then go down @src_eb (pointed to by @src_parent and
+ * @src_slot) to find the counterpart of the tree block, then mark both tree
+ * blocks as qgroup dirty, and skip all tree blocks whose generation is
+ * smaller than last_snapshot.
+ *
+ * This skips a lot of the tree blocks that the original
+ * btrfs_qgroup_trace_subtree() would trace, which could be the cause of a
+ * very slow balance if the file tree is large.
+ *
+ * @src_parent, @src_slot: pointer to src (file tree) eb.
+ * @dst_parent, @dst_slot: pointer to dst (reloc tree) eb.
+ */
+int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *bg_cache,
+ struct extent_buffer *src_parent, int src_slot,
+ struct extent_buffer *dst_parent, int dst_slot,
+ u64 last_snapshot)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_path *dst_path = NULL;
+ struct btrfs_key first_key;
+ struct extent_buffer *src_eb = NULL;
+ struct extent_buffer *dst_eb = NULL;
+ bool trace_leaf = false;
+ u64 child_gen;
+ u64 child_bytenr;
+ int level;
+ int ret;
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return 0;
+
+ /* Check parameter order */
+ if (btrfs_node_ptr_generation(src_parent, src_slot) >
+ btrfs_node_ptr_generation(dst_parent, dst_slot)) {
+ btrfs_err_rl(fs_info,
+ "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
+ btrfs_node_ptr_generation(src_parent, src_slot),
+ btrfs_node_ptr_generation(dst_parent, dst_slot));
+ return -EUCLEAN;
+ }
+
+ /*
+ * Only trace leaves if we're relocating data block groups, this can
+ * reduce a lot of data extent tracing for meta/sys block group relocation.
+ */
+ if (bg_cache->flags & BTRFS_BLOCK_GROUP_DATA)
+ trace_leaf = true;
+ /* Read out real @src_eb, pointed to by @src_parent and @src_slot */
+ child_bytenr = btrfs_node_blockptr(src_parent, src_slot);
+ child_gen = btrfs_node_ptr_generation(src_parent, src_slot);
+ btrfs_node_key_to_cpu(src_parent, &first_key, src_slot);
+
+ src_eb = read_tree_block(fs_info, child_bytenr, child_gen,
+ btrfs_header_level(src_parent) - 1, &first_key);
+ if (IS_ERR(src_eb)) {
+ ret = PTR_ERR(src_eb);
+ goto out;
+ }
+
+ /* Read out real @dst_eb, pointed to by @dst_parent and @dst_slot */
+ child_bytenr = btrfs_node_blockptr(dst_parent, dst_slot);
+ child_gen = btrfs_node_ptr_generation(dst_parent, dst_slot);
+ btrfs_node_key_to_cpu(dst_parent, &first_key, dst_slot);
+
+ dst_eb = read_tree_block(fs_info, child_bytenr, child_gen,
+ btrfs_header_level(dst_parent) - 1, &first_key);
+ if (IS_ERR(dst_eb)) {
+ ret = PTR_ERR(dst_eb);
+ goto out;
+ }
+
+ if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ level = btrfs_header_level(dst_eb);
+ dst_path = btrfs_alloc_path();
+ if (!dst_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* For dst_path */
+ extent_buffer_get(dst_eb);
+ dst_path->nodes[level] = dst_eb;
+ dst_path->slots[level] = 0;
+ dst_path->locks[level] = 0;
+
+ /* Do the generation-aware depth-first search */
+ ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
+ level, last_snapshot, trace_leaf);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ free_extent_buffer(src_eb);
+ free_extent_buffer(dst_eb);
+ btrfs_free_path(dst_path);
+ if (ret < 0)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ return ret;
+}
+
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level)
@@ -2132,6 +2543,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
struct btrfs_delayed_ref_root *delayed_refs;
struct ulist *new_roots = NULL;
struct rb_node *node;
+ u64 num_dirty_extents = 0;
u64 qgroup_to_skip;
int ret = 0;
@@ -2141,6 +2553,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
record = rb_entry(node, struct btrfs_qgroup_extent_record,
node);
+ num_dirty_extents++;
trace_btrfs_qgroup_account_extents(fs_info, record);
if (!ret) {
@@ -2186,6 +2599,8 @@ cleanup:
kfree(record);
}
+ trace_qgroup_num_dirty_extents(fs_info, trans->transid,
+ num_dirty_extents);
return ret;
}
@@ -2897,6 +3312,7 @@ qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
qgroup->rfer_cmpr = 0;
qgroup->excl = 0;
qgroup->excl_cmpr = 0;
+ qgroup_dirty(fs_info, qgroup);
}
spin_unlock(&fs_info->qgroup_lock);
}
@@ -3004,7 +3420,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
int ret;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
- !is_fstree(root->objectid) || len == 0)
+ !is_fstree(root->root_key.objectid) || len == 0)
return 0;
/* @reserved parameter is mandatory for qgroup */
@@ -3090,7 +3506,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
goto out;
freed += changeset.bytes_changed;
}
- btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed,
+ btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
BTRFS_QGROUP_RSV_DATA);
ret = freed;
out:
@@ -3106,6 +3522,10 @@ static int __btrfs_qgroup_release_data(struct inode *inode,
int trace_op = QGROUP_RELEASE;
int ret;
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
+ &BTRFS_I(inode)->root->fs_info->flags))
+ return 0;
+
/* In release case, we shouldn't have @reserved */
WARN_ON(!free && reserved);
if (free && reserved)
@@ -3122,7 +3542,7 @@ static int __btrfs_qgroup_release_data(struct inode *inode,
changeset.bytes_changed, trace_op);
if (free)
btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
- BTRFS_I(inode)->root->objectid,
+ BTRFS_I(inode)->root->root_key.objectid,
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
ret = changeset.bytes_changed;
out:
@@ -3215,7 +3635,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
int ret;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->objectid) || num_bytes == 0)
+ !is_fstree(root->root_key.objectid) || num_bytes == 0)
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
@@ -3240,13 +3660,13 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->objectid))
+ !is_fstree(root->root_key.objectid))
return;
/* TODO: Update trace point to handle such free */
trace_qgroup_meta_free_all_pertrans(root);
/* Special value -1 means to free all reserved space */
- btrfs_qgroup_free_refroot(fs_info, root->objectid, (u64)-1,
+ btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
BTRFS_QGROUP_RSV_META_PERTRANS);
}
@@ -3256,7 +3676,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->objectid))
+ !is_fstree(root->root_key.objectid))
return;
/*
@@ -3267,7 +3687,8 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
num_bytes = sub_root_meta_rsv(root, num_bytes, type);
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
- btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes, type);
+ btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
+ num_bytes, type);
}
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
@@ -3321,13 +3742,13 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->objectid))
+ !is_fstree(root->root_key.objectid))
return;
/* Same as btrfs_qgroup_free_meta_prealloc() */
num_bytes = sub_root_meta_rsv(root, num_bytes,
BTRFS_QGROUP_RSV_META_PREALLOC);
trace_qgroup_meta_convert(root, num_bytes);
- qgroup_convert_meta(fs_info, root->objectid, num_bytes);
+ qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
/*
@@ -3354,7 +3775,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
inode->i_ino, unode->val, unode->aux);
}
btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
- BTRFS_I(inode)->root->objectid,
+ BTRFS_I(inode)->root->root_key.objectid,
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 54b8bb282c0e..d8f78f5ab854 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -236,6 +236,12 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
+
+int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *bg_cache,
+ struct extent_buffer *src_parent, int src_slot,
+ struct extent_buffer *dst_parent, int dst_slot,
+ u64 last_snapshot);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
u64 num_bytes, struct ulist *old_roots,
struct ulist *new_roots);
@@ -249,6 +255,8 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes)
{
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return;
trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes,
BTRFS_QGROUP_RSV_DATA);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index e5b9e596bb92..d69fbfb30aa9 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -732,7 +732,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
INIT_LIST_HEAD(&ra->list);
ra->action = action;
- ra->root = root->objectid;
+ ra->root = root->root_key.objectid;
/*
* This is an allocation, preallocate the block_entry in case we haven't
@@ -787,8 +787,8 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
* one we want to lookup below when we modify the
* re->num_refs.
*/
- ref_root = root->objectid;
- re->root_objectid = root->objectid;
+ ref_root = root->root_key.objectid;
+ re->root_objectid = root->root_key.objectid;
re->num_refs = 0;
}
@@ -862,7 +862,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
* didn't think of some other corner case.
*/
btrfs_err(fs_info, "failed to find root %llu for %llu",
- root->objectid, be->bytenr);
+ root->root_key.objectid, be->bytenr);
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
kfree(ra);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8783a1776540..924116f654a1 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -648,8 +648,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
int level, u64 bytenr)
{
struct backref_cache *cache = &rc->backref_cache;
- struct btrfs_path *path1;
- struct btrfs_path *path2;
+ struct btrfs_path *path1; /* For searching extent root */
+ struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
struct extent_buffer *eb;
struct btrfs_root *root;
struct backref_node *cur;
@@ -662,7 +662,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
struct btrfs_key key;
unsigned long end;
unsigned long ptr;
- LIST_HEAD(list);
+ LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
LIST_HEAD(useless);
int cowonly;
int ret;
@@ -778,6 +778,10 @@ again:
key.type != BTRFS_SHARED_BLOCK_REF_KEY);
}
+ /*
+ * Parent node found and matches current inline ref, no need to
+ * rebuild this node for this inline ref.
+ */
if (exist &&
((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
exist->owner == key.offset) ||
@@ -787,11 +791,12 @@ again:
goto next;
}
+ /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
if (key.objectid == key.offset) {
/*
- * only root blocks of reloc trees use
- * backref of this type.
+ * Only root blocks of reloc trees use a backref
+ * pointing to themselves.
*/
root = find_reloc_root(rc, cur->bytenr);
ASSERT(root);
@@ -840,7 +845,11 @@ again:
goto next;
}
- /* key.type == BTRFS_TREE_BLOCK_REF_KEY */
+ /*
+ * key.type == BTRFS_TREE_BLOCK_REF_KEY, the inline ref offset
+ * is the root objectid. We need to search the tree to get
+ * its parent bytenr.
+ */
root = read_fs_root(rc->extent_root->fs_info, key.offset);
if (IS_ERR(root)) {
err = PTR_ERR(root);
@@ -863,10 +872,7 @@ again:
level = cur->level + 1;
- /*
- * searching the tree to find upper level blocks
- * reference the block.
- */
+ /* Search the tree to find parent blocks referring to the block. */
path2->search_commit_root = 1;
path2->skip_locking = 1;
path2->lowest_level = level;
@@ -884,7 +890,8 @@ again:
cur->bytenr) {
btrfs_err(root->fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
- cur->bytenr, level - 1, root->objectid,
+ cur->bytenr, level - 1,
+ root->root_key.objectid,
node_key->objectid, node_key->type,
node_key->offset);
err = -ENOENT;
@@ -892,6 +899,8 @@ again:
}
lower = cur;
need_check = true;
+
+ /* Add all nodes and edges in the path */
for (; level < BTRFS_MAX_LEVEL; level++) {
if (!path2->nodes[level]) {
ASSERT(btrfs_root_bytenr(&root->root_item) ==
@@ -1281,7 +1290,7 @@ static void __del_reloc_root(struct btrfs_root *root)
struct mapping_node *node = NULL;
struct reloc_control *rc = fs_info->reloc_ctl;
- if (rc) {
+ if (rc && root->node) {
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
root->node->start);
@@ -1735,7 +1744,7 @@ int memcmp_node_keys(struct extent_buffer *eb, int slot,
* errors, a negative error number is returned.
*/
static noinline_for_stack
-int replace_path(struct btrfs_trans_handle *trans,
+int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
struct btrfs_root *dest, struct btrfs_root *src,
struct btrfs_path *path, struct btrfs_key *next_key,
int lowest_level, int max_level)
@@ -1879,14 +1888,9 @@ again:
* and tree block numbers, if current trans doesn't free
* data reloc tree inode.
*/
- ret = btrfs_qgroup_trace_subtree(trans, parent,
- btrfs_header_generation(parent),
- btrfs_header_level(parent));
- if (ret < 0)
- break;
- ret = btrfs_qgroup_trace_subtree(trans, path->nodes[level],
- btrfs_header_generation(path->nodes[level]),
- btrfs_header_level(path->nodes[level]));
+ ret = btrfs_qgroup_trace_subtree_swap(trans, rc->block_group,
+ parent, slot, path->nodes[level],
+ path->slots[level], last_snapshot);
if (ret < 0)
break;
@@ -2205,7 +2209,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
ret = 0;
} else {
- ret = replace_path(trans, root, reloc_root, path,
+ ret = replace_path(trans, rc, root, reloc_root, path,
&next_key, level, max_level);
}
if (ret < 0) {
@@ -2911,7 +2915,6 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
free_extent_buffer(eb);
return -EIO;
}
- WARN_ON(btrfs_header_level(eb) != block->level);
if (block->level == 0)
btrfs_item_key_to_cpu(eb, &block->key, 0);
else
@@ -2987,7 +2990,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct backref_node *node;
struct btrfs_path *path;
struct tree_block *block;
- struct rb_node *rb_node;
+ struct tree_block *next;
int ret;
int err = 0;
@@ -2997,29 +3000,23 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
goto out_free_blocks;
}
- rb_node = rb_first(blocks);
- while (rb_node) {
- block = rb_entry(rb_node, struct tree_block, rb_node);
+ /* Kick in readahead for tree blocks with missing keys */
+ rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
if (!block->key_ready)
readahead_tree_block(fs_info, block->bytenr);
- rb_node = rb_next(rb_node);
}
- rb_node = rb_first(blocks);
- while (rb_node) {
- block = rb_entry(rb_node, struct tree_block, rb_node);
+ /* Get first keys */
+ rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
if (!block->key_ready) {
err = get_tree_block_key(fs_info, block);
if (err)
goto out_free_path;
}
- rb_node = rb_next(rb_node);
}
- rb_node = rb_first(blocks);
- while (rb_node) {
- block = rb_entry(rb_node, struct tree_block, rb_node);
-
+ /* Do tree relocation */
+ rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
node = build_backref_tree(rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
@@ -3030,11 +3027,10 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
ret = relocate_tree_block(trans, rc, node, &block->key,
path);
if (ret < 0) {
- if (ret != -EAGAIN || rb_node == rb_first(blocks))
+ if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
err = ret;
goto out;
}
- rb_node = rb_next(rb_node);
}
out:
err = finish_pending_nodes(trans, rc, path, err);
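
The three open-coded rb_first()/rb_next() loops above become rbtree_postorder_for_each_entry_safe() iterations. A minimal sketch of that pattern, assuming the standard <linux/rbtree.h> helper (process_block() is a hypothetical callback):

	struct tree_block *block, *next;

	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		/* 'next' is captured before the body runs, so 'block' may be freed here */
		process_block(block);
	}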
@@ -4669,7 +4665,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
if (rc->merge_reloc_tree) {
ret = btrfs_block_rsv_migrate(&pending->block_rsv,
rc->block_rsv,
- rc->nodes_relocated, 1);
+ rc->nodes_relocated, true);
if (ret)
return ret;
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 3be1456b5116..902819d3cf41 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1124,7 +1124,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
if (scrub_write_page_to_dev_replace(sblock_other,
page_num) != 0) {
- btrfs_dev_replace_stats_inc(
+ atomic64_inc(
&fs_info->dev_replace.num_write_errors);
success = 0;
}
@@ -1564,8 +1564,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
if (btrfsic_submit_bio_wait(bio)) {
btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
- btrfs_dev_replace_stats_inc(
- &fs_info->dev_replace.num_write_errors);
+ atomic64_inc(&fs_info->dev_replace.num_write_errors);
bio_put(bio);
return -EIO;
}
@@ -1592,8 +1591,7 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
ret = scrub_write_page_to_dev_replace(sblock, page_num);
if (ret)
- btrfs_dev_replace_stats_inc(
- &fs_info->dev_replace.num_write_errors);
+ atomic64_inc(&fs_info->dev_replace.num_write_errors);
}
}
@@ -1726,8 +1724,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
struct scrub_page *spage = sbio->pagev[i];
spage->io_error = 1;
- btrfs_dev_replace_stats_inc(&dev_replace->
- num_write_errors);
+ atomic64_inc(&dev_replace->num_write_errors);
}
}
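
These scrub hunks drop the btrfs_dev_replace_stats_inc() wrapper in favor of the plain atomic64 API. A minimal sketch of the counter usage, assuming the standard <linux/atomic.h> interface:

	atomic64_t num_write_errors = ATOMIC64_INIT(0);

	atomic64_inc(&num_write_errors);		/* bump on every write error */
	if (atomic64_read(&num_write_errors) > 0)
		ret = -EIO;				/* and fail the replace later */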
@@ -3022,8 +3019,7 @@ out:
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct map_lookup *map,
struct btrfs_device *scrub_dev,
- int num, u64 base, u64 length,
- int is_dev_replace)
+ int num, u64 base, u64 length)
{
struct btrfs_path *path, *ppath;
struct btrfs_fs_info *fs_info = sctx->fs_info;
@@ -3299,7 +3295,7 @@ again:
extent_physical = extent_logical - logical + physical;
extent_dev = scrub_dev;
extent_mirror_num = mirror_num;
- if (is_dev_replace)
+ if (sctx->is_dev_replace)
scrub_remap_extent(fs_info, extent_logical,
extent_len, &extent_physical,
&extent_dev,
@@ -3397,8 +3393,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev,
u64 chunk_offset, u64 length,
u64 dev_offset,
- struct btrfs_block_group_cache *cache,
- int is_dev_replace)
+ struct btrfs_block_group_cache *cache)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
@@ -3435,8 +3430,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
map->stripes[i].physical == dev_offset) {
ret = scrub_stripe(sctx, map, scrub_dev, i,
- chunk_offset, length,
- is_dev_replace);
+ chunk_offset, length);
if (ret)
goto out;
}
@@ -3449,8 +3443,7 @@ out:
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
- struct btrfs_device *scrub_dev, u64 start, u64 end,
- int is_dev_replace)
+ struct btrfs_device *scrub_dev, u64 start, u64 end)
{
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path;
@@ -3544,7 +3537,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/
scrub_pause_on(fs_info);
ret = btrfs_inc_block_group_ro(cache);
- if (!ret && is_dev_replace) {
+ if (!ret && sctx->is_dev_replace) {
/*
* If we are doing a device replace wait for any tasks
* that started delalloc right before we set the block
@@ -3609,7 +3602,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
dev_replace->item_needs_writeback = 1;
btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
- found_key.offset, cache, is_dev_replace);
+ found_key.offset, cache);
/*
* flush, submit all pending read and write bios, afterwards
@@ -3670,7 +3663,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
btrfs_put_block_group(cache);
if (ret)
break;
- if (is_dev_replace &&
+ if (sctx->is_dev_replace &&
atomic64_read(&dev_replace->num_write_errors) > 0) {
ret = -EIO;
break;
@@ -3893,8 +3886,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
}
if (!ret)
- ret = scrub_enumerate_chunks(sctx, dev, start, end,
- is_dev_replace);
+ ret = scrub_enumerate_chunks(sctx, dev, start, end);
wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
atomic_dec(&fs_info->scrubs_running);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index ba8950bfd9c7..094cc1444a90 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1186,9 +1186,9 @@ static int __clone_root_cmp_bsearch(const void *key, const void *elt)
u64 root = (u64)(uintptr_t)key;
struct clone_root *cr = (struct clone_root *)elt;
- if (root < cr->root->objectid)
+ if (root < cr->root->root_key.objectid)
return -1;
- if (root > cr->root->objectid)
+ if (root > cr->root->root_key.objectid)
return 1;
return 0;
}
@@ -1198,9 +1198,9 @@ static int __clone_root_cmp_sort(const void *e1, const void *e2)
struct clone_root *cr1 = (struct clone_root *)e1;
struct clone_root *cr2 = (struct clone_root *)e2;
- if (cr1->root->objectid < cr2->root->objectid)
+ if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
return -1;
- if (cr1->root->objectid > cr2->root->objectid)
+ if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
return 1;
return 0;
}
@@ -1693,12 +1693,8 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
di = btrfs_lookup_dir_item(NULL, root, path,
dir, name, name_len, 0);
- if (!di) {
- ret = -ENOENT;
- goto out;
- }
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
+ if (IS_ERR_OR_NULL(di)) {
+ ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
@@ -2346,7 +2342,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
return -ENOMEM;
}
- key.objectid = send_root->objectid;
+ key.objectid = send_root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = 0;
@@ -2362,7 +2358,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.type != BTRFS_ROOT_BACKREF_KEY ||
- key.objectid != send_root->objectid) {
+ key.objectid != send_root->root_key.objectid) {
ret = -ENOENT;
goto out;
}
@@ -4907,8 +4903,8 @@ static int send_clone(struct send_ctx *sctx,
btrfs_debug(sctx->send_root->fs_info,
"send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
- offset, len, clone_root->root->objectid, clone_root->ino,
- clone_root->offset);
+ offset, len, clone_root->root->root_key.objectid,
+ clone_root->ino, clone_root->offset);
p = fs_path_alloc();
if (!p)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 6601c9aa5e35..b362b45dd757 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2177,8 +2177,10 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
/* Mask in the root object ID too, to disambiguate subvols */
- buf->f_fsid.val[0] ^= BTRFS_I(d_inode(dentry))->root->objectid >> 32;
- buf->f_fsid.val[1] ^= BTRFS_I(d_inode(dentry))->root->objectid;
+ buf->f_fsid.val[0] ^=
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
+ buf->f_fsid.val[1] ^=
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid;
return 0;
}
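
The statfs hunk above mixes the subvolume's root objectid into f_fsid, so different subvolumes of the same filesystem report distinct IDs. A hedged userspace sketch of observing that value (the path is an assumption and the __val accessor is glibc-specific):

	#include <stdio.h>
	#include <sys/vfs.h>

	int main(void)
	{
		struct statfs st;

		if (statfs("/mnt/btrfs/subvol", &st) == 0)
			printf("f_fsid: %x %x\n", st.f_fsid.__val[0], st.f_fsid.__val[1]);
		return 0;
	}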
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index d9269a531a4d..9e0f4a01be14 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -106,7 +106,7 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
start = 0;
end = 0;
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_err("should have found at least one delalloc");
@@ -137,7 +137,7 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
start = test_start;
end = 0;
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_err("couldn't find delalloc in our range");
@@ -171,7 +171,7 @@ static int test_find_delalloc(u32 sectorsize)
}
start = test_start;
end = 0;
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (found) {
test_err("found range when we shouldn't have");
@@ -192,7 +192,7 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
start = test_start;
end = 0;
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_err("didn't find our range");
@@ -233,7 +233,7 @@ static int test_find_delalloc(u32 sectorsize)
* this changes at any point in the future we will need to fix this
* tests expected behavior.
*/
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_err("didn't find our range");
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 385a5316e4bf..bf15d3a7f20e 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -12,8 +12,8 @@ static void free_extent_map_tree(struct extent_map_tree *em_tree)
struct extent_map *em;
struct rb_node *node;
- while (!RB_EMPTY_ROOT(&em_tree->map)) {
- node = rb_first(&em_tree->map);
+ while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
+ node = rb_first_cached(&em_tree->map);
em = rb_entry(node, struct extent_map, rb_node);
remove_extent_mapping(em_tree, em);
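
This test-helper hunk, like the extent_map and delayed-ref hunks elsewhere in the series, moves from struct rb_root to struct rb_root_cached so the leftmost node is tracked and rb_first_cached() is O(1). A minimal sketch of the cached-rbtree API, assuming the standard <linux/rbtree.h> interface (e, parent, link and leftmost are placeholders from an ordinary rbtree insert loop):

	struct rb_root_cached tree = RB_ROOT_CACHED;

	/* insert: link as usual, then tell the tree whether this is the new leftmost */
	rb_link_node(&e->rb_node, parent, link);
	rb_insert_color_cached(&e->rb_node, &tree, leftmost);

	/* cheap "first" lookup; emptiness is tested on the embedded rb_root */
	struct rb_node *first = rb_first_cached(&tree);
	bool empty = RB_EMPTY_ROOT(&tree.rb_root);

	rb_erase_cached(&e->rb_node, &tree);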
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3b84f5015029..d1eeef9ec5da 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -44,7 +44,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
WARN_ON(refcount_read(&transaction->use_count) == 0);
if (refcount_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
- WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
+ WARN_ON(!RB_EMPTY_ROOT(
+ &transaction->delayed_refs.href_root.rb_root));
if (transaction->delayed_refs.pending_csums)
btrfs_err(transaction->fs_info,
"pending csums is %llu",
@@ -118,7 +119,7 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans)
list_del_init(&root->dirty_list);
free_extent_buffer(root->commit_root);
root->commit_root = btrfs_root_node(root);
- if (is_fstree(root->objectid))
+ if (is_fstree(root->root_key.objectid))
btrfs_unpin_free_ino(root);
clear_btree_io_tree(&root->dirty_log_pages);
}
@@ -245,7 +246,7 @@ loop:
memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
- cur_trans->delayed_refs.href_root = RB_ROOT;
+ cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
atomic_set(&cur_trans->delayed_refs.num_entries, 0);
@@ -759,7 +760,7 @@ static int should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- if (btrfs_check_space_for_delayed_refs(trans, fs_info))
+ if (btrfs_check_space_for_delayed_refs(trans))
return 1;
return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
@@ -834,7 +835,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
trans->delayed_ref_updates = 0;
if (!trans->sync) {
must_run_delayed_refs =
- btrfs_should_throttle_delayed_refs(trans, info);
+ btrfs_should_throttle_delayed_refs(trans);
cur = max_t(unsigned long, cur, 32);
/*
@@ -1197,7 +1198,10 @@ again:
list_add_tail(&fs_info->extent_root->dirty_list,
&trans->transaction->switch_commits);
- btrfs_after_dev_replace_commit(fs_info);
+
+ /* Update dev-replace pointer once everything is committed */
+ fs_info->dev_replace.committed_cursor_left =
+ fs_info->dev_replace.cursor_left_last_write_of_item;
return 0;
}
@@ -1613,10 +1617,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
if (ret < 0)
goto fail;
- ret = btrfs_insert_dir_item(trans, parent_root,
- dentry->d_name.name, dentry->d_name.len,
- BTRFS_I(parent_inode), &key,
- BTRFS_FT_DIR, index);
+ ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
+ dentry->d_name.len, BTRFS_I(parent_inode),
+ &key, BTRFS_FT_DIR, index);
/* We have checked the name at the beginning, so it is impossible. */
BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
if (ret) {
@@ -1929,6 +1932,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
return ret;
}
+ btrfs_trans_release_metadata(trans);
+ trans->block_rsv = NULL;
+
/* make a pass through all the delayed refs we have so far
* any running procs may add more while we are here
*/
@@ -1938,9 +1944,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
return ret;
}
- btrfs_trans_release_metadata(trans);
- trans->block_rsv = NULL;
-
cur_trans = trans->transaction;
/*
@@ -2280,15 +2283,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
kmem_cache_free(btrfs_trans_handle_cachep, trans);
- /*
- * If fs has been frozen, we can not handle delayed iputs, otherwise
- * it'll result in deadlock about SB_FREEZE_FS.
- */
- if (current != fs_info->transaction_kthread &&
- current != fs_info->cleaner_kthread &&
- !test_bit(BTRFS_FS_FROZEN, &fs_info->flags))
- btrfs_run_delayed_iputs(fs_info);
-
return ret;
scrub_continue:
@@ -2330,7 +2324,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
list_del_init(&root->root_list);
spin_unlock(&fs_info->trans_lock);
- btrfs_debug(fs_info, "cleaner removing %llu", root->objectid);
+ btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
btrfs_kill_all_delayed_nodes(root);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index db835635372f..cab0b1f1f741 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -487,6 +487,13 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
u32 nritems = btrfs_header_nritems(leaf);
int slot;
+ if (btrfs_header_level(leaf) != 0) {
+ generic_err(fs_info, leaf, 0,
+ "invalid level for leaf, have %d expect 0",
+ btrfs_header_level(leaf));
+ return -EUCLEAN;
+ }
+
/*
* Extent buffers from a relocation tree have an owner field that
* corresponds to the subvolume tree they are based on. So just from an
@@ -645,9 +652,16 @@ int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node)
unsigned long nr = btrfs_header_nritems(node);
struct btrfs_key key, next_key;
int slot;
+ int level = btrfs_header_level(node);
u64 bytenr;
int ret = 0;
+ if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
+ generic_err(fs_info, node, 0,
+ "invalid level for node, have %d expect [1, %d]",
+ level, BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
btrfs_crit(fs_info,
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 3c2ae0e4f25a..e07f3376b7df 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -205,14 +205,11 @@ static int join_running_log_trans(struct btrfs_root *root)
* until you call btrfs_end_log_trans() or it makes any future
* log transactions wait until you call btrfs_end_log_trans()
*/
-int btrfs_pin_log_trans(struct btrfs_root *root)
+void btrfs_pin_log_trans(struct btrfs_root *root)
{
- int ret = -ENOENT;
-
mutex_lock(&root->log_mutex);
atomic_inc(&root->log_writers);
mutex_unlock(&root->log_mutex);
- return ret;
}
/*
@@ -258,6 +255,13 @@ struct walk_control {
/* what stage of the replay code we're currently in */
int stage;
+ /*
+ * Ignore any items from the inode currently being processed. Needs
+ * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
+ * the LOG_WALK_REPLAY_INODES stage.
+ */
+ bool ignore_cur_inode;
+
/* the root we are currently replaying */
struct btrfs_root *replay_dest;
@@ -2487,6 +2491,20 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
inode_item = btrfs_item_ptr(eb, i,
struct btrfs_inode_item);
+ /*
+ * If we have a tmpfile (O_TMPFILE) that got fsync'ed
+ * and never got linked before the fsync, skip it, as
+ * replaying it is pointless since it would be deleted
+ * later. We skip logging tmpfiles, but it's always
+ * possible we are replaying a log created with a kernel
+ * that used to log tmpfiles.
+ */
+ if (btrfs_inode_nlink(eb, inode_item) == 0) {
+ wc->ignore_cur_inode = true;
+ continue;
+ } else {
+ wc->ignore_cur_inode = false;
+ }
ret = replay_xattr_deletes(wc->trans, root, log,
path, key.objectid);
if (ret)
@@ -2524,16 +2542,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
root->fs_info->sectorsize);
ret = btrfs_drop_extents(wc->trans, root, inode,
from, (u64)-1, 1);
- /*
- * If the nlink count is zero here, the iput
- * will free the inode. We bump it to make
- * sure it doesn't get freed until the link
- * count fixup is done.
- */
if (!ret) {
- if (inode->i_nlink == 0)
- inc_nlink(inode);
- /* Update link count and nbytes. */
+ /* Update the inode's nbytes. */
ret = btrfs_update_inode(wc->trans,
root, inode);
}
@@ -2548,6 +2558,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
break;
}
+ if (wc->ignore_cur_inode)
+ continue;
+
if (key.type == BTRFS_DIR_INDEX_KEY &&
wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
ret = replay_one_dir_item(wc->trans, root, path,
@@ -3196,9 +3209,12 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
};
ret = walk_log_tree(trans, log, &wc);
- /* I don't think this can happen but just in case */
- if (ret)
- btrfs_abort_transaction(trans, ret);
+ if (ret) {
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(log->fs_info, ret, NULL);
+ }
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
@@ -4374,7 +4390,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&extents);
- down_write(&inode->dio_sem);
write_lock(&tree->lock);
test_gen = root->fs_info->last_trans_committed;
logged_start = start;
@@ -4440,7 +4455,6 @@ process:
}
WARN_ON(!list_empty(&extents));
write_unlock(&tree->lock);
- up_write(&inode->dio_sem);
btrfs_release_path(path);
if (!ret)
@@ -4636,7 +4650,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
ASSERT(len == i_size ||
(len == fs_info->sectorsize &&
btrfs_file_extent_compression(leaf, extent) !=
- BTRFS_COMPRESS_NONE));
+ BTRFS_COMPRESS_NONE) ||
+ (len < i_size && i_size < fs_info->sectorsize));
return 0;
}
@@ -5564,9 +5579,33 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
dir_inode = btrfs_iget(fs_info->sb, &inode_key,
root, NULL);
- /* If parent inode was deleted, skip it. */
- if (IS_ERR(dir_inode))
- continue;
+ /*
+ * If the parent inode was deleted, return an error to
+ * fall back to a transaction commit. This is to prevent
+ * getting an inode that was moved from one parent A to
+ * a parent B, got its former parent A deleted and then
+ * it got fsync'ed, from existing at both parents after
+ * a log replay (and the old parent still existing).
+ * Example:
+ *
+ * mkdir /mnt/A
+ * mkdir /mnt/B
+ * touch /mnt/B/bar
+ * sync
+ * mv /mnt/B/bar /mnt/A/bar
+ * mv -T /mnt/A /mnt/B
+ * fsync /mnt/B/bar
+ * <power fail>
+ *
+ * If we ignore the old parent B which got deleted,
+ * after a log replay we would have file bar linked
+ * at both parents and the old parent B would still
+ * exist.
+ */
+ if (IS_ERR(dir_inode)) {
+ ret = PTR_ERR(dir_inode);
+ goto out;
+ }
if (ctx)
ctx->log_new_dentries = false;
@@ -5640,7 +5679,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
if (ret)
goto end_no_trans;
- if (btrfs_inode_in_log(inode, trans->transid)) {
+ /*
+ * Skip already logged inodes or inodes corresponding to tmpfiles
+ * (since logging them is pointless, a link count of 0 means they
+ * will never be accessible).
+ */
+ if (btrfs_inode_in_log(inode, trans->transid) ||
+ inode->vfs_inode.i_nlink == 0) {
ret = BTRFS_NO_LOG_SYNC;
goto end_no_trans;
}
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 7ab9bb88a639..767765031e59 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -65,7 +65,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *inode, u64 dirid);
void btrfs_end_log_trans(struct btrfs_root *root);
-int btrfs_pin_log_trans(struct btrfs_root *root);
+void btrfs_pin_log_trans(struct btrfs_root *root);
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, struct btrfs_inode *inode,
int for_rename);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f4405e430da6..f435d397019e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1613,7 +1613,7 @@ static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
em_tree = &fs_info->mapping_tree.map_tree;
read_lock(&em_tree->lock);
- n = rb_last(&em_tree->map);
+ n = rb_last(&em_tree->map.rb_root);
if (n) {
em = rb_entry(n, struct extent_map, rb_node);
ret = em->start + em->len;
@@ -1854,6 +1854,24 @@ void btrfs_assign_next_active_device(struct btrfs_device *device,
fs_info->fs_devices->latest_bdev = next_device->bdev;
}
+/*
+ * Return btrfs_fs_devices::num_devices excluding the device that is
+ * currently being replaced.
+ */
+static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
+{
+ u64 num_devices = fs_info->fs_devices->num_devices;
+
+ btrfs_dev_replace_read_lock(&fs_info->dev_replace);
+ if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
+ ASSERT(num_devices > 1);
+ num_devices--;
+ }
+ btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+
+ return num_devices;
+}
+
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
u64 devid)
{
@@ -1865,22 +1883,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
mutex_lock(&uuid_mutex);
- num_devices = fs_devices->num_devices;
- btrfs_dev_replace_read_lock(&fs_info->dev_replace);
- if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
- WARN_ON(num_devices < 1);
- num_devices--;
- }
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ num_devices = btrfs_num_devices(fs_info);
ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
if (ret)
goto out;
- ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
- &device);
- if (ret)
+ device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
+
+ if (IS_ERR(device)) {
+ if (PTR_ERR(device) == -ENOENT &&
+ strcmp(device_path, "missing") == 0)
+ ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
+ else
+ ret = PTR_ERR(device);
goto out;
+ }
if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = BTRFS_ERROR_DEV_TGT_REPLACE;
@@ -2096,9 +2114,8 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
call_rcu(&tgtdev->rcu, free_device_rcu);
}
-static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
- const char *device_path,
- struct btrfs_device **device)
+static struct btrfs_device *btrfs_find_device_by_path(
+ struct btrfs_fs_info *fs_info, const char *device_path)
{
int ret = 0;
struct btrfs_super_block *disk_super;
@@ -2106,28 +2123,27 @@ static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
u8 *dev_uuid;
struct block_device *bdev;
struct buffer_head *bh;
+ struct btrfs_device *device;
- *device = NULL;
ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
fs_info->bdev_holder, 0, &bdev, &bh);
if (ret)
- return ret;
+ return ERR_PTR(ret);
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_uuid = disk_super->dev_item.uuid;
- *device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
+ device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
brelse(bh);
- if (!*device)
- ret = -ENOENT;
+ if (!device)
+ device = ERR_PTR(-ENOENT);
blkdev_put(bdev, FMODE_READ);
- return ret;
+ return device;
}
-int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
- const char *device_path,
- struct btrfs_device **device)
+static struct btrfs_device *btrfs_find_device_missing_or_by_path(
+ struct btrfs_fs_info *fs_info, const char *device_path)
{
- *device = NULL;
+ struct btrfs_device *device = NULL;
if (strcmp(device_path, "missing") == 0) {
struct list_head *devices;
struct btrfs_device *tmp;
@@ -2136,42 +2152,38 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
list_for_each_entry(tmp, devices, dev_list) {
if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
&tmp->dev_state) && !tmp->bdev) {
- *device = tmp;
+ device = tmp;
break;
}
}
- if (!*device)
- return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
-
- return 0;
+ if (!device)
+ return ERR_PTR(-ENOENT);
} else {
- return btrfs_find_device_by_path(fs_info, device_path, device);
+ device = btrfs_find_device_by_path(fs_info, device_path);
}
+
+ return device;
}
/*
* Lookup a device given by device id, or the path if the id is 0.
*/
-int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
- const char *devpath,
- struct btrfs_device **device)
+struct btrfs_device *btrfs_find_device_by_devspec(
+ struct btrfs_fs_info *fs_info, u64 devid, const char *devpath)
{
- int ret;
+ struct btrfs_device *device;
if (devid) {
- ret = 0;
- *device = btrfs_find_device(fs_info, devid, NULL, NULL);
- if (!*device)
- ret = -ENOENT;
+ device = btrfs_find_device(fs_info, devid, NULL, NULL);
+ if (!device)
+ return ERR_PTR(-ENOENT);
} else {
if (!devpath || !devpath[0])
- return -EINVAL;
-
- ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
- device);
+ return ERR_PTR(-EINVAL);
+ device = btrfs_find_device_missing_or_by_path(fs_info, devpath);
}
- return ret;
+ return device;
}
/*
@@ -3679,7 +3691,7 @@ static int alloc_profile_is_valid(u64 flags, int extended)
return !extended; /* "0" is valid for usual profiles */
/* true if exactly one bit set */
- return (flags & (flags - 1)) == 0;
+ return is_power_of_2(flags);
}
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
@@ -3740,13 +3752,8 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
}
}
- num_devices = fs_info->fs_devices->num_devices;
- btrfs_dev_replace_read_lock(&fs_info->dev_replace);
- if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
- BUG_ON(num_devices < 1);
- num_devices--;
- }
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ num_devices = btrfs_num_devices(fs_info);
+
allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
if (num_devices > 1)
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
@@ -5897,7 +5904,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
}
out:
if (dev_replace_is_ongoing) {
- btrfs_dev_replace_clear_lock_blocking(dev_replace);
+ ASSERT(atomic_read(&dev_replace->blocking_readers) > 0);
+ btrfs_dev_replace_read_lock(dev_replace);
+ /* Barrier implied by atomic_dec_and_test */
+ if (atomic_dec_and_test(&dev_replace->blocking_readers))
+ cond_wake_up_nomb(&dev_replace->read_lock_wq);
btrfs_dev_replace_read_unlock(dev_replace);
}
free_extent_map(em);
@@ -7438,7 +7449,7 @@ static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
int ret = 0;
read_lock(&em_tree->lock);
- for (node = rb_first(&em_tree->map); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
em = rb_entry(node, struct extent_map, rb_node);
if (em->map_lookup->num_stripes !=
em->map_lookup->verified_stripes) {
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 23e9285d88de..aefce895e994 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -410,12 +410,9 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_device *device,
struct btrfs_device *this_dev);
-int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
- const char *device_path,
- struct btrfs_device **device);
-int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
- const char *devpath,
- struct btrfs_device **device);
+struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
+ u64 devid,
+ const char *devpath);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u64 *devid,
const u8 *uuid);
diff --git a/fs/buffer.c b/fs/buffer.c
index 6f1ae3ac9789..d60d61e8ed7d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -562,7 +562,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
EXPORT_SYMBOL(mark_buffer_dirty_inode);
/*
- * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * Mark the page dirty, and set it dirty in the page cache, and mark the inode
* dirty.
*
* If warn is true, then emit a warning if the page is not uptodate and has
@@ -579,8 +579,8 @@ void __set_page_dirty(struct page *page, struct address_space *mapping,
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping);
- radix_tree_tag_set(&mapping->i_pages,
- page_index(page), PAGECACHE_TAG_DIRTY);
+ __xa_set_mark(&mapping->i_pages, page_index(page),
+ PAGECACHE_TAG_DIRTY);
}
xa_unlock_irqrestore(&mapping->i_pages, flags);
}
@@ -1050,7 +1050,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
* The relationship between dirty buffers and dirty pages:
*
* Whenever a page has any dirty buffers, the page's dirty bit is set, and
- * the page is tagged dirty in its radix tree.
+ * the page is tagged dirty in the page cache.
*
* At all times, the dirtiness of the buffers represents the dirtiness of
* subsections of the page. If the page has buffers, the page dirty bit is
@@ -1073,9 +1073,9 @@ __getblk_slow(struct block_device *bdev, sector_t block,
* mark_buffer_dirty - mark a buffer_head as needing writeout
* @bh: the buffer_head to mark dirty
*
- * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
- * backing page dirty, then tag the page as dirty in its address_space's radix
- * tree and then attach the address_space's inode to its superblock's dirty
+ * mark_buffer_dirty() will set the dirty bit against the buffer, then set
+ * its backing page dirty, then tag the page as dirty in the page cache
+ * and then attach the address_space's inode to its superblock's dirty
* inode list.
*
* mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
@@ -3060,11 +3060,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
*/
bio = bio_alloc(GFP_NOIO, 1);
- if (wbc) {
- wbc_init_bio(wbc, bio);
- wbc_account_io(wbc, bh->b_page, bh->b_size);
- }
-
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint;
@@ -3084,6 +3079,11 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
op_flags |= REQ_PRIO;
bio_set_op_attrs(bio, op, op_flags);
+ if (wbc) {
+ wbc_init_bio(wbc, bio);
+ wbc_account_io(wbc, bh->b_page, bh->b_size);
+ }
+
submit_bio(bio);
return 0;
}
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 027408d55aee..5f0103f40079 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -104,6 +104,11 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
struct timespec64 old_ctime = inode->i_ctime;
umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
+ if (ceph_snap(inode) != CEPH_NOSNAP) {
+ ret = -EROFS;
+ goto out;
+ }
+
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
@@ -138,11 +143,6 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
goto out_free;
}
- if (ceph_snap(inode) != CEPH_NOSNAP) {
- ret = -EROFS;
- goto out_free;
- }
-
if (new_mode != old_mode) {
newattrs.ia_ctime = current_time(inode);
newattrs.ia_mode = new_mode;
@@ -206,10 +206,9 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
tmp_buf = kmalloc(max(val_size1, val_size2), GFP_KERNEL);
if (!tmp_buf)
goto out_err;
- pagelist = kmalloc(sizeof(struct ceph_pagelist), GFP_KERNEL);
+ pagelist = ceph_pagelist_alloc(GFP_KERNEL);
if (!pagelist)
goto out_err;
- ceph_pagelist_init(pagelist);
err = ceph_pagelist_reserve(pagelist, PAGE_SIZE);
if (err)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9c332a6f6667..8eade7a993c1 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -322,7 +322,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
/* caller of readpages does not hold buffer and read caps
* (fadvise, madvise and readahead cases) */
int want = CEPH_CAP_FILE_CACHE;
- ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
+ ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, true, &got);
if (ret < 0) {
dout("start_read %p, error getting cap\n", inode);
} else if (!(got & want)) {
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dd7dfdd2ba13..f3496db4bb3e 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -519,9 +519,9 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
* -> we take mdsc->cap_delay_lock
*/
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
- struct ceph_inode_info *ci)
+ struct ceph_inode_info *ci,
+ bool set_timeout)
{
- __cap_set_timeouts(mdsc, ci);
dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
ci->i_ceph_flags, ci->i_hold_caps_max);
if (!mdsc->stopping) {
@@ -531,6 +531,8 @@ static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
goto no_change;
list_del_init(&ci->i_cap_delay_list);
}
+ if (set_timeout)
+ __cap_set_timeouts(mdsc, ci);
list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
spin_unlock(&mdsc->cap_delay_lock);
@@ -720,7 +722,7 @@ void ceph_add_cap(struct inode *inode,
dout(" issued %s, mds wanted %s, actual %s, queueing\n",
ceph_cap_string(issued), ceph_cap_string(wanted),
ceph_cap_string(actual_wanted));
- __cap_delay_requeue(mdsc, ci);
+ __cap_delay_requeue(mdsc, ci, true);
}
if (flags & CEPH_CAP_FLAG_AUTH) {
@@ -1647,7 +1649,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
(mask & CEPH_CAP_FILE_BUFFER))
dirty |= I_DIRTY_DATASYNC;
- __cap_delay_requeue(mdsc, ci);
+ __cap_delay_requeue(mdsc, ci, true);
return dirty;
}
@@ -2065,7 +2067,7 @@ ack:
/* Reschedule delayed caps release if we delayed anything */
if (delayed)
- __cap_delay_requeue(mdsc, ci);
+ __cap_delay_requeue(mdsc, ci, false);
spin_unlock(&ci->i_ceph_lock);
@@ -2125,7 +2127,7 @@ retry:
if (delayed) {
spin_lock(&ci->i_ceph_lock);
- __cap_delay_requeue(mdsc, ci);
+ __cap_delay_requeue(mdsc, ci, true);
spin_unlock(&ci->i_ceph_lock);
}
} else {
@@ -2671,17 +2673,18 @@ static void check_max_size(struct inode *inode, loff_t endoff)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
-int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
+int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
+ bool nonblock, int *got)
{
int ret, err = 0;
BUG_ON(need & ~CEPH_CAP_FILE_RD);
- BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
+ BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO|CEPH_CAP_FILE_SHARED));
ret = ceph_pool_perm_check(ci, need);
if (ret < 0)
return ret;
- ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
+ ret = try_get_cap_refs(ci, need, want, 0, nonblock, got, &err);
if (ret) {
if (err == -EAGAIN) {
ret = 0;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 5dd433aa9b23..27cad84dab23 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
+#include <linux/ceph/striper.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -557,90 +558,26 @@ enum {
};
/*
- * Read a range of bytes striped over one or more objects. Iterate over
- * objects we stripe over. (That's not atomic, but good enough for now.)
- *
- * If we get a short result from the OSD, check against i_size; we need to
- * only return a short read to the caller if we hit EOF.
- */
-static int striped_read(struct inode *inode,
- u64 pos, u64 len,
- struct page **pages, int num_pages,
- int page_align, int *checkeof)
-{
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_inode_info *ci = ceph_inode(inode);
- u64 this_len;
- loff_t i_size;
- int page_idx;
- int ret, read = 0;
- bool hit_stripe, was_short;
-
- /*
- * we may need to do multiple reads. not atomic, unfortunately.
- */
-more:
- this_len = len;
- page_idx = (page_align + read) >> PAGE_SHIFT;
- ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
- &ci->i_layout, pos, &this_len,
- ci->i_truncate_seq, ci->i_truncate_size,
- pages + page_idx, num_pages - page_idx,
- ((page_align + read) & ~PAGE_MASK));
- if (ret == -ENOENT)
- ret = 0;
- hit_stripe = this_len < len;
- was_short = ret >= 0 && ret < this_len;
- dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
- ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
-
- i_size = i_size_read(inode);
- if (ret >= 0) {
- if (was_short && (pos + ret < i_size)) {
- int zlen = min(this_len - ret, i_size - pos - ret);
- int zoff = page_align + read + ret;
- dout(" zero gap %llu to %llu\n",
- pos + ret, pos + ret + zlen);
- ceph_zero_page_vector_range(zoff, zlen, pages);
- ret += zlen;
- }
-
- read += ret;
- pos += ret;
- len -= ret;
-
- /* hit stripe and need continue*/
- if (len && hit_stripe && pos < i_size)
- goto more;
- }
-
- if (read > 0) {
- ret = read;
- /* did we bounce off eof? */
- if (pos + len > i_size)
- *checkeof = CHECK_EOF;
- }
-
- dout("striped_read returns %d\n", ret);
- return ret;
-}
-
-/*
* Completely synchronous read and write methods. Direct from __user
* buffer to osd, or directly to user pages (if O_DIRECT).
*
- * If the read spans object boundary, just do multiple reads.
+ * If the read spans an object boundary, just do multiple reads. (That's not
+ * atomic, but good enough for now.)
+ *
+ * If we get a short result from the OSD, check against i_size; we need to
+ * only return a short read to the caller if we hit EOF.
*/
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
- int *checkeof)
+ int *retry_op)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- struct page **pages;
- u64 off = iocb->ki_pos;
- int num_pages;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
ssize_t ret;
- size_t len = iov_iter_count(to);
+ u64 off = iocb->ki_pos;
+ u64 len = iov_iter_count(to);
dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
@@ -653,61 +590,118 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
* but it will at least behave sensibly when they are
* in sequence.
*/
- ret = filemap_write_and_wait_range(inode->i_mapping, off,
- off + len);
+ ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len);
if (ret < 0)
return ret;
- if (unlikely(iov_iter_is_pipe(to))) {
+ ret = 0;
+ while ((len = iov_iter_count(to)) > 0) {
+ struct ceph_osd_request *req;
+ struct page **pages;
+ int num_pages;
size_t page_off;
- ret = iov_iter_get_pages_alloc(to, &pages, len,
- &page_off);
- if (ret <= 0)
- return -ENOMEM;
- num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
+ u64 i_size;
+ bool more;
+
+ req = ceph_osdc_new_request(osdc, &ci->i_layout,
+ ci->i_vino, off, &len, 0, 1,
+ CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
+ NULL, ci->i_truncate_seq,
+ ci->i_truncate_size, false);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ break;
+ }
+
+ more = len < iov_iter_count(to);
- ret = striped_read(inode, off, ret, pages, num_pages,
- page_off, checkeof);
- if (ret > 0) {
- iov_iter_advance(to, ret);
- off += ret;
+ if (unlikely(iov_iter_is_pipe(to))) {
+ ret = iov_iter_get_pages_alloc(to, &pages, len,
+ &page_off);
+ if (ret <= 0) {
+ ceph_osdc_put_request(req);
+ ret = -ENOMEM;
+ break;
+ }
+ num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
+ if (ret < len) {
+ len = ret;
+ osd_req_op_extent_update(req, 0, len);
+ more = false;
+ }
} else {
- iov_iter_advance(to, 0);
+ num_pages = calc_pages_for(off, len);
+ page_off = off & ~PAGE_MASK;
+ pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ ceph_osdc_put_request(req);
+ ret = PTR_ERR(pages);
+ break;
+ }
}
- ceph_put_page_vector(pages, num_pages, false);
- } else {
- num_pages = calc_pages_for(off, len);
- pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- ret = striped_read(inode, off, len, pages, num_pages,
- (off & ~PAGE_MASK), checkeof);
- if (ret > 0) {
- int l, k = 0;
- size_t left = ret;
-
- while (left) {
- size_t page_off = off & ~PAGE_MASK;
- size_t copy = min_t(size_t, left,
- PAGE_SIZE - page_off);
- l = copy_page_to_iter(pages[k++], page_off,
- copy, to);
- off += l;
- left -= l;
- if (l < copy)
+
+ osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
+ false, false);
+ ret = ceph_osdc_start_request(osdc, req, false);
+ if (!ret)
+ ret = ceph_osdc_wait_request(osdc, req);
+ ceph_osdc_put_request(req);
+
+ i_size = i_size_read(inode);
+ dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
+ off, len, ret, i_size, (more ? " MORE" : ""));
+
+ if (ret == -ENOENT)
+ ret = 0;
+ if (ret >= 0 && ret < len && (off + ret < i_size)) {
+ int zlen = min(len - ret, i_size - off - ret);
+ int zoff = page_off + ret;
+ dout("sync_read zero gap %llu~%llu\n",
+ off + ret, off + ret + zlen);
+ ceph_zero_page_vector_range(zoff, zlen, pages);
+ ret += zlen;
+ }
+
+ if (unlikely(iov_iter_is_pipe(to))) {
+ if (ret > 0) {
+ iov_iter_advance(to, ret);
+ off += ret;
+ } else {
+ iov_iter_advance(to, 0);
+ }
+ ceph_put_page_vector(pages, num_pages, false);
+ } else {
+ int idx = 0;
+ size_t left = ret > 0 ? ret : 0;
+ while (left > 0) {
+ size_t len, copied;
+ page_off = off & ~PAGE_MASK;
+ len = min_t(size_t, left, PAGE_SIZE - page_off);
+ copied = copy_page_to_iter(pages[idx++],
+ page_off, len, to);
+ off += copied;
+ left -= copied;
+ if (copied < len) {
+ ret = -EFAULT;
break;
+ }
}
+ ceph_release_page_vector(pages, num_pages);
}
- ceph_release_page_vector(pages, num_pages);
+
+ if (ret <= 0 || off >= i_size || !more)
+ break;
}
if (off > iocb->ki_pos) {
+ if (ret >= 0 &&
+ iov_iter_count(to) > 0 && off >= i_size_read(inode))
+ *retry_op = CHECK_EOF;
ret = off - iocb->ki_pos;
iocb->ki_pos = off;
}
- dout("sync_read result %zd\n", ret);
+ dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
return ret;
}
@@ -865,7 +859,7 @@ static void ceph_aio_retry_work(struct work_struct *work)
}
spin_unlock(&ci->i_ceph_lock);
- req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
+ req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
false, GFP_NOFS);
if (!req) {
ret = -ENOMEM;
@@ -877,6 +871,11 @@ static void ceph_aio_retry_work(struct work_struct *work)
ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
+ req->r_ops[0] = orig_req->r_ops[0];
+
+ req->r_mtime = aio_req->mtime;
+ req->r_data_offset = req->r_ops[0].extent.offset;
+
ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
if (ret) {
ceph_osdc_put_request(req);
@@ -884,11 +883,6 @@ static void ceph_aio_retry_work(struct work_struct *work)
goto out;
}
- req->r_ops[0] = orig_req->r_ops[0];
-
- req->r_mtime = aio_req->mtime;
- req->r_data_offset = req->r_ops[0].extent.offset;
-
ceph_osdc_put_request(orig_req);
req->r_callback = ceph_aio_complete_req;
@@ -1734,7 +1728,6 @@ static long ceph_fallocate(struct file *file, int mode,
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_cap_flush *prealloc_cf;
int want, got = 0;
int dirty;
@@ -1742,10 +1735,7 @@ static long ceph_fallocate(struct file *file, int mode,
loff_t endoff = 0;
loff_t size;
- if ((offset + length) > max(i_size_read(inode), fsc->max_file_size))
- return -EFBIG;
-
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
if (!S_ISREG(inode->i_mode))
@@ -1762,18 +1752,6 @@ static long ceph_fallocate(struct file *file, int mode,
goto unlock;
}
- if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
- ceph_quota_is_max_bytes_exceeded(inode, offset + length)) {
- ret = -EDQUOT;
- goto unlock;
- }
-
- if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL) &&
- !(mode & FALLOC_FL_PUNCH_HOLE)) {
- ret = -ENOSPC;
- goto unlock;
- }
-
if (ci->i_inline_version != CEPH_INLINE_NONE) {
ret = ceph_uninline_data(file, NULL);
if (ret < 0)
@@ -1781,12 +1759,12 @@ static long ceph_fallocate(struct file *file, int mode,
}
size = i_size_read(inode);
- if (!(mode & FALLOC_FL_KEEP_SIZE)) {
- endoff = offset + length;
- ret = inode_newsize_ok(inode, endoff);
- if (ret)
- goto unlock;
- }
+
+ /* Are we punching a hole beyond EOF? */
+ if (offset >= size)
+ goto unlock;
+ if ((offset + length) > size)
+ length = size - offset;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
@@ -1797,16 +1775,8 @@ static long ceph_fallocate(struct file *file, int mode,
if (ret < 0)
goto unlock;
- if (mode & FALLOC_FL_PUNCH_HOLE) {
- if (offset < size)
- ceph_zero_pagecache_range(inode, offset, length);
- ret = ceph_zero_objects(inode, offset, length);
- } else if (endoff > size) {
- truncate_pagecache_range(inode, size, -1);
- if (ceph_inode_set_size(inode, endoff))
- ceph_check_caps(ceph_inode(inode),
- CHECK_CAPS_AUTHONLY, NULL);
- }
+ ceph_zero_pagecache_range(inode, offset, length);
+ ret = ceph_zero_objects(inode, offset, length);
if (!ret) {
spin_lock(&ci->i_ceph_lock);
@@ -1816,9 +1786,6 @@ static long ceph_fallocate(struct file *file, int mode,
spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
- if ((endoff > size) &&
- ceph_quota_is_max_bytes_approaching(inode, endoff))
- ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
}
ceph_put_cap_refs(ci, got);
@@ -1828,6 +1795,300 @@ unlock:
return ret;
}
+/*
+ * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
+ * src_ci. Two attempts are made to obtain both caps, and an error is returned if
+ * this fails; zero is returned on success.
+ */
+static int get_rd_wr_caps(struct ceph_inode_info *src_ci,
+ loff_t src_endoff, int *src_got,
+ struct ceph_inode_info *dst_ci,
+ loff_t dst_endoff, int *dst_got)
+{
+ int ret = 0;
+ bool retrying = false;
+
+retry_caps:
+ ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
+ dst_endoff, dst_got, NULL);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Since we're already holding the FILE_WR capability for the dst file,
+ * we would risk a deadlock by using ceph_get_caps. Thus, we'll do some
+ * retry dance instead to try to get both capabilities.
+ */
+ ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
+ false, src_got);
+ if (ret <= 0) {
+ /* Start by dropping dst_ci caps and getting src_ci caps */
+ ceph_put_cap_refs(dst_ci, *dst_got);
+ if (retrying) {
+ if (!ret)
+ /* ceph_try_get_caps masks EAGAIN */
+ ret = -EAGAIN;
+ return ret;
+ }
+ ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD,
+ CEPH_CAP_FILE_SHARED, src_endoff,
+ src_got, NULL);
+ if (ret < 0)
+ return ret;
+ /* ... drop src_ci caps too, and retry */
+ ceph_put_cap_refs(src_ci, *src_got);
+ retrying = true;
+ goto retry_caps;
+ }
+ return ret;
+}
+
+static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
+ struct ceph_inode_info *dst_ci, int dst_got)
+{
+ ceph_put_cap_refs(src_ci, src_got);
+ ceph_put_cap_refs(dst_ci, dst_got);
+}
+
+/*
+ * This function does several size-related checks, returning an error if:
+ * - source file is smaller than off+len
+ * - destination file size is not OK (inode_newsize_ok())
+ * - max bytes quotas is exceeded
+ */
+static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
+ loff_t src_off, loff_t dst_off, size_t len)
+{
+ loff_t size, endoff;
+
+ size = i_size_read(src_inode);
+ /*
+ * Don't copy beyond source file EOF. Instead of simply setting length
+ * to (size - src_off), just drop to VFS default implementation, as the
+ * local i_size may be stale due to other clients writing to the source
+ * inode.
+ */
+ if (src_off + len > size) {
+ dout("Copy beyond EOF (%llu + %zu > %llu)\n",
+ src_off, len, size);
+ return -EOPNOTSUPP;
+ }
+ size = i_size_read(dst_inode);
+
+ endoff = dst_off + len;
+ if (inode_newsize_ok(dst_inode, endoff))
+ return -EOPNOTSUPP;
+
+ if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
+ return -EDQUOT;
+
+ return 0;
+}
+
+static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off,
+ size_t len, unsigned int flags)
+{
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *dst_inode = file_inode(dst_file);
+ struct ceph_inode_info *src_ci = ceph_inode(src_inode);
+ struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
+ struct ceph_cap_flush *prealloc_cf;
+ struct ceph_object_locator src_oloc, dst_oloc;
+ struct ceph_object_id src_oid, dst_oid;
+ loff_t endoff = 0, size;
+ ssize_t ret = -EIO;
+ u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
+ u32 src_objlen, dst_objlen, object_size;
+ int src_got = 0, dst_got = 0, err, dirty;
+ bool do_final_copy = false;
+
+ if (src_inode == dst_inode)
+ return -EINVAL;
+ if (ceph_snap(dst_inode) != CEPH_NOSNAP)
+ return -EROFS;
+
+ /*
+ * Some of the checks below will return -EOPNOTSUPP, which will force a
+ * fallback to the default VFS copy_file_range implementation. This is
+ * desirable in several cases (for example, when 'len' is smaller than
+ * the size of the objects, or in cases where that would be more
+ * efficient).
+ */
+
+ if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM))
+ return -EOPNOTSUPP;
+
+ if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
+ (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
+ (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
+ return -EOPNOTSUPP;
+
+ if (len < src_ci->i_layout.object_size)
+ return -EOPNOTSUPP; /* no remote copy will be done */
+
+ prealloc_cf = ceph_alloc_cap_flush();
+ if (!prealloc_cf)
+ return -ENOMEM;
+
+ /* Start by sync'ing the source file */
+ ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
+ if (ret < 0)
+ goto out;
+
+ /*
+ * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
+ * clients may have dirty data in their caches. And OSDs know nothing
+ * about caps, so they can't safely do the remote object copies.
+ */
+ err = get_rd_wr_caps(src_ci, (src_off + len), &src_got,
+ dst_ci, (dst_off + len), &dst_got);
+ if (err < 0) {
+ dout("get_rd_wr_caps returned %d\n", err);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
+ if (ret < 0)
+ goto out_caps;
+
+ size = i_size_read(dst_inode);
+ endoff = dst_off + len;
+
+ /* Drop dst file cached pages */
+ ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
+ dst_off >> PAGE_SHIFT,
+ endoff >> PAGE_SHIFT);
+ if (ret < 0) {
+ dout("Failed to invalidate inode pages (%zd)\n", ret);
+ ret = 0; /* XXX */
+ }
+ src_oloc.pool = src_ci->i_layout.pool_id;
+ src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
+ dst_oloc.pool = dst_ci->i_layout.pool_id;
+ dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
+
+ ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
+ src_ci->i_layout.object_size,
+ &src_objnum, &src_objoff, &src_objlen);
+ ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
+ dst_ci->i_layout.object_size,
+ &dst_objnum, &dst_objoff, &dst_objlen);
+ /* object-level offsets need to be the same */
+ if (src_objoff != dst_objoff) {
+ ret = -EOPNOTSUPP;
+ goto out_caps;
+ }
+
+ /*
+ * Do a manual copy if the object offset isn't object aligned.
+ * 'src_objlen' contains the bytes left until the end of the object,
+ * starting at src_off.
+ */
+ if (src_objoff) {
+ /*
+ * we need to temporarily drop all caps as we'll be calling
+ * {read,write}_iter, which will get caps again.
+ */
+ put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
+ ret = do_splice_direct(src_file, &src_off, dst_file,
+ &dst_off, src_objlen, flags);
+ if (ret < 0) {
+ dout("do_splice_direct returned %d\n", err);
+ goto out;
+ }
+ len -= ret;
+ err = get_rd_wr_caps(src_ci, (src_off + len),
+ &src_got, dst_ci,
+ (dst_off + len), &dst_got);
+ if (err < 0)
+ goto out;
+ err = is_file_size_ok(src_inode, dst_inode,
+ src_off, dst_off, len);
+ if (err < 0)
+ goto out_caps;
+ }
+ object_size = src_ci->i_layout.object_size;
+ while (len >= object_size) {
+ ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
+ object_size, &src_objnum,
+ &src_objoff, &src_objlen);
+ ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
+ object_size, &dst_objnum,
+ &dst_objoff, &dst_objlen);
+ ceph_oid_init(&src_oid);
+ ceph_oid_printf(&src_oid, "%llx.%08llx",
+ src_ci->i_vino.ino, src_objnum);
+ ceph_oid_init(&dst_oid);
+ ceph_oid_printf(&dst_oid, "%llx.%08llx",
+ dst_ci->i_vino.ino, dst_objnum);
+ /* Do an object remote copy */
+ err = ceph_osdc_copy_from(
+ &ceph_inode_to_client(src_inode)->client->osdc,
+ src_ci->i_vino.snap, 0,
+ &src_oid, &src_oloc,
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
+ &dst_oid, &dst_oloc,
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
+ if (err) {
+ dout("ceph_osdc_copy_from returned %d\n", err);
+ if (!ret)
+ ret = err;
+ goto out_caps;
+ }
+ len -= object_size;
+ src_off += object_size;
+ dst_off += object_size;
+ ret += object_size;
+ }
+
+ if (len)
+ /* We still need one final local copy */
+ do_final_copy = true;
+
+ file_update_time(dst_file);
+ if (endoff > size) {
+ int caps_flags = 0;
+
+ /* Let the MDS know about dst file size change */
+ if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
+ caps_flags |= CHECK_CAPS_NODELAY;
+ if (ceph_inode_set_size(dst_inode, endoff))
+ caps_flags |= CHECK_CAPS_AUTHONLY;
+ if (caps_flags)
+ ceph_check_caps(dst_ci, caps_flags, NULL);
+ }
+ /* Mark Fw dirty */
+ spin_lock(&dst_ci->i_ceph_lock);
+ dst_ci->i_inline_version = CEPH_INLINE_NONE;
+ dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
+ spin_unlock(&dst_ci->i_ceph_lock);
+ if (dirty)
+ __mark_inode_dirty(dst_inode, dirty);
+
+out_caps:
+ put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
+
+ if (do_final_copy) {
+ err = do_splice_direct(src_file, &src_off, dst_file,
+ &dst_off, len, flags);
+ if (err < 0) {
+ dout("do_splice_direct returned %d\n", err);
+ goto out;
+ }
+ len -= err;
+ ret += err;
+ }
+
+out:
+ ceph_free_cap_flush(prealloc_cf);
+
+ return ret;
+}
+
const struct file_operations ceph_file_fops = {
.open = ceph_open,
.release = ceph_release,
@@ -1843,5 +2104,5 @@ const struct file_operations ceph_file_fops = {
.unlocked_ioctl = ceph_ioctl,
.compat_ioctl = ceph_ioctl,
.fallocate = ceph_fallocate,
+ .copy_file_range = ceph_copy_file_range,
};
-
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ebc7bdaed2d0..79dd5e6ed755 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1132,8 +1132,12 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
if (IS_ERR(realdn)) {
pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
PTR_ERR(realdn), dn, in, ceph_vinop(in));
- dput(dn);
- dn = realdn; /* note realdn contains the error */
+ dn = realdn;
+ /*
+ * Caller should release 'dn' in the case of error.
+ * If 'req->r_dentry' is passed to this function,
+ * caller should leave 'req->r_dentry' untouched.
+ */
goto out;
} else if (realdn) {
dout("dn %p (%d) spliced with %p (%d) "
@@ -1196,7 +1200,9 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
WARN_ON_ONCE(1);
}
- if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
+ if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
+ test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
+ !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
struct qstr dname;
struct dentry *dn, *parent;
@@ -1677,7 +1683,6 @@ retry_lookup:
if (IS_ERR(realdn)) {
err = PTR_ERR(realdn);
d_drop(dn);
- dn = NULL;
goto next_item;
}
dn = realdn;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index bc43c822426a..67a9aeb2f4ec 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2071,7 +2071,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
if (req->r_old_dentry_drop)
len += req->r_old_dentry->d_name.len;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
+ msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
if (!msg) {
msg = ERR_PTR(-ENOMEM);
goto out_free2;
@@ -2136,7 +2136,6 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
if (req->r_pagelist) {
struct ceph_pagelist *pagelist = req->r_pagelist;
- refcount_inc(&pagelist->refcnt);
ceph_msg_data_add_pagelist(msg, pagelist);
msg->hdr.data_len = cpu_to_le32(pagelist->length);
} else {
@@ -3126,12 +3125,11 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
pr_info("mds%d reconnect start\n", mds);
- pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+ pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
goto fail_nopagelist;
- ceph_pagelist_init(pagelist);
- reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
+ reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
if (!reply)
goto fail_nomsg;
@@ -3241,6 +3239,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
mutex_unlock(&mdsc->mutex);
up_read(&mdsc->snap_rwsem);
+ ceph_pagelist_release(pagelist);
return;
fail:
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index eab1359d0553..b5ecd6f50360 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -165,6 +165,8 @@ enum {
Opt_noacl,
Opt_quotadf,
Opt_noquotadf,
+ Opt_copyfrom,
+ Opt_nocopyfrom,
};
static match_table_t fsopt_tokens = {
@@ -203,6 +205,8 @@ static match_table_t fsopt_tokens = {
{Opt_noacl, "noacl"},
{Opt_quotadf, "quotadf"},
{Opt_noquotadf, "noquotadf"},
+ {Opt_copyfrom, "copyfrom"},
+ {Opt_nocopyfrom, "nocopyfrom"},
{-1, NULL}
};
@@ -355,6 +359,12 @@ static int parse_fsopt_token(char *c, void *private)
case Opt_noquotadf:
fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
break;
+ case Opt_copyfrom:
+ fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
+ break;
+ case Opt_nocopyfrom:
+ fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
+ break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
case Opt_acl:
fsopt->sb_flags |= SB_POSIXACL;
@@ -553,6 +563,9 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",noacl");
#endif
+ if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM)
+ seq_puts(m, ",nocopyfrom");
+
if (fsopt->mds_namespace)
seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 582e28fd1b7b..c005a5400f2e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -40,6 +40,7 @@
#define CEPH_MOUNT_OPT_NOPOOLPERM (1<<11) /* no pool permission check */
#define CEPH_MOUNT_OPT_MOUNTWAIT (1<<12) /* mount waits if no mds is up */
#define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */
+#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */
#define CEPH_MOUNT_OPT_DEFAULT CEPH_MOUNT_OPT_DCACHE
@@ -1008,7 +1009,7 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
loff_t endoff, int *got, struct page **pinned_page);
extern int ceph_try_get_caps(struct ceph_inode_info *ci,
- int need, int want, int *got);
+ int need, int want, bool nonblock, int *got);
/* for counting open files by mode */
extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 5cc8b94f8206..316f6ad10644 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -951,11 +951,10 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
if (size > 0) {
/* copy value into pagelist */
- pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+ pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
return -ENOMEM;
- ceph_pagelist_init(pagelist);
err = ceph_pagelist_append(pagelist, value, size);
if (err)
goto out;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index f1fbea947fef..3e812428ac8d 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -132,7 +132,7 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
- seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+ seq_printf(m, "\tSpeed: %zu bps\n", iface->speed);
seq_puts(m, "\t\tCapabilities: ");
if (iface->rdma_capable)
seq_puts(m, "rdma ");
@@ -285,7 +285,7 @@ skip_rdma:
if ((ses->serverDomain == NULL) ||
(ses->serverOS == NULL) ||
(ses->serverNOS == NULL)) {
- seq_printf(m, "\n%d) Name: %s Uses: %d Capability: 0x%x\tSession Status: %d\t",
+ seq_printf(m, "\n%d) Name: %s Uses: %d Capability: 0x%x\tSession Status: %d ",
i, ses->serverName, ses->ses_count,
ses->capabilities, ses->status);
if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
@@ -296,16 +296,18 @@ skip_rdma:
seq_printf(m,
"\n%d) Name: %s Domain: %s Uses: %d OS:"
" %s\n\tNOS: %s\tCapability: 0x%x\n\tSMB"
- " session status: %d\t",
+ " session status: %d ",
i, ses->serverName, ses->serverDomain,
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->status);
}
if (server->rdma)
seq_printf(m, "RDMA\n\t");
- seq_printf(m, "TCP status: %d\n\tLocal Users To "
+ seq_printf(m, "TCP status: %d Instance: %d\n\tLocal Users To "
"Server: %d SecMode: 0x%x Req On Wire: %d",
- server->tcpStatus, server->srv_count,
+ server->tcpStatus,
+ server->reconnect_instance,
+ server->srv_count,
server->sec_mode, in_flight(server));
#ifdef CONFIG_CIFS_STATS2
@@ -352,7 +354,7 @@ skip_rdma:
seq_printf(m, "\n\tServer interfaces: %zu\n",
ses->iface_count);
for (j = 0; j < ses->iface_count; j++) {
- seq_printf(m, "\t%d)\n", j);
+ seq_printf(m, "\t%d)", j);
cifs_dump_iface(m, &ses->iface_list[j]);
}
spin_unlock(&ses->iface_lock);
@@ -383,6 +385,9 @@ static ssize_t cifs_stats_proc_write(struct file *file,
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
+ atomic_set(&tcpSesReconnectCount, 0);
+ atomic_set(&tconInfoReconnectCount, 0);
+
spin_lock(&GlobalMid_Lock);
GlobalMaxActiveXid = 0;
GlobalCurrentXid = 0;
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index f4f3f0853c6e..631dc1bb21c1 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -47,6 +47,29 @@ extern int cifsFYI;
*/
#ifdef CONFIG_CIFS_DEBUG
+
+/*
+ * When adding tracepoints and debug messages we have various choices.
+ * Some considerations:
+ *
+ * Use cifs_dbg(VFS, ...) for things we always want logged and the user to see
+ * cifs_info(...) for slightly less important messages the admin can filter via loglevel > 6
+ * cifs_dbg(FYI, ...) for minor debugging messages, off by default
+ * trace_smb3_* ftrace functions are preferred for complex debug messages
+ * intended for developers or experienced admins, off by default
+ */
+
+/* Information level messages, minor events */
+#define cifs_info_func(ratefunc, fmt, ...) \
+do { \
+ pr_info_ ## ratefunc("CIFS: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define cifs_info(fmt, ...) \
+do { \
+ cifs_info_func(ratelimited, fmt, ##__VA_ARGS__); \
+} while (0)
+
/* information message: e.g., configuration, major event */
#define cifs_dbg_func(ratefunc, type, fmt, ...) \
do { \
@@ -81,6 +104,11 @@ do { \
if (0) \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
+
+#define cifs_info(fmt, ...) \
+do { \
+ pr_info("CIFS: "fmt, ##__VA_ARGS__); \
+} while (0)
#endif
#endif /* _H_CIFS_DEBUG */
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 6b61df117fd4..b97c74efd04a 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -304,12 +304,17 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
*/
mnt = ERR_PTR(-ENOMEM);
+ cifs_sb = CIFS_SB(mntpt->d_sb);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
+ mnt = ERR_PTR(-EREMOTE);
+ goto cdda_exit;
+ }
+
/* always use tree name prefix */
full_path = build_path_from_dentry_optional_prefix(mntpt, true);
if (full_path == NULL)
goto cdda_exit;
- cifs_sb = CIFS_SB(mntpt->d_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
mnt = ERR_CAST(tlink);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 9731d0d891e7..63d7530f2e1d 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -51,6 +51,7 @@
*/
#define CIFS_MOUNT_UID_FROM_ACL 0x2000000 /* try to get UID via special SID */
#define CIFS_MOUNT_NO_HANDLE_CACHE 0x4000000 /* disable caching dir handles */
+#define CIFS_MOUNT_NO_DFS 0x8000000 /* disable DFS resolving */
struct cifs_sb_info {
struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 57ff0756e30c..d8bce2f862de 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -43,8 +43,19 @@ struct smb_snapshot_array {
/* snapshots[]; */
} __packed;
+struct smb_query_info {
+ __u32 info_type;
+ __u32 file_info_class;
+ __u32 additional_information;
+ __u32 flags;
+ __u32 input_buffer_length;
+ __u32 output_buffer_length;
+ /* char buffer[]; */
+} __packed;
+
#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
#define CIFS_IOC_SET_INTEGRITY _IO(CIFS_IOCTL_MAGIC, 4)
#define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
+#define CIFS_QUERY_INFO _IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 7065426b3280..7de9603c54f1 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -81,6 +81,14 @@ module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
"CIFS/SMB1 dialect (N/A for SMB3) "
"Default: 32767 Range: 2 to 32767.");
+#ifdef CONFIG_CIFS_STATS2
+unsigned int slow_rsp_threshold = 1;
+module_param(slow_rsp_threshold, uint, 0644);
+MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
+ "before logging that a response is delayed. "
+ "Default: 1 (if set to 0 disables msg).");
+#endif /* STATS2 */
+
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
@@ -492,6 +500,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",unix");
else
seq_puts(s, ",nounix");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+ seq_puts(s, ",nodfs");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_puts(s, ",posixpaths");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
@@ -707,7 +717,14 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
struct cifs_mnt_data mnt_data;
struct dentry *root;
- cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
+ /*
+ * Print the attempted mount operation in the kernel / CIFS log.
+ * Use the FYI level if CIFS_DEBUG && cifsFYI, otherwise log an info message.
+ */
+ if (cifsFYI)
+ cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
+ else
+ cifs_info("Attempting to mount %s\n", dev_name);
volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
if (IS_ERR(volume_info))
@@ -1418,6 +1435,11 @@ init_cifs(void)
#ifdef CONFIG_CIFS_STATS2
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
+ if (slow_rsp_threshold < 1)
+ cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
+ else if (slow_rsp_threshold > 32767)
+ cifs_dbg(VFS,
+ "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */
atomic_set(&midCount, 0);
@@ -1538,11 +1560,11 @@ exit_cifs(void)
cifs_proc_clean();
}
-MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
+MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
- ("VFS to access servers complying with the SNIA CIFS Specification "
- "e.g. Samba and Windows");
+ ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
+ "also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("pre: arc4");
MODULE_SOFTDEP("pre: des");
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index f047e87871a1..24e265a51874 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -148,5 +148,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.13"
+#define CIFS_VERSION "2.14"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 9dcaed031843..ed1e0fcb69e3 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -33,6 +33,7 @@
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
+#define SMB_PATH_MAX 260
#define CIFS_PORT 445
#define RFC1001_PORT 139
@@ -465,6 +466,11 @@ struct smb_version_operations {
enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
enum securityEnum);
int (*next_header)(char *);
+ /* ioctl passthrough for query_info */
+ int (*ioctl_query_info)(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ __le16 *path, int is_dir,
+ unsigned long p);
};
struct smb_version_values {
@@ -654,6 +660,7 @@ struct TCP_Server_Info {
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* for signing, protected by srv_mutex */
+ __u32 reconnect_instance; /* incremented on each reconnect */
struct session_key session_key;
unsigned long lstrp; /* when we got last response from this server */
struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
@@ -798,6 +805,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
* a single wsize request with a single call.
*/
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
/*
* Windows only supports a max of 60kb reads and 65535 byte writes. Default to
@@ -924,6 +932,8 @@ struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
struct list_head rlist; /* reconnect list */
+ atomic_t num_local_opens; /* num of all opens including disconnected */
+ atomic_t num_remote_opens; /* num of all network opens on server */
struct list_head openFileList;
spinlock_t open_file_lock; /* protects list above */
struct cifs_ses *ses; /* pointer to session associated with */
@@ -1072,7 +1082,8 @@ struct cifsLockInfo {
__u64 offset;
__u64 length;
__u32 pid;
- __u32 type;
+ __u16 type;
+ __u16 flags;
};
/*
@@ -1715,6 +1726,7 @@ GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
#ifdef CONFIG_CIFS_STATS2
GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
GLOBAL_EXTERN atomic_t totSmBufAllocCount;
+extern unsigned int slow_rsp_threshold; /* number of secs before logging */
#endif
GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 20adda4de83b..fa361bc00602 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -219,7 +219,7 @@ extern void cifs_mark_open_files_invalid(struct cifs_tcon *tcon);
extern void cifs_reopen_persistent_handles(struct cifs_tcon *tcon);
extern bool cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
- __u64 length, __u8 type,
+ __u64 length, __u8 type, __u16 flags,
struct cifsLockInfo **conf_lock,
int rw_check);
extern void cifs_add_pending_open(struct cifs_fid *fid,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5657b79dbc99..f82fd342bca5 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1607,6 +1607,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
.rq_pages = rdata->pages,
+ .rq_offset = rdata->page_offset,
.rq_npages = rdata->nr_pages,
.rq_pagesz = rdata->pagesz,
.rq_tailsz = rdata->tailsz };
@@ -2210,6 +2211,7 @@ cifs_async_writev(struct cifs_writedata *wdata,
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
rqst.rq_pages = wdata->pages;
+ rqst.rq_offset = wdata->page_offset;
rqst.rq_npages = wdata->nr_pages;
rqst.rq_pagesz = wdata->pagesz;
rqst.rq_tailsz = wdata->tailsz;
@@ -5027,6 +5029,13 @@ oldQFSInfoRetry:
le16_to_cpu(response_data->BytesPerSector) *
le32_to_cpu(response_data->
SectorsPerAllocationUnit);
+ /*
+ * Much prefer larger, but if the server doesn't report
+ * a valid size then 4K is a reasonable minimum.
+ */
+ if (FSData->f_bsize < 512)
+ FSData->f_bsize = 4096;
+
FSData->f_blocks =
le32_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
@@ -5107,6 +5116,13 @@ QFSInfoRetry:
le32_to_cpu(response_data->BytesPerSector) *
le32_to_cpu(response_data->
SectorsPerAllocationUnit);
+ /*
+ * Much prefer larger, but if the server doesn't report
+ * a valid size then 4K is a reasonable minimum.
+ */
+ if (FSData->f_bsize < 512)
+ FSData->f_bsize = 4096;
+
FSData->f_blocks =
le64_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
@@ -5470,6 +5486,13 @@ QFSPosixRetry:
data_offset);
FSData->f_bsize =
le32_to_cpu(response_data->BlockSize);
+ /*
+ * Much prefer larger, but if the server doesn't report
+ * a valid size then 4K is a reasonable minimum.
+ */
+ if (FSData->f_bsize < 512)
+ FSData->f_bsize = 4096;
+
FSData->f_blocks =
le64_to_cpu(response_data->TotalBlocks);
FSData->f_bfree =
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 11bcd2fb90b1..6f24f129a751 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -250,6 +250,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_ignore, "dev" },
{ Opt_ignore, "mand" },
{ Opt_ignore, "nomand" },
+ { Opt_ignore, "relatime" },
{ Opt_ignore, "_netdev" },
{ Opt_err, NULL }
@@ -347,7 +348,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->maxBuf = 0;
server->max_read = 0;
- cifs_dbg(FYI, "Reconnecting tcp session\n");
+ cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
trace_smb3_reconnect(server->CurrentMid, server->hostname);
/* before reconnecting the tcp session, mark the smb session (uid)
@@ -2396,6 +2397,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
+ tcp_ses->reconnect_instance = 0;
tcp_ses->lstrp = jiffies;
spin_lock_init(&tcp_ses->req_lock);
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
@@ -3085,10 +3087,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
if (rc)
goto out_fail;
- if (volume_info->nodfs) {
- tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
- cifs_dbg(FYI, "DFS disabled (%d)\n", tcon->Flags);
- }
tcon->use_persistent = false;
/* check if SMB2 or later, CIFS does not support persistent handles */
if (volume_info->persistent) {
@@ -3663,6 +3661,8 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->actimeo = pvolume_info->actimeo;
cifs_sb->local_nls = pvolume_info->local_nls;
+ if (pvolume_info->nodfs)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_DFS;
if (pvolume_info->noperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
if (pvolume_info->setuids)
@@ -3819,6 +3819,9 @@ expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
struct dfs_info3_param *referrals = NULL;
char *full_path = NULL, *ref_path = NULL, *mdata = NULL;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+ return -EREMOTE;
+
full_path = build_unc_path_to_root(volume_info, cifs_sb);
if (IS_ERR(full_path))
return PTR_ERR(full_path);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index dcdbcb6f09f8..e262a05a98bf 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -334,6 +334,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
server->ops->set_fid(cfile, fid, oplock);
list_add(&cfile->tlist, &tcon->openFileList);
+ atomic_inc(&tcon->num_local_opens);
/* if readable file instance put first in list*/
if (file->f_mode & FMODE_READ)
@@ -395,6 +396,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
/* remove it from the lists */
list_del(&cifs_file->flist);
list_del(&cifs_file->tlist);
+ atomic_dec(&tcon->num_local_opens);
if (list_empty(&cifsi->openFileList)) {
cifs_dbg(FYI, "closing last open instance for inode %p\n",
@@ -864,7 +866,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
}
static struct cifsLockInfo *
-cifs_lock_init(__u64 offset, __u64 length, __u8 type)
+cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
struct cifsLockInfo *lock =
kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
@@ -874,6 +876,7 @@ cifs_lock_init(__u64 offset, __u64 length, __u8 type)
lock->length = length;
lock->type = type;
lock->pid = current->tgid;
+ lock->flags = flags;
INIT_LIST_HEAD(&lock->blist);
init_waitqueue_head(&lock->block_q);
return lock;
@@ -896,7 +899,8 @@ cifs_del_lock_waiters(struct cifsLockInfo *lock)
/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
- __u64 length, __u8 type, struct cifsFileInfo *cfile,
+ __u64 length, __u8 type, __u16 flags,
+ struct cifsFileInfo *cfile,
struct cifsLockInfo **conf_lock, int rw_check)
{
struct cifsLockInfo *li;
@@ -918,6 +922,10 @@ cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
((server->ops->compare_fids(cfile, cur_cfile) &&
current->tgid == li->pid) || type == li->type))
continue;
+ if (rw_check == CIFS_LOCK_OP &&
+ (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
+ server->ops->compare_fids(cfile, cur_cfile))
+ continue;
if (conf_lock)
*conf_lock = li;
return true;
@@ -927,8 +935,8 @@ cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
- __u8 type, struct cifsLockInfo **conf_lock,
- int rw_check)
+ __u8 type, __u16 flags,
+ struct cifsLockInfo **conf_lock, int rw_check)
{
bool rc = false;
struct cifs_fid_locks *cur;
@@ -936,7 +944,8 @@ cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
list_for_each_entry(cur, &cinode->llist, llist) {
rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
- cfile, conf_lock, rw_check);
+ flags, cfile, conf_lock,
+ rw_check);
if (rc)
break;
}
@@ -964,7 +973,8 @@ cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
down_read(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, offset, length, type,
- &conf_lock, CIFS_LOCK_OP);
+ flock->fl_flags, &conf_lock,
+ CIFS_LOCK_OP);
if (exist) {
flock->fl_start = conf_lock->offset;
flock->fl_end = conf_lock->offset + conf_lock->length - 1;
@@ -1011,7 +1021,8 @@ try_again:
down_write(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
- lock->type, &conf_lock, CIFS_LOCK_OP);
+ lock->type, lock->flags, &conf_lock,
+ CIFS_LOCK_OP);
if (!exist && cinode->can_cache_brlcks) {
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
@@ -1321,7 +1332,7 @@ cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
cifs_dbg(FYI, "Lease on file - not implemented yet\n");
if (flock->fl_flags &
(~(FL_POSIX | FL_FLOCK | FL_SLEEP |
- FL_ACCESS | FL_LEASE | FL_CLOSE)))
+ FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
*type = server->vals->large_lock_type;
@@ -1584,7 +1595,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
if (lock) {
struct cifsLockInfo *lock;
- lock = cifs_lock_init(flock->fl_start, length, type);
+ lock = cifs_lock_init(flock->fl_start, length, type,
+ flock->fl_flags);
if (!lock)
return -ENOMEM;
@@ -1653,7 +1665,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
-
cifs_sb = CIFS_FILE_SB(file);
netfid = cfile->fid.netfid;
cinode = CIFS_I(file_inode(file));
@@ -2098,6 +2109,7 @@ static int cifs_writepages(struct address_space *mapping,
pgoff_t end, index;
struct cifs_writedata *wdata;
int rc = 0;
+ unsigned int xid;
/*
* If wsize is smaller than the page cache size, default to writing
@@ -2106,6 +2118,7 @@ static int cifs_writepages(struct address_space *mapping,
if (cifs_sb->wsize < PAGE_SIZE)
return generic_writepages(mapping, wbc);
+ xid = get_xid();
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -2199,6 +2212,7 @@ retry:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
+ free_xid(xid);
return rc;
}
@@ -2817,8 +2831,8 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
goto out;
if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
- server->vals->exclusive_lock_type, NULL,
- CIFS_WRITE_OP))
+ server->vals->exclusive_lock_type, 0,
+ NULL, CIFS_WRITE_OP))
rc = __generic_file_write_iter(iocb, from);
else
rc = -EACCES;
@@ -3388,7 +3402,7 @@ cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
down_read(&cinode->lock_sem);
if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
tcon->ses->server->vals->shared_lock_type,
- NULL, CIFS_READ_OP))
+ 0, NULL, CIFS_READ_OP))
rc = generic_file_read_iter(iocb, to);
up_read(&cinode->lock_sem);
return rc;
@@ -3743,7 +3757,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
struct TCP_Server_Info *server;
pid_t pid;
+ unsigned int xid;
+ xid = get_xid();
/*
* Reads as many pages as possible from fscache. Returns -ENOBUFS
* immediately if the cookie is negative
@@ -3753,8 +3769,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
*/
rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
&num_pages);
- if (rc == 0)
+ if (rc == 0) {
+ free_xid(xid);
return rc;
+ }
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -3798,6 +3816,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
*/
if (unlikely(rsize < PAGE_SIZE)) {
add_credits_and_wake_if(server, credits, 0);
+ free_xid(xid);
return 0;
}
@@ -3862,6 +3881,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
* allocator.
*/
cifs_fscache_readpages_cancel(mapping->host, page_list);
+ free_xid(xid);
return rc;
}
@@ -3889,8 +3909,12 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
else
cifs_dbg(FYI, "Bytes read %d\n", rc);
- file_inode(file)->i_atime =
- current_time(file_inode(file));
+ /* we do not want atime to be less than mtime, since that broke some apps */
+ file_inode(file)->i_atime = current_time(file_inode(file));
+ if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
+ file_inode(file)->i_atime = file_inode(file)->i_mtime;
+ else
+ file_inode(file)->i_atime = current_time(file_inode(file));
if (PAGE_SIZE > rc)
memset(read_data + rc, 0, PAGE_SIZE - rc);
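
Note on the locking hunks above: cifs_lock_init() and cifs_find_lock_conflict() now carry the caller's fl_flags, so two OFD (open file description) locks, flagged FL_OFDLCK, taken through the same open file no longer report a conflict with each other during a lock operation. For reference only, a minimal userspace sketch of taking an OFD lock follows; the mount point and file name are illustrative, not from this patch.

/*
 * Minimal userspace sketch (not part of the patch): OFD locks belong to the
 * open file description rather than the owning process, which is why the
 * conflict check above compares file instances instead of the lock owner pid
 * when FL_OFDLCK is set. Needs Linux 3.15+ and _GNU_SOURCE for F_OFD_SETLK.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* the path below is made up for illustration */
	int fd = open("/mnt/cifs/testfile", O_RDWR | O_CREAT, 0644);
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 100,
		.l_pid    = 0,	/* must be zero for OFD locks */
	};

	if (fd < 0)
		return 1;
	/* the lock is tied to this open file description, not to the pid */
	if (fcntl(fd, F_OFD_SETLK, &fl) < 0)
		perror("F_OFD_SETLK");
	close(fd);
	return 0;
}
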
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6e8765f44508..1023d78673fb 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -162,7 +162,11 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
cifs_revalidate_cache(inode, fattr);
spin_lock(&inode->i_lock);
- inode->i_atime = fattr->cf_atime;
+ /* we do not want atime to be less than mtime, since that broke some apps */
+ if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime))
+ inode->i_atime = fattr->cf_mtime;
+ else
+ inode->i_atime = fattr->cf_atime;
inode->i_mtime = fattr->cf_mtime;
inode->i_ctime = fattr->cf_ctime;
inode->i_rdev = fattr->cf_rdev;
@@ -777,38 +781,53 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
} else if (rc == -EREMOTE) {
cifs_create_dfs_fattr(&fattr, sb);
rc = 0;
- } else if (rc == -EACCES && backup_cred(cifs_sb)) {
- srchinf = kzalloc(sizeof(struct cifs_search_info),
- GFP_KERNEL);
- if (srchinf == NULL) {
- rc = -ENOMEM;
- goto cgii_exit;
- }
+ } else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
+ (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
+ == 0)) {
+ /*
+ * For SMB2 and later the backup intent flag is already
+ * sent if needed on open and there is no path based
+ * FindFirst operation to use to retry with
+ */
- srchinf->endOfSearch = false;
+ srchinf = kzalloc(sizeof(struct cifs_search_info),
+ GFP_KERNEL);
+ if (srchinf == NULL) {
+ rc = -ENOMEM;
+ goto cgii_exit;
+ }
+
+ srchinf->endOfSearch = false;
+ if (tcon->unix_ext)
+ srchinf->info_level = SMB_FIND_FILE_UNIX;
+ else if ((tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_nt_find) == 0)
+ srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+ else /* no srvino; plain dir info is a useful fallback for some NetApp servers */
+ srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
- srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
- CIFS_SEARCH_CLOSE_AT_END |
- CIFS_SEARCH_BACKUP_SEARCH;
+ srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
+ CIFS_SEARCH_CLOSE_AT_END |
+ CIFS_SEARCH_BACKUP_SEARCH;
- rc = CIFSFindFirst(xid, tcon, full_path,
- cifs_sb, NULL, srchflgs, srchinf, false);
- if (!rc) {
- data =
- (FILE_ALL_INFO *)srchinf->srch_entries_start;
+ rc = CIFSFindFirst(xid, tcon, full_path,
+ cifs_sb, NULL, srchflgs, srchinf, false);
+ if (!rc) {
+ data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
- cifs_dir_info_to_fattr(&fattr,
- (FILE_DIRECTORY_INFO *)data, cifs_sb);
- fattr.cf_uniqueid = le64_to_cpu(
- ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
- validinum = true;
+ cifs_dir_info_to_fattr(&fattr,
+ (FILE_DIRECTORY_INFO *)data, cifs_sb);
+ fattr.cf_uniqueid = le64_to_cpu(
+ ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
+ validinum = true;
- cifs_buf_release(srchinf->ntwrk_buf_start);
- }
- kfree(srchinf);
- if (rc)
- goto cgii_exit;
+ cifs_buf_release(srchinf->ntwrk_buf_start);
+ }
+ kfree(srchinf);
+ if (rc)
+ goto cgii_exit;
} else
goto cgii_exit;
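
Note on the cifs_fattr_to_inode() hunk at the top of this file's diff: the client no longer reports an atime older than mtime, since that confused some applications. The stand-alone illustration below shows such a clamp using userspace timespec types; it conveys the intent only and does not mirror the kernel hunk line for line.

/* illustration only: never report an atime that is older than mtime */
#include <stdio.h>
#include <time.h>

static struct timespec clamp_atime(struct timespec atime, struct timespec mtime)
{
	if (atime.tv_sec < mtime.tv_sec ||
	    (atime.tv_sec == mtime.tv_sec && atime.tv_nsec < mtime.tv_nsec))
		return mtime;	/* atime older than mtime: report mtime instead */
	return atime;
}

int main(void)
{
	struct timespec mtime = { .tv_sec = 1000, .tv_nsec = 0 };
	struct timespec atime = { .tv_sec = 900,  .tv_nsec = 0 };
	struct timespec out   = clamp_atime(atime, mtime);

	printf("clamped atime: %lld\n", (long long)out.tv_sec);	/* prints 1000 */
	return 0;
}
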
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 54f32f9143a9..76ddd98b6298 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -32,8 +32,51 @@
#include "cifs_debug.h"
#include "cifsfs.h"
#include "cifs_ioctl.h"
+#include "smb2proto.h"
#include <linux/btrfs.h>
+static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
+ unsigned long p)
+{
+ struct inode *inode = file_inode(filep);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct dentry *dentry = filep->f_path.dentry;
+ unsigned char *path;
+ __le16 *utf16_path = NULL, root_path;
+ int rc = 0;
+
+ path = build_path_from_dentry(dentry);
+ if (path == NULL)
+ return -ENOMEM;
+
+ cifs_dbg(FYI, "%s %s\n", __func__, path);
+
+ if (!path[0]) {
+ root_path = 0;
+ utf16_path = &root_path;
+ } else {
+ utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
+ if (!utf16_path) {
+ rc = -ENOMEM;
+ goto ici_exit;
+ }
+ }
+
+ if (tcon->ses->server->ops->ioctl_query_info)
+ rc = tcon->ses->server->ops->ioctl_query_info(
+ xid, tcon, utf16_path,
+ filep->private_data ? 0 : 1, p);
+ else
+ rc = -EOPNOTSUPP;
+
+ ici_exit:
+ if (utf16_path != &root_path)
+ kfree(utf16_path);
+ kfree(path);
+ return rc;
+}
+
static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
unsigned long srcfd)
{
@@ -123,7 +166,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
struct inode *inode = file_inode(filep);
int rc = -ENOTTY; /* strange error - but the precedent */
unsigned int xid;
- struct cifs_sb_info *cifs_sb;
struct cifsFileInfo *pSMBFile = filep->private_data;
struct cifs_tcon *tcon;
__u64 ExtAttrBits = 0;
@@ -131,7 +173,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
xid = get_xid();
- cifs_sb = CIFS_SB(inode->i_sb);
cifs_dbg(FYI, "cifs ioctl 0x%x\n", command);
switch (command) {
case FS_IOC_GETFLAGS:
@@ -196,6 +237,9 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
case CIFS_IOC_COPYCHUNK_FILE:
rc = cifs_ioctl_copychunk(xid, filep, arg);
break;
+ case CIFS_QUERY_INFO:
+ rc = cifs_ioctl_query_info(xid, filep, arg);
+ break;
case CIFS_IOC_SET_INTEGRITY:
if (pSMBFile == NULL)
break;
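
Note on the CIFS_QUERY_INFO ioctl added above: it passes a query through to the SMB server on behalf of userspace. Per the smb2ops.c handler later in this diff, output_buffer_length is the length of optional input data following the structure (rejected above 1024 bytes), input_buffer_length is the room reserved for the reply, and on success the reply bytes are copied in after the structure with the actual length written back into input_buffer_length. A rough usage sketch follows; the smb_query_info layout and the ioctl request code are assumed to match fs/cifs/cifs_ioctl.h, which is not shown here, so verify them before use.

/*
 * Hedged userspace sketch of the CIFS_QUERY_INFO passthrough. Struct layout,
 * ioctl magic/number and the info class value are assumptions taken from the
 * companion cifs_ioctl.h change (not part of this hunk).
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

struct smb_query_info {
	__u32 info_type;		/* e.g. 0x01 = SMB2_O_INFO_FILE */
	__u32 file_info_class;
	__u32 additional_information;
	__u32 flags;
	__u32 input_buffer_length;	/* reply space; actual length returned here */
	__u32 output_buffer_length;	/* length of input payload (<= 1024) */
	/* followed by the payload / reply buffer */
};

#define CIFS_IOCTL_MAGIC	0xCF
#define CIFS_QUERY_INFO		_IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info)

int main(int argc, char **argv)
{
	size_t buf_len = 1024;
	struct smb_query_info *qi;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	qi = calloc(1, sizeof(*qi) + buf_len);
	if (!qi) {
		close(fd);
		return 1;
	}
	qi->info_type = 0x01;			/* SMB2_O_INFO_FILE (assumed) */
	qi->file_info_class = 18;		/* FileAllInformation (assumed) */
	qi->input_buffer_length = buf_len;	/* room for the reply */
	qi->output_buffer_length = 0;		/* no input payload sent */

	if (ioctl(fd, CIFS_QUERY_INFO, qi) < 0)
		perror("CIFS_QUERY_INFO");
	else
		printf("server returned %u bytes\n", qi->input_buffer_length);

	free(qi);
	close(fd);
	return 0;
}
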
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 10cff44832d8..8a41f4eba726 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -123,6 +123,8 @@ tconInfoAlloc(void)
ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
GFP_KERNEL);
spin_lock_init(&ret_buf->stat_lock);
+ atomic_set(&ret_buf->num_local_opens, 0);
+ atomic_set(&ret_buf->num_remote_opens, 0);
}
return ret_buf;
}
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index 0ffa18094335..dd10f0ce4cd5 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -33,7 +33,7 @@
/*
* Identifiers for functions that use the open, operation, close pattern
- * in smb2inode.c:smb2_open_op_close()
+ * in smb2inode.c:smb2_compound_op()
*/
#define SMB2_OP_SET_DELETE 1
#define SMB2_OP_SET_INFO 2
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 1eef1791d0c4..9e7ef7ec2d70 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -38,54 +38,83 @@
#include "smb2proto.h"
static int
-smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- __u32 desired_access, __u32 create_disposition,
- __u32 create_options, void *data, int command)
+smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ __u32 desired_access, __u32 create_disposition,
+ __u32 create_options, void *ptr, int command)
{
- int rc, tmprc = 0;
+ int rc;
__le16 *utf16_path = NULL;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct cifs_fid fid;
- bool use_cached_root_handle = false;
-
- if ((strcmp(full_path, "") == 0) && (create_options == 0) &&
- (desired_access == FILE_READ_ATTRIBUTES) &&
- (create_disposition == FILE_OPEN) &&
- (tcon->nohandlecache == false)) {
- rc = open_shroot(xid, tcon, &fid);
- if (rc == 0)
- use_cached_root_handle = true;
- }
+ struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = ses->server;
+ int num_rqst = 0;
+ struct smb_rqst rqst[3];
+ int resp_buftype[3];
+ struct kvec rsp_iov[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+ struct kvec close_iov[1];
+ struct smb2_query_info_rsp *qi_rsp = NULL;
+ int flags = 0;
+ __u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
+ unsigned int size[2];
+ void *data[2];
+ struct smb2_file_rename_info rename_info;
+ struct smb2_file_link_info link_info;
+ int len;
- if (use_cached_root_handle == false) {
- utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
- if (!utf16_path)
- return -ENOMEM;
-
- oparms.tcon = tcon;
- oparms.desired_access = desired_access;
- oparms.disposition = create_disposition;
- oparms.create_options = create_options;
- oparms.fid = &fid;
- oparms.reconnect = false;
-
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
- NULL);
- if (rc) {
- kfree(utf16_path);
- return rc;
- }
- }
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(rqst, 0, sizeof(rqst));
+ resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ /* Open */
+ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
+
+ oparms.tcon = tcon;
+ oparms.desired_access = desired_access;
+ oparms.disposition = create_disposition;
+ oparms.create_options = create_options;
+ if (backup_cred(cifs_sb))
+ oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[num_rqst].rq_iov = open_iov;
+ rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ rc = SMB2_open_init(tcon, &rqst[num_rqst], &oplock, &oparms,
+ utf16_path);
+ kfree(utf16_path);
+ if (rc)
+ goto finished;
+
+ smb2_set_next_command(server, &rqst[num_rqst++]);
+
+ /* Operation */
switch (command) {
- case SMB2_OP_DELETE:
- break;
case SMB2_OP_QUERY_INFO:
- tmprc = SMB2_query_info(xid, tcon, fid.persistent_fid,
- fid.volatile_fid,
- (struct smb2_file_all_info *)data);
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[num_rqst].rq_iov = qi_iov;
+ rqst[num_rqst].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, FILE_ALL_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb2_file_all_info) +
+ PATH_MAX * 2, 0, NULL);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ break;
+ case SMB2_OP_DELETE:
break;
case SMB2_OP_MKDIR:
/*
@@ -94,39 +123,156 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
*/
break;
case SMB2_OP_RMDIR:
- tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
- fid.volatile_fid);
- break;
- case SMB2_OP_RENAME:
- tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
- fid.volatile_fid, (__le16 *)data);
- break;
- case SMB2_OP_HARDLINK:
- tmprc = SMB2_set_hardlink(xid, tcon, fid.persistent_fid,
- fid.volatile_fid, (__le16 *)data);
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 1;
+
+ size[0] = 8;
+ data[0] = &delete_pending[0];
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_DISPOSITION_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_EOF:
- tmprc = SMB2_set_eof(xid, tcon, fid.persistent_fid,
- fid.volatile_fid, current->tgid,
- (__le64 *)data, false);
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 1;
+
+ size[0] = 8; /* sizeof __le64 */
+ data[0] = ptr;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_END_OF_FILE_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_INFO:
- tmprc = SMB2_set_info(xid, tcon, fid.persistent_fid,
- fid.volatile_fid,
- (FILE_BASIC_INFO *)data);
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 1;
+
+
+ size[0] = sizeof(FILE_BASIC_INFO);
+ data[0] = ptr;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_BASIC_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ break;
+ case SMB2_OP_RENAME:
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 2;
+
+ len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
+
+ rename_info.ReplaceIfExists = 1;
+ rename_info.RootDirectory = 0;
+ rename_info.FileNameLength = cpu_to_le32(len);
+
+ size[0] = sizeof(struct smb2_file_rename_info);
+ data[0] = &rename_info;
+
+ size[1] = len + 2 /* null */;
+ data[1] = (__le16 *)ptr;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_RENAME_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ break;
+ case SMB2_OP_HARDLINK:
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 2;
+
+ len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
+
+ link_info.ReplaceIfExists = 0;
+ link_info.RootDirectory = 0;
+ link_info.FileNameLength = cpu_to_le32(len);
+
+ size[0] = sizeof(struct smb2_file_link_info);
+ data[0] = &link_info;
+
+ size[1] = len + 2 /* null */;
+ data[1] = (__le16 *)ptr;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_LINK_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
break;
default:
cifs_dbg(VFS, "Invalid command\n");
- break;
+ rc = -EINVAL;
}
+ if (rc)
+ goto finished;
- if (use_cached_root_handle)
- close_shroot(&tcon->crfid);
- else
- rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
- if (tmprc)
- rc = tmprc;
- kfree(utf16_path);
+ /* Close */
+ memset(&close_iov, 0, sizeof(close_iov));
+ rqst[num_rqst].rq_iov = close_iov;
+ rqst[num_rqst].rq_nvec = 1;
+ rc = SMB2_close_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID);
+ smb2_set_related(&rqst[num_rqst++]);
+ if (rc)
+ goto finished;
+
+ rc = compound_send_recv(xid, ses, flags, num_rqst, rqst,
+ resp_buftype, rsp_iov);
+
+ finished:
+ SMB2_open_free(&rqst[0]);
+ switch (command) {
+ case SMB2_OP_QUERY_INFO:
+ if (rc == 0) {
+ qi_rsp = (struct smb2_query_info_rsp *)
+ rsp_iov[1].iov_base;
+ rc = smb2_validate_and_copy_iov(
+ le16_to_cpu(qi_rsp->OutputBufferOffset),
+ le32_to_cpu(qi_rsp->OutputBufferLength),
+ &rsp_iov[1], sizeof(struct smb2_file_all_info),
+ ptr);
+ }
+ if (rqst[1].rq_iov)
+ SMB2_query_info_free(&rqst[1]);
+ if (rqst[2].rq_iov)
+ SMB2_close_free(&rqst[2]);
+ break;
+ case SMB2_OP_DELETE:
+ case SMB2_OP_MKDIR:
+ if (rqst[1].rq_iov)
+ SMB2_close_free(&rqst[1]);
+ break;
+ case SMB2_OP_HARDLINK:
+ case SMB2_OP_RENAME:
+ case SMB2_OP_RMDIR:
+ case SMB2_OP_SET_EOF:
+ case SMB2_OP_SET_INFO:
+ if (rqst[1].rq_iov)
+ SMB2_set_info_free(&rqst[1]);
+ if (rqst[2].rq_iov)
+ SMB2_close_free(&rqst[2]);
+ break;
+ }
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
return rc;
}
@@ -147,6 +293,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
{
int rc;
struct smb2_file_all_info *smb2_data;
+ __u32 create_options = 0;
*adjust_tz = false;
*symlink = false;
@@ -155,17 +302,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
GFP_KERNEL);
if (smb2_data == NULL)
return -ENOMEM;
+ if (backup_cred(cifs_sb))
+ create_options |= CREATE_OPEN_BACKUP_INTENT;
- rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN, 0,
- smb2_data, SMB2_OP_QUERY_INFO);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
+ smb2_data, SMB2_OP_QUERY_INFO);
if (rc == -EOPNOTSUPP) {
*symlink = true;
+ create_options |= OPEN_REPARSE_POINT;
+
/* Failed on a symbolic link - query a reparse point info */
- rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN,
- OPEN_REPARSE_POINT, smb2_data,
- SMB2_OP_QUERY_INFO);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ FILE_READ_ATTRIBUTES, FILE_OPEN,
+ create_options, smb2_data,
+ SMB2_OP_QUERY_INFO);
}
if (rc)
goto out;
@@ -180,9 +331,9 @@ int
smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
- return smb2_open_op_close(xid, tcon, cifs_sb, name,
- FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR);
+ return smb2_compound_op(xid, tcon, cifs_sb, name,
+ FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+ CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR);
}
void
@@ -199,9 +350,9 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
cifs_i = CIFS_I(inode);
dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
data.Attributes = cpu_to_le32(dosattrs);
- tmprc = smb2_open_op_close(xid, tcon, cifs_sb, name,
- FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO);
+ tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
+ FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+ CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO);
if (tmprc == 0)
cifs_i->cifsAttrs = dosattrs;
}
@@ -210,18 +361,18 @@ int
smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
- return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
- CREATE_NOT_FILE,
- NULL, SMB2_OP_RMDIR);
+ return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+ CREATE_NOT_FILE,
+ NULL, SMB2_OP_RMDIR);
}
int
smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
- return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
- CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
- NULL, SMB2_OP_DELETE);
+ return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+ CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
+ NULL, SMB2_OP_DELETE);
}
static int
@@ -238,8 +389,8 @@ smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
goto smb2_rename_path;
}
- rc = smb2_open_op_close(xid, tcon, cifs_sb, from_name, access,
- FILE_OPEN, 0, smb2_to_name, command);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
+ FILE_OPEN, 0, smb2_to_name, command);
smb2_rename_path:
kfree(smb2_to_name);
return rc;
@@ -269,9 +420,10 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, bool set_alloc)
{
__le64 eof = cpu_to_le64(size);
- return smb2_open_op_close(xid, tcon, cifs_sb, full_path,
- FILE_WRITE_DATA, FILE_OPEN, 0, &eof,
- SMB2_OP_SET_EOF);
+
+ return smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ FILE_WRITE_DATA, FILE_OPEN, 0, &eof,
+ SMB2_OP_SET_EOF);
}
int
@@ -291,9 +443,9 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
if (IS_ERR(tlink))
return PTR_ERR(tlink);
- rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
- FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
- SMB2_OP_SET_INFO);
+ rc = smb2_compound_op(xid, tlink_tcon(tlink), cifs_sb, full_path,
+ FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
+ SMB2_OP_SET_INFO);
cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 20a2d304c603..d47b7f5dfa6c 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -288,7 +288,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_FLT_BUFFER_TOO_SMALL, -ENOBUFS, "STATUS_FLT_BUFFER_TOO_SMALL"},
{STATUS_FVE_PARTIAL_METADATA, -EIO, "STATUS_FVE_PARTIAL_METADATA"},
{STATUS_UNSUCCESSFUL, -EIO, "STATUS_UNSUCCESSFUL"},
- {STATUS_NOT_IMPLEMENTED, -ENOSYS, "STATUS_NOT_IMPLEMENTED"},
+ {STATUS_NOT_IMPLEMENTED, -EOPNOTSUPP, "STATUS_NOT_IMPLEMENTED"},
{STATUS_INVALID_INFO_CLASS, -EIO, "STATUS_INVALID_INFO_CLASS"},
{STATUS_INFO_LENGTH_MISMATCH, -EIO, "STATUS_INFO_LENGTH_MISMATCH"},
{STATUS_ACCESS_VIOLATION, -EACCES, "STATUS_ACCESS_VIOLATION"},
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 26f8a65b8722..23c0a21a66f8 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -74,6 +74,12 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
int *val, rc = 0;
spin_lock(&server->req_lock);
val = server->ops->get_credits_field(server, optype);
+
+ /* e.g. found a case where a write overlapping a reconnect messed up credits */
+ if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
+ trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
+ server->hostname, *val);
+
*val += add;
if (*val > 65000) {
*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
@@ -104,7 +110,12 @@ smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
spin_lock(&server->req_lock);
server->credits = val;
+ if (val == 1)
+ server->reconnect_instance++;
spin_unlock(&server->req_lock);
+ /* don't log while holding the lock */
+ if (val == 1)
+ cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}
static int *
@@ -270,6 +281,31 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
}
static unsigned int
+smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+{
+ struct TCP_Server_Info *server = tcon->ses->server;
+ unsigned int wsize;
+
+ /* start with specified wsize, or default */
+ wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
+ wsize = min_t(unsigned int, wsize, server->max_write);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->rdma) {
+ if (server->sign)
+ wsize = min_t(unsigned int,
+ wsize, server->smbd_conn->max_fragmented_send_size);
+ else
+ wsize = min_t(unsigned int,
+ wsize, server->smbd_conn->max_readwrite_size);
+ }
+#endif
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+
+ return wsize;
+}
+
+static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
struct TCP_Server_Info *server = tcon->ses->server;
@@ -295,6 +331,31 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return rsize;
}
+static unsigned int
+smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+{
+ struct TCP_Server_Info *server = tcon->ses->server;
+ unsigned int rsize;
+
+ /* start with specified rsize, or default */
+ rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
+ rsize = min_t(unsigned int, rsize, server->max_read);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->rdma) {
+ if (server->sign)
+ rsize = min_t(unsigned int,
+ rsize, server->smbd_conn->max_fragmented_recv_size);
+ else
+ rsize = min_t(unsigned int,
+ rsize, server->smbd_conn->max_readwrite_size);
+ }
+#endif
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
+
+ return rsize;
+}
static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
@@ -962,6 +1023,9 @@ smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
(long long)(tcon->bytes_read),
(long long)(tcon->bytes_written));
+ seq_printf(m, "\nOpen files: %d total (local), %d open on server",
+ atomic_read(&tcon->num_local_opens),
+ atomic_read(&tcon->num_remote_opens));
seq_printf(m, "\nTreeConnects: %d total %d failed",
atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
@@ -1057,6 +1121,131 @@ req_res_key_exit:
return rc;
}
+static int
+smb2_ioctl_query_info(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ __le16 *path, int is_dir,
+ unsigned long p)
+{
+ struct cifs_ses *ses = tcon->ses;
+ char __user *arg = (char __user *)p;
+ struct smb_query_info qi;
+ struct smb_query_info __user *pqi;
+ int rc = 0;
+ int flags = 0;
+ struct smb2_query_info_rsp *rsp = NULL;
+ void *buffer = NULL;
+ struct smb_rqst rqst[3];
+ int resp_buftype[3];
+ struct kvec rsp_iov[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct cifs_open_parms oparms;
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ struct cifs_fid fid;
+ struct kvec qi_iov[1];
+ struct kvec close_iov[1];
+
+ memset(rqst, 0, sizeof(rqst));
+ resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
+ return -EFAULT;
+
+ if (qi.output_buffer_length > 1024)
+ return -EINVAL;
+
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
+ if (buffer == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
+ qi.output_buffer_length)) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+
+ /* Open */
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[0].rq_iov = open_iov;
+ rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+
+ memset(&oparms, 0, sizeof(oparms));
+ oparms.tcon = tcon;
+ oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
+ oparms.disposition = FILE_OPEN;
+ if (is_dir)
+ oparms.create_options = CREATE_NOT_FILE;
+ else
+ oparms.create_options = CREATE_NOT_DIR;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
+ if (rc)
+ goto iqinf_exit;
+ smb2_set_next_command(ses->server, &rqst[0]);
+
+ /* Query */
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+ qi.file_info_class, qi.info_type,
+ qi.additional_information,
+ qi.input_buffer_length,
+ qi.output_buffer_length, buffer);
+ if (rc)
+ goto iqinf_exit;
+ smb2_set_next_command(ses->server, &rqst[1]);
+ smb2_set_related(&rqst[1]);
+
+ /* Close */
+ memset(&close_iov, 0, sizeof(close_iov));
+ rqst[2].rq_iov = close_iov;
+ rqst[2].rq_nvec = 1;
+
+ rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
+ if (rc)
+ goto iqinf_exit;
+ smb2_set_related(&rqst[2]);
+
+ rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ resp_buftype, rsp_iov);
+ if (rc)
+ goto iqinf_exit;
+ pqi = (struct smb_query_info __user *)arg;
+ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ if (le32_to_cpu(rsp->OutputBufferLength) < qi.input_buffer_length)
+ qi.input_buffer_length = le32_to_cpu(rsp->OutputBufferLength);
+ if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
+ sizeof(qi.input_buffer_length))) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+ if (copy_to_user(pqi + 1, rsp->Buffer, qi.input_buffer_length)) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+
+ iqinf_exit:
+ kfree(buffer);
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ SMB2_close_free(&rqst[2]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ return rc;
+}
+
static ssize_t
smb2_copychunk_range(const unsigned int xid,
struct cifsFileInfo *srcfile,
@@ -1301,7 +1490,7 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
}
return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, cfile->pid, &eof, false);
+ cfile->fid.volatile_fid, cfile->pid, &eof);
}
static int
@@ -1556,7 +1745,7 @@ smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
CIFS_CACHE_READ(cinode) ? 1 : 0);
}
-static void
+void
smb2_set_related(struct smb_rqst *rqst)
{
struct smb2_sync_hdr *shdr;
@@ -1567,7 +1756,7 @@ smb2_set_related(struct smb_rqst *rqst)
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
-static void
+void
smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
struct smb2_sync_hdr *shdr;
@@ -1610,7 +1799,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
flags |= CIFS_TRANSFORM_REQ;
memset(rqst, 0, sizeof(rqst));
- memset(resp_buftype, 0, sizeof(resp_buftype));
+ resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
memset(&open_iov, 0, sizeof(open_iov));
@@ -1636,7 +1825,8 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
FS_FULL_SIZE_INFORMATION,
SMB2_O_INFO_FILESYSTEM, 0,
- sizeof(struct smb2_fs_full_size_info));
+ sizeof(struct smb2_fs_full_size_info), 0,
+ NULL);
if (rc)
goto qfs_exit;
smb2_set_next_command(server, &rqst[1]);
@@ -3303,6 +3493,7 @@ struct smb_version_operations smb20_operations = {
.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
+ .ioctl_query_info = smb2_ioctl_query_info,
};
struct smb_version_operations smb21_operations = {
@@ -3398,6 +3589,7 @@ struct smb_version_operations smb21_operations = {
.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
+ .ioctl_query_info = smb2_ioctl_query_info,
};
struct smb_version_operations smb30_operations = {
@@ -3425,8 +3617,8 @@ struct smb_version_operations smb30_operations = {
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
- .negotiate_wsize = smb2_negotiate_wsize,
- .negotiate_rsize = smb2_negotiate_rsize,
+ .negotiate_wsize = smb3_negotiate_wsize,
+ .negotiate_rsize = smb3_negotiate_rsize,
.sess_setup = SMB2_sess_setup,
.logoff = SMB2_logoff,
.tree_connect = SMB2_tcon,
@@ -3502,6 +3694,7 @@ struct smb_version_operations smb30_operations = {
.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
+ .ioctl_query_info = smb2_ioctl_query_info,
};
struct smb_version_operations smb311_operations = {
@@ -3529,8 +3722,8 @@ struct smb_version_operations smb311_operations = {
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
- .negotiate_wsize = smb2_negotiate_wsize,
- .negotiate_rsize = smb2_negotiate_rsize,
+ .negotiate_wsize = smb3_negotiate_wsize,
+ .negotiate_rsize = smb3_negotiate_rsize,
.sess_setup = SMB2_sess_setup,
.logoff = SMB2_logoff,
.tree_connect = SMB2_tcon,
@@ -3607,6 +3800,7 @@ struct smb_version_operations smb311_operations = {
.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
+ .ioctl_query_info = smb2_ioctl_query_info,
};
struct smb_version_values smb20_values = {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index f54d07bda067..7d7b016fe8bb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1478,7 +1478,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
tcon->tid = 0;
-
+ atomic_set(&tcon->num_remote_opens, 0);
rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
&total_len);
if (rc) {
@@ -2243,10 +2243,12 @@ SMB2_open_free(struct smb_rqst *rqst)
{
int i;
- cifs_small_buf_release(rqst->rq_iov[0].iov_base);
- for (i = 1; i < rqst->rq_nvec; i++)
- if (rqst->rq_iov[i].iov_base != smb2_padding)
- kfree(rqst->rq_iov[i].iov_base);
+ if (rqst && rqst->rq_iov) {
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base);
+ for (i = 1; i < rqst->rq_nvec; i++)
+ if (rqst->rq_iov[i].iov_base != smb2_padding)
+ kfree(rqst->rq_iov[i].iov_base);
+ }
}
int
@@ -2261,7 +2263,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
struct cifs_ses *ses = tcon->ses;
struct kvec iov[SMB2_CREATE_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
@@ -2303,6 +2305,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
ses->Suid, oparms->create_options,
oparms->desired_access);
+ atomic_inc(&tcon->num_remote_opens);
oparms->fid->persistent_fid = rsp->PersistentFileId;
oparms->fid->volatile_fid = rsp->VolatileFileId;
@@ -2474,13 +2477,13 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
goto ioctl_exit;
}
- *out_data = kmalloc(*plen, GFP_KERNEL);
+ *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
+ *plen, GFP_KERNEL);
if (*out_data == NULL) {
rc = -ENOMEM;
goto ioctl_exit;
}
- memcpy(*out_data, (char *)rsp + le32_to_cpu(rsp->OutputOffset), *plen);
ioctl_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
@@ -2535,7 +2538,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
void
SMB2_close_free(struct smb_rqst *rqst)
{
- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ if (rqst && rqst->rq_iov)
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
int
@@ -2547,7 +2551,7 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
struct kvec rsp_iov;
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
cifs_dbg(FYI, "Close\n");
@@ -2577,6 +2581,8 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
goto close_exit;
}
+ atomic_dec(&tcon->num_remote_opens);
+
/* BB FIXME - decode close response, update inode for caching */
close_exit:
@@ -2627,10 +2633,10 @@ smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
* If SMB buffer fields are valid, copy into temporary buffer to hold result.
* Caller must free buffer.
*/
-static int
-validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
- struct kvec *iov, unsigned int minbufsize,
- char *data)
+int
+smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
+ struct kvec *iov, unsigned int minbufsize,
+ char *data)
{
char *begin_of_buf = offset + (char *)iov->iov_base;
int rc;
@@ -2651,7 +2657,7 @@ int
SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type, u32 additional_info,
- size_t output_len)
+ size_t output_len, size_t input_len, void *input)
{
struct smb2_query_info_req *req;
struct kvec *iov = rqst->rq_iov;
@@ -2669,23 +2675,25 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
req->VolatileFileId = volatile_fid;
req->AdditionalInformation = cpu_to_le32(additional_info);
- /*
- * We do not use the input buffer (do not send extra byte)
- */
- req->InputBufferOffset = 0;
-
req->OutputBufferLength = cpu_to_le32(output_len);
+ if (input_len) {
+ req->InputBufferLength = cpu_to_le32(input_len);
+ /* total_len for smb query request never close to le16 max */
+ req->InputBufferOffset = cpu_to_le16(total_len - 1);
+ memcpy(req->Buffer, input, input_len);
+ }
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
- iov[0].iov_len = total_len - 1;
+ iov[0].iov_len = total_len - 1 + input_len;
return 0;
}
void
SMB2_query_info_free(struct smb_rqst *rqst)
{
- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ if (rqst && rqst->rq_iov)
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
static int
@@ -2699,7 +2707,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec iov[1];
struct kvec rsp_iov;
int rc = 0;
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
@@ -2718,7 +2726,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
info_class, info_type, additional_info,
- output_len);
+ output_len, 0, NULL);
if (rc)
goto qinf_exit;
@@ -2746,9 +2754,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
}
}
- rc = validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
- le32_to_cpu(rsp->OutputBufferLength),
- &rsp_iov, min_len, *data);
+ rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength),
+ &rsp_iov, min_len, *data);
qinf_exit:
SMB2_query_info_free(&rqst);
@@ -3754,45 +3762,22 @@ qdir_exit:
return rc;
}
-static int
-send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+int
+SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
- u8 info_type, u32 additional_info, unsigned int num,
+ u8 info_type, u32 additional_info,
void **data, unsigned int *size)
{
- struct smb_rqst rqst;
struct smb2_set_info_req *req;
- struct smb2_set_info_rsp *rsp = NULL;
- struct kvec *iov;
- struct kvec rsp_iov;
- int rc = 0;
- int resp_buftype;
- unsigned int i;
- struct cifs_ses *ses = tcon->ses;
- int flags = 0;
- unsigned int total_len;
-
- if (!ses || !(ses->server))
- return -EIO;
-
- if (!num)
- return -EINVAL;
-
- iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
- if (!iov)
- return -ENOMEM;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned int i, total_len;
+ int rc;
rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
- if (rc) {
- kfree(iov);
+ if (rc)
return rc;
- }
-
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
req->sync_hdr.ProcessId = cpu_to_le32(pid);
-
req->InfoType = info_type;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
@@ -3810,19 +3795,66 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
- for (i = 1; i < num; i++) {
+ for (i = 1; i < rqst->rq_nvec; i++) {
le32_add_cpu(&req->BufferLength, size[i]);
iov[i].iov_base = (char *)data[i];
iov[i].iov_len = size[i];
}
+ return 0;
+}
+
+void
+SMB2_set_info_free(struct smb_rqst *rqst)
+{
+ if (rqst && rqst->rq_iov)
+ cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
+}
+
+static int
+send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
+ u8 info_type, u32 additional_info, unsigned int num,
+ void **data, unsigned int *size)
+{
+ struct smb_rqst rqst;
+ struct smb2_set_info_rsp *rsp = NULL;
+ struct kvec *iov;
+ struct kvec rsp_iov;
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+ int flags = 0;
+
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (!num)
+ return -EINVAL;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
+ if (!iov)
+ return -ENOMEM;
+
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = num;
+ rc = SMB2_set_info_init(tcon, &rqst, persistent_fid, volatile_fid, pid,
+ info_class, info_type, additional_info,
+ data, size);
+ if (rc) {
+ kfree(iov);
+ return rc;
+ }
+
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
- cifs_buf_release(req);
+ SMB2_set_info_free(&rqst);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -3837,88 +3869,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
}
int
-SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
-{
- struct smb2_file_rename_info info;
- void **data;
- unsigned int size[2];
- int rc;
- int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
-
- data = kmalloc_array(2, sizeof(void *), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
- /* 0 = fail if target already exists */
- info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
- info.FileNameLength = cpu_to_le32(len);
-
- data[0] = &info;
- size[0] = sizeof(struct smb2_file_rename_info);
-
- data[1] = target_file;
- size[1] = len + 2 /* null */;
-
- rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
- current->tgid, FILE_RENAME_INFORMATION, SMB2_O_INFO_FILE,
- 0, 2, data, size);
- kfree(data);
- return rc;
-}
-
-int
-SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid)
-{
- __u8 delete_pending = 1;
- void *data;
- unsigned int size;
-
- data = &delete_pending;
- size = 1; /* sizeof __u8 */
-
- return send_set_info(xid, tcon, persistent_fid, volatile_fid,
- current->tgid, FILE_DISPOSITION_INFORMATION, SMB2_O_INFO_FILE,
- 0, 1, &data, &size);
-}
-
-int
-SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
-{
- struct smb2_file_link_info info;
- void **data;
- unsigned int size[2];
- int rc;
- int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
-
- data = kmalloc_array(2, sizeof(void *), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
- /* 0 = fail if link already exists */
- info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
- info.FileNameLength = cpu_to_le32(len);
-
- data[0] = &info;
- size[0] = sizeof(struct smb2_file_link_info);
-
- data[1] = target_file;
- size[1] = len + 2 /* null */;
-
- rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
- current->tgid, FILE_LINK_INFORMATION, SMB2_O_INFO_FILE,
- 0, 2, data, size);
- kfree(data);
- return rc;
-}
-
-int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
+ u64 volatile_fid, u32 pid, __le64 *eof)
{
struct smb2_file_eof_info info;
void *data;
@@ -3929,28 +3881,12 @@ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
data = &info;
size = sizeof(struct smb2_file_eof_info);
- if (is_falloc)
- return send_set_info(xid, tcon, persistent_fid, volatile_fid,
- pid, FILE_ALLOCATION_INFORMATION, SMB2_O_INFO_FILE,
- 0, 1, &data, &size);
- else
- return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
0, 1, &data, &size);
}
int
-SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
-{
- unsigned int size;
- size = sizeof(FILE_BASIC_INFO);
- return send_set_info(xid, tcon, persistent_fid, volatile_fid,
- current->tgid, FILE_BASIC_INFORMATION, SMB2_O_INFO_FILE,
- 0, 1, (void **)&buf, &size);
-}
-
-int
SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
@@ -4350,6 +4286,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buf_type;
+ __u64 *please_key_high;
+ __u64 *please_key_low;
cifs_dbg(FYI, "SMB2_lease_break\n");
rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
@@ -4379,10 +4317,16 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
+ please_key_low = (__u64 *)req->LeaseKey;
+ please_key_high = (__u64 *)(req->LeaseKey+8);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
+ trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
+ ses->Suid, *please_key_low, *please_key_high, rc);
cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
- }
+ } else
+ trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
+ ses->Suid, *please_key_low, *please_key_high);
return rc;
}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 8fb7887f2b3d..f753f424d7f1 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -613,6 +613,8 @@ struct smb2_tree_disconnect_rsp {
#define SVHDX_OPEN_DEVICE_CONTEX 0x9CCBCF9E04C1E643980E158DA1F6EC83
#define SMB2_CREATE_TAG_POSIX 0x93AD25509CB411E7B42383DE968BCD7C
+/* Flag (SMB3 open response) values */
+#define SMB2_CREATE_FLAG_REPARSEPOINT 0x01
/*
* Maximum number of iovs we need for an open/create request.
@@ -650,7 +652,7 @@ struct smb2_create_rsp {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 89 */
__u8 OplockLevel;
- __u8 Reserved;
+ __u8 Flag; /* 0x01 if reparse point */
__le32 CreateAction;
__le64 CreationTime;
__le64 LastAccessTime;
@@ -1174,6 +1176,15 @@ struct smb2_query_info_rsp {
__u8 Buffer[1];
} __packed;
+/*
+ * Maximum number of iovs we need for a set-info request.
+ * The largest one is rename/hardlink
+ * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
+ * [1] : path
+ * [2] : compound padding
+ */
+#define SMB2_SET_INFO_IOV_SIZE 3
+
struct smb2_set_info_req {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index b4076577eeb7..9f4e9ed9ce53 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -116,6 +116,9 @@ extern void smb2_reconnect_server(struct work_struct *work);
extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
struct smb_rqst *rqst);
+extern void smb2_set_next_command(struct TCP_Server_Info *server,
+ struct smb_rqst *rqst);
+extern void smb2_set_related(struct smb_rqst *rqst);
/*
* SMB2 Worker functions - most of protocol specific implementation details
@@ -160,7 +163,8 @@ extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type,
- u32 additional_info, size_t output_len);
+ u32 additional_info, size_t output_len,
+ size_t input_len, void *input);
extern void SMB2_query_info_free(struct smb_rqst *rqst);
extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
@@ -179,20 +183,14 @@ extern int SMB2_echo(struct TCP_Server_Info *server);
extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf);
-extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- __le16 *target_file);
-extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid);
-extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- __le16 *target_file);
extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid,
- __le64 *eof, bool is_fallocate);
-extern int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- FILE_BASIC_INFO *buf);
+ __le64 *eof);
+extern int SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid, u32 pid,
+ u8 info_class, u8 info_type, u32 additional_info,
+ void **data, unsigned int *size);
+extern void SMB2_set_info_free(struct smb_rqst *rqst);
extern int SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct cifs_ntsd *pnntsd, int pacllen, int aclflag);
@@ -232,6 +230,10 @@ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
extern int smb3_encryption_required(const struct cifs_tcon *tcon);
extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
struct kvec *iov, unsigned int min_buf_size);
+extern int smb2_validate_and_copy_iov(unsigned int offset,
+ unsigned int buffer_length,
+ struct kvec *iov,
+ unsigned int minbufsize, char *data);
extern void smb2_copy_fs_info_to_kstatfs(
struct smb2_fs_full_size_info *pfs_inf,
struct kstatfs *kst);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 5b05bf255268..e94a8d1d08a3 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2304,8 +2304,12 @@ static void smbd_mr_recovery_work(struct work_struct *work)
int rc;
list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
- if (smbdirect_mr->state == MR_INVALIDATED ||
- smbdirect_mr->state == MR_ERROR) {
+ if (smbdirect_mr->state == MR_INVALIDATED)
+ ib_dma_unmap_sg(
+ info->id->device, smbdirect_mr->sgl,
+ smbdirect_mr->sgl_count,
+ smbdirect_mr->dir);
+ else if (smbdirect_mr->state == MR_ERROR) {
/* recover this MR entry */
rc = ib_dereg_mr(smbdirect_mr->mr);
@@ -2329,25 +2333,21 @@ static void smbd_mr_recovery_work(struct work_struct *work)
smbd_disconnect_rdma_connection(info);
continue;
}
+ } else
+ /* This MR is being used, don't recover it */
+ continue;
- if (smbdirect_mr->state == MR_INVALIDATED)
- ib_dma_unmap_sg(
- info->id->device, smbdirect_mr->sgl,
- smbdirect_mr->sgl_count,
- smbdirect_mr->dir);
-
- smbdirect_mr->state = MR_READY;
+ smbdirect_mr->state = MR_READY;
- /* smbdirect_mr->state is updated by this function
- * and is read and updated by I/O issuing CPUs trying
- * to get a MR, the call to atomic_inc_return
- * implicates a memory barrier and guarantees this
- * value is updated before waking up any calls to
- * get_mr() from the I/O issuing CPUs
- */
- if (atomic_inc_return(&info->mr_ready_count) == 1)
- wake_up_interruptible(&info->wait_mr);
- }
+ /* smbdirect_mr->state is updated by this function
+ * and is read and updated by I/O issuing CPUs trying
+ * to get a MR, the call to atomic_inc_return
+ * implicates a memory barrier and guarantees this
+ * value is updated before waking up any calls to
+ * get_mr() from the I/O issuing CPUs
+ */
+ if (atomic_inc_return(&info->mr_ready_count) == 1)
+ wake_up_interruptible(&info->wait_mr);
}
}
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index d4aed5217a56..cce8414fe7ec 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -460,6 +460,85 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name, \
DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
+
+DECLARE_EVENT_CLASS(smb3_lease_done_class,
+ TP_PROTO(__u32 lease_state,
+ __u32 tid,
+ __u64 sesid,
+ __u64 lease_key_low,
+ __u64 lease_key_high),
+ TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high),
+ TP_STRUCT__entry(
+ __field(__u32, lease_state)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u64, lease_key_low)
+ __field(__u64, lease_key_high)
+ ),
+ TP_fast_assign(
+ __entry->lease_state = lease_state;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->lease_key_low = lease_key_low;
+ __entry->lease_key_high = lease_key_high;
+ ),
+ TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x",
+ __entry->sesid, __entry->tid, __entry->lease_key_high,
+ __entry->lease_key_low, __entry->lease_state)
+)
+
+#define DEFINE_SMB3_LEASE_DONE_EVENT(name) \
+DEFINE_EVENT(smb3_lease_done_class, smb3_##name, \
+ TP_PROTO(__u32 lease_state, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u64 lease_key_low, \
+ __u64 lease_key_high), \
+ TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
+
+DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
+
+DECLARE_EVENT_CLASS(smb3_lease_err_class,
+ TP_PROTO(__u32 lease_state,
+ __u32 tid,
+ __u64 sesid,
+ __u64 lease_key_low,
+ __u64 lease_key_high,
+ int rc),
+ TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc),
+ TP_STRUCT__entry(
+ __field(__u32, lease_state)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u64, lease_key_low)
+ __field(__u64, lease_key_high)
+ __field(int, rc)
+ ),
+ TP_fast_assign(
+ __entry->lease_state = lease_state;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->lease_key_low = lease_key_low;
+ __entry->lease_key_high = lease_key_high;
+ __entry->rc = rc;
+ ),
+ TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x rc=%d",
+ __entry->sesid, __entry->tid, __entry->lease_key_high,
+ __entry->lease_key_low, __entry->lease_state, __entry->rc)
+)
+
+#define DEFINE_SMB3_LEASE_ERR_EVENT(name) \
+DEFINE_EVENT(smb3_lease_err_class, smb3_##name, \
+ TP_PROTO(__u32 lease_state, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u64 lease_key_low, \
+ __u64 lease_key_high, \
+ int rc), \
+ TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc))
+
+DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
+
DECLARE_EVENT_CLASS(smb3_reconnect_class,
TP_PROTO(__u64 currmid,
char *hostname),
@@ -486,6 +565,36 @@ DEFINE_EVENT(smb3_reconnect_class, smb3_##name, \
DEFINE_SMB3_RECONNECT_EVENT(reconnect);
DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
+DECLARE_EVENT_CLASS(smb3_credit_class,
+ TP_PROTO(__u64 currmid,
+ char *hostname,
+ int credits),
+ TP_ARGS(currmid, hostname, credits),
+ TP_STRUCT__entry(
+ __field(__u64, currmid)
+ __field(char *, hostname)
+ __field(int, credits)
+ ),
+ TP_fast_assign(
+ __entry->currmid = currmid;
+ __entry->hostname = hostname;
+ __entry->credits = credits;
+ ),
+ TP_printk("server=%s current_mid=0x%llx credits=%d",
+ __entry->hostname,
+ __entry->currmid,
+ __entry->credits)
+)
+
+#define DEFINE_SMB3_CREDIT_EVENT(name) \
+DEFINE_EVENT(smb3_credit_class, smb3_##name, \
+ TP_PROTO(__u64 currmid, \
+ char *hostname, \
+ int credits), \
+ TP_ARGS(currmid, hostname, credits))
+
+DEFINE_SMB3_CREDIT_EVENT(reconnect_with_invalid_credits);
+
#endif /* _CIFS_TRACE_H */
#undef TRACE_INCLUDE_PATH
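[Editor's note] Because the event classes above build their wrappers through the smb3_##name token pasting, DEFINE_SMB3_LEASE_DONE_EVENT(lease_done) and DEFINE_SMB3_LEASE_ERR_EVENT(lease_err) become callable as trace_smb3_lease_done() and trace_smb3_lease_err(). A hypothetical call site (the surrounding variables are placeholders, not taken from the cifs code) would look roughly like:

/*
 * Illustrative only: rc, lease_state, tcon, ses and the lease key
 * halves stand in for whatever the real caller has in scope.
 */
if (rc)
	trace_smb3_lease_err(lease_state, tcon->tid, ses->Suid,
			     lease_key_low, lease_key_high, rc);
else
	trace_smb3_lease_done(lease_state, tcon->tid, ses->Suid,
			      lease_key_low, lease_key_high);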
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 146b618802a5..83ff0c25710d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -113,9 +113,18 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
now = jiffies;
- /* commands taking longer than one second are indications that
- something is wrong, unless it is quite a slow link or server */
- if (time_after(now, midEntry->when_alloc + HZ) &&
+ /*
+ * commands taking longer than one second (default) can be indications
+ * that something is wrong, unless it is quite a slow link or a very
+ * busy server. Note that this calc is unlikely or impossible to wrap
+ * as long as slow_rsp_threshold is not set way above recommended max
+ * value (32767 ie 9 hours) and is generally harmless even if wrong
+ * since only affects debug counters - so leaving the calc as simple
+ * comparison rather than doing multiple conversions and overflow
+ * checks
+ */
+ if ((slow_rsp_threshold != 0) &&
+ time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
(midEntry->command != command)) {
/* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
if ((le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS) &&
@@ -128,7 +137,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
if (cifsFYI & CIFS_TIMER) {
pr_debug(" CIFS slow rsp: cmd %d mid %llu",
midEntry->command, midEntry->mid);
- pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
+ cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
now - midEntry->when_alloc,
now - midEntry->when_sent,
now - midEntry->when_received);
@@ -784,7 +793,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
int i, j, rc = 0;
int timeout, optype;
struct mid_q_entry *midQ[MAX_COMPOUND];
- unsigned int credits = 1;
+ unsigned int credits = 0;
char *buf;
timeout = flags & CIFS_TIMEOUT_MASK;
@@ -849,21 +858,24 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
mutex_unlock(&ses->server->srv_mutex);
- for (i = 0; i < num_rqst; i++) {
- if (rc < 0)
- goto out;
+ if (rc < 0)
+ goto out;
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
- smb311_update_preauth_hash(ses, rqst[i].rq_iov,
- rqst[i].rq_nvec);
+ /*
+ * Compounding is never used during session establish.
+ */
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
+ smb311_update_preauth_hash(ses, rqst[0].rq_iov,
+ rqst[0].rq_nvec);
- if (timeout == CIFS_ASYNC_OP)
- goto out;
+ if (timeout == CIFS_ASYNC_OP)
+ goto out;
+ for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(ses->server, midQ[i]);
if (rc != 0) {
- cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
- midQ[i]->mid);
+ cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
+ midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(ses->server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock);
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
@@ -875,10 +887,21 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
spin_unlock(&GlobalMid_Lock);
}
+ }
+
+ for (i = 0; i < num_rqst; i++)
+ if (midQ[i]->resp_buf)
+ credits += ses->server->ops->get_credits(midQ[i]);
+ if (!credits)
+ credits = 1;
+
+ for (i = 0; i < num_rqst; i++) {
+ if (rc < 0)
+ goto out;
rc = cifs_sync_mid_result(midQ[i], ses->server);
if (rc != 0) {
- add_credits(ses->server, 1, optype);
+ add_credits(ses->server, credits, optype);
return rc;
}
@@ -899,23 +922,26 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
else
resp_buf_type[i] = CIFS_SMALL_BUFFER;
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
- struct kvec iov = {
- .iov_base = resp_iov[i].iov_base,
- .iov_len = resp_iov[i].iov_len
- };
- smb311_update_preauth_hash(ses, &iov, 1);
- }
-
- credits = ses->server->ops->get_credits(midQ[i]);
-
rc = ses->server->ops->check_receive(midQ[i], ses->server,
flags & CIFS_LOG_ERROR);
/* mark it so buf will not be freed by cifs_delete_mid */
if ((flags & CIFS_NO_RESP) == 0)
midQ[i]->resp_buf = NULL;
+
+ }
+
+ /*
+ * Compounding is never used during session establish.
+ */
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
+ struct kvec iov = {
+ .iov_base = resp_iov[0].iov_base,
+ .iov_len = resp_iov[0].iov_len
+ };
+ smb311_update_preauth_hash(ses, &iov, 1);
}
+
out:
/*
* This will dequeue all mids. After this it is important that the
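[Editor's note] The compound_send_recv() hunks above collect the credits granted by every response before the per-mid error paths run, so an early return can hand back the whole compound's worth instead of a hard-coded 1. The same bookkeeping, pulled out into a hypothetical helper purely for illustration (fs/cifs does this inline, and this helper is not part of the patch), would be roughly:

static unsigned int compound_granted_credits(struct TCP_Server_Info *server,
					     struct mid_q_entry **mids,
					     int num_rqst)
{
	unsigned int credits = 0;
	int i;

	/* sum what each response in the compound was granted */
	for (i = 0; i < num_rqst; i++)
		if (mids[i]->resp_buf)
			credits += server->ops->get_credits(mids[i]);

	/* never hand back less than the single credit the send consumed */
	return credits ? credits : 1;
}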
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 504b3c3539dc..15f6e96b3bd9 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -52,7 +52,7 @@
#define elf_prpsinfo compat_elf_prpsinfo
#undef ns_to_timeval
-#define ns_to_timeval ns_to_compat_timeval
+#define ns_to_timeval ns_to_old_timeval32
/*
* To use this file, asm/elf.h must define compat_elf_check_arch.
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a9b00942e87d..6e30949d9f77 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -22,37 +22,21 @@
#include <linux/smp.h>
#include <linux/ioctl.h>
#include <linux/if.h>
-#include <linux/if_bridge.h>
#include <linux/raid/md_u.h>
-#include <linux/kd.h>
-#include <linux/route.h>
-#include <linux/in6.h>
-#include <linux/ipv6_route.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/vt.h>
#include <linux/falloc.h>
-#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
-#include <linux/fb.h>
-#include <linux/videodev2.h>
-#include <linux/netdevice.h>
#include <linux/raw.h>
#include <linux/blkdev.h>
-#include <linux/elevator.h>
#include <linux/rtc.h>
#include <linux/pci.h>
#include <linux/serial.h>
-#include <linux/if_tun.h>
#include <linux/ctype.h>
#include <linux/syscalls.h>
-#include <linux/atalk.h>
#include <linux/gfp.h>
#include <linux/cec.h>
@@ -74,44 +58,17 @@
#endif
#include <linux/uaccess.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_bonding.h>
#include <linux/watchdog.h>
#include <linux/soundcard.h>
-#include <linux/lp.h>
-#include <linux/ppdev.h>
-
-#include <linux/atm.h>
-#include <linux/atmarp.h>
-#include <linux/atmclip.h>
-#include <linux/atmdev.h>
-#include <linux/atmioc.h>
-#include <linux/atmlec.h>
-#include <linux/atmmpc.h>
-#include <linux/atmsvc.h>
-#include <linux/atm_tcp.h>
-#include <linux/sonet.h>
-#include <linux/atm_suni.h>
-
-#include <linux/usb.h>
-#include <linux/usbdevice_fs.h>
-#include <linux/nbd.h>
-#include <linux/random.h>
-#include <linux/filter.h>
#include <linux/hiddev.h>
-#define __DVB_CORE__
-#include <linux/dvb/audio.h>
-#include <linux/dvb/dmx.h>
-#include <linux/dvb/frontend.h>
-#include <linux/dvb/video.h>
#include <linux/sort.h>
#ifdef CONFIG_SPARC
+#include <linux/fb.h>
#include <asm/fbio.h>
#endif
@@ -133,71 +90,6 @@ static int do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return vfs_ioctl(file, cmd, arg);
}
-struct compat_video_event {
- int32_t type;
- compat_time_t timestamp;
- union {
- video_size_t size;
- unsigned int frame_rate;
- } u;
-};
-
-static int do_video_get_event(struct file *file,
- unsigned int cmd, struct compat_video_event __user *up)
-{
- struct video_event __user *kevent =
- compat_alloc_user_space(sizeof(*kevent));
- int err;
-
- if (kevent == NULL)
- return -EFAULT;
-
- err = do_ioctl(file, cmd, (unsigned long)kevent);
- if (!err) {
- err = convert_in_user(&kevent->type, &up->type);
- err |= convert_in_user(&kevent->timestamp, &up->timestamp);
- err |= convert_in_user(&kevent->u.size.w, &up->u.size.w);
- err |= convert_in_user(&kevent->u.size.h, &up->u.size.h);
- err |= convert_in_user(&kevent->u.size.aspect_ratio,
- &up->u.size.aspect_ratio);
- if (err)
- err = -EFAULT;
- }
-
- return err;
-}
-
-struct compat_video_still_picture {
- compat_uptr_t iFrame;
- int32_t size;
-};
-
-static int do_video_stillpicture(struct file *file,
- unsigned int cmd, struct compat_video_still_picture __user *up)
-{
- struct video_still_picture __user *up_native;
- compat_uptr_t fp;
- int32_t size;
- int err;
-
- err = get_user(fp, &up->iFrame);
- err |= get_user(size, &up->size);
- if (err)
- return -EFAULT;
-
- up_native =
- compat_alloc_user_space(sizeof(struct video_still_picture));
-
- err = put_user(compat_ptr(fp), &up_native->iFrame);
- err |= put_user(size, &up_native->size);
- if (err)
- return -EFAULT;
-
- err = do_ioctl(file, cmd, (unsigned long) up_native);
-
- return err;
-}
-
#ifdef CONFIG_BLOCK
typedef struct sg_io_hdr32 {
compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
@@ -544,85 +436,6 @@ static int mt_ioctl_trans(struct file *file,
#define HCIUARTSETFLAGS _IOW('U', 203, int)
#define HCIUARTGETFLAGS _IOR('U', 204, int)
-#define BNEPCONNADD _IOW('B', 200, int)
-#define BNEPCONNDEL _IOW('B', 201, int)
-#define BNEPGETCONNLIST _IOR('B', 210, int)
-#define BNEPGETCONNINFO _IOR('B', 211, int)
-#define BNEPGETSUPPFEAT _IOR('B', 212, int)
-
-#define CMTPCONNADD _IOW('C', 200, int)
-#define CMTPCONNDEL _IOW('C', 201, int)
-#define CMTPGETCONNLIST _IOR('C', 210, int)
-#define CMTPGETCONNINFO _IOR('C', 211, int)
-
-#define HIDPCONNADD _IOW('H', 200, int)
-#define HIDPCONNDEL _IOW('H', 201, int)
-#define HIDPGETCONNLIST _IOR('H', 210, int)
-#define HIDPGETCONNINFO _IOR('H', 211, int)
-
-
-struct serial_struct32 {
- compat_int_t type;
- compat_int_t line;
- compat_uint_t port;
- compat_int_t irq;
- compat_int_t flags;
- compat_int_t xmit_fifo_size;
- compat_int_t custom_divisor;
- compat_int_t baud_base;
- unsigned short close_delay;
- char io_type;
- char reserved_char[1];
- compat_int_t hub6;
- unsigned short closing_wait; /* time to wait before closing */
- unsigned short closing_wait2; /* no longer used... */
- compat_uint_t iomem_base;
- unsigned short iomem_reg_shift;
- unsigned int port_high;
- /* compat_ulong_t iomap_base FIXME */
- compat_int_t reserved[1];
-};
-
-static int serial_struct_ioctl(struct file *file,
- unsigned cmd, struct serial_struct32 __user *ss32)
-{
- typedef struct serial_struct32 SS32;
- int err;
- struct serial_struct __user *ss = compat_alloc_user_space(sizeof(*ss));
- __u32 udata;
- unsigned int base;
- unsigned char *iomem_base;
-
- if (ss == NULL)
- return -EFAULT;
- if (cmd == TIOCSSERIAL) {
- if (copy_in_user(ss, ss32, offsetof(SS32, iomem_base)) ||
- get_user(udata, &ss32->iomem_base))
- return -EFAULT;
- iomem_base = compat_ptr(udata);
- if (put_user(iomem_base, &ss->iomem_base) ||
- convert_in_user(&ss32->iomem_reg_shift,
- &ss->iomem_reg_shift) ||
- convert_in_user(&ss32->port_high, &ss->port_high) ||
- put_user(0UL, &ss->iomap_base))
- return -EFAULT;
- }
- err = do_ioctl(file, cmd, (unsigned long)ss);
- if (cmd == TIOCGSERIAL && err >= 0) {
- if (copy_in_user(ss32, ss, offsetof(SS32, iomem_base)) ||
- get_user(iomem_base, &ss->iomem_base))
- return -EFAULT;
- base = (unsigned long)iomem_base >> 32 ?
- 0xffffffff : (unsigned)(unsigned long)iomem_base;
- if (put_user(base, &ss32->iomem_base) ||
- convert_in_user(&ss->iomem_reg_shift,
- &ss32->iomem_reg_shift) ||
- convert_in_user(&ss->port_high, &ss32->port_high))
- return -EFAULT;
- }
- return err;
-}
-
#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
@@ -707,60 +520,8 @@ static int compat_ioctl_preallocate(struct file *file,
static unsigned int ioctl_pointer[] = {
/* compatible ioctls first */
-COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */
-COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */
-
-/* Big T */
-COMPATIBLE_IOCTL(TCGETA)
-COMPATIBLE_IOCTL(TCSETA)
-COMPATIBLE_IOCTL(TCSETAW)
-COMPATIBLE_IOCTL(TCSETAF)
-COMPATIBLE_IOCTL(TCSBRK)
-COMPATIBLE_IOCTL(TCXONC)
-COMPATIBLE_IOCTL(TCFLSH)
-COMPATIBLE_IOCTL(TCGETS)
-COMPATIBLE_IOCTL(TCSETS)
-COMPATIBLE_IOCTL(TCSETSW)
-COMPATIBLE_IOCTL(TCSETSF)
-COMPATIBLE_IOCTL(TIOCLINUX)
-COMPATIBLE_IOCTL(TIOCSBRK)
-COMPATIBLE_IOCTL(TIOCGDEV)
-COMPATIBLE_IOCTL(TIOCCBRK)
-COMPATIBLE_IOCTL(TIOCGSID)
-COMPATIBLE_IOCTL(TIOCGICOUNT)
-COMPATIBLE_IOCTL(TIOCGEXCL)
/* Little t */
-COMPATIBLE_IOCTL(TIOCGETD)
-COMPATIBLE_IOCTL(TIOCSETD)
-COMPATIBLE_IOCTL(TIOCEXCL)
-COMPATIBLE_IOCTL(TIOCNXCL)
-COMPATIBLE_IOCTL(TIOCCONS)
-COMPATIBLE_IOCTL(TIOCGSOFTCAR)
-COMPATIBLE_IOCTL(TIOCSSOFTCAR)
-COMPATIBLE_IOCTL(TIOCSWINSZ)
-COMPATIBLE_IOCTL(TIOCGWINSZ)
-COMPATIBLE_IOCTL(TIOCMGET)
-COMPATIBLE_IOCTL(TIOCMBIC)
-COMPATIBLE_IOCTL(TIOCMBIS)
-COMPATIBLE_IOCTL(TIOCMSET)
-COMPATIBLE_IOCTL(TIOCNOTTY)
-COMPATIBLE_IOCTL(TIOCSTI)
COMPATIBLE_IOCTL(TIOCOUTQ)
-COMPATIBLE_IOCTL(TIOCSPGRP)
-COMPATIBLE_IOCTL(TIOCGPGRP)
-COMPATIBLE_IOCTL(TIOCSERGETLSR)
-#ifdef TIOCSRS485
-COMPATIBLE_IOCTL(TIOCSRS485)
-#endif
-#ifdef TIOCGRS485
-COMPATIBLE_IOCTL(TIOCGRS485)
-#endif
-#ifdef TCGETS2
-COMPATIBLE_IOCTL(TCGETS2)
-COMPATIBLE_IOCTL(TCSETS2)
-COMPATIBLE_IOCTL(TCSETSW2)
-COMPATIBLE_IOCTL(TCSETSF2)
-#endif
/* Little f */
COMPATIBLE_IOCTL(FIOCLEX)
COMPATIBLE_IOCTL(FIONCLEX)
@@ -775,23 +536,6 @@ COMPATIBLE_IOCTL(FIGETBSZ)
COMPATIBLE_IOCTL(FIFREEZE)
COMPATIBLE_IOCTL(FITHAW)
COMPATIBLE_IOCTL(FITRIM)
-COMPATIBLE_IOCTL(KDGETKEYCODE)
-COMPATIBLE_IOCTL(KDSETKEYCODE)
-COMPATIBLE_IOCTL(KDGKBTYPE)
-COMPATIBLE_IOCTL(KDGETMODE)
-COMPATIBLE_IOCTL(KDGKBMODE)
-COMPATIBLE_IOCTL(KDGKBMETA)
-COMPATIBLE_IOCTL(KDGKBENT)
-COMPATIBLE_IOCTL(KDSKBENT)
-COMPATIBLE_IOCTL(KDGKBSENT)
-COMPATIBLE_IOCTL(KDSKBSENT)
-COMPATIBLE_IOCTL(KDGKBDIACR)
-COMPATIBLE_IOCTL(KDSKBDIACR)
-COMPATIBLE_IOCTL(KDGKBDIACRUC)
-COMPATIBLE_IOCTL(KDSKBDIACRUC)
-COMPATIBLE_IOCTL(KDKBDREP)
-COMPATIBLE_IOCTL(KDGKBLED)
-COMPATIBLE_IOCTL(KDGETLED)
#ifdef CONFIG_BLOCK
/* Big S */
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
@@ -1106,19 +850,6 @@ COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
COMPATIBLE_IOCTL(RFCOMMGETDEVINFO)
COMPATIBLE_IOCTL(RFCOMMSTEALDLC)
-COMPATIBLE_IOCTL(BNEPCONNADD)
-COMPATIBLE_IOCTL(BNEPCONNDEL)
-COMPATIBLE_IOCTL(BNEPGETCONNLIST)
-COMPATIBLE_IOCTL(BNEPGETCONNINFO)
-COMPATIBLE_IOCTL(BNEPGETSUPPFEAT)
-COMPATIBLE_IOCTL(CMTPCONNADD)
-COMPATIBLE_IOCTL(CMTPCONNDEL)
-COMPATIBLE_IOCTL(CMTPGETCONNLIST)
-COMPATIBLE_IOCTL(CMTPGETCONNINFO)
-COMPATIBLE_IOCTL(HIDPCONNADD)
-COMPATIBLE_IOCTL(HIDPCONNDEL)
-COMPATIBLE_IOCTL(HIDPGETCONNLIST)
-COMPATIBLE_IOCTL(HIDPGETCONNINFO)
/* CAPI */
COMPATIBLE_IOCTL(CAPI_REGISTER)
COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER)
@@ -1133,11 +864,6 @@ COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
-/* Siemens Gigaset */
-COMPATIBLE_IOCTL(GIGASET_REDIR)
-COMPATIBLE_IOCTL(GIGASET_CONFIG)
-COMPATIBLE_IOCTL(GIGASET_BRKCHARS)
-COMPATIBLE_IOCTL(GIGASET_VERSION)
/* Misc. */
COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
@@ -1162,82 +888,12 @@ COMPATIBLE_IOCTL(HIDIOCGFLAG)
COMPATIBLE_IOCTL(HIDIOCSFLAG)
COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINDEX)
COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINFO)
-/* dvb */
-COMPATIBLE_IOCTL(AUDIO_STOP)
-COMPATIBLE_IOCTL(AUDIO_PLAY)
-COMPATIBLE_IOCTL(AUDIO_PAUSE)
-COMPATIBLE_IOCTL(AUDIO_CONTINUE)
-COMPATIBLE_IOCTL(AUDIO_SELECT_SOURCE)
-COMPATIBLE_IOCTL(AUDIO_SET_MUTE)
-COMPATIBLE_IOCTL(AUDIO_SET_AV_SYNC)
-COMPATIBLE_IOCTL(AUDIO_SET_BYPASS_MODE)
-COMPATIBLE_IOCTL(AUDIO_CHANNEL_SELECT)
-COMPATIBLE_IOCTL(AUDIO_GET_STATUS)
-COMPATIBLE_IOCTL(AUDIO_GET_CAPABILITIES)
-COMPATIBLE_IOCTL(AUDIO_CLEAR_BUFFER)
-COMPATIBLE_IOCTL(AUDIO_SET_ID)
-COMPATIBLE_IOCTL(AUDIO_SET_MIXER)
-COMPATIBLE_IOCTL(AUDIO_SET_STREAMTYPE)
-COMPATIBLE_IOCTL(DMX_START)
-COMPATIBLE_IOCTL(DMX_STOP)
-COMPATIBLE_IOCTL(DMX_SET_FILTER)
-COMPATIBLE_IOCTL(DMX_SET_PES_FILTER)
-COMPATIBLE_IOCTL(DMX_SET_BUFFER_SIZE)
-COMPATIBLE_IOCTL(DMX_GET_PES_PIDS)
-COMPATIBLE_IOCTL(DMX_GET_STC)
-COMPATIBLE_IOCTL(DMX_REQBUFS)
-COMPATIBLE_IOCTL(DMX_QUERYBUF)
-COMPATIBLE_IOCTL(DMX_EXPBUF)
-COMPATIBLE_IOCTL(DMX_QBUF)
-COMPATIBLE_IOCTL(DMX_DQBUF)
-COMPATIBLE_IOCTL(VIDEO_STOP)
-COMPATIBLE_IOCTL(VIDEO_PLAY)
-COMPATIBLE_IOCTL(VIDEO_FREEZE)
-COMPATIBLE_IOCTL(VIDEO_CONTINUE)
-COMPATIBLE_IOCTL(VIDEO_SELECT_SOURCE)
-COMPATIBLE_IOCTL(VIDEO_SET_BLANK)
-COMPATIBLE_IOCTL(VIDEO_GET_STATUS)
-COMPATIBLE_IOCTL(VIDEO_SET_DISPLAY_FORMAT)
-COMPATIBLE_IOCTL(VIDEO_FAST_FORWARD)
-COMPATIBLE_IOCTL(VIDEO_SLOWMOTION)
-COMPATIBLE_IOCTL(VIDEO_GET_CAPABILITIES)
-COMPATIBLE_IOCTL(VIDEO_CLEAR_BUFFER)
-COMPATIBLE_IOCTL(VIDEO_SET_STREAMTYPE)
-COMPATIBLE_IOCTL(VIDEO_SET_FORMAT)
-COMPATIBLE_IOCTL(VIDEO_GET_SIZE)
-/* cec */
-COMPATIBLE_IOCTL(CEC_ADAP_G_CAPS)
-COMPATIBLE_IOCTL(CEC_ADAP_G_LOG_ADDRS)
-COMPATIBLE_IOCTL(CEC_ADAP_S_LOG_ADDRS)
-COMPATIBLE_IOCTL(CEC_ADAP_G_PHYS_ADDR)
-COMPATIBLE_IOCTL(CEC_ADAP_S_PHYS_ADDR)
-COMPATIBLE_IOCTL(CEC_G_MODE)
-COMPATIBLE_IOCTL(CEC_S_MODE)
-COMPATIBLE_IOCTL(CEC_TRANSMIT)
-COMPATIBLE_IOCTL(CEC_RECEIVE)
-COMPATIBLE_IOCTL(CEC_DQEVENT)
-
/* joystick */
COMPATIBLE_IOCTL(JSIOCGVERSION)
COMPATIBLE_IOCTL(JSIOCGAXES)
COMPATIBLE_IOCTL(JSIOCGBUTTONS)
COMPATIBLE_IOCTL(JSIOCGNAME(0))
-#ifdef TIOCGLTC
-COMPATIBLE_IOCTL(TIOCGLTC)
-COMPATIBLE_IOCTL(TIOCSLTC)
-#endif
-#ifdef TIOCSTART
-/*
- * For these two we have definitions in ioctls.h and/or termios.h on
- * some architectures but no actual implemention. Some applications
- * like bash call them if they are defined in the headers, so we provide
- * entries here to avoid syslog message spew.
- */
-COMPATIBLE_IOCTL(TIOCSTART)
-COMPATIBLE_IOCTL(TIOCSTOP)
-#endif
-
/* fat 'r' ioctls. These are handled by fat with ->compat_ioctl,
but we don't want warnings on other file systems. So declare
them as compatible here. */
@@ -1293,22 +949,12 @@ static long do_ioctl_trans(unsigned int cmd,
case MTIOCPOS32:
return mt_ioctl_trans(file, cmd, argp);
#endif
- /* Serial */
- case TIOCGSERIAL:
- case TIOCSSERIAL:
- return serial_struct_ioctl(file, cmd, argp);
/* Not implemented in the native kernel */
case RTC_IRQP_READ32:
case RTC_IRQP_SET32:
case RTC_EPOCH_READ32:
case RTC_EPOCH_SET32:
return rtc_ioctl(file, cmd, argp);
-
- /* dvb */
- case VIDEO_GET_EVENT:
- return do_video_get_event(file, cmd, argp);
- case VIDEO_STILLPICTURE:
- return do_video_stillpicture(file, cmd, argp);
}
/*
@@ -1316,24 +962,11 @@ static long do_ioctl_trans(unsigned int cmd,
* so we must not do a compat_ptr() translation.
*/
switch (cmd) {
- /* Big T */
- case TCSBRKP:
- case TIOCMIWAIT:
- case TIOCSCTTY:
/* RAID */
case HOT_REMOVE_DISK:
case HOT_ADD_DISK:
case SET_DISK_FAULTY:
case SET_BITMAP_FILE:
- /* Big K */
- case KDSIGACCEPT:
- case KIOCSOUND:
- case KDMKTONE:
- case KDSETMODE:
- case KDSKBMODE:
- case KDSKBMETA:
- case KDSKBLED:
- case KDSETLED:
return vfs_ioctl(file, cmd, arg);
}
diff --git a/fs/coredump.c b/fs/coredump.c
index 1e2c87acac9b..e42e17e55bfd 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -536,7 +536,7 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
return err;
}
-void do_coredump(const siginfo_t *siginfo)
+void do_coredump(const kernel_siginfo_t *siginfo)
{
struct core_state core_state;
struct core_name cn;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index f408994fc632..9352487bd0fc 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -202,7 +202,8 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
continue;
blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
blk_offset += offset;
- if (blk_offset + len > BUFFER_SIZE)
+ if (blk_offset > BUFFER_SIZE ||
+ blk_offset + len > BUFFER_SIZE)
continue;
return read_buffers[i] + blk_offset;
}
@@ -418,9 +419,12 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
int i;
vma->vm_flags |= VM_MIXEDMAP;
for (i = 0; i < pages && !ret; i++) {
+ vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
- ret = vm_insert_mixed(vma, vma->vm_start + off, pfn);
+ vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
+ if (vmf & VM_FAULT_ERROR)
+ ret = vm_fault_to_errno(vmf, 0);
}
}
@@ -869,8 +873,8 @@ static int cramfs_readpage(struct file *file, struct page *page)
if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
/* See comments on earlier code. */
u32 prev_start = block_start;
- block_start = prev_start & ~CRAMFS_BLK_FLAGS;
- block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
+ block_start = prev_start & ~CRAMFS_BLK_FLAGS;
+ block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
block_start += PAGE_SIZE;
} else {
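[Editor's note] The extra "blk_offset > BUFFER_SIZE" test added to cramfs_blkdev_read() above matters only when the unsigned addition wraps: if blk_offset were large but the sum did not wrap, the old "blk_offset + len > BUFFER_SIZE" comparison would already reject it. A runnable user-space toy showing the wrap case (the constants and types are made up for illustration, not the cramfs values):

#include <stdio.h>

#define BUFFER_SIZE (4u * 4096u)	/* illustrative size, not cramfs's */

int main(void)
{
	unsigned int blk_offset = 0xffffff00u;	/* pathological offset */
	unsigned int len = 0x200u;

	/* old test alone: the sum wraps to 0x100, so it does NOT trigger */
	printf("old test triggers: %d\n", blk_offset + len > BUFFER_SIZE);

	/* new test rejects the bogus offset before the wrapped sum is consulted */
	printf("new test triggers: %d\n", blk_offset > BUFFER_SIZE);
	return 0;
}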
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 39c20ef26db4..79debfc9cef9 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -83,10 +83,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
- if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
- return true;
-
return false;
}
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index e997ca51192f..7874c9bb2fc5 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -174,16 +174,6 @@ static struct fscrypt_mode {
.cipher_str = "cts(cbc(aes))",
.keysize = 16,
},
- [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
- .friendly_name = "Speck128/256-XTS",
- .cipher_str = "xts(speck128)",
- .keysize = 64,
- },
- [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
- .friendly_name = "Speck128/256-CTS-CBC",
- .cipher_str = "cts(cbc(speck128))",
- .keysize = 32,
- },
};
static struct fscrypt_mode *
diff --git a/fs/dax.c b/fs/dax.c
index 0fb270f0a0ef..616e36ea6aaa 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -38,6 +38,17 @@
#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>
+static inline unsigned int pe_order(enum page_entry_size pe_size)
+{
+ if (pe_size == PE_SIZE_PTE)
+ return PAGE_SHIFT - PAGE_SHIFT;
+ if (pe_size == PE_SIZE_PMD)
+ return PMD_SHIFT - PAGE_SHIFT;
+ if (pe_size == PE_SIZE_PUD)
+ return PUD_SHIFT - PAGE_SHIFT;
+ return ~0;
+}
+
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
@@ -46,6 +57,9 @@
#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
+/* The order of a PMD entry */
+#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
+
static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
static int __init init_dax_wait_table(void)
@@ -59,63 +73,74 @@ static int __init init_dax_wait_table(void)
fs_initcall(init_dax_wait_table);
/*
- * We use lowest available bit in exceptional entry for locking, one bit for
- * the entry size (PMD) and two more to tell us if the entry is a zero page or
- * an empty entry that is just used for locking. In total four special bits.
+ * DAX pagecache entries use XArray value entries so they can't be mistaken
+ * for pages. We use one bit for locking, one bit for the entry size (PMD)
+ * and two more to tell us if the entry is a zero page or an empty entry that
+ * is just used for locking. In total four special bits.
*
* If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
* and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
* block allocation.
*/
-#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
-#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
-#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
-#define RADIX_DAX_ZERO_PAGE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
-#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
+#define DAX_SHIFT (4)
+#define DAX_LOCKED (1UL << 0)
+#define DAX_PMD (1UL << 1)
+#define DAX_ZERO_PAGE (1UL << 2)
+#define DAX_EMPTY (1UL << 3)
-static unsigned long dax_radix_pfn(void *entry)
+static unsigned long dax_to_pfn(void *entry)
{
- return (unsigned long)entry >> RADIX_DAX_SHIFT;
+ return xa_to_value(entry) >> DAX_SHIFT;
}
-static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
+static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
- return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
- (pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
+ return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}
-static unsigned int dax_radix_order(void *entry)
+static void *dax_make_page_entry(struct page *page)
{
- if ((unsigned long)entry & RADIX_DAX_PMD)
- return PMD_SHIFT - PAGE_SHIFT;
+ pfn_t pfn = page_to_pfn_t(page);
+ return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
+}
+
+static bool dax_is_locked(void *entry)
+{
+ return xa_to_value(entry) & DAX_LOCKED;
+}
+
+static unsigned int dax_entry_order(void *entry)
+{
+ if (xa_to_value(entry) & DAX_PMD)
+ return PMD_ORDER;
return 0;
}
static int dax_is_pmd_entry(void *entry)
{
- return (unsigned long)entry & RADIX_DAX_PMD;
+ return xa_to_value(entry) & DAX_PMD;
}
static int dax_is_pte_entry(void *entry)
{
- return !((unsigned long)entry & RADIX_DAX_PMD);
+ return !(xa_to_value(entry) & DAX_PMD);
}
static int dax_is_zero_entry(void *entry)
{
- return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
+ return xa_to_value(entry) & DAX_ZERO_PAGE;
}
static int dax_is_empty_entry(void *entry)
{
- return (unsigned long)entry & RADIX_DAX_EMPTY;
+ return xa_to_value(entry) & DAX_EMPTY;
}
/*
- * DAX radix tree locking
+ * DAX page cache entry locking
*/
struct exceptional_entry_key {
- struct address_space *mapping;
+ struct xarray *xa;
pgoff_t entry_start;
};
@@ -124,10 +149,11 @@ struct wait_exceptional_entry_queue {
struct exceptional_entry_key key;
};
-static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
- pgoff_t index, void *entry, struct exceptional_entry_key *key)
+static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
+ void *entry, struct exceptional_entry_key *key)
{
unsigned long hash;
+ unsigned long index = xas->xa_index;
/*
* If 'entry' is a PMD, align the 'index' that we use for the wait
@@ -136,22 +162,21 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
*/
if (dax_is_pmd_entry(entry))
index &= ~PG_PMD_COLOUR;
-
- key->mapping = mapping;
+ key->xa = xas->xa;
key->entry_start = index;
- hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
+ hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
return wait_table + hash;
}
-static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
- int sync, void *keyp)
+static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
+ unsigned int mode, int sync, void *keyp)
{
struct exceptional_entry_key *key = keyp;
struct wait_exceptional_entry_queue *ewait =
container_of(wait, struct wait_exceptional_entry_queue, wait);
- if (key->mapping != ewait->key.mapping ||
+ if (key->xa != ewait->key.xa ||
key->entry_start != ewait->key.entry_start)
return 0;
return autoremove_wake_function(wait, mode, sync, NULL);
@@ -162,13 +187,12 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
* The important information it's conveying is whether the entry at
* this index used to be a PMD entry.
*/
-static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
struct exceptional_entry_key key;
wait_queue_head_t *wq;
- wq = dax_entry_waitqueue(mapping, index, entry, &key);
+ wq = dax_entry_waitqueue(xas, entry, &key);
/*
* Checking for locked entry and prepare_to_wait_exclusive() happens
@@ -181,55 +205,16 @@ static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
}
/*
- * Check whether the given slot is locked. Must be called with the i_pages
- * lock held.
- */
-static inline int slot_locked(struct address_space *mapping, void **slot)
-{
- unsigned long entry = (unsigned long)
- radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
- return entry & RADIX_DAX_ENTRY_LOCK;
-}
-
-/*
- * Mark the given slot as locked. Must be called with the i_pages lock held.
- */
-static inline void *lock_slot(struct address_space *mapping, void **slot)
-{
- unsigned long entry = (unsigned long)
- radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
-
- entry |= RADIX_DAX_ENTRY_LOCK;
- radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
- return (void *)entry;
-}
-
-/*
- * Mark the given slot as unlocked. Must be called with the i_pages lock held.
- */
-static inline void *unlock_slot(struct address_space *mapping, void **slot)
-{
- unsigned long entry = (unsigned long)
- radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
-
- entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
- radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
- return (void *)entry;
-}
-
-/*
- * Lookup entry in radix tree, wait for it to become unlocked if it is
- * exceptional entry and return it. The caller must call
- * put_unlocked_mapping_entry() when he decided not to lock the entry or
- * put_locked_mapping_entry() when he locked the entry and now wants to
- * unlock it.
+ * Look up entry in page cache, wait for it to become unlocked if it
+ * is a DAX entry and return it. The caller must subsequently call
+ * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
+ * if it did.
*
* Must be called with the i_pages lock held.
*/
-static void *__get_unlocked_mapping_entry(struct address_space *mapping,
- pgoff_t index, void ***slotp, bool (*wait_fn)(void))
+static void *get_unlocked_entry(struct xa_state *xas)
{
- void *entry, **slot;
+ void *entry;
struct wait_exceptional_entry_queue ewait;
wait_queue_head_t *wq;
@@ -237,80 +222,54 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
ewait.wait.func = wake_exceptional_entry_func;
for (;;) {
- bool revalidate;
-
- entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
- &slot);
- if (!entry ||
- WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
- !slot_locked(mapping, slot)) {
- if (slotp)
- *slotp = slot;
+ entry = xas_load(xas);
+ if (!entry || xa_is_internal(entry) ||
+ WARN_ON_ONCE(!xa_is_value(entry)) ||
+ !dax_is_locked(entry))
return entry;
- }
- wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
+ wq = dax_entry_waitqueue(xas, entry, &ewait.key);
prepare_to_wait_exclusive(wq, &ewait.wait,
TASK_UNINTERRUPTIBLE);
- xa_unlock_irq(&mapping->i_pages);
- revalidate = wait_fn();
+ xas_unlock_irq(xas);
+ xas_reset(xas);
+ schedule();
finish_wait(wq, &ewait.wait);
- xa_lock_irq(&mapping->i_pages);
- if (revalidate)
- return ERR_PTR(-EAGAIN);
+ xas_lock_irq(xas);
}
}
-static bool entry_wait(void)
-{
- schedule();
- /*
- * Never return an ERR_PTR() from
- * __get_unlocked_mapping_entry(), just keep looping.
- */
- return false;
-}
-
-static void *get_unlocked_mapping_entry(struct address_space *mapping,
- pgoff_t index, void ***slotp)
+static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
- return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
-}
-
-static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
-{
- void *entry, **slot;
-
- xa_lock_irq(&mapping->i_pages);
- entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
- if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
- !slot_locked(mapping, slot))) {
- xa_unlock_irq(&mapping->i_pages);
- return;
- }
- unlock_slot(mapping, slot);
- xa_unlock_irq(&mapping->i_pages);
- dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+ /* If we were the only waiter woken, wake the next one */
+ if (entry)
+ dax_wake_entry(xas, entry, false);
}
-static void put_locked_mapping_entry(struct address_space *mapping,
- pgoff_t index)
+/*
+ * We used the xa_state to get the entry, but then we locked the entry and
+ * dropped the xa_lock, so we know the xa_state is stale and must be reset
+ * before use.
+ */
+static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
- unlock_mapping_entry(mapping, index);
+ void *old;
+
+ xas_reset(xas);
+ xas_lock_irq(xas);
+ old = xas_store(xas, entry);
+ xas_unlock_irq(xas);
+ BUG_ON(!dax_is_locked(old));
+ dax_wake_entry(xas, entry, false);
}
/*
- * Called when we are done with radix tree entry we looked up via
- * get_unlocked_mapping_entry() and which we didn't lock in the end.
+ * Return: The entry stored at this location before it was locked.
*/
-static void put_unlocked_mapping_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
+static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
- if (!entry)
- return;
-
- /* We have to wake up next waiter for the radix tree entry lock */
- dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+ unsigned long v = xa_to_value(entry);
+ return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
static unsigned long dax_entry_size(void *entry)
@@ -325,9 +284,9 @@ static unsigned long dax_entry_size(void *entry)
return PAGE_SIZE;
}
-static unsigned long dax_radix_end_pfn(void *entry)
+static unsigned long dax_end_pfn(void *entry)
{
- return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
+ return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}
/*
@@ -335,8 +294,8 @@ static unsigned long dax_radix_end_pfn(void *entry)
* 'empty' and 'zero' entries.
*/
#define for_each_mapped_pfn(entry, pfn) \
- for (pfn = dax_radix_pfn(entry); \
- pfn < dax_radix_end_pfn(entry); pfn++)
+ for (pfn = dax_to_pfn(entry); \
+ pfn < dax_end_pfn(entry); pfn++)
/*
* TODO: for reflink+dax we need a way to associate a single page with
@@ -393,33 +352,16 @@ static struct page *dax_busy_page(void *entry)
return NULL;
}
-static bool entry_wait_revalidate(void)
-{
- rcu_read_unlock();
- schedule();
- rcu_read_lock();
-
- /*
- * Tell __get_unlocked_mapping_entry() to take a break, we need
- * to revalidate page->mapping after dropping locks
- */
- return true;
-}
-
bool dax_lock_mapping_entry(struct page *page)
{
- pgoff_t index;
- struct inode *inode;
- bool did_lock = false;
- void *entry = NULL, **slot;
- struct address_space *mapping;
+ XA_STATE(xas, NULL, 0);
+ void *entry;
- rcu_read_lock();
for (;;) {
- mapping = READ_ONCE(page->mapping);
+ struct address_space *mapping = READ_ONCE(page->mapping);
if (!dax_mapping(mapping))
- break;
+ return false;
/*
* In the device-dax case there's no need to lock, a
@@ -428,98 +370,94 @@ bool dax_lock_mapping_entry(struct page *page)
* otherwise we would not have a valid pfn_to_page()
* translation.
*/
- inode = mapping->host;
- if (S_ISCHR(inode->i_mode)) {
- did_lock = true;
- break;
- }
+ if (S_ISCHR(mapping->host->i_mode))
+ return true;
- xa_lock_irq(&mapping->i_pages);
+ xas.xa = &mapping->i_pages;
+ xas_lock_irq(&xas);
if (mapping != page->mapping) {
- xa_unlock_irq(&mapping->i_pages);
+ xas_unlock_irq(&xas);
continue;
}
- index = page->index;
-
- entry = __get_unlocked_mapping_entry(mapping, index, &slot,
- entry_wait_revalidate);
- if (!entry) {
- xa_unlock_irq(&mapping->i_pages);
- break;
- } else if (IS_ERR(entry)) {
- xa_unlock_irq(&mapping->i_pages);
- WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
- continue;
+ xas_set(&xas, page->index);
+ entry = xas_load(&xas);
+ if (dax_is_locked(entry)) {
+ entry = get_unlocked_entry(&xas);
+ /* Did the page move while we slept? */
+ if (dax_to_pfn(entry) != page_to_pfn(page)) {
+ xas_unlock_irq(&xas);
+ continue;
+ }
}
- lock_slot(mapping, slot);
- did_lock = true;
- xa_unlock_irq(&mapping->i_pages);
- break;
+ dax_lock_entry(&xas, entry);
+ xas_unlock_irq(&xas);
+ return true;
}
- rcu_read_unlock();
-
- return did_lock;
}
void dax_unlock_mapping_entry(struct page *page)
{
struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
+ XA_STATE(xas, &mapping->i_pages, page->index);
- if (S_ISCHR(inode->i_mode))
+ if (S_ISCHR(mapping->host->i_mode))
return;
- unlock_mapping_entry(mapping, page->index);
+ dax_unlock_entry(&xas, dax_make_page_entry(page));
}
/*
- * Find radix tree entry at given index. If it points to an exceptional entry,
- * return it with the radix tree entry locked. If the radix tree doesn't
- * contain given index, create an empty exceptional entry for the index and
- * return with it locked.
+ * Find page cache entry at given index. If it is a DAX entry, return it
+ * with the entry locked. If the page cache doesn't contain an entry at
+ * that index, add a locked empty entry.
*
- * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
- * either return that locked entry or will return an error. This error will
- * happen if there are any 4k entries within the 2MiB range that we are
- * requesting.
+ * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
+ * either return that locked entry or will return VM_FAULT_FALLBACK.
+ * This will happen if there are any PTE entries within the PMD range
+ * that we are requesting.
*
- * We always favor 4k entries over 2MiB entries. There isn't a flow where we
- * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB
- * insertion will fail if it finds any 4k entries already in the tree, and a
- * 4k insertion will cause an existing 2MiB entry to be unmapped and
- * downgraded to 4k entries. This happens for both 2MiB huge zero pages as
- * well as 2MiB empty entries.
+ * We always favor PTE entries over PMD entries. There isn't a flow where we
+ * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
+ * insertion will fail if it finds any PTE entries already in the tree, and a
+ * PTE insertion will cause an existing PMD entry to be unmapped and
+ * downgraded to PTE entries. This happens for both PMD zero pages as
+ * well as PMD empty entries.
*
- * The exception to this downgrade path is for 2MiB DAX PMD entries that have
- * real storage backing them. We will leave these real 2MiB DAX entries in
- * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
+ * The exception to this downgrade path is for PMD entries that have
+ * real storage backing them. We will leave these real PMD entries in
+ * the tree, and PTE writes will simply dirty the entire PMD entry.
*
* Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
* persistent memory the benefit is doubtful. We can add that later if we can
* show it helps.
+ *
+ * On error, this function does not return an ERR_PTR. Instead it returns
+ * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
+ * overlap with xarray value entries.
*/
-static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
- unsigned long size_flag)
+static void *grab_mapping_entry(struct xa_state *xas,
+ struct address_space *mapping, unsigned long size_flag)
{
- bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
- void *entry, **slot;
-
-restart:
- xa_lock_irq(&mapping->i_pages);
- entry = get_unlocked_mapping_entry(mapping, index, &slot);
+ unsigned long index = xas->xa_index;
+ bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
+ void *entry;
- if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
- entry = ERR_PTR(-EIO);
- goto out_unlock;
- }
+retry:
+ xas_lock_irq(xas);
+ entry = get_unlocked_entry(xas);
+ if (xa_is_internal(entry))
+ goto fallback;
if (entry) {
- if (size_flag & RADIX_DAX_PMD) {
+ if (WARN_ON_ONCE(!xa_is_value(entry))) {
+ xas_set_err(xas, EIO);
+ goto out_unlock;
+ }
+
+ if (size_flag & DAX_PMD) {
if (dax_is_pte_entry(entry)) {
- put_unlocked_mapping_entry(mapping, index,
- entry);
- entry = ERR_PTR(-EEXIST);
- goto out_unlock;
+ put_unlocked_entry(xas, entry);
+ goto fallback;
}
} else { /* trying to grab a PTE entry */
if (dax_is_pmd_entry(entry) &&
@@ -530,87 +468,57 @@ restart:
}
}
- /* No entry for given index? Make sure radix tree is big enough. */
- if (!entry || pmd_downgrade) {
- int err;
-
- if (pmd_downgrade) {
- /*
- * Make sure 'entry' remains valid while we drop
- * the i_pages lock.
- */
- entry = lock_slot(mapping, slot);
- }
+ if (pmd_downgrade) {
+ /*
+ * Make sure 'entry' remains valid while we drop
+ * the i_pages lock.
+ */
+ dax_lock_entry(xas, entry);
- xa_unlock_irq(&mapping->i_pages);
/*
* Besides huge zero pages the only other thing that gets
* downgraded are empty entries which don't need to be
* unmapped.
*/
- if (pmd_downgrade && dax_is_zero_entry(entry))
- unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
- PG_PMD_NR, false);
-
- err = radix_tree_preload(
- mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
- if (err) {
- if (pmd_downgrade)
- put_locked_mapping_entry(mapping, index);
- return ERR_PTR(err);
- }
- xa_lock_irq(&mapping->i_pages);
-
- if (!entry) {
- /*
- * We needed to drop the i_pages lock while calling
- * radix_tree_preload() and we didn't have an entry to
- * lock. See if another thread inserted an entry at
- * our index during this time.
- */
- entry = __radix_tree_lookup(&mapping->i_pages, index,
- NULL, &slot);
- if (entry) {
- radix_tree_preload_end();
- xa_unlock_irq(&mapping->i_pages);
- goto restart;
- }
+ if (dax_is_zero_entry(entry)) {
+ xas_unlock_irq(xas);
+ unmap_mapping_pages(mapping,
+ xas->xa_index & ~PG_PMD_COLOUR,
+ PG_PMD_NR, false);
+ xas_reset(xas);
+ xas_lock_irq(xas);
}
- if (pmd_downgrade) {
- dax_disassociate_entry(entry, mapping, false);
- radix_tree_delete(&mapping->i_pages, index);
- mapping->nrexceptional--;
- dax_wake_mapping_entry_waiter(mapping, index, entry,
- true);
- }
+ dax_disassociate_entry(entry, mapping, false);
+ xas_store(xas, NULL); /* undo the PMD join */
+ dax_wake_entry(xas, entry, true);
+ mapping->nrexceptional--;
+ entry = NULL;
+ xas_set(xas, index);
+ }
- entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
-
- err = __radix_tree_insert(&mapping->i_pages, index,
- dax_radix_order(entry), entry);
- radix_tree_preload_end();
- if (err) {
- xa_unlock_irq(&mapping->i_pages);
- /*
- * Our insertion of a DAX entry failed, most likely
- * because we were inserting a PMD entry and it
- * collided with a PTE sized entry at a different
- * index in the PMD range. We haven't inserted
- * anything into the radix tree and have no waiters to
- * wake.
- */
- return ERR_PTR(err);
- }
- /* Good, we have inserted empty locked entry into the tree. */
+ if (entry) {
+ dax_lock_entry(xas, entry);
+ } else {
+ entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
+ dax_lock_entry(xas, entry);
+ if (xas_error(xas))
+ goto out_unlock;
mapping->nrexceptional++;
- xa_unlock_irq(&mapping->i_pages);
- return entry;
}
- entry = lock_slot(mapping, slot);
- out_unlock:
- xa_unlock_irq(&mapping->i_pages);
+
+out_unlock:
+ xas_unlock_irq(xas);
+ if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
+ goto retry;
+ if (xas->xa_node == XA_ERROR(-ENOMEM))
+ return xa_mk_internal(VM_FAULT_OOM);
+ if (xas_error(xas))
+ return xa_mk_internal(VM_FAULT_SIGBUS);
return entry;
+fallback:
+ xas_unlock_irq(xas);
+ return xa_mk_internal(VM_FAULT_FALLBACK);
}
/**
@@ -630,11 +538,10 @@ restart:
*/
struct page *dax_layout_busy_page(struct address_space *mapping)
{
- pgoff_t indices[PAGEVEC_SIZE];
+ XA_STATE(xas, &mapping->i_pages, 0);
+ void *entry;
+ unsigned int scanned = 0;
struct page *page = NULL;
- struct pagevec pvec;
- pgoff_t index, end;
- unsigned i;
/*
* In the 'limited' case get_user_pages() for dax is disabled.
@@ -645,13 +552,9 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
if (!dax_mapping(mapping) || !mapping_mapped(mapping))
return NULL;
- pagevec_init(&pvec);
- index = 0;
- end = -1;
-
/*
* If we race get_user_pages_fast() here either we'll see the
- * elevated page count in the pagevec_lookup and wait, or
+ * elevated page count in the iteration and wait, or
* get_user_pages_fast() will see that the page it took a reference
* against is no longer mapped in the page tables and bail to the
* get_user_pages() slow path. The slow path is protected by
@@ -663,94 +566,68 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
*/
unmap_mapping_range(mapping, 0, 0, 1);
- while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE),
- indices)) {
- pgoff_t nr_pages = 1;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *pvec_ent = pvec.pages[i];
- void *entry;
-
- index = indices[i];
- if (index >= end)
- break;
-
- if (WARN_ON_ONCE(
- !radix_tree_exceptional_entry(pvec_ent)))
- continue;
-
- xa_lock_irq(&mapping->i_pages);
- entry = get_unlocked_mapping_entry(mapping, index, NULL);
- if (entry) {
- page = dax_busy_page(entry);
- /*
- * Account for multi-order entries at
- * the end of the pagevec.
- */
- if (i + 1 >= pagevec_count(&pvec))
- nr_pages = 1UL << dax_radix_order(entry);
- }
- put_unlocked_mapping_entry(mapping, index, entry);
- xa_unlock_irq(&mapping->i_pages);
- if (page)
- break;
- }
-
- /*
- * We don't expect normal struct page entries to exist in our
- * tree, but we keep these pagevec calls so that this code is
- * consistent with the common pattern for handling pagevecs
- * throughout the kernel.
- */
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- index += nr_pages;
-
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ if (WARN_ON_ONCE(!xa_is_value(entry)))
+ continue;
+ if (unlikely(dax_is_locked(entry)))
+ entry = get_unlocked_entry(&xas);
+ if (entry)
+ page = dax_busy_page(entry);
+ put_unlocked_entry(&xas, entry);
if (page)
break;
+ if (++scanned % XA_CHECK_SCHED)
+ continue;
+
+ xas_pause(&xas);
+ xas_unlock_irq(&xas);
+ cond_resched();
+ xas_lock_irq(&xas);
}
+ xas_unlock_irq(&xas);
return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
-static int __dax_invalidate_mapping_entry(struct address_space *mapping,
+static int __dax_invalidate_entry(struct address_space *mapping,
pgoff_t index, bool trunc)
{
+ XA_STATE(xas, &mapping->i_pages, index);
int ret = 0;
void *entry;
- struct radix_tree_root *pages = &mapping->i_pages;
- xa_lock_irq(pages);
- entry = get_unlocked_mapping_entry(mapping, index, NULL);
- if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
+ xas_lock_irq(&xas);
+ entry = get_unlocked_entry(&xas);
+ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
goto out;
if (!trunc &&
- (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
- radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
+ (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
+ xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
goto out;
dax_disassociate_entry(entry, mapping, trunc);
- radix_tree_delete(pages, index);
+ xas_store(&xas, NULL);
mapping->nrexceptional--;
ret = 1;
out:
- put_unlocked_mapping_entry(mapping, index, entry);
- xa_unlock_irq(pages);
+ put_unlocked_entry(&xas, entry);
+ xas_unlock_irq(&xas);
return ret;
}
+
/*
- * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
- * entry to get unlocked before deleting it.
+ * Delete DAX entry at @index from @mapping. Wait for it
+ * to be unlocked before deleting it.
*/
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
- int ret = __dax_invalidate_mapping_entry(mapping, index, true);
+ int ret = __dax_invalidate_entry(mapping, index, true);
/*
* This gets called from truncate / punch_hole path. As such, the caller
* must hold locks protecting against concurrent modifications of the
- * radix tree (usually fs-private i_mmap_sem for writing). Since the
- * caller has seen exceptional entry for this index, we better find it
+ * page cache (usually fs-private i_mmap_sem for writing). Since the
+ * caller has seen a DAX entry for this index, we better find it
* at that index as well...
*/
WARN_ON_ONCE(!ret);
@@ -758,12 +635,12 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
}
/*
- * Invalidate exceptional DAX entry if it is clean.
+ * Invalidate DAX entry if it is clean.
*/
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index)
{
- return __dax_invalidate_mapping_entry(mapping, index, false);
+ return __dax_invalidate_entry(mapping, index, false);
}
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
@@ -799,30 +676,27 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
* already in the tree, we will skip the insertion and just dirty the PMD as
* appropriate.
*/
-static void *dax_insert_mapping_entry(struct address_space *mapping,
- struct vm_fault *vmf,
- void *entry, pfn_t pfn_t,
- unsigned long flags, bool dirty)
+static void *dax_insert_entry(struct xa_state *xas,
+ struct address_space *mapping, struct vm_fault *vmf,
+ void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
- struct radix_tree_root *pages = &mapping->i_pages;
- unsigned long pfn = pfn_t_to_pfn(pfn_t);
- pgoff_t index = vmf->pgoff;
- void *new_entry;
+ void *new_entry = dax_make_entry(pfn, flags);
if (dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
+ if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
+ unsigned long index = xas->xa_index;
/* we are replacing a zero page with block mapping */
if (dax_is_pmd_entry(entry))
unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
- PG_PMD_NR, false);
+ PG_PMD_NR, false);
else /* pte entry */
- unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
+ unmap_mapping_pages(mapping, index, 1, false);
}
- xa_lock_irq(pages);
- new_entry = dax_radix_locked_entry(pfn, flags);
+ xas_reset(xas);
+ xas_lock_irq(xas);
if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
dax_disassociate_entry(entry, mapping, false);
dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
@@ -830,33 +704,30 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
/*
- * Only swap our new entry into the radix tree if the current
+ * Only swap our new entry into the page cache if the current
* entry is a zero page or an empty entry. If a normal PTE or
- * PMD entry is already in the tree, we leave it alone. This
+ * PMD entry is already in the cache, we leave it alone. This
* means that if we are trying to insert a PTE and the
* existing entry is a PMD, we will just leave the PMD in the
* tree and dirty it if necessary.
*/
- struct radix_tree_node *node;
- void **slot;
- void *ret;
-
- ret = __radix_tree_lookup(pages, index, &node, &slot);
- WARN_ON_ONCE(ret != entry);
- __radix_tree_replace(pages, node, slot,
- new_entry, NULL);
+ void *old = dax_lock_entry(xas, new_entry);
+ WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
+ DAX_LOCKED));
entry = new_entry;
+ } else {
+ xas_load(xas); /* Walk the xa_state */
}
if (dirty)
- radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
+ xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
- xa_unlock_irq(pages);
+ xas_unlock_irq(xas);
return entry;
}
-static inline unsigned long
-pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
+static inline
+unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
unsigned long address;
@@ -866,8 +737,8 @@ pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
}
/* Walk all mappings of a given index of a file and writeprotect them */
-static void dax_mapping_entry_mkclean(struct address_space *mapping,
- pgoff_t index, unsigned long pfn)
+static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
+ unsigned long pfn)
{
struct vm_area_struct *vma;
pte_t pte, *ptep = NULL;
@@ -937,11 +808,9 @@ unlock_pte:
i_mmap_unlock_read(mapping);
}
-static int dax_writeback_one(struct dax_device *dax_dev,
- struct address_space *mapping, pgoff_t index, void *entry)
+static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
+ struct address_space *mapping, void *entry)
{
- struct radix_tree_root *pages = &mapping->i_pages;
- void *entry2, **slot;
unsigned long pfn;
long ret = 0;
size_t size;
@@ -950,32 +819,38 @@ static int dax_writeback_one(struct dax_device *dax_dev,
* A page got tagged dirty in DAX mapping? Something is seriously
* wrong.
*/
- if (WARN_ON(!radix_tree_exceptional_entry(entry)))
+ if (WARN_ON(!xa_is_value(entry)))
return -EIO;
- xa_lock_irq(pages);
- entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
- /* Entry got punched out / reallocated? */
- if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
- goto put_unlocked;
- /*
- * Entry got reallocated elsewhere? No need to writeback. We have to
- * compare pfns as we must not bail out due to difference in lockbit
- * or entry type.
- */
- if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
- goto put_unlocked;
- if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
- dax_is_zero_entry(entry))) {
- ret = -EIO;
- goto put_unlocked;
+ if (unlikely(dax_is_locked(entry))) {
+ void *old_entry = entry;
+
+ entry = get_unlocked_entry(xas);
+
+ /* Entry got punched out / reallocated? */
+ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+ goto put_unlocked;
+ /*
+ * Entry got reallocated elsewhere? No need to writeback.
+ * We have to compare pfns as we must not bail out due to
+ * difference in lockbit or entry type.
+ */
+ if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
+ goto put_unlocked;
+ if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
+ dax_is_zero_entry(entry))) {
+ ret = -EIO;
+ goto put_unlocked;
+ }
+
+ /* Another fsync thread may have already done this entry */
+ if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
+ goto put_unlocked;
}
- /* Another fsync thread may have already written back this entry */
- if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
- goto put_unlocked;
/* Lock the entry to serialize with page faults */
- entry = lock_slot(mapping, slot);
+ dax_lock_entry(xas, entry);
+
/*
* We can clear the tag now but we have to be careful so that concurrent
* dax_writeback_one() calls for the same index cannot finish before we
@@ -983,8 +858,8 @@ static int dax_writeback_one(struct dax_device *dax_dev,
* at the entry only under the i_pages lock and once they do that
* they will see the entry locked and wait for it to unlock.
*/
- radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
- xa_unlock_irq(pages);
+ xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
+ xas_unlock_irq(xas);
/*
* Even if dax_writeback_mapping_range() was given a wbc->range_start
@@ -993,10 +868,10 @@ static int dax_writeback_one(struct dax_device *dax_dev,
* This allows us to flush for PMD_SIZE and not have to worry about
* partial PMD writebacks.
*/
- pfn = dax_radix_pfn(entry);
- size = PAGE_SIZE << dax_radix_order(entry);
+ pfn = dax_to_pfn(entry);
+ size = PAGE_SIZE << dax_entry_order(entry);
- dax_mapping_entry_mkclean(mapping, index, pfn);
+ dax_entry_mkclean(mapping, xas->xa_index, pfn);
dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
/*
* After we have flushed the cache, we can clear the dirty tag. There
@@ -1004,16 +879,18 @@ static int dax_writeback_one(struct dax_device *dax_dev,
* the pfn mappings are writeprotected and fault waits for mapping
* entry lock.
*/
- xa_lock_irq(pages);
- radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
- xa_unlock_irq(pages);
- trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
- put_locked_mapping_entry(mapping, index);
+ xas_reset(xas);
+ xas_lock_irq(xas);
+ xas_store(xas, entry);
+ xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
+ dax_wake_entry(xas, entry, false);
+
+ trace_dax_writeback_one(mapping->host, xas->xa_index,
+ size >> PAGE_SHIFT);
return ret;
put_unlocked:
- put_unlocked_mapping_entry(mapping, index, entry2);
- xa_unlock_irq(pages);
+ put_unlocked_entry(xas, entry);
return ret;
}
@@ -1025,13 +902,13 @@ static int dax_writeback_one(struct dax_device *dax_dev,
int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc)
{
+ XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
struct inode *inode = mapping->host;
- pgoff_t start_index, end_index;
- pgoff_t indices[PAGEVEC_SIZE];
+ pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
struct dax_device *dax_dev;
- struct pagevec pvec;
- bool done = false;
- int i, ret = 0;
+ void *entry;
+ int ret = 0;
+ unsigned int scanned = 0;
if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
return -EIO;
@@ -1043,41 +920,29 @@ int dax_writeback_mapping_range(struct address_space *mapping,
if (!dax_dev)
return -EIO;
- start_index = wbc->range_start >> PAGE_SHIFT;
- end_index = wbc->range_end >> PAGE_SHIFT;
-
- trace_dax_writeback_range(inode, start_index, end_index);
+ trace_dax_writeback_range(inode, xas.xa_index, end_index);
- tag_pages_for_writeback(mapping, start_index, end_index);
+ tag_pages_for_writeback(mapping, xas.xa_index, end_index);
- pagevec_init(&pvec);
- while (!done) {
- pvec.nr = find_get_entries_tag(mapping, start_index,
- PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
- pvec.pages, indices);
-
- if (pvec.nr == 0)
+ xas_lock_irq(&xas);
+ xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
+ ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
+ if (ret < 0) {
+ mapping_set_error(mapping, ret);
break;
-
- for (i = 0; i < pvec.nr; i++) {
- if (indices[i] > end_index) {
- done = true;
- break;
- }
-
- ret = dax_writeback_one(dax_dev, mapping, indices[i],
- pvec.pages[i]);
- if (ret < 0) {
- mapping_set_error(mapping, ret);
- goto out;
- }
}
- start_index = indices[pvec.nr - 1] + 1;
+ if (++scanned % XA_CHECK_SCHED)
+ continue;
+
+ xas_pause(&xas);
+ xas_unlock_irq(&xas);
+ cond_resched();
+ xas_lock_irq(&xas);
}
-out:
+ xas_unlock_irq(&xas);
put_dax(dax_dev);
- trace_dax_writeback_range_done(inode, start_index, end_index);
- return (ret < 0 ? ret : 0);
+ trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
+ return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
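The rewritten dax_writeback_mapping_range() walks the TOWRITE-marked entries with xas_for_each_marked() under xas_lock_irq(), and every XA_CHECK_SCHED iterations drops the lock via xas_pause()/xas_unlock_irq() so cond_resched() can run. A minimal standalone sketch of that iteration pattern (not the DAX code itself; SCAN_BATCH and the mark-clearing body are placeholders):

#include <linux/xarray.h>
#include <linux/sched.h>

#define SCAN_BATCH 4096			/* plays the role of XA_CHECK_SCHED */

static void flush_marked(struct xarray *xa, unsigned long last)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int scanned = 0;

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, last, XA_MARK_0) {
		xas_clear_mark(&xas, XA_MARK_0);	/* per-entry work */
		if (++scanned % SCAN_BATCH)
			continue;
		/* periodically drop the lock and give the scheduler a chance */
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}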
@@ -1125,16 +990,18 @@ out:
* If this page is ever written to we will re-fault and change the mapping to
* point to real DAX storage instead.
*/
-static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
- struct vm_fault *vmf)
+static vm_fault_t dax_load_hole(struct xa_state *xas,
+ struct address_space *mapping, void **entry,
+ struct vm_fault *vmf)
{
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
vm_fault_t ret;
- dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
- false);
+ *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
+ DAX_ZERO_PAGE, false);
+
ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
trace_dax_load_hole(inode, vmf, ret);
return ret;
@@ -1342,6 +1209,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
{
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping;
+ XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
@@ -1368,9 +1236,9 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
if (write && !vmf->cow_page)
flags |= IOMAP_WRITE;
- entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
- if (IS_ERR(entry)) {
- ret = dax_fault_return(PTR_ERR(entry));
+ entry = grab_mapping_entry(&xas, mapping, 0);
+ if (xa_is_internal(entry)) {
+ ret = xa_to_internal(entry);
goto out;
}
@@ -1443,7 +1311,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
if (error < 0)
goto error_finish_iomap;
- entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+ entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
0, write && !sync);
/*
@@ -1471,7 +1339,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
if (!write) {
- ret = dax_load_hole(mapping, entry, vmf);
+ ret = dax_load_hole(&xas, mapping, &entry, vmf);
goto finish_iomap;
}
/*FALLTHRU*/
@@ -1498,21 +1366,20 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
}
unlock_entry:
- put_locked_mapping_entry(mapping, vmf->pgoff);
+ dax_unlock_entry(&xas, entry);
out:
trace_dax_pte_fault_done(inode, vmf, ret);
return ret | major;
}
#ifdef CONFIG_FS_DAX_PMD
-static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
- void *entry)
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+ struct iomap *iomap, void **entry)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
unsigned long pmd_addr = vmf->address & PMD_MASK;
struct inode *inode = mapping->host;
struct page *zero_page;
- void *ret = NULL;
spinlock_t *ptl;
pmd_t pmd_entry;
pfn_t pfn;
@@ -1523,8 +1390,8 @@ static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
goto fallback;
pfn = page_to_pfn_t(zero_page);
- ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
- RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
+ *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
+ DAX_PMD | DAX_ZERO_PAGE, false);
ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (!pmd_none(*(vmf->pmd))) {
@@ -1536,11 +1403,11 @@ static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
pmd_entry = pmd_mkhuge(pmd_entry);
set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
spin_unlock(ptl);
- trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
+ trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
return VM_FAULT_NOPAGE;
fallback:
- trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
+ trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
return VM_FAULT_FALLBACK;
}
@@ -1549,6 +1416,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
{
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping;
+ XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
unsigned long pmd_addr = vmf->address & PMD_MASK;
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool sync;
@@ -1556,7 +1424,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
struct inode *inode = mapping->host;
vm_fault_t result = VM_FAULT_FALLBACK;
struct iomap iomap = { 0 };
- pgoff_t max_pgoff, pgoff;
+ pgoff_t max_pgoff;
void *entry;
loff_t pos;
int error;
@@ -1567,7 +1435,6 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* supposed to hold locks serializing us with truncate / punch hole so
* this is a reliable test.
*/
- pgoff = linear_page_index(vma, pmd_addr);
max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
@@ -1576,7 +1443,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* Make sure that the faulting address's PMD offset (color) matches
* the PMD offset from the start of the file. This is necessary so
* that a PMD range in the page table overlaps exactly with a PMD
- * range in the radix tree.
+ * range in the page cache.
*/
if ((vmf->pgoff & PG_PMD_COLOUR) !=
((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
@@ -1592,24 +1459,26 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
if ((pmd_addr + PMD_SIZE) > vma->vm_end)
goto fallback;
- if (pgoff >= max_pgoff) {
+ if (xas.xa_index >= max_pgoff) {
result = VM_FAULT_SIGBUS;
goto out;
}
/* If the PMD would extend beyond the file size */
- if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
+ if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
goto fallback;
/*
- * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
- * 2MiB zero page entry or a DAX PMD. If it can't (because a 4k page
- * is already in the tree, for instance), it will return -EEXIST and
- * we just fall back to 4k entries.
+ * grab_mapping_entry() will make sure we get an empty PMD entry,
+ * a zero PMD entry or a DAX PMD. If it can't (because a PTE
+ * entry is already in the array, for instance), it will return
+ * VM_FAULT_FALLBACK.
*/
- entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
- if (IS_ERR(entry))
+ entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
+ if (xa_is_internal(entry)) {
+ result = xa_to_internal(entry);
goto fallback;
+ }
/*
* It is possible, particularly with mixed reads & writes to private
@@ -1628,7 +1497,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* setting up a mapping, so really we're using iomap_begin() as a way
* to look up our filesystem block.
*/
- pos = (loff_t)pgoff << PAGE_SHIFT;
+ pos = (loff_t)xas.xa_index << PAGE_SHIFT;
error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
if (error)
goto unlock_entry;
@@ -1644,8 +1513,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
if (error < 0)
goto finish_iomap;
- entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
- RADIX_DAX_PMD, write && !sync);
+ entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
+ DAX_PMD, write && !sync);
/*
* If we are doing synchronous page fault and inode needs fsync,
@@ -1669,7 +1538,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
case IOMAP_HOLE:
if (WARN_ON_ONCE(write))
break;
- result = dax_pmd_load_hole(vmf, &iomap, entry);
+ result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
break;
default:
WARN_ON_ONCE(1);
@@ -1692,7 +1561,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
&iomap);
}
unlock_entry:
- put_locked_mapping_entry(mapping, pgoff);
+ dax_unlock_entry(&xas, entry);
fallback:
if (result == VM_FAULT_FALLBACK) {
split_huge_pmd(vma, vmf->pmd, vmf->address);
@@ -1737,54 +1606,49 @@ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
-/**
+/*
* dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
* @vmf: The description of the fault
- * @pe_size: Size of entry to be inserted
* @pfn: PFN to insert
+ * @order: Order of entry to insert.
*
- * This function inserts writeable PTE or PMD entry into page tables for mmaped
- * DAX file. It takes care of marking corresponding radix tree entry as dirty
- * as well.
+ * This function inserts a writeable PTE or PMD entry into the page tables
+ * for an mmaped DAX file. It also marks the page cache entry as dirty.
*/
-static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
- enum page_entry_size pe_size,
- pfn_t pfn)
+static vm_fault_t
+dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
- void *entry, **slot;
- pgoff_t index = vmf->pgoff;
+ XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
+ void *entry;
vm_fault_t ret;
- xa_lock_irq(&mapping->i_pages);
- entry = get_unlocked_mapping_entry(mapping, index, &slot);
+ xas_lock_irq(&xas);
+ entry = get_unlocked_entry(&xas);
/* Did we race with someone splitting entry or so? */
if (!entry ||
- (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
- (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
- put_unlocked_mapping_entry(mapping, index, entry);
- xa_unlock_irq(&mapping->i_pages);
+ (order == 0 && !dax_is_pte_entry(entry)) ||
+ (order == PMD_ORDER && (xa_is_internal(entry) ||
+ !dax_is_pmd_entry(entry)))) {
+ put_unlocked_entry(&xas, entry);
+ xas_unlock_irq(&xas);
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
VM_FAULT_NOPAGE);
return VM_FAULT_NOPAGE;
}
- radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
- entry = lock_slot(mapping, slot);
- xa_unlock_irq(&mapping->i_pages);
- switch (pe_size) {
- case PE_SIZE_PTE:
+ xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
+ dax_lock_entry(&xas, entry);
+ xas_unlock_irq(&xas);
+ if (order == 0)
ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
- break;
#ifdef CONFIG_FS_DAX_PMD
- case PE_SIZE_PMD:
+ else if (order == PMD_ORDER)
ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
pfn, true);
- break;
#endif
- default:
+ else
ret = VM_FAULT_FALLBACK;
- }
- put_locked_mapping_entry(mapping, index);
+ dax_unlock_entry(&xas, entry);
trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
return ret;
}
@@ -1804,17 +1668,12 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
{
int err;
loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
- size_t len = 0;
+ unsigned int order = pe_order(pe_size);
+ size_t len = PAGE_SIZE << order;
- if (pe_size == PE_SIZE_PTE)
- len = PAGE_SIZE;
- else if (pe_size == PE_SIZE_PMD)
- len = PMD_SIZE;
- else
- WARN_ON_ONCE(1);
err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
if (err)
return VM_FAULT_SIGBUS;
- return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
+ return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
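dax_finish_sync_fault() now derives the flushed length from pe_order(), which is defined elsewhere in this patch and is assumed to map a page_entry_size onto an allocation order so that PAGE_SIZE << order yields PAGE_SIZE, PMD_SIZE or PUD_SIZE. A sketch of what such a helper is expected to look like (illustrative, not the hunk's actual definition):

static unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return 0;			/* order 0: a single page */
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;	/* PMD_ORDER */
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;	/* PUD_ORDER */
	return ~0;				/* unsupported entry size */
}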
diff --git a/fs/dcache.c b/fs/dcache.c
index 2e7e8d85e9b4..2593153471cf 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -26,7 +26,7 @@
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
@@ -257,24 +257,10 @@ static void __d_free(struct rcu_head *head)
kmem_cache_free(dentry_cache, dentry);
}
-static void __d_free_external_name(struct rcu_head *head)
-{
- struct external_name *name = container_of(head, struct external_name,
- u.head);
-
- mod_node_page_state(page_pgdat(virt_to_page(name)),
- NR_INDIRECTLY_RECLAIMABLE_BYTES,
- -ksize(name));
-
- kfree(name);
-}
-
static void __d_free_external(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
-
- __d_free_external_name(&external_name(dentry)->u.head);
-
+ kfree(external_name(dentry));
kmem_cache_free(dentry_cache, dentry);
}
@@ -306,7 +292,7 @@ void release_dentry_name_snapshot(struct name_snapshot *name)
struct external_name *p;
p = container_of(name->name, struct external_name, name[0]);
if (unlikely(atomic_dec_and_test(&p->u.count)))
- call_rcu(&p->u.head, __d_free_external_name);
+ kfree_rcu(p, u.head);
}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
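The dcache changes replace the open-coded call_rcu() callback with kfree_rcu(), which frees the object after a grace period given only the name of its struct rcu_head member. A self-contained sketch of the idiom with a hypothetical structure:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct name_blob {
	struct rcu_head rcu;		/* named in the kfree_rcu() call below */
	char data[];
};

static void drop_name_blob(struct name_blob *b)
{
	/* equivalent to call_rcu(&b->rcu, cb) where cb only kfree()s b */
	kfree_rcu(b, rcu);
}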
@@ -1606,7 +1592,6 @@ EXPORT_SYMBOL(d_invalidate);
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
- struct external_name *ext = NULL;
struct dentry *dentry;
char *dname;
int err;
@@ -1627,14 +1612,15 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dname = dentry->d_iname;
} else if (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]);
-
- ext = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT);
- if (!ext) {
+ struct external_name *p = kmalloc(size + name->len,
+ GFP_KERNEL_ACCOUNT |
+ __GFP_RECLAIMABLE);
+ if (!p) {
kmem_cache_free(dentry_cache, dentry);
return NULL;
}
- atomic_set(&ext->u.count, 1);
- dname = ext->name;
+ atomic_set(&p->u.count, 1);
+ dname = p->name;
} else {
dname = dentry->d_iname;
}
@@ -1673,12 +1659,6 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
}
}
- if (unlikely(ext)) {
- pg_data_t *pgdat = page_pgdat(virt_to_page(ext));
- mod_node_page_state(pgdat, NR_INDIRECTLY_RECLAIMABLE_BYTES,
- ksize(ext));
- }
-
this_cpu_inc(nr_dentry);
return dentry;
@@ -2707,7 +2687,7 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
dentry->d_name.hash_len = target->d_name.hash_len;
}
if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
- call_rcu(&old_name->u.head, __d_free_external_name);
+ kfree_rcu(old_name, u.head);
}
/*
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 49121e5a8de2..5c36ceecb5c1 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -593,11 +593,16 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
lower_new_dir_dentry = dget_parent(lower_new_dentry);
target_inode = d_inode(new_dentry);
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ rc = -EINVAL;
+ if (lower_old_dentry->d_parent != lower_old_dir_dentry)
+ goto out_lock;
+ if (lower_new_dentry->d_parent != lower_new_dir_dentry)
+ goto out_lock;
+ if (d_unhashed(lower_old_dentry) || d_unhashed(lower_new_dentry))
+ goto out_lock;
/* source should not be ancestor of target */
- if (trap == lower_old_dentry) {
- rc = -EINVAL;
+ if (trap == lower_old_dentry)
goto out_lock;
- }
/* target should not be ancestor of source */
if (trap == lower_new_dentry) {
rc = -ENOTEMPTY;
diff --git a/fs/exec.c b/fs/exec.c
index 1ebf6e5a521d..fc281b738a98 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -908,14 +908,14 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
goto out;
i_size = i_size_read(file_inode(file));
- if (max_size > 0 && i_size > max_size) {
- ret = -EFBIG;
- goto out;
- }
if (i_size <= 0) {
ret = -EINVAL;
goto out;
}
+ if (i_size > SIZE_MAX || (max_size > 0 && i_size > max_size)) {
+ ret = -EFBIG;
+ goto out;
+ }
if (id != READING_FIRMWARE_PREALLOC_BUFFER)
*buf = vmalloc(i_size);
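Reordering the checks matters because i_size is a 64-bit loff_t while the vmalloc() size argument is a size_t, which is only 32 bits on 32-bit kernels; rejecting i_size > SIZE_MAX before the allocation prevents a silent truncation to a too-small buffer. A standalone sketch of the bounds logic (plain C, outside the kernel):

#include <stdbool.h>
#include <stdint.h>

static bool toy_read_size_ok(int64_t i_size, int64_t max_size)
{
	if (i_size <= 0)
		return false;			/* empty or bogus size */
	if ((uint64_t)i_size > SIZE_MAX)
		return false;			/* would truncate when cast to size_t */
	if (max_size > 0 && i_size > max_size)
		return false;			/* caller-imposed limit */
	return true;
}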
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 224c04abb2e5..cf4c77f8dd08 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -256,11 +256,15 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
if (default_acl) {
error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
return error;
}
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 00e759f05161..e770cd100a6a 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -390,11 +390,7 @@ struct ext2_inode {
#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
#define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
-#ifdef CONFIG_FS_DAX
#define EXT2_MOUNT_DAX 0x100000 /* Direct Access */
-#else
-#define EXT2_MOUNT_DAX 0
-#endif
#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 73bd58fa13de..cb91baa4275d 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -309,20 +309,17 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
-#if defined(CONFIG_QUOTA)
if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
seq_puts(seq, ",usrquota");
if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
seq_puts(seq, ",grpquota");
-#endif
-#ifdef CONFIG_FS_DAX
if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
seq_puts(seq, ",xip");
+
if (sbi->s_mount_opt & EXT2_MOUNT_DAX)
seq_puts(seq, ",dax");
-#endif
if (!test_opt(sb, RESERVATION))
seq_puts(seq, ",noreservation");
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index fb50f9aa6ead..c1d570ee1d9f 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -284,12 +284,16 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
error = __ext4_set_acl(handle, inode, ACL_TYPE_DEFAULT,
default_acl, XATTR_CREATE);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
error = __ext4_set_acl(handle, inode, ACL_TYPE_ACCESS,
acl, XATTR_CREATE);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
return error;
}
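Both the ext2 and ext4 ACL hunks rely on the same caching convention (as assumed here): i_acl and i_default_acl start out as ACL_NOT_CACHED, a real pointer means the ACL is cached, and NULL records "known to have no ACL" so later permission checks can skip the xattr lookup entirely. A tiny sketch of a check against that convention (illustrative helper, not kernel code):

#include <linux/fs.h>
#include <linux/posix_acl.h>

static bool toy_acl_known_absent(struct inode *inode)
{
	/* NULL means "no ACL"; ACL_NOT_CACHED means "not looked up yet" */
	return READ_ONCE(inode->i_acl) == NULL;
}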
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index caff935fbeb8..3f89d0ab08fc 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -45,15 +45,6 @@
#include <linux/compiler.h>
-/* Until this gets included into linux/compiler-gcc.h */
-#ifndef __nonstring
-#if defined(GCC_VERSION) && (GCC_VERSION >= 80000)
-#define __nonstring __attribute__((nonstring))
-#else
-#define __nonstring
-#endif
-#endif
-
/*
* The fourth extended filesystem constants/structures
*/
@@ -628,6 +619,7 @@ enum {
#define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
+#define EXT4_FREE_BLOCKS_RERESERVE_CLUSTER 0x0040
/*
* ioctl commands
@@ -1030,6 +1022,9 @@ struct ext4_inode_info {
ext4_lblk_t i_da_metadata_calc_last_lblock;
int i_da_metadata_calc_len;
+ /* pending cluster reservations for bigalloc file systems */
+ struct ext4_pending_tree i_pending_tree;
+
/* on-disk additional length */
__u16 i_extra_isize;
@@ -1401,7 +1396,8 @@ struct ext4_sb_info {
u32 s_min_batch_time;
struct block_device *journal_bdev;
#ifdef CONFIG_QUOTA
- char *s_qf_names[EXT4_MAXQUOTAS]; /* Names of quota files with journalled quota */
+ /* Names of quota files with journalled quota */
+ char __rcu *s_qf_names[EXT4_MAXQUOTAS];
int s_jquota_fmt; /* Format of quota to use */
#endif
unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
@@ -2483,10 +2479,11 @@ extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t lend);
-extern int ext4_page_mkwrite(struct vm_fault *vmf);
-extern int ext4_filemap_fault(struct vm_fault *vmf);
+extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
+extern vm_fault_t ext4_filemap_fault(struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern int ext4_get_projid(struct inode *inode, kprojid_t *projid);
+extern void ext4_da_release_space(struct inode *inode, int to_free);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -3142,10 +3139,6 @@ extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
int flags);
extern void ext4_ext_drop_refs(struct ext4_ext_path *);
extern int ext4_ext_check_inode(struct inode *inode);
-extern int ext4_find_delalloc_range(struct inode *inode,
- ext4_lblk_t lblk_start,
- ext4_lblk_t lblk_end);
-extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
@@ -3156,6 +3149,7 @@ extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
struct inode *inode2, ext4_lblk_t lblk1,
ext4_lblk_t lblk2, ext4_lblk_t count,
int mark_unwritten,int *err);
+extern int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu);
/* move_extent.c */
extern void ext4_double_down_write_data_sem(struct inode *first,
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index adf6668b596f..98bd0e9ee7df 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -120,6 +120,19 @@ struct ext4_ext_path {
};
/*
+ * Used to record a portion of a cluster found at the beginning or end
+ * of an extent while traversing the extent tree during space removal.
+ * A partial cluster may be removed if it does not contain blocks shared
+ * with extents that aren't being deleted (tofree state). Otherwise,
+ * it cannot be removed (nofree state).
+ */
+struct partial_cluster {
+ ext4_fsblk_t pclu; /* physical cluster number */
+ ext4_lblk_t lblk; /* logical block number within logical cluster */
+ enum {initial, tofree, nofree} state;
+};
+
+/*
* structure for external API
*/
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 72a361d5ef74..240b6dea5441 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2351,8 +2351,8 @@ ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
{
struct extent_status es;
- ext4_es_find_delayed_extent_range(inode, hole_start,
- hole_start + hole_len - 1, &es);
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+ hole_start + hole_len - 1, &es);
if (es.es_len) {
/* There's delayed extent containing lblock? */
if (es.es_lblk <= hole_start)
@@ -2490,106 +2490,157 @@ static inline int get_default_free_blocks_flags(struct inode *inode)
return 0;
}
+/*
+ * ext4_rereserve_cluster - increment the reserved cluster count when
+ * freeing a cluster with a pending reservation
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in cluster to be reserved
+ *
+ * Increments the reserved cluster count and adjusts quota in a bigalloc
+ * file system when freeing a partial cluster containing at least one
+ * delayed and unwritten block. A partial cluster meeting that
+ * requirement will have a pending reservation. If so, the
+ * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
+ * defer reserved and allocated space accounting to a subsequent call
+ * to this function.
+ */
+static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
+
+ spin_lock(&ei->i_block_reservation_lock);
+ ei->i_reserved_data_blocks++;
+ percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
+ spin_unlock(&ei->i_block_reservation_lock);
+
+ percpu_counter_add(&sbi->s_freeclusters_counter, 1);
+ ext4_remove_pending(inode, lblk);
+}
+
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent *ex,
- long long *partial_cluster,
+ struct partial_cluster *partial,
ext4_lblk_t from, ext4_lblk_t to)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
- ext4_fsblk_t pblk;
- int flags = get_default_free_blocks_flags(inode);
+ ext4_fsblk_t last_pblk, pblk;
+ ext4_lblk_t num;
+ int flags;
+
+ /* only extent tail removal is allowed */
+ if (from < le32_to_cpu(ex->ee_block) ||
+ to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
+ ext4_error(sbi->s_sb,
+ "strange request: removal(2) %u-%u from %u:%u",
+ from, to, le32_to_cpu(ex->ee_block), ee_len);
+ return 0;
+ }
+
+#ifdef EXTENTS_STATS
+ spin_lock(&sbi->s_ext_stats_lock);
+ sbi->s_ext_blocks += ee_len;
+ sbi->s_ext_extents++;
+ if (ee_len < sbi->s_ext_min)
+ sbi->s_ext_min = ee_len;
+ if (ee_len > sbi->s_ext_max)
+ sbi->s_ext_max = ee_len;
+ if (ext_depth(inode) > sbi->s_depth_max)
+ sbi->s_depth_max = ext_depth(inode);
+ spin_unlock(&sbi->s_ext_stats_lock);
+#endif
+
+ trace_ext4_remove_blocks(inode, ex, from, to, partial);
/*
- * For bigalloc file systems, we never free a partial cluster
- * at the beginning of the extent. Instead, we make a note
- * that we tried freeing the cluster, and check to see if we
- * need to free it on a subsequent call to ext4_remove_blocks,
- * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
+ * if we have a partial cluster, and it's different from the
+ * cluster of the last block in the extent, we free it
*/
- flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
+ last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
+
+ if (partial->state != initial &&
+ partial->pclu != EXT4_B2C(sbi, last_pblk)) {
+ if (partial->state == tofree) {
+ flags = get_default_free_blocks_flags(inode);
+ if (ext4_is_pending(inode, partial->lblk))
+ flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
+ ext4_free_blocks(handle, inode, NULL,
+ EXT4_C2B(sbi, partial->pclu),
+ sbi->s_cluster_ratio, flags);
+ if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
+ ext4_rereserve_cluster(inode, partial->lblk);
+ }
+ partial->state = initial;
+ }
+
+ num = le32_to_cpu(ex->ee_block) + ee_len - from;
+ pblk = ext4_ext_pblock(ex) + ee_len - num;
- trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
/*
- * If we have a partial cluster, and it's different from the
- * cluster of the last block, we need to explicitly free the
- * partial cluster here.
+ * We free the partial cluster at the end of the extent (if any),
+ * unless the cluster is used by another extent (partial_cluster
+ * state is nofree). If a partial cluster exists here, it must be
+ * shared with the last block in the extent.
*/
- pblk = ext4_ext_pblock(ex) + ee_len - 1;
- if (*partial_cluster > 0 &&
- *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
+ flags = get_default_free_blocks_flags(inode);
+
+ /* partial, left end cluster aligned, right end unaligned */
+ if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
+ (EXT4_LBLK_CMASK(sbi, to) >= from) &&
+ (partial->state != nofree)) {
+ if (ext4_is_pending(inode, to))
+ flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
ext4_free_blocks(handle, inode, NULL,
- EXT4_C2B(sbi, *partial_cluster),
+ EXT4_PBLK_CMASK(sbi, last_pblk),
sbi->s_cluster_ratio, flags);
- *partial_cluster = 0;
+ if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
+ ext4_rereserve_cluster(inode, to);
+ partial->state = initial;
+ flags = get_default_free_blocks_flags(inode);
}
-#ifdef EXTENTS_STATS
- {
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- spin_lock(&sbi->s_ext_stats_lock);
- sbi->s_ext_blocks += ee_len;
- sbi->s_ext_extents++;
- if (ee_len < sbi->s_ext_min)
- sbi->s_ext_min = ee_len;
- if (ee_len > sbi->s_ext_max)
- sbi->s_ext_max = ee_len;
- if (ext_depth(inode) > sbi->s_depth_max)
- sbi->s_depth_max = ext_depth(inode);
- spin_unlock(&sbi->s_ext_stats_lock);
- }
-#endif
- if (from >= le32_to_cpu(ex->ee_block)
- && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
- /* tail removal */
- ext4_lblk_t num;
- long long first_cluster;
-
- num = le32_to_cpu(ex->ee_block) + ee_len - from;
- pblk = ext4_ext_pblock(ex) + ee_len - num;
- /*
- * Usually we want to free partial cluster at the end of the
- * extent, except for the situation when the cluster is still
- * used by any other extent (partial_cluster is negative).
- */
- if (*partial_cluster < 0 &&
- *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
- flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
+ flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
- ext_debug("free last %u blocks starting %llu partial %lld\n",
- num, pblk, *partial_cluster);
- ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
- /*
- * If the block range to be freed didn't start at the
- * beginning of a cluster, and we removed the entire
- * extent and the cluster is not used by any other extent,
- * save the partial cluster here, since we might need to
- * delete if we determine that the truncate or punch hole
- * operation has removed all of the blocks in the cluster.
- * If that cluster is used by another extent, preserve its
- * negative value so it isn't freed later on.
- *
- * If the whole extent wasn't freed, we've reached the
- * start of the truncated/punched region and have finished
- * removing blocks. If there's a partial cluster here it's
- * shared with the remainder of the extent and is no longer
- * a candidate for removal.
- */
- if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
- first_cluster = (long long) EXT4_B2C(sbi, pblk);
- if (first_cluster != -*partial_cluster)
- *partial_cluster = first_cluster;
- } else {
- *partial_cluster = 0;
+ /*
+ * For bigalloc file systems, we never free a partial cluster
+ * at the beginning of the extent. Instead, we check to see if we
+ * need to free it on a subsequent call to ext4_remove_blocks,
+ * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
+ */
+ flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
+ ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
+
+ /* reset the partial cluster if we've freed past it */
+ if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
+ partial->state = initial;
+
+ /*
+ * If we've freed the entire extent but the beginning is not left
+ * cluster aligned and is not marked as ineligible for freeing we
+ * record the partial cluster at the beginning of the extent. It
+ * wasn't freed by the preceding ext4_free_blocks() call, and we
+ * need to look farther to the left to determine if it's to be freed
+ * (not shared with another extent). Else, reset the partial
+ * cluster - we're either done freeing or the beginning of the
+ * extent is left cluster aligned.
+ */
+ if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
+ if (partial->state == initial) {
+ partial->pclu = EXT4_B2C(sbi, pblk);
+ partial->lblk = from;
+ partial->state = tofree;
}
- } else
- ext4_error(sbi->s_sb, "strange request: removal(2) "
- "%u-%u from %u:%u",
- from, to, le32_to_cpu(ex->ee_block), ee_len);
+ } else {
+ partial->state = initial;
+ }
+
return 0;
}
-
/*
* ext4_ext_rm_leaf() Removes the extents associated with the
* blocks appearing between "start" and "end". Both "start"
@@ -2608,7 +2659,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
- long long *partial_cluster,
+ struct partial_cluster *partial,
ext4_lblk_t start, ext4_lblk_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2640,7 +2691,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
- trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
+ trace_ext4_ext_rm_leaf(inode, start, ex, partial);
while (ex >= EXT_FIRST_EXTENT(eh) &&
ex_ee_block + ex_ee_len > start) {
@@ -2671,8 +2722,8 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
*/
if (sbi->s_cluster_ratio > 1) {
pblk = ext4_ext_pblock(ex);
- *partial_cluster =
- -(long long) EXT4_B2C(sbi, pblk);
+ partial->pclu = EXT4_B2C(sbi, pblk);
+ partial->state = nofree;
}
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
@@ -2714,8 +2765,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
if (err)
goto out;
- err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
- a, b);
+ err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
if (err)
goto out;
@@ -2769,18 +2819,23 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* If there's a partial cluster and at least one extent remains in
* the leaf, free the partial cluster if it isn't shared with the
* current extent. If it is shared with the current extent
- * we zero partial_cluster because we've reached the start of the
+ * we reset the partial cluster because we've reached the start of the
* truncated/punched region and we're done removing blocks.
*/
- if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
+ if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
- if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
+ if (partial->pclu != EXT4_B2C(sbi, pblk)) {
+ int flags = get_default_free_blocks_flags(inode);
+
+ if (ext4_is_pending(inode, partial->lblk))
+ flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
ext4_free_blocks(handle, inode, NULL,
- EXT4_C2B(sbi, *partial_cluster),
- sbi->s_cluster_ratio,
- get_default_free_blocks_flags(inode));
+ EXT4_C2B(sbi, partial->pclu),
+ sbi->s_cluster_ratio, flags);
+ if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
+ ext4_rereserve_cluster(inode, partial->lblk);
}
- *partial_cluster = 0;
+ partial->state = initial;
}
/* if this leaf is free, then we should
@@ -2819,10 +2874,14 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int depth = ext_depth(inode);
struct ext4_ext_path *path = NULL;
- long long partial_cluster = 0;
+ struct partial_cluster partial;
handle_t *handle;
int i = 0, err = 0;
+ partial.pclu = 0;
+ partial.lblk = 0;
+ partial.state = initial;
+
ext_debug("truncate since %u to %u\n", start, end);
/* probably first extent we're gonna free will be last in block */
@@ -2882,8 +2941,8 @@ again:
*/
if (sbi->s_cluster_ratio > 1) {
pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
- partial_cluster =
- -(long long) EXT4_B2C(sbi, pblk);
+ partial.pclu = EXT4_B2C(sbi, pblk);
+ partial.state = nofree;
}
/*
@@ -2911,9 +2970,10 @@ again:
&ex);
if (err)
goto out;
- if (pblk)
- partial_cluster =
- -(long long) EXT4_B2C(sbi, pblk);
+ if (pblk) {
+ partial.pclu = EXT4_B2C(sbi, pblk);
+ partial.state = nofree;
+ }
}
}
/*
@@ -2948,8 +3008,7 @@ again:
if (i == depth) {
/* this is leaf block */
err = ext4_ext_rm_leaf(handle, inode, path,
- &partial_cluster, start,
- end);
+ &partial, start, end);
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
@@ -3021,21 +3080,24 @@ again:
}
}
- trace_ext4_ext_remove_space_done(inode, start, end, depth,
- partial_cluster, path->p_hdr->eh_entries);
+ trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
+ path->p_hdr->eh_entries);
/*
- * If we still have something in the partial cluster and we have removed
- * even the first extent, then we should free the blocks in the partial
- * cluster as well. (This code will only run when there are no leaves
- * to the immediate left of the truncated/punched region.)
+ * if there's a partial cluster and we have removed the first extent
+ * in the file, then we also free the partial cluster, if any
*/
- if (partial_cluster > 0 && err == 0) {
- /* don't zero partial_cluster since it's not used afterwards */
+ if (partial.state == tofree && err == 0) {
+ int flags = get_default_free_blocks_flags(inode);
+
+ if (ext4_is_pending(inode, partial.lblk))
+ flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
ext4_free_blocks(handle, inode, NULL,
- EXT4_C2B(sbi, partial_cluster),
- sbi->s_cluster_ratio,
- get_default_free_blocks_flags(inode));
+ EXT4_C2B(sbi, partial.pclu),
+ sbi->s_cluster_ratio, flags);
+ if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
+ ext4_rereserve_cluster(inode, partial.lblk);
+ partial.state = initial;
}
/* TODO: flexible tree reduction should be here */
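ext4_remove_blocks(), ext4_ext_rm_leaf() and ext4_ext_remove_space() now thread a struct partial_cluster through the removal path instead of a signed partial_cluster value; the three states encode what earlier freeing decisions have established about the cluster straddling an extent boundary. An orientation sketch of the states (not ext4 code; the names are reused for readability):

enum toy_partial_state {
	initial,	/* no candidate cluster is currently tracked */
	tofree,		/* a straddling cluster was recorded; free it later
			 * unless a surviving extent turns out to share it,
			 * re-reserving via EXT4_FREE_BLOCKS_RERESERVE_CLUSTER
			 * when a pending reservation exists */
	nofree,		/* the straddling cluster is shared with an extent
			 * that survives the truncate/punch, so never free it */
};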
@@ -3819,114 +3881,6 @@ out:
return ext4_mark_inode_dirty(handle, inode);
}
-/**
- * ext4_find_delalloc_range: find delayed allocated block in the given range.
- *
- * Return 1 if there is a delalloc block in the range, otherwise 0.
- */
-int ext4_find_delalloc_range(struct inode *inode,
- ext4_lblk_t lblk_start,
- ext4_lblk_t lblk_end)
-{
- struct extent_status es;
-
- ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
- if (es.es_len == 0)
- return 0; /* there is no delay extent in this tree */
- else if (es.es_lblk <= lblk_start &&
- lblk_start < es.es_lblk + es.es_len)
- return 1;
- else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
- return 1;
- else
- return 0;
-}
-
-int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
-{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- ext4_lblk_t lblk_start, lblk_end;
- lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
- lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
-
- return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
-}
-
-/**
- * Determines how many complete clusters (out of those specified by the 'map')
- * are under delalloc and were reserved quota for.
- * This function is called when we are writing out the blocks that were
- * originally written with their allocation delayed, but then the space was
- * allocated using fallocate() before the delayed allocation could be resolved.
- * The cases to look for are:
- * ('=' indicated delayed allocated blocks
- * '-' indicates non-delayed allocated blocks)
- * (a) partial clusters towards beginning and/or end outside of allocated range
- * are not delalloc'ed.
- * Ex:
- * |----c---=|====c====|====c====|===-c----|
- * |++++++ allocated ++++++|
- * ==> 4 complete clusters in above example
- *
- * (b) partial cluster (outside of allocated range) towards either end is
- * marked for delayed allocation. In this case, we will exclude that
- * cluster.
- * Ex:
- * |----====c========|========c========|
- * |++++++ allocated ++++++|
- * ==> 1 complete clusters in above example
- *
- * Ex:
- * |================c================|
- * |++++++ allocated ++++++|
- * ==> 0 complete clusters in above example
- *
- * The ext4_da_update_reserve_space will be called only if we
- * determine here that there were some "entire" clusters that span
- * this 'allocated' range.
- * In the non-bigalloc case, this function will just end up returning num_blks
- * without ever calling ext4_find_delalloc_range.
- */
-static unsigned int
-get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
- unsigned int num_blks)
-{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
- ext4_lblk_t lblk_from, lblk_to, c_offset;
- unsigned int allocated_clusters = 0;
-
- alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
- alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
-
- /* max possible clusters for this allocation */
- allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
-
- trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
-
- /* Check towards left side */
- c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
- if (c_offset) {
- lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
- lblk_to = lblk_from + c_offset - 1;
-
- if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
- allocated_clusters--;
- }
-
- /* Now check towards right. */
- c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
- if (allocated_clusters && c_offset) {
- lblk_from = lblk_start + num_blks;
- lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
-
- if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
- allocated_clusters--;
- }
-
- return allocated_clusters;
-}
-
static int
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
@@ -4108,23 +4062,6 @@ out:
}
map->m_len = allocated;
- /*
- * If we have done fallocate with the offset that is already
- * delayed allocated, we would have block reservation
- * and quota reservation done in the delayed write path.
- * But fallocate would have already updated quota and block
- * count for this offset. So cancel these reservation
- */
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
- unsigned int reserved_clusters;
- reserved_clusters = get_reserved_cluster_alloc(inode,
- map->m_lblk, map->m_len);
- if (reserved_clusters)
- ext4_da_update_reserve_space(inode,
- reserved_clusters,
- 0);
- }
-
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
@@ -4513,77 +4450,39 @@ got_allocated_blocks:
map->m_flags |= EXT4_MAP_NEW;
/*
- * Update reserved blocks/metadata blocks after successful
- * block allocation which had been deferred till now.
+ * Reduce the reserved cluster count to reflect successful deferred
+ * allocation of delayed allocated clusters or direct allocation of
+ * clusters discovered to be delayed allocated. Once allocated, a
+ * cluster is not included in the reserved count.
*/
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
- unsigned int reserved_clusters;
- /*
- * Check how many clusters we had reserved this allocated range
- */
- reserved_clusters = get_reserved_cluster_alloc(inode,
- map->m_lblk, allocated);
- if (!map_from_cluster) {
- BUG_ON(allocated_clusters < reserved_clusters);
- if (reserved_clusters < allocated_clusters) {
- struct ext4_inode_info *ei = EXT4_I(inode);
- int reservation = allocated_clusters -
- reserved_clusters;
- /*
- * It seems we claimed few clusters outside of
- * the range of this allocation. We should give
- * it back to the reservation pool. This can
- * happen in the following case:
- *
- * * Suppose s_cluster_ratio is 4 (i.e., each
- * cluster has 4 blocks. Thus, the clusters
- * are [0-3],[4-7],[8-11]...
- * * First comes delayed allocation write for
- * logical blocks 10 & 11. Since there were no
- * previous delayed allocated blocks in the
- * range [8-11], we would reserve 1 cluster
- * for this write.
- * * Next comes write for logical blocks 3 to 8.
- * In this case, we will reserve 2 clusters
- * (for [0-3] and [4-7]; and not for [8-11] as
- * that range has a delayed allocated blocks.
- * Thus total reserved clusters now becomes 3.
- * * Now, during the delayed allocation writeout
- * time, we will first write blocks [3-8] and
- * allocate 3 clusters for writing these
- * blocks. Also, we would claim all these
- * three clusters above.
- * * Now when we come here to writeout the
- * blocks [10-11], we would expect to claim
- * the reservation of 1 cluster we had made
- * (and we would claim it since there are no
- * more delayed allocated blocks in the range
- * [8-11]. But our reserved cluster count had
- * already gone to 0.
- *
- * Thus, at the step 4 above when we determine
- * that there are still some unwritten delayed
- * allocated blocks outside of our current
- * block range, we should increment the
- * reserved clusters count so that when the
- * remaining blocks finally gets written, we
- * could claim them.
- */
- dquot_reserve_block(inode,
- EXT4_C2B(sbi, reservation));
- spin_lock(&ei->i_block_reservation_lock);
- ei->i_reserved_data_blocks += reservation;
- spin_unlock(&ei->i_block_reservation_lock);
- }
+ if (test_opt(inode->i_sb, DELALLOC) && !map_from_cluster) {
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
/*
- * We will claim quota for all newly allocated blocks.
- * We're updating the reserved space *after* the
- * correction above so we do not accidentally free
- * all the metadata reservation because we might
- * actually need it later on.
+ * When allocating delayed allocated clusters, simply
+ * reduce the reserved cluster count and claim quota
*/
ext4_da_update_reserve_space(inode, allocated_clusters,
1);
+ } else {
+ ext4_lblk_t lblk, len;
+ unsigned int n;
+
+ /*
+ * When allocating non-delayed allocated clusters
+ * (from fallocate, filemap, DIO, or clusters
+ * allocated when delalloc has been disabled by
+ * ext4_nonda_switch), reduce the reserved cluster
+ * count by the number of allocated clusters that
+ * have previously been delayed allocated. Quota
+ * has been claimed by ext4_mb_new_blocks() above,
+ * so release the quota reservations made for any
+ * previously delayed allocated clusters.
+ */
+ lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
+ len = allocated_clusters << sbi->s_cluster_bits;
+ n = ext4_es_delayed_clu(inode, lblk, len);
+ if (n > 0)
+ ext4_da_update_reserve_space(inode, (int) n, 0);
}
}
@@ -5075,8 +4974,10 @@ static int ext4_find_delayed_extent(struct inode *inode,
ext4_lblk_t block, next_del;
if (newes->es_pblk == 0) {
- ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
- newes->es_lblk + newes->es_len - 1, &es);
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+ newes->es_lblk,
+ newes->es_lblk + newes->es_len - 1,
+ &es);
/*
* No extent in extent-tree contains block @newes->es_pblk,
@@ -5097,7 +4998,8 @@ static int ext4_find_delayed_extent(struct inode *inode,
}
block = newes->es_lblk + newes->es_len;
- ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block,
+ EXT_MAX_BLOCKS, &es);
if (es.es_len == 0)
next_del = EXT_MAX_BLOCKS;
else
@@ -5958,3 +5860,82 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
}
return replaced_count;
}
+
+/*
+ * ext4_clu_mapped - determine whether any block in a logical cluster has
+ * been mapped to a physical cluster
+ *
+ * @inode - file containing the logical cluster
+ * @lclu - logical cluster of interest
+ *
+ * Returns 1 if any block in the logical cluster is mapped, signifying
+ * that a physical cluster has been allocated for it. Otherwise,
+ * returns 0. Can also return negative error codes. Derived from
+ * ext4_ext_map_blocks().
+ */
+int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_ext_path *path;
+ int depth, mapped = 0, err = 0;
+ struct ext4_extent *extent;
+ ext4_lblk_t first_lblk, first_lclu, last_lclu;
+
+ /* search for the extent closest to the first block in the cluster */
+ path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ path = NULL;
+ goto out;
+ }
+
+ depth = ext_depth(inode);
+
+ /*
+ * A consistent leaf must not be empty. This situation is possible,
+ * though, _during_ tree modification, and it's why an assert can't
+ * be put in ext4_find_extent().
+ */
+ if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
+ EXT4_ERROR_INODE(inode,
+ "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
+ (unsigned long) EXT4_C2B(sbi, lclu),
+ depth, path[depth].p_block);
+ err = -EFSCORRUPTED;
+ goto out;
+ }
+
+ extent = path[depth].p_ext;
+
+ /* can't be mapped if the extent tree is empty */
+ if (extent == NULL)
+ goto out;
+
+ first_lblk = le32_to_cpu(extent->ee_block);
+ first_lclu = EXT4_B2C(sbi, first_lblk);
+
+ /*
+ * Three possible outcomes at this point - found extent spanning
+ * the target cluster, to the left of the target cluster, or to the
+ * right of the target cluster. The first two cases are handled here.
+ * The last case indicates the target cluster is not mapped.
+ */
+ if (lclu >= first_lclu) {
+ last_lclu = EXT4_B2C(sbi, first_lblk +
+ ext4_ext_get_actual_len(extent) - 1);
+ if (lclu <= last_lclu) {
+ mapped = 1;
+ } else {
+ first_lblk = ext4_ext_next_allocated_block(path);
+ first_lclu = EXT4_B2C(sbi, first_lblk);
+ if (lclu == first_lclu)
+ mapped = 1;
+ }
+ }
+
+out:
+ ext4_ext_drop_refs(path);
+ kfree(path);
+
+ return err ? err : mapped;
+}
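ext4_clu_mapped() is declared in ext4.h earlier in this diff; a plausible caller is the bigalloc delayed-allocation path, which would only reserve a new cluster when no block of the logical cluster is already mapped. A hypothetical usage sketch (toy_reserve_cluster() is an assumed helper, not part of this patch):

static int toy_maybe_reserve(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int mapped = ext4_clu_mapped(inode, EXT4_B2C(sbi, lblk));

	if (mapped < 0)
		return mapped;			/* propagate lookup errors */
	if (mapped)
		return 0;			/* cluster already allocated on disk */
	return toy_reserve_cluster(inode, lblk);	/* assumed helper */
}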
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index c4e6fb15101b..2b439afafe13 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -142,6 +142,7 @@
*/
static struct kmem_cache *ext4_es_cachep;
+static struct kmem_cache *ext4_pending_cachep;
static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
@@ -149,6 +150,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
struct ext4_inode_info *locked_ei);
+static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len);
int __init ext4_init_es(void)
{
@@ -233,30 +236,38 @@ static struct extent_status *__es_tree_search(struct rb_root *root,
}
/*
- * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
- * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
+ * ext4_es_find_extent_range - find extent with specified status within block
+ * range or next extent following block range in
+ * extents status tree
*
- * @inode: the inode which owns delayed extents
- * @lblk: the offset where we start to search
- * @end: the offset where we stop to search
- * @es: delayed extent that we found
+ * @inode - file containing the range
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block defining start of range
+ * @end - logical block defining end of range
+ * @es - extent found, if any
+ *
+ * Find the first extent within the block range specified by @lblk and @end
+ * in the extents status tree that satisfies @matching_fn. If a match
+ * is found, it's returned in @es. If not, and a matching extent is found
+ * beyond the block range, it's returned in @es. If no match is found, an
+ * extent is returned in @es whose es_lblk, es_len, and es_pblk components
+ * are 0.
*/
-void ext4_es_find_delayed_extent_range(struct inode *inode,
- ext4_lblk_t lblk, ext4_lblk_t end,
- struct extent_status *es)
+static void __es_find_extent_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end,
+ struct extent_status *es)
{
struct ext4_es_tree *tree = NULL;
struct extent_status *es1 = NULL;
struct rb_node *node;
- BUG_ON(es == NULL);
- BUG_ON(end < lblk);
- trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
+ WARN_ON(es == NULL);
+ WARN_ON(end < lblk);
- read_lock(&EXT4_I(inode)->i_es_lock);
tree = &EXT4_I(inode)->i_es_tree;
- /* find extent in cache firstly */
+ /* see if the extent has been cached */
es->es_lblk = es->es_len = es->es_pblk = 0;
if (tree->cache_es) {
es1 = tree->cache_es;
@@ -271,28 +282,133 @@ void ext4_es_find_delayed_extent_range(struct inode *inode,
es1 = __es_tree_search(&tree->root, lblk);
out:
- if (es1 && !ext4_es_is_delayed(es1)) {
+ if (es1 && !matching_fn(es1)) {
while ((node = rb_next(&es1->rb_node)) != NULL) {
es1 = rb_entry(node, struct extent_status, rb_node);
if (es1->es_lblk > end) {
es1 = NULL;
break;
}
- if (ext4_es_is_delayed(es1))
+ if (matching_fn(es1))
break;
}
}
- if (es1 && ext4_es_is_delayed(es1)) {
+ if (es1 && matching_fn(es1)) {
tree->cache_es = es1;
es->es_lblk = es1->es_lblk;
es->es_len = es1->es_len;
es->es_pblk = es1->es_pblk;
}
+}
+
+/*
+ * Locking for __es_find_extent_range() for external use
+ */
+void ext4_es_find_extent_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end,
+ struct extent_status *es)
+{
+ trace_ext4_es_find_extent_range_enter(inode, lblk);
+
+ read_lock(&EXT4_I(inode)->i_es_lock);
+ __es_find_extent_range(inode, matching_fn, lblk, end, es);
+ read_unlock(&EXT4_I(inode)->i_es_lock);
+
+ trace_ext4_es_find_extent_range_exit(inode, es);
+}
+
+/*
+ * __es_scan_range - search block range for block with specified status
+ * in extents status tree
+ *
+ * @inode - file containing the range
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block defining start of range
+ * @end - logical block defining end of range
+ *
+ * Returns true if at least one block in the specified block range satisfies
+ * the criterion specified by @matching_fn, and false if not. If at least
+ * one extent has the specified status, then there is at least one block
+ * in the range with that status. Should only be called by code that has
+ * taken i_es_lock.
+ */
+static bool __es_scan_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t start, ext4_lblk_t end)
+{
+ struct extent_status es;
+
+ __es_find_extent_range(inode, matching_fn, start, end, &es);
+ if (es.es_len == 0)
+ return false; /* no matching extent in the tree */
+ else if (es.es_lblk <= start &&
+ start < es.es_lblk + es.es_len)
+ return true;
+ else if (start <= es.es_lblk && es.es_lblk <= end)
+ return true;
+ else
+ return false;
+}
+/*
+ * Locking for __es_scan_range() for external use
+ */
+bool ext4_es_scan_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end)
+{
+ bool ret;
+
+ read_lock(&EXT4_I(inode)->i_es_lock);
+ ret = __es_scan_range(inode, matching_fn, lblk, end);
read_unlock(&EXT4_I(inode)->i_es_lock);
- trace_ext4_es_find_delayed_extent_range_exit(inode, es);
+ return ret;
+}
+
+/*
+ * __es_scan_clu - search cluster for block with specified status in
+ * extents status tree
+ *
+ * @inode - file containing the cluster
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block in cluster to be searched
+ *
+ * Returns true if at least one extent in the cluster containing @lblk
+ * satisfies the criterion specified by @matching_fn, and false if not. If at
+ * least one extent has the specified status, then there is at least one block
+ * in the cluster with that status. Should only be called by code that has
+ * taken i_es_lock.
+ */
+static bool __es_scan_clu(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t lblk_start, lblk_end;
+
+ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
+ lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
+
+ return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
+}
+
+/*
+ * Locking for __es_scan_clu() for external use
+ */
+bool ext4_es_scan_clu(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk)
+{
+ bool ret;
+
+ read_lock(&EXT4_I(inode)->i_es_lock);
+ ret = __es_scan_clu(inode, matching_fn, lblk);
+ read_unlock(&EXT4_I(inode)->i_es_lock);
+
+ return ret;
}
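The new ext4_es_find_extent_range()/ext4_es_scan_range()/ext4_es_scan_clu() helpers all take a predicate over struct extent_status, so one code path can search for delayed, unwritten or written extents. Assuming the existing ext4_es_is_delayed()/ext4_es_is_unwritten() tests from extents_status.h fit the int (*)(struct extent_status *) signature used here, a caller might look like:

static bool toy_cluster_has_delayed_or_unwritten(struct inode *inode,
						 ext4_lblk_t lblk)
{
	return ext4_es_scan_clu(inode, &ext4_es_is_delayed, lblk) ||
	       ext4_es_scan_clu(inode, &ext4_es_is_unwritten, lblk);
}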
static void ext4_es_list_add(struct inode *inode)
@@ -694,6 +810,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
int err = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
lblk, len, pblk, status, inode->i_ino);
@@ -730,6 +847,11 @@ retry:
if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
err = 0;
+ if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+ (status & EXTENT_STATUS_WRITTEN ||
+ status & EXTENT_STATUS_UNWRITTEN))
+ __revise_pending(inode, lblk, len);
+
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
@@ -1252,3 +1374,499 @@ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
ei->i_es_tree.cache_es = NULL;
return nr_shrunk;
}
+
+#ifdef ES_DEBUG__
+static void ext4_print_pending_tree(struct inode *inode)
+{
+ struct ext4_pending_tree *tree;
+ struct rb_node *node;
+ struct pending_reservation *pr;
+
+ printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
+ tree = &EXT4_I(inode)->i_pending_tree;
+ node = rb_first(&tree->root);
+ while (node) {
+ pr = rb_entry(node, struct pending_reservation, rb_node);
+ printk(KERN_DEBUG " %u", pr->lclu);
+ node = rb_next(node);
+ }
+ printk(KERN_DEBUG "\n");
+}
+#else
+#define ext4_print_pending_tree(inode)
+#endif
+
+int __init ext4_init_pending(void)
+{
+ ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
+ sizeof(struct pending_reservation),
+ 0, (SLAB_RECLAIM_ACCOUNT), NULL);
+ if (ext4_pending_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void ext4_exit_pending(void)
+{
+ kmem_cache_destroy(ext4_pending_cachep);
+}
+
+void ext4_init_pending_tree(struct ext4_pending_tree *tree)
+{
+ tree->root = RB_ROOT;
+}
+
+/*
+ * __get_pending - retrieve a pointer to a pending reservation
+ *
+ * @inode - file containing the pending cluster reservation
+ * @lclu - logical cluster of interest
+ *
+ * Returns a pointer to a pending reservation if it's a member of
+ * the set, and NULL if not. Must be called holding i_es_lock.
+ */
+static struct pending_reservation *__get_pending(struct inode *inode,
+ ext4_lblk_t lclu)
+{
+ struct ext4_pending_tree *tree;
+ struct rb_node *node;
+ struct pending_reservation *pr = NULL;
+
+ tree = &EXT4_I(inode)->i_pending_tree;
+ node = (&tree->root)->rb_node;
+
+ while (node) {
+ pr = rb_entry(node, struct pending_reservation, rb_node);
+ if (lclu < pr->lclu)
+ node = node->rb_left;
+ else if (lclu > pr->lclu)
+ node = node->rb_right;
+ else if (lclu == pr->lclu)
+ return pr;
+ }
+ return NULL;
+}
+
+/*
+ * __insert_pending - adds a pending cluster reservation to the set of
+ * pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster to be added
+ *
+ * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * pending reservation is already in the set, returns successfully.
+ */
+static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+ struct rb_node **p = &tree->root.rb_node;
+ struct rb_node *parent = NULL;
+ struct pending_reservation *pr;
+ ext4_lblk_t lclu;
+ int ret = 0;
+
+ lclu = EXT4_B2C(sbi, lblk);
+ /* search to find parent for insertion */
+ while (*p) {
+ parent = *p;
+ pr = rb_entry(parent, struct pending_reservation, rb_node);
+
+ if (lclu < pr->lclu) {
+ p = &(*p)->rb_left;
+ } else if (lclu > pr->lclu) {
+ p = &(*p)->rb_right;
+ } else {
+ /* pending reservation already inserted */
+ goto out;
+ }
+ }
+
+ pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+ if (pr == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pr->lclu = lclu;
+
+ rb_link_node(&pr->rb_node, parent, p);
+ rb_insert_color(&pr->rb_node, &tree->root);
+
+out:
+ return ret;
+}
+
+/*
+ * __remove_pending - removes a pending cluster reservation from the set
+ * of pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the pending cluster reservation to be removed
+ *
+ * Does nothing if the pending reservation is not a member of the set.
+ */
+static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct pending_reservation *pr;
+ struct ext4_pending_tree *tree;
+
+ pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
+ if (pr != NULL) {
+ tree = &EXT4_I(inode)->i_pending_tree;
+ rb_erase(&pr->rb_node, &tree->root);
+ kmem_cache_free(ext4_pending_cachep, pr);
+ }
+}
+
+/*
+ * ext4_remove_pending - removes a pending cluster reservation from the set
+ * of pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the pending cluster reservation to be removed
+ *
+ * Locking for external use of __remove_pending.
+ */
+void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ write_lock(&ei->i_es_lock);
+ __remove_pending(inode, lblk);
+ write_unlock(&ei->i_es_lock);
+}
+
+/*
+ * ext4_is_pending - determine whether a cluster has a pending reservation
+ * on it
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster
+ *
+ * Returns true if there's a pending reservation for the cluster in the
+ * set of pending reservations, and false if not.
+ */
+bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ bool ret;
+
+ read_lock(&ei->i_es_lock);
+ ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
+ read_unlock(&ei->i_es_lock);
+
+ return ret;
+}
+
+/*
+ * ext4_es_insert_delayed_block - adds a delayed block to the extents status
+ * tree, adding a pending reservation where
+ * needed
+ *
+ * @inode - file containing the newly added block
+ * @lblk - logical block to be added
+ * @allocated - indicates whether a physical cluster has been allocated for
+ * the logical cluster that contains the block
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated)
+{
+ struct extent_status newes;
+ int err = 0;
+
+ es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
+ lblk, inode->i_ino);
+
+ newes.es_lblk = lblk;
+ newes.es_len = 1;
+ ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
+ trace_ext4_es_insert_delayed_block(inode, &newes, allocated);
+
+ ext4_es_insert_extent_check(inode, &newes);
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err = __es_remove_extent(inode, lblk, lblk);
+ if (err != 0)
+ goto error;
+retry:
+ err = __es_insert_extent(inode, &newes);
+ if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+ 128, EXT4_I(inode)))
+ goto retry;
+ if (err != 0)
+ goto error;
+
+ if (allocated)
+ __insert_pending(inode, lblk);
+
+error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+
+ ext4_es_print_tree(inode);
+ ext4_print_pending_tree(inode);
+
+ return err;
+}
+
+/*
+ * __es_delayed_clu - count number of clusters containing blocks that
+ * are delayed only
+ *
+ * @inode - file containing block range
+ * @start - logical block defining start of range
+ * @end - logical block defining end of range
+ *
+ * Returns the number of clusters containing only delayed (not delayed
+ * and unwritten) blocks in the range specified by @start and @end. Any
+ * cluster or part of a cluster within the range and containing a delayed
+ * and not unwritten block within the range is counted as a whole cluster.
+ */
+static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
+ ext4_lblk_t end)
+{
+ struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+ struct extent_status *es;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct rb_node *node;
+ ext4_lblk_t first_lclu, last_lclu;
+ unsigned long long last_counted_lclu;
+ unsigned int n = 0;
+
+ /* guaranteed to be unequal to any ext4_lblk_t value */
+ last_counted_lclu = ~0ULL;
+
+ es = __es_tree_search(&tree->root, start);
+
+ while (es && (es->es_lblk <= end)) {
+ if (ext4_es_is_delonly(es)) {
+ if (es->es_lblk <= start)
+ first_lclu = EXT4_B2C(sbi, start);
+ else
+ first_lclu = EXT4_B2C(sbi, es->es_lblk);
+
+ if (ext4_es_end(es) >= end)
+ last_lclu = EXT4_B2C(sbi, end);
+ else
+ last_lclu = EXT4_B2C(sbi, ext4_es_end(es));
+
+ if (first_lclu == last_counted_lclu)
+ n += last_lclu - first_lclu;
+ else
+ n += last_lclu - first_lclu + 1;
+ last_counted_lclu = last_lclu;
+ }
+ node = rb_next(&es->rb_node);
+ if (!node)
+ break;
+ es = rb_entry(node, struct extent_status, rb_node);
+ }
+
+ return n;
+}
+
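To make the counting rule above concrete, the following is a minimal userspace sketch of the same arithmetic, assuming a cluster ratio of 4 and extents that are already sorted and non-overlapping (as they are when walking the extents status tree); the extent struct and B2C() helper are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>

#define CLUSTER_RATIO 4
#define B2C(lblk)     ((lblk) / CLUSTER_RATIO)    /* stand-in for EXT4_B2C() */

struct extent { unsigned int lblk, len; };        /* delayed-only extents */

/* count clusters touched by delayed-only blocks in [start, end], counting
 * a cluster only once even when two extents share it */
static unsigned int delayed_clusters(const struct extent *es, int nr,
                                     unsigned int start, unsigned int end)
{
        unsigned long long last_counted = ~0ULL;  /* matches no cluster */
        unsigned int n = 0;

        for (int i = 0; i < nr; i++) {
                unsigned int es_end = es[i].lblk + es[i].len - 1;
                unsigned int first_lclu, last_lclu;

                if (es_end < start || es[i].lblk > end)
                        continue;
                first_lclu = B2C(es[i].lblk < start ? start : es[i].lblk);
                last_lclu = B2C(es_end > end ? end : es_end);
                n += last_lclu - first_lclu +
                        (first_lclu == last_counted ? 0 : 1);
                last_counted = last_lclu;
        }
        return n;
}

int main(void)
{
        /* blocks 2..5 span clusters 0-1, blocks 7..9 span clusters 1-2 */
        struct extent es[] = { { 2, 4 }, { 7, 3 } };

        /* cluster 1 is shared by both extents, so the result is 3, not 4 */
        printf("%u\n", delayed_clusters(es, 2, 0, 15));
        return 0;
}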
+/*
+ * ext4_es_delayed_clu - count number of clusters containing blocks that
+ * are delayed and not unwritten
+ *
+ * @inode - file containing block range
+ * @lblk - logical block defining start of range
+ * @len - number of blocks in range
+ *
+ * Locking for external use of __es_delayed_clu().
+ */
+unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ ext4_lblk_t end;
+ unsigned int n;
+
+ if (len == 0)
+ return 0;
+
+ end = lblk + len - 1;
+ WARN_ON(end < lblk);
+
+ read_lock(&ei->i_es_lock);
+
+ n = __es_delayed_clu(inode, lblk, end);
+
+ read_unlock(&ei->i_es_lock);
+
+ return n;
+}
+
+/*
+ * __revise_pending - makes, cancels, or leaves unchanged pending cluster
+ * reservations for a specified block range depending
+ * upon the presence or absence of delayed blocks
+ * outside the range within clusters at the ends of the
+ * range
+ *
+ * @inode - file containing the range
+ * @lblk - logical block defining the start of range
+ * @len - length of range in blocks
+ *
+ * Used after a newly allocated extent is added to the extents status tree.
+ * Requires that the extents in the range have either written or unwritten
+ * status. Must be called while holding i_es_lock.
+ */
+static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t end = lblk + len - 1;
+ ext4_lblk_t first, last;
+ bool f_del = false, l_del = false;
+
+ if (len == 0)
+ return;
+
+ /*
+ * Two cases - block range within single cluster and block range
+ * spanning two or more clusters. Note that a cluster belonging
+ * to a range starting and/or ending on a cluster boundary is treated
+ * as if it does not contain a delayed extent. The new range may
+ * have allocated space for previously delayed blocks out to the
+ * cluster boundary, requiring that any pre-existing pending
+ * reservation be canceled. Because this code only looks at blocks
+ * outside the range, it should revise pending reservations
+ * correctly even if the extent represented by the range can't be
+ * inserted in the extents status tree due to ENOSPC.
+ */
+
+ if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
+ first = EXT4_LBLK_CMASK(sbi, lblk);
+ if (first != lblk)
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+ if (f_del) {
+ __insert_pending(inode, first);
+ } else {
+ last = EXT4_LBLK_CMASK(sbi, end) +
+ sbi->s_cluster_ratio - 1;
+ if (last != end)
+ l_del = __es_scan_range(inode,
+ &ext4_es_is_delonly,
+ end + 1, last);
+ if (l_del)
+ __insert_pending(inode, last);
+ else
+ __remove_pending(inode, last);
+ }
+ } else {
+ first = EXT4_LBLK_CMASK(sbi, lblk);
+ if (first != lblk)
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+ if (f_del)
+ __insert_pending(inode, first);
+ else
+ __remove_pending(inode, first);
+
+ last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+ if (last != end)
+ l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ end + 1, last);
+ if (l_del)
+ __insert_pending(inode, last);
+ else
+ __remove_pending(inode, last);
+ }
+}
+
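The head and tail scan ranges used above fall out of simple mask arithmetic; the sketch below reproduces that arithmetic in userspace for one example, assuming a power-of-two cluster ratio of 4 (LBLK_CMASK is a stand-in for EXT4_LBLK_CMASK, not the kernel macro).

#include <stdio.h>

#define CLUSTER_RATIO   4
#define LBLK_CMASK(l)   ((l) & ~((unsigned int)CLUSTER_RATIO - 1))

int main(void)
{
        unsigned int lblk = 6, len = 7;           /* newly mapped blocks 6..12 */
        unsigned int end = lblk + len - 1;

        unsigned int first = LBLK_CMASK(lblk);                    /*  4 */
        unsigned int last = LBLK_CMASK(end) + CLUSTER_RATIO - 1;  /* 15 */

        /* blocks 4..5 and 13..15 are the only blocks __revise_pending has to
         * scan for delayed-only extents outside the newly mapped range */
        printf("head scan %u..%u, tail scan %u..%u\n",
               first, lblk - 1, end + 1, last);
        return 0;
}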
+/*
+ * ext4_es_remove_blks - remove block range from extents status tree and
+ * reduce reservation count or cancel pending
+ * reservation as needed
+ *
+ * @inode - file containing range
+ * @lblk - first block in range
+ * @len - number of blocks to remove
+ *
+ */
+void ext4_es_remove_blks(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ unsigned int clu_size, reserved = 0;
+ ext4_lblk_t last_lclu, first, length, remainder, last;
+ bool delonly;
+ int err = 0;
+ struct pending_reservation *pr;
+ struct ext4_pending_tree *tree;
+
+ /*
+ * Process cluster by cluster for bigalloc - there may be up to
+ * two clusters in a 4k page with a 1k block size and two blocks
+ * per cluster. Also necessary for systems with larger page sizes
+ * and potentially larger block sizes.
+ */
+ clu_size = sbi->s_cluster_ratio;
+ last_lclu = EXT4_B2C(sbi, lblk + len - 1);
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ for (first = lblk, remainder = len;
+ remainder > 0;
+ first += length, remainder -= length) {
+
+ if (EXT4_B2C(sbi, first) == last_lclu)
+ length = remainder;
+ else
+ length = clu_size - EXT4_LBLK_COFF(sbi, first);
+
+ /*
+ * The BH_Delay flag, which triggers calls to this function,
+ * and the contents of the extents status tree can be
+ * inconsistent due to writepages activity. So, note whether
+ * the blocks to be removed actually belong to an extent with
+ * delayed only status.
+ */
+ delonly = __es_scan_clu(inode, &ext4_es_is_delonly, first);
+
+ /*
+ * because of the writepages effect, written and unwritten
+ * blocks could be removed here
+ */
+ last = first + length - 1;
+ err = __es_remove_extent(inode, first, last);
+ if (err)
+ ext4_warning(inode->i_sb,
+ "%s: couldn't remove page (err = %d)",
+ __func__, err);
+
+ /* non-bigalloc case: simply count the cluster for release */
+ if (sbi->s_cluster_ratio == 1 && delonly) {
+ reserved++;
+ continue;
+ }
+
+ /*
+ * bigalloc case: if all delayed allocated only blocks have
+ * just been removed from a cluster, either cancel a pending
+ * reservation if it exists or count a cluster for release
+ */
+ if (delonly &&
+ !__es_scan_clu(inode, &ext4_es_is_delonly, first)) {
+ pr = __get_pending(inode, EXT4_B2C(sbi, first));
+ if (pr != NULL) {
+ tree = &EXT4_I(inode)->i_pending_tree;
+ rb_erase(&pr->rb_node, &tree->root);
+ kmem_cache_free(ext4_pending_cachep, pr);
+ } else {
+ reserved++;
+ }
+ }
+ }
+
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+
+ ext4_da_release_space(inode, reserved);
+}
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 8efdeb903d6b..131a8b7df265 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -78,6 +78,51 @@ struct ext4_es_stats {
struct percpu_counter es_stats_shk_cnt;
};
+/*
+ * Pending cluster reservations for bigalloc file systems
+ *
+ * A cluster with a pending reservation is a logical cluster shared by at
+ * least one extent in the extents status tree with delayed and unwritten
+ * status and at least one other written or unwritten extent. The
+ * reservation is said to be pending because a cluster reservation would
+ * have to be taken in the event all blocks in the cluster shared with
+ * written or unwritten extents were deleted while the delayed and
+ * unwritten blocks remained.
+ *
+ * The set of pending cluster reservations is an auxiliary data structure
+ * used with the extents status tree to implement reserved cluster/block
+ * accounting for bigalloc file systems. The set is kept in memory and
+ * records all pending cluster reservations.
+ *
+ * Its primary function is to avoid the need to read extents from the
+ * disk when invalidating pages as a result of a truncate, punch hole, or
+ * collapse range operation. Page invalidation requires a decrease in the
+ * reserved cluster count if it results in the removal of all delayed
+ * and unwritten extents (blocks) from a cluster that is not shared with a
+ * written or unwritten extent, and no decrease otherwise. Determining
+ * whether the cluster is shared can be done by searching for a pending
+ * reservation on it.
+ *
+ * Secondarily, it provides a potentially faster method for determining
+ * whether the reserved cluster count should be increased when a physical
+ * cluster is deallocated as a result of a truncate, punch hole, or
+ * collapse range operation. The necessary information is also present
+ * in the extents status tree, but might be more rapidly accessed in
+ * the pending reservation set in many cases due to smaller size.
+ *
+ * The pending cluster reservation set is implemented as a red-black tree
+ * with the goal of minimizing per page search time overhead.
+ */
+
+struct pending_reservation {
+ struct rb_node rb_node;
+ ext4_lblk_t lclu;
+};
+
+struct ext4_pending_tree {
+ struct rb_root root;
+};
+
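As a rough conceptual model of the accounting rule described above, the standalone sketch below treats the pending set as a plain membership set keyed by logical cluster and applies the page-invalidation decision to it; the array-backed set and all names are illustrative only, not ext4 code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PENDING 16
static unsigned int pending[MAX_PENDING];         /* logical cluster numbers */
static int npending;

static bool is_pending(unsigned int lclu)
{
        for (int i = 0; i < npending; i++)
                if (pending[i] == lclu)
                        return true;
        return false;
}

/*
 * Decision taken when page invalidation removes the last delayed-only
 * block from a cluster: a pending reservation means the cluster is shared
 * with written/unwritten blocks and holds no reservation of its own, so
 * only the pending entry would be dropped; otherwise a reserved cluster
 * is counted for release.
 */
static void last_delonly_removed(unsigned int lclu, unsigned int *to_release)
{
        if (!is_pending(lclu))
                (*to_release)++;
        /* else: the pending entry is simply cancelled (removal omitted) */
}

int main(void)
{
        unsigned int to_release = 0;

        pending[npending++] = 3;        /* cluster 3 is shared */
        last_delonly_removed(3, &to_release);
        last_delonly_removed(5, &to_release);
        printf("clusters to release: %u\n", to_release);   /* prints 1 */
        return 0;
}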
extern int __init ext4_init_es(void);
extern void ext4_exit_es(void);
extern void ext4_es_init_tree(struct ext4_es_tree *tree);
@@ -90,11 +135,18 @@ extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
unsigned int status);
extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len);
-extern void ext4_es_find_delayed_extent_range(struct inode *inode,
- ext4_lblk_t lblk, ext4_lblk_t end,
- struct extent_status *es);
+extern void ext4_es_find_extent_range(struct inode *inode,
+ int (*match_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end,
+ struct extent_status *es);
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status *es);
+extern bool ext4_es_scan_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end);
+extern bool ext4_es_scan_clu(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk);
static inline unsigned int ext4_es_status(struct extent_status *es)
{
@@ -126,6 +178,16 @@ static inline int ext4_es_is_hole(struct extent_status *es)
return (ext4_es_type(es) & EXTENT_STATUS_HOLE) != 0;
}
+static inline int ext4_es_is_mapped(struct extent_status *es)
+{
+ return (ext4_es_is_written(es) || ext4_es_is_unwritten(es));
+}
+
+static inline int ext4_es_is_delonly(struct extent_status *es)
+{
+ return (ext4_es_is_delayed(es) && !ext4_es_is_unwritten(es));
+}
+
static inline void ext4_es_set_referenced(struct extent_status *es)
{
es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
@@ -175,4 +237,16 @@ extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
extern int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v);
+extern int __init ext4_init_pending(void);
+extern void ext4_exit_pending(void);
+extern void ext4_init_pending_tree(struct ext4_pending_tree *tree);
+extern void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk);
+extern bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk);
+extern int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated);
+extern unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len);
+extern void ext4_es_remove_blks(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len);
+
#endif /* _EXT4_EXTENTS_STATUS_H */
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 7b4736022761..9c4bac18cc6c 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -863,7 +863,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
handle_t *handle;
struct page *page;
struct ext4_iloc iloc;
- int retries;
+ int retries = 0;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d767e993591d..05f01fbd9c7f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -577,8 +577,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
- ext4_find_delalloc_range(inode, map->m_lblk,
- map->m_lblk + map->m_len - 1))
+ ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
+ map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk,
map->m_len, map->m_pblk, status);
@@ -701,8 +701,8 @@ found:
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
- ext4_find_delalloc_range(inode, map->m_lblk,
- map->m_lblk + map->m_len - 1))
+ ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
+ map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
@@ -1595,7 +1595,7 @@ static int ext4_da_reserve_space(struct inode *inode)
return 0; /* success */
}
-static void ext4_da_release_space(struct inode *inode, int to_free)
+void ext4_da_release_space(struct inode *inode, int to_free)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -1634,13 +1634,11 @@ static void ext4_da_page_release_reservation(struct page *page,
unsigned int offset,
unsigned int length)
{
- int to_release = 0, contiguous_blks = 0;
+ int contiguous_blks = 0;
struct buffer_head *head, *bh;
unsigned int curr_off = 0;
struct inode *inode = page->mapping->host;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned int stop = offset + length;
- int num_clusters;
ext4_fsblk_t lblk;
BUG_ON(stop > PAGE_SIZE || stop < length);
@@ -1654,7 +1652,6 @@ static void ext4_da_page_release_reservation(struct page *page,
break;
if ((offset <= curr_off) && (buffer_delay(bh))) {
- to_release++;
contiguous_blks++;
clear_buffer_delay(bh);
} else if (contiguous_blks) {
@@ -1662,7 +1659,7 @@ static void ext4_da_page_release_reservation(struct page *page,
(PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) -
contiguous_blks;
- ext4_es_remove_extent(inode, lblk, contiguous_blks);
+ ext4_es_remove_blks(inode, lblk, contiguous_blks);
contiguous_blks = 0;
}
curr_off = next_off;
@@ -1671,21 +1668,9 @@ static void ext4_da_page_release_reservation(struct page *page,
if (contiguous_blks) {
lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
- ext4_es_remove_extent(inode, lblk, contiguous_blks);
+ ext4_es_remove_blks(inode, lblk, contiguous_blks);
}
- /* If we have released all the blocks belonging to a cluster, then we
- * need to release the reserved space for that cluster. */
- num_clusters = EXT4_NUM_B2C(sbi, to_release);
- while (num_clusters > 0) {
- lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
- ((num_clusters - 1) << sbi->s_cluster_bits);
- if (sbi->s_cluster_ratio == 1 ||
- !ext4_find_delalloc_cluster(inode, lblk))
- ext4_da_release_space(inode, 1);
-
- num_clusters--;
- }
}
/*
@@ -1781,6 +1766,65 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
}
/*
+ * ext4_insert_delayed_block - adds a delayed block to the extents status
+ * tree, incrementing the reserved cluster/block
+ * count or making a pending reservation
+ * where needed
+ *
+ * @inode - file containing the newly added block
+ * @lblk - logical block to be added
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ int ret;
+ bool allocated = false;
+
+ /*
+ * If the cluster containing lblk is shared with a delayed,
+ * written, or unwritten extent in a bigalloc file system, it's
+ * already been accounted for and does not need to be reserved.
+ * A pending reservation must be made for the cluster if it's
+ * shared with a written or unwritten extent and doesn't already
+ * have one. Written and unwritten extents can be purged from the
+ * extents status tree if the system is under memory pressure, so
+ * it's necessary to examine the extent tree if a search of the
+ * extents status tree doesn't get a match.
+ */
+ if (sbi->s_cluster_ratio == 1) {
+ ret = ext4_da_reserve_space(inode);
+ if (ret != 0) /* ENOSPC */
+ goto errout;
+ } else { /* bigalloc */
+ if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
+ if (!ext4_es_scan_clu(inode,
+ &ext4_es_is_mapped, lblk)) {
+ ret = ext4_clu_mapped(inode,
+ EXT4_B2C(sbi, lblk));
+ if (ret < 0)
+ goto errout;
+ if (ret == 0) {
+ ret = ext4_da_reserve_space(inode);
+ if (ret != 0) /* ENOSPC */
+ goto errout;
+ } else {
+ allocated = true;
+ }
+ } else {
+ allocated = true;
+ }
+ }
+ }
+
+ ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+
+errout:
+ return ret;
+}
+
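The branch structure above reduces to a small decision function; the standalone sketch below restates it, assuming the three cluster-state queries have already been answered (the struct and function names are invented for illustration and are not ext4 API).

#include <stdbool.h>
#include <stdio.h>

struct clu_state {
        bool has_delonly;   /* cluster already holds delayed-only blocks    */
        bool has_mapped;    /* written/unwritten extent cached for cluster  */
        bool on_disk;       /* extent tree reports the cluster as allocated */
};

static void delayed_block_decision(int cluster_ratio, struct clu_state c,
                                   bool *reserve, bool *allocated)
{
        *reserve = false;
        *allocated = false;

        if (cluster_ratio == 1) {       /* non-bigalloc: always reserve */
                *reserve = true;
                return;
        }
        if (c.has_delonly)              /* cluster already accounted for */
                return;
        if (c.has_mapped || c.on_disk)  /* shared cluster: make it pending */
                *allocated = true;
        else
                *reserve = true;        /* first claim on this cluster */
}

int main(void)
{
        struct clu_state c = { .has_mapped = true };
        bool reserve, allocated;

        delayed_block_decision(4, c, &reserve, &allocated);
        printf("reserve=%d allocated=%d\n", reserve, allocated); /* 0 1 */
        return 0;
}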
+/*
 * This function grabs code from the very beginning of
* ext4_map_blocks, but assumes that the caller is from delayed write
* time. This function looks up the requested blocks and sets the
@@ -1859,28 +1903,14 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
add_delayed:
if (retval == 0) {
int ret;
+
/*
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
*/
- /*
- * If the block was allocated from previously allocated cluster,
- * then we don't need to reserve it again. However we still need
- * to reserve metadata for every block we're going to write.
- */
- if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
- !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
- ret = ext4_da_reserve_space(inode);
- if (ret) {
- /* not enough space to reserve */
- retval = ret;
- goto out_unlock;
- }
- }
- ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- ~0, EXTENT_STATUS_DELAYED);
- if (ret) {
+ ret = ext4_insert_delayed_block(inode, map->m_lblk);
+ if (ret != 0) {
retval = ret;
goto out_unlock;
}
@@ -2613,7 +2643,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
- int tag;
+ xa_mark_t tag;
int i, err = 0;
int blkbits = mpd->inode->i_blkbits;
ext4_lblk_t lblk;
@@ -3450,7 +3480,8 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
ext4_lblk_t end = map.m_lblk + map.m_len - 1;
struct extent_status es;
- ext4_es_find_delayed_extent_range(inode, map.m_lblk, end, &es);
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+ map.m_lblk, end, &es);
if (!es.es_len || es.es_lblk > end) {
/* entire range is a hole */
@@ -6153,13 +6184,14 @@ static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
return !buffer_mapped(bh);
}
-int ext4_page_mkwrite(struct vm_fault *vmf)
+vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = vmf->page;
loff_t size;
unsigned long len;
- int ret;
+ int err;
+ vm_fault_t ret;
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
@@ -6172,8 +6204,8 @@ int ext4_page_mkwrite(struct vm_fault *vmf)
down_read(&EXT4_I(inode)->i_mmap_sem);
- ret = ext4_convert_inline_data(inode);
- if (ret)
+ err = ext4_convert_inline_data(inode);
+ if (err)
goto out_ret;
/* Delalloc case is easy... */
@@ -6181,9 +6213,9 @@ int ext4_page_mkwrite(struct vm_fault *vmf)
!ext4_should_journal_data(inode) &&
!ext4_nonda_switch(inode->i_sb)) {
do {
- ret = block_page_mkwrite(vma, vmf,
+ err = block_page_mkwrite(vma, vmf,
ext4_da_get_block_prep);
- } while (ret == -ENOSPC &&
+ } while (err == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries));
goto out_ret;
}
@@ -6228,8 +6260,8 @@ retry_alloc:
ret = VM_FAULT_SIGBUS;
goto out;
}
- ret = block_page_mkwrite(vma, vmf, get_block);
- if (!ret && ext4_should_journal_data(inode)) {
+ err = block_page_mkwrite(vma, vmf, get_block);
+ if (!err && ext4_should_journal_data(inode)) {
if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
PAGE_SIZE, NULL, do_journal_get_write_access)) {
unlock_page(page);
@@ -6240,24 +6272,24 @@ retry_alloc:
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
}
ext4_journal_stop(handle);
- if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_alloc;
out_ret:
- ret = block_page_mkwrite_return(ret);
+ ret = block_page_mkwrite_return(err);
out:
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
return ret;
}
-int ext4_filemap_fault(struct vm_fault *vmf)
+vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
- int err;
+ vm_fault_t ret;
down_read(&EXT4_I(inode)->i_mmap_sem);
- err = filemap_fault(vmf);
+ ret = filemap_fault(vmf);
up_read(&EXT4_I(inode)->i_mmap_sem);
- return err;
+ return ret;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index a7074115d6f6..0edee31913d1 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -67,7 +67,6 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
ei1 = EXT4_I(inode1);
ei2 = EXT4_I(inode2);
- swap(inode1->i_flags, inode2->i_flags);
swap(inode1->i_version, inode2->i_version);
swap(inode1->i_blocks, inode2->i_blocks);
swap(inode1->i_bytes, inode2->i_bytes);
@@ -85,6 +84,21 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
i_size_write(inode2, isize);
}
+static void reset_inode_seed(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ __le32 inum = cpu_to_le32(inode->i_ino);
+ __le32 gen = cpu_to_le32(inode->i_generation);
+ __u32 csum;
+
+ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
+ ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen));
+}
+
/**
* Swap the information from the given @inode and the inode
* EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
@@ -102,10 +116,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
struct inode *inode_bl;
struct ext4_inode_info *ei_bl;
- if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode))
+ if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
+ IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
+ ext4_has_inline_data(inode))
return -EINVAL;
- if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
+ if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
+ !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
return -EPERM;
inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
@@ -120,13 +137,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
* that only 1 swap_inode_boot_loader is running. */
lock_two_nondirectories(inode, inode_bl);
- truncate_inode_pages(&inode->i_data, 0);
- truncate_inode_pages(&inode_bl->i_data, 0);
-
/* Wait for all existing dio workers */
inode_dio_wait(inode);
inode_dio_wait(inode_bl);
+ truncate_inode_pages(&inode->i_data, 0);
+ truncate_inode_pages(&inode_bl->i_data, 0);
+
handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
if (IS_ERR(handle)) {
err = -EINVAL;
@@ -159,6 +176,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
inode->i_generation = prandom_u32();
inode_bl->i_generation = prandom_u32();
+ reset_inode_seed(inode);
+ reset_inode_seed(inode_bl);
ext4_discard_preallocations(inode);
@@ -169,6 +188,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
inode->i_ino, err);
/* Revert all changes: */
swap_inode_data(inode, inode_bl);
+ ext4_mark_inode_dirty(handle, inode);
} else {
err = ext4_mark_inode_dirty(handle, inode_bl);
if (err < 0) {
@@ -178,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
/* Revert all changes: */
swap_inode_data(inode, inode_bl);
ext4_mark_inode_dirty(handle, inode);
+ ext4_mark_inode_dirty(handle, inode_bl);
}
}
ext4_journal_stop(handle);
@@ -339,19 +360,14 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
if (projid_eq(kprojid, EXT4_I(inode)->i_projid))
return 0;
- err = mnt_want_write_file(filp);
- if (err)
- return err;
-
err = -EPERM;
- inode_lock(inode);
/* Is it quota file? Do not allow user to mess with it */
if (ext4_is_quota_file(inode))
- goto out_unlock;
+ return err;
err = ext4_get_inode_loc(inode, &iloc);
if (err)
- goto out_unlock;
+ return err;
raw_inode = ext4_raw_inode(&iloc);
if (!EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) {
@@ -359,20 +375,20 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
EXT4_SB(sb)->s_want_extra_isize,
&iloc);
if (err)
- goto out_unlock;
+ return err;
} else {
brelse(iloc.bh);
}
- dquot_initialize(inode);
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
EXT4_QUOTA_INIT_BLOCKS(sb) +
EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- goto out_unlock;
- }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
@@ -400,9 +416,6 @@ out_dirty:
err = rc;
out_stop:
ext4_journal_stop(handle);
-out_unlock:
- inode_unlock(inode);
- mnt_drop_write_file(filp);
return err;
}
#else
@@ -626,6 +639,30 @@ group_add_out:
return err;
}
+static int ext4_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+{
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() == &init_user_ns)
+ return 0;
+
+ if (__kprojid_val(EXT4_I(inode)->i_projid) != fa->fsx_projid)
+ return -EINVAL;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_PROJINHERIT)) {
+ if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+ return -EINVAL;
+ } else {
+ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1025,19 +1062,19 @@ resizefs_out:
return err;
inode_lock(inode);
+ err = ext4_ioctl_check_project(inode, &fa);
+ if (err)
+ goto out;
flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
(flags & EXT4_FL_XFLAG_VISIBLE);
err = ext4_ioctl_setflags(inode, flags);
- inode_unlock(inode);
- mnt_drop_write_file(filp);
if (err)
- return err;
-
+ goto out;
err = ext4_ioctl_setproject(filp, fa.fsx_projid);
- if (err)
- return err;
-
- return 0;
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return err;
}
case EXT4_IOC_SHUTDOWN:
return ext4_shutdown(sb, arg);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e29fce2fbf25..e2248083cdca 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4915,9 +4915,17 @@ do_more:
&sbi->s_flex_groups[flex_group].free_clusters);
}
- if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
- dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
- percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
+ /*
+ * on a bigalloc file system, defer the s_freeclusters_counter
+ * update to the caller (ext4_remove_space and friends) so they
+ * can determine if a cluster freed here should be rereserved
+ */
+ if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
+ if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+ dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
+ percpu_counter_add(&sbi->s_freeclusters_counter,
+ count_clusters);
+ }
ext4_mb_unload_buddy(&e4b);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index a409ff70d67b..2f5be02fc6f6 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -516,9 +516,13 @@ mext_check_arguments(struct inode *orig_inode,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
- if (orig_eof < orig_start + *len - 1)
+ if (orig_eof <= orig_start)
+ *len = 0;
+ else if (orig_eof < orig_start + *len - 1)
*len = orig_eof - orig_start;
- if (donor_eof < donor_start + *len - 1)
+ if (donor_eof <= donor_start)
+ *len = 0;
+ else if (donor_eof < donor_start + *len - 1)
*len = donor_eof - donor_start;
if (!*len) {
ext4_debug("ext4 move extent: len should not be 0 "
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 377d516c475f..67a38532032a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2261,7 +2261,7 @@ again:
dxroot->info.indirect_levels += 1;
dxtrace(printk(KERN_DEBUG
"Creating %d level index...\n",
- info->indirect_levels));
+ dxroot->info.indirect_levels));
err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
if (err)
goto journal_error;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index db7590178dfc..2aa62d58d8dd 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -374,13 +374,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
if (!bio)
return -ENOMEM;
- wbc_init_bio(io->io_wbc, bio);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
+ wbc_init_bio(io->io_wbc, bio);
return 0;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1145109968ef..a221f1cdf704 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -914,6 +914,18 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
for (type = 0; type < EXT4_MAXQUOTAS; type++)
ext4_quota_off(sb, type);
}
+
+/*
+ * This is a helper function which is used in the mount/remount
+ * codepaths (which hold s_umount) to fetch the quota file name.
+ */
+static inline char *get_qf_name(struct super_block *sb,
+ struct ext4_sb_info *sbi,
+ int type)
+{
+ return rcu_dereference_protected(sbi->s_qf_names[type],
+ lockdep_is_held(&sb->s_umount));
+}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
@@ -965,7 +977,7 @@ static void ext4_put_super(struct super_block *sb)
percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
- kfree(sbi->s_qf_names[i]);
+ kfree(get_qf_name(sb, sbi, i));
#endif
/* Debugging code just in case the in-memory inode orphan list
@@ -1040,6 +1052,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_da_metadata_calc_len = 0;
ei->i_da_metadata_calc_last_lblock = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
+ ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
@@ -1530,11 +1543,10 @@ static const char deprecated_msg[] =
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- char *qname;
+ char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
int ret = -1;
- if (sb_any_quota_loaded(sb) &&
- !sbi->s_qf_names[qtype]) {
+ if (sb_any_quota_loaded(sb) && !old_qname) {
ext4_msg(sb, KERN_ERR,
"Cannot change journaled "
"quota options when quota turned on");
@@ -1551,8 +1563,8 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
"Not enough memory for storing quotafile name");
return -1;
}
- if (sbi->s_qf_names[qtype]) {
- if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
+ if (old_qname) {
+ if (strcmp(old_qname, qname) == 0)
ret = 1;
else
ext4_msg(sb, KERN_ERR,
@@ -1565,7 +1577,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
"quotafile must be on filesystem root");
goto errout;
}
- sbi->s_qf_names[qtype] = qname;
+ rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
set_opt(sb, QUOTA);
return 1;
errout:
@@ -1577,15 +1589,16 @@ static int clear_qf_name(struct super_block *sb, int qtype)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ char *old_qname = get_qf_name(sb, sbi, qtype);
- if (sb_any_quota_loaded(sb) &&
- sbi->s_qf_names[qtype]) {
+ if (sb_any_quota_loaded(sb) && old_qname) {
ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
" when quota turned on");
return -1;
}
- kfree(sbi->s_qf_names[qtype]);
- sbi->s_qf_names[qtype] = NULL;
+ rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
+ synchronize_rcu();
+ kfree(old_qname);
return 1;
}
#endif
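The s_qf_names changes above and in the hunks that follow all apply one RCU publish/retire pattern; the fragment below is a condensed restatement of that pattern as used here (a kernel-context sketch, not a standalone program; the local variable names are placeholders).

/* reader side: contexts that do not hold s_umount */
rcu_read_lock();
name = rcu_dereference(sbi->s_qf_names[type]);
if (name)
        seq_show_option(seq, "usrjquota", name);
rcu_read_unlock();

/* updater side: mount/remount code, s_umount held */
old = rcu_dereference_protected(sbi->s_qf_names[type],
                                lockdep_is_held(&sb->s_umount));
rcu_assign_pointer(sbi->s_qf_names[type], new_name);
synchronize_rcu();              /* wait out readers that may still see 'old' */
kfree(old);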
@@ -1960,7 +1973,7 @@ static int parse_options(char *options, struct super_block *sb,
int is_remount)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- char *p;
+ char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
substring_t args[MAX_OPT_ARGS];
int token;
@@ -1991,11 +2004,13 @@ static int parse_options(char *options, struct super_block *sb,
"Cannot enable project quota enforcement.");
return 0;
}
- if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
- if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+ usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
+ grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
+ if (usr_qf_name || grp_qf_name) {
+ if (test_opt(sb, USRQUOTA) && usr_qf_name)
clear_opt(sb, USRQUOTA);
- if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
+ if (test_opt(sb, GRPQUOTA) && grp_qf_name)
clear_opt(sb, GRPQUOTA);
if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
@@ -2029,6 +2044,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
{
#if defined(CONFIG_QUOTA)
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ char *usr_qf_name, *grp_qf_name;
if (sbi->s_jquota_fmt) {
char *fmtname = "";
@@ -2047,11 +2063,14 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
seq_printf(seq, ",jqfmt=%s", fmtname);
}
- if (sbi->s_qf_names[USRQUOTA])
- seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
-
- if (sbi->s_qf_names[GRPQUOTA])
- seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
+ rcu_read_lock();
+ usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
+ grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
+ if (usr_qf_name)
+ seq_show_option(seq, "usrjquota", usr_qf_name);
+ if (grp_qf_name)
+ seq_show_option(seq, "grpjquota", grp_qf_name);
+ rcu_read_unlock();
#endif
}
@@ -5103,6 +5122,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
int err = 0;
#ifdef CONFIG_QUOTA
int i, j;
+ char *to_free[EXT4_MAXQUOTAS];
#endif
char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -5122,8 +5142,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
for (i = 0; i < EXT4_MAXQUOTAS; i++)
if (sbi->s_qf_names[i]) {
- old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
- GFP_KERNEL);
+ char *qf_name = get_qf_name(sb, sbi, i);
+
+ old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
if (!old_opts.s_qf_names[i]) {
for (j = 0; j < i; j++)
kfree(old_opts.s_qf_names[j]);
@@ -5352,9 +5373,12 @@ restore_opts:
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
- kfree(sbi->s_qf_names[i]);
- sbi->s_qf_names[i] = old_opts.s_qf_names[i];
+ to_free[i] = get_qf_name(sb, sbi, i);
+ rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
}
+ synchronize_rcu();
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ kfree(to_free[i]);
#endif
kfree(orig_data);
return err;
@@ -5545,7 +5569,7 @@ static int ext4_write_info(struct super_block *sb, int type)
*/
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
- return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
+ return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
EXT4_SB(sb)->s_jquota_fmt, type);
}
@@ -5954,6 +5978,10 @@ static int __init ext4_init_fs(void)
if (err)
return err;
+ err = ext4_init_pending();
+ if (err)
+ goto out6;
+
err = ext4_init_pageio();
if (err)
goto out5;
@@ -5992,6 +6020,8 @@ out3:
out4:
ext4_exit_pageio();
out5:
+ ext4_exit_pending();
+out6:
ext4_exit_es();
return err;
@@ -6009,6 +6039,7 @@ static void __exit ext4_exit_fs(void)
ext4_exit_system_zone();
ext4_exit_pageio();
ext4_exit_es();
+ ext4_exit_pending();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 111824199a88..fa707cdd4120 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/acl.c
*
@@ -7,10 +8,6 @@
* Portions of this code from linux/fs/ext2/acl.c
*
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/f2fs_fs.h>
#include "f2fs.h"
@@ -53,6 +50,9 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
const char *end = value + size;
+ if (size < sizeof(struct f2fs_acl_header))
+ return ERR_PTR(-EINVAL);
+
if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION))
return ERR_PTR(-EINVAL);
@@ -394,12 +394,16 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
ipage);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
error = __f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl,
ipage);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
return error;
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 2c685185c24d..b96823c59b15 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/acl.h
*
@@ -7,10 +8,6 @@
* Portions of this code from linux/fs/ext2/acl.h
*
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __F2FS_ACL_H__
#define __F2FS_ACL_H__
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index e8b6b89bddb8..9c28ea439e0b 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/checkpoint.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/bio.h>
@@ -122,11 +119,8 @@ retry:
if (PTR_ERR(page) == -EIO &&
++count <= DEFAULT_RETRY_IO_COUNT)
goto retry;
-
f2fs_stop_checkpoint(sbi, false);
- f2fs_bug_on(sbi, 1);
}
-
return page;
}
@@ -282,8 +276,7 @@ static int __f2fs_write_meta_page(struct page *page,
dec_page_count(sbi, F2FS_DIRTY_META);
if (wbc->for_reclaim)
- f2fs_submit_merged_write_cond(sbi, page->mapping->host,
- 0, page->index, META);
+ f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
unlock_page(page);
@@ -696,6 +689,8 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
/* clear Orphan Flag */
clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
+ set_sbi_flag(sbi, SBI_IS_RECOVERED);
+
#ifdef CONFIG_QUOTA
/* Turn quotas off */
if (quota_enabled)
@@ -1084,6 +1079,21 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi)
ckpt->next_free_nid = cpu_to_le32(last_nid);
}
+static bool __need_flush_quota(struct f2fs_sb_info *sbi)
+{
+ if (!is_journalled_quota(sbi))
+ return false;
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
+ return false;
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
+ return false;
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH))
+ return true;
+ if (get_pages(sbi, F2FS_DIRTY_QDATA))
+ return true;
+ return false;
+}
+
/*
* Freeze all the FS-operations for checkpoint.
*/
@@ -1095,12 +1105,36 @@ static int block_operations(struct f2fs_sb_info *sbi)
.for_reclaim = 0,
};
struct blk_plug plug;
- int err = 0;
+ int err = 0, cnt = 0;
blk_start_plug(&plug);
-retry_flush_dents:
+retry_flush_quotas:
+ if (__need_flush_quota(sbi)) {
+ int locked;
+
+ if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
+ set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+ f2fs_lock_all(sbi);
+ goto retry_flush_dents;
+ }
+ clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+
+ /* only failed during mount/umount/freeze/quotactl */
+ locked = down_read_trylock(&sbi->sb->s_umount);
+ f2fs_quota_sync(sbi->sb, -1);
+ if (locked)
+ up_read(&sbi->sb->s_umount);
+ }
+
f2fs_lock_all(sbi);
+ if (__need_flush_quota(sbi)) {
+ f2fs_unlock_all(sbi);
+ cond_resched();
+ goto retry_flush_quotas;
+ }
+
+retry_flush_dents:
/* write all the dirty dentry pages */
if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
f2fs_unlock_all(sbi);
@@ -1108,7 +1142,7 @@ retry_flush_dents:
if (err)
goto out;
cond_resched();
- goto retry_flush_dents;
+ goto retry_flush_quotas;
}
/*
@@ -1117,6 +1151,12 @@ retry_flush_dents:
*/
down_write(&sbi->node_change);
+ if (__need_flush_quota(sbi)) {
+ up_write(&sbi->node_change);
+ f2fs_unlock_all(sbi);
+ goto retry_flush_quotas;
+ }
+
if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
@@ -1124,7 +1164,7 @@ retry_flush_dents:
if (err)
goto out;
cond_resched();
- goto retry_flush_dents;
+ goto retry_flush_quotas;
}
retry_flush_nodes:
@@ -1215,6 +1255,19 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
__set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
+ __set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);
+
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
+ __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
+ __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+
/* set this flag to activate crc|cp_ver for recovery */
__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
__clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);
@@ -1422,6 +1475,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
+ clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+ sbi->unusable_block_count = 0;
__set_cp_next_pack(sbi);
/*
@@ -1446,6 +1501,12 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned long long ckpt_ver;
int err = 0;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ if (cpc->reason != CP_PAUSE)
+ return 0;
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Start checkpoint disabled!");
+ }
mutex_lock(&sbi->cp_mutex);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
@@ -1497,7 +1558,10 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
/* write cached NAT/SIT entries to NAT/SIT area */
- f2fs_flush_nat_entries(sbi, cpc);
+ err = f2fs_flush_nat_entries(sbi, cpc);
+ if (err)
+ goto stop;
+
f2fs_flush_sit_entries(sbi, cpc);
/* unlock all the fs_lock[] in do_checkpoint() */
@@ -1506,7 +1570,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_release_discard_addrs(sbi);
else
f2fs_clear_prefree_segments(sbi, cpc);
-
+stop:
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 382c1ef9a9e4..b293cb3e27a2 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/data.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -49,12 +46,29 @@ static bool __is_cp_guaranteed(struct page *page)
inode->i_ino == F2FS_NODE_INO(sbi) ||
S_ISDIR(inode->i_mode) ||
(S_ISREG(inode->i_mode) &&
- is_inode_flag_set(inode, FI_ATOMIC_FILE)) ||
+ (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
is_cold_data(page))
return true;
return false;
}
+static enum count_type __read_io_type(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+
+ if (mapping) {
+ struct inode *inode = mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_META_INO(sbi))
+ return F2FS_RD_META;
+
+ if (inode->i_ino == F2FS_NODE_INO(sbi))
+ return F2FS_RD_NODE;
+ }
+ return F2FS_RD_DATA;
+}
+
/* postprocessing steps for read bios */
enum bio_post_read_step {
STEP_INITIAL = 0,
@@ -80,10 +94,12 @@ static void __read_end_io(struct bio *bio)
/* PG_error was set if any post_read step failed */
if (bio->bi_status || PageError(page)) {
ClearPageUptodate(page);
- SetPageError(page);
+ /* will re-read again later */
+ ClearPageError(page);
} else {
SetPageUptodate(page);
}
+ dec_page_count(F2FS_P_SB(page), __read_io_type(page));
unlock_page(page);
}
if (bio->bi_private)
@@ -126,8 +142,9 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
static void f2fs_read_end_io(struct bio *bio)
{
- if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
- f2fs_show_injection_info(FAULT_IO);
+ if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
+ FAULT_READ_IO)) {
+ f2fs_show_injection_info(FAULT_READ_IO);
bio->bi_status = BLK_STS_IOERR;
}
@@ -148,6 +165,11 @@ static void f2fs_write_end_io(struct bio *bio)
struct bio_vec *bvec;
int i;
+ if (time_to_inject(sbi, FAULT_WRITE_IO)) {
+ f2fs_show_injection_info(FAULT_WRITE_IO);
+ bio->bi_status = BLK_STS_IOERR;
+ }
+
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
enum count_type type = WB_DATA_TYPE(page);
@@ -319,8 +341,8 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
io->bio = NULL;
}
-static bool __has_merged_page(struct f2fs_bio_info *io,
- struct inode *inode, nid_t ino, pgoff_t idx)
+static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+ struct page *page, nid_t ino)
{
struct bio_vec *bvec;
struct page *target;
@@ -329,7 +351,7 @@ static bool __has_merged_page(struct f2fs_bio_info *io,
if (!io->bio)
return false;
- if (!inode && !ino)
+ if (!inode && !page && !ino)
return true;
bio_for_each_segment_all(bvec, io->bio, i) {
@@ -339,11 +361,10 @@ static bool __has_merged_page(struct f2fs_bio_info *io,
else
target = fscrypt_control_page(bvec->bv_page);
- if (idx != target->index)
- continue;
-
if (inode && inode == target->mapping->host)
return true;
+ if (page && page == target)
+ return true;
if (ino && ino == ino_of_node(target))
return true;
}
@@ -352,7 +373,8 @@ static bool __has_merged_page(struct f2fs_bio_info *io,
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
- nid_t ino, pgoff_t idx, enum page_type type)
+ struct page *page, nid_t ino,
+ enum page_type type)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
enum temp_type temp;
@@ -363,7 +385,7 @@ static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
io = sbi->write_io[btype] + temp;
down_read(&io->io_rwsem);
- ret = __has_merged_page(io, inode, ino, idx);
+ ret = __has_merged_page(io, inode, page, ino);
up_read(&io->io_rwsem);
/* TODO: use HOT temp only for meta pages now. */
@@ -394,12 +416,12 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, nid_t ino, pgoff_t idx,
- enum page_type type, bool force)
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type, bool force)
{
enum temp_type temp;
- if (!force && !has_merged_page(sbi, inode, ino, idx, type))
+ if (!force && !has_merged_page(sbi, inode, page, ino, type))
return;
for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
@@ -418,10 +440,10 @@ void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
}
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, nid_t ino, pgoff_t idx,
- enum page_type type)
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type)
{
- __submit_merged_write_cond(sbi, inode, ino, idx, type, false);
+ __submit_merged_write_cond(sbi, inode, page, ino, type, false);
}
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
@@ -456,12 +478,16 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio_put(bio);
return -EFAULT;
}
+
+ if (fio->io_wbc && !is_read_io(fio->op))
+ wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
+
bio_set_op_attrs(bio, fio->op, fio->op_flags);
- __submit_bio(fio->sbi, bio, fio->type);
+ inc_page_count(fio->sbi, is_read_io(fio->op) ?
+ __read_io_type(page): WB_DATA_TYPE(fio->page));
- if (!is_read_io(fio->op))
- inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
+ __submit_bio(fio->sbi, bio, fio->type);
return 0;
}
@@ -533,6 +559,9 @@ skip:
if (fio->in_list)
goto next;
out:
+ if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
+ f2fs_is_checkpoint_ready(sbi))
+ __submit_merged_bio(io);
up_write(&io->io_rwsem);
}
@@ -565,9 +594,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
ctx->bio = bio;
ctx->enabled_steps = post_read_steps;
bio->bi_private = ctx;
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_block_writeback(sbi, blkaddr);
}
return bio;
@@ -582,10 +608,15 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
if (IS_ERR(bio))
return PTR_ERR(bio);
+ /* wait for GCed page writeback via META_MAPPING */
+ f2fs_wait_on_block_writeback(inode, blkaddr);
+
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
+ ClearPageError(page);
+ inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
__submit_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
@@ -876,7 +907,6 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
struct f2fs_summary sum;
struct node_info ni;
block_t old_blkaddr;
- pgoff_t fofs;
blkcnt_t count = 1;
int err;
@@ -889,7 +919,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
dn->data_blkaddr = datablock_addr(dn->inode,
dn->node_page, dn->ofs_in_node);
- if (dn->data_blkaddr == NEW_ADDR)
+ if (dn->data_blkaddr != NULL_ADDR)
goto alloc;
if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
@@ -905,12 +935,10 @@ alloc:
old_blkaddr, old_blkaddr);
f2fs_set_data_blkaddr(dn);
- /* update i_size */
- fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
- dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
- f2fs_i_size_write(dn->inode,
- ((loff_t)(fofs + 1) << PAGE_SHIFT));
+ /*
+ * i_size will be updated by direct_IO. Otherwise, we'll get stale
+ * data from unwritten block via dio_read.
+ */
return 0;
}
@@ -945,7 +973,7 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
if (direct_io) {
map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
- flag = f2fs_force_buffered_io(inode, WRITE) ?
+ flag = f2fs_force_buffered_io(inode, iocb, from) ?
F2FS_GET_BLOCK_PRE_AIO :
F2FS_GET_BLOCK_PRE_DIO;
goto map_blocks;
@@ -970,7 +998,7 @@ map_blocks:
return err;
}
-static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
if (flag == F2FS_GET_BLOCK_PRE_AIO) {
if (lock)
@@ -1025,6 +1053,11 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
map->m_flags = F2FS_MAP_MAPPED;
if (map->m_next_extent)
*map->m_next_extent = pgofs + map->m_len;
+
+ /* for hardware encryption, but to avoid potential issue in future */
+ if (flag == F2FS_GET_BLOCK_DIO)
+ f2fs_wait_on_block_writeback_range(inode,
+ map->m_pblk, map->m_len);
goto out;
}
@@ -1064,7 +1097,15 @@ next_block:
goto sync_out;
}
- if (!is_valid_data_blkaddr(sbi, blkaddr)) {
+ if (is_valid_data_blkaddr(sbi, blkaddr)) {
+ /* use out-place-update for direct IO under LFS mode */
+ if (test_opt(sbi, LFS) && create &&
+ flag == F2FS_GET_BLOCK_DIO) {
+ err = __allocate_data_block(&dn, map->m_seg_type);
+ if (!err)
+ set_inode_flag(inode, FI_APPEND_WRITE);
+ }
+ } else {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
@@ -1076,6 +1117,8 @@ next_block:
last_ofs_in_node = dn.ofs_in_node;
}
} else {
+ WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
+ flag != F2FS_GET_BLOCK_DIO);
err = __allocate_data_block(&dn,
map->m_seg_type);
if (!err)
@@ -1173,6 +1216,12 @@ skip:
goto next_dnode;
sync_out:
+
+ /* for hardware encryption, but to avoid potential issue in future */
+ if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
+ f2fs_wait_on_block_writeback_range(inode,
+ map->m_pblk, map->m_len);
+
if (flag == F2FS_GET_BLOCK_PRECACHE) {
if (map->m_flags & F2FS_MAP_MAPPED) {
unsigned int ofs = start_pgofs - map->m_lblk;
@@ -1255,7 +1304,7 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DEFAULT, NULL,
+ F2FS_GET_BLOCK_DIO, NULL,
f2fs_rw_hint_to_seg_type(
inode->i_write_hint));
}
@@ -1558,9 +1607,17 @@ submit_and_realloc:
}
}
+ /*
+ * If the page is under writeback, we need to wait for
+ * its completion to see the correct decrypted data.
+ */
+ f2fs_wait_on_block_writeback(inode, block_nr);
+
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
+ inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+ ClearPageError(page);
last_block_in_bio = block_nr;
goto next_page;
set_error_page:
@@ -1625,7 +1682,7 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
return 0;
/* wait for GCed page writeback via META_MAPPING */
- f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
+ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
@@ -1682,6 +1739,10 @@ static inline bool check_inplace_update_policy(struct inode *inode,
is_inode_flag_set(inode, FI_NEED_IPU))
return true;
+ if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
+ return true;
+
return false;
}
@@ -1705,6 +1766,8 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
if (S_ISDIR(inode->i_mode))
return true;
+ if (IS_NOQUOTA(inode))
+ return true;
if (f2fs_is_atomic_file(inode))
return true;
if (fio) {
@@ -1712,6 +1775,9 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
return true;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
+ return true;
}
return false;
}
@@ -1763,6 +1829,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
+ clear_cold_data(page);
goto out_writepage;
}
got_it:
@@ -1938,18 +2005,20 @@ done:
out:
inode_dec_dirty_pages(inode);
- if (err)
+ if (err) {
ClearPageUptodate(page);
+ clear_cold_data(page);
+ }
if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
+ f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
clear_inode_flag(inode, FI_HOT_DATA);
f2fs_remove_dirty_inode(inode);
submitted = NULL;
}
unlock_page(page);
- if (!S_ISDIR(inode->i_mode))
+ if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode))
f2fs_balance_fs(sbi, need_balance_fs);
if (unlikely(f2fs_cp_error(sbi))) {
@@ -2000,10 +2069,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
- pgoff_t last_idx = ULONG_MAX;
int cycled;
int range_whole = 0;
- int tag;
+ xa_mark_t tag;
+ int nwritten = 0;
pagevec_init(&pvec);
@@ -2106,7 +2175,7 @@ continue_unlock:
done = 1;
break;
} else if (submitted) {
- last_idx = page->index;
+ nwritten++;
}
if (--wbc->nr_to_write <= 0 &&
@@ -2128,9 +2197,9 @@ continue_unlock:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
- if (last_idx != ULONG_MAX)
+ if (nwritten)
f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
- 0, last_idx, DATA);
+ NULL, 0, DATA);
return ret;
}
@@ -2140,6 +2209,8 @@ static inline bool __should_serialize_io(struct inode *inode,
{
if (!S_ISREG(inode->i_mode))
return false;
+ if (IS_NOQUOTA(inode))
+ return false;
if (wbc->sync_mode != WB_SYNC_ALL)
return true;
if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
@@ -2169,7 +2240,8 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto skip_write;
- if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
+ if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
+ wbc->sync_mode == WB_SYNC_NONE &&
get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
f2fs_available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
@@ -2234,7 +2306,7 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache(inode, i_size);
- f2fs_truncate_blocks(inode, i_size, true);
+ f2fs_truncate_blocks(inode, i_size, true, true);
up_write(&F2FS_I(inode)->i_mmap_sem);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2332,6 +2404,10 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
trace_f2fs_write_begin(inode, pos, len, flags);
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ goto fail;
+
if ((f2fs_is_atomic_file(inode) &&
!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
@@ -2369,7 +2445,8 @@ repeat:
if (err)
goto fail;
- if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
+ if (need_balance && !IS_NOQUOTA(inode) &&
+ has_not_enough_free_secs(sbi, 0, 0)) {
unlock_page(page);
f2fs_balance_fs(sbi, true);
lock_page(page);
@@ -2382,10 +2459,6 @@ repeat:
f2fs_wait_on_page_writeback(page, DATA, false);
- /* wait for GCed page writeback via META_MAPPING */
- if (f2fs_post_read_required(inode))
- f2fs_wait_on_block_writeback(sbi, blkaddr);
-
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
@@ -2480,36 +2553,53 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
int rw = iov_iter_rw(iter);
int err;
enum rw_hint hint = iocb->ki_hint;
int whint_mode = F2FS_OPTION(sbi).whint_mode;
+ bool do_opu;
err = check_direct_IO(inode, iter, offset);
if (err)
return err < 0 ? err : 0;
- if (f2fs_force_buffered_io(inode, rw))
+ if (f2fs_force_buffered_io(inode, iocb, iter))
return 0;
+ do_opu = allow_outplace_dio(inode, iocb, iter);
+
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
iocb->ki_hint = WRITE_LIFE_NOT_SET;
- if (!down_read_trylock(&F2FS_I(inode)->i_gc_rwsem[rw])) {
- if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
iocb->ki_hint = hint;
err = -EAGAIN;
goto out;
}
- down_read(&F2FS_I(inode)->i_gc_rwsem[rw]);
+ if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
+ up_read(&fi->i_gc_rwsem[rw]);
+ iocb->ki_hint = hint;
+ err = -EAGAIN;
+ goto out;
+ }
+ } else {
+ down_read(&fi->i_gc_rwsem[rw]);
+ if (do_opu)
+ down_read(&fi->i_gc_rwsem[READ]);
}
err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
- up_read(&F2FS_I(inode)->i_gc_rwsem[rw]);
+
+ if (do_opu)
+ up_read(&fi->i_gc_rwsem[READ]);
+
+ up_read(&fi->i_gc_rwsem[rw]);
if (rw == WRITE) {
if (whint_mode == WHINT_MODE_OFF)
@@ -2517,7 +2607,8 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (err > 0) {
f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
err);
- set_inode_flag(inode, FI_UPDATE_WRITE);
+ if (!do_opu)
+ set_inode_flag(inode, FI_UPDATE_WRITE);
} else if (err < 0) {
f2fs_write_failed(mapping, offset + count);
}
@@ -2550,6 +2641,8 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
}
}
+ clear_cold_data(page);
+
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return f2fs_drop_inmem_page(inode, page);
@@ -2568,6 +2661,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
if (IS_ATOMIC_WRITTEN_PAGE(page))
return 0;
+ clear_cold_data(page);
set_page_private(page, 0);
ClearPagePrivate(page);
return 1;
@@ -2583,10 +2677,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
- /* don't remain PG_checked flag which was set during GC */
- if (is_cold_data(page))
- clear_cold_data(page);
-
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
f2fs_register_inmem_page(inode, page);
@@ -2697,13 +2787,13 @@ const struct address_space_operations f2fs_dblock_aops = {
#endif
};
-void f2fs_clear_radix_tree_dirty_tag(struct page *page)
+void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
struct address_space *mapping = page_mapping(page);
unsigned long flags;
xa_lock_irqsave(&mapping->i_pages, flags);
- radix_tree_tag_clear(&mapping->i_pages, page_index(page),
+ __xa_clear_mark(&mapping->i_pages, page_index(page),
PAGECACHE_TAG_DIRTY);
xa_unlock_irqrestore(&mapping->i_pages, flags);
}
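The IOCB_NOWAIT branch of f2fs_direct_IO() above takes i_gc_rwsem[rw] and, for out-of-place direct IO, additionally i_gc_rwsem[READ], backing out of the first lock when the second trylock fails. A minimal userspace sketch of that trylock-with-rollback pattern (not part of this patch), using pthread rwlocks and illustrative names only:

#include <errno.h>
#include <pthread.h>

enum { GC_READ, GC_WRITE };

static pthread_rwlock_t gc_rwsem[2] = {
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER };

/* Take the lock for the IO direction; for out-of-place DIO (writes only)
 * also take the READ lock, dropping the first lock if that fails. */
static int dio_trylock_nowait(int rw, int do_opu)
{
	if (pthread_rwlock_tryrdlock(&gc_rwsem[rw]))
		return -EAGAIN;
	if (do_opu && pthread_rwlock_tryrdlock(&gc_rwsem[GC_READ])) {
		pthread_rwlock_unlock(&gc_rwsem[rw]);
		return -EAGAIN;
	}
	return 0;
}

static void dio_unlock(int rw, int do_opu)
{
	/* release in the reverse order of acquisition, as the patch does */
	if (do_opu)
		pthread_rwlock_unlock(&gc_rwsem[GC_READ]);
	pthread_rwlock_unlock(&gc_rwsem[rw]);
}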
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 214a968962a1..139b4d5c83d5 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs debugging statistics
*
@@ -5,10 +6,6 @@
* http://www.samsung.com/
* Copyright (c) 2012 Linux Foundation
* Copyright (c) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
@@ -58,6 +55,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
+ si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
+ si->nr_rd_node = get_pages(sbi, F2FS_RD_NODE);
+ si->nr_rd_meta = get_pages(sbi, F2FS_RD_META);
if (SM_I(sbi) && SM_I(sbi)->fcc_info) {
si->nr_flushed =
atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
@@ -104,6 +104,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->avail_nids = NM_I(sbi)->available_nids;
si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
si->bg_gc = sbi->bg_gc;
+ si->io_skip_bggc = sbi->io_skip_bggc;
+ si->other_skip_bggc = sbi->other_skip_bggc;
si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
@@ -121,6 +123,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]);
}
+ for (i = META_CP; i < META_MAX; i++)
+ si->meta_count[i] = atomic_read(&sbi->meta_count[i]);
+
for (i = 0; i < 2; i++) {
si->segment_count[i] = sbi->segment_count[i];
si->block_count[i] = sbi->block_count[i];
@@ -190,8 +195,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
- if (f2fs_discard_en(sbi))
- si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
@@ -271,7 +275,8 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n",
si->sbi->sb->s_bdev, i++,
f2fs_readonly(si->sbi->sb) ? "RO": "RW",
- f2fs_cp_error(si->sbi) ? "Error": "Good");
+ is_set_ckpt_flags(si->sbi, CP_DISABLED_FLAG) ?
+ "Disabled": (f2fs_cp_error(si->sbi) ? "Error": "Good"));
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
si->sit_area_segs, si->nat_area_segs);
seq_printf(s, "[SSA: %d] [MAIN: %d",
@@ -333,6 +338,13 @@ static int stat_show(struct seq_file *s, void *v)
si->prefree_count, si->free_segs, si->free_secs);
seq_printf(s, "CP calls: %d (BG: %d)\n",
si->cp_count, si->bg_cp_count);
+ seq_printf(s, " - cp blocks : %u\n", si->meta_count[META_CP]);
+ seq_printf(s, " - sit blocks : %u\n",
+ si->meta_count[META_SIT]);
+ seq_printf(s, " - nat blocks : %u\n",
+ si->meta_count[META_NAT]);
+ seq_printf(s, " - ssa blocks : %u\n",
+ si->meta_count[META_SSA]);
seq_printf(s, "GC calls: %d (BG: %d)\n",
si->call_count, si->bg_gc);
seq_printf(s, " - data segments : %d (%d)\n",
@@ -349,6 +361,8 @@ static int stat_show(struct seq_file *s, void *v)
si->skipped_atomic_files[BG_GC] +
si->skipped_atomic_files[FG_GC],
si->skipped_atomic_files[BG_GC]);
+ seq_printf(s, "BG skip : IO: %u, Other: %u\n",
+ si->io_skip_bggc, si->other_skip_bggc);
seq_puts(s, "\nExtent Cache:\n");
seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
si->hit_largest, si->hit_cached,
@@ -360,7 +374,9 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
+ seq_printf(s, " - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
+ si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
+ seq_printf(s, " - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
"Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
si->nr_wb_cp_data, si->nr_wb_data,
si->nr_flushing, si->nr_flushed,
@@ -445,6 +461,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_stat_info *si;
+ int i;
si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
if (!si)
@@ -470,6 +487,8 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->inline_inode, 0);
atomic_set(&sbi->inline_dir, 0);
atomic_set(&sbi->inplace_count, 0);
+ for (i = META_CP; i < META_MAX; i++)
+ atomic_set(&sbi->meta_count[i], 0);
atomic_set(&sbi->aw_cnt, 0);
atomic_set(&sbi->vw_cnt, 0);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index ecc3a4e2be96..bacc667950b6 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/dir.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -658,9 +655,9 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
f2fs_put_page(page, 1);
clear_inode_flag(inode, FI_NEW_INODE);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
fail:
up_write(&F2FS_I(inode)->i_sem);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return err;
}
@@ -729,10 +726,11 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
if (bit_pos == NR_DENTRY_IN_BLOCK &&
!f2fs_truncate_hole(dir, page->index, page->index + 1)) {
- f2fs_clear_radix_tree_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(page);
clear_page_dirty_for_io(page);
ClearPagePrivate(page);
ClearPageUptodate(page);
+ clear_cold_data(page);
inode_dec_dirty_pages(dir);
f2fs_remove_dirty_inode(dir);
}
@@ -784,9 +782,15 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
struct f2fs_dir_entry *de = NULL;
struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
+ struct blk_plug plug;
+ bool readdir_ra = sbi->readdir_ra == 1;
+ int err = 0;
bit_pos = ((unsigned long)ctx->pos % d->max);
+ if (readdir_ra)
+ blk_start_plug(&plug);
+
while (bit_pos < d->max) {
bit_pos = find_next_bit_le(d->bitmap, d->max, bit_pos);
if (bit_pos >= d->max)
@@ -806,29 +810,33 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (f2fs_encrypted_inode(d->inode)) {
int save_len = fstr->len;
- int err;
err = fscrypt_fname_disk_to_usr(d->inode,
(u32)de->hash_code, 0,
&de_name, fstr);
if (err)
- return err;
+ goto out;
de_name = *fstr;
fstr->len = save_len;
}
if (!dir_emit(ctx, de_name.name, de_name.len,
- le32_to_cpu(de->ino), d_type))
- return 1;
+ le32_to_cpu(de->ino), d_type)) {
+ err = 1;
+ goto out;
+ }
- if (sbi->readdir_ra == 1)
+ if (readdir_ra)
f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
- return 0;
+out:
+ if (readdir_ra)
+ blk_finish_plug(&plug);
+ return err;
}
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 231b77ef5a53..1cb0fcc67d2d 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs extent cache support
*
@@ -5,10 +6,6 @@
* Copyright (c) 2015 Samsung Electronics
* Authors: Jaegeuk Kim <jaegeuk@kernel.org>
* Chao Yu <chao2.yu@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
@@ -30,10 +27,10 @@ static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
return NULL;
}
-static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
+static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
unsigned int ofs)
{
- struct rb_node *node = root->rb_node;
+ struct rb_node *node = root->rb_root.rb_node;
struct rb_entry *re;
while (node) {
@@ -49,7 +46,7 @@ static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
return NULL;
}
-struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
struct rb_entry *cached_re, unsigned int ofs)
{
struct rb_entry *re;
@@ -62,22 +59,25 @@ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
}
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
- struct rb_root *root, struct rb_node **parent,
- unsigned int ofs)
+ struct rb_root_cached *root,
+ struct rb_node **parent,
+ unsigned int ofs, bool *leftmost)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_entry *re;
while (*p) {
*parent = *p;
re = rb_entry(*parent, struct rb_entry, rb_node);
- if (ofs < re->ofs)
+ if (ofs < re->ofs) {
p = &(*p)->rb_left;
- else if (ofs >= re->ofs + re->len)
+ } else if (ofs >= re->ofs + re->len) {
p = &(*p)->rb_right;
- else
+ *leftmost = false;
+ } else {
f2fs_bug_on(sbi, 1);
+ }
}
return p;
@@ -92,16 +92,16 @@ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
* in order to simplify the insertion afterwards.
* tree must stay unchanged between lookup and insertion.
*/
-struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
struct rb_entry *cached_re,
unsigned int ofs,
struct rb_entry **prev_entry,
struct rb_entry **next_entry,
struct rb_node ***insert_p,
struct rb_node **insert_parent,
- bool force)
+ bool force, bool *leftmost)
{
- struct rb_node **pnode = &root->rb_node;
+ struct rb_node **pnode = &root->rb_root.rb_node;
struct rb_node *parent = NULL, *tmp_node;
struct rb_entry *re = cached_re;
@@ -110,7 +110,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
*prev_entry = NULL;
*next_entry = NULL;
- if (RB_EMPTY_ROOT(root))
+ if (RB_EMPTY_ROOT(&root->rb_root))
return NULL;
if (re) {
@@ -118,16 +118,22 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
goto lookup_neighbors;
}
+ if (leftmost)
+ *leftmost = true;
+
while (*pnode) {
parent = *pnode;
re = rb_entry(*pnode, struct rb_entry, rb_node);
- if (ofs < re->ofs)
+ if (ofs < re->ofs) {
pnode = &(*pnode)->rb_left;
- else if (ofs >= re->ofs + re->len)
+ } else if (ofs >= re->ofs + re->len) {
pnode = &(*pnode)->rb_right;
- else
+ if (leftmost)
+ *leftmost = false;
+ } else {
goto lookup_neighbors;
+ }
}
*insert_p = pnode;
@@ -160,10 +166,10 @@ lookup_neighbors:
}
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
- struct rb_node *cur = rb_first(root), *next;
+ struct rb_node *cur = rb_first_cached(root), *next;
struct rb_entry *cur_re, *next_re;
if (!cur)
@@ -196,7 +202,8 @@ static struct kmem_cache *extent_node_slab;
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei,
- struct rb_node *parent, struct rb_node **p)
+ struct rb_node *parent, struct rb_node **p,
+ bool leftmost)
{
struct extent_node *en;
@@ -209,7 +216,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
en->et = et;
rb_link_node(&en->rb_node, parent, p);
- rb_insert_color(&en->rb_node, &et->root);
+ rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
atomic_inc(&et->node_cnt);
atomic_inc(&sbi->total_ext_node);
return en;
@@ -218,7 +225,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
static void __detach_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_node *en)
{
- rb_erase(&en->rb_node, &et->root);
+ rb_erase_cached(&en->rb_node, &et->root);
atomic_dec(&et->node_cnt);
atomic_dec(&sbi->total_ext_node);
@@ -257,7 +264,7 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
memset(et, 0, sizeof(struct extent_tree));
et->ino = ino;
- et->root = RB_ROOT;
+ et->root = RB_ROOT_CACHED;
et->cached_en = NULL;
rwlock_init(&et->lock);
INIT_LIST_HEAD(&et->list);
@@ -278,10 +285,10 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei)
{
- struct rb_node **p = &et->root.rb_node;
+ struct rb_node **p = &et->root.rb_root.rb_node;
struct extent_node *en;
- en = __attach_extent_node(sbi, et, ei, NULL, p);
+ en = __attach_extent_node(sbi, et, ei, NULL, p, true);
if (!en)
return NULL;
@@ -297,7 +304,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
struct extent_node *en;
unsigned int count = atomic_read(&et->node_cnt);
- node = rb_first(&et->root);
+ node = rb_first_cached(&et->root);
while (node) {
next = rb_next(node);
en = rb_entry(node, struct extent_node, rb_node);
@@ -308,14 +315,13 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
return count - atomic_read(&et->node_cnt);
}
-static void __drop_largest_extent(struct inode *inode,
+static void __drop_largest_extent(struct extent_tree *et,
pgoff_t fofs, unsigned int len)
{
- struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
-
- if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
- largest->len = 0;
- f2fs_mark_inode_dirty_sync(inode, true);
+ if (fofs < et->largest.fofs + et->largest.len &&
+ fofs + len > et->largest.fofs) {
+ et->largest.len = 0;
+ et->largest_updated = true;
}
}
@@ -416,12 +422,11 @@ out:
return ret;
}
-static struct extent_node *__try_merge_extent_node(struct inode *inode,
+static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei,
struct extent_node *prev_ex,
struct extent_node *next_ex)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_node *en = NULL;
if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
@@ -443,7 +448,7 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode,
if (!en)
return NULL;
- __try_update_largest_extent(inode, et, en);
+ __try_update_largest_extent(et, en);
spin_lock(&sbi->extent_lock);
if (!list_empty(&en->list)) {
@@ -454,12 +459,12 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode,
return en;
}
-static struct extent_node *__insert_extent_tree(struct inode *inode,
+static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei,
struct rb_node **insert_p,
- struct rb_node *insert_parent)
+ struct rb_node *insert_parent,
+ bool leftmost)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct extent_node *en = NULL;
@@ -470,13 +475,16 @@ static struct extent_node *__insert_extent_tree(struct inode *inode,
goto do_insert;
}
- p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
+ leftmost = true;
+
+ p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
+ ei->fofs, &leftmost);
do_insert:
- en = __attach_extent_node(sbi, et, ei, parent, p);
+ en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
if (!en)
return NULL;
- __try_update_largest_extent(inode, et, en);
+ __try_update_largest_extent(et, en);
/* update in global extent list */
spin_lock(&sbi->extent_lock);
@@ -497,6 +505,8 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
struct rb_node **insert_p = NULL, *insert_parent = NULL;
unsigned int end = fofs + len;
unsigned int pos = (unsigned int)fofs;
+ bool updated = false;
+ bool leftmost;
if (!et)
return;
@@ -517,14 +527,15 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
* drop largest extent before lookup, in case it's already
* been shrunk from extent tree
*/
- __drop_largest_extent(inode, fofs, len);
+ __drop_largest_extent(et, fofs, len);
/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
(struct rb_entry *)et->cached_en, fofs,
(struct rb_entry **)&prev_en,
(struct rb_entry **)&next_en,
- &insert_p, &insert_parent, false);
+ &insert_p, &insert_parent, false,
+ &leftmost);
if (!en)
en = next_en;
@@ -550,8 +561,8 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
set_extent_info(&ei, end,
end - dei.fofs + dei.blk,
org_end - end);
- en1 = __insert_extent_tree(inode, et, &ei,
- NULL, NULL);
+ en1 = __insert_extent_tree(sbi, et, &ei,
+ NULL, NULL, true);
next_en = en1;
} else {
en->ei.fofs = end;
@@ -570,7 +581,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
}
if (parts)
- __try_update_largest_extent(inode, et, en);
+ __try_update_largest_extent(et, en);
else
__release_extent_node(sbi, et, en);
@@ -590,15 +601,16 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
if (blkaddr) {
set_extent_info(&ei, fofs, blkaddr, len);
- if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
- __insert_extent_tree(inode, et, &ei,
- insert_p, insert_parent);
+ if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+ __insert_extent_tree(sbi, et, &ei,
+ insert_p, insert_parent, leftmost);
/* give up extent_cache, if split and small updates happen */
if (dei.len >= 1 &&
prev.len < F2FS_MIN_EXTENT_LEN &&
et->largest.len < F2FS_MIN_EXTENT_LEN) {
- __drop_largest_extent(inode, 0, UINT_MAX);
+ et->largest.len = 0;
+ et->largest_updated = true;
set_inode_flag(inode, FI_NO_EXTENT);
}
}
@@ -606,7 +618,15 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
if (is_inode_flag_set(inode, FI_NO_EXTENT))
__free_extent_tree(sbi, et);
+ if (et->largest_updated) {
+ et->largest_updated = false;
+ updated = true;
+ }
+
write_unlock(&et->lock);
+
+ if (updated)
+ f2fs_mark_inode_dirty_sync(inode, true);
}
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -705,6 +725,7 @@ void f2fs_drop_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ bool updated = false;
if (!f2fs_may_extent_tree(inode))
return;
@@ -713,8 +734,13 @@ void f2fs_drop_extent_tree(struct inode *inode)
write_lock(&et->lock);
__free_extent_tree(sbi, et);
- __drop_largest_extent(inode, 0, UINT_MAX);
+ if (et->largest.len) {
+ et->largest.len = 0;
+ updated = true;
+ }
write_unlock(&et->lock);
+ if (updated)
+ f2fs_mark_inode_dirty_sync(inode, true);
}
void f2fs_destroy_extent_tree(struct inode *inode)
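The leftmost bookkeeping added throughout extent_cache.c exists so the tree can move to rb_root_cached: a new node can only become the cached leftmost node if the descent never stepped right. A standalone sketch of how that hint is derived during lookup-for-insert (simplified, illustrative names):

#include <stdbool.h>

struct ival { unsigned int ofs, len; struct ival *left, *right; };

/* Walk down to the insertion point for [ofs, ofs + len); clear *leftmost
 * the first time we go right, since the new node then cannot hold the
 * smallest key and rb_insert_color_cached() must not update the hint. */
static struct ival **descend(struct ival **p, unsigned int ofs, bool *leftmost)
{
	*leftmost = true;
	while (*p) {
		if (ofs < (*p)->ofs) {
			p = &(*p)->left;
		} else {
			p = &(*p)->right;
			*leftmost = false;
		}
	}
	return p;
}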
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index abf925664d9c..1e031971a466 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1,16 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/f2fs.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H
+#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
@@ -53,9 +51,10 @@ enum {
FAULT_DIR_DEPTH,
FAULT_EVICT_INODE,
FAULT_TRUNCATE,
- FAULT_IO,
+ FAULT_READ_IO,
FAULT_CHECKPOINT,
FAULT_DISCARD,
+ FAULT_WRITE_IO,
FAULT_MAX,
};
@@ -100,6 +99,7 @@ extern char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_QUOTA 0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
#define F2FS_MOUNT_RESERVE_ROOT 0x01000000
+#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -150,6 +150,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_INODE_CRTIME 0x0100
#define F2FS_FEATURE_LOST_FOUND 0x0200
#define F2FS_FEATURE_VERITY 0x0400 /* reserved */
+#define F2FS_FEATURE_SB_CHKSUM 0x0800
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -178,6 +179,7 @@ enum {
#define CP_RECOVERY 0x00000008
#define CP_DISCARD 0x00000010
#define CP_TRIMMED 0x00000020
+#define CP_PAUSE 0x00000040
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
@@ -187,6 +189,7 @@ enum {
#define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */
#define DEF_CP_INTERVAL 60 /* 60 secs */
#define DEF_IDLE_INTERVAL 5 /* 5 secs */
+#define DEF_DISABLE_INTERVAL 5 /* 5 secs */
struct cp_control {
int reason;
@@ -203,6 +206,7 @@ enum {
META_NAT,
META_SIT,
META_SSA,
+ META_MAX,
META_POR,
DATA_GENERIC,
META_GENERIC,
@@ -324,7 +328,7 @@ struct discard_cmd_control {
atomic_t issued_discard; /* # of issued discard */
atomic_t issing_discard; /* # of issing discard */
atomic_t discard_cmd_cnt; /* # of cached cmd count */
- struct rb_root root; /* root of discard rb-tree */
+ struct rb_root_cached root; /* root of discard rb-tree */
bool rbtree_check; /* config for consistence check */
};
@@ -527,6 +531,9 @@ enum {
#define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO count */
+/* maximum retry quota flush count */
+#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
+
#define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
@@ -566,12 +573,13 @@ struct extent_node {
struct extent_tree {
nid_t ino; /* inode number */
- struct rb_root root; /* root of extent info rb-tree */
+ struct rb_root_cached root; /* root of extent info rb-tree */
struct extent_node *cached_en; /* recently accessed extent node */
struct extent_info largest; /* largest extent info */
struct list_head list; /* to be used by sbi->zombie_list */
rwlock_t lock; /* protect extent info rb-tree */
atomic_t node_cnt; /* # of extent node in rb-tree*/
+ bool largest_updated; /* largest extent updated */
};
/*
@@ -600,6 +608,7 @@ enum {
F2FS_GET_BLOCK_DEFAULT,
F2FS_GET_BLOCK_FIEMAP,
F2FS_GET_BLOCK_BMAP,
+ F2FS_GET_BLOCK_DIO,
F2FS_GET_BLOCK_PRE_DIO,
F2FS_GET_BLOCK_PRE_AIO,
F2FS_GET_BLOCK_PRECACHE,
@@ -754,12 +763,12 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
}
extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
-static inline void __try_update_largest_extent(struct inode *inode,
- struct extent_tree *et, struct extent_node *en)
+static inline void __try_update_largest_extent(struct extent_tree *et,
+ struct extent_node *en)
{
if (en->ei.len > et->largest.len) {
et->largest = en->ei;
- f2fs_mark_inode_dirty_sync(inode, true);
+ et->largest_updated = true;
}
}
@@ -944,6 +953,9 @@ enum count_type {
F2FS_DIRTY_IMETA,
F2FS_WB_CP_DATA,
F2FS_WB_DATA,
+ F2FS_RD_DATA,
+ F2FS_RD_NODE,
+ F2FS_RD_META,
NR_COUNT_TYPE,
};
@@ -1088,11 +1100,19 @@ enum {
SBI_NEED_SB_WRITE, /* need to recover superblock */
SBI_NEED_CP, /* need to checkpoint */
SBI_IS_SHUTDOWN, /* shutdown by ioctl */
+ SBI_IS_RECOVERED, /* recovered orphan/data */
+ SBI_CP_DISABLED, /* CP was disabled last mount */
+ SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */
+ SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
+ SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
};
enum {
CP_TIME,
REQ_TIME,
+ DISCARD_TIME,
+ GC_TIME,
+ DISABLE_TIME,
MAX_TIME,
};
@@ -1209,7 +1229,6 @@ struct f2fs_sb_info {
unsigned int total_valid_node_count; /* valid node block count */
loff_t max_file_blocks; /* max block index of file */
int dir_level; /* directory level */
- unsigned int trigger_ssr_threshold; /* threshold to trigger ssr */
int readdir_ra; /* readahead inode in readdir */
block_t user_block_count; /* # of user blocks */
@@ -1219,6 +1238,9 @@ struct f2fs_sb_info {
block_t reserved_blocks; /* configurable reserved blocks */
block_t current_reserved_blocks; /* current reserved blocks */
+ /* Additional tracking for no checkpoint mode */
+ block_t unusable_block_count; /* # of blocks saved by last cp */
+
unsigned int nquota_files; /* # of quota sysfile */
u32 s_next_generation; /* for NFS support */
@@ -1257,6 +1279,7 @@ struct f2fs_sb_info {
*/
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info *stat_info; /* FS status information */
+ atomic_t meta_count[META_MAX]; /* # of meta blocks */
unsigned int segment_count[2]; /* # of allocated segments */
unsigned int block_count[2]; /* # of allocated blocks */
atomic_t inplace_count; /* # of inplace update */
@@ -1272,6 +1295,8 @@ struct f2fs_sb_info {
atomic_t max_aw_cnt; /* max # of atomic writes */
atomic_t max_vw_cnt; /* max # of volatile writes */
int bg_gc; /* background gc calls */
+ unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
+ unsigned int other_skip_bggc; /* skip background gc for other reasons */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
#endif
spinlock_t stat_lock; /* lock for stat operations */
@@ -1306,9 +1331,9 @@ struct f2fs_sb_info {
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
-#define f2fs_show_injection_info(type) \
- printk("%sF2FS-fs : inject %s in %s of %pF\n", \
- KERN_INFO, f2fs_fault_name[type], \
+#define f2fs_show_injection_info(type) \
+ printk_ratelimited("%sF2FS-fs : inject %s in %s of %pF\n", \
+ KERN_INFO, f2fs_fault_name[type], \
__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
@@ -1344,7 +1369,15 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
- sbi->last_time[type] = jiffies;
+ unsigned long now = jiffies;
+
+ sbi->last_time[type] = now;
+
+ /* DISCARD_TIME and GC_TIME are based on REQ_TIME */
+ if (type == REQ_TIME) {
+ sbi->last_time[DISCARD_TIME] = now;
+ sbi->last_time[GC_TIME] = now;
+ }
}
static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
@@ -1354,16 +1387,18 @@ static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
return time_after(jiffies, sbi->last_time[type] + interval);
}
-static inline bool is_idle(struct f2fs_sb_info *sbi)
+static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
+ int type)
{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- struct request_list *rl = &q->root_rl;
+ unsigned long interval = sbi->interval_time[type] * HZ;
+ unsigned int wait_ms = 0;
+ long delta;
- if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
- return false;
+ delta = (sbi->last_time[type] + interval) - jiffies;
+ if (delta > 0)
+ wait_ms = jiffies_to_msecs(delta);
- return f2fs_time_over(sbi, REQ_TIME);
+ return wait_ms;
}
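f2fs_time_to_wait() above just converts "how long until last_time[type] + interval" into milliseconds, clamped at zero; presumably the discard and GC threads use it to size their sleeps. The same arithmetic in standalone form (illustrative, using plain integers instead of jiffies):

/* Remaining wait in ms until (last + interval_ms) is reached; 0 if the
 * interval has already elapsed.  last/now are monotonic milliseconds. */
static unsigned int time_to_wait_ms(long long last, long long interval_ms,
				    long long now)
{
	long long delta = last + interval_ms - now;

	return delta > 0 ? (unsigned int)delta : 0;
}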
/*
@@ -1704,7 +1739,8 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
if (!__allow_reserved_blocks(sbi, inode, true))
avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
-
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ avail_user_block_count -= sbi->unusable_block_count;
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
diff = sbi->total_valid_block_count - avail_user_block_count;
if (diff > *count)
@@ -1755,7 +1791,9 @@ static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
atomic_inc(&sbi->nr_pages[count_type]);
if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
- count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
+ count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA ||
+ count_type == F2FS_RD_DATA || count_type == F2FS_RD_NODE ||
+ count_type == F2FS_RD_META)
return;
set_sbi_flag(sbi, SBI_IS_DIRTY);
@@ -1891,12 +1929,18 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
{
block_t valid_block_count;
unsigned int valid_node_count;
- bool quota = inode && !is_inode;
+ int err;
- if (quota) {
- int ret = dquot_reserve_block(inode, 1);
- if (ret)
- return ret;
+ if (is_inode) {
+ if (inode) {
+ err = dquot_alloc_inode(inode);
+ if (err)
+ return err;
+ }
+ } else {
+ err = dquot_reserve_block(inode, 1);
+ if (err)
+ return err;
}
if (time_to_inject(sbi, FAULT_BLOCK)) {
@@ -1911,6 +1955,8 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
if (!__allow_reserved_blocks(sbi, inode, false))
valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ valid_block_count += sbi->unusable_block_count;
if (unlikely(valid_block_count > sbi->user_block_count)) {
spin_unlock(&sbi->stat_lock);
@@ -1938,8 +1984,12 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
return 0;
enospc:
- if (quota)
+ if (is_inode) {
+ if (inode)
+ dquot_free_inode(inode);
+ } else {
dquot_release_reservation_block(inode, 1);
+ }
return -ENOSPC;
}
@@ -1960,7 +2010,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
spin_unlock(&sbi->stat_lock);
- if (!is_inode)
+ if (is_inode)
+ dquot_free_inode(inode);
+ else
f2fs_i_blocks_write(inode, 1, false, true);
}
@@ -2090,6 +2142,15 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
return bio_alloc(GFP_KERNEL, npages);
}
+static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
+{
+ if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
+ get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
+ get_pages(sbi, F2FS_WB_CP_DATA))
+ return false;
+ return f2fs_time_over(sbi, type);
+}
+
static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
unsigned long index, void *item)
{
@@ -2739,7 +2800,8 @@ static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
*/
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
+ bool buf_write);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
@@ -2749,6 +2811,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);
/*
@@ -2827,6 +2890,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
+int f2fs_quota_sync(struct super_block *sb, int type);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
@@ -2869,7 +2933,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
-void f2fs_move_node_page(struct page *node_page, int gc_type);
+int f2fs_move_node_page(struct page *node_page, int gc_type);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
unsigned int *seq_id);
@@ -2886,7 +2950,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
-void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
@@ -2914,6 +2978,8 @@ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
+void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
+int f2fs_disable_cp_again(struct f2fs_sb_info *sbi);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
@@ -2942,7 +3008,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
struct f2fs_io_info *fio, bool add_list);
void f2fs_wait_on_page_writeback(struct page *page,
enum page_type type, bool ordered);
-void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr);
+void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
+void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
+ block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
@@ -3002,8 +3070,8 @@ int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, nid_t ino, pgoff_t idx,
- enum page_type type);
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
@@ -3025,6 +3093,7 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
struct page *f2fs_get_new_data_page(struct inode *inode,
struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
+void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -3039,7 +3108,7 @@ int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
-void f2fs_clear_radix_tree_dirty_tag(struct page *page);
+void f2fs_clear_page_cache_dirty_tag(struct page *page);
/*
* gc.c
@@ -3077,6 +3146,8 @@ struct f2fs_stat_info {
int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int nr_rd_data, nr_rd_node, nr_rd_meta;
+ unsigned int io_skip_bggc, other_skip_bggc;
int nr_flushing, nr_flushed, flush_list_empty;
int nr_discarding, nr_discarded;
int nr_discard_cmd;
@@ -3098,6 +3169,7 @@ struct f2fs_stat_info {
int cursec[NR_CURSEG_TYPE];
int curzone[NR_CURSEG_TYPE];
+ unsigned int meta_count[META_MAX];
unsigned int segment_count[2];
unsigned int block_count[2];
unsigned int inplace_count;
@@ -3113,6 +3185,8 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
#define stat_inc_call_count(si) ((si)->call_count++)
#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++)
+#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
+#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
@@ -3149,6 +3223,17 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
if (f2fs_has_inline_dentry(inode)) \
(atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
} while (0)
+#define stat_inc_meta_count(sbi, blkaddr) \
+ do { \
+ if (blkaddr < SIT_I(sbi)->sit_base_addr) \
+ atomic_inc(&(sbi)->meta_count[META_CP]); \
+ else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
+ atomic_inc(&(sbi)->meta_count[META_SIT]); \
+ else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
+ atomic_inc(&(sbi)->meta_count[META_NAT]); \
+ else if (blkaddr < SM_I(sbi)->main_blkaddr) \
+ atomic_inc(&(sbi)->meta_count[META_SSA]); \
+ } while (0)
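stat_inc_meta_count() relies on the on-disk layout placing the CP, SIT, NAT, SSA and main areas at ascending block addresses, so one address only needs to be compared against the start of each following area. A standalone sketch of that classification (the boundary parameters are illustrative, not the real sbi fields):

enum meta_area { AREA_CP, AREA_SIT, AREA_NAT, AREA_SSA, AREA_MAIN };

static enum meta_area classify_meta(unsigned int blkaddr,
				    unsigned int sit_start,
				    unsigned int nat_start,
				    unsigned int ssa_start,
				    unsigned int main_start)
{
	if (blkaddr < sit_start)
		return AREA_CP;
	if (blkaddr < nat_start)
		return AREA_SIT;
	if (blkaddr < ssa_start)
		return AREA_NAT;
	if (blkaddr < main_start)
		return AREA_SSA;
	return AREA_MAIN;
}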
#define stat_inc_seg_type(sbi, curseg) \
((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg) \
@@ -3218,6 +3303,8 @@ void f2fs_destroy_root_stats(void);
#define stat_inc_bg_cp_count(si) do { } while (0)
#define stat_inc_call_count(si) do { } while (0)
#define stat_inc_bggc_count(si) do { } while (0)
+#define stat_io_skip_bggc_count(sbi) do { } while (0)
+#define stat_other_skip_bggc_count(sbi) do { } while (0)
#define stat_inc_dirty_inode(sbi, type) do { } while (0)
#define stat_dec_dirty_inode(sbi, type) do { } while (0)
#define stat_inc_total_hit(sb) do { } while (0)
@@ -3236,6 +3323,7 @@ void f2fs_destroy_root_stats(void);
#define stat_inc_volatile_write(inode) do { } while (0)
#define stat_dec_volatile_write(inode) do { } while (0)
#define stat_update_max_volatile_write(inode) do { } while (0)
+#define stat_inc_meta_count(sbi, blkaddr) do { } while (0)
#define stat_inc_seg_type(sbi, curseg) do { } while (0)
#define stat_inc_block_count(sbi, curseg) do { } while (0)
#define stat_inc_inplace_blocks(sbi) do { } while (0)
@@ -3305,18 +3393,19 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
-struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
- struct rb_root *root, struct rb_node **parent,
- unsigned int ofs);
-struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
+ struct rb_root_cached *root,
+ struct rb_node **parent,
+ unsigned int ofs, bool *leftmost);
+struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
struct rb_entry *cached_re, unsigned int ofs,
struct rb_entry **prev_entry, struct rb_entry **next_entry,
struct rb_node ***insert_p, struct rb_node **insert_parent,
- bool force);
+ bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
- struct rb_root *root);
+ struct rb_root_cached *root);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
void f2fs_drop_extent_tree(struct inode *inode);
@@ -3356,7 +3445,7 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
file_set_encrypt(inode);
- inode->i_flags |= S_ENCRYPTED;
+ f2fs_set_inode_flags(inode);
#endif
}
@@ -3384,6 +3473,7 @@ F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
+F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
#ifdef CONFIG_BLK_DEV_ZONED
static inline int get_blkz_type(struct f2fs_sb_info *sbi,
@@ -3399,11 +3489,20 @@ static inline int get_blkz_type(struct f2fs_sb_info *sbi,
}
#endif
-static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
+static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
- struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
+ return f2fs_sb_has_blkzoned(sbi->sb);
+}
- return blk_queue_discard(q) || f2fs_sb_has_blkzoned(sbi->sb);
+static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
+{
+ return blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev));
+}
+
+static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
+{
+ return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
+ f2fs_hw_should_discard(sbi);
}
static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
@@ -3432,11 +3531,50 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
#endif
}
-static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
+static inline int block_unaligned_IO(struct inode *inode,
+ struct kiocb *iocb, struct iov_iter *iter)
{
- return (f2fs_post_read_required(inode) ||
- (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
- F2FS_I_SB(inode)->s_ndevs);
+ unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
+ unsigned int blocksize_mask = (1 << i_blkbits) - 1;
+ loff_t offset = iocb->ki_pos;
+ unsigned long align = offset | iov_iter_alignment(iter);
+
+ return align & blocksize_mask;
+}
+
+static inline int allow_outplace_dio(struct inode *inode,
+ struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int rw = iov_iter_rw(iter);
+
+ return (test_opt(sbi, LFS) && (rw == WRITE) &&
+ !block_unaligned_IO(inode, iocb, iter));
+}
+
+static inline bool f2fs_force_buffered_io(struct inode *inode,
+ struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int rw = iov_iter_rw(iter);
+
+ if (f2fs_post_read_required(inode))
+ return true;
+ if (sbi->s_ndevs)
+ return true;
+ /*
+ * for blkzoned device, fallback direct IO to buffered IO, so
+ * all IOs can be serialized by log-structured write.
+ */
+ if (f2fs_sb_has_blkzoned(sbi->sb))
+ return true;
+ if (test_opt(sbi, LFS) && (rw == WRITE) &&
+ block_unaligned_IO(inode, iocb, iter))
+ return true;
+ if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
+ return true;
+
+ return false;
}
#ifdef CONFIG_F2FS_FAULT_INJECTION
@@ -3447,3 +3585,16 @@ extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
#endif
#endif
+
+static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+{
+#ifdef CONFIG_QUOTA
+ if (f2fs_sb_has_quota_ino(sbi->sb))
+ return true;
+ if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
+ F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
+ F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
+ return true;
+#endif
+ return false;
+}
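block_unaligned_IO() above ORs the file offset with the iov_iter alignment and masks with (blocksize - 1); the result is non-zero iff any of the offset, buffer addresses or segment lengths is not block aligned. A small standalone check of the same trick (values and names are only examples):

#include <stdio.h>

static int unaligned(unsigned long long offset, unsigned long iter_align,
		     unsigned int blkbits)
{
	unsigned long mask = (1UL << blkbits) - 1;

	return ((offset | iter_align) & mask) != 0;
}

int main(void)
{
	/* 4KB blocks: 4KB-aligned buffers pass, 512-byte alignment does not */
	printf("%d %d\n", unaligned(8192, 4096, 12), unaligned(8192, 512, 12));
	return 0;
}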
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 5474aaa274b9..88b124677189 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/file.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -50,7 +47,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dnode_of_data dn;
+ struct dnode_of_data dn = { .node_changed = false };
int err;
if (unlikely(f2fs_cp_error(sbi))) {
@@ -62,19 +59,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
- /* block allocation */
- f2fs_lock_op(sbi);
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_reserve_block(&dn, page->index);
- if (err) {
- f2fs_unlock_op(sbi);
- goto out;
- }
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
-
- f2fs_balance_fs(sbi, dn.node_changed);
-
file_update_time(vmf->vma->vm_file);
down_read(&F2FS_I(inode)->i_mmap_sem);
lock_page(page);
@@ -86,11 +70,28 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
goto out_sem;
}
+ /* block allocation */
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_block(&dn, page->index);
+ f2fs_put_dnode(&dn);
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ if (err) {
+ unlock_page(page);
+ goto out_sem;
+ }
+
+ /* fill the page */
+ f2fs_wait_on_page_writeback(page, DATA, false);
+
+ /* wait for GCed page writeback via META_MAPPING */
+ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
+
/*
* check to see if the page is mapped already (no holes)
*/
if (PageMappedToDisk(page))
- goto mapped;
+ goto out_sem;
/* page is wholly or partially inside EOF */
if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
@@ -105,21 +106,15 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
SetPageUptodate(page);
f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
+ f2fs_update_time(sbi, REQ_TIME);
trace_f2fs_vm_page_mkwrite(page, DATA);
-mapped:
- /* fill the page */
- f2fs_wait_on_page_writeback(page, DATA, false);
-
- /* wait for GCed page writeback via META_MAPPING */
- if (f2fs_post_read_required(inode))
- f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);
-
out_sem:
up_read(&F2FS_I(inode)->i_mmap_sem);
-out:
+
+ f2fs_balance_fs(sbi, dn.node_changed);
+
sb_end_pagefault(inode->i_sb);
- f2fs_update_time(sbi, REQ_TIME);
err:
return block_page_mkwrite_return(err);
}
@@ -215,7 +210,8 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
};
unsigned int seq_id = 0;
- if (unlikely(f2fs_readonly(inode->i_sb)))
+ if (unlikely(f2fs_readonly(inode->i_sb) ||
+ is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
return 0;
trace_f2fs_sync_file_enter(inode);
@@ -590,7 +586,8 @@ truncate_out:
return 0;
}
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
+ bool buf_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -598,6 +595,7 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
int count = 0, err = 0;
struct page *ipage;
bool truncate_page = false;
+ int flag = buf_write ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO;
trace_f2fs_truncate_blocks_enter(inode, from);
@@ -607,7 +605,7 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
goto free_partial;
if (lock)
- f2fs_lock_op(sbi);
+ __do_map_lock(sbi, flag, true);
ipage = f2fs_get_node_page(sbi, inode->i_ino);
if (IS_ERR(ipage)) {
@@ -645,7 +643,7 @@ free_next:
err = f2fs_truncate_inode_blocks(inode, free_from);
out:
if (lock)
- f2fs_unlock_op(sbi);
+ __do_map_lock(sbi, flag, false);
free_partial:
/* lastly zero out the first data page */
if (!err)
@@ -680,7 +678,7 @@ int f2fs_truncate(struct inode *inode)
return err;
}
- err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+ err = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
if (err)
return err;
@@ -789,9 +787,24 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
!uid_eq(attr->ia_uid, inode->i_uid)) ||
(attr->ia_valid & ATTR_GID &&
!gid_eq(attr->ia_gid, inode->i_gid))) {
+ f2fs_lock_op(F2FS_I_SB(inode));
err = dquot_transfer(inode, attr);
- if (err)
+ if (err) {
+ set_sbi_flag(F2FS_I_SB(inode),
+ SBI_QUOTA_NEED_REPAIR);
+ f2fs_unlock_op(F2FS_I_SB(inode));
return err;
+ }
+ /*
+ * update uid/gid under lock_op(), so that dquot and inode can
+ * be updated atomically.
+ */
+ if (attr->ia_valid & ATTR_UID)
+ inode->i_uid = attr->ia_uid;
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ f2fs_unlock_op(F2FS_I_SB(inode));
}
if (attr->ia_valid & ATTR_SIZE) {
@@ -1246,7 +1259,7 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
new_size = i_size_read(inode) - len;
truncate_pagecache(inode, new_size);
- ret = f2fs_truncate_blocks(inode, new_size, true);
+ ret = f2fs_truncate_blocks(inode, new_size, true, false);
up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
@@ -1431,7 +1444,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi, true);
down_write(&F2FS_I(inode)->i_mmap_sem);
- ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+ ret = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret)
return ret;
@@ -1978,7 +1991,7 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!blk_queue_discard(q))
+ if (!f2fs_hw_support_discard(F2FS_SB(sb)))
return -EOPNOTSUPP;
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
@@ -2162,6 +2175,12 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb))
return -EROFS;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Skipping Checkpoint. Checkpoints currently disabled.");
+ return -EINVAL;
+ }
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -2533,6 +2552,9 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb))
return -EROFS;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return -EINVAL;
+
if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
sizeof(range)))
return -EFAULT;
@@ -2591,13 +2613,29 @@ static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
}
#ifdef CONFIG_QUOTA
+int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
+{
+ struct dquot *transfer_to[MAXQUOTAS] = {};
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct super_block *sb = sbi->sb;
+ int err = 0;
+
+ transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
+ if (!IS_ERR(transfer_to[PRJQUOTA])) {
+ err = __dquot_transfer(inode, transfer_to);
+ if (err)
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ dqput(transfer_to[PRJQUOTA]);
+ }
+ return err;
+}
+
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
- struct dquot *transfer_to[MAXQUOTAS] = {};
struct page *ipage;
kprojid_t kprojid;
int err;
@@ -2617,53 +2655,45 @@ static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
return 0;
- err = mnt_want_write_file(filp);
- if (err)
- return err;
-
err = -EPERM;
- inode_lock(inode);
-
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
- goto out_unlock;
+ return err;
ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
- goto out_unlock;
- }
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
i_projid)) {
err = -EOVERFLOW;
f2fs_put_page(ipage, 1);
- goto out_unlock;
+ return err;
}
f2fs_put_page(ipage, 1);
err = dquot_initialize(inode);
if (err)
- goto out_unlock;
+ return err;
- transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
- if (!IS_ERR(transfer_to[PRJQUOTA])) {
- err = __dquot_transfer(inode, transfer_to);
- dqput(transfer_to[PRJQUOTA]);
- if (err)
- goto out_dirty;
- }
+ f2fs_lock_op(sbi);
+ err = f2fs_transfer_project_quota(inode, kprojid);
+ if (err)
+ goto out_unlock;
F2FS_I(inode)->i_projid = kprojid;
inode->i_ctime = current_time(inode);
-out_dirty:
f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
- inode_unlock(inode);
- mnt_drop_write_file(filp);
+ f2fs_unlock_op(sbi);
return err;
}
#else
+int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
+{
+ return 0;
+}
+
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
if (projid != F2FS_DEF_PROJID)
@@ -2736,6 +2766,30 @@ static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
return 0;
}
+static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+{
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() == &init_user_ns)
+ return 0;
+
+ if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
+ return -EINVAL;
+
+ if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
+ if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+ return -EINVAL;
+ } else {
+ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -2763,19 +2817,20 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
return err;
inode_lock(inode);
+ err = f2fs_ioctl_check_project(inode, &fa);
+ if (err)
+ goto out;
flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
(flags & F2FS_FL_XFLAG_VISIBLE);
err = __f2fs_ioc_setflags(inode, flags);
- inode_unlock(inode);
- mnt_drop_write_file(filp);
if (err)
- return err;
+ goto out;
err = f2fs_ioc_setproject(filp, fa.fsx_projid);
- if (err)
- return err;
-
- return 0;
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return err;
}
int f2fs_pin_file_control(struct inode *inode, bool inc)
@@ -2992,7 +3047,8 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!f2fs_overwrite_io(inode, iocb->ki_pos,
iov_iter_count(from)) ||
f2fs_has_inline_data(inode) ||
- f2fs_force_buffered_io(inode, WRITE)) {
+ f2fs_force_buffered_io(inode,
+ iocb, from)) {
clear_inode_flag(inode,
FI_NO_PREALLOC);
inode_unlock(inode);
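
The final file.c hunk widens the force-buffered-I/O test so it now sees the iocb and the iov_iter rather than a bare read/write flag; the surrounding logic is still "skip preallocation only when the direct write purely overwrites allocated blocks and nothing forces a buffered fallback". A standalone sketch of that decision shape, with invented predicate names standing in for the real f2fs helpers, looks like:

#include <stdbool.h>

/* Each field models one of the checks in the hunk above. */
struct direct_write_ctx {
	bool overwrites_allocated_blocks;   /* f2fs_overwrite_io() */
	bool has_inline_data;               /* f2fs_has_inline_data() */
	bool forced_to_buffered;            /* f2fs_force_buffered_io(inode, iocb, iter) */
};

/* True when the fast path may skip block preallocation entirely. */
static bool can_skip_preallocation(const struct direct_write_ctx *ctx)
{
	return ctx->overwrites_allocated_blocks &&
	       !ctx->has_inline_data &&
	       !ctx->forced_to_buffered;
}
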
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 5c8d00422237..a07241fb8537 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/gc.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/module.h>
@@ -43,13 +40,16 @@ static int gc_thread_func(void *data)
if (gc_th->gc_wake)
gc_th->gc_wake = 0;
- if (try_to_freeze())
+ if (try_to_freeze()) {
+ stat_other_skip_bggc_count(sbi);
continue;
+ }
if (kthread_should_stop())
break;
if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
increase_sleep_time(gc_th, &wait_ms);
+ stat_other_skip_bggc_count(sbi);
continue;
}
@@ -58,8 +58,10 @@ static int gc_thread_func(void *data)
f2fs_stop_checkpoint(sbi, false);
}
- if (!sb_start_write_trylock(sbi->sb))
+ if (!sb_start_write_trylock(sbi->sb)) {
+ stat_other_skip_bggc_count(sbi);
continue;
+ }
/*
* [GC triggering condition]
@@ -80,12 +82,15 @@ static int gc_thread_func(void *data)
goto do_gc;
}
- if (!mutex_trylock(&sbi->gc_mutex))
+ if (!mutex_trylock(&sbi->gc_mutex)) {
+ stat_other_skip_bggc_count(sbi);
goto next;
+ }
- if (!is_idle(sbi)) {
+ if (!is_idle(sbi, GC_TIME)) {
increase_sleep_time(gc_th, &wait_ms);
mutex_unlock(&sbi->gc_mutex);
+ stat_io_skip_bggc_count(sbi);
goto next;
}
@@ -365,6 +370,10 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
if (sec_usage_check(sbi, secno))
goto next;
+ /* Don't touch checkpointed data */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ get_ckpt_valid_blocks(sbi, segno)))
+ goto next;
if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
goto next;
@@ -464,7 +473,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
* On validity, copy that node with cold status, otherwise (invalid node)
* ignore that.
*/
-static void gc_node_segment(struct f2fs_sb_info *sbi,
+static int gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
struct f2fs_summary *entry;
@@ -472,6 +481,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
int off;
int phase = 0;
bool fggc = (gc_type == FG_GC);
+ int submitted = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -485,10 +495,11 @@ next_step:
nid_t nid = le32_to_cpu(entry->nid);
struct page *node_page;
struct node_info ni;
+ int err;
/* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
- return;
+ return submitted;
if (check_valid_map(sbi, segno, off) == 0)
continue;
@@ -525,7 +536,9 @@ next_step:
continue;
}
- f2fs_move_node_page(node_page, gc_type);
+ err = f2fs_move_node_page(node_page, gc_type);
+ if (!err && gc_type == FG_GC)
+ submitted++;
stat_inc_node_blk_count(sbi, 1, gc_type);
}
@@ -534,6 +547,7 @@ next_step:
if (fggc)
atomic_dec(&sbi->wb_sync_req[NODE]);
+ return submitted;
}
/*
@@ -669,7 +683,7 @@ put_page:
* Move data block via META_MAPPING while keeping locked data page.
* This can be used to move blocks, aka LBAs, directly on disk.
*/
-static void move_data_block(struct inode *inode, block_t bidx,
+static int move_data_block(struct inode *inode, block_t bidx,
int gc_type, unsigned int segno, int off)
{
struct f2fs_io_info fio = {
@@ -688,25 +702,29 @@ static void move_data_block(struct inode *inode, block_t bidx,
struct node_info ni;
struct page *page, *mpage;
block_t newaddr;
- int err;
+ int err = 0;
bool lfs_mode = test_opt(fio.sbi, LFS);
/* do not read out */
page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
if (!page)
- return;
+ return -ENOMEM;
- if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
+ err = -ENOENT;
goto out;
+ }
if (f2fs_is_atomic_file(inode)) {
F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
+ err = -EAGAIN;
goto out;
}
if (f2fs_is_pinned_file(inode)) {
f2fs_pin_file_control(inode, true);
+ err = -EAGAIN;
goto out;
}
@@ -717,6 +735,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
ClearPageUptodate(page);
+ err = -ENOENT;
goto put_out;
}
@@ -799,6 +818,7 @@ write_page:
fio.new_blkaddr = newaddr;
f2fs_submit_page_write(&fio);
if (fio.retry) {
+ err = -EAGAIN;
if (PageWriteback(fio.encrypted_page))
end_page_writeback(fio.encrypted_page);
goto put_page_out;
@@ -822,34 +842,42 @@ put_out:
f2fs_put_dnode(&dn);
out:
f2fs_put_page(page, 1);
+ return err;
}
-static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
+static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
unsigned int segno, int off)
{
struct page *page;
+ int err = 0;
page = f2fs_get_lock_data_page(inode, bidx, true);
if (IS_ERR(page))
- return;
+ return PTR_ERR(page);
- if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
+ err = -ENOENT;
goto out;
+ }
if (f2fs_is_atomic_file(inode)) {
F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
+ err = -EAGAIN;
goto out;
}
if (f2fs_is_pinned_file(inode)) {
if (gc_type == FG_GC)
f2fs_pin_file_control(inode, true);
+ err = -EAGAIN;
goto out;
}
if (gc_type == BG_GC) {
- if (PageWriteback(page))
+ if (PageWriteback(page)) {
+ err = -EAGAIN;
goto out;
+ }
set_page_dirty(page);
set_cold_data(page);
} else {
@@ -867,7 +895,6 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
.io_type = FS_GC_DATA_IO,
};
bool is_dirty = PageDirty(page);
- int err;
retry:
set_page_dirty(page);
@@ -892,6 +919,7 @@ retry:
}
out:
f2fs_put_page(page, 1);
+ return err;
}
/*
@@ -901,7 +929,7 @@ out:
* If the parent node is not valid or the data block address is different,
* the victim data block is ignored.
*/
-static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
struct super_block *sb = sbi->sb;
@@ -909,6 +937,7 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
block_t start_addr;
int off;
int phase = 0;
+ int submitted = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -925,7 +954,7 @@ next_step:
/* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
- return;
+ return submitted;
if (check_valid_map(sbi, segno, off) == 0)
continue;
@@ -997,6 +1026,7 @@ next_step:
if (inode) {
struct f2fs_inode_info *fi = F2FS_I(inode);
bool locked = false;
+ int err;
if (S_ISREG(inode->i_mode)) {
if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
@@ -1016,12 +1046,16 @@ next_step:
start_bidx = f2fs_start_bidx_of_node(nofs, inode)
+ ofs_in_node;
if (f2fs_post_read_required(inode))
- move_data_block(inode, start_bidx, gc_type,
- segno, off);
+ err = move_data_block(inode, start_bidx,
+ gc_type, segno, off);
else
- move_data_page(inode, start_bidx, gc_type,
+ err = move_data_page(inode, start_bidx, gc_type,
segno, off);
+ if (!err && (gc_type == FG_GC ||
+ f2fs_post_read_required(inode)))
+ submitted++;
+
if (locked) {
up_write(&fi->i_gc_rwsem[WRITE]);
up_write(&fi->i_gc_rwsem[READ]);
@@ -1033,6 +1067,8 @@ next_step:
if (++phase < 5)
goto next_step;
+
+ return submitted;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -1060,6 +1096,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
int seg_freed = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
+ int submitted = 0;
/* readahead multi ssa blocks those have contiguous address */
if (sbi->segs_per_sec > 1)
@@ -1069,6 +1106,18 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
/* reference all summary page */
while (segno < end_segno) {
sum_page = f2fs_get_sum_page(sbi, segno++);
+ if (IS_ERR(sum_page)) {
+ int err = PTR_ERR(sum_page);
+
+ end_segno = segno - 1;
+ for (segno = start_segno; segno < end_segno; segno++) {
+ sum_page = find_get_page(META_MAPPING(sbi),
+ GET_SUM_BLOCK(sbi, segno));
+ f2fs_put_page(sum_page, 0);
+ f2fs_put_page(sum_page, 0);
+ }
+ return err;
+ }
unlock_page(sum_page);
}
@@ -1103,10 +1152,11 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
* - lock_page(sum_page)
*/
if (type == SUM_TYPE_NODE)
- gc_node_segment(sbi, sum->entries, segno, gc_type);
- else
- gc_data_segment(sbi, sum->entries, gc_list, segno,
+ submitted += gc_node_segment(sbi, sum->entries, segno,
gc_type);
+ else
+ submitted += gc_data_segment(sbi, sum->entries, gc_list,
+ segno, gc_type);
stat_inc_seg_count(sbi, type, gc_type);
@@ -1117,7 +1167,7 @@ next:
f2fs_put_page(sum_page, 0);
}
- if (gc_type == FG_GC)
+ if (submitted)
f2fs_submit_merged_write(sbi,
(type == SUM_TYPE_NODE) ? NODE : DATA);
@@ -1172,7 +1222,8 @@ gc_more:
* threshold, we can make them free by checkpoint. Then, we
* secure free segments which don't need fggc any more.
*/
- if (prefree_segments(sbi)) {
+ if (prefree_segments(sbi) &&
+ !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
@@ -1224,7 +1275,7 @@ gc_more:
segno = NULL_SEGNO;
goto gc_more;
}
- if (gc_type == FG_GC)
+ if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
ret = f2fs_write_checkpoint(sbi, &cpc);
}
stop:
@@ -1244,7 +1295,7 @@ stop:
put_gc_inode(&gc_list);
- if (sync)
+ if (sync && !ret)
ret = sec_freed ? 0 : -EAGAIN;
return ret;
}
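
The gc.c changes above convert gc_node_segment(), gc_data_segment(), move_data_block() and move_data_page() from void to int so that do_garbage_collect() can count how many pages were actually queued for writeback and flush the merged write bio only when that count is non-zero. The following self-contained sketch, with toy stand-ins for the moving and flushing routines, shows the shape of that pattern:

#include <stddef.h>
#include <stdio.h>

/* Toy stand-in: pretend even-numbered blocks are movable, odd ones are skipped. */
static int move_one_block(size_t idx)
{
	return (idx % 2 == 0) ? 0 : -1;
}

static void flush_merged_writes(void)
{
	puts("flushing merged write bio");
}

/* Mirrors the new return value of gc_*_segment(): number of submitted pages. */
static int gc_segment(size_t nr_blocks)
{
	int submitted = 0;

	for (size_t i = 0; i < nr_blocks; i++)
		if (move_one_block(i) == 0)
			submitted++;
	return submitted;
}

int main(void)
{
	if (gc_segment(8))      /* previously this flush was gated on FG_GC only */
		flush_merged_writes();
	return 0;
}
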
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index c8619e408009..bbac9d3787bd 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/gc.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define GC_THREAD_MIN_WB_PAGES 1 /*
* a threshold to determine
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index eb2e031ea887..cc82f142f811 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/hash.c
*
@@ -7,10 +8,6 @@
* Portions of this code from linux/fs/ext3/hash.c
*
* Copyright (C) 2002 by Theodore Ts'o
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/fs.h>
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 115dc219344b..7b0cff7e6051 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/inline.c
* Copyright (c) 2013, Intel Corporation
* Authors: Huajun Li <huajun.li@intel.com>
* Haicheng Li <haicheng.li@intel.com>
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
@@ -245,7 +243,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
kunmap_atomic(src_addr);
set_page_dirty(dn.inode_page);
- f2fs_clear_radix_tree_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(page);
set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(inode, FI_DATA_EXIST);
@@ -300,7 +298,7 @@ process_inline:
clear_inode_flag(inode, FI_INLINE_DATA);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
- if (f2fs_truncate_blocks(inode, 0, false))
+ if (f2fs_truncate_blocks(inode, 0, false, false))
return false;
goto process_inline;
}
@@ -472,7 +470,7 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
return 0;
punch_dentry_pages:
truncate_inode_pages(&dir->i_data, 0);
- f2fs_truncate_blocks(dir, 0, false);
+ f2fs_truncate_blocks(dir, 0, false, false);
f2fs_remove_dirty_inode(dir);
return err;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 959df2249875..91ceee0ed4c4 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/inode.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -368,6 +365,12 @@ static int do_read_inode(struct inode *inode)
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
+ /* try to recover cold bit for non-dir inode */
+ if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
+ set_cold_node(node_page, false);
+ set_page_dirty(node_page);
+ }
+
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
@@ -610,6 +613,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
+ if (f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
+
/*
* We need to balance fs here to prevent from producing dirty node pages
* during the urgent cleaning time when running out of free sections.
@@ -648,7 +654,11 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
- dquot_initialize(inode);
+ err = dquot_initialize(inode);
+ if (err) {
+ err = 0;
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ }
f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
@@ -680,9 +690,10 @@ retry:
goto retry;
}
- if (err)
+ if (err) {
f2fs_update_inode_page(inode);
- dquot_free_inode(inode);
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ }
sb_end_intwrite(inode->i_sb);
no_delete:
dquot_drop(inode);
@@ -691,7 +702,8 @@ no_delete:
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
- if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
+ if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG) &&
+ !is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
else
f2fs_inode_synced(inode);
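
In the f2fs_evict_inode() hunk above, a failed dquot_initialize() or dquot_free_inode() no longer aborts eviction; the error is swallowed and SBI_QUOTA_NEED_REPAIR is raised so the quota files can be repaired later. A small sketch of that "make progress, but remember the damage" pattern; all names below are illustrative, not kernel API.

#include <stdbool.h>

struct sb_state {
	bool quota_need_repair;     /* models SBI_QUOTA_NEED_REPAIR */
};

/* Stand-in for dquot_initialize(); pretend it failed. */
static int init_quota(void)
{
	return -1;
}

static void evict_inode(struct sb_state *sb)
{
	if (init_quota() != 0) {
		/* eviction must still complete, so record the inconsistency
		 * instead of propagating the error to the caller */
		sb->quota_need_repair = true;
	}
	/* ... continue tearing down the inode ... */
}
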
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 1f67e389169f..99299ede7429 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/namei.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -19,6 +16,7 @@
#include "f2fs.h"
#include "node.h"
+#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/f2fs.h>
@@ -74,10 +72,6 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (err)
goto fail_drop;
- err = dquot_alloc_inode(inode);
- if (err)
- goto fail_drop;
-
set_inode_flag(inode, FI_NEW_INODE);
/* If the directory encrypted, then we should encrypt the inode. */
@@ -124,6 +118,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
+ f2fs_set_inode_flags(inode);
+
trace_f2fs_new_inode(inode, 0);
return inode;
@@ -184,16 +180,19 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
hot_count = sbi->raw_super->hot_ext_count;
for (i = 0; i < cold_count + hot_count; i++) {
- if (!is_extension_exist(name, extlist[i]))
- continue;
- if (i < cold_count)
- file_set_cold(inode);
- else
- file_set_hot(inode);
- break;
+ if (is_extension_exist(name, extlist[i]))
+ break;
}
up_read(&sbi->sb_lock);
+
+ if (i == cold_count + hot_count)
+ return;
+
+ if (i < cold_count)
+ file_set_cold(inode);
+ else
+ file_set_hot(inode);
}
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
@@ -272,6 +271,9 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
err = dquot_initialize(dir);
if (err)
@@ -318,6 +320,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
err = fscrypt_prepare_link(old_dentry, dir, dentry);
if (err)
@@ -564,6 +569,9 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize,
&disk_link);
@@ -693,6 +701,9 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
err = dquot_initialize(dir);
if (err)
@@ -823,10 +834,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
bool is_old_inline = f2fs_has_inline_dentry(old_dir);
- int err = -ENOENT;
+ int err;
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(new_dir)->i_projid,
@@ -847,6 +861,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
}
+ err = -ENOENT;
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_entry) {
if (IS_ERR(old_page))
@@ -983,6 +998,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
put_out_dir:
@@ -1012,10 +1029,13 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL;
struct f2fs_dir_entry *old_entry, *new_entry;
int old_nlink = 0, new_nlink = 0;
- int err = -ENOENT;
+ int err;
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(new_dir)->i_projid,
@@ -1033,6 +1053,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto out;
+ err = -ENOENT;
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_entry) {
if (IS_ERR(old_page))
@@ -1136,6 +1157,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
out_new_dir:
if (new_dir_entry) {
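
The reworked set_file_temperature() above scans the combined cold+hot extension list while holding sb_lock, remembers only the first matching index, and decides cold vs. hot after the lock is dropped. A compilable user-space model of that loop, with a simplified suffix match standing in for is_extension_exist(), is:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

enum temp { TEMP_NONE, TEMP_COLD, TEMP_HOT };

/* Crude ".ext" suffix check; the real is_extension_exist() is more careful. */
static bool has_ext(const char *name, const char *ext)
{
	size_t n = strlen(name), e = strlen(ext);

	return n > e && name[n - e - 1] == '.' &&
	       strcmp(name + n - e, ext) == 0;
}

static enum temp classify(const char *name, const char *const *extlist,
			  size_t cold_count, size_t hot_count)
{
	size_t i;

	for (i = 0; i < cold_count + hot_count; i++)
		if (has_ext(name, extlist[i]))
			break;          /* only remember the matching index here */

	if (i == cold_count + hot_count)
		return TEMP_NONE;       /* no temperature hint for this name */
	return i < cold_count ? TEMP_COLD : TEMP_HOT;
}
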
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index dd2e45a661aa..d338740d0fda 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/node.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -104,7 +101,7 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
static void clear_node_page_dirty(struct page *page)
{
if (PageDirty(page)) {
- f2fs_clear_radix_tree_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(page);
clear_page_dirty_for_io(page);
dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
}
@@ -129,6 +126,8 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
/* get current nat block page with lock */
src_page = get_current_nat_page(sbi, nid);
+ if (IS_ERR(src_page))
+ return src_page;
dst_page = f2fs_grab_meta_page(sbi, dst_off);
f2fs_bug_on(sbi, PageDirty(src_page));
@@ -1307,9 +1306,7 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
if (f2fs_check_nid_range(sbi, nid))
return;
- rcu_read_lock();
- apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid);
- rcu_read_unlock();
+ apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
if (apage)
return;
@@ -1542,8 +1539,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
}
if (__is_valid_data_blkaddr(ni.blk_addr) &&
- !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
+ up_read(&sbi->node_write);
goto redirty_out;
+ }
if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
@@ -1564,8 +1563,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
up_read(&sbi->node_write);
if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
- page->index, NODE);
+ f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
submitted = NULL;
}
@@ -1587,8 +1585,10 @@ redirty_out:
return AOP_WRITEPAGE_ACTIVATE;
}
-void f2fs_move_node_page(struct page *node_page, int gc_type)
+int f2fs_move_node_page(struct page *node_page, int gc_type)
{
+ int err = 0;
+
if (gc_type == FG_GC) {
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
@@ -1600,12 +1600,16 @@ void f2fs_move_node_page(struct page *node_page, int gc_type)
f2fs_wait_on_page_writeback(node_page, NODE, true);
f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
- if (!clear_page_dirty_for_io(node_page))
+ if (!clear_page_dirty_for_io(node_page)) {
+ err = -EAGAIN;
goto out_page;
+ }
if (__write_node_page(node_page, false, NULL,
- &wbc, false, FS_GC_NODE_IO, NULL))
+ &wbc, false, FS_GC_NODE_IO, NULL)) {
+ err = -EAGAIN;
unlock_page(node_page);
+ }
goto release_page;
} else {
/* set page dirty and write it */
@@ -1616,6 +1620,7 @@ out_page:
unlock_page(node_page);
release_page:
f2fs_put_page(node_page, 0);
+ return err;
}
static int f2fs_write_node_page(struct page *page,
@@ -1630,13 +1635,13 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
unsigned int *seq_id)
{
pgoff_t index;
- pgoff_t last_idx = ULONG_MAX;
struct pagevec pvec;
int ret = 0;
struct page *last_page = NULL;
bool marked = false;
nid_t ino = inode->i_ino;
int nr_pages;
+ int nwritten = 0;
if (atomic) {
last_page = last_fsync_dnode(sbi, ino);
@@ -1714,7 +1719,7 @@ continue_unlock:
f2fs_put_page(last_page, 0);
break;
} else if (submitted) {
- last_idx = page->index;
+ nwritten++;
}
if (page == last_page) {
@@ -1740,8 +1745,8 @@ continue_unlock:
goto retry;
}
out:
- if (last_idx != ULONG_MAX)
- f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
+ if (nwritten)
+ f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
return ret ? -EIO: 0;
}
@@ -2268,15 +2273,19 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
nm_i->nat_block_bitmap)) {
struct page *page = get_current_nat_page(sbi, nid);
- ret = scan_nat_page(sbi, page, nid);
- f2fs_put_page(page, 1);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ } else {
+ ret = scan_nat_page(sbi, page, nid);
+ f2fs_put_page(page, 1);
+ }
if (ret) {
up_read(&nm_i->nat_tree_lock);
f2fs_bug_on(sbi, !mount);
f2fs_msg(sbi->sb, KERN_ERR,
"NAT is corrupt, run fsck to fix it");
- return -EINVAL;
+ return ret;
}
}
@@ -2353,8 +2362,9 @@ retry:
spin_unlock(&nm_i->nid_list_lock);
/* Let's scan nat pages and their caches to get free nids */
- f2fs_build_free_nids(sbi, true, false);
- goto retry;
+ if (!f2fs_build_free_nids(sbi, true, false))
+ goto retry;
+ return false;
}
/*
@@ -2537,7 +2547,7 @@ retry:
if (!PageUptodate(ipage))
SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
- set_cold_node(page, false);
+ set_cold_node(ipage, false);
src = F2FS_INODE(page);
dst = F2FS_INODE(ipage);
@@ -2560,6 +2570,13 @@ retry:
F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
i_projid))
dst->i_projid = src->i_projid;
+
+ if (f2fs_sb_has_inode_crtime(sbi->sb) &&
+ F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
+ i_crtime_nsec)) {
+ dst->i_crtime = src->i_crtime;
+ dst->i_crtime_nsec = src->i_crtime_nsec;
+ }
}
new_ni = old_ni;
@@ -2703,7 +2720,7 @@ static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
__clear_bit_le(nat_index, nm_i->full_nat_bits);
}
-static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
struct nat_entry_set *set, struct cp_control *cpc)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -2727,6 +2744,9 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
down_write(&curseg->journal_rwsem);
} else {
page = get_next_nat_page(sbi, start_nid);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
nat_blk = page_address(page);
f2fs_bug_on(sbi, !nat_blk);
}
@@ -2772,12 +2792,13 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
kmem_cache_free(nat_entry_set_slab, set);
}
+ return 0;
}
/*
* This function is called during the checkpointing process.
*/
-void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -2787,6 +2808,7 @@ void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned int found;
nid_t set_idx = 0;
LIST_HEAD(sets);
+ int err = 0;
/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
if (enabled_nat_bits(sbi, cpc)) {
@@ -2796,7 +2818,7 @@ void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
if (!nm_i->dirty_nat_cnt)
- return;
+ return 0;
down_write(&nm_i->nat_tree_lock);
@@ -2819,11 +2841,16 @@ void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
/* flush dirty nats in nat entry set */
- list_for_each_entry_safe(set, tmp, &sets, set_list)
- __flush_nat_entry_set(sbi, set, cpc);
+ list_for_each_entry_safe(set, tmp, &sets, set_list) {
+ err = __flush_nat_entry_set(sbi, set, cpc);
+ if (err)
+ break;
+ }
up_write(&nm_i->nat_tree_lock);
/* Allow dirty nats by node block allocation in write_begin */
+
+ return err;
}
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
@@ -2850,10 +2877,8 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
struct page *page;
page = f2fs_get_meta_page(sbi, nat_bits_addr++);
- if (IS_ERR(page)) {
- disable_nat_bits(sbi, true);
+ if (IS_ERR(page))
return PTR_ERR(page);
- }
memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
page_address(page), F2FS_BLKSIZE);
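
Several node.c hunks above stop assuming that get_current_nat_page()/get_next_nat_page() always succeed and instead hand an ERR_PTR back to the caller, which now checks it before touching the page. A toy user-space model of that calling convention follows; the real kernel uses ERR_PTR()/IS_ERR() from <linux/err.h>, so the encoding here is only an approximation.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Encode small negative errnos in the pointer value, kernel-style. */
static inline void *err_ptr(long err)        { return (void *)err; }
static inline int   is_err(const void *ptr)  { return (uintptr_t)ptr >= (uintptr_t)-4095; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }

/* Stand-in for get_current_nat_page(): may fail with -EIO. */
static void *get_nat_page(int simulate_failure)
{
	static char page[4096];

	return simulate_failure ? err_ptr(-EIO) : page;
}

int main(void)
{
	void *page = get_nat_page(1);

	if (is_err(page)) {     /* callers now bail out instead of dereferencing */
		fprintf(stderr, "NAT page read failed: %ld\n", ptr_err(page));
		return 1;
	}
	return 0;
}
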
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 0f4db7a61254..1c73d879a9bc 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/node.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 95511ed11a22..1dfb17f9f9ff 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/recovery.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -99,8 +96,12 @@ err_out:
return ERR_PTR(err);
}
-static void del_fsync_inode(struct fsync_inode_entry *entry)
+static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
+ if (drop) {
+ /* inode should not be recovered, drop it */
+ f2fs_inode_synced(entry->inode);
+ }
iput(entry->inode);
list_del(&entry->list);
kmem_cache_free(fsync_entry_slab, entry);
@@ -194,6 +195,33 @@ out:
return err;
}
+static int recover_quota_data(struct inode *inode, struct page *page)
+{
+ struct f2fs_inode *raw = F2FS_INODE(page);
+ struct iattr attr;
+ uid_t i_uid = le32_to_cpu(raw->i_uid);
+ gid_t i_gid = le32_to_cpu(raw->i_gid);
+ int err;
+
+ memset(&attr, 0, sizeof(attr));
+
+ attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
+ attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);
+
+ if (!uid_eq(attr.ia_uid, inode->i_uid))
+ attr.ia_valid |= ATTR_UID;
+ if (!gid_eq(attr.ia_gid, inode->i_gid))
+ attr.ia_valid |= ATTR_GID;
+
+ if (!attr.ia_valid)
+ return 0;
+
+ err = dquot_transfer(inode, &attr);
+ if (err)
+ set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
+ return err;
+}
+
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
if (ri->i_inline & F2FS_PIN_FILE)
@@ -206,12 +234,41 @@ static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
clear_inode_flag(inode, FI_DATA_EXIST);
}
-static void recover_inode(struct inode *inode, struct page *page)
+static int recover_inode(struct inode *inode, struct page *page)
{
struct f2fs_inode *raw = F2FS_INODE(page);
char *name;
+ int err;
inode->i_mode = le16_to_cpu(raw->i_mode);
+
+ err = recover_quota_data(inode, page);
+ if (err)
+ return err;
+
+ i_uid_write(inode, le32_to_cpu(raw->i_uid));
+ i_gid_write(inode, le32_to_cpu(raw->i_gid));
+
+ if (raw->i_inline & F2FS_EXTRA_ATTR) {
+ if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+ F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
+ i_projid)) {
+ projid_t i_projid;
+ kprojid_t kprojid;
+
+ i_projid = (projid_t)le32_to_cpu(raw->i_projid);
+ kprojid = make_kprojid(&init_user_ns, i_projid);
+
+ if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
+ err = f2fs_transfer_project_quota(inode,
+ kprojid);
+ if (err)
+ return err;
+ F2FS_I(inode)->i_projid = kprojid;
+ }
+ }
+ }
+
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
@@ -221,9 +278,15 @@ static void recover_inode(struct inode *inode, struct page *page)
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
F2FS_I(inode)->i_advise = raw->i_advise;
+ F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
+ f2fs_set_inode_flags(inode);
+ F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
+ le16_to_cpu(raw->i_gc_failures);
recover_inline_flags(inode, raw);
+ f2fs_mark_inode_dirty_sync(inode, true);
+
if (file_enc_name(inode))
name = "<encrypted>";
else
@@ -232,6 +295,7 @@ static void recover_inode(struct inode *inode, struct page *page)
f2fs_msg(inode->i_sb, KERN_NOTICE,
"recover_inode: ino = %x, name = %s, inline = %x",
ino_of_node(page), name, raw->i_inline);
+ return 0;
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
@@ -320,12 +384,12 @@ next:
return err;
}
-static void destroy_fsync_dnodes(struct list_head *head)
+static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
struct fsync_inode_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, head, list)
- del_fsync_inode(entry);
+ del_fsync_inode(entry, drop);
}
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
@@ -358,6 +422,8 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
}
sum_page = f2fs_get_sum_page(sbi, segno);
+ if (IS_ERR(sum_page))
+ return PTR_ERR(sum_page);
sum_node = (struct f2fs_summary_block *)page_address(sum_page);
sum = sum_node->entries[blkoff];
f2fs_put_page(sum_page, 1);
@@ -560,7 +626,7 @@ out:
}
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
- struct list_head *dir_list)
+ struct list_head *tmp_inode_list, struct list_head *dir_list)
{
struct curseg_info *curseg;
struct page *page = NULL;
@@ -598,8 +664,11 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
- if (IS_INODE(page))
- recover_inode(entry->inode, page);
+ if (IS_INODE(page)) {
+ err = recover_inode(entry->inode, page);
+ if (err)
+ break;
+ }
if (entry->last_dentry == blkaddr) {
err = recover_dentry(entry->inode, page, dir_list);
if (err) {
@@ -614,7 +683,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
}
if (entry->blkaddr == blkaddr)
- del_fsync_inode(entry);
+ list_move_tail(&entry->list, tmp_inode_list);
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
@@ -627,7 +696,7 @@ next:
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
- struct list_head inode_list;
+ struct list_head inode_list, tmp_inode_list;
struct list_head dir_list;
int err;
int ret = 0;
@@ -658,6 +727,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
}
INIT_LIST_HEAD(&inode_list);
+ INIT_LIST_HEAD(&tmp_inode_list);
INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */
@@ -676,11 +746,16 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
need_writecp = true;
/* step #2: recover data */
- err = recover_data(sbi, &inode_list, &dir_list);
+ err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
+ else {
+ /* restore s_flags to let iput() trash data */
+ sbi->sb->s_flags = s_flags;
+ }
skip:
- destroy_fsync_dnodes(&inode_list);
+ destroy_fsync_dnodes(&inode_list, err);
+ destroy_fsync_dnodes(&tmp_inode_list, err);
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
@@ -689,19 +764,23 @@ skip:
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
truncate_inode_pages_final(META_MAPPING(sbi));
+ } else {
+ clear_sbi_flag(sbi, SBI_POR_DOING);
}
-
- clear_sbi_flag(sbi, SBI_POR_DOING);
mutex_unlock(&sbi->cp_mutex);
/* let's drop all the directory inodes for clean checkpoint */
- destroy_fsync_dnodes(&dir_list);
+ destroy_fsync_dnodes(&dir_list, err);
- if (!err && need_writecp) {
- struct cp_control cpc = {
- .reason = CP_RECOVERY,
- };
- err = f2fs_write_checkpoint(sbi, &cpc);
+ if (need_writecp) {
+ set_sbi_flag(sbi, SBI_IS_RECOVERED);
+
+ if (!err) {
+ struct cp_control cpc = {
+ .reason = CP_RECOVERY,
+ };
+ err = f2fs_write_checkpoint(sbi, &cpc);
+ }
}
kmem_cache_destroy(fsync_entry_slab);
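
recover_quota_data(), added above, only builds an ATTR_UID/ATTR_GID change set when the uid/gid stored in the recovered inode actually differ from the in-core values, and skips dquot_transfer() entirely when nothing changed. The idea reduces to a small mask computation; here is a standalone version with invented names, not the kernel interfaces.

#include <stdbool.h>

#define CHG_UID 0x1         /* models ATTR_UID */
#define CHG_GID 0x2         /* models ATTR_GID */

struct owner_ids {
	unsigned int uid, gid;
};

/* Returns the set of ids that must be transferred; 0 means no quota work. */
static unsigned int quota_change_mask(const struct owner_ids *on_disk,
				      const struct owner_ids *in_core)
{
	unsigned int mask = 0;

	if (on_disk->uid != in_core->uid)
		mask |= CHG_UID;
	if (on_disk->gid != in_core->gid)
		mask |= CHG_GID;
	return mask;
}
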
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 30779aaa9dba..6edcf8391dd3 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/segment.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -179,6 +176,8 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
return false;
if (sbi->gc_mode == GC_URGENT)
return true;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return true;
return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
@@ -267,8 +266,10 @@ retry:
}
next:
/* we don't need to invalidate this in the successful status */
- if (drop || recover)
+ if (drop || recover) {
ClearPageUptodate(page);
+ clear_cold_data(page);
+ }
set_page_private(page, 0);
ClearPagePrivate(page);
f2fs_put_page(page, 1);
@@ -374,7 +375,7 @@ static int __f2fs_commit_inmem_pages(struct inode *inode)
.io_type = FS_DATA_IO,
};
struct list_head revoke_list;
- pgoff_t last_idx = ULONG_MAX;
+ bool submit_bio = false;
int err = 0;
INIT_LIST_HEAD(&revoke_list);
@@ -409,14 +410,14 @@ retry:
}
/* record old blkaddr for revoking */
cur->old_addr = fio.old_blkaddr;
- last_idx = page->index;
+ submit_bio = true;
}
unlock_page(page);
list_move_tail(&cur->list, &revoke_list);
}
- if (last_idx != ULONG_MAX)
- f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);
+ if (submit_bio)
+ f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
if (err) {
/*
@@ -483,6 +484,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
if (need && excess_cached_nats(sbi))
f2fs_balance_fs_bg(sbi);
+ if (f2fs_is_checkpoint_ready(sbi))
+ return;
+
/*
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
@@ -511,7 +515,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
else
f2fs_build_free_nids(sbi, false, false);
- if (!is_idle(sbi) &&
+ if (!is_idle(sbi, REQ_TIME) &&
(!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
return;
@@ -799,7 +803,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned short valid_blocks;
+ unsigned short valid_blocks, ckpt_valid_blocks;
if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
return;
@@ -807,8 +811,10 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_lock(&dirty_i->seglist_lock);
valid_blocks = get_valid_blocks(sbi, segno, false);
+ ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
- if (valid_blocks == 0) {
+ if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
+ ckpt_valid_blocks == sbi->blocks_per_seg)) {
__locate_dirty_segment(sbi, segno, PRE);
__remove_dirty_segment(sbi, segno, DIRTY);
} else if (valid_blocks < sbi->blocks_per_seg) {
@@ -821,6 +827,66 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_unlock(&dirty_i->seglist_lock);
}
+/* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */
+void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int segno;
+
+ mutex_lock(&dirty_i->seglist_lock);
+ for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+ if (get_valid_blocks(sbi, segno, false))
+ continue;
+ if (IS_CURSEG(sbi, segno))
+ continue;
+ __locate_dirty_segment(sbi, segno, PRE);
+ __remove_dirty_segment(sbi, segno, DIRTY);
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+}
+
+int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ block_t ovp = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
+ block_t holes[2] = {0, 0}; /* DATA and NODE */
+ struct seg_entry *se;
+ unsigned int segno;
+
+ mutex_lock(&dirty_i->seglist_lock);
+ for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+ se = get_seg_entry(sbi, segno);
+ if (IS_NODESEG(se->type))
+ holes[NODE] += sbi->blocks_per_seg - se->valid_blocks;
+ else
+ holes[DATA] += sbi->blocks_per_seg - se->valid_blocks;
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+
+ if (holes[DATA] > ovp || holes[NODE] > ovp)
+ return -EAGAIN;
+ return 0;
+}
+
+/* This is only used by SBI_CP_DISABLED */
+static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int segno = 0;
+
+ mutex_lock(&dirty_i->seglist_lock);
+ for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+ if (get_valid_blocks(sbi, segno, false))
+ continue;
+ if (get_ckpt_valid_blocks(sbi, segno))
+ continue;
+ mutex_unlock(&dirty_i->seglist_lock);
+ return segno;
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+ return NULL_SEGNO;
+}
+
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len)
@@ -856,7 +922,8 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len,
- struct rb_node *parent, struct rb_node **p)
+ struct rb_node *parent, struct rb_node **p,
+ bool leftmost)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *dc;
@@ -864,7 +931,7 @@ static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
rb_link_node(&dc->rb_node, parent, p);
- rb_insert_color(&dc->rb_node, &dcc->root);
+ rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
return dc;
}
@@ -876,7 +943,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc,
atomic_sub(dc->issuing, &dcc->issing_discard);
list_del(&dc->list);
- rb_erase(&dc->rb_node, &dcc->root);
+ rb_erase_cached(&dc->rb_node, &dcc->root);
dcc->undiscard_blks -= dc->len;
kmem_cache_free(discard_cmd_slab, dc);
@@ -905,9 +972,9 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
dc->error = 0;
if (dc->error)
- f2fs_msg(sbi->sb, KERN_INFO,
- "Issue discard(%u, %u, %u) failed, ret: %d",
- dc->lstart, dc->start, dc->len, dc->error);
+ printk_ratelimited(
+ "%sF2FS-fs: Issue discard(%u, %u, %u) failed, ret: %d",
+ KERN_INFO, dc->lstart, dc->start, dc->len, dc->error);
__detach_discard_cmd(dcc, dc);
}
@@ -1113,6 +1180,7 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
struct rb_node **p;
struct rb_node *parent = NULL;
struct discard_cmd *dc = NULL;
+ bool leftmost = true;
if (insert_p && insert_parent) {
parent = insert_parent;
@@ -1120,9 +1188,11 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
goto do_insert;
}
- p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
+ p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
+ lstart, &leftmost);
do_insert:
- dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
+ dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
+ p, leftmost);
if (!dc)
return NULL;
@@ -1190,7 +1260,7 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
NULL, lstart,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true);
+ &insert_p, &insert_parent, true, NULL);
if (dc)
prev_dc = dc;
@@ -1298,7 +1368,7 @@ static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
NULL, pos,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true);
+ &insert_p, &insert_parent, true, NULL);
if (!dc)
dc = next_dc;
@@ -1311,7 +1381,7 @@ static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
if (dc->state != D_PREP)
goto next;
- if (dpolicy->io_aware && !is_idle(sbi)) {
+ if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
io_interrupted = true;
break;
}
@@ -1371,7 +1441,7 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, dc->state != D_PREP);
if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
- !is_idle(sbi)) {
+ !is_idle(sbi, DISCARD_TIME)) {
io_interrupted = true;
break;
}
@@ -1600,7 +1670,9 @@ static int issue_discard_thread(void *data)
__wait_all_discard_cmd(sbi, &dpolicy);
wait_ms = dpolicy.min_interval;
} else if (issued == -1){
- wait_ms = dpolicy.mid_interval;
+ wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
+ if (!wait_ms)
+ wait_ms = dpolicy.mid_interval;
} else {
wait_ms = dpolicy.max_interval;
}
@@ -1725,11 +1797,11 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
int i;
- if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
+ if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
return false;
if (!force) {
- if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
+ if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
SM_I(sbi)->dcc_info->nr_discards >=
SM_I(sbi)->dcc_info->max_discards)
return false;
@@ -1835,7 +1907,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
dirty_i->nr_dirty[PRE]--;
}
- if (!test_opt(sbi, DISCARD))
+ if (!f2fs_realtime_discard_enable(sbi))
continue;
if (force && start >= cpc->trim_start &&
@@ -1928,7 +2000,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
dcc->undiscard_blks = 0;
dcc->next_pos = 0;
- dcc->root = RB_ROOT;
+ dcc->root = RB_ROOT_CACHED;
dcc->rbtree_check = false;
init_waitqueue_head(&dcc->discard_wait_queue);
@@ -2025,12 +2097,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
del = 0;
}
- if (f2fs_discard_en(sbi) &&
- !f2fs_test_and_set_bit(offset, se->discard_map))
+ if (!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
/* don't overwrite by SSR to keep node chain */
- if (IS_NODESEG(se->type)) {
+ if (IS_NODESEG(se->type) &&
+ !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks++;
}
@@ -2052,10 +2124,18 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
f2fs_bug_on(sbi, 1);
se->valid_blocks++;
del = 0;
+ } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ /*
+ * If checkpoints are off, we must not reuse data that
+ * was used in the previous checkpoint. If it was used
+ * before, we must track that to know how much space we
+ * really have.
+ */
+ if (f2fs_test_bit(offset, se->ckpt_valid_map))
+ sbi->unusable_block_count++;
}
- if (f2fs_discard_en(sbi) &&
- f2fs_test_and_clear_bit(offset, se->discard_map))
+ if (f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
@@ -2335,6 +2415,9 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
if (sbi->segs_per_sec != 1)
return CURSEG_I(sbi, type)->segno;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return 0;
+
if (test_opt(sbi, NOHEAP) &&
(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
return 0;
@@ -2432,6 +2515,7 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type)
__next_free_blkoff(sbi, curseg, 0);
sum_page = f2fs_get_sum_page(sbi, new_segno);
+ f2fs_bug_on(sbi, IS_ERR(sum_page));
sum_node = (struct f2fs_summary_block *)page_address(sum_page);
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
f2fs_put_page(sum_page, 1);
@@ -2478,6 +2562,15 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
return 1;
}
}
+
+ /* find valid_blocks=0 in dirty list */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ segno = get_free_segment(sbi);
+ if (segno != NULL_SEGNO) {
+ curseg->next_segno = segno;
+ return 1;
+ }
+ }
return 0;
}
@@ -2495,7 +2588,8 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
type == CURSEG_WARM_NODE)
new_curseg(sbi, type, false);
- else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
+ else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type) &&
+ likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
new_curseg(sbi, type, false);
else if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
change_curseg(sbi, type);
@@ -2570,7 +2664,7 @@ next:
NULL, start,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true);
+ &insert_p, &insert_parent, true, NULL);
if (!dc)
dc = next_dc;
@@ -2671,7 +2765,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
* discard option. User configuration looks like using runtime discard
* or periodic fstrim instead of it.
*/
- if (test_opt(sbi, DISCARD))
+ if (f2fs_realtime_discard_enable(sbi))
goto out;
start_block = START_BLOCK(sbi, start_segno);
@@ -3020,6 +3114,7 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
ClearPageError(page);
f2fs_submit_page_write(&fio);
+ stat_inc_meta_count(sbi, page->index);
f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
}
@@ -3182,8 +3277,7 @@ void f2fs_wait_on_page_writeback(struct page *page,
if (PageWriteback(page)) {
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
- f2fs_submit_merged_write_cond(sbi, page->mapping->host,
- 0, page->index, type);
+ f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
if (ordered)
wait_on_page_writeback(page);
else
@@ -3191,10 +3285,14 @@ void f2fs_wait_on_page_writeback(struct page *page,
}
}
-void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
+void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *cpage;
+ if (!f2fs_post_read_required(inode))
+ return;
+
if (!is_valid_data_blkaddr(sbi, blkaddr))
return;
@@ -3205,6 +3303,15 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
}
}
+void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
+ block_t len)
+{
+ block_t i;
+
+ for (i = 0; i < len; i++)
+ f2fs_wait_on_block_writeback(inode, blkaddr + i);
+}
+
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -3762,13 +3869,11 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
return -ENOMEM;
#endif
- if (f2fs_discard_en(sbi)) {
- sit_i->sentries[start].discard_map
- = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
- GFP_KERNEL);
- if (!sit_i->sentries[start].discard_map)
- return -ENOMEM;
- }
+ sit_i->sentries[start].discard_map
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
+ GFP_KERNEL);
+ if (!sit_i->sentries[start].discard_map)
+ return -ENOMEM;
}
sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
@@ -3904,6 +4009,8 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
se = &sit_i->sentries[start];
page = get_current_sit_page(sbi, start);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
sit_blk = (struct f2fs_sit_block *)page_address(page);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
@@ -3916,18 +4023,16 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
total_node_blocks += se->valid_blocks;
/* build discard map only one time */
- if (f2fs_discard_en(sbi)) {
- if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
- memset(se->discard_map, 0xff,
- SIT_VBLOCK_MAP_SIZE);
- } else {
- memcpy(se->discard_map,
- se->cur_valid_map,
- SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks +=
- sbi->blocks_per_seg -
- se->valid_blocks;
- }
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff,
+ SIT_VBLOCK_MAP_SIZE);
+ } else {
+ memcpy(se->discard_map,
+ se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks +=
+ sbi->blocks_per_seg -
+ se->valid_blocks;
}
if (sbi->segs_per_sec > 1)
@@ -3965,16 +4070,13 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
if (IS_NODESEG(se->type))
total_node_blocks += se->valid_blocks;
- if (f2fs_discard_en(sbi)) {
- if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
- memset(se->discard_map, 0xff,
- SIT_VBLOCK_MAP_SIZE);
- } else {
- memcpy(se->discard_map, se->cur_valid_map,
- SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks += old_valid_blocks;
- sbi->discard_blks -= se->valid_blocks;
- }
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
+ } else {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += old_valid_blocks;
+ sbi->discard_blks -= se->valid_blocks;
}
if (sbi->segs_per_sec > 1) {
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index b3d9e317ff0c..ab3465faddf1 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/segment.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
@@ -342,6 +339,12 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
return get_seg_entry(sbi, segno)->valid_blocks;
}
+static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+}
+
static inline void seg_info_from_raw_sit(struct seg_entry *se,
struct f2fs_sit_entry *rs)
{
@@ -579,6 +582,15 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
reserved_sections(sbi) + needed);
}
+static inline int f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
+{
+ if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return 0;
+ if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
+ return 0;
+ return -ENOSPC;
+}
+
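/*
 * Editor's sketch (not part of the patch; the caller shown is an assumption):
 * allocating write paths are expected to bail out early while checkpoints are
 * disabled and free sections run low, e.g.
 *
 *	err = f2fs_is_checkpoint_ready(sbi);
 *	if (err)
 *		return err;	// -ENOSPC until checkpoint=enable reclaims space
 */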
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 36cfd816c160..9e13db994fdf 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs shrinker support
* the basic infra was copied from fs/ubifs/shrinker.c
*
* Copyright (c) 2015 Motorola Mobility
* Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 896b885f504e..af58b2cc21b8 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/super.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -53,9 +50,10 @@ char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
[FAULT_TRUNCATE] = "truncate fail",
- [FAULT_IO] = "IO error",
+ [FAULT_READ_IO] = "read IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
[FAULT_DISCARD] = "discard error",
+ [FAULT_WRITE_IO] = "write IO error",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
@@ -138,6 +136,7 @@ enum {
Opt_alloc,
Opt_fsync,
Opt_test_dummy_encryption,
+ Opt_checkpoint,
Opt_err,
};
@@ -196,6 +195,7 @@ static match_table_t f2fs_tokens = {
{Opt_alloc, "alloc_mode=%s"},
{Opt_fsync, "fsync_mode=%s"},
{Opt_test_dummy_encryption, "test_dummy_encryption"},
+ {Opt_checkpoint, "checkpoint=%s"},
{Opt_err, NULL},
};
@@ -207,7 +207,7 @@ void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+ printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
va_end(args);
}
@@ -360,7 +360,6 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
static int parse_options(struct super_block *sb, char *options)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- struct request_queue *q;
substring_t args[MAX_OPT_ARGS];
char *p, *name;
int arg = 0;
@@ -415,14 +414,7 @@ static int parse_options(struct super_block *sb, char *options)
return -EINVAL;
break;
case Opt_discard:
- q = bdev_get_queue(sb->s_bdev);
- if (blk_queue_discard(q)) {
- set_opt(sbi, DISCARD);
- } else if (!f2fs_sb_has_blkzoned(sb)) {
- f2fs_msg(sb, KERN_WARNING,
- "mounting with \"discard\" option, but "
- "the device does not support discard");
- }
+ set_opt(sbi, DISCARD);
break;
case Opt_nodiscard:
if (f2fs_sb_has_blkzoned(sb)) {
@@ -602,28 +594,31 @@ static int parse_options(struct super_block *sb, char *options)
}
F2FS_OPTION(sbi).write_io_size_bits = arg;
break;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
case Opt_fault_injection:
if (args->from && match_int(args, &arg))
return -EINVAL;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
set_opt(sbi, FAULT_INJECTION);
-#else
- f2fs_msg(sb, KERN_INFO,
- "FAULT_INJECTION was not selected");
-#endif
break;
+
case Opt_fault_type:
if (args->from && match_int(args, &arg))
return -EINVAL;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(sbi, 0, arg);
set_opt(sbi, FAULT_INJECTION);
+ break;
#else
+ case Opt_fault_injection:
f2fs_msg(sb, KERN_INFO,
- "FAULT_INJECTION was not selected");
-#endif
+ "fault_injection options not supported");
+ break;
+
+ case Opt_fault_type:
+ f2fs_msg(sb, KERN_INFO,
+ "fault_type options not supported");
break;
+#endif
case Opt_lazytime:
sb->s_flags |= SB_LAZYTIME;
break;
@@ -776,6 +771,23 @@ static int parse_options(struct super_block *sb, char *options)
"Test dummy encryption mount option ignored");
#endif
break;
+ case Opt_checkpoint:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+
+ if (strlen(name) == 6 &&
+ !strncmp(name, "enable", 6)) {
+ clear_opt(sbi, DISABLE_CHECKPOINT);
+ } else if (strlen(name) == 7 &&
+ !strncmp(name, "disable", 7)) {
+ set_opt(sbi, DISABLE_CHECKPOINT);
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
@@ -834,6 +846,12 @@ static int parse_options(struct super_block *sb, char *options)
}
}
+ if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
+ f2fs_msg(sb, KERN_ERR,
+ "LFS not compatible with checkpoint=disable\n");
+ return -EINVAL;
+ }
+
/* Don't pass down write hints if the number of active logs is less
* than NR_CURSEG_TYPE.
*/
@@ -1021,8 +1039,8 @@ static void f2fs_put_super(struct super_block *sb)
* But if the previous checkpoint was not done by umount, it needs to do
* a clean checkpoint again.
*/
- if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
- !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
@@ -1032,7 +1050,8 @@ static void f2fs_put_super(struct super_block *sb)
/* be sure to wait for any on-going discard commands */
dropped = f2fs_wait_discard_bios(sbi);
- if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
+ if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
+ !sbi->discard_blks && !dropped) {
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
@@ -1093,6 +1112,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (unlikely(f2fs_cp_error(sbi)))
return 0;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return 0;
trace_f2fs_sync_fs(sb, sync);
@@ -1192,6 +1213,11 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_blocks = total_count - start_count;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
+ if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
+ buf->f_bfree = 0;
+ else
+ buf->f_bfree -= sbi->unusable_block_count;
+
if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
buf->f_bavail = buf->f_bfree -
F2FS_OPTION(sbi).root_reserved_blocks;
@@ -1336,7 +1362,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
from_kgid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resgid));
if (F2FS_IO_SIZE_BITS(sbi))
- seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
+ seq_printf(seq, ",io_bits=%u",
+ F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (test_opt(sbi, FAULT_INJECTION)) {
seq_printf(seq, ",fault_injection=%u",
@@ -1370,6 +1397,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
seq_printf(seq, ",alloc_mode=%s", "reuse");
+ if (test_opt(sbi, DISABLE_CHECKPOINT))
+ seq_puts(seq, ",checkpoint=disable");
+
if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
seq_printf(seq, ",fsync_mode=%s", "posix");
else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
@@ -1397,10 +1427,10 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
set_opt(sbi, NOHEAP);
+ clear_opt(sbi, DISABLE_CHECKPOINT);
sbi->sb->s_flags |= SB_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
- if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
- set_opt(sbi, DISCARD);
+ set_opt(sbi, DISCARD);
if (f2fs_sb_has_blkzoned(sbi->sb))
set_opt_mode(sbi, F2FS_MOUNT_LFS);
else
@@ -1419,6 +1449,57 @@ static void default_options(struct f2fs_sb_info *sbi)
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif
+
+static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
+{
+ struct cp_control cpc;
+ int err;
+
+ sbi->sb->s_flags |= SB_ACTIVE;
+
+ mutex_lock(&sbi->gc_mutex);
+ f2fs_update_time(sbi, DISABLE_TIME);
+
+ while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+ err = f2fs_gc(sbi, true, false, NULL_SEGNO);
+ if (err == -ENODATA)
+ break;
+ if (err && err != -EAGAIN) {
+ mutex_unlock(&sbi->gc_mutex);
+ return err;
+ }
+ }
+ mutex_unlock(&sbi->gc_mutex);
+
+ err = sync_filesystem(sbi->sb);
+ if (err)
+ return err;
+
+ if (f2fs_disable_cp_again(sbi))
+ return -EAGAIN;
+
+ mutex_lock(&sbi->gc_mutex);
+ cpc.reason = CP_PAUSE;
+ set_sbi_flag(sbi, SBI_CP_DISABLED);
+ f2fs_write_checkpoint(sbi, &cpc);
+
+ sbi->unusable_block_count = 0;
+ mutex_unlock(&sbi->gc_mutex);
+ return 0;
+}
+
+static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+{
+ mutex_lock(&sbi->gc_mutex);
+ f2fs_dirty_to_prefree(sbi);
+
+ clear_sbi_flag(sbi, SBI_CP_DISABLED);
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+ mutex_unlock(&sbi->gc_mutex);
+
+ f2fs_sync_fs(sbi->sb, 1);
+}
+
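/*
 * Editor's sketch (illustrative only, not part of the patch): the intended
 * lifecycle of the new mount option is
 *
 *	mount -o checkpoint=disable <dev> <mnt>   (or remount)
 *	  -> f2fs_disable_checkpoint(): run GC until DISABLE_TIME expires,
 *	     sync, write a CP_PAUSE checkpoint, set SBI_CP_DISABLED
 *	... workload runs with checkpointing suspended ...
 *	mount -o remount,checkpoint=enable <mnt>  (or clean umount)
 *	  -> f2fs_enable_checkpoint(): move dirty segments to prefree,
 *	     clear SBI_CP_DISABLED, then issue a sync via f2fs_sync_fs()
 */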
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -1428,6 +1509,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
+ bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
+ bool checkpoint_changed;
#ifdef CONFIG_QUOTA
int i, j;
#endif
@@ -1472,6 +1555,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
err = parse_options(sb, data);
if (err)
goto restore_opts;
+ checkpoint_changed =
+ disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
/*
* Previous and new state of filesystem is RO,
@@ -1485,7 +1570,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
- } else if (f2fs_readonly(sb) && !(*flags & MS_RDONLY)) {
+ } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
/* dquot_resume needs RW */
sb->s_flags &= ~SB_RDONLY;
if (sb_any_quota_suspended(sb)) {
@@ -1505,6 +1590,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
+ err = -EINVAL;
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "disabling checkpoint not compatible with read-only");
+ goto restore_opts;
+ }
+
/*
* We stop the GC thread if FS is mounted as RO
* or if background_gc = off is passed in mount
@@ -1533,6 +1625,16 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
clear_sbi_flag(sbi, SBI_IS_CLOSE);
}
+ if (checkpoint_changed) {
+ if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+ err = f2fs_disable_checkpoint(sbi);
+ if (err)
+ goto restore_gc;
+ } else {
+ f2fs_enable_checkpoint(sbi);
+ }
+ }
+
/*
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
@@ -1556,6 +1658,7 @@ skip:
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
limit_reserve_root(sbi);
+ *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
return 0;
restore_gc:
if (need_restart_gc) {
@@ -1608,6 +1711,7 @@ repeat:
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto repeat;
}
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
return PTR_ERR(page);
}
@@ -1619,6 +1723,7 @@ repeat:
}
if (unlikely(!PageUptodate(page))) {
f2fs_put_page(page, 1);
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
return -EIO;
}
@@ -1660,6 +1765,7 @@ retry:
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
break;
}
@@ -1696,6 +1802,12 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode)
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
+ if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "quota sysfile may be corrupted, skip loading it");
+ return 0;
+ }
+
return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
F2FS_OPTION(sbi).s_jquota_fmt, type);
}
@@ -1766,7 +1878,14 @@ static int f2fs_enable_quotas(struct super_block *sb)
test_opt(F2FS_SB(sb), PRJQUOTA),
};
- sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
+ if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
+ f2fs_msg(sb, KERN_ERR,
+ "quota file may be corrupted, skip loading it");
+ return 0;
+ }
+
+ sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+
for (type = 0; type < MAXQUOTAS; type++) {
qf_inum = f2fs_qf_ino(sb, type);
if (qf_inum) {
@@ -1780,6 +1899,8 @@ static int f2fs_enable_quotas(struct super_block *sb)
"fsck to fix.", type, err);
for (type--; type >= 0; type--)
dquot_quota_off(sb, type);
+ set_sbi_flag(F2FS_SB(sb),
+ SBI_QUOTA_NEED_REPAIR);
return err;
}
}
@@ -1787,35 +1908,51 @@ static int f2fs_enable_quotas(struct super_block *sb)
return 0;
}
-static int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_quota_sync(struct super_block *sb, int type)
{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
int ret;
ret = dquot_writeback_dquots(sb, type);
if (ret)
- return ret;
+ goto out;
/*
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ struct address_space *mapping;
+
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
+ mapping = dqopt->files[cnt]->i_mapping;
+
+ ret = filemap_fdatawrite(mapping);
+ if (ret)
+ goto out;
+
+ /* if we are using journalled quota */
+ if (is_journalled_quota(sbi))
+ continue;
+
+ ret = filemap_fdatawait(mapping);
if (ret)
- return ret;
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
inode_lock(dqopt->files[cnt]);
truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
inode_unlock(dqopt->files[cnt]);
}
- return 0;
+out:
+ if (ret)
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+ return ret;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
@@ -1836,8 +1973,7 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
inode_lock(inode);
F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
- inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
- S_NOATIME | S_IMMUTABLE);
+ f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
@@ -1852,7 +1988,9 @@ static int f2fs_quota_off(struct super_block *sb, int type)
if (!inode || !igrab(inode))
return dquot_quota_off(sb, type);
- f2fs_quota_sync(sb, type);
+ err = f2fs_quota_sync(sb, type);
+ if (err)
+ goto out_put;
err = dquot_quota_off(sb, type);
if (err || f2fs_sb_has_quota_ino(sb))
@@ -1860,7 +1998,7 @@ static int f2fs_quota_off(struct super_block *sb, int type)
inode_lock(inode);
F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
- inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
+ f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
out_put:
@@ -1871,9 +2009,88 @@ out_put:
void f2fs_quota_off_umount(struct super_block *sb)
{
int type;
+ int err;
+
+ for (type = 0; type < MAXQUOTAS; type++) {
+ err = f2fs_quota_off(sb, type);
+ if (err) {
+ int ret = dquot_quota_off(sb, type);
+
+ f2fs_msg(sb, KERN_ERR,
+ "Fail to turn off disk quota "
+ "(type: %d, err: %d, ret:%d), Please "
+ "run fsck to fix it.", type, err, ret);
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+ }
+ }
+}
+
+static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int type;
+
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (!dqopt->files[type])
+ continue;
+ f2fs_inode_synced(dqopt->files[type]);
+ }
+}
+
+static int f2fs_dquot_commit(struct dquot *dquot)
+{
+ int ret;
+
+ ret = dquot_commit(dquot);
+ if (ret < 0)
+ set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+ return ret;
+}
+
+static int f2fs_dquot_acquire(struct dquot *dquot)
+{
+ int ret;
+
+ ret = dquot_acquire(dquot);
+ if (ret < 0)
+ set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+
+ return ret;
+}
+
+static int f2fs_dquot_release(struct dquot *dquot)
+{
+ int ret;
+
+ ret = dquot_release(dquot);
+ if (ret < 0)
+ set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+ return ret;
+}
+
+static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
+{
+ struct super_block *sb = dquot->dq_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int ret;
+
+ ret = dquot_mark_dquot_dirty(dquot);
+
+ /* if we are using journalled quota */
+ if (is_journalled_quota(sbi))
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+
+ return ret;
+}
- for (type = 0; type < MAXQUOTAS; type++)
- f2fs_quota_off(sb, type);
+static int f2fs_dquot_commit_info(struct super_block *sb, int type)
+{
+ int ret;
+
+ ret = dquot_commit_info(sb, type);
+ if (ret < 0)
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+ return ret;
}
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
@@ -1884,11 +2101,11 @@ static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
static const struct dquot_operations f2fs_quota_operations = {
.get_reserved_space = f2fs_get_reserved_space,
- .write_dquot = dquot_commit,
- .acquire_dquot = dquot_acquire,
- .release_dquot = dquot_release,
- .mark_dirty = dquot_mark_dquot_dirty,
- .write_info = dquot_commit_info,
+ .write_dquot = f2fs_dquot_commit,
+ .acquire_dquot = f2fs_dquot_acquire,
+ .release_dquot = f2fs_dquot_release,
+ .mark_dirty = f2fs_dquot_mark_dquot_dirty,
+ .write_info = f2fs_dquot_commit_info,
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
.get_projid = f2fs_get_projid,
@@ -1906,6 +2123,11 @@ static const struct quotactl_ops f2fs_quotactl_ops = {
.get_nextdqblk = dquot_get_next_dqblk,
};
#else
+int f2fs_quota_sync(struct super_block *sb, int type)
+{
+ return 0;
+}
+
void f2fs_quota_off_umount(struct super_block *sb)
{
}
@@ -2170,6 +2392,26 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
(bh->b_data + F2FS_SUPER_OFFSET);
struct super_block *sb = sbi->sb;
unsigned int blocksize;
+ size_t crc_offset = 0;
+ __u32 crc = 0;
+
+ /* Check checksum_offset and crc in superblock */
+ if (le32_to_cpu(raw_super->feature) & F2FS_FEATURE_SB_CHKSUM) {
+ crc_offset = le32_to_cpu(raw_super->checksum_offset);
+ if (crc_offset !=
+ offsetof(struct f2fs_super_block, crc)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid SB checksum offset: %zu",
+ crc_offset);
+ return 1;
+ }
+ crc = le32_to_cpu(raw_super->crc);
+ if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid SB checksum value: %u", crc);
+ return 1;
+ }
+ }
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
f2fs_msg(sb, KERN_INFO,
@@ -2320,7 +2562,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
unsigned int segment_count_main;
unsigned int cp_pack_start_sum, cp_payload;
block_t user_block_count;
- int i;
+ int i, j;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -2361,11 +2603,43 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
return 1;
+ for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
+ le32_to_cpu(ckpt->cur_node_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Node segment (%u, %u) has the same "
+ "segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_node_segno[i]));
+ return 1;
+ }
+ }
}
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
return 1;
+ for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
+ le32_to_cpu(ckpt->cur_data_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Data segment (%u, %u) has the same "
+ "segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_data_segno[i]));
+ return 1;
+ }
+ }
+ }
+ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+ for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
+ le32_to_cpu(ckpt->cur_data_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Data segment (%u) and Data segment (%u)"
+ " has the same segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_node_segno[i]));
+ return 1;
+ }
+ }
}
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
@@ -2423,6 +2697,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
+ sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
+ sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
+ sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
for (i = 0; i < NR_COUNT_TYPE; i++)
@@ -2453,8 +2730,12 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
if (err)
return err;
- return percpu_counter_init(&sbi->total_valid_inode_count, 0,
+ err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
GFP_KERNEL);
+ if (err)
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
+
+ return err;
}
#ifdef CONFIG_BLK_DEV_ZONED
@@ -2589,6 +2870,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
struct buffer_head *bh;
+ __u32 crc = 0;
int err;
if ((recover && f2fs_readonly(sbi->sb)) ||
@@ -2597,6 +2879,13 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
return -EROFS;
}
+ /* we should update superblock crc here */
+ if (!recover && f2fs_sb_has_sb_chksum(sbi->sb)) {
+ crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
+ offsetof(struct f2fs_super_block, crc));
+ F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
+ }
+
/* write back-up superblock first */
bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
if (!bh)
@@ -2866,7 +3155,7 @@ try_onemore:
GFP_KERNEL);
if (!sbi->write_io[i]) {
err = -ENOMEM;
- goto free_options;
+ goto free_bio_info;
}
for (j = HOT; j < n; j++) {
@@ -2909,6 +3198,9 @@ try_onemore:
goto free_meta_inode;
}
+ if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
/* Initialize device list */
err = f2fs_scan_devices(sbi);
if (err) {
@@ -3007,11 +3299,9 @@ try_onemore:
/* Enable quota usage during mount */
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
err = f2fs_enable_quotas(sb);
- if (err) {
+ if (err)
f2fs_msg(sb, KERN_ERR,
"Cannot turn on quotas: error %d", err);
- goto free_sysfs;
- }
}
#endif
/* if there are any orphan nodes, free them */
@@ -3019,6 +3309,9 @@ try_onemore:
if (err)
goto free_meta;
+ if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
+ goto skip_recovery;
+
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
/*
@@ -3058,6 +3351,14 @@ skip_recovery:
/* f2fs_recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
+ if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+ err = f2fs_disable_checkpoint(sbi);
+ if (err)
+ goto free_meta;
+ } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
+ f2fs_enable_checkpoint(sbi);
+ }
+
/*
* If filesystem is not mounted as read-only then
* do start the gc_thread.
@@ -3090,10 +3391,10 @@ skip_recovery:
free_meta:
#ifdef CONFIG_QUOTA
+ f2fs_truncate_quota_inode_pages(sb);
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
f2fs_quota_off_umount(sbi->sb);
#endif
- f2fs_sync_inode_meta(sbi);
/*
* Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
* failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
@@ -3101,9 +3402,6 @@ free_meta:
* falls into an infinite loop in f2fs_sync_meta_pages().
*/
truncate_inode_pages_final(META_MAPPING(sbi));
-#ifdef CONFIG_QUOTA
-free_sysfs:
-#endif
f2fs_unregister_sysfs(sbi);
free_root_inode:
dput(sb->s_root);
@@ -3175,6 +3473,9 @@ static void kill_f2fs_super(struct super_block *sb)
};
f2fs_write_checkpoint(sbi, &cpc);
}
+
+ if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
+ sb->s_flags &= ~SB_RDONLY;
}
kill_block_super(sb);
}
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 81c0e5337443..b777cbdd796b 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs sysfs interface
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
* Copyright (c) 2017 Chao Yu <chao@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/compiler.h>
#include <linux/proc_fs.h>
@@ -120,6 +117,9 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_lost_found(sb))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "lost_found");
+ if (f2fs_sb_has_sb_chksum(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "sb_checksum");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -337,6 +337,7 @@ enum feat_id {
FEAT_QUOTA_INO,
FEAT_INODE_CRTIME,
FEAT_LOST_FOUND,
+ FEAT_SB_CHECKSUM,
};
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -353,6 +354,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
case FEAT_QUOTA_INO:
case FEAT_INODE_CRTIME:
case FEAT_LOST_FOUND:
+ case FEAT_SB_CHECKSUM:
return snprintf(buf, PAGE_SIZE, "supported\n");
}
return 0;
@@ -407,6 +409,9 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, discard_idle_interval,
+ interval_time[DISCARD_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
@@ -434,6 +439,7 @@ F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND);
+F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -460,6 +466,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(dirty_nats_ratio),
ATTR_LIST(cp_interval),
ATTR_LIST(idle_interval),
+ ATTR_LIST(discard_idle_interval),
+ ATTR_LIST(gc_idle_interval),
ATTR_LIST(iostat_enable),
ATTR_LIST(readdir_ra),
ATTR_LIST(gc_pin_file_thresh),
@@ -491,6 +499,7 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(quota_ino),
ATTR_LIST(inode_crtime),
ATTR_LIST(lost_found),
+ ATTR_LIST(sb_checksum),
NULL,
};
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index a1fcd00bbb2b..ce2a5eb210b6 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs IO tracer
*
* Copyright (c) 2014 Motorola Mobility
* Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h
index 67db24ac1e85..e8075fc5b228 100644
--- a/fs/f2fs/trace.h
+++ b/fs/f2fs/trace.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs IO tracer
*
* Copyright (c) 2014 Motorola Mobility
* Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __F2FS_TRACE_H__
#define __F2FS_TRACE_H__
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 77a010e625f5..7261245c208d 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/xattr.c
*
@@ -13,10 +14,6 @@
* suggestion of Luka Renko <luka.renko@hermes.si>.
* xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
* Red Hat Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/rwsem.h>
#include <linux/f2fs_fs.h>
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index dbcd1d16e669..67db134da0f5 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/xattr.h
*
@@ -9,10 +10,6 @@
* On-disk format of extended attributes for the ext2 filesystem.
*
* (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __F2FS_XATTR_H__
#define __F2FS_XATTR_H__
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 7f5f3699fc6c..c8366cb8eccd 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -369,7 +369,9 @@ static int fat_parse_short(struct super_block *sb,
}
memcpy(work, de->name, sizeof(work));
- /* see namei.c, msdos_format_name */
+ /* For an explanation of the special treatment of 0x05 in
+ * filenames, see msdos_format_name in namei_msdos.c
+ */
if (work[0] == 0x05)
work[0] = 0xE5;
@@ -1071,7 +1073,7 @@ int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
}
}
- dir->i_mtime = dir->i_atime = current_time(dir);
+ fat_truncate_time(dir, NULL, S_ATIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 9d7d2d5da28b..4e1b2f6df5e6 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -416,6 +416,10 @@ extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs);
extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs);
+extern int fat_truncate_time(struct inode *inode, struct timespec64 *now,
+ int flags);
+extern int fat_update_time(struct inode *inode, struct timespec64 *now,
+ int flags);
extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
int fat_cache_init(void);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 4f3d72fb1e60..13935ee99e1e 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -227,7 +227,7 @@ static int fat_cont_expand(struct inode *inode, loff_t size)
if (err)
goto out;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
int err2;
@@ -330,7 +330,7 @@ static int fat_free(struct inode *inode, int skip)
MSDOS_I(inode)->i_logstart = 0;
}
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
if (wait) {
err = fat_sync_inode(inode);
if (err) {
@@ -542,6 +542,18 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
up_write(&MSDOS_I(inode)->truncate_lock);
}
+ /*
+ * setattr_copy can't truncate these appropriately, so we'll
+ * copy them ourselves
+ */
+ if (attr->ia_valid & ATTR_ATIME)
+ fat_truncate_time(inode, &attr->ia_atime, S_ATIME);
+ if (attr->ia_valid & ATTR_CTIME)
+ fat_truncate_time(inode, &attr->ia_ctime, S_CTIME);
+ if (attr->ia_valid & ATTR_MTIME)
+ fat_truncate_time(inode, &attr->ia_mtime, S_MTIME);
+ attr->ia_valid &= ~(ATTR_ATIME|ATTR_CTIME|ATTR_MTIME);
+
setattr_copy(inode, attr);
mark_inode_dirty(inode);
out:
@@ -552,4 +564,5 @@ EXPORT_SYMBOL_GPL(fat_setattr);
const struct inode_operations fat_file_inode_operations = {
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index d6b81e31f9f5..c0b5b5c3373b 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -244,7 +244,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
if (err < len)
fat_write_failed(mapping, pos + len);
if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
mark_inode_dirty(inode);
}
@@ -564,7 +564,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
de->cdate, de->ctime_cs);
fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
} else
- inode->i_ctime = inode->i_atime = inode->i_mtime;
+ fat_truncate_time(inode, &inode->i_mtime, S_ATIME|S_CTIME);
return 0;
}
@@ -1626,6 +1626,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
sb->s_magic = MSDOS_SUPER_MAGIC;
sb->s_op = &fat_sops;
sb->s_export_op = &fat_export_ops;
+ /*
+ * fat timestamps are complex and truncated by fat itself, so
+ * we set 1 here to be fast
+ */
+ sb->s_time_gran = 1;
mutex_init(&sbi->nfs_build_inode_lock);
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 573836dcaefc..fce0a76f3f1e 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -7,6 +7,7 @@
*/
#include "fat.h"
+#include <linux/iversion.h>
/*
* fat_fs_error reports a file system problem that might indicate fat data
@@ -185,6 +186,13 @@ static long days_in_year[] = {
0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};
+static inline int fat_tz_offset(struct msdos_sb_info *sbi)
+{
+ return (sbi->options.tz_set ?
+ -sbi->options.time_offset :
+ sys_tz.tz_minuteswest) * SECS_PER_MIN;
+}
+
/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs)
@@ -210,10 +218,7 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
+ days_in_year[month] + day
+ DAYS_DELTA) * SECS_PER_DAY;
- if (!sbi->options.tz_set)
- second += sys_tz.tz_minuteswest * SECS_PER_MIN;
- else
- second -= sbi->options.time_offset * SECS_PER_MIN;
+ second += fat_tz_offset(sbi);
if (time_cs) {
ts->tv_sec = second + (time_cs / 100);
@@ -229,9 +234,7 @@ void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs)
{
struct tm tm;
- time64_to_tm(ts->tv_sec,
- (sbi->options.tz_set ? sbi->options.time_offset :
- -sys_tz.tz_minuteswest) * SECS_PER_MIN, &tm);
+ time64_to_tm(ts->tv_sec, -fat_tz_offset(sbi), &tm);
/* FAT can only support year between 1980 to 2107 */
if (tm.tm_year < 1980 - 1900) {
@@ -263,6 +266,80 @@ void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
}
EXPORT_SYMBOL_GPL(fat_time_unix2fat);
+static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
+{
+ return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
+}
+
+/*
+ * truncate the various times with appropriate granularity:
+ * root inode:
+ * all times always 0
+ * all other inodes:
+ * mtime - 2 seconds
+ * ctime
+ * msdos - 2 seconds
+ * vfat - 10 milliseconds
+ * atime - 24 hours (00:00:00 in local timezone)
+ */
+int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+ struct timespec64 ts;
+
+ if (inode->i_ino == MSDOS_ROOT_INO)
+ return 0;
+
+ if (now == NULL) {
+ now = &ts;
+ ts = current_time(inode);
+ }
+
+ if (flags & S_ATIME) {
+ /* to localtime */
+ time64_t seconds = now->tv_sec - fat_tz_offset(sbi);
+ s32 remainder;
+
+ div_s64_rem(seconds, SECS_PER_DAY, &remainder);
+ /* to day boundary, and back to unix time */
+ seconds = seconds + fat_tz_offset(sbi) - remainder;
+
+ inode->i_atime = (struct timespec64){ seconds, 0 };
+ }
+ if (flags & S_CTIME) {
+ if (sbi->options.isvfat)
+ inode->i_ctime = timespec64_trunc(*now, 10000000);
+ else
+ inode->i_ctime = fat_timespec64_trunc_2secs(*now);
+ }
+ if (flags & S_MTIME)
+ inode->i_mtime = fat_timespec64_trunc_2secs(*now);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fat_truncate_time);
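/*
 * Editor's worked example (assumes a zero local timezone offset; not part of
 * the patch): for a timestamp of 12:34:57.789 on a non-root inode,
 *	mtime -> 12:34:56        (tv_sec & ~1, 2 s granularity)
 *	ctime -> 12:34:57.780 on vfat (10 ms), 12:34:56 on msdos (2 s)
 *	atime -> 00:00:00 of the same local day (24 h granularity)
 */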
+
+int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
+{
+ int iflags = I_DIRTY_TIME;
+ bool dirty = false;
+
+ if (inode->i_ino == MSDOS_ROOT_INO)
+ return 0;
+
+ fat_truncate_time(inode, now, flags);
+ if (flags & S_VERSION)
+ dirty = inode_maybe_inc_iversion(inode, false);
+ if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
+ !(inode->i_sb->s_flags & SB_LAZYTIME))
+ dirty = true;
+
+ if (dirty)
+ iflags |= I_DIRTY_SYNC;
+ __mark_inode_dirty(inode, iflags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fat_update_time);
+
int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
{
int i, err = 0;
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index efb8c40c9d27..f2cd365a4e86 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -250,7 +250,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
if (err)
return err;
- dir->i_ctime = dir->i_mtime = *ts;
+ fat_truncate_time(dir, ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
@@ -294,7 +294,7 @@ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = PTR_ERR(inode);
goto out;
}
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -327,7 +327,7 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME);
fat_detach(inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -380,7 +380,7 @@ static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
}
set_nlink(inode, 2);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -413,7 +413,7 @@ static int msdos_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME);
fat_detach(inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -478,7 +478,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
mark_inode_dirty(old_inode);
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ fat_truncate_time(old_dir, NULL, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -538,7 +538,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
if (err)
goto error_dotdot;
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = ts;
+ fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -548,7 +548,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
- new_inode->i_ctime = ts;
+ fat_truncate_time(new_inode, &ts, S_CTIME);
}
out:
brelse(sinfo.bh);
@@ -637,6 +637,7 @@ static const struct inode_operations msdos_dir_inode_operations = {
.rename = msdos_rename,
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
static void setup(struct super_block *sb)
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 82cd1e69cbdf..996c8c25e9c6 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -678,7 +678,7 @@ static int vfat_add_entry(struct inode *dir, const struct qstr *qname,
goto cleanup;
/* update timestamp */
- dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
+ fat_truncate_time(dir, ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
@@ -779,7 +779,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto out;
}
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -810,7 +810,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_ATIME|S_MTIME);
fat_detach(inode);
vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
@@ -836,7 +836,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_ATIME|S_MTIME);
fat_detach(inode);
vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
@@ -876,7 +876,7 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
}
inode_inc_iversion(inode);
set_nlink(inode, 2);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -969,7 +969,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto error_dotdot;
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = ts;
+ fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -979,7 +979,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
- new_inode->i_ctime = ts;
+ fat_truncate_time(new_inode, &ts, S_CTIME);
}
out:
brelse(sinfo.bh);
@@ -1032,6 +1032,7 @@ static const struct inode_operations vfat_dir_inode_operations = {
.rename = vfat_rename,
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
static void setup(struct super_block *sb)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 4137d96534a6..083185174c6d 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -735,7 +735,7 @@ static void send_sigio_to_task(struct task_struct *p,
return;
switch (signum) {
- siginfo_t si;
+ kernel_siginfo_t si;
default:
/* Queue a rt signal with the appropriate fd as its
value. We use SI_SIGIO as the source, not
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 471d863958bc..b40168fcc94a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -339,9 +339,9 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
struct address_space *mapping = inode->i_mapping;
struct bdi_writeback *old_wb = inode->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
- struct radix_tree_iter iter;
+ XA_STATE(xas, &mapping->i_pages, 0);
+ struct page *page;
bool switched = false;
- void **slot;
/*
* By the time control reaches here, RCU grace period has passed
@@ -375,25 +375,18 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
* to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
* pages actually under writeback.
*/
- radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
- PAGECACHE_TAG_DIRTY) {
- struct page *page = radix_tree_deref_slot_protected(slot,
- &mapping->i_pages.xa_lock);
- if (likely(page) && PageDirty(page)) {
+ xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+ if (PageDirty(page)) {
dec_wb_stat(old_wb, WB_RECLAIMABLE);
inc_wb_stat(new_wb, WB_RECLAIMABLE);
}
}
- radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
- PAGECACHE_TAG_WRITEBACK) {
- struct page *page = radix_tree_deref_slot_protected(slot,
- &mapping->i_pages.xa_lock);
- if (likely(page)) {
- WARN_ON_ONCE(!PageWriteback(page));
- dec_wb_stat(old_wb, WB_WRITEBACK);
- inc_wb_stat(new_wb, WB_WRITEBACK);
- }
+ xas_set(&xas, 0);
+ xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+ WARN_ON_ONCE(!PageWriteback(page));
+ dec_wb_stat(old_wb, WB_WRITEBACK);
+ inc_wb_stat(new_wb, WB_WRITEBACK);
}
wb_get(new_wb);
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 60da84a86dab..f7b807bc1027 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
-fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o
+fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 0b694655d988..989df5accaee 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -107,7 +107,7 @@ static ssize_t fuse_conn_max_background_read(struct file *file,
if (!fc)
return 0;
- val = fc->max_background;
+ val = READ_ONCE(fc->max_background);
fuse_conn_put(fc);
return fuse_conn_limit_read(file, buf, len, ppos, val);
@@ -125,7 +125,12 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
if (ret > 0) {
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (fc) {
+ spin_lock(&fc->bg_lock);
fc->max_background = val;
+ fc->blocked = fc->num_background >= fc->max_background;
+ if (!fc->blocked)
+ wake_up(&fc->blocked_waitq);
+ spin_unlock(&fc->bg_lock);
fuse_conn_put(fc);
}
}
@@ -144,7 +149,7 @@ static ssize_t fuse_conn_congestion_threshold_read(struct file *file,
if (!fc)
return 0;
- val = fc->congestion_threshold;
+ val = READ_ONCE(fc->congestion_threshold);
fuse_conn_put(fc);
return fuse_conn_limit_read(file, buf, len, ppos, val);
@@ -155,18 +160,31 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
size_t count, loff_t *ppos)
{
unsigned uninitialized_var(val);
+ struct fuse_conn *fc;
ssize_t ret;
ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
max_user_congthresh);
- if (ret > 0) {
- struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
- if (fc) {
- fc->congestion_threshold = val;
- fuse_conn_put(fc);
+ if (ret <= 0)
+ goto out;
+ fc = fuse_ctl_file_conn_get(file);
+ if (!fc)
+ goto out;
+
+ spin_lock(&fc->bg_lock);
+ fc->congestion_threshold = val;
+ if (fc->sb) {
+ if (fc->num_background < fc->congestion_threshold) {
+ clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
+ clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+ } else {
+ set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
+ set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
}
}
-
+ spin_unlock(&fc->bg_lock);
+ fuse_conn_put(fc);
+out:
return ret;
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 11ea2c4a38ab..ae813e609932 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -25,6 +25,10 @@
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
+/* Ordinary requests have even IDs, while interrupt IDs are odd */
+#define FUSE_INT_REQ_BIT (1ULL << 0)
+#define FUSE_REQ_ID_STEP (1ULL << 1)
+
static struct kmem_cache *fuse_req_cachep;
static struct fuse_dev *fuse_get_dev(struct file *file)
@@ -40,9 +44,6 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
struct fuse_page_desc *page_descs,
unsigned npages)
{
- memset(req, 0, sizeof(*req));
- memset(pages, 0, sizeof(*pages) * npages);
- memset(page_descs, 0, sizeof(*page_descs) * npages);
INIT_LIST_HEAD(&req->list);
INIT_LIST_HEAD(&req->intr_entry);
init_waitqueue_head(&req->waitq);
@@ -53,30 +54,36 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
__set_bit(FR_PENDING, &req->flags);
}
+static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
+ struct fuse_page_desc **desc)
+{
+ struct page **pages;
+
+ pages = kzalloc(npages * (sizeof(struct page *) +
+ sizeof(struct fuse_page_desc)), flags);
+ *desc = (void *) pages + npages * sizeof(struct page *);
+
+ return pages;
+}
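/*
 * Editor's note (illustrative): the helper packs both arrays into one
 * allocation,
 *	[ struct page *pages[npages] | struct fuse_page_desc descs[npages] ]
 * so *desc simply points just past the pointer array, and freeing
 * req->pages in fuse_req_pages_free() releases the descriptors too.
 */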
+
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
- struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
+ struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
if (req) {
- struct page **pages;
- struct fuse_page_desc *page_descs;
-
- if (npages <= FUSE_REQ_INLINE_PAGES) {
+ struct page **pages = NULL;
+ struct fuse_page_desc *page_descs = NULL;
+
+ WARN_ON(npages > FUSE_MAX_MAX_PAGES);
+ if (npages > FUSE_REQ_INLINE_PAGES) {
+ pages = fuse_req_pages_alloc(npages, flags,
+ &page_descs);
+ if (!pages) {
+ kmem_cache_free(fuse_req_cachep, req);
+ return NULL;
+ }
+ } else if (npages) {
pages = req->inline_pages;
page_descs = req->inline_page_descs;
- } else {
- pages = kmalloc_array(npages, sizeof(struct page *),
- flags);
- page_descs =
- kmalloc_array(npages,
- sizeof(struct fuse_page_desc),
- flags);
- }
-
- if (!pages || !page_descs) {
- kfree(pages);
- kfree(page_descs);
- kmem_cache_free(fuse_req_cachep, req);
- return NULL;
}
fuse_request_init(req, pages, page_descs, npages);
@@ -95,12 +102,41 @@ struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
return __fuse_request_alloc(npages, GFP_NOFS);
}
-void fuse_request_free(struct fuse_req *req)
+static void fuse_req_pages_free(struct fuse_req *req)
{
- if (req->pages != req->inline_pages) {
+ if (req->pages != req->inline_pages)
kfree(req->pages);
- kfree(req->page_descs);
- }
+}
+
+bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
+ gfp_t flags)
+{
+ struct page **pages;
+ struct fuse_page_desc *page_descs;
+ unsigned int npages = min_t(unsigned int,
+ max_t(unsigned int, req->max_pages * 2,
+ FUSE_DEFAULT_MAX_PAGES_PER_REQ),
+ fc->max_pages);
+ WARN_ON(npages <= req->max_pages);
+
+ pages = fuse_req_pages_alloc(npages, flags, &page_descs);
+ if (!pages)
+ return false;
+
+ memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
+ memcpy(page_descs, req->page_descs,
+ sizeof(struct fuse_page_desc) * req->max_pages);
+ fuse_req_pages_free(req);
+ req->pages = pages;
+ req->page_descs = page_descs;
+ req->max_pages = npages;
+
+ return true;
+}
+
+void fuse_request_free(struct fuse_req *req)
+{
+ fuse_req_pages_free(req);
kmem_cache_free(fuse_req_cachep, req);
}
@@ -235,8 +271,10 @@ static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
struct file *file = req->stolen_file;
struct fuse_file *ff = file->private_data;
+ WARN_ON(req->max_pages);
spin_lock(&fc->lock);
- fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
+ memset(req, 0, sizeof(*req));
+ fuse_request_init(req, NULL, NULL, 0);
BUG_ON(ff->reserved_req);
ff->reserved_req = req;
wake_up_all(&fc->reserved_req_waitq);
@@ -287,10 +325,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
* We get here in the unlikely case that a background
* request was allocated but not sent
*/
- spin_lock(&fc->lock);
+ spin_lock(&fc->bg_lock);
if (!fc->blocked)
wake_up(&fc->blocked_waitq);
- spin_unlock(&fc->lock);
+ spin_unlock(&fc->bg_lock);
}
if (test_bit(FR_WAITING, &req->flags)) {
@@ -319,7 +357,13 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
- return ++fiq->reqctr;
+ fiq->reqctr += FUSE_REQ_ID_STEP;
+ return fiq->reqctr;
+}
+
+static unsigned int fuse_req_hash(u64 unique)
+{
+ return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
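/*
 * Editor's sketch (illustrative, not part of the patch): with
 * FUSE_REQ_ID_STEP == 2 and FUSE_INT_REQ_BIT == 1, fuse_get_unique()
 * returns 2, 4, 6, ... for ordinary requests; the interrupt for request N
 * is later sent with ID (N | FUSE_INT_REQ_BIT), and fuse_req_hash() masks
 * that bit so a request and its interrupt hash to the same bucket:
 *
 *	u64 unique  = fuse_get_unique(fiq);		// e.g. 42
 *	u64 intr_id = unique | FUSE_INT_REQ_BIT;	// 43
 *	// fuse_req_hash(unique) == fuse_req_hash(intr_id)
 */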
static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
@@ -353,12 +397,13 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
static void flush_bg_queue(struct fuse_conn *fc)
{
+ struct fuse_iqueue *fiq = &fc->iq;
+
while (fc->active_background < fc->max_background &&
!list_empty(&fc->bg_queue)) {
struct fuse_req *req;
- struct fuse_iqueue *fiq = &fc->iq;
- req = list_entry(fc->bg_queue.next, struct fuse_req, list);
+ req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
spin_lock(&fiq->waitq.lock);
@@ -389,14 +434,21 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
if (test_bit(FR_BACKGROUND, &req->flags)) {
- spin_lock(&fc->lock);
+ spin_lock(&fc->bg_lock);
clear_bit(FR_BACKGROUND, &req->flags);
- if (fc->num_background == fc->max_background)
+ if (fc->num_background == fc->max_background) {
fc->blocked = 0;
-
- /* Wake up next waiter, if any */
- if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
wake_up(&fc->blocked_waitq);
+ } else if (!fc->blocked) {
+ /*
+ * Wake up next waiter, if any. It's okay to use
+ * waitqueue_active(), as we've already synced up
+ * fc->blocked with waiters with the wake_up() call
+ * above.
+ */
+ if (waitqueue_active(&fc->blocked_waitq))
+ wake_up(&fc->blocked_waitq);
+ }
if (fc->num_background == fc->congestion_threshold && fc->sb) {
clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
@@ -405,7 +457,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
fc->num_background--;
fc->active_background--;
flush_bg_queue(fc);
- spin_unlock(&fc->lock);
+ spin_unlock(&fc->bg_lock);
}
wake_up(&req->waitq);
if (req->end)
@@ -573,40 +625,38 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
return ret;
}
-/*
- * Called under fc->lock
- *
- * fc->connected must have been checked previously
- */
-void fuse_request_send_background_locked(struct fuse_conn *fc,
- struct fuse_req *req)
+bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
{
- BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
+ bool queued = false;
+
+ WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
if (!test_bit(FR_WAITING, &req->flags)) {
__set_bit(FR_WAITING, &req->flags);
atomic_inc(&fc->num_waiting);
}
__set_bit(FR_ISREPLY, &req->flags);
- fc->num_background++;
- if (fc->num_background == fc->max_background)
- fc->blocked = 1;
- if (fc->num_background == fc->congestion_threshold && fc->sb) {
- set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
- set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+ spin_lock(&fc->bg_lock);
+ if (likely(fc->connected)) {
+ fc->num_background++;
+ if (fc->num_background == fc->max_background)
+ fc->blocked = 1;
+ if (fc->num_background == fc->congestion_threshold && fc->sb) {
+ set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
+ set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+ }
+ list_add_tail(&req->list, &fc->bg_queue);
+ flush_bg_queue(fc);
+ queued = true;
}
- list_add_tail(&req->list, &fc->bg_queue);
- flush_bg_queue(fc);
+ spin_unlock(&fc->bg_lock);
+
+ return queued;
}
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
- BUG_ON(!req->end);
- spin_lock(&fc->lock);
- if (fc->connected) {
- fuse_request_send_background_locked(fc, req);
- spin_unlock(&fc->lock);
- } else {
- spin_unlock(&fc->lock);
+ WARN_ON(!req->end);
+ if (!fuse_request_queue_background(fc, req)) {
req->out.h.error = -ENOTCONN;
req->end(fc, req);
fuse_put_request(fc, req);
@@ -1084,12 +1134,11 @@ __releases(fiq->waitq.lock)
int err;
list_del_init(&req->intr_entry);
- req->intr_unique = fuse_get_unique(fiq);
memset(&ih, 0, sizeof(ih));
memset(&arg, 0, sizeof(arg));
ih.len = reqsize;
ih.opcode = FUSE_INTERRUPT;
- ih.unique = req->intr_unique;
+ ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
arg.unique = req->in.h.unique;
spin_unlock(&fiq->waitq.lock);
@@ -1238,6 +1287,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
struct fuse_req *req;
struct fuse_in *in;
unsigned reqsize;
+ unsigned int hash;
restart:
spin_lock(&fiq->waitq.lock);
@@ -1310,13 +1360,16 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
err = reqsize;
goto out_end;
}
- list_move_tail(&req->list, &fpq->processing);
- spin_unlock(&fpq->lock);
+ hash = fuse_req_hash(req->in.h.unique);
+ list_move_tail(&req->list, &fpq->processing[hash]);
+ __fuse_get_request(req);
set_bit(FR_SENT, &req->flags);
+ spin_unlock(&fpq->lock);
/* matches barrier in request_wait_answer() */
smp_mb__after_atomic();
if (test_bit(FR_INTERRUPTED, &req->flags))
queue_interrupt(fiq, req);
+ fuse_put_request(fc, req);
return reqsize;
@@ -1663,7 +1716,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
unsigned int num;
unsigned int offset;
size_t total_len = 0;
- int num_pages;
+ unsigned int num_pages;
offset = outarg->offset & ~PAGE_MASK;
file_size = i_size_read(inode);
@@ -1675,7 +1728,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
num = file_size - outarg->offset;
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
- num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
+ num_pages = min(num_pages, fc->max_pages);
req = fuse_get_req(fc, num_pages);
if (IS_ERR(req))
@@ -1792,10 +1845,11 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
+ unsigned int hash = fuse_req_hash(unique);
struct fuse_req *req;
- list_for_each_entry(req, &fpq->processing, list) {
- if (req->in.h.unique == unique || req->intr_unique == unique)
+ list_for_each_entry(req, &fpq->processing[hash], list) {
+ if (req->in.h.unique == unique)
return req;
}
return NULL;
@@ -1869,22 +1923,26 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
if (!fpq->connected)
goto err_unlock_pq;
- req = request_find(fpq, oh.unique);
+ req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
if (!req)
goto err_unlock_pq;
- /* Is it an interrupt reply? */
- if (req->intr_unique == oh.unique) {
+ /* Is it an interrupt reply ID? */
+ if (oh.unique & FUSE_INT_REQ_BIT) {
+ __fuse_get_request(req);
spin_unlock(&fpq->lock);
err = -EINVAL;
- if (nbytes != sizeof(struct fuse_out_header))
+ if (nbytes != sizeof(struct fuse_out_header)) {
+ fuse_put_request(fc, req);
goto err_finish;
+ }
if (oh.error == -ENOSYS)
fc->no_interrupt = 1;
else if (oh.error == -EAGAIN)
queue_interrupt(&fc->iq, req);
+ fuse_put_request(fc, req);
fuse_copy_finish(cs);
return nbytes;
@@ -2102,9 +2160,13 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
struct fuse_dev *fud;
struct fuse_req *req, *next;
LIST_HEAD(to_end);
+ unsigned int i;
+ /* Background queuing checks fc->connected under bg_lock */
+ spin_lock(&fc->bg_lock);
fc->connected = 0;
- fc->blocked = 0;
+ spin_unlock(&fc->bg_lock);
+
fc->aborted = is_abort;
fuse_set_initialized(fc);
list_for_each_entry(fud, &fc->devices, entry) {
@@ -2123,11 +2185,16 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
}
spin_unlock(&req->waitq.lock);
}
- list_splice_tail_init(&fpq->processing, &to_end);
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ list_splice_tail_init(&fpq->processing[i],
+ &to_end);
spin_unlock(&fpq->lock);
}
+ spin_lock(&fc->bg_lock);
+ fc->blocked = 0;
fc->max_background = UINT_MAX;
flush_bg_queue(fc);
+ spin_unlock(&fc->bg_lock);
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
@@ -2163,10 +2230,12 @@ int fuse_dev_release(struct inode *inode, struct file *file)
struct fuse_conn *fc = fud->fc;
struct fuse_pqueue *fpq = &fud->pq;
LIST_HEAD(to_end);
+ unsigned int i;
spin_lock(&fpq->lock);
WARN_ON(!list_empty(&fpq->io));
- list_splice_init(&fpq->processing, &to_end);
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ list_splice_init(&fpq->processing[i], &to_end);
spin_unlock(&fpq->lock);
end_requests(fc, &to_end);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 0979609d6eba..47395b0c3b35 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -14,24 +14,9 @@
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/xattr.h>
+#include <linux/iversion.h>
#include <linux/posix_acl.h>
-static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
-{
- struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_inode *fi = get_fuse_inode(dir);
-
- if (!fc->do_readdirplus)
- return false;
- if (!fc->readdirplus_auto)
- return true;
- if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state))
- return true;
- if (ctx->pos == 0)
- return true;
- return false;
-}
-
static void fuse_advise_use_readdirplus(struct inode *dir)
{
struct fuse_inode *fi = get_fuse_inode(dir);
@@ -80,8 +65,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
* Set dentry and possibly attribute timeouts from the lookup/mk*
* replies
*/
-static void fuse_change_entry_timeout(struct dentry *entry,
- struct fuse_entry_out *o)
+void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
{
fuse_dentry_settime(entry,
time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
@@ -92,18 +76,29 @@ static u64 attr_timeout(struct fuse_attr_out *o)
return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
}
-static u64 entry_attr_timeout(struct fuse_entry_out *o)
+u64 entry_attr_timeout(struct fuse_entry_out *o)
{
return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
}
+static void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
+{
+ set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask);
+}
+
/*
* Mark the attributes as stale, so that at the next call to
* ->getattr() they will be fetched from userspace
*/
void fuse_invalidate_attr(struct inode *inode)
{
- get_fuse_inode(inode)->i_time = 0;
+ fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
+}
+
+static void fuse_dir_changed(struct inode *dir)
+{
+ fuse_invalidate_attr(dir);
+ inode_maybe_inc_iversion(dir, false);
}
/**
@@ -113,7 +108,7 @@ void fuse_invalidate_attr(struct inode *inode)
void fuse_invalidate_atime(struct inode *inode)
{
if (!IS_RDONLY(inode))
- fuse_invalidate_attr(inode);
+ fuse_invalidate_attr_mask(inode, STATX_ATIME);
}
/*
@@ -262,11 +257,6 @@ invalid:
goto out;
}
-static int invalid_nodeid(u64 nodeid)
-{
- return !nodeid || nodeid == FUSE_ROOT_ID;
-}
-
static int fuse_dentry_init(struct dentry *dentry)
{
dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry), GFP_KERNEL);
@@ -469,7 +459,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
kfree(forget);
d_instantiate(entry, inode);
fuse_change_entry_timeout(entry, &outentry);
- fuse_invalidate_attr(dir);
+ fuse_dir_changed(dir);
err = finish_open(file, entry, generic_file_open);
if (err) {
fuse_sync_release(ff, flags);
@@ -583,7 +573,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
} else {
fuse_change_entry_timeout(entry, &outarg);
}
- fuse_invalidate_attr(dir);
+ fuse_dir_changed(dir);
return 0;
out_put_forget_req:
@@ -693,7 +683,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
drop_nlink(inode);
spin_unlock(&fc->lock);
fuse_invalidate_attr(inode);
- fuse_invalidate_attr(dir);
+ fuse_dir_changed(dir);
fuse_invalidate_entry_cache(entry);
fuse_update_ctime(inode);
} else if (err == -EINTR)
@@ -715,7 +705,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
err = fuse_simple_request(fc, &args);
if (!err) {
clear_nlink(d_inode(entry));
- fuse_invalidate_attr(dir);
+ fuse_dir_changed(dir);
fuse_invalidate_entry_cache(entry);
} else if (err == -EINTR)
fuse_invalidate_entry(entry);
@@ -754,9 +744,9 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
fuse_update_ctime(d_inode(newent));
}
- fuse_invalidate_attr(olddir);
+ fuse_dir_changed(olddir);
if (olddir != newdir)
- fuse_invalidate_attr(newdir);
+ fuse_dir_changed(newdir);
/* newent will end up negative */
if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent)) {
@@ -932,7 +922,8 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
}
static int fuse_update_get_attr(struct inode *inode, struct file *file,
- struct kstat *stat, unsigned int flags)
+ struct kstat *stat, u32 request_mask,
+ unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
int err = 0;
@@ -942,6 +933,8 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
sync = true;
else if (flags & AT_STATX_DONT_SYNC)
sync = false;
+ else if (request_mask & READ_ONCE(fi->inval_mask))
+ sync = true;
else
sync = time_before64(fi->i_time, get_jiffies_64());
@@ -959,7 +952,9 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
int fuse_update_attributes(struct inode *inode, struct file *file)
{
- return fuse_update_get_attr(inode, file, NULL, 0);
+ /* Do *not* need to get atime for internal purposes */
+ return fuse_update_get_attr(inode, file, NULL,
+ STATX_BASIC_STATS & ~STATX_ATIME, 0);
}
int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
@@ -989,7 +984,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
if (!entry)
goto unlock;
- fuse_invalidate_attr(parent);
+ fuse_dir_changed(parent);
fuse_invalidate_entry(entry);
if (child_nodeid != 0 && d_really_is_positive(entry)) {
@@ -1165,271 +1160,78 @@ static int fuse_permission(struct inode *inode, int mask)
return err;
}
-static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
- struct dir_context *ctx)
-{
- while (nbytes >= FUSE_NAME_OFFSET) {
- struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
- size_t reclen = FUSE_DIRENT_SIZE(dirent);
- if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
- return -EIO;
- if (reclen > nbytes)
- break;
- if (memchr(dirent->name, '/', dirent->namelen) != NULL)
- return -EIO;
-
- if (!dir_emit(ctx, dirent->name, dirent->namelen,
- dirent->ino, dirent->type))
- break;
-
- buf += reclen;
- nbytes -= reclen;
- ctx->pos = dirent->off;
- }
-
- return 0;
-}
-
-static int fuse_direntplus_link(struct file *file,
- struct fuse_direntplus *direntplus,
- u64 attr_version)
-{
- struct fuse_entry_out *o = &direntplus->entry_out;
- struct fuse_dirent *dirent = &direntplus->dirent;
- struct dentry *parent = file->f_path.dentry;
- struct qstr name = QSTR_INIT(dirent->name, dirent->namelen);
- struct dentry *dentry;
- struct dentry *alias;
- struct inode *dir = d_inode(parent);
- struct fuse_conn *fc;
- struct inode *inode;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
-
- if (!o->nodeid) {
- /*
- * Unlike in the case of fuse_lookup, zero nodeid does not mean
- * ENOENT. Instead, it only means the userspace filesystem did
- * not want to return attributes/handle for this entry.
- *
- * So do nothing.
- */
- return 0;
- }
-
- if (name.name[0] == '.') {
- /*
- * We could potentially refresh the attributes of the directory
- * and its parent?
- */
- if (name.len == 1)
- return 0;
- if (name.name[1] == '.' && name.len == 2)
- return 0;
- }
-
- if (invalid_nodeid(o->nodeid))
- return -EIO;
- if (!fuse_valid_type(o->attr.mode))
- return -EIO;
-
- fc = get_fuse_conn(dir);
-
- name.hash = full_name_hash(parent, name.name, name.len);
- dentry = d_lookup(parent, &name);
- if (!dentry) {
-retry:
- dentry = d_alloc_parallel(parent, &name, &wq);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- }
- if (!d_in_lookup(dentry)) {
- struct fuse_inode *fi;
- inode = d_inode(dentry);
- if (!inode ||
- get_node_id(inode) != o->nodeid ||
- ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
- d_invalidate(dentry);
- dput(dentry);
- goto retry;
- }
- if (is_bad_inode(inode)) {
- dput(dentry);
- return -EIO;
- }
-
- fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->nlookup++;
- spin_unlock(&fc->lock);
-
- forget_all_cached_acls(inode);
- fuse_change_attributes(inode, &o->attr,
- entry_attr_timeout(o),
- attr_version);
- /*
- * The other branch comes via fuse_iget()
- * which bumps nlookup inside
- */
- } else {
- inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
- &o->attr, entry_attr_timeout(o),
- attr_version);
- if (!inode)
- inode = ERR_PTR(-ENOMEM);
-
- alias = d_splice_alias(inode, dentry);
- d_lookup_done(dentry);
- if (alias) {
- dput(dentry);
- dentry = alias;
- }
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- }
- if (fc->readdirplus_auto)
- set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
- fuse_change_entry_timeout(dentry, o);
-
- dput(dentry);
- return 0;
-}
-
-static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
- struct dir_context *ctx, u64 attr_version)
+static int fuse_readlink_page(struct inode *inode, struct page *page)
{
- struct fuse_direntplus *direntplus;
- struct fuse_dirent *dirent;
- size_t reclen;
- int over = 0;
- int ret;
-
- while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) {
- direntplus = (struct fuse_direntplus *) buf;
- dirent = &direntplus->dirent;
- reclen = FUSE_DIRENTPLUS_SIZE(direntplus);
-
- if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
- return -EIO;
- if (reclen > nbytes)
- break;
- if (memchr(dirent->name, '/', dirent->namelen) != NULL)
- return -EIO;
-
- if (!over) {
- /* We fill entries into dstbuf only as much as
- it can hold. But we still continue iterating
- over remaining entries to link them. If not,
- we need to send a FORGET for each of those
- which we did not link.
- */
- over = !dir_emit(ctx, dirent->name, dirent->namelen,
- dirent->ino, dirent->type);
- if (!over)
- ctx->pos = dirent->off;
- }
-
- buf += reclen;
- nbytes -= reclen;
-
- ret = fuse_direntplus_link(file, direntplus, attr_version);
- if (ret)
- fuse_force_forget(file, direntplus->entry_out.nodeid);
- }
-
- return 0;
-}
-
-static int fuse_readdir(struct file *file, struct dir_context *ctx)
-{
- int plus, err;
- size_t nbytes;
- struct page *page;
- struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
- u64 attr_version = 0;
- bool locked;
-
- if (is_bad_inode(inode))
- return -EIO;
+ int err;
req = fuse_get_req(fc, 1);
if (IS_ERR(req))
return PTR_ERR(req);
- page = alloc_page(GFP_KERNEL);
- if (!page) {
- fuse_put_request(fc, req);
- return -ENOMEM;
- }
-
- plus = fuse_use_readdirplus(inode, ctx);
+ req->out.page_zeroing = 1;
req->out.argpages = 1;
req->num_pages = 1;
req->pages[0] = page;
- req->page_descs[0].length = PAGE_SIZE;
- if (plus) {
- attr_version = fuse_get_attr_version(fc);
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIRPLUS);
- } else {
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIR);
- }
- locked = fuse_lock_inode(inode);
+ req->page_descs[0].length = PAGE_SIZE - 1;
+ req->in.h.opcode = FUSE_READLINK;
+ req->in.h.nodeid = get_node_id(inode);
+ req->out.argvar = 1;
+ req->out.numargs = 1;
+ req->out.args[0].size = PAGE_SIZE - 1;
fuse_request_send(fc, req);
- fuse_unlock_inode(inode, locked);
- nbytes = req->out.args[0].size;
err = req->out.h.error;
- fuse_put_request(fc, req);
+
if (!err) {
- if (plus) {
- err = parse_dirplusfile(page_address(page), nbytes,
- file, ctx,
- attr_version);
- } else {
- err = parse_dirfile(page_address(page), nbytes, file,
- ctx);
- }
+ char *link = page_address(page);
+ size_t len = req->out.args[0].size;
+
+ BUG_ON(len >= PAGE_SIZE);
+ link[len] = '\0';
}
- __free_page(page);
+ fuse_put_request(fc, req);
fuse_invalidate_atime(inode);
+
return err;
}
-static const char *fuse_get_link(struct dentry *dentry,
- struct inode *inode,
- struct delayed_call *done)
+static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- FUSE_ARGS(args);
- char *link;
- ssize_t ret;
+ struct page *page;
+ int err;
+
+ err = -EIO;
+ if (is_bad_inode(inode))
+ goto out_err;
+ if (fc->cache_symlinks)
+ return page_get_link(dentry, inode, callback);
+
+ err = -ECHILD;
if (!dentry)
- return ERR_PTR(-ECHILD);
+ goto out_err;
- link = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!link)
- return ERR_PTR(-ENOMEM);
+ page = alloc_page(GFP_KERNEL);
+ err = -ENOMEM;
+ if (!page)
+ goto out_err;
- args.in.h.opcode = FUSE_READLINK;
- args.in.h.nodeid = get_node_id(inode);
- args.out.argvar = 1;
- args.out.numargs = 1;
- args.out.args[0].size = PAGE_SIZE - 1;
- args.out.args[0].value = link;
- ret = fuse_simple_request(fc, &args);
- if (ret < 0) {
- kfree(link);
- link = ERR_PTR(ret);
- } else {
- link[ret] = '\0';
- set_delayed_call(done, kfree_link, link);
+ err = fuse_readlink_page(inode, page);
+ if (err) {
+ __free_page(page);
+ goto out_err;
}
- fuse_invalidate_atime(inode);
- return link;
+
+ set_delayed_call(callback, page_put_link, page);
+
+ return page_address(page);
+
+out_err:
+ return ERR_PTR(err);
}
static int fuse_dir_open(struct inode *inode, struct file *file)
@@ -1662,8 +1464,11 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
file = NULL;
}
- if (attr->ia_valid & ATTR_SIZE)
+ if (attr->ia_valid & ATTR_SIZE) {
+ if (WARN_ON(!S_ISREG(inode->i_mode)))
+ return -EIO;
is_truncate = true;
+ }
if (is_truncate) {
fuse_set_nowrite(inode);
@@ -1811,7 +1616,7 @@ static int fuse_getattr(const struct path *path, struct kstat *stat,
if (!fuse_allow_current_process(fc))
return -EACCES;
- return fuse_update_get_attr(inode, NULL, stat, flags);
+ return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
}
static const struct inode_operations fuse_dir_inode_operations = {
@@ -1867,11 +1672,37 @@ void fuse_init_common(struct inode *inode)
void fuse_init_dir(struct inode *inode)
{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
inode->i_op = &fuse_dir_inode_operations;
inode->i_fop = &fuse_dir_operations;
+
+ spin_lock_init(&fi->rdc.lock);
+ fi->rdc.cached = false;
+ fi->rdc.size = 0;
+ fi->rdc.pos = 0;
+ fi->rdc.version = 0;
}
+static int fuse_symlink_readpage(struct file *null, struct page *page)
+{
+ int err = fuse_readlink_page(page->mapping->host, page);
+
+ if (!err)
+ SetPageUptodate(page);
+
+ unlock_page(page);
+
+ return err;
+}
+
+static const struct address_space_operations fuse_symlink_aops = {
+ .readpage = fuse_symlink_readpage,
+};
+
void fuse_init_symlink(struct inode *inode)
{
inode->i_op = &fuse_symlink_inode_operations;
+ inode->i_data.a_ops = &fuse_symlink_aops;
+ inode_nohighmem(inode);
}
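
A hedged sketch of the attribute-invalidation split introduced above: instead of zeroing the whole timeout, fuse_invalidate_attr_mask() records which STATX fields are stale, and fuse_update_get_attr() only forces a GETATTR when a requested field is in that mask or the normal timeout has lapsed. Names here are illustrative; attr_valid_until corresponds to fi->i_time.

#include <linux/jiffies.h>
#include <linux/types.h>

static bool sketch_need_getattr(u32 request_mask, u32 stale_mask,
				u64 attr_valid_until)
{
	if (request_mask & stale_mask)		/* a requested field is stale */
		return true;
	/* otherwise refetch only once the attribute timeout has expired */
	return time_before64(attr_valid_until, get_jiffies_64());
}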
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c9ccd45156dc..cc2121b37bf5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -59,6 +59,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
}
INIT_LIST_HEAD(&ff->write_entry);
+ mutex_init(&ff->readdir.lock);
refcount_set(&ff->count, 1);
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
@@ -73,6 +74,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
void fuse_file_free(struct fuse_file *ff)
{
fuse_request_free(ff->reserved_req);
+ mutex_destroy(&ff->readdir.lock);
kfree(ff);
}
@@ -848,11 +850,11 @@ static int fuse_readpages_fill(void *_data, struct page *page)
fuse_wait_on_page_writeback(inode, page->index);
if (req->num_pages &&
- (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+ (req->num_pages == fc->max_pages ||
(req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
req->pages[req->num_pages - 1]->index + 1 != page->index)) {
- int nr_alloc = min_t(unsigned, data->nr_pages,
- FUSE_MAX_PAGES_PER_REQ);
+ unsigned int nr_alloc = min_t(unsigned int, data->nr_pages,
+ fc->max_pages);
fuse_send_readpages(req, data->file);
if (fc->async_read)
req = fuse_get_req_for_background(fc, nr_alloc);
@@ -887,7 +889,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_data data;
int err;
- int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);
+ unsigned int nr_alloc = min_t(unsigned int, nr_pages, fc->max_pages);
err = -EIO;
if (is_bad_inode(inode))
@@ -1102,12 +1104,13 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
return count > 0 ? count : err;
}
-static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
+static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
+ unsigned int max_pages)
{
- return min_t(unsigned,
+ return min_t(unsigned int,
((pos + len - 1) >> PAGE_SHIFT) -
(pos >> PAGE_SHIFT) + 1,
- FUSE_MAX_PAGES_PER_REQ);
+ max_pages);
}
static ssize_t fuse_perform_write(struct kiocb *iocb,
@@ -1129,7 +1132,8 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
do {
struct fuse_req *req;
ssize_t count;
- unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));
+ unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
+ fc->max_pages);
req = fuse_get_req(fc, nr_pages);
if (IS_ERR(req)) {
@@ -1319,11 +1323,6 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
return ret < 0 ? ret : 0;
}
-static inline int fuse_iter_npages(const struct iov_iter *ii_p)
-{
- return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ);
-}
-
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
loff_t *ppos, int flags)
{
@@ -1343,9 +1342,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
int err = 0;
if (io->async)
- req = fuse_get_req_for_background(fc, fuse_iter_npages(iter));
+ req = fuse_get_req_for_background(fc, iov_iter_npages(iter,
+ fc->max_pages));
else
- req = fuse_get_req(fc, fuse_iter_npages(iter));
+ req = fuse_get_req(fc, iov_iter_npages(iter, fc->max_pages));
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1390,9 +1390,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
fuse_put_request(fc, req);
if (io->async)
req = fuse_get_req_for_background(fc,
- fuse_iter_npages(iter));
+ iov_iter_npages(iter, fc->max_pages));
else
- req = fuse_get_req(fc, fuse_iter_npages(iter));
+ req = fuse_get_req(fc, iov_iter_npages(iter,
+ fc->max_pages));
if (IS_ERR(req))
break;
}
@@ -1418,7 +1419,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
res = fuse_direct_io(io, iter, ppos, 0);
- fuse_invalidate_attr(inode);
+ fuse_invalidate_atime(inode);
return res;
}
@@ -1487,6 +1488,7 @@ __acquires(fc->lock)
struct fuse_inode *fi = get_fuse_inode(req->inode);
struct fuse_write_in *inarg = &req->misc.write.in;
__u64 data_size = req->num_pages * PAGE_SIZE;
+ bool queued;
if (!fc->connected)
goto out_free;
@@ -1502,7 +1504,8 @@ __acquires(fc->lock)
req->in.args[1].size = inarg->size;
fi->writectr++;
- fuse_request_send_background_locked(fc, req);
+ queued = fuse_request_queue_background(fc, req);
+ WARN_ON(!queued);
return;
out_free:
@@ -1819,12 +1822,18 @@ static int fuse_writepages_fill(struct page *page,
is_writeback = fuse_page_is_writeback(inode, page->index);
if (req && req->num_pages &&
- (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+ (is_writeback || req->num_pages == fc->max_pages ||
(req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
fuse_writepages_send(data);
data->req = NULL;
+ } else if (req && req->num_pages == req->max_pages) {
+ if (!fuse_req_realloc_pages(fc, req, GFP_NOFS)) {
+ fuse_writepages_send(data);
+ req = data->req = NULL;
+ }
}
+
err = -ENOMEM;
tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!tmp_page)
@@ -1847,7 +1856,7 @@ static int fuse_writepages_fill(struct page *page,
struct fuse_inode *fi = get_fuse_inode(inode);
err = -ENOMEM;
- req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
+ req = fuse_request_alloc_nofs(FUSE_REQ_INLINE_PAGES);
if (!req) {
__free_page(tmp_page);
goto out_unlock;
@@ -1904,6 +1913,7 @@ static int fuse_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_wb_data data;
int err;
@@ -1916,7 +1926,7 @@ static int fuse_writepages(struct address_space *mapping,
data.ff = NULL;
err = -ENOMEM;
- data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
+ data.orig_pages = kcalloc(fc->max_pages,
sizeof(struct page *),
GFP_NOFS);
if (!data.orig_pages)
@@ -2387,10 +2397,11 @@ static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
}
/* Make sure iov_length() won't overflow */
-static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+static int fuse_verify_ioctl_iov(struct fuse_conn *fc, struct iovec *iov,
+ size_t count)
{
size_t n;
- u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+ u32 max = fc->max_pages << PAGE_SHIFT;
for (n = 0; n < count; n++, iov++) {
if (iov->iov_len > (size_t) max)
@@ -2514,7 +2525,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
err = -ENOMEM;
- pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
+ pages = kcalloc(fc->max_pages, sizeof(pages[0]), GFP_KERNEL);
iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
if (!pages || !iov_page)
goto out;
@@ -2553,7 +2564,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
/* make sure there are enough buffer pages and init request with them */
err = -ENOMEM;
- if (max_pages > FUSE_MAX_PAGES_PER_REQ)
+ if (max_pages > fc->max_pages)
goto out;
while (num_pages < max_pages) {
pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -2640,11 +2651,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
in_iov = iov_page;
out_iov = in_iov + in_iovs;
- err = fuse_verify_ioctl_iov(in_iov, in_iovs);
+ err = fuse_verify_ioctl_iov(fc, in_iov, in_iovs);
if (err)
goto out;
- err = fuse_verify_ioctl_iov(out_iov, out_iovs);
+ err = fuse_verify_ioctl_iov(fc, out_iov, out_iovs);
if (err)
goto out;
@@ -2835,9 +2846,9 @@ static void fuse_do_truncate(struct file *file)
fuse_do_setattr(file_dentry(file), &attr, file);
}
-static inline loff_t fuse_round_up(loff_t off)
+static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
- return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
+ return round_up(off, fc->max_pages << PAGE_SHIFT);
}
static ssize_t
@@ -2866,7 +2877,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
if (offset >= i_size)
return 0;
- iov_iter_truncate(iter, fuse_round_up(i_size - offset));
+ iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
count = iov_iter_count(iter);
}
@@ -3011,6 +3022,82 @@ out:
return err;
}
+static ssize_t fuse_copy_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ size_t len, unsigned int flags)
+{
+ struct fuse_file *ff_in = file_in->private_data;
+ struct fuse_file *ff_out = file_out->private_data;
+ struct inode *inode_out = file_inode(file_out);
+ struct fuse_inode *fi_out = get_fuse_inode(inode_out);
+ struct fuse_conn *fc = ff_in->fc;
+ FUSE_ARGS(args);
+ struct fuse_copy_file_range_in inarg = {
+ .fh_in = ff_in->fh,
+ .off_in = pos_in,
+ .nodeid_out = ff_out->nodeid,
+ .fh_out = ff_out->fh,
+ .off_out = pos_out,
+ .len = len,
+ .flags = flags
+ };
+ struct fuse_write_out outarg;
+ ssize_t err;
+ /* mark unstable when write-back is not used, and file_out gets
+ * extended */
+ bool is_unstable = (!fc->writeback_cache) &&
+ ((pos_out + len) > inode_out->i_size);
+
+ if (fc->no_copy_file_range)
+ return -EOPNOTSUPP;
+
+ inode_lock(inode_out);
+
+ if (fc->writeback_cache) {
+ err = filemap_write_and_wait_range(inode_out->i_mapping,
+ pos_out, pos_out + len);
+ if (err)
+ goto out;
+
+ fuse_sync_writes(inode_out);
+ }
+
+ if (is_unstable)
+ set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
+
+ args.in.h.opcode = FUSE_COPY_FILE_RANGE;
+ args.in.h.nodeid = ff_in->nodeid;
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
+ if (err == -ENOSYS) {
+ fc->no_copy_file_range = 1;
+ err = -EOPNOTSUPP;
+ }
+ if (err)
+ goto out;
+
+ if (fc->writeback_cache) {
+ fuse_write_update_size(inode_out, pos_out + outarg.size);
+ file_update_time(file_out);
+ }
+
+ fuse_invalidate_attr(inode_out);
+
+ err = outarg.size;
+out:
+ if (is_unstable)
+ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
+
+ inode_unlock(inode_out);
+
+ return err;
+}
+
static const struct file_operations fuse_file_operations = {
.llseek = fuse_file_llseek,
.read_iter = fuse_file_read_iter,
@@ -3027,6 +3114,7 @@ static const struct file_operations fuse_file_operations = {
.compat_ioctl = fuse_file_compat_ioctl,
.poll = fuse_file_poll,
.fallocate = fuse_file_fallocate,
+ .copy_file_range = fuse_copy_file_range,
};
static const struct file_operations fuse_direct_io_file_operations = {
@@ -3062,6 +3150,14 @@ static const struct address_space_operations fuse_file_aops = {
void fuse_init_file_inode(struct inode *inode)
{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
inode->i_fop = &fuse_file_operations;
inode->i_data.a_ops = &fuse_file_aops;
+
+ INIT_LIST_HEAD(&fi->write_files);
+ INIT_LIST_HEAD(&fi->queued_writes);
+ fi->writectr = 0;
+ init_waitqueue_head(&fi->page_waitq);
+ INIT_LIST_HEAD(&fi->writepages);
}
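
From user space, the new .copy_file_range hook above is reached through the ordinary copy_file_range(2) system call. A minimal usage sketch, assuming a 64-bit off_t and a FUSE server that implements FUSE_COPY_FILE_RANGE (otherwise the call fails with EOPNOTSUPP, matching the fallback in the hunk above):

#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
#include <unistd.h>
#include <stdio.h>

/* Copy len bytes from the start of fd_in to the start of fd_out. */
static int sketch_copy(int fd_in, int fd_out, size_t len)
{
	off_t off_in = 0, off_out = 0;
	ssize_t n;

	n = copy_file_range(fd_in, &off_in, fd_out, &off_out, len, 0);
	if (n < 0) {
		perror("copy_file_range");
		return -1;
	}
	printf("copied %zd bytes\n", n);
	return 0;
}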
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f78e9614bb5f..e9f712e81c7d 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -28,8 +28,11 @@
#include <linux/refcount.h>
#include <linux/user_namespace.h>
-/** Max number of pages that can be used in a single read request */
-#define FUSE_MAX_PAGES_PER_REQ 32
+/** Default max number of pages that can be used in a single read request */
+#define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32
+
+/** Maximum of max_pages received in init_out */
+#define FUSE_MAX_MAX_PAGES 256
/** Bias for fi->writectr, meaning new writepages must not be sent */
#define FUSE_NOWRITE INT_MIN
@@ -77,6 +80,9 @@ struct fuse_inode {
/** Time in jiffies until the file attributes are valid */
u64 i_time;
+ /* Which attributes are invalid */
+ u32 inval_mask;
+
/** The sticky bit in inode->i_mode may have been removed, so
preserve the original mode */
umode_t orig_i_mode;
@@ -87,21 +93,51 @@ struct fuse_inode {
/** Version of last attribute change */
u64 attr_version;
- /** Files usable in writepage. Protected by fc->lock */
- struct list_head write_files;
+ union {
+ /* Write related fields (regular file only) */
+ struct {
+ /* Files usable in writepage. Protected by fc->lock */
+ struct list_head write_files;
+
+ /* Writepages pending on truncate or fsync */
+ struct list_head queued_writes;
- /** Writepages pending on truncate or fsync */
- struct list_head queued_writes;
+ /* Number of sent writes, a negative bias
+ * (FUSE_NOWRITE) means more writes are blocked */
+ int writectr;
+
+ /* Waitq for writepage completion */
+ wait_queue_head_t page_waitq;
+
+ /* List of writepage requests (pending or sent) */
+ struct list_head writepages;
+ };
+
+ /* readdir cache (directory only) */
+ struct {
+ /* true if fully cached */
+ bool cached;
- /** Number of sent writes, a negative bias (FUSE_NOWRITE)
- * means more writes are blocked */
- int writectr;
+ /* size of cache */
+ loff_t size;
- /** Waitq for writepage completion */
- wait_queue_head_t page_waitq;
+ /* position at end of cache (position of next entry) */
+ loff_t pos;
- /** List of writepage requestst (pending or sent) */
- struct list_head writepages;
+ /* version of the cache */
+ u64 version;
+
+ /* modification time of directory when cache was
+ * started */
+ struct timespec64 mtime;
+
+ /* iversion of directory when cache was started */
+ u64 iversion;
+
+ /* protects above fields */
+ spinlock_t lock;
+ } rdc;
+ };
/** Miscellaneous bits describing inode state */
unsigned long state;
@@ -148,6 +184,25 @@ struct fuse_file {
/** Entry on inode's write_files list */
struct list_head write_entry;
+ /* Readdir related */
+ struct {
+ /*
+ * Protects below fields against (crazy) parallel readdir on
+ * same open file. Uncontended in the normal case.
+ */
+ struct mutex lock;
+
+ /* Dir stream position */
+ loff_t pos;
+
+ /* Offset in cache */
+ loff_t cache_off;
+
+ /* Version of cache we are reading */
+ u64 version;
+
+ } readdir;
+
/** RB node to be linked on fuse_conn->polled_files */
struct rb_node polled_node;
@@ -311,9 +366,6 @@ struct fuse_req {
/** refcount */
refcount_t count;
- /** Unique ID for the interrupt request */
- u64 intr_unique;
-
/* Request flags, updated with test/set/clear_bit() */
unsigned long flags;
@@ -411,6 +463,9 @@ struct fuse_iqueue {
struct fasync_struct *fasync;
};
+#define FUSE_PQ_HASH_BITS 8
+#define FUSE_PQ_HASH_SIZE (1 << FUSE_PQ_HASH_BITS)
+
struct fuse_pqueue {
/** Connection established */
unsigned connected;
@@ -418,8 +473,8 @@ struct fuse_pqueue {
/** Lock protecting accessess to members of this structure */
spinlock_t lock;
- /** The list of requests being processed */
- struct list_head processing;
+ /** Hash table of requests being processed */
+ struct list_head *processing;
/** The list of requests under I/O */
struct list_head io;
@@ -476,6 +531,9 @@ struct fuse_conn {
/** Maximum write size */
unsigned max_write;
+ /** Maximum number of pages that can be used in a single request */
+ unsigned int max_pages;
+
/** Input queue */
struct fuse_iqueue iq;
@@ -500,6 +558,10 @@ struct fuse_conn {
/** The list of background requests set aside for later queuing */
struct list_head bg_queue;
+ /** Protects: max_background, congestion_threshold, num_background,
+ * active_background, bg_queue, blocked */
+ spinlock_t bg_lock;
+
/** Flag indicating that INIT reply has been received. Allocating
* any fuse request will be suspended until the flag is set */
int initialized;
@@ -551,6 +613,9 @@ struct fuse_conn {
/** handle fs handles killing suid/sgid/cap on write/chown/trunc */
unsigned handle_killpriv:1;
+ /** cache READLINK responses in page cache */
+ unsigned cache_symlinks:1;
+
/*
* The following bitfields are only for optimization purposes
* and hence races in setting them will not cause malfunction
@@ -637,6 +702,9 @@ struct fuse_conn {
/** Allow other than the mounter user to access the filesystem ? */
unsigned allow_other:1;
+ /** Does the filesystem support copy_file_range? */
+ unsigned no_copy_file_range:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -697,6 +765,11 @@ static inline u64 get_node_id(struct inode *inode)
return get_fuse_inode(inode)->nodeid;
}
+static inline int invalid_nodeid(u64 nodeid)
+{
+ return !nodeid || nodeid == FUSE_ROOT_ID;
+}
+
/** Device operations */
extern const struct file_operations fuse_dev_operations;
@@ -812,6 +885,10 @@ struct fuse_req *fuse_request_alloc(unsigned npages);
struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
+bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
+ gfp_t flags);
+
+
/**
* Free a request
*/
@@ -856,9 +933,7 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args);
* Send a request in the background
*/
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
-
-void fuse_request_send_background_locked(struct fuse_conn *fc,
- struct fuse_req *req);
+bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req);
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
@@ -873,6 +948,9 @@ void fuse_invalidate_entry_cache(struct dentry *entry);
void fuse_invalidate_atime(struct inode *inode);
+u64 entry_attr_timeout(struct fuse_entry_out *o);
+void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);
+
/**
* Acquire reference to fuse_conn
*/
@@ -992,4 +1070,8 @@ struct posix_acl;
struct posix_acl *fuse_get_acl(struct inode *inode, int type);
int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+
+/* readdir.c */
+int fuse_readdir(struct file *file, struct dir_context *ctx);
+
#endif /* _FS_FUSE_I_H */
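
A reduced sketch (names illustrative, not part of the patch) of the union layout adopted above: write-back state only exists for regular files and the readdir cache only for directories, so the two sets of fields can share storage, as long as each inode type is initialized by its own helper (fuse_init_file_inode() and fuse_init_dir() in this series).

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct sketch_inode_state {
	union {
		struct {			/* regular files only */
			struct list_head write_files;
			int writectr;
		};
		struct {			/* directories only */
			bool cached;
			loff_t size;
			spinlock_t lock;
		} rdc;
	};
};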
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index db9e60b7eb69..0b94b23b02d4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -90,16 +90,12 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
fi = get_fuse_inode(inode);
fi->i_time = 0;
+ fi->inval_mask = 0;
fi->nodeid = 0;
fi->nlookup = 0;
fi->attr_version = 0;
- fi->writectr = 0;
fi->orig_ino = 0;
fi->state = 0;
- INIT_LIST_HEAD(&fi->write_files);
- INIT_LIST_HEAD(&fi->queued_writes);
- INIT_LIST_HEAD(&fi->writepages);
- init_waitqueue_head(&fi->page_waitq);
mutex_init(&fi->mutex);
fi->forget = fuse_alloc_forget();
if (!fi->forget) {
@@ -119,8 +115,10 @@ static void fuse_i_callback(struct rcu_head *head)
static void fuse_destroy_inode(struct inode *inode)
{
struct fuse_inode *fi = get_fuse_inode(inode);
- BUG_ON(!list_empty(&fi->write_files));
- BUG_ON(!list_empty(&fi->queued_writes));
+ if (S_ISREG(inode->i_mode)) {
+ WARN_ON(!list_empty(&fi->write_files));
+ WARN_ON(!list_empty(&fi->queued_writes));
+ }
mutex_destroy(&fi->mutex);
kfree(fi->forget);
call_rcu(&inode->i_rcu, fuse_i_callback);
@@ -167,6 +165,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
fi->attr_version = ++fc->attr_version;
fi->i_time = attr_valid;
+ WRITE_ONCE(fi->inval_mask, 0);
inode->i_ino = fuse_squash_ino(attr->ino);
inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -594,9 +593,11 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq)
static void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
- memset(fpq, 0, sizeof(struct fuse_pqueue));
+ unsigned int i;
+
spin_lock_init(&fpq->lock);
- INIT_LIST_HEAD(&fpq->processing);
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&fpq->processing[i]);
INIT_LIST_HEAD(&fpq->io);
fpq->connected = 1;
}
@@ -605,6 +606,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
{
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
+ spin_lock_init(&fc->bg_lock);
init_rwsem(&fc->killsb);
refcount_set(&fc->count, 1);
atomic_set(&fc->dev_count, 1);
@@ -852,6 +854,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
sanitize_global_limit(&max_user_bgreq);
sanitize_global_limit(&max_user_congthresh);
+ spin_lock(&fc->bg_lock);
if (arg->max_background) {
fc->max_background = arg->max_background;
@@ -865,6 +868,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
fc->congestion_threshold > max_user_congthresh)
fc->congestion_threshold = max_user_congthresh;
}
+ spin_unlock(&fc->bg_lock);
}
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
@@ -924,8 +928,15 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->posix_acl = 1;
fc->sb->s_xattr = fuse_acl_xattr_handlers;
}
+ if (arg->flags & FUSE_CACHE_SYMLINKS)
+ fc->cache_symlinks = 1;
if (arg->flags & FUSE_ABORT_ERROR)
fc->abort_err = 1;
+ if (arg->flags & FUSE_MAX_PAGES) {
+ fc->max_pages =
+ min_t(unsigned int, FUSE_MAX_MAX_PAGES,
+ max_t(unsigned int, arg->max_pages, 1));
+ }
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
@@ -957,7 +968,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
- FUSE_ABORT_ERROR;
+ FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -1022,17 +1033,26 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
{
struct fuse_dev *fud;
+ struct list_head *pq;
fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
- if (fud) {
- fud->fc = fuse_conn_get(fc);
- fuse_pqueue_init(&fud->pq);
+ if (!fud)
+ return NULL;
- spin_lock(&fc->lock);
- list_add_tail(&fud->entry, &fc->devices);
- spin_unlock(&fc->lock);
+ pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
+ if (!pq) {
+ kfree(fud);
+ return NULL;
}
+ fud->pq.processing = pq;
+ fud->fc = fuse_conn_get(fc);
+ fuse_pqueue_init(&fud->pq);
+
+ spin_lock(&fc->lock);
+ list_add_tail(&fud->entry, &fc->devices);
+ spin_unlock(&fc->lock);
+
return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);
@@ -1141,6 +1161,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fc->user_id = d.user_id;
fc->group_id = d.group_id;
fc->max_read = max_t(unsigned, 4096, d.max_read);
+ fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
/* Used by get_root_inode() */
sb->s_fs_info = fc;
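
A sketch of the max_pages negotiation shown above: the value a server advertises in INIT is only honoured when it also sets FUSE_MAX_PAGES, and it is clamped to the range [1, FUSE_MAX_MAX_PAGES]; otherwise the connection keeps the FUSE_DEFAULT_MAX_PAGES_PER_REQ value set at mount time.

static unsigned int sketch_clamp_max_pages(unsigned int server_max_pages)
{
	const unsigned int lo = 1;
	const unsigned int hi = 256;	/* FUSE_MAX_MAX_PAGES */

	if (server_max_pages < lo)
		return lo;
	if (server_max_pages > hi)
		return hi;
	return server_max_pages;
}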
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
new file mode 100644
index 000000000000..ab18b78f4755
--- /dev/null
+++ b/fs/fuse/readdir.c
@@ -0,0 +1,569 @@
+/*
+ FUSE: Filesystem in Userspace
+ Copyright (C) 2001-2018 Miklos Szeredi <miklos@szeredi.hu>
+
+ This program can be distributed under the terms of the GNU GPL.
+ See the file COPYING.
+*/
+
+
+#include "fuse_i.h"
+#include <linux/iversion.h>
+#include <linux/posix_acl.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+
+static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
+{
+ struct fuse_conn *fc = get_fuse_conn(dir);
+ struct fuse_inode *fi = get_fuse_inode(dir);
+
+ if (!fc->do_readdirplus)
+ return false;
+ if (!fc->readdirplus_auto)
+ return true;
+ if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state))
+ return true;
+ if (ctx->pos == 0)
+ return true;
+ return false;
+}
+
+static void fuse_add_dirent_to_cache(struct file *file,
+ struct fuse_dirent *dirent, loff_t pos)
+{
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
+ size_t reclen = FUSE_DIRENT_SIZE(dirent);
+ pgoff_t index;
+ struct page *page;
+ loff_t size;
+ u64 version;
+ unsigned int offset;
+ void *addr;
+
+ spin_lock(&fi->rdc.lock);
+ /*
+ * Is the cache already complete? Or does this entry not belong at
+ * the end of the cache?
+ */
+ if (fi->rdc.cached || pos != fi->rdc.pos) {
+ spin_unlock(&fi->rdc.lock);
+ return;
+ }
+ version = fi->rdc.version;
+ size = fi->rdc.size;
+ offset = size & ~PAGE_MASK;
+ index = size >> PAGE_SHIFT;
+ /* Dirent doesn't fit in current page? Jump to next page. */
+ if (offset + reclen > PAGE_SIZE) {
+ index++;
+ offset = 0;
+ }
+ spin_unlock(&fi->rdc.lock);
+
+ if (offset) {
+ page = find_lock_page(file->f_mapping, index);
+ } else {
+ page = find_or_create_page(file->f_mapping, index,
+ mapping_gfp_mask(file->f_mapping));
+ }
+ if (!page)
+ return;
+
+ spin_lock(&fi->rdc.lock);
+ /* Raced with another readdir */
+ if (fi->rdc.version != version || fi->rdc.size != size ||
+ WARN_ON(fi->rdc.pos != pos))
+ goto unlock;
+
+ addr = kmap_atomic(page);
+ if (!offset)
+ clear_page(addr);
+ memcpy(addr + offset, dirent, reclen);
+ kunmap_atomic(addr);
+ fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen;
+ fi->rdc.pos = dirent->off;
+unlock:
+ spin_unlock(&fi->rdc.lock);
+ unlock_page(page);
+ put_page(page);
+}
+
+static void fuse_readdir_cache_end(struct file *file, loff_t pos)
+{
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
+ loff_t end;
+
+ spin_lock(&fi->rdc.lock);
+ /* does cache end position match current position? */
+ if (fi->rdc.pos != pos) {
+ spin_unlock(&fi->rdc.lock);
+ return;
+ }
+
+ fi->rdc.cached = true;
+ end = ALIGN(fi->rdc.size, PAGE_SIZE);
+ spin_unlock(&fi->rdc.lock);
+
+ /* truncate unused tail of cache */
+ truncate_inode_pages(file->f_mapping, end);
+}
+
+static bool fuse_emit(struct file *file, struct dir_context *ctx,
+ struct fuse_dirent *dirent)
+{
+ struct fuse_file *ff = file->private_data;
+
+ if (ff->open_flags & FOPEN_CACHE_DIR)
+ fuse_add_dirent_to_cache(file, dirent, ctx->pos);
+
+ return dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino,
+ dirent->type);
+}
+
+static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
+ struct dir_context *ctx)
+{
+ while (nbytes >= FUSE_NAME_OFFSET) {
+ struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
+ size_t reclen = FUSE_DIRENT_SIZE(dirent);
+ if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
+ return -EIO;
+ if (reclen > nbytes)
+ break;
+ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
+ return -EIO;
+
+ if (!fuse_emit(file, ctx, dirent))
+ break;
+
+ buf += reclen;
+ nbytes -= reclen;
+ ctx->pos = dirent->off;
+ }
+
+ return 0;
+}
+
+static int fuse_direntplus_link(struct file *file,
+ struct fuse_direntplus *direntplus,
+ u64 attr_version)
+{
+ struct fuse_entry_out *o = &direntplus->entry_out;
+ struct fuse_dirent *dirent = &direntplus->dirent;
+ struct dentry *parent = file->f_path.dentry;
+ struct qstr name = QSTR_INIT(dirent->name, dirent->namelen);
+ struct dentry *dentry;
+ struct dentry *alias;
+ struct inode *dir = d_inode(parent);
+ struct fuse_conn *fc;
+ struct inode *inode;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ if (!o->nodeid) {
+ /*
+ * Unlike in the case of fuse_lookup, zero nodeid does not mean
+ * ENOENT. Instead, it only means the userspace filesystem did
+ * not want to return attributes/handle for this entry.
+ *
+ * So do nothing.
+ */
+ return 0;
+ }
+
+ if (name.name[0] == '.') {
+ /*
+ * We could potentially refresh the attributes of the directory
+ * and its parent?
+ */
+ if (name.len == 1)
+ return 0;
+ if (name.name[1] == '.' && name.len == 2)
+ return 0;
+ }
+
+ if (invalid_nodeid(o->nodeid))
+ return -EIO;
+ if (!fuse_valid_type(o->attr.mode))
+ return -EIO;
+
+ fc = get_fuse_conn(dir);
+
+ name.hash = full_name_hash(parent, name.name, name.len);
+ dentry = d_lookup(parent, &name);
+ if (!dentry) {
+retry:
+ dentry = d_alloc_parallel(parent, &name, &wq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ }
+ if (!d_in_lookup(dentry)) {
+ struct fuse_inode *fi;
+ inode = d_inode(dentry);
+ if (!inode ||
+ get_node_id(inode) != o->nodeid ||
+ ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
+ d_invalidate(dentry);
+ dput(dentry);
+ goto retry;
+ }
+ if (is_bad_inode(inode)) {
+ dput(dentry);
+ return -EIO;
+ }
+
+ fi = get_fuse_inode(inode);
+ spin_lock(&fc->lock);
+ fi->nlookup++;
+ spin_unlock(&fc->lock);
+
+ forget_all_cached_acls(inode);
+ fuse_change_attributes(inode, &o->attr,
+ entry_attr_timeout(o),
+ attr_version);
+ /*
+ * The other branch comes via fuse_iget()
+ * which bumps nlookup inside
+ */
+ } else {
+ inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
+ &o->attr, entry_attr_timeout(o),
+ attr_version);
+ if (!inode)
+ inode = ERR_PTR(-ENOMEM);
+
+ alias = d_splice_alias(inode, dentry);
+ d_lookup_done(dentry);
+ if (alias) {
+ dput(dentry);
+ dentry = alias;
+ }
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ }
+ if (fc->readdirplus_auto)
+ set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
+ fuse_change_entry_timeout(dentry, o);
+
+ dput(dentry);
+ return 0;
+}
+
+static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
+ struct dir_context *ctx, u64 attr_version)
+{
+ struct fuse_direntplus *direntplus;
+ struct fuse_dirent *dirent;
+ size_t reclen;
+ int over = 0;
+ int ret;
+
+ while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) {
+ direntplus = (struct fuse_direntplus *) buf;
+ dirent = &direntplus->dirent;
+ reclen = FUSE_DIRENTPLUS_SIZE(direntplus);
+
+ if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
+ return -EIO;
+ if (reclen > nbytes)
+ break;
+ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
+ return -EIO;
+
+ if (!over) {
+ /* We fill entries into dstbuf only as much as
+ it can hold. But we still continue iterating
+ over remaining entries to link them. If not,
+ we need to send a FORGET for each of those
+ which we did not link.
+ */
+ over = !fuse_emit(file, ctx, dirent);
+ if (!over)
+ ctx->pos = dirent->off;
+ }
+
+ buf += reclen;
+ nbytes -= reclen;
+
+ ret = fuse_direntplus_link(file, direntplus, attr_version);
+ if (ret)
+ fuse_force_forget(file, direntplus->entry_out.nodeid);
+ }
+
+ return 0;
+}
+
+static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
+{
+ int plus, err;
+ size_t nbytes;
+ struct page *page;
+ struct inode *inode = file_inode(file);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_req *req;
+ u64 attr_version = 0;
+ bool locked;
+
+ req = fuse_get_req(fc, 1);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ fuse_put_request(fc, req);
+ return -ENOMEM;
+ }
+
+ plus = fuse_use_readdirplus(inode, ctx);
+ req->out.argpages = 1;
+ req->num_pages = 1;
+ req->pages[0] = page;
+ req->page_descs[0].length = PAGE_SIZE;
+ if (plus) {
+ attr_version = fuse_get_attr_version(fc);
+ fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIRPLUS);
+ } else {
+ fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIR);
+ }
+ locked = fuse_lock_inode(inode);
+ fuse_request_send(fc, req);
+ fuse_unlock_inode(inode, locked);
+ nbytes = req->out.args[0].size;
+ err = req->out.h.error;
+ fuse_put_request(fc, req);
+ if (!err) {
+ if (!nbytes) {
+ struct fuse_file *ff = file->private_data;
+
+ if (ff->open_flags & FOPEN_CACHE_DIR)
+ fuse_readdir_cache_end(file, ctx->pos);
+ } else if (plus) {
+ err = parse_dirplusfile(page_address(page), nbytes,
+ file, ctx, attr_version);
+ } else {
+ err = parse_dirfile(page_address(page), nbytes, file,
+ ctx);
+ }
+ }
+
+ __free_page(page);
+ fuse_invalidate_atime(inode);
+ return err;
+}
+
+enum fuse_parse_result {
+ FOUND_ERR = -1,
+ FOUND_NONE = 0,
+ FOUND_SOME,
+ FOUND_ALL,
+};
+
+static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff,
+ void *addr, unsigned int size,
+ struct dir_context *ctx)
+{
+ unsigned int offset = ff->readdir.cache_off & ~PAGE_MASK;
+ enum fuse_parse_result res = FOUND_NONE;
+
+ WARN_ON(offset >= size);
+
+ for (;;) {
+ struct fuse_dirent *dirent = addr + offset;
+ unsigned int nbytes = size - offset;
+ size_t reclen = FUSE_DIRENT_SIZE(dirent);
+
+ if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen)
+ break;
+
+ if (WARN_ON(dirent->namelen > FUSE_NAME_MAX))
+ return FOUND_ERR;
+ if (WARN_ON(reclen > nbytes))
+ return FOUND_ERR;
+ if (WARN_ON(memchr(dirent->name, '/', dirent->namelen) != NULL))
+ return FOUND_ERR;
+
+ if (ff->readdir.pos == ctx->pos) {
+ res = FOUND_SOME;
+ if (!dir_emit(ctx, dirent->name, dirent->namelen,
+ dirent->ino, dirent->type))
+ return FOUND_ALL;
+ ctx->pos = dirent->off;
+ }
+ ff->readdir.pos = dirent->off;
+ ff->readdir.cache_off += reclen;
+
+ offset += reclen;
+ }
+
+ return res;
+}
+
+static void fuse_rdc_reset(struct inode *inode)
+{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ fi->rdc.cached = false;
+ fi->rdc.version++;
+ fi->rdc.size = 0;
+ fi->rdc.pos = 0;
+}
+
+#define UNCACHED 1
+
+static int fuse_readdir_cached(struct file *file, struct dir_context *ctx)
+{
+ struct fuse_file *ff = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+ enum fuse_parse_result res;
+ pgoff_t index;
+ unsigned int size;
+ struct page *page;
+ void *addr;
+
+ /* Seeked? If so, reset the cache stream */
+ if (ff->readdir.pos != ctx->pos) {
+ ff->readdir.pos = 0;
+ ff->readdir.cache_off = 0;
+ }
+
+ /*
+ * We're just about to start reading into the cache or reading the
+ * cache; both cases require an up-to-date mtime value.
+ */
+ if (!ctx->pos && fc->auto_inval_data) {
+ int err = fuse_update_attributes(inode, file);
+
+ if (err)
+ return err;
+ }
+
+retry:
+ spin_lock(&fi->rdc.lock);
+retry_locked:
+ if (!fi->rdc.cached) {
+ /* Starting cache? Set cache mtime. */
+ if (!ctx->pos && !fi->rdc.size) {
+ fi->rdc.mtime = inode->i_mtime;
+ fi->rdc.iversion = inode_query_iversion(inode);
+ }
+ spin_unlock(&fi->rdc.lock);
+ return UNCACHED;
+ }
+ /*
+ * When at the beginning of the directory (i.e. just after opendir(3) or
+ * rewinddir(3)), we need to check whether the directory contents have
+ * changed, and reset the cache if so.
+ */
+ if (!ctx->pos) {
+ if (inode_peek_iversion(inode) != fi->rdc.iversion ||
+ !timespec64_equal(&fi->rdc.mtime, &inode->i_mtime)) {
+ fuse_rdc_reset(inode);
+ goto retry_locked;
+ }
+ }
+
+ /*
+ * If cache version changed since the last getdents() call, then reset
+ * the cache stream.
+ */
+ if (ff->readdir.version != fi->rdc.version) {
+ ff->readdir.pos = 0;
+ ff->readdir.cache_off = 0;
+ }
+ /*
+ * If at the beginning of the cache, then reset version to
+ * current.
+ */
+ if (ff->readdir.pos == 0)
+ ff->readdir.version = fi->rdc.version;
+
+ WARN_ON(fi->rdc.size < ff->readdir.cache_off);
+
+ index = ff->readdir.cache_off >> PAGE_SHIFT;
+
+ if (index == (fi->rdc.size >> PAGE_SHIFT))
+ size = fi->rdc.size & ~PAGE_MASK;
+ else
+ size = PAGE_SIZE;
+ spin_unlock(&fi->rdc.lock);
+
+ /* EOF? */
+ if ((ff->readdir.cache_off & ~PAGE_MASK) == size)
+ return 0;
+
+ page = find_get_page_flags(file->f_mapping, index,
+ FGP_ACCESSED | FGP_LOCK);
+ spin_lock(&fi->rdc.lock);
+ if (!page) {
+ /*
+ * Uh-oh: page gone missing, cache is useless
+ */
+ if (fi->rdc.version == ff->readdir.version)
+ fuse_rdc_reset(inode);
+ goto retry_locked;
+ }
+
+ /* Make sure it's still the same version after getting the page. */
+ if (ff->readdir.version != fi->rdc.version) {
+ spin_unlock(&fi->rdc.lock);
+ unlock_page(page);
+ put_page(page);
+ goto retry;
+ }
+ spin_unlock(&fi->rdc.lock);
+
+ /*
+ * Contents of the page are now protected against changing by holding
+ * the page lock.
+ */
+ addr = kmap(page);
+ res = fuse_parse_cache(ff, addr, size, ctx);
+ kunmap(page);
+ unlock_page(page);
+ put_page(page);
+
+ if (res == FOUND_ERR)
+ return -EIO;
+
+ if (res == FOUND_ALL)
+ return 0;
+
+ if (size == PAGE_SIZE) {
+ /* We hit end of page: skip to next page. */
+ ff->readdir.cache_off = ALIGN(ff->readdir.cache_off, PAGE_SIZE);
+ goto retry;
+ }
+
+ /*
+ * End of cache reached. If found position, then we are done, otherwise
+ * need to fall back to uncached, since the position we were looking for
+ * wasn't in the cache.
+ */
+ return res == FOUND_SOME ? 0 : UNCACHED;
+}
+
+int fuse_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct fuse_file *ff = file->private_data;
+ struct inode *inode = file_inode(file);
+ int err;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
+ mutex_lock(&ff->readdir.lock);
+
+ err = UNCACHED;
+ if (ff->open_flags & FOPEN_CACHE_DIR)
+ err = fuse_readdir_cached(file, ctx);
+ if (err == UNCACHED)
+ err = fuse_readdir_uncached(file, ctx);
+
+ mutex_unlock(&ff->readdir.lock);
+
+ return err;
+}
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 31e8270d0b26..8afbb35559b9 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -366,7 +366,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
pgoff_t done_index;
int cycled;
int range_whole = 0;
- int tag;
+ xa_mark_t tag;
pagevec_init(&pvec);
if (wbc->range_cyclic) {
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 84544a4f012d..a683d9b27d76 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1566,7 +1566,7 @@ more_rgrps:
continue;
}
if (bstart) {
- __gfs2_free_blocks(ip, bstart, (u32)blen, meta);
+ __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
(*btotal) += blen;
gfs2_add_inode_blocks(&ip->i_inode, -blen);
}
@@ -1574,7 +1574,7 @@ more_rgrps:
blen = 1;
}
if (bstart) {
- __gfs2_free_blocks(ip, bstart, (u32)blen, meta);
+ __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
(*btotal) += blen;
gfs2_add_inode_blocks(&ip->i_inode, -blen);
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index e37002560c11..daa14ab4e31b 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -506,7 +506,8 @@ static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
* For now the most important thing is to check that the various sizes
* are correct.
*/
-static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
+static int gfs2_check_dirent(struct gfs2_sbd *sdp,
+ struct gfs2_dirent *dent, unsigned int offset,
unsigned int size, unsigned int len, int first)
{
const char *msg = "gfs2_dirent too small";
@@ -528,12 +529,12 @@ static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
goto error;
return 0;
error:
- pr_warn("%s: %s (%s)\n",
+ fs_warn(sdp, "%s: %s (%s)\n",
__func__, msg, first ? "first in block" : "not first in block");
return -EIO;
}
-static int gfs2_dirent_offset(const void *buf)
+static int gfs2_dirent_offset(struct gfs2_sbd *sdp, const void *buf)
{
const struct gfs2_meta_header *h = buf;
int offset;
@@ -552,7 +553,8 @@ static int gfs2_dirent_offset(const void *buf)
}
return offset;
wrong_type:
- pr_warn("%s: wrong block type %u\n", __func__, be32_to_cpu(h->mh_type));
+ fs_warn(sdp, "%s: wrong block type %u\n", __func__,
+ be32_to_cpu(h->mh_type));
return -1;
}
@@ -566,7 +568,7 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
unsigned size;
int ret = 0;
- ret = gfs2_dirent_offset(buf);
+ ret = gfs2_dirent_offset(GFS2_SB(inode), buf);
if (ret < 0)
goto consist_inode;
@@ -574,7 +576,7 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
prev = NULL;
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
- if (gfs2_check_dirent(dent, offset, size, len, 1))
+ if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1))
goto consist_inode;
do {
ret = scan(dent, name, opaque);
@@ -586,7 +588,8 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
prev = dent;
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
- if (gfs2_check_dirent(dent, offset, size, len, 0))
+ if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size,
+ len, 0))
goto consist_inode;
} while(1);
@@ -1043,7 +1046,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
len = BIT(dip->i_depth - be16_to_cpu(oleaf->lf_depth));
half_len = len >> 1;
if (!half_len) {
- pr_warn("i_depth %u lf_depth %u index %u\n",
+ fs_warn(GFS2_SB(inode), "i_depth %u lf_depth %u index %u\n",
dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
gfs2_consist_inode(dip);
error = -EIO;
@@ -1351,7 +1354,7 @@ static int gfs2_set_cookies(struct gfs2_sbd *sdp, struct buffer_head *bh,
if (!sdp->sd_args.ar_loccookie)
continue;
offset = (char *)(darr[i]) -
- (bh->b_data + gfs2_dirent_offset(bh->b_data));
+ (bh->b_data + gfs2_dirent_offset(sdp, bh->b_data));
offset /= GFS2_MIN_DIRENT_SIZE;
offset += leaf_nr * sdp->sd_max_dents_per_leaf;
if (offset >= GFS2_USE_HASH_FLAG ||
@@ -2018,7 +2021,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
l_blocks++;
}
- gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
+ gfs2_rlist_alloc(&rlist);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
@@ -2039,6 +2042,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
bh = leaf_bh;
for (blk = leaf_no; blk; blk = nblk) {
+ struct gfs2_rgrpd *rgd;
+
if (blk != leaf_no) {
error = get_leaf(dip, blk, &bh);
if (error)
@@ -2049,7 +2054,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
if (blk != leaf_no)
brelse(bh);
- gfs2_free_meta(dip, blk, 1);
+ rgd = gfs2_blk2rgrpd(sdp, blk, true);
+ gfs2_free_meta(dip, rgd, blk, 1);
gfs2_add_inode_blocks(&dip->i_inode, -1);
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 08369c6cd127..45a17b770d97 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -314,6 +314,17 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
return do_gfs2_set_flags(filp, gfsflags, mask);
}
+static int gfs2_getlabel(struct file *filp, char __user *label)
+{
+ struct inode *inode = file_inode(filp);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+
+ if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
+ return -EFAULT;
+
+ return 0;
+}
+
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch(cmd) {
@@ -323,7 +334,10 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return gfs2_set_flags(filp, (u32 __user *)arg);
case FITRIM:
return gfs2_fitrim(filp, (void __user *)arg);
+ case FS_IOC_GETFSLABEL:
+ return gfs2_getlabel(filp, (char __user *)arg);
}
+
return -ENOTTY;
}
@@ -347,8 +361,8 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
int hint = min_t(size_t, INT_MAX, blks);
- if (hint > atomic_read(&ip->i_res.rs_sizehint))
- atomic_set(&ip->i_res.rs_sizehint, hint);
+ if (hint > atomic_read(&ip->i_sizehint))
+ atomic_set(&ip->i_sizehint, hint);
}
/**
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4614ee25f621..05431324b262 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -494,7 +494,8 @@ retry:
do_xmote(gl, gh, LM_ST_UNLOCKED);
break;
default: /* Everything else */
- pr_err("wanted %u got %u\n", gl->gl_target, state);
+ fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
+ gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
spin_unlock(&gl->gl_lockref.lock);
@@ -577,7 +578,7 @@ __acquires(&gl->gl_lockref.lock)
gfs2_glock_queue_work(gl, 0);
}
else if (ret) {
- pr_err("lm_lock ret %d\n", ret);
+ fs_err(sdp, "lm_lock ret %d\n", ret);
GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
&sdp->sd_flags));
}
@@ -1064,13 +1065,13 @@ do_cancel:
return;
trap_recursive:
- pr_err("original: %pSR\n", (void *)gh2->gh_ip);
- pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
- pr_err("lock type: %d req lock state : %d\n",
+ fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
+ fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
+ fs_err(sdp, "lock type: %d req lock state : %d\n",
gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
- pr_err("new: %pSR\n", (void *)gh->gh_ip);
- pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
- pr_err("lock type: %d req lock state : %d\n",
+ fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
+ fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
+ fs_err(sdp, "lock type: %d req lock state : %d\n",
gh->gh_gl->gl_name.ln_type, gh->gh_state);
gfs2_dump_glock(NULL, gl);
BUG();
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index b96d39c28e17..888b62cfd6d1 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -92,7 +92,7 @@ struct gfs2_bitmap {
unsigned long bi_flags;
u32 bi_offset;
u32 bi_start;
- u32 bi_len;
+ u32 bi_bytes;
u32 bi_blocks;
};
@@ -309,10 +309,6 @@ struct gfs2_qadata { /* quota allocation data */
*/
struct gfs2_blkreserv {
- /* components used during write (step 1): */
- atomic_t rs_sizehint; /* hint of the write size */
-
- struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
struct rb_node rs_node; /* link to other block reservations */
struct gfs2_rbm rs_rbm; /* Start of reservation */
u32 rs_free; /* how many blocks are still free */
@@ -417,8 +413,10 @@ struct gfs2_inode {
struct gfs2_holder i_iopen_gh;
struct gfs2_holder i_gh; /* for prepare/commit_write only */
struct gfs2_qadata *i_qadata; /* quota allocation data */
+ struct gfs2_holder i_rgd_gh;
struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */
u64 i_goal; /* goal block for allocations */
+ atomic_t i_sizehint; /* hint of the write size */
struct rw_semaphore i_rw_mutex;
struct list_head i_ordered;
struct list_head i_trunc_list;
@@ -623,6 +621,7 @@ enum {
SDF_RORECOVERY = 7, /* read only recovery */
SDF_SKIP_DLM_UNLOCK = 8,
SDF_FORCE_AIL_FLUSH = 9,
+ SDF_AIL1_IO_ERROR = 10,
};
enum gfs2_freeze_state {
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index ac7caa267ed6..31df26ed7854 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -177,14 +177,14 @@ static void gdlm_bast(void *arg, int mode)
gfs2_glock_cb(gl, LM_ST_SHARED);
break;
default:
- pr_err("unknown bast mode %d\n", mode);
+ fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
BUG();
}
}
/* convert gfs lock-state to dlm lock-mode */
-static int make_mode(const unsigned int lmstate)
+static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
switch (lmstate) {
case LM_ST_UNLOCKED:
@@ -196,7 +196,7 @@ static int make_mode(const unsigned int lmstate)
case LM_ST_SHARED:
return DLM_LOCK_PR;
}
- pr_err("unknown LM state %d\n", lmstate);
+ fs_err(sdp, "unknown LM state %d\n", lmstate);
BUG();
return -1;
}
@@ -257,7 +257,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
u32 lkf;
char strname[GDLM_STRNAME_BYTES] = "";
- req = make_mode(req_state);
+ req = make_mode(gl->gl_name.ln_sbd, req_state);
lkf = make_flags(gl, flags, req);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
@@ -309,7 +309,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
NULL, gl);
if (error) {
- pr_err("gdlm_unlock %x,%llx err=%d\n",
+ fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number, error);
return;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index ee20ea42e7b5..99dd58694ba1 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -108,7 +108,9 @@ __acquires(&sdp->sd_ail_lock)
gfs2_assert(sdp, bd->bd_tr == tr);
if (!buffer_busy(bh)) {
- if (!buffer_uptodate(bh)) {
+ if (!buffer_uptodate(bh) &&
+ !test_and_set_bit(SDF_AIL1_IO_ERROR,
+ &sdp->sd_flags)) {
gfs2_io_error_bh(sdp, bh);
*withdraw = true;
}
@@ -206,7 +208,8 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
gfs2_assert(sdp, bd->bd_tr == tr);
if (buffer_busy(bh))
continue;
- if (!buffer_uptodate(bh)) {
+ if (!buffer_uptodate(bh) &&
+ !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
gfs2_io_error_bh(sdp, bh);
*withdraw = true;
}
@@ -618,7 +621,7 @@ void gfs2_write_revokes(struct gfs2_sbd *sdp)
gfs2_ail1_empty(sdp);
spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
+ list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
if (list_empty(&bd->bd_list)) {
have_revokes = 1;
@@ -642,7 +645,7 @@ done:
}
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
+ list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
if (max_revokes == 0)
goto out_of_blocks;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index f2567f958d00..4c7069b8f3c1 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -81,7 +81,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
if (sdp->sd_args.ar_discard)
gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
memcpy(bi->bi_clone + bi->bi_offset,
- bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
+ bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
clear_bit(GBF_FULL, &bi->bi_flags);
rgd->rd_free_clone = rgd->rd_free;
rgd->rd_extfail_pt = rgd->rd_free;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 2d55e2c3333c..c7603063f861 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -39,9 +39,11 @@ static void gfs2_init_inode_once(void *foo)
struct gfs2_inode *ip = foo;
inode_init_once(&ip->i_inode);
+ atomic_set(&ip->i_sizehint, 0);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
ip->i_qadata = NULL;
+ gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
memset(&ip->i_res, 0, sizeof(ip->i_res));
RB_CLEAR_NODE(&ip->i_res.rs_node);
ip->i_hash_cache = NULL;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c2469833b4fb..b041cb8ae383 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -72,13 +72,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
if (!sdp)
return NULL;
- sb->s_fs_info = sdp;
sdp->sd_vfs = sb;
sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
if (!sdp->sd_lkstats) {
kfree(sdp);
return NULL;
}
+ sb->s_fs_info = sdp;
set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);
@@ -1333,6 +1333,9 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
struct path path;
int error;
+ if (!dev_name || !*dev_name)
+ return ERR_PTR(-EINVAL);
+
error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
if (error) {
pr_warn("path_lookup on %s returned error %d\n",
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 0efae7a0ee80..2ae5a109eea7 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1183,7 +1183,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
*
* Returns: 0 on success.
* min_req = ap->min_target ? ap->min_target : ap->target;
- * quota must allow atleast min_req blks for success and
+ * quota must allow at least min_req blks for success and
* ap->allowed is set to the number of blocks allowed
*
* -EDQUOT otherwise, quota violation. ap->allowed is set to number
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 1ad3256b9cbc..ffe3032b1043 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -90,7 +90,7 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
{
unsigned char *byte1, *byte2, *end, cur_state;
struct gfs2_bitmap *bi = rbm_bi(rbm);
- unsigned int buflen = bi->bi_len;
+ unsigned int buflen = bi->bi_bytes;
const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
@@ -101,12 +101,16 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
if (unlikely(!valid_change[new_state * 4 + cur_state])) {
- pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
+ struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;
+
+ fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n",
rbm->offset, cur_state, new_state);
- pr_warn("rgrp=0x%llx bi_start=0x%x\n",
- (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
- pr_warn("bi_offset=0x%x bi_len=0x%x\n",
- bi->bi_offset, bi->bi_len);
+ fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n",
+ (unsigned long long)rbm->rgd->rd_addr, bi->bi_start,
+ (unsigned long long)bi->bi_bh->b_blocknr);
+ fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n",
+ bi->bi_offset, bi->bi_bytes,
+ (unsigned long long)gfs2_rbm_to_block(rbm));
dump_stack();
gfs2_consist_rgrpd(rbm->rgd);
return;
@@ -269,15 +273,10 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
- u64 rblock = block - rbm->rgd->rd_data0;
-
- if (WARN_ON_ONCE(rblock > UINT_MAX))
- return -EINVAL;
- if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
+ if (!rgrp_contains_block(rbm->rgd, block))
return -E2BIG;
-
rbm->bii = 0;
- rbm->offset = (u32)(rblock);
+ rbm->offset = block - rbm->rgd->rd_data0;
/* Check if the block is within the first block */
if (rbm->offset < rbm_bi(rbm)->bi_blocks)
return 0;
@@ -382,7 +381,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
if (bi->bi_clone)
start = bi->bi_clone;
start += bi->bi_offset;
- end = start + bi->bi_len;
+ end = start + bi->bi_bytes;
BUG_ON(rbm.offset & 3);
start += (rbm.offset / GFS2_NBBY);
bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -467,7 +466,7 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
count[x] += gfs2_bitcount(rgd,
bi->bi_bh->b_data +
bi->bi_offset,
- bi->bi_len, x);
+ bi->bi_bytes, x);
}
if (count[0] != rgd->rd_free) {
@@ -642,7 +641,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
RB_CLEAR_NODE(&rs->rs_node);
if (rs->rs_free) {
- struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+ u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
+ rs->rs_free - 1;
+ struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
+ struct gfs2_bitmap *start, *last;
/* return reserved blocks to the rgrp */
BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
@@ -653,7 +655,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
it will force the number to be recalculated later. */
rgd->rd_extfail_pt += rs->rs_free;
rs->rs_free = 0;
- clear_bit(GBF_FULL, &bi->bi_flags);
+ if (gfs2_rbm_from_block(&last_rbm, last_block))
+ return;
+ start = rbm_bi(&rs->rs_rbm);
+ last = rbm_bi(&last_rbm);
+ do
+ clear_bit(GBF_FULL, &start->bi_flags);
+ while (start++ != last);
}
}
@@ -738,11 +746,13 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
- pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
- pr_info("ri_length = %u\n", rgd->rd_length);
- pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
- pr_info("ri_data = %u\n", rgd->rd_data);
- pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
+ struct gfs2_sbd *sdp = rgd->rd_sbd;
+
+ fs_info(sdp, "ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
+ fs_info(sdp, "ri_length = %u\n", rgd->rd_length);
+ fs_info(sdp, "ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
+ fs_info(sdp, "ri_data = %u\n", rgd->rd_data);
+ fs_info(sdp, "ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
/**
@@ -780,21 +790,21 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
bytes = bytes_left;
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
- bi->bi_len = bytes;
+ bi->bi_bytes = bytes;
bi->bi_blocks = bytes * GFS2_NBBY;
/* header block */
} else if (x == 0) {
bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
- bi->bi_len = bytes;
+ bi->bi_bytes = bytes;
bi->bi_blocks = bytes * GFS2_NBBY;
/* last block */
} else if (x + 1 == length) {
bytes = bytes_left;
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
- bi->bi_len = bytes;
+ bi->bi_bytes = bytes;
bi->bi_blocks = bytes * GFS2_NBBY;
/* other blocks */
} else {
@@ -802,7 +812,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
sizeof(struct gfs2_meta_header);
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
- bi->bi_len = bytes;
+ bi->bi_bytes = bytes;
bi->bi_blocks = bytes * GFS2_NBBY;
}
@@ -814,11 +824,11 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
return -EIO;
}
bi = rgd->rd_bits + (length - 1);
- if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
+ if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
if (gfs2_consist_rgrpd(rgd)) {
gfs2_rindex_print(rgd);
fs_err(sdp, "start=%u len=%u offset=%u\n",
- bi->bi_start, bi->bi_len, bi->bi_offset);
+ bi->bi_start, bi->bi_bytes, bi->bi_offset);
}
return -EIO;
}
@@ -1103,12 +1113,35 @@ static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
+ int valid = 1;
- if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
- rgl->rl_dinodes != str->rg_dinodes ||
- rgl->rl_igeneration != str->rg_igeneration)
- return 0;
- return 1;
+ if (rgl->rl_flags != str->rg_flags) {
+ printk(KERN_WARNING "GFS2: rgd: %llu lvb flag mismatch %u/%u",
+ (unsigned long long)rgd->rd_addr,
+ be32_to_cpu(rgl->rl_flags), be32_to_cpu(str->rg_flags));
+ valid = 0;
+ }
+ if (rgl->rl_free != str->rg_free) {
+ printk(KERN_WARNING "GFS2: rgd: %llu lvb free mismatch %u/%u",
+ (unsigned long long)rgd->rd_addr,
+ be32_to_cpu(rgl->rl_free), be32_to_cpu(str->rg_free));
+ valid = 0;
+ }
+ if (rgl->rl_dinodes != str->rg_dinodes) {
+ printk(KERN_WARNING "GFS2: rgd: %llu lvb dinode mismatch %u/%u",
+ (unsigned long long)rgd->rd_addr,
+ be32_to_cpu(rgl->rl_dinodes),
+ be32_to_cpu(str->rg_dinodes));
+ valid = 0;
+ }
+ if (rgl->rl_igeneration != str->rg_igeneration) {
+ printk(KERN_WARNING "GFS2: rgd: %llu lvb igen mismatch "
+ "%llu/%llu", (unsigned long long)rgd->rd_addr,
+ (unsigned long long)be64_to_cpu(rgl->rl_igeneration),
+ (unsigned long long)be64_to_cpu(str->rg_igeneration));
+ valid = 0;
+ }
+ return valid;
}
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
@@ -1122,8 +1155,8 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
goal = 0;
buffer = bi->bi_bh->b_data + bi->bi_offset;
WARN_ON(!buffer_uptodate(bi->bi_bh));
- while (goal < bi->bi_len * GFS2_NBBY) {
- goal = gfs2_bitfit(buffer, bi->bi_len, goal,
+ while (goal < bi->bi_blocks) {
+ goal = gfs2_bitfit(buffer, bi->bi_bytes, goal,
GFS2_BLKST_UNLINKED);
if (goal == BFITNOENT)
break;
@@ -1226,7 +1259,7 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
rl_flags &= ~GFS2_RDF_MASK;
rgd->rd_flags &= GFS2_RDF_MASK;
- rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
+ rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
if (rgd->rd_rgl->rl_unlinked == 0)
rgd->rd_flags &= ~GFS2_RDF_CHECK;
rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
@@ -1295,7 +1328,7 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
u32 trimmed = 0;
u8 diff;
- for (x = 0; x < bi->bi_len; x++) {
+ for (x = 0; x < bi->bi_bytes; x++) {
const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
clone += bi->bi_offset;
clone += x;
@@ -1541,8 +1574,8 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
if (S_ISDIR(inode->i_mode))
extlen = 1;
else {
- extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
- extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+ extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
+ extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
}
if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
return;
@@ -1728,7 +1761,7 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
buffer = bi->bi_clone + bi->bi_offset;
initial_offset = rbm->offset;
- offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
+ offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state);
if (offset == BFITNOENT)
goto bitmap_full;
rbm->offset = offset;
@@ -1999,7 +2032,7 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
* We try our best to find an rgrp that has at least ap->target blocks
* available. After a couple of passes (loops == 2), the prospects of finding
* such an rgrp diminish. At this stage, we return the first rgrp that has
- * atleast ap->min_target blocks available. Either way, we set ap->allowed to
+ * at least ap->min_target blocks available. Either way, we set ap->allowed to
* the number of blocks available in the chosen rgrp.
*
* Returns: 0 on success,
@@ -2053,7 +2086,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
}
error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
LM_ST_EXCLUSIVE, flags,
- &rs->rs_rgd_gh);
+ &ip->i_rgd_gh);
if (unlikely(error))
return error;
if (!gfs2_rs_active(rs) && (loops < 2) &&
@@ -2062,13 +2095,13 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
if (sdp->sd_args.ar_rgrplvb) {
error = update_rgrp_lvb(rs->rs_rbm.rgd);
if (unlikely(error)) {
- gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
+ gfs2_glock_dq_uninit(&ip->i_rgd_gh);
return error;
}
}
}
- /* Skip unuseable resource groups */
+ /* Skip unusable resource groups */
if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
GFS2_RDF_ERROR)) ||
(loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
@@ -2105,7 +2138,7 @@ skip_rgrp:
/* Unlock rgrp if required */
if (!rg_locked)
- gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
+ gfs2_glock_dq_uninit(&ip->i_rgd_gh);
next_rgrp:
/* Find the next rgrp, and continue looking */
if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
@@ -2142,10 +2175,8 @@ next_rgrp:
void gfs2_inplace_release(struct gfs2_inode *ip)
{
- struct gfs2_blkreserv *rs = &ip->i_res;
-
- if (gfs2_holder_initialized(&rs->rs_rgd_gh))
- gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
+ if (gfs2_holder_initialized(&ip->i_rgd_gh))
+ gfs2_glock_dq_uninit(&ip->i_rgd_gh);
}
/**
@@ -2184,27 +2215,21 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
/**
* rgblk_free - Change alloc state of given block(s)
* @sdp: the filesystem
+ * @rgd: the resource group the blocks are in
* @bstart: the start of a run of blocks to free
* @blen: the length of the block run (all must lie within ONE RG!)
* @new_state: GFS2_BLKST_XXX the after-allocation block state
- *
- * Returns: Resource group containing the block(s)
*/
-static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
- u32 blen, unsigned char new_state)
+static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen, unsigned char new_state)
{
struct gfs2_rbm rbm;
struct gfs2_bitmap *bi, *bi_prev = NULL;
- rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
- if (!rbm.rgd) {
- if (gfs2_consist(sdp))
- fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
- return NULL;
- }
-
- gfs2_rbm_from_block(&rbm, bstart);
+ rbm.rgd = rgd;
+ if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart)))
+ return;
while (blen--) {
bi = rbm_bi(&rbm);
if (bi != bi_prev) {
@@ -2213,7 +2238,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
GFP_NOFS | __GFP_NOFAIL);
memcpy(bi->bi_clone + bi->bi_offset,
bi->bi_bh->b_data + bi->bi_offset,
- bi->bi_len);
+ bi->bi_bytes);
}
gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
bi_prev = bi;
@@ -2221,8 +2246,6 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
gfs2_setbit(&rbm, false, new_state);
gfs2_rbm_incr(&rbm);
}
-
- return rbm.rgd;
}
/**
@@ -2244,6 +2267,14 @@ void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
rgd->rd_reserved, rgd->rd_extfail_pt);
+ if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
+ struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
+
+ gfs2_print_dbg(seq, " L: f:%02x b:%u i:%u\n",
+ be32_to_cpu(rgl->rl_flags),
+ be32_to_cpu(rgl->rl_free),
+ be32_to_cpu(rgl->rl_dinodes));
+ }
spin_lock(&rgd->rd_rsspin);
for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
@@ -2295,7 +2326,7 @@ static void gfs2_adjust_reservation(struct gfs2_inode *ip,
goto out;
/* We used up our block reservation, so we should
reserve more blocks next time. */
- atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
+ atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
}
__rs_deltree(rs);
}
@@ -2329,7 +2360,10 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
else
goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
- gfs2_rbm_from_block(rbm, goal);
+ if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
+ rbm->bii = 0;
+ rbm->offset = 0;
+ }
}
/**
@@ -2392,7 +2426,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
}
}
if (rbm.rgd->rd_free < *nblocks) {
- pr_warn("nblocks=%u\n", *nblocks);
+ fs_warn(sdp, "nblocks=%u\n", *nblocks);
goto rgrp_error;
}
@@ -2427,20 +2461,19 @@ rgrp_error:
/**
* __gfs2_free_blocks - free a contiguous run of block(s)
* @ip: the inode these blocks are being freed from
+ * @rgd: the resource group the blocks are in
* @bstart: first block of a run of contiguous blocks
* @blen: the length of the block run
* @meta: 1 if the blocks represent metadata
*
*/
-void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
+void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen, int meta)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_rgrpd *rgd;
- rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
- if (!rgd)
- return;
+ rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
rgd->rd_free += blen;
rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
@@ -2455,16 +2488,18 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
/**
* gfs2_free_meta - free a contiguous run of data block(s)
* @ip: the inode these blocks are being freed from
+ * @rgd: the resource group the blocks are in
* @bstart: first block of a run of contiguous blocks
* @blen: the length of the block run
*
*/
-void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
+void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- __gfs2_free_blocks(ip, bstart, blen, 1);
+ __gfs2_free_blocks(ip, rgd, bstart, blen, 1);
gfs2_statfs_change(sdp, 0, +blen, 0);
gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
@@ -2476,9 +2511,10 @@ void gfs2_unlink_di(struct inode *inode)
struct gfs2_rgrpd *rgd;
u64 blkno = ip->i_no_addr;
- rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
+ rgd = gfs2_blk2rgrpd(sdp, blkno, true);
if (!rgd)
return;
+ rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
@@ -2488,13 +2524,8 @@ void gfs2_unlink_di(struct inode *inode)
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
- struct gfs2_rgrpd *tmp_rgd;
-
- tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE);
- if (!tmp_rgd)
- return;
- gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
+ rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
if (!rgd->rd_dinodes)
gfs2_consist_rgrpd(rgd);
rgd->rd_dinodes--;
@@ -2538,7 +2569,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
rbm.rgd = rgd;
error = gfs2_rbm_from_block(&rbm, no_addr);
- WARN_ON_ONCE(error != 0);
+ if (WARN_ON_ONCE(error))
+ goto fail;
if (gfs2_testbit(&rbm, false) != type)
error = -ESTALE;
@@ -2624,13 +2656,12 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
* gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
* and initialize an array of glock holders for them
* @rlist: the list of resource groups
- * @state: the lock state to acquire the RG lock in
*
* FIXME: Don't use NOFAIL
*
*/
-void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
{
unsigned int x;
@@ -2639,7 +2670,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++)
gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
- state, 0,
+ LM_ST_EXCLUSIVE, 0,
&rlist->rl_ghs[x]);
}
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index e90478e2f545..b596c3d17988 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -18,8 +18,7 @@
* By reserving 32 blocks at a time, we can optimize / shortcut how we search
* through the bitmaps by looking a word at a time.
*/
-#define RGRP_RSRV_MINBYTES 8
-#define RGRP_RSRV_MINBLKS ((u32)(RGRP_RSRV_MINBYTES * GFS2_NBBY))
+#define RGRP_RSRV_MINBLKS 32
#define RGRP_RSRV_ADDBLKS 64
struct gfs2_rgrpd;
@@ -52,8 +51,10 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
extern int gfs2_rsqa_alloc(struct gfs2_inode *ip);
extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
extern void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount);
-extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
-extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
+extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen, int meta);
+extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
extern void gfs2_unlink_di(struct inode *inode);
extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
@@ -68,7 +69,7 @@ struct gfs2_rgrp_list {
extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
u64 block);
-extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state);
+extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist);
extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
extern void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c212893534ed..ca71163ff7cf 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -854,10 +854,10 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
return error;
+ flush_workqueue(gfs2_delete_workqueue);
kthread_stop(sdp->sd_quotad_process);
kthread_stop(sdp->sd_logd_process);
- flush_workqueue(gfs2_delete_workqueue);
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
@@ -971,7 +971,7 @@ void gfs2_freeze_func(struct work_struct *work)
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
&freeze_gh);
if (error) {
- printk(KERN_INFO "GFS2: couln't get freeze lock : %d\n", error);
+ printk(KERN_INFO "GFS2: couldn't get freeze lock : %d\n", error);
gfs2_assert_withdraw(sdp, 0);
}
else {
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 064c9a0ef046..423bc2d03dd8 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -74,13 +74,13 @@ fail:
return error;
}
-static void gfs2_print_trans(const struct gfs2_trans *tr)
+static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
{
- pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip);
- pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n",
+ fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip);
+ fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
test_bit(TR_TOUCHED, &tr->tr_flags));
- pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
+ fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
tr->tr_num_buf_new, tr->tr_num_buf_rm,
tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
tr->tr_num_revoke, tr->tr_num_revoke_rm);
@@ -109,7 +109,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) &&
(tr->tr_num_revoke <= tr->tr_revokes)))
- gfs2_print_trans(tr);
+ gfs2_print_trans(sdp, tr);
gfs2_log_commit(sdp, tr);
if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags))
@@ -225,12 +225,13 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
- pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n",
+ fs_err(sdp, "Attempting to add uninitialised block to "
+ "journal (inplace block=%lld)\n",
(unsigned long long)bd->bd_bh->b_blocknr);
BUG();
}
if (unlikely(state == SFS_FROZEN)) {
- printk(KERN_INFO "GFS2:adding buf while frozen\n");
+ fs_info(sdp, "GFS2:adding buf while frozen\n");
gfs2_assert_withdraw(sdp, 0);
}
gfs2_pin(sdp, bd->bd_bh);
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 59c811de0dc7..0a814ccac41d 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -19,6 +19,7 @@
#include "gfs2.h"
#include "incore.h"
#include "glock.h"
+#include "rgrp.h"
#include "util.h"
struct kmem_cache *gfs2_glock_cachep __read_mostly;
@@ -181,6 +182,8 @@ int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide,
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
int rv;
+
+ gfs2_rgrp_dump(NULL, rgd->rd_gl);
rv = gfs2_lm_withdraw(sdp,
"fatal: filesystem consistency error\n"
" RG = %llu\n"
@@ -256,12 +259,13 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
const char *function, char *file, unsigned int line,
bool withdraw)
{
- fs_err(sdp,
- "fatal: I/O error\n"
- " block = %llu\n"
- " function = %s, file = %s, line = %u\n",
- (unsigned long long)bh->b_blocknr,
- function, file, line);
+ if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
+ fs_err(sdp,
+ "fatal: I/O error\n"
+ " block = %llu\n"
+ " function = %s, file = %s, line = %u\n",
+ (unsigned long long)bh->b_blocknr,
+ function, file, line);
if (withdraw)
gfs2_lm_withdraw(sdp, NULL);
}
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 96ac4aba4738..9278fecba632 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -86,7 +86,7 @@ static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
u32 magic = be32_to_cpu(mh->mh_magic);
if (unlikely(magic != GFS2_MAGIC)) {
- pr_err("Magic number missing at %llu\n",
+ fs_err(sdp, "Magic number missing at %llu\n",
(unsigned long long)bh->b_blocknr);
return -EIO;
}
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 38515988aaf7..996c915a9c97 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -283,7 +283,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
blen++;
else {
if (bstart)
- gfs2_free_meta(ip, bstart, blen);
+ gfs2_free_meta(ip, rgd, bstart, blen);
bstart = bn;
blen = 1;
}
@@ -292,7 +292,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart)
- gfs2_free_meta(ip, bstart, blen);
+ gfs2_free_meta(ip, rgd, bstart, blen);
if (prev && !leave) {
u32 len;
@@ -1250,6 +1250,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrp_list rlist;
+ struct gfs2_rgrpd *rgd;
struct buffer_head *indbh, *dibh;
__be64 *eablk, *end;
unsigned int rg_blocks = 0;
@@ -1299,11 +1300,10 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
else
goto out;
- gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
+ gfs2_rlist_alloc(&rlist);
for (x = 0; x < rlist.rl_rgrps; x++) {
- struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
-
+ rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
rg_blocks += rgd->rd_length;
}
@@ -1320,6 +1320,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
bstart = 0;
+ rgd = NULL;
blen = 0;
for (; eablk < end; eablk++) {
@@ -1333,8 +1334,9 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
blen++;
else {
if (bstart)
- gfs2_free_meta(ip, bstart, blen);
+ gfs2_free_meta(ip, rgd, bstart, blen);
bstart = bn;
+ rgd = gfs2_blk2rgrpd(sdp, bstart, true);
blen = 1;
}
@@ -1342,7 +1344,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart)
- gfs2_free_meta(ip, bstart, blen);
+ gfs2_free_meta(ip, rgd, bstart, blen);
ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;
@@ -1391,7 +1393,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
if (error)
goto out_gunlock;
- gfs2_free_meta(ip, ip->i_eattr, 1);
+ gfs2_free_meta(ip, rgd, ip->i_eattr, 1);
ip->i_eattr = 0;
gfs2_add_inode_blocks(&ip->i_inode, -1);
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 9a8772465a90..896396554bcc 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -425,6 +425,10 @@ skip:
if (new_node) {
__be32 cnid;
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+ new_node->parent = tree->root;
+ }
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
@@ -441,6 +445,7 @@ skip:
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
+ new_node = NULL;
}
if (!rec && node->parent)
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 374b5688e29e..98b96ffb95ed 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -220,25 +220,17 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
return node;
}
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has enough space for the @rsvd_nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
- struct hfs_bnode *node, *next_node;
- struct page **pagep;
- u32 nidx, idx;
- unsigned off;
- u16 off16;
- u16 len;
- u8 *data, byte, m;
- int i;
-
- while (!tree->free_nodes) {
- struct inode *inode = tree->inode;
- u32 count;
- int res;
+ struct inode *inode = tree->inode;
+ u32 count;
+ int res;
+ while (tree->free_nodes < rsvd_nodes) {
res = hfs_extend_file(inode);
if (res)
- return ERR_PTR(res);
+ return res;
HFS_I(inode)->phys_size = inode->i_size =
(loff_t)HFS_I(inode)->alloc_blocks *
HFS_SB(tree->sb)->alloc_blksz;
@@ -246,9 +238,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
tree->sb->s_blocksize_bits;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
- tree->free_nodes = count - tree->node_count;
+ tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
+ return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *node, *next_node;
+ struct page **pagep;
+ u32 nidx, idx;
+ unsigned off;
+ u16 off16;
+ u16 len;
+ u8 *data, byte, m;
+ int i, res;
+
+ res = hfs_bmap_reserve(tree, 1);
+ if (res)
+ return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index c8b252dbb26c..dcc2aab1b2c4 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -82,6 +82,7 @@ struct hfs_find_data {
extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
extern void hfs_btree_close(struct hfs_btree *);
extern void hfs_btree_write(struct hfs_btree *);
+extern int hfs_bmap_reserve(struct hfs_btree *, int);
extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
extern void hfs_bmap_free(struct hfs_bnode *node);
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 8a66405b0f8b..d365bf0b8c77 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -97,6 +97,14 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
+ if (err)
+ goto err2;
+
hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
HFS_CDR_THD : HFS_CDR_FTH,
@@ -295,6 +303,14 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
return err;
dst_fd = src_fd;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth);
+ if (err)
+ goto out;
+
/* find the old dir entry and read the data */
hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
err = hfs_brec_find(&src_fd);
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index 5d0182654580..263d5028d9d1 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -117,6 +117,10 @@ static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
if (res != -ENOENT)
return res;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
+ if (res)
+ return res;
hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
} else {
@@ -300,7 +304,7 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
return 0;
blocks = 0;
- for (i = 0; i < 3; extent++, i++)
+ for (i = 0; i < 3; i++)
blocks += be16_to_cpu(extent[i].count);
res = hfs_free_extents(sb, extent, blocks, blocks);
@@ -341,7 +345,9 @@ int hfs_get_block(struct inode *inode, sector_t block,
ablock = (u32)block / HFS_SB(sb)->fs_div;
if (block >= HFS_I(inode)->fs_blocks) {
- if (block > HFS_I(inode)->fs_blocks || !create)
+ if (!create)
+ return 0;
+ if (block > HFS_I(inode)->fs_blocks)
return -EIO;
if (ablock >= HFS_I(inode)->alloc_blocks) {
res = hfs_extend_file(inode);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a2dfa1b2a89c..da243c84e93b 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -642,6 +642,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
+ inode->i_atime = inode->i_mtime = inode->i_ctime =
+ current_time(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
index 2bab6b3cdba4..e6d554476db4 100644
--- a/fs/hfsplus/attributes.c
+++ b/fs/hfsplus/attributes.c
@@ -217,6 +217,11 @@ int hfsplus_create_attr(struct inode *inode,
if (err)
goto failed_init_create_attr;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);
+ if (err)
+ goto failed_create_attr;
+
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
@@ -313,6 +318,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
if (err)
return err;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ err = hfs_bmap_reserve(fd.tree, fd.tree->depth);
+ if (err)
+ goto out;
+
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index ed8eacb34452..1918544a7871 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -429,6 +429,10 @@ skip:
if (new_node) {
__be32 cnid;
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+ new_node->parent = tree->root;
+ }
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
@@ -445,6 +449,7 @@ skip:
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
+ new_node = NULL;
}
if (!rec && node->parent)
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index de14b2b6881b..236efe51eca6 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -342,26 +342,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
return node;
}
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has enough space for the @rsvd_nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
- struct hfs_bnode *node, *next_node;
- struct page **pagep;
- u32 nidx, idx;
- unsigned off;
- u16 off16;
- u16 len;
- u8 *data, byte, m;
- int i;
+ struct inode *inode = tree->inode;
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+ u32 count;
+ int res;
- while (!tree->free_nodes) {
- struct inode *inode = tree->inode;
- struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
- u32 count;
- int res;
+ if (rsvd_nodes <= 0)
+ return 0;
+ while (tree->free_nodes < rsvd_nodes) {
res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
if (res)
- return ERR_PTR(res);
+ return res;
hip->phys_size = inode->i_size =
(loff_t)hip->alloc_blocks <<
HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
@@ -369,9 +364,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
- tree->free_nodes = count - tree->node_count;
+ tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
+ return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *node, *next_node;
+ struct page **pagep;
+ u32 nidx, idx;
+ unsigned off;
+ u16 off16;
+ u16 len;
+ u8 *data, byte, m;
+ int i, res;
+
+ res = hfs_bmap_reserve(tree, 1);
+ if (res)
+ return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index a196369ba779..35472cba750e 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -265,6 +265,14 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
+ if (err)
+ goto err2;
+
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
S_ISDIR(inode->i_mode) ?
@@ -333,6 +341,14 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2);
+ if (err)
+ goto out;
+
if (!str) {
int len;
@@ -433,6 +449,14 @@ int hfsplus_rename_cat(u32 cnid,
return err;
dst_fd = src_fd;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most twice.
+ */
+ err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1);
+ if (err)
+ goto out;
+
/* find the old dir entry and read the data */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
src_dir->i_ino, src_name);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 8e0f59767694..a930ddd15681 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -100,6 +100,10 @@ static int __hfsplus_ext_write_extent(struct inode *inode,
if (hip->extent_state & HFSPLUS_EXT_NEW) {
if (res != -ENOENT)
return res;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
+ if (res)
+ return res;
hfs_brec_insert(fd, hip->cached_extents,
sizeof(hfsplus_extent_rec));
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
@@ -233,7 +237,9 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
ablock = iblock >> sbi->fs_shift;
if (iblock >= hip->fs_blocks) {
- if (iblock > hip->fs_blocks || !create)
+ if (!create)
+ return 0;
+ if (iblock > hip->fs_blocks)
return -EIO;
if (ablock >= hip->alloc_blocks) {
res = hfsplus_file_extend(inode, false);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 8e039435958a..dd7ad9f13e3a 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -311,6 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
#define hfs_btree_open hfsplus_btree_open
#define hfs_btree_close hfsplus_btree_close
#define hfs_btree_write hfsplus_btree_write
+#define hfs_bmap_reserve hfsplus_bmap_reserve
#define hfs_bmap_alloc hfsplus_bmap_alloc
#define hfs_bmap_free hfsplus_bmap_free
#define hfs_bnode_read hfsplus_bnode_read
@@ -395,6 +396,7 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id);
void hfs_btree_close(struct hfs_btree *tree);
int hfs_btree_write(struct hfs_btree *tree);
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes);
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
void hfs_bmap_free(struct hfs_bnode *node);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 8e9427a42b81..d7ab9d8c4b67 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -261,6 +261,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/inode.c b/fs/inode.c
index 42f6d25f32a5..9e198f00b64c 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -10,7 +10,7 @@
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
@@ -349,7 +349,7 @@ EXPORT_SYMBOL(inc_nlink);
static void __address_space_init_once(struct address_space *mapping)
{
- INIT_RADIX_TREE(&mapping->i_pages, GFP_ATOMIC | __GFP_ACCOUNT);
+ xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ);
init_rwsem(&mapping->i_mmap_rwsem);
INIT_LIST_HEAD(&mapping->private_list);
spin_lock_init(&mapping->private_lock);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 2005529af560..0400297c8d72 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -669,6 +669,9 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
return ioctl_fiemap(filp, arg);
case FIGETBSZ:
+ /* anon_bdev filesystems may not have a block size */
+ if (!inode->i_sb->s_blocksize)
+ return -EINVAL;
return put_user(inode->i_sb->s_blocksize, argp);
case FICLONE:
diff --git a/fs/iomap.c b/fs/iomap.c
index 2c53400aa802..970ee3964e98 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1057,7 +1057,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
return length;
}
-int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index 947ce22f5b3c..f0fe641893a5 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -46,7 +46,7 @@ int isofs_name_translate(struct iso_directory_record *de, char *new, struct inod
return i;
}
-/* Acorn extensions written by Matthew Wilcox <willy@bofh.ai> 1998 */
+/* Acorn extensions written by Matthew Wilcox <willy@infradead.org> 1998 */
int get_acorn_filename(struct iso_directory_record *de,
char *retname, struct inode *inode)
{
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index c125d662777c..26f8d7e46462 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -251,8 +251,8 @@ restart:
bh = jh2bh(jh);
if (buffer_locked(bh)) {
- spin_unlock(&journal->j_list_lock);
get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
@@ -333,8 +333,8 @@ restart2:
jh = transaction->t_checkpoint_io_list;
bh = jh2bh(jh);
if (buffer_locked(bh)) {
- spin_unlock(&journal->j_list_lock);
get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 453a6a1fff34..2b4d5013dc5d 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -125,7 +125,7 @@ static int jffs2_garbage_collect_thread(void *_c)
if (try_to_freeze())
goto again;
- signr = kernel_dequeue_signal(NULL);
+ signr = kernel_dequeue_signal();
switch(signr) {
case SIGSTOP:
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 87bdf0f4cba1..902a7dd10e5c 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -285,10 +285,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = c;
ret = jffs2_parse_options(c, data);
- if (ret) {
- kfree(c);
+ if (ret)
return -EINVAL;
- }
/* Initialize JFFS2 superblock locks, the further initialization will
* be done later */
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 2e71b6e7e646..8c06a6ea862d 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -146,12 +146,16 @@ int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir)
if (default_acl) {
rc = __jfs_set_acl(tid, inode, ACL_TYPE_DEFAULT, default_acl);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!rc)
rc = __jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, acl);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) |
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 054cc761b426..805ae9e8944a 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -166,7 +166,6 @@ void jfs_evict_inode(struct inode *inode)
/*
* Free the inode from the quota allocation.
*/
- dquot_initialize(inode);
dquot_free_inode(inode);
}
} else {
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 09da5cf14e27..65d8fc87ab11 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -247,7 +247,7 @@ static const match_table_t tokens = {
{Opt_resize_nosize, "resize"},
{Opt_errors, "errors=%s"},
{Opt_ignore, "noquota"},
- {Opt_ignore, "quota"},
+ {Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_grpquota, "grpquota"},
{Opt_uid, "uid=%u"},
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index ff2716f9322e..fdf527b6d79c 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -236,6 +236,9 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
sb->s_export_op = &kernfs_export_ops;
sb->s_time_gran = 1;
+ /* sysfs dentries and inodes don't require IO to create */
+ sb->s_shrink.seeks = 0;
+
/* get root inode, initialize and unlock it */
mutex_lock(&kernfs_mutex);
inode = kernfs_get_inode(sb, info->root->kn);
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 305b220af45d..162f43b80c84 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -72,6 +72,9 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
if (base == kn)
break;
+ if ((s - path) + 3 >= PATH_MAX)
+ return -ENAMETOOLONG;
+
strcpy(s, "../");
s += 3;
base = base->parent;
@@ -88,7 +91,7 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
if (len < 2)
return -EINVAL;
len--;
- if ((s - path) + len > PATH_MAX)
+ if ((s - path) + len >= PATH_MAX)
return -ENAMETOOLONG;
/* reverse fillup of target string from target to base */
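Both kernfs hunks bound writes into the PATH_MAX target buffer: the "../" prefix loop and the final tail copy now return -ENAMETOOLONG instead of running past the end of the buffer. A self-contained sketch of the same bounds discipline; demo_build_relpath() is an illustrative helper, not kernfs code:

/* Sketch only: build "../../<tail>" into a PATH_MAX buffer, refusing to overflow. */
#include <errno.h>
#include <limits.h>
#include <string.h>

static int demo_build_relpath(char *path, int levels, const char *tail)
{
	char *s = path;
	size_t len = strlen(tail);

	while (levels--) {
		if ((s - path) + 3 >= PATH_MAX)
			return -ENAMETOOLONG;	/* no room for another "../" */
		strcpy(s, "../");
		s += 3;
	}
	if ((s - path) + len >= PATH_MAX)
		return -ENAMETOOLONG;		/* tail plus NUL would not fit */
	memcpy(s, tail, len + 1);
	return 0;
}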
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index d35cd6be0675..93fb7cf0b92b 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -341,7 +341,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
};
struct lockd_net *ln = net_generic(net, lockd_net_id);
- dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
+ dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__,
(int)hostname_len, hostname, rqstp->rq_vers,
(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
diff --git a/fs/namespace.c b/fs/namespace.c
index 99186556f8d3..98d27da43304 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
@@ -2642,6 +2642,7 @@ static long exact_copy_from_user(void *to, const void __user * from,
if (!access_ok(VERIFY_READ, from, n))
return n;
+ current->kernel_uaccess_faults_ok++;
while (n) {
if (__get_user(c, f)) {
memset(t, 0, n);
@@ -2651,6 +2652,7 @@ static long exact_copy_from_user(void *to, const void __user * from,
f++;
n--;
}
+ current->kernel_uaccess_faults_ok--;
return n;
}
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 06cb0c1d9aee..d3781cd983f6 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -896,7 +896,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (end != inode->i_mapping->nrpages) {
rcu_read_lock();
- end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
+ end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX);
rcu_read_unlock();
}
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 060c658eab66..a7d3df85736d 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -65,6 +65,7 @@ struct nfs_dns_ent {
struct sockaddr_storage addr;
size_t addrlen;
+ struct rcu_head rcu_head;
};
@@ -101,15 +102,23 @@ static void nfs_dns_ent_init(struct cache_head *cnew,
}
}
-static void nfs_dns_ent_put(struct kref *ref)
+static void nfs_dns_ent_free_rcu(struct rcu_head *head)
{
struct nfs_dns_ent *item;
- item = container_of(ref, struct nfs_dns_ent, h.ref);
+ item = container_of(head, struct nfs_dns_ent, rcu_head);
kfree(item->hostname);
kfree(item);
}
+static void nfs_dns_ent_put(struct kref *ref)
+{
+ struct nfs_dns_ent *item;
+
+ item = container_of(ref, struct nfs_dns_ent, h.ref);
+ call_rcu(&item->rcu_head, nfs_dns_ent_free_rcu);
+}
+
static struct cache_head *nfs_dns_ent_alloc(void)
{
struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL);
@@ -195,7 +204,7 @@ static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
{
struct cache_head *ch;
- ch = sunrpc_cache_lookup(cd,
+ ch = sunrpc_cache_lookup_rcu(cd,
&key->h,
nfs_dns_hash(key));
if (!ch)
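With lookups switching to sunrpc_cache_lookup_rcu(), a reader may still be walking the hash chain when the last kref on an entry is dropped, so the free is deferred through call_rcu() (a plain kfree_rcu() is not enough here because the separately allocated hostname must be freed as well). A minimal sketch of the deferred-free pattern; struct demo_ent and its fields are illustrative, not the nfs_dns_ent layout:

/* Sketch only: defer freeing an RCU-looked-up object until readers are done. */
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_ent {
	struct kref ref;
	char *name;			/* separately allocated, so kfree_rcu() alone won't do */
	struct rcu_head rcu_head;
};

static void demo_ent_free_rcu(struct rcu_head *head)
{
	struct demo_ent *ent = container_of(head, struct demo_ent, rcu_head);

	kfree(ent->name);
	kfree(ent);
}

static void demo_ent_release(struct kref *ref)
{
	struct demo_ent *ent = container_of(ref, struct demo_ent, ref);

	/* readers found via an RCU lookup may still hold 'ent' */
	call_rcu(&ent->rcu_head, demo_ent_free_rcu);
}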
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index b7559c6f2b97..4a98537efb0f 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -19,18 +19,22 @@
* is much larger than a sockaddr_in6.
*/
struct svc_cacherep {
- struct list_head c_lru;
+ struct {
+ /* Keep often-read xid, csum in the same cache line: */
+ __be32 k_xid;
+ __wsum k_csum;
+ u32 k_proc;
+ u32 k_prot;
+ u32 k_vers;
+ unsigned int k_len;
+ struct sockaddr_in6 k_addr;
+ } c_key;
+ struct rb_node c_node;
+ struct list_head c_lru;
unsigned char c_state, /* unused, inprog, done */
c_type, /* status, buffer */
c_secure : 1; /* req came from port < 1024 */
- struct sockaddr_in6 c_addr;
- __be32 c_xid;
- u32 c_prot;
- u32 c_proc;
- u32 c_vers;
- unsigned int c_len;
- __wsum c_csum;
unsigned long c_timestamp;
union {
struct kvec u_vec;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index a1143f7c2201..802993d8912f 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -46,7 +46,7 @@ static void expkey_put(struct kref *ref)
!test_bit(CACHE_NEGATIVE, &key->h.flags))
path_put(&key->ek_path);
auth_domain_put(key->ek_client);
- kfree(key);
+ kfree_rcu(key, ek_rcu);
}
static void expkey_request(struct cache_detail *cd,
@@ -265,7 +265,7 @@ svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item)
struct cache_head *ch;
int hash = svc_expkey_hash(item);
- ch = sunrpc_cache_lookup(cd, &item->h, hash);
+ ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
if (ch)
return container_of(ch, struct svc_expkey, h);
else
@@ -314,7 +314,7 @@ static void svc_export_put(struct kref *ref)
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
kfree(exp->ex_uuid);
- kfree(exp);
+ kfree_rcu(exp, ex_rcu);
}
static void svc_export_request(struct cache_detail *cd,
@@ -780,7 +780,7 @@ svc_export_lookup(struct svc_export *exp)
struct cache_head *ch;
int hash = svc_export_hash(exp);
- ch = sunrpc_cache_lookup(exp->cd, &exp->h, hash);
+ ch = sunrpc_cache_lookup_rcu(exp->cd, &exp->h, hash);
if (ch)
return container_of(ch, struct svc_export, h);
else
@@ -1216,9 +1216,9 @@ static int e_show(struct seq_file *m, void *p)
}
const struct seq_operations nfs_exports_op = {
- .start = cache_seq_start,
- .next = cache_seq_next,
- .stop = cache_seq_stop,
+ .start = cache_seq_start_rcu,
+ .next = cache_seq_next_rcu,
+ .stop = cache_seq_stop_rcu,
.show = e_show,
};
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index c8b74126ddaa..e7daa1f246f0 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -61,6 +61,7 @@ struct svc_export {
u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
+ struct rcu_head ex_rcu;
};
/* an "export key" (expkey) maps a filehandlefragement to an
@@ -75,6 +76,7 @@ struct svc_expkey {
u32 ek_fsid[6];
struct path ek_path;
+ struct rcu_head ek_rcu;
};
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 426f55005697..32cb8c027483 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -123,6 +123,14 @@ struct nfsd_net {
wait_queue_head_t ntf_wq;
atomic_t ntf_refcnt;
+
+ /*
+ * clientid and stateid data for construction of net unique COPY
+ * stateids.
+ */
+ u32 s2s_cp_cl_id;
+ struct idr s2s_cp_stateids;
+ spinlock_t s2s_cp_lock;
};
/* Simple check to find out if a given net was properly initialized */
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 601bf33c26a0..25987bcdf96f 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -39,6 +39,7 @@
#include "state.h"
#include "netns.h"
#include "xdr4cb.h"
+#include "xdr4.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -105,6 +106,7 @@ enum nfs_cb_opnum4 {
OP_CB_WANTS_CANCELLED = 12,
OP_CB_NOTIFY_LOCK = 13,
OP_CB_NOTIFY_DEVICEID = 14,
+ OP_CB_OFFLOAD = 15,
OP_CB_ILLEGAL = 10044
};
@@ -683,6 +685,101 @@ static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
}
/*
+ * struct write_response4 {
+ * stateid4 wr_callback_id<1>;
+ * length4 wr_count;
+ * stable_how4 wr_committed;
+ * verifier4 wr_writeverf;
+ * };
+ * union offload_info4 switch (nfsstat4 coa_status) {
+ * case NFS4_OK:
+ * write_response4 coa_resok4;
+ * default:
+ * length4 coa_bytes_copied;
+ * };
+ * struct CB_OFFLOAD4args {
+ * nfs_fh4 coa_fh;
+ * stateid4 coa_stateid;
+ * offload_info4 coa_offload_info;
+ * };
+ */
+static void encode_offload_info4(struct xdr_stream *xdr,
+ __be32 nfserr,
+ const struct nfsd4_copy *cp)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p++ = nfserr;
+ if (!nfserr) {
+ p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
+ p = xdr_encode_empty_array(p);
+ p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
+ *p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
+ p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
+ NFS4_VERIFIER_SIZE);
+ } else {
+ p = xdr_reserve_space(xdr, 8);
+ /* We always return success if bytes were written */
+ p = xdr_encode_hyper(p, 0);
+ }
+}
+
+static void encode_cb_offload4args(struct xdr_stream *xdr,
+ __be32 nfserr,
+ const struct knfsd_fh *fh,
+ const struct nfsd4_copy *cp,
+ struct nfs4_cb_compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p++ = cpu_to_be32(OP_CB_OFFLOAD);
+ encode_nfs_fh4(xdr, fh);
+ encode_stateid4(xdr, &cp->cp_res.cb_stateid);
+ encode_offload_info4(xdr, nfserr, cp);
+
+ hdr->nops++;
+}
+
+static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfsd4_callback *cb = data;
+ const struct nfsd4_copy *cp =
+ container_of(cb, struct nfsd4_copy, cp_cb);
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = 0,
+ .minorversion = cb->cb_clp->cl_minorversion,
+ };
+
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
+ encode_cb_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfsd4_callback *cb = data;
+ struct nfs4_cb_compound_hdr hdr;
+ int status;
+
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
+ return status;
+
+ if (cb) {
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+ }
+ return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
+}
+/*
* RPC procedure tables
*/
#define PROC(proc, call, argtype, restype) \
@@ -703,6 +800,7 @@ static const struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout),
#endif
PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
+ PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
};
static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index a5bb76593ce7..bf137fec33ff 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -65,6 +65,7 @@ struct ent {
u32 id;
char name[IDMAP_NAMESZ];
char authname[IDMAP_NAMESZ];
+ struct rcu_head rcu_head;
};
/* Common entry handling */
@@ -89,7 +90,7 @@ static void
ent_put(struct kref *ref)
{
struct ent *map = container_of(ref, struct ent, h.ref);
- kfree(map);
+ kfree_rcu(map, rcu_head);
}
static struct cache_head *
@@ -264,8 +265,8 @@ out:
static struct ent *
idtoname_lookup(struct cache_detail *cd, struct ent *item)
{
- struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
- idtoname_hash(item));
+ struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h,
+ idtoname_hash(item));
if (ch)
return container_of(ch, struct ent, h);
else
@@ -422,8 +423,8 @@ out:
static struct ent *
nametoid_lookup(struct cache_detail *cd, struct ent *item)
{
- struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
- nametoid_hash(item));
+ struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h,
+ nametoid_hash(item));
if (ch)
return container_of(ch, struct ent, h);
else
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index b7bc6e1a85ac..edff074d38c7 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -36,6 +36,7 @@
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/slab.h>
+#include <linux/kthread.h>
#include "idmap.h"
#include "cache.h"
@@ -1089,36 +1090,254 @@ out:
return status;
}
+void nfs4_put_copy(struct nfsd4_copy *copy)
+{
+ if (!refcount_dec_and_test(&copy->refcount))
+ return;
+ kfree(copy);
+}
+
+static bool
+check_and_set_stop_copy(struct nfsd4_copy *copy)
+{
+ bool value;
+
+ spin_lock(&copy->cp_clp->async_lock);
+ value = copy->stopped;
+ if (!copy->stopped)
+ copy->stopped = true;
+ spin_unlock(&copy->cp_clp->async_lock);
+ return value;
+}
+
+static void nfsd4_stop_copy(struct nfsd4_copy *copy)
+{
+ /* only 1 thread should stop the copy */
+ if (!check_and_set_stop_copy(copy))
+ kthread_stop(copy->copy_task);
+ nfs4_put_copy(copy);
+}
+
+static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
+{
+ struct nfsd4_copy *copy = NULL;
+
+ spin_lock(&clp->async_lock);
+ if (!list_empty(&clp->async_copies)) {
+ copy = list_first_entry(&clp->async_copies, struct nfsd4_copy,
+ copies);
+ refcount_inc(&copy->refcount);
+ }
+ spin_unlock(&clp->async_lock);
+ return copy;
+}
+
+void nfsd4_shutdown_copy(struct nfs4_client *clp)
+{
+ struct nfsd4_copy *copy;
+
+ while ((copy = nfsd4_get_copy(clp)) != NULL)
+ nfsd4_stop_copy(copy);
+}
+
+static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
+{
+ struct nfsd4_copy *copy = container_of(cb, struct nfsd4_copy, cp_cb);
+
+ nfs4_put_copy(copy);
+}
+
+static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
+ struct rpc_task *task)
+{
+ return 1;
+}
+
+static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
+ .release = nfsd4_cb_offload_release,
+ .done = nfsd4_cb_offload_done
+};
+
+static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
+{
+ copy->cp_res.wr_stable_how = NFS_UNSTABLE;
+ copy->cp_synchronous = sync;
+ gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
+}
+
+static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
+{
+ ssize_t bytes_copied = 0;
+ size_t bytes_total = copy->cp_count;
+ u64 src_pos = copy->cp_src_pos;
+ u64 dst_pos = copy->cp_dst_pos;
+
+ do {
+ if (kthread_should_stop())
+ break;
+ bytes_copied = nfsd_copy_file_range(copy->file_src, src_pos,
+ copy->file_dst, dst_pos, bytes_total);
+ if (bytes_copied <= 0)
+ break;
+ bytes_total -= bytes_copied;
+ copy->cp_res.wr_bytes_written += bytes_copied;
+ src_pos += bytes_copied;
+ dst_pos += bytes_copied;
+ } while (bytes_total > 0 && !copy->cp_synchronous);
+ return bytes_copied;
+}
+
+static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
+{
+ __be32 status;
+ ssize_t bytes;
+
+ bytes = _nfsd_copy_file_range(copy);
+ /* for async copy, we ignore the error, client can always retry
+ * to get the error
+ */
+ if (bytes < 0 && !copy->cp_res.wr_bytes_written)
+ status = nfserrno(bytes);
+ else {
+ nfsd4_init_copy_res(copy, sync);
+ status = nfs_ok;
+ }
+
+ fput(copy->file_src);
+ fput(copy->file_dst);
+ return status;
+}
+
+static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
+{
+ dst->cp_src_pos = src->cp_src_pos;
+ dst->cp_dst_pos = src->cp_dst_pos;
+ dst->cp_count = src->cp_count;
+ dst->cp_synchronous = src->cp_synchronous;
+ memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res));
+ memcpy(&dst->fh, &src->fh, sizeof(src->fh));
+ dst->cp_clp = src->cp_clp;
+ dst->file_dst = get_file(src->file_dst);
+ dst->file_src = get_file(src->file_src);
+ memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
+}
+
+static void cleanup_async_copy(struct nfsd4_copy *copy)
+{
+ nfs4_free_cp_state(copy);
+ fput(copy->file_dst);
+ fput(copy->file_src);
+ spin_lock(&copy->cp_clp->async_lock);
+ list_del(&copy->copies);
+ spin_unlock(&copy->cp_clp->async_lock);
+ nfs4_put_copy(copy);
+}
+
+static int nfsd4_do_async_copy(void *data)
+{
+ struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
+ struct nfsd4_copy *cb_copy;
+
+ copy->nfserr = nfsd4_do_copy(copy, 0);
+ cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ if (!cb_copy)
+ goto out;
+ memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res));
+ cb_copy->cp_clp = copy->cp_clp;
+ cb_copy->nfserr = copy->nfserr;
+ memcpy(&cb_copy->fh, &copy->fh, sizeof(copy->fh));
+ nfsd4_init_cb(&cb_copy->cp_cb, cb_copy->cp_clp,
+ &nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
+ nfsd4_run_cb(&cb_copy->cp_cb);
+out:
+ cleanup_async_copy(copy);
+ return 0;
+}
+
static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_copy *copy = &u->copy;
- struct file *src, *dst;
__be32 status;
- ssize_t bytes;
+ struct nfsd4_copy *async_copy = NULL;
- status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src,
- &copy->cp_dst_stateid, &dst);
+ status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
+ &copy->file_src, &copy->cp_dst_stateid,
+ &copy->file_dst);
if (status)
goto out;
- bytes = nfsd_copy_file_range(src, copy->cp_src_pos,
- dst, copy->cp_dst_pos, copy->cp_count);
+ copy->cp_clp = cstate->clp;
+ memcpy(&copy->fh, &cstate->current_fh.fh_handle,
+ sizeof(struct knfsd_fh));
+ if (!copy->cp_synchronous) {
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- if (bytes < 0)
- status = nfserrno(bytes);
- else {
- copy->cp_res.wr_bytes_written = bytes;
- copy->cp_res.wr_stable_how = NFS_UNSTABLE;
- copy->cp_synchronous = 1;
- gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp));
+ status = nfserrno(-ENOMEM);
+ async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ if (!async_copy)
+ goto out;
+ if (!nfs4_init_cp_state(nn, copy)) {
+ kfree(async_copy);
+ goto out;
+ }
+ refcount_set(&async_copy->refcount, 1);
+ memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
+ sizeof(copy->cp_stateid));
+ dup_copy_fields(copy, async_copy);
+ async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
+ async_copy, "%s", "copy thread");
+ if (IS_ERR(async_copy->copy_task))
+ goto out_err;
+ spin_lock(&async_copy->cp_clp->async_lock);
+ list_add(&async_copy->copies,
+ &async_copy->cp_clp->async_copies);
+ spin_unlock(&async_copy->cp_clp->async_lock);
+ wake_up_process(async_copy->copy_task);
status = nfs_ok;
+ } else
+ status = nfsd4_do_copy(copy, 1);
+out:
+ return status;
+out_err:
+ cleanup_async_copy(async_copy);
+ goto out;
+}
+
+struct nfsd4_copy *
+find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
+{
+ struct nfsd4_copy *copy;
+
+ spin_lock(&clp->async_lock);
+ list_for_each_entry(copy, &clp->async_copies, copies) {
+ if (memcmp(&copy->cp_stateid, stateid, NFS4_STATEID_SIZE))
+ continue;
+ refcount_inc(&copy->refcount);
+ spin_unlock(&clp->async_lock);
+ return copy;
}
+ spin_unlock(&clp->async_lock);
+ return NULL;
+}
+
+static __be32
+nfsd4_offload_cancel(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_offload_status *os = &u->offload_status;
+ __be32 status = 0;
+ struct nfsd4_copy *copy;
+ struct nfs4_client *clp = cstate->clp;
+
+ copy = find_async_copy(clp, &os->stateid);
+ if (copy)
+ nfsd4_stop_copy(copy);
+ else
+ status = nfserr_bad_stateid;
- fput(src);
- fput(dst);
-out:
return status;
}
@@ -1144,6 +1363,25 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fput(file);
return status;
}
+static __be32
+nfsd4_offload_status(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_offload_status *os = &u->offload_status;
+ __be32 status = 0;
+ struct nfsd4_copy *copy;
+ struct nfs4_client *clp = cstate->clp;
+
+ copy = find_async_copy(clp, &os->stateid);
+ if (copy) {
+ os->count = copy->cp_res.wr_bytes_written;
+ nfs4_put_copy(copy);
+ } else
+ status = nfserr_bad_stateid;
+
+ return status;
+}
static __be32
nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -2047,6 +2285,14 @@ static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
1 /* cr_synchronous */) * sizeof(__be32);
}
+static inline u32 nfsd4_offload_status_rsize(struct svc_rqst *rqstp,
+ struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 2 /* osr_count */ +
+ 1 /* osr_complete<1> optional 0 for now */) * sizeof(__be32);
+}
+
#ifdef CONFIG_NFSD_PNFS
static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
@@ -2460,6 +2706,17 @@ static const struct nfsd4_operation nfsd4_ops[] = {
.op_name = "OP_SEEK",
.op_rsize_bop = nfsd4_seek_rsize,
},
+ [OP_OFFLOAD_STATUS] = {
+ .op_func = nfsd4_offload_status,
+ .op_name = "OP_OFFLOAD_STATUS",
+ .op_rsize_bop = nfsd4_offload_status_rsize,
+ },
+ [OP_OFFLOAD_CANCEL] = {
+ .op_func = nfsd4_offload_cancel,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_OFFLOAD_CANCEL",
+ .op_rsize_bop = nfsd4_only_status_rsize,
+ },
};
/**
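An asynchronous COPY is handed to a dedicated kernel thread: nfsd4_copy() duplicates the request, creates the worker with kthread_create(), links it on the client's async_copies list and only then wakes it, while the worker checks kthread_should_stop() on each iteration so OFFLOAD_CANCEL can end it via kthread_stop(). A stripped-down sketch of that start lifecycle; struct demo_job and the demo_* helpers are illustrative, not nfsd structures:

/* Sketch only: hand work to a kthread that can later be stopped with kthread_stop(). */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_job {
	refcount_t refcount;
	struct task_struct *task;
	long remaining;
};

static int demo_worker(void *data)
{
	struct demo_job *job = data;

	while (job->remaining > 0 && !kthread_should_stop())
		job->remaining--;		/* stand-in for one bounded copy chunk */
	return 0;
}

static struct demo_job *demo_start(long count)
{
	struct demo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return NULL;
	refcount_set(&job->refcount, 1);
	job->remaining = count;
	job->task = kthread_create(demo_worker, job, "%s", "demo copy");
	if (IS_ERR(job->task)) {
		kfree(job);
		return NULL;
	}
	/* real code publishes the job on a list before waking the thread */
	wake_up_process(job->task);
	return job;
}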
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b0ca0efd2875..f093fbe47133 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -713,6 +713,36 @@ out_free:
return NULL;
}
+/*
+ * Create a unique stateid_t to represent each COPY.
+ */
+int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
+{
+ int new_id;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&nn->s2s_cp_lock);
+ new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT);
+ spin_unlock(&nn->s2s_cp_lock);
+ idr_preload_end();
+ if (new_id < 0)
+ return 0;
+ copy->cp_stateid.si_opaque.so_id = new_id;
+ copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time;
+ copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
+ return 1;
+}
+
+void nfs4_free_cp_state(struct nfsd4_copy *copy)
+{
+ struct nfsd_net *nn;
+
+ nn = net_generic(copy->cp_clp->net, nfsd_net_id);
+ spin_lock(&nn->s2s_cp_lock);
+ idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id);
+ spin_unlock(&nn->s2s_cp_lock);
+}
+
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
struct nfs4_stid *stid;
@@ -1827,6 +1857,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
#ifdef CONFIG_NFSD_PNFS
INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
+ INIT_LIST_HEAD(&clp->async_copies);
+ spin_lock_init(&clp->async_lock);
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
@@ -1942,6 +1974,7 @@ __destroy_client(struct nfs4_client *clp)
}
}
nfsd4_return_all_client_layouts(clp);
+ nfsd4_shutdown_copy(clp);
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
@@ -2475,7 +2508,8 @@ static bool client_has_state(struct nfs4_client *clp)
|| !list_empty(&clp->cl_lo_states)
#endif
|| !list_empty(&clp->cl_delegations)
- || !list_empty(&clp->cl_sessions);
+ || !list_empty(&clp->cl_sessions)
+ || !list_empty(&clp->async_copies);
}
__be32
@@ -4364,7 +4398,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
if (!fl)
- goto out_stid;
+ goto out_clnt_odstate;
status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL);
if (fl)
@@ -4389,7 +4423,6 @@ out_unlock:
vfs_setlease(fp->fi_deleg_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
put_clnt_odstate(dp->dl_clnt_odstate);
-out_stid:
nfs4_put_stid(&dp->dl_stid);
out_delegees:
put_deleg_file(fp);
@@ -7161,6 +7194,8 @@ static int nfs4_state_create_net(struct net *net)
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
spin_lock_init(&nn->client_lock);
+ spin_lock_init(&nn->s2s_cp_lock);
+ idr_init(&nn->s2s_cp_stateids);
spin_lock_init(&nn->blocked_locks_lock);
INIT_LIST_HEAD(&nn->blocked_locks_lru);
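nfs4_init_cp_state() above draws each COPY stateid from a per-net IDR under a spinlock; idr_preload() does the sleeping GFP_KERNEL allocation up front so the idr_alloc_cyclic() inside the lock can run with GFP_NOWAIT. A minimal sketch of that allocate-under-lock pattern with illustrative demo_* names:

/* Sketch only: cyclically allocate an id for an object while holding a spinlock. */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_ids);
static DEFINE_SPINLOCK(demo_lock);

static int demo_assign_id(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep, done before taking the lock */
	spin_lock(&demo_lock);
	id = idr_alloc_cyclic(&demo_ids, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&demo_lock);
	idr_preload_end();
	return id;				/* negative errno on failure */
}

static void demo_release_id(int id)
{
	spin_lock(&demo_lock);
	idr_remove(&demo_ids, id);
	spin_unlock(&demo_lock);
}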
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 418fa9c78186..3de42a729093 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1768,6 +1768,13 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
}
static __be32
+nfsd4_decode_offload_status(struct nfsd4_compoundargs *argp,
+ struct nfsd4_offload_status *os)
+{
+ return nfsd4_decode_stateid(argp, &os->stateid);
+}
+
+static __be32
nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
{
DECODE_HEAD;
@@ -1873,8 +1880,8 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
[OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTSTATS] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OFFLOAD_CANCEL] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OFFLOAD_STATUS] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_OFFLOAD_CANCEL] = (nfsd4_dec)nfsd4_decode_offload_status,
+ [OP_OFFLOAD_STATUS] = (nfsd4_dec)nfsd4_decode_offload_status,
[OP_READ_PLUS] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_SEEK] = (nfsd4_dec)nfsd4_decode_seek,
[OP_WRITE_SAME] = (nfsd4_dec)nfsd4_decode_notsupp,
@@ -4224,15 +4231,27 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
#endif /* CONFIG_NFSD_PNFS */
static __be32
-nfsd42_encode_write_res(struct nfsd4_compoundres *resp, struct nfsd42_write_res *write)
+nfsd42_encode_write_res(struct nfsd4_compoundres *resp,
+ struct nfsd42_write_res *write, bool sync)
{
__be32 *p;
+ p = xdr_reserve_space(&resp->xdr, 4);
+ if (!p)
+ return nfserr_resource;
- p = xdr_reserve_space(&resp->xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
+ if (sync)
+ *p++ = cpu_to_be32(0);
+ else {
+ __be32 nfserr;
+ *p++ = cpu_to_be32(1);
+ nfserr = nfsd4_encode_stateid(&resp->xdr, &write->cb_stateid);
+ if (nfserr)
+ return nfserr;
+ }
+ p = xdr_reserve_space(&resp->xdr, 8 + 4 + NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
- *p++ = cpu_to_be32(0);
p = xdr_encode_hyper(p, write->wr_bytes_written);
*p++ = cpu_to_be32(write->wr_stable_how);
p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
@@ -4246,7 +4265,8 @@ nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
{
__be32 *p;
- nfserr = nfsd42_encode_write_res(resp, &copy->cp_res);
+ nfserr = nfsd42_encode_write_res(resp, &copy->cp_res,
+ copy->cp_synchronous);
if (nfserr)
return nfserr;
@@ -4257,6 +4277,22 @@ nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
}
static __be32
+nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_offload_status *os)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 8 + 4);
+ if (!p)
+ return nfserr_resource;
+ p = xdr_encode_hyper(p, os->count);
+ *p++ = cpu_to_be32(0);
+
+ return nfserr;
+}
+
+static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_seek *seek)
{
@@ -4359,7 +4395,7 @@ static const nfsd4_enc nfsd4_enc_ops[] = {
[OP_LAYOUTERROR] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTSTATS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OFFLOAD_CANCEL] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_OFFLOAD_STATUS] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_OFFLOAD_STATUS] = (nfsd4_enc)nfsd4_encode_offload_status,
[OP_READ_PLUS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SEEK] = (nfsd4_enc)nfsd4_encode_seek,
[OP_WRITE_SAME] = (nfsd4_enc)nfsd4_encode_noop,
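The write_res encoder now fills the stream in two steps: a 4-byte slot for the callback-stateid count (0 for a synchronous copy, 1 followed by the encoded stateid otherwise), then the fixed-size remainder. A small sketch of the reserve-then-fill XDR idiom; demo_encode_write_res() mirrors the shape of the encoder above but is not the nfsd implementation:

/* Sketch only: reserve space in an xdr_stream and fill it in place. */
#include <linux/sunrpc/xdr.h>

static int demo_encode_write_res(struct xdr_stream *xdr, u64 bytes, u32 stable)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 4);	/* u64 byte count + u32 committed */
	if (!p)
		return -ENOMEM;			/* nfsd returns nfserr_resource here */
	p = xdr_encode_hyper(p, bytes);
	*p = cpu_to_be32(stable);
	return 0;
}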
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index dbdeb9d6af03..e2fe0e9ce0df 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -30,6 +30,7 @@
#define TARGET_BUCKET_SIZE 64
struct nfsd_drc_bucket {
+ struct rb_root rb_head;
struct list_head lru_head;
spinlock_t cache_lock;
};
@@ -121,7 +122,7 @@ nfsd_cache_hash(__be32 xid)
}
static struct svc_cacherep *
-nfsd_reply_cache_alloc(void)
+nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum)
{
struct svc_cacherep *rp;
@@ -129,21 +130,35 @@ nfsd_reply_cache_alloc(void)
if (rp) {
rp->c_state = RC_UNUSED;
rp->c_type = RC_NOCACHE;
+ RB_CLEAR_NODE(&rp->c_node);
INIT_LIST_HEAD(&rp->c_lru);
+
+ memset(&rp->c_key, 0, sizeof(rp->c_key));
+ rp->c_key.k_xid = rqstp->rq_xid;
+ rp->c_key.k_proc = rqstp->rq_proc;
+ rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
+ rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
+ rp->c_key.k_prot = rqstp->rq_prot;
+ rp->c_key.k_vers = rqstp->rq_vers;
+ rp->c_key.k_len = rqstp->rq_arg.len;
+ rp->c_key.k_csum = csum;
}
return rp;
}
static void
-nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
+nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
drc_mem_usage -= rp->c_replvec.iov_len;
kfree(rp->c_replvec.iov_base);
}
- list_del(&rp->c_lru);
- atomic_dec(&num_drc_entries);
- drc_mem_usage -= sizeof(*rp);
+ if (rp->c_state != RC_UNUSED) {
+ rb_erase(&rp->c_node, &b->rb_head);
+ list_del(&rp->c_lru);
+ atomic_dec(&num_drc_entries);
+ drc_mem_usage -= sizeof(*rp);
+ }
kmem_cache_free(drc_slab, rp);
}
@@ -151,7 +166,7 @@ static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
spin_lock(&b->cache_lock);
- nfsd_reply_cache_free_locked(rp);
+ nfsd_reply_cache_free_locked(b, rp);
spin_unlock(&b->cache_lock);
}
@@ -207,7 +222,7 @@ void nfsd_reply_cache_shutdown(void)
struct list_head *head = &drc_hashtbl[i].lru_head;
while (!list_empty(head)) {
rp = list_first_entry(head, struct svc_cacherep, c_lru);
- nfsd_reply_cache_free_locked(rp);
+ nfsd_reply_cache_free_locked(&drc_hashtbl[i], rp);
}
}
@@ -246,7 +261,7 @@ prune_bucket(struct nfsd_drc_bucket *b)
if (atomic_read(&num_drc_entries) <= max_drc_entries &&
time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
break;
- nfsd_reply_cache_free_locked(rp);
+ nfsd_reply_cache_free_locked(b, rp);
freed++;
}
return freed;
@@ -318,51 +333,48 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
return csum;
}
-static bool
-nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
+static int
+nfsd_cache_key_cmp(const struct svc_cacherep *key, const struct svc_cacherep *rp)
{
- /* Check RPC XID first */
- if (rqstp->rq_xid != rp->c_xid)
- return false;
- /* compare checksum of NFS data */
- if (csum != rp->c_csum) {
+ if (key->c_key.k_xid == rp->c_key.k_xid &&
+ key->c_key.k_csum != rp->c_key.k_csum)
++payload_misses;
- return false;
- }
- /* Other discriminators */
- if (rqstp->rq_proc != rp->c_proc ||
- rqstp->rq_prot != rp->c_prot ||
- rqstp->rq_vers != rp->c_vers ||
- rqstp->rq_arg.len != rp->c_len ||
- !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
- rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
- return false;
-
- return true;
+ return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
/*
* Search the request hash for an entry that matches the given rqstp.
* Must be called with cache_lock held. Returns the found entry or
- * NULL on failure.
+ * inserts an empty key on failure.
*/
static struct svc_cacherep *
-nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
- __wsum csum)
+nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key)
{
- struct svc_cacherep *rp, *ret = NULL;
- struct list_head *rh = &b->lru_head;
+ struct svc_cacherep *rp, *ret = key;
+ struct rb_node **p = &b->rb_head.rb_node,
+ *parent = NULL;
unsigned int entries = 0;
+ int cmp;
- list_for_each_entry(rp, rh, c_lru) {
+ while (*p != NULL) {
++entries;
- if (nfsd_cache_match(rqstp, csum, rp)) {
+ parent = *p;
+ rp = rb_entry(parent, struct svc_cacherep, c_node);
+
+ cmp = nfsd_cache_key_cmp(key, rp);
+ if (cmp < 0)
+ p = &parent->rb_left;
+ else if (cmp > 0)
+ p = &parent->rb_right;
+ else {
ret = rp;
- break;
+ goto out;
}
}
-
+ rb_link_node(&key->c_node, parent, p);
+ rb_insert_color(&key->c_node, &b->rb_head);
+out:
/* tally hash chain length stats */
if (entries > longest_chain) {
longest_chain = entries;
@@ -374,6 +386,7 @@ nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
atomic_read(&num_drc_entries));
}
+ lru_put_end(b, ret);
return ret;
}
@@ -389,9 +402,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
{
struct svc_cacherep *rp, *found;
__be32 xid = rqstp->rq_xid;
- u32 proto = rqstp->rq_prot,
- vers = rqstp->rq_vers,
- proc = rqstp->rq_proc;
__wsum csum;
u32 hash = nfsd_cache_hash(xid);
struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
@@ -410,60 +420,38 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
* Since the common case is a cache miss followed by an insert,
* preallocate an entry.
*/
- rp = nfsd_reply_cache_alloc();
- spin_lock(&b->cache_lock);
- if (likely(rp)) {
- atomic_inc(&num_drc_entries);
- drc_mem_usage += sizeof(*rp);
+ rp = nfsd_reply_cache_alloc(rqstp, csum);
+ if (!rp) {
+ dprintk("nfsd: unable to allocate DRC entry!\n");
+ return rtn;
}
- /* go ahead and prune the cache */
- prune_bucket(b);
-
- found = nfsd_cache_search(b, rqstp, csum);
- if (found) {
- if (likely(rp))
- nfsd_reply_cache_free_locked(rp);
+ spin_lock(&b->cache_lock);
+ found = nfsd_cache_insert(b, rp);
+ if (found != rp) {
+ nfsd_reply_cache_free_locked(NULL, rp);
rp = found;
goto found_entry;
}
- if (!rp) {
- dprintk("nfsd: unable to allocate DRC entry!\n");
- goto out;
- }
-
nfsdstats.rcmisses++;
rqstp->rq_cacherep = rp;
rp->c_state = RC_INPROG;
- rp->c_xid = xid;
- rp->c_proc = proc;
- rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
- rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
- rp->c_prot = proto;
- rp->c_vers = vers;
- rp->c_len = rqstp->rq_arg.len;
- rp->c_csum = csum;
- lru_put_end(b, rp);
+ atomic_inc(&num_drc_entries);
+ drc_mem_usage += sizeof(*rp);
- /* release any buffer */
- if (rp->c_type == RC_REPLBUFF) {
- drc_mem_usage -= rp->c_replvec.iov_len;
- kfree(rp->c_replvec.iov_base);
- rp->c_replvec.iov_base = NULL;
- }
- rp->c_type = RC_NOCACHE;
+ /* go ahead and prune the cache */
+ prune_bucket(b);
out:
spin_unlock(&b->cache_lock);
return rtn;
found_entry:
- nfsdstats.rchits++;
/* We found a matching entry which is either in progress or done. */
- lru_put_end(b, rp);
-
+ nfsdstats.rchits++;
rtn = RC_DROPIT;
+
/* Request being processed */
if (rp->c_state == RC_INPROG)
goto out;
@@ -489,7 +477,7 @@ found_entry:
break;
default:
printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
- nfsd_reply_cache_free_locked(rp);
+ nfsd_reply_cache_free_locked(b, rp);
}
goto out;
@@ -524,7 +512,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
if (!rp)
return;
- hash = nfsd_cache_hash(rp->c_xid);
+ hash = nfsd_cache_hash(rp->c_key.k_xid);
b = &drc_hashtbl[hash];
len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
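The reply cache drops its linear LRU scan in favour of a per-bucket rbtree keyed on the whole c_key: the preallocated entry doubles as the search key, and nfsd_cache_insert() either returns the existing node or links the new one where the walk ended. The classic walk-and-link pattern, sketched with hypothetical names:

/* Sketch only: rbtree "find or insert" keyed by an integer. */
#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node node;
	u64 key;
};

/* Returns the existing node with the same key, or links 'new' and returns it. */
static struct demo_node *demo_insert(struct rb_root *root, struct demo_node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;
	struct demo_node *cur;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct demo_node, node);
		if (new->key < cur->key)
			p = &parent->rb_left;
		else if (new->key > cur->key)
			p = &parent->rb_right;
		else
			return cur;		/* hit: caller frees 'new' */
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return new;				/* miss: 'new' is now in the tree */
}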
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7fb9f7c667b1..6384c9b94898 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1242,6 +1242,7 @@ static __net_init int nfsd_init_net(struct net *net)
nn->somebody_reclaimed = false;
nn->clverifier_counter = prandom_u32();
nn->clientid_counter = prandom_u32();
+ nn->s2s_cp_cl_id = nn->clientid_counter++;
atomic_set(&nn->ntf_refcnt, 0);
init_waitqueue_head(&nn->ntf_wq);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 0b15dac7e609..6aacb325b6a0 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -355,6 +355,8 @@ struct nfs4_client {
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
struct net *net;
+ struct list_head async_copies; /* list of async copies */
+ spinlock_t async_lock; /* lock for async copies */
};
/* struct nfs4_client_reset
@@ -573,6 +575,7 @@ enum nfsd4_cb_op {
NFSPROC4_CLNT_CB_NULL = 0,
NFSPROC4_CLNT_CB_RECALL,
NFSPROC4_CLNT_CB_LAYOUT,
+ NFSPROC4_CLNT_CB_OFFLOAD,
NFSPROC4_CLNT_CB_SEQUENCE,
NFSPROC4_CLNT_CB_NOTIFY_LOCK,
};
@@ -599,6 +602,7 @@ struct nfsd4_blocked_lock {
struct nfsd4_compound_state;
struct nfsd_net;
+struct nfsd4_copy;
extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
@@ -608,6 +612,8 @@ __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
struct nfs4_stid **s, struct nfsd_net *nn);
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
void (*sc_free)(struct nfs4_stid *));
+int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy);
+void nfs4_free_cp_state(struct nfsd4_copy *copy);
void nfs4_unhash_stid(struct nfs4_stid *s);
void nfs4_put_stid(struct nfs4_stid *s);
void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
@@ -626,6 +632,7 @@ extern void nfsd4_run_cb(struct nfsd4_callback *cb);
extern int nfsd4_create_callback_queue(void);
extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
+extern void nfsd4_shutdown_copy(struct nfs4_client *clp);
extern void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp);
extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name,
struct nfsd_net *nn);
@@ -633,6 +640,9 @@ extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn);
struct nfs4_file *find_file(struct knfsd_fh *fh);
void put_nfs4_file(struct nfs4_file *fi);
+extern void nfs4_put_copy(struct nfsd4_copy *copy);
+extern struct nfsd4_copy *
+find_async_copy(struct nfs4_client *clp, stateid_t *staetid);
static inline void get_nfs4_file(struct nfs4_file *fi)
{
refcount_inc(&fi->fi_ref);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8b90f5480904..fb28be653014 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1276,7 +1276,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
int type, dev_t rdev, struct svc_fh *resfhp)
{
struct dentry *dentry, *dchild = NULL;
- struct inode *dirp;
__be32 err;
int host_err;
@@ -1288,7 +1287,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
return err;
dentry = fhp->fh_dentry;
- dirp = d_inode(dentry);
host_err = fh_want_write(fhp);
if (host_err)
@@ -1409,6 +1407,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
*created = 1;
break;
}
+ /* fall through */
case NFS4_CREATE_EXCLUSIVE4_1:
if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime
&& d_inode(dchild)->i_atime.tv_sec == v_atime
@@ -1417,7 +1416,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
*created = 1;
goto set_attr;
}
- /* fallthru */
+ /* fall through */
case NFS3_CREATE_GUARDED:
err = nfserr_exist;
}
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 17c453a7999c..feeb6d4bdffd 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -511,6 +511,7 @@ struct nfsd42_write_res {
u64 wr_bytes_written;
u32 wr_stable_how;
nfs4_verifier wr_verifier;
+ stateid_t cb_stateid;
};
struct nfsd4_copy {
@@ -526,6 +527,23 @@ struct nfsd4_copy {
/* response */
struct nfsd42_write_res cp_res;
+
+ /* for cb_offload */
+ struct nfsd4_callback cp_cb;
+ __be32 nfserr;
+ struct knfsd_fh fh;
+
+ struct nfs4_client *cp_clp;
+
+ struct file *file_src;
+ struct file *file_dst;
+
+ stateid_t cp_stateid;
+
+ struct list_head copies;
+ struct task_struct *copy_task;
+ refcount_t refcount;
+ bool stopped;
};
struct nfsd4_seek {
@@ -539,6 +557,15 @@ struct nfsd4_seek {
loff_t seek_pos;
};
+struct nfsd4_offload_status {
+ /* request */
+ stateid_t stateid;
+
+ /* response */
+ u64 count;
+ u32 status;
+};
+
struct nfsd4_op {
int opnum;
const struct nfsd4_operation * opdesc;
@@ -597,6 +624,7 @@ struct nfsd4_op {
struct nfsd4_fallocate deallocate;
struct nfsd4_clone clone;
struct nfsd4_copy copy;
+ struct nfsd4_offload_status offload_status;
struct nfsd4_seek seek;
} u;
struct nfs4_replay * replay;
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index 517239af0302..547cf07cf4e0 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -38,3 +38,13 @@
#define NFS4_dec_cb_notify_lock_sz (cb_compound_dec_hdr_sz + \
cb_sequence_dec_sz + \
op_dec_sz)
+#define enc_cb_offload_info_sz (1 + 1 + 2 + 1 + \
+ XDR_QUADLEN(NFS4_VERIFIER_SIZE))
+#define NFS4_enc_cb_offload_sz (cb_compound_enc_hdr_sz + \
+ cb_sequence_enc_sz + \
+ enc_nfs4_fh_sz + \
+ enc_stateid_sz + \
+ enc_cb_offload_info_sz)
+#define NFS4_dec_cb_offload_sz (cb_compound_dec_hdr_sz + \
+ cb_sequence_dec_sz + \
+ op_dec_sz)
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index ebb24a314f43..de99db518571 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -168,24 +168,18 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
ctxt->newbh = NULL;
if (inode->i_blkbits == PAGE_SHIFT) {
- lock_page(obh->b_page);
- /*
- * We cannot call radix_tree_preload for the kernels older
- * than 2.6.23, because it is not exported for modules.
- */
+ struct page *opage = obh->b_page;
+ lock_page(opage);
retry:
- err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
- if (err)
- goto failed_unlock;
/* BUG_ON(oldkey != obh->b_page->index); */
- if (unlikely(oldkey != obh->b_page->index))
- NILFS_PAGE_BUG(obh->b_page,
+ if (unlikely(oldkey != opage->index))
+ NILFS_PAGE_BUG(opage,
"invalid oldkey %lld (newkey=%lld)",
(unsigned long long)oldkey,
(unsigned long long)newkey);
xa_lock_irq(&btnc->i_pages);
- err = radix_tree_insert(&btnc->i_pages, newkey, obh->b_page);
+ err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);
xa_unlock_irq(&btnc->i_pages);
/*
* Note: page->index will not change to newkey until
@@ -193,7 +187,6 @@ retry:
* To protect the page in intermediate state, the page lock
* is held.
*/
- radix_tree_preload_end();
if (!err)
return 0;
else if (err != -EEXIST)
@@ -203,7 +196,7 @@ retry:
if (!err)
goto retry;
/* fallback to copy mode */
- unlock_page(obh->b_page);
+ unlock_page(opage);
}
nbh = nilfs_btnode_create_block(btnc, newkey);
@@ -243,9 +236,8 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
mark_buffer_dirty(obh);
xa_lock_irq(&btnc->i_pages);
- radix_tree_delete(&btnc->i_pages, oldkey);
- radix_tree_tag_set(&btnc->i_pages, newkey,
- PAGECACHE_TAG_DIRTY);
+ __xa_erase(&btnc->i_pages, oldkey);
+ __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&btnc->i_pages);
opage->index = obh->b_blocknr = newkey;
@@ -275,7 +267,7 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
if (nbh == NULL) { /* blocksize == pagesize */
xa_lock_irq(&btnc->i_pages);
- radix_tree_delete(&btnc->i_pages, newkey);
+ __xa_erase(&btnc->i_pages, newkey);
xa_unlock_irq(&btnc->i_pages);
unlock_page(ctxt->bh->b_page);
} else
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 329a056b73b1..d7fc8d369d89 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -289,7 +289,7 @@ repeat:
* @dmap: destination page cache
* @smap: source page cache
*
- * No pages must no be added to the cache during this process.
+ * No pages must be added to the cache during this process.
* This must be ensured by the caller.
*/
void nilfs_copy_back_pages(struct address_space *dmap,
@@ -298,7 +298,6 @@ void nilfs_copy_back_pages(struct address_space *dmap,
struct pagevec pvec;
unsigned int i, n;
pgoff_t index = 0;
- int err;
pagevec_init(&pvec);
repeat:
@@ -313,35 +312,34 @@ repeat:
lock_page(page);
dpage = find_lock_page(dmap, offset);
if (dpage) {
- /* override existing page on the destination cache */
+ /* overwrite existing page in the destination cache */
WARN_ON(PageDirty(dpage));
nilfs_copy_page(dpage, page, 0);
unlock_page(dpage);
put_page(dpage);
+ /* Do we not need to remove page from smap here? */
} else {
- struct page *page2;
+ struct page *p;
/* move the page to the destination cache */
xa_lock_irq(&smap->i_pages);
- page2 = radix_tree_delete(&smap->i_pages, offset);
- WARN_ON(page2 != page);
-
+ p = __xa_erase(&smap->i_pages, offset);
+ WARN_ON(page != p);
smap->nrpages--;
xa_unlock_irq(&smap->i_pages);
xa_lock_irq(&dmap->i_pages);
- err = radix_tree_insert(&dmap->i_pages, offset, page);
- if (unlikely(err < 0)) {
- WARN_ON(err == -EEXIST);
+ p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
+ if (unlikely(p)) {
+ /* Probably -ENOMEM */
page->mapping = NULL;
- put_page(page); /* for cache */
+ put_page(page);
} else {
page->mapping = dmap;
dmap->nrpages++;
if (PageDirty(page))
- radix_tree_tag_set(&dmap->i_pages,
- offset,
- PAGECACHE_TAG_DIRTY);
+ __xa_set_mark(&dmap->i_pages, offset,
+ PAGECACHE_TAG_DIRTY);
}
xa_unlock_irq(&dmap->i_pages);
}
@@ -467,8 +465,7 @@ int __nilfs_clear_page_dirty(struct page *page)
if (mapping) {
xa_lock_irq(&mapping->i_pages);
if (test_bit(PG_dirty, &page->flags)) {
- radix_tree_tag_clear(&mapping->i_pages,
- page_index(page),
+ __xa_clear_mark(&mapping->i_pages, page_index(page),
PAGECACHE_TAG_DIRTY);
xa_unlock_irq(&mapping->i_pages);
return clear_page_dirty_for_io(page);
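Both nilfs2 files move from raw radix-tree calls to the XArray API: insert, erase and dirty-tag updates go through __xa_insert()/__xa_erase()/__xa_set_mark() under xa_lock_irq(), and because the insert can allocate with the supplied GFP mask the old radix_tree_preload() dance disappears. A minimal sketch of the locked-XArray idiom; demo_xa and the plain void pointers stand in for the real i_pages usage:

/* Sketch only: store/erase/mark entries in an XArray while holding its lock. */
#include <linux/xarray.h>

static DEFINE_XARRAY(demo_xa);

static int demo_add_entry(unsigned long index, void *entry, bool dirty)
{
	int err;

	xa_lock_irq(&demo_xa);
	err = __xa_insert(&demo_xa, index, entry, GFP_NOFS);
	/* nonzero if the slot was already occupied or allocation failed */
	if (!err && dirty)
		__xa_set_mark(&demo_xa, index, XA_MARK_0);	/* e.g. a dirty tag */
	xa_unlock_irq(&demo_xa);
	return err;
}

static void *demo_remove_entry(unsigned long index)
{
	void *entry;

	xa_lock_irq(&demo_xa);
	entry = __xa_erase(&demo_xa, index);
	xa_unlock_irq(&demo_xa);
	return entry;
}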
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 94b52157bf8d..5769cf3ff035 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -25,7 +25,7 @@ static bool should_merge(struct fsnotify_event *old_fsn,
old = FANOTIFY_E(old_fsn);
new = FANOTIFY_E(new_fsn);
- if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
+ if (old_fsn->inode == new_fsn->inode && old->pid == new->pid &&
old->path.mnt == new->path.mnt &&
old->path.dentry == new->path.dentry)
return true;
@@ -131,8 +131,8 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
!(marks_mask & FS_ISDIR & ~marks_ignored_mask))
return false;
- if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
- ~marks_ignored_mask)
+ if (event_mask & FANOTIFY_OUTGOING_EVENTS &
+ marks_mask & ~marks_ignored_mask)
return true;
return false;
@@ -171,7 +171,10 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
goto out;
init: __maybe_unused
fsnotify_init_event(&event->fse, inode, mask);
- event->tgid = get_pid(task_tgid(current));
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
+ event->pid = get_pid(task_pid(current));
+ else
+ event->pid = get_pid(task_tgid(current));
if (path) {
event->path = *path;
path_get(&event->path);
@@ -205,6 +208,8 @@ static int fanotify_handle_event(struct fsnotify_group *group,
BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 10);
+
if (!fanotify_should_send_event(iter_info, mask, data, data_type))
return 0;
@@ -236,7 +241,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
if (ret) {
/* Permission events shouldn't be merged */
- BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
+ BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
/* Our event wasn't used in the end. Free it. */
fsnotify_destroy_event(group, fsn_event);
@@ -268,7 +273,7 @@ static void fanotify_free_event(struct fsnotify_event *fsn_event)
event = FANOTIFY_E(fsn_event);
path_put(&event->path);
- put_pid(event->tgid);
+ put_pid(event->pid);
if (fanotify_is_perm_event(fsn_event->mask)) {
kmem_cache_free(fanotify_perm_event_cachep,
FANOTIFY_PE(fsn_event));
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index 8609ba06f474..ea05b8a401e7 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -19,7 +19,7 @@ struct fanotify_event_info {
* during this object's lifetime
*/
struct path path;
- struct pid *tgid;
+ struct pid *pid;
};
/*
@@ -44,7 +44,7 @@ FANOTIFY_PE(struct fsnotify_event *fse)
static inline bool fanotify_is_perm_event(u32 mask)
{
return IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS) &&
- mask & FAN_ALL_PERM_EVENTS;
+ mask & FANOTIFY_PERM_EVENTS;
}
static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse)
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 69054886915b..e03be5071362 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -131,8 +131,8 @@ static int fill_event_metadata(struct fsnotify_group *group,
metadata->metadata_len = FAN_EVENT_METADATA_LEN;
metadata->vers = FANOTIFY_METADATA_VERSION;
metadata->reserved = 0;
- metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
- metadata->pid = pid_vnr(event->tgid);
+ metadata->mask = fsn_event->mask & FANOTIFY_OUTGOING_EVENTS;
+ metadata->pid = pid_vnr(event->pid);
if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
metadata->fd = FAN_NOFD;
else {
@@ -191,7 +191,7 @@ static int process_access_response(struct fsnotify_group *group,
if (fd < 0)
return -EINVAL;
- if ((response & FAN_AUDIT) && !group->fanotify_data.audit)
+ if ((response & FAN_AUDIT) && !FAN_GROUP_FLAG(group, FAN_ENABLE_AUDIT))
return -EINVAL;
event = dequeue_event(group, fd);
@@ -395,7 +395,7 @@ static int fanotify_release(struct inode *ignored, struct file *file)
*/
while (!fsnotify_notify_queue_is_empty(group)) {
fsn_event = fsnotify_remove_first_event(group);
- if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
+ if (!(fsn_event->mask & FANOTIFY_PERM_EVENTS)) {
spin_unlock(&group->notification_lock);
fsnotify_destroy_event(group, fsn_event);
spin_lock(&group->notification_lock);
@@ -506,18 +506,10 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
- __u32 tmask = fsn_mark->mask & ~mask;
-
- if (flags & FAN_MARK_ONDIR)
- tmask &= ~FAN_ONDIR;
-
oldmask = fsn_mark->mask;
- fsn_mark->mask = tmask;
+ fsn_mark->mask &= ~mask;
} else {
- __u32 tmask = fsn_mark->ignored_mask & ~mask;
- if (flags & FAN_MARK_ONDIR)
- tmask &= ~FAN_ONDIR;
- fsn_mark->ignored_mask = tmask;
+ fsn_mark->ignored_mask &= ~mask;
}
*destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
spin_unlock(&fsn_mark->lock);
@@ -563,6 +555,13 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
mask, flags);
}
+static int fanotify_remove_sb_mark(struct fsnotify_group *group,
+ struct super_block *sb, __u32 mask,
+ unsigned int flags)
+{
+ return fanotify_remove_mark(group, &sb->s_fsnotify_marks, mask, flags);
+}
+
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
struct inode *inode, __u32 mask,
unsigned int flags)
@@ -579,19 +578,10 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
spin_lock(&fsn_mark->lock);
if (!(flags & FAN_MARK_IGNORED_MASK)) {
- __u32 tmask = fsn_mark->mask | mask;
-
- if (flags & FAN_MARK_ONDIR)
- tmask |= FAN_ONDIR;
-
oldmask = fsn_mark->mask;
- fsn_mark->mask = tmask;
+ fsn_mark->mask |= mask;
} else {
- __u32 tmask = fsn_mark->ignored_mask | mask;
- if (flags & FAN_MARK_ONDIR)
- tmask |= FAN_ONDIR;
-
- fsn_mark->ignored_mask = tmask;
+ fsn_mark->ignored_mask |= mask;
if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
}
@@ -658,6 +648,14 @@ static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags);
}
+static int fanotify_add_sb_mark(struct fsnotify_group *group,
+ struct super_block *sb, __u32 mask,
+ unsigned int flags)
+{
+ return fanotify_add_mark(group, &sb->s_fsnotify_marks,
+ FSNOTIFY_OBJ_TYPE_SB, mask, flags);
+}
+
static int fanotify_add_inode_mark(struct fsnotify_group *group,
struct inode *inode, __u32 mask,
unsigned int flags)
@@ -686,16 +684,16 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
struct user_struct *user;
struct fanotify_event_info *oevent;
- pr_debug("%s: flags=%d event_f_flags=%d\n",
- __func__, flags, event_f_flags);
+ pr_debug("%s: flags=%x event_f_flags=%x\n",
+ __func__, flags, event_f_flags);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
#ifdef CONFIG_AUDITSYSCALL
- if (flags & ~(FAN_ALL_INIT_FLAGS | FAN_ENABLE_AUDIT))
+ if (flags & ~(FANOTIFY_INIT_FLAGS | FAN_ENABLE_AUDIT))
#else
- if (flags & ~FAN_ALL_INIT_FLAGS)
+ if (flags & ~FANOTIFY_INIT_FLAGS)
#endif
return -EINVAL;
@@ -731,6 +729,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
}
group->fanotify_data.user = user;
+ group->fanotify_data.flags = flags;
atomic_inc(&user->fanotify_listeners);
group->memcg = get_mem_cgroup_from_mm(current->mm);
@@ -746,7 +745,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->fanotify_data.f_flags = event_f_flags;
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
- switch (flags & FAN_ALL_CLASS_BITS) {
+ switch (flags & FANOTIFY_CLASS_BITS) {
case FAN_CLASS_NOTIF:
group->priority = FS_PRIO_0;
break;
@@ -783,7 +782,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
fd = -EPERM;
if (!capable(CAP_AUDIT_WRITE))
goto out_destroy_group;
- group->fanotify_data.audit = true;
}
fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
@@ -805,7 +803,8 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
struct fsnotify_group *group;
struct fd f;
struct path path;
- u32 valid_mask = FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD;
+ u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
+ unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
int ret;
pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
@@ -815,8 +814,18 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
if (mask & ((__u64)0xffffffff << 32))
return -EINVAL;
- if (flags & ~FAN_ALL_MARK_FLAGS)
+ if (flags & ~FANOTIFY_MARK_FLAGS)
+ return -EINVAL;
+
+ switch (mark_type) {
+ case FAN_MARK_INODE:
+ case FAN_MARK_MOUNT:
+ case FAN_MARK_FILESYSTEM:
+ break;
+ default:
return -EINVAL;
+ }
+
switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
case FAN_MARK_ADD: /* fallthrough */
case FAN_MARK_REMOVE:
@@ -824,20 +833,15 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
return -EINVAL;
break;
case FAN_MARK_FLUSH:
- if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
+ if (flags & ~(FANOTIFY_MARK_TYPE_BITS | FAN_MARK_FLUSH))
return -EINVAL;
break;
default:
return -EINVAL;
}
- if (mask & FAN_ONDIR) {
- flags |= FAN_MARK_ONDIR;
- mask &= ~FAN_ONDIR;
- }
-
if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
- valid_mask |= FAN_ALL_PERM_EVENTS;
+ valid_mask |= FANOTIFY_PERM_EVENTS;
if (mask & ~valid_mask)
return -EINVAL;
@@ -857,14 +861,16 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
* allowed to set permissions events.
*/
ret = -EINVAL;
- if (mask & FAN_ALL_PERM_EVENTS &&
+ if (mask & FANOTIFY_PERM_EVENTS &&
group->priority == FS_PRIO_0)
goto fput_and_out;
if (flags & FAN_MARK_FLUSH) {
ret = 0;
- if (flags & FAN_MARK_MOUNT)
+ if (mark_type == FAN_MARK_MOUNT)
fsnotify_clear_vfsmount_marks_by_group(group);
+ else if (mark_type == FAN_MARK_FILESYSTEM)
+ fsnotify_clear_sb_marks_by_group(group);
else
fsnotify_clear_inode_marks_by_group(group);
goto fput_and_out;
@@ -875,7 +881,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
goto fput_and_out;
/* inode held in place by reference to path; group by fget on fd */
- if (!(flags & FAN_MARK_MOUNT))
+ if (mark_type == FAN_MARK_INODE)
inode = path.dentry->d_inode;
else
mnt = path.mnt;
@@ -883,14 +889,18 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
/* create/update an inode mark */
switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
case FAN_MARK_ADD:
- if (flags & FAN_MARK_MOUNT)
+ if (mark_type == FAN_MARK_MOUNT)
ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
+ else if (mark_type == FAN_MARK_FILESYSTEM)
+ ret = fanotify_add_sb_mark(group, mnt->mnt_sb, mask, flags);
else
ret = fanotify_add_inode_mark(group, inode, mask, flags);
break;
case FAN_MARK_REMOVE:
- if (flags & FAN_MARK_MOUNT)
+ if (mark_type == FAN_MARK_MOUNT)
ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
+ else if (mark_type == FAN_MARK_FILESYSTEM)
+ ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask, flags);
else
ret = fanotify_remove_inode_mark(group, inode, mask, flags);
break;
@@ -934,6 +944,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
*/
static int __init fanotify_user_setup(void)
{
+ BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 7);
+ BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
+
fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
SLAB_PANIC|SLAB_ACCOUNT);
fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
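On the user-facing side, FAN_MARK_FILESYSTEM and FAN_REPORT_TID are reachable through the unchanged syscalls: a group created with FAN_REPORT_TID reports the acting thread's TID in the event's pid field, and a filesystem mark watches every object on the superblock backing the given path. A hedged userspace sketch (the mount point is an example path; the installed headers must define the new flags, and CAP_SYS_ADMIN plus a kernel with these patches are required):

/* Sketch only: watch a whole filesystem for modifications. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>
#include <unistd.h>

int main(void)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_TID, O_RDONLY);

	if (fd < 0 || fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
				    FAN_MODIFY, AT_FDCWD, "/mnt/data") < 0) {
		perror("fanotify");
		return 1;
	}
	/* read(fd, ...) would now return fanotify_event_metadata records
	 * whose ->pid field holds the TID because of FAN_REPORT_TID. */
	close(fd);
	return 0;
}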
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 86fcf5814279..348a184bcdda 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -131,37 +131,20 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n",
mnt->mnt_id, mflags, mark->mask, mark->ignored_mask);
+ } else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_SB) {
+ struct super_block *sb = fsnotify_conn_sb(mark->connector);
+
+ seq_printf(m, "fanotify sdev:%x mflags:%x mask:%x ignored_mask:%x\n",
+ sb->s_dev, mflags, mark->mask, mark->ignored_mask);
}
}
void fanotify_show_fdinfo(struct seq_file *m, struct file *f)
{
struct fsnotify_group *group = f->private_data;
- unsigned int flags = 0;
-
- switch (group->priority) {
- case FS_PRIO_0:
- flags |= FAN_CLASS_NOTIF;
- break;
- case FS_PRIO_1:
- flags |= FAN_CLASS_CONTENT;
- break;
- case FS_PRIO_2:
- flags |= FAN_CLASS_PRE_CONTENT;
- break;
- }
-
- if (group->max_events == UINT_MAX)
- flags |= FAN_UNLIMITED_QUEUE;
-
- if (group->fanotify_data.max_marks == UINT_MAX)
- flags |= FAN_UNLIMITED_MARKS;
-
- if (group->fanotify_data.audit)
- flags |= FAN_ENABLE_AUDIT;
seq_printf(m, "fanotify flags:%x event-flags:%x\n",
- flags, group->fanotify_data.f_flags);
+ group->fanotify_data.flags, group->fanotify_data.f_flags);
show_fdinfo(m, f, fanotify_fdinfo);
}
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index ababdbfab537..2172ba516c61 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -48,7 +48,7 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
* Called during unmount with no locks held, so needs to be safe against
* concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block.
*/
-void fsnotify_unmount_inodes(struct super_block *sb)
+static void fsnotify_unmount_inodes(struct super_block *sb)
{
struct inode *inode, *iput_inode = NULL;
@@ -96,6 +96,15 @@ void fsnotify_unmount_inodes(struct super_block *sb)
if (iput_inode)
iput(iput_inode);
+ /* Wait for outstanding inode references from connectors */
+ wait_var_event(&sb->s_fsnotify_inode_refs,
+ !atomic_long_read(&sb->s_fsnotify_inode_refs));
+}
+
+void fsnotify_sb_delete(struct super_block *sb)
+{
+ fsnotify_unmount_inodes(sb);
+ fsnotify_clear_marks_by_sb(sb);
}
/*
@@ -190,7 +199,7 @@ static int send_to_group(struct inode *to_tell,
struct fsnotify_iter_info *iter_info)
{
struct fsnotify_group *group = NULL;
- __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
+ __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
__u32 marks_mask = 0;
__u32 marks_ignored_mask = 0;
struct fsnotify_mark *mark;
@@ -319,15 +328,17 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
const unsigned char *file_name, u32 cookie)
{
struct fsnotify_iter_info iter_info = {};
- struct mount *mnt;
+ struct super_block *sb = NULL;
+ struct mount *mnt = NULL;
+ __u32 mnt_or_sb_mask = 0;
int ret = 0;
- /* global tests shouldn't care about events on child only the specific event */
- __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
+ __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
- if (data_is == FSNOTIFY_EVENT_PATH)
+ if (data_is == FSNOTIFY_EVENT_PATH) {
mnt = real_mount(((const struct path *)data)->mnt);
- else
- mnt = NULL;
+ sb = mnt->mnt.mnt_sb;
+ mnt_or_sb_mask = mnt->mnt_fsnotify_mask | sb->s_fsnotify_mask;
+ }
/*
* Optimization: srcu_read_lock() has a memory barrier which can
@@ -337,16 +348,15 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
* need SRCU to keep them "alive".
*/
if (!to_tell->i_fsnotify_marks &&
- (!mnt || !mnt->mnt_fsnotify_marks))
+ (!mnt || (!mnt->mnt_fsnotify_marks && !sb->s_fsnotify_marks)))
return 0;
/*
* if this is a modify event we may need to clear the ignored masks
- * otherwise return if neither the inode nor the vfsmount care about
+ * otherwise return if neither the inode nor the vfsmount/sb care about
* this type of event.
*/
if (!(mask & FS_MODIFY) &&
- !(test_mask & to_tell->i_fsnotify_mask) &&
- !(mnt && test_mask & mnt->mnt_fsnotify_mask))
+ !(test_mask & (to_tell->i_fsnotify_mask | mnt_or_sb_mask)))
return 0;
iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
@@ -356,11 +366,13 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
if (mnt) {
iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
+ iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] =
+ fsnotify_first_mark(&sb->s_fsnotify_marks);
}
/*
- * We need to merge inode & vfsmount mark lists so that inode mark
- * ignore masks are properly reflected for mount mark notifications.
+ * We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark
+ * ignore masks are properly reflected for mount/sb mark notifications.
* That's why this traversal is so complicated...
*/
while (fsnotify_iter_select_report_types(&iter_info)) {
@@ -386,7 +398,7 @@ static __init int fsnotify_init(void)
{
int ret;
- BUG_ON(hweight32(ALL_FSNOTIFY_EVENTS) != 23);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 23);
ret = init_srcu_struct(&fsnotify_mark_srcu);
if (ret)
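With superblock marks added as a third object type, the pre-filter above ORs the mount and sb masks together before deciding whether to take the SRCU lock at all. A standalone sketch of that short-circuit, using the real FS_MODIFY/FS_OPEN bit values but otherwise simplified:

#include <stdbool.h>
#include <stdio.h>

#define FS_MODIFY	0x00000002	/* fsnotify event bits */
#define FS_OPEN		0x00000020

/* Cheap pre-filter: skip SRCU and mark iteration when nobody could care. */
static bool anyone_interested(unsigned int mask, unsigned int inode_mask,
			      unsigned int mnt_mask, unsigned int sb_mask)
{
	if (mask & FS_MODIFY)	/* may still need to clear ignored masks */
		return true;
	return mask & (inode_mask | mnt_mask | sb_mask);
}

int main(void)
{
	/* Only an sb mark cares about FS_OPEN here, yet the event is delivered. */
	printf("%d\n", anyone_interested(FS_OPEN, 0, 0, FS_OPEN));
	return 0;
}
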
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 7902653dd577..5a00121fb219 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -21,6 +21,12 @@ static inline struct mount *fsnotify_conn_mount(
return container_of(conn->obj, struct mount, mnt_fsnotify_marks);
}
+static inline struct super_block *fsnotify_conn_sb(
+ struct fsnotify_mark_connector *conn)
+{
+ return container_of(conn->obj, struct super_block, s_fsnotify_marks);
+}
+
/* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group);
@@ -43,6 +49,11 @@ static inline void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
{
fsnotify_destroy_marks(&real_mount(mnt)->mnt_fsnotify_marks);
}
+/* run the list of all marks associated with sb and destroy them */
+static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
+{
+ fsnotify_destroy_marks(&sb->s_fsnotify_marks);
+}
/* Wait until all marks queued for destruction are destroyed */
extern void fsnotify_wait_marks_destroyed(void);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index ac6978d3208c..105576daca4a 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -815,7 +815,7 @@ static int __init inotify_user_setup(void)
BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
- BUG_ON(hweight32(ALL_INOTIFY_BITS) != 22);
+ BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
SLAB_PANIC|SLAB_ACCOUNT);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 59cdb27826de..d2dd16cb5989 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -115,6 +115,8 @@ static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn)
return &fsnotify_conn_inode(conn)->i_fsnotify_mask;
else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT)
return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask;
+ else if (conn->type == FSNOTIFY_OBJ_TYPE_SB)
+ return &fsnotify_conn_sb(conn)->s_fsnotify_mask;
return NULL;
}
@@ -179,19 +181,24 @@ static void fsnotify_connector_destroy_workfn(struct work_struct *work)
}
}
-static struct inode *fsnotify_detach_connector_from_object(
- struct fsnotify_mark_connector *conn)
+static void *fsnotify_detach_connector_from_object(
+ struct fsnotify_mark_connector *conn,
+ unsigned int *type)
{
struct inode *inode = NULL;
+ *type = conn->type;
if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED)
return NULL;
if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
inode = fsnotify_conn_inode(conn);
inode->i_fsnotify_mask = 0;
+ atomic_long_inc(&inode->i_sb->s_fsnotify_inode_refs);
} else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
+ } else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) {
+ fsnotify_conn_sb(conn)->s_fsnotify_mask = 0;
}
rcu_assign_pointer(*(conn->obj), NULL);
@@ -211,10 +218,29 @@ static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
fsnotify_put_group(group);
}
+/* Drop object reference originally held by a connector */
+static void fsnotify_drop_object(unsigned int type, void *objp)
+{
+ struct inode *inode;
+ struct super_block *sb;
+
+ if (!objp)
+ return;
+ /* Currently only inode references are passed to be dropped */
+ if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE))
+ return;
+ inode = objp;
+ sb = inode->i_sb;
+ iput(inode);
+ if (atomic_long_dec_and_test(&sb->s_fsnotify_inode_refs))
+ wake_up_var(&sb->s_fsnotify_inode_refs);
+}
+
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
struct fsnotify_mark_connector *conn;
- struct inode *inode = NULL;
+ void *objp = NULL;
+ unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED;
bool free_conn = false;
/* Catch marks that were actually never attached to object */
@@ -234,7 +260,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
conn = mark->connector;
hlist_del_init_rcu(&mark->obj_list);
if (hlist_empty(&conn->list)) {
- inode = fsnotify_detach_connector_from_object(conn);
+ objp = fsnotify_detach_connector_from_object(conn, &type);
free_conn = true;
} else {
__fsnotify_recalc_mask(conn);
@@ -242,7 +268,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
mark->connector = NULL;
spin_unlock(&conn->lock);
- iput(inode);
+ fsnotify_drop_object(type, objp);
if (free_conn) {
spin_lock(&destroy_lock);
@@ -709,7 +735,8 @@ void fsnotify_destroy_marks(fsnotify_connp_t *connp)
{
struct fsnotify_mark_connector *conn;
struct fsnotify_mark *mark, *old_mark = NULL;
- struct inode *inode;
+ void *objp;
+ unsigned int type;
conn = fsnotify_grab_connector(connp);
if (!conn)
@@ -735,11 +762,11 @@ void fsnotify_destroy_marks(fsnotify_connp_t *connp)
* mark references get dropped. It would lead to strange results such
* as delaying inode deletion or blocking unmount.
*/
- inode = fsnotify_detach_connector_from_object(conn);
+ objp = fsnotify_detach_connector_from_object(conn, &type);
spin_unlock(&conn->lock);
if (old_mark)
fsnotify_put_mark(old_mark);
- iput(inode);
+ fsnotify_drop_object(type, objp);
}
/*
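fsnotify_detach_connector_from_object() now also reports the object type so the final reference can be dropped outside the connector lock, and the per-sb s_fsnotify_inode_refs counter lets unmount wait for those deferred iput()s. A rough userspace analogue of that count-and-wake handshake, with pthreads standing in for wait_var_event()/wake_up_var():

#include <pthread.h>
#include <stdatomic.h>

static atomic_long outstanding_refs;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Analogue of fsnotify_drop_object(): drop one ref, wake the waiter on zero. */
static void drop_ref(void)
{
	if (atomic_fetch_sub(&outstanding_refs, 1) == 1) {
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
	}
}

/* Analogue of the unmount side waiting on s_fsnotify_inode_refs. */
static void wait_for_refs(void)
{
	pthread_mutex_lock(&lock);
	while (atomic_load(&outstanding_refs))
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	atomic_fetch_add(&outstanding_refs, 1);
	drop_ref();
	wait_for_refs();	/* returns at once: count is back to zero */
	return 0;
}
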
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index a342f008e42f..d1cbb27808e2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5106,8 +5106,6 @@ int ocfs2_split_extent(handle_t *handle,
* rightmost extent list.
*/
if (path->p_tree_depth) {
- struct ocfs2_extent_block *eb;
-
ret = ocfs2_read_extent_block(et->et_ci,
ocfs2_et_get_last_eb_blk(et),
&last_eb_bh);
@@ -5115,8 +5113,6 @@ int ocfs2_split_extent(handle_t *handle,
mlog_errno(ret);
goto out;
}
-
- eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
}
if (rec->e_cpos == split_rec->e_cpos &&
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 302cd7caa4a7..da578ad4c08f 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1392,8 +1392,7 @@ retry:
unlock:
spin_unlock(&oi->ip_lock);
out:
- if (new)
- kfree(new);
+ kfree(new);
return ret;
}
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 9b984cae4c4e..1d6dc8422899 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -329,7 +329,7 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
char *buf;
- buf = (char *) get_zeroed_page(GFP_NOFS);
+ buf = (char *) get_zeroed_page(GFP_ATOMIC);
if (buf) {
dump_mle(mle, buf, PAGE_SIZE - 1);
free_page((unsigned long)buf);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 838a06d4066a..074d5de17bb2 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -531,7 +531,7 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
assert_spin_locked(&res->spinlock);
/* don't shuffle secondary queues */
- if ((res->owner == dlm->node_num)) {
+ if (res->owner == dlm->node_num) {
if (res->state & (DLM_LOCK_RES_MIGRATING |
DLM_LOCK_RES_BLOCK_DIRTY))
return;
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 7a5ee145c733..1114ef02e780 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4135,7 +4135,6 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
- struct ocfs2_refcount_block *rb;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
struct ocfs2_refcount_tree *ref_tree;
@@ -4162,7 +4161,6 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
mlog_errno(ret);
goto out;
}
- rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
&ref_tree->rf_ci, ref_root_bh,
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
index 10587413b20e..72d2ff17d27b 100644
--- a/fs/orangefs/acl.c
+++ b/fs/orangefs/acl.c
@@ -167,12 +167,16 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir)
error = __orangefs_set_acl(inode, default_acl,
ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
/* If mode of the inode was changed, then do a forcible ->setattr */
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 136a8bdc1d91..fe53381b26b1 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -405,7 +405,11 @@ struct inode *orangefs_iget(struct super_block *sb,
orangefs_test_inode,
orangefs_set_inode,
ref);
- if (!inode || !(inode->i_state & I_NEW))
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(inode->i_state & I_NEW))
return inode;
error = orangefs_inode_getattr(inode, 1, 1, STATX_ALL);
@@ -448,7 +452,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
inode = new_inode(sb);
if (!inode)
- return NULL;
+ return ERR_PTR(-ENOMEM);
orangefs_set_inode(inode, ref);
inode->i_ino = hash; /* needed for stat etc */
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 625b0580f9be..c8676c996249 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -58,7 +58,6 @@ static int orangefs_create(struct inode *dir,
goto out;
ref = new_op->downcall.resp.create.refn;
- op_release(new_op);
inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0, &ref);
if (IS_ERR(inode)) {
@@ -92,6 +91,7 @@ static int orangefs_create(struct inode *dir,
mark_inode_dirty_sync(dir);
ret = 0;
out:
+ op_release(new_op);
gossip_debug(GOSSIP_NAME_DEBUG,
"%s: %pd: returning %d\n",
__func__,
@@ -157,7 +157,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
new_op->downcall.resp.lookup.refn.fs_id,
ret);
- if (ret >= 0) {
+ if (ret == 0) {
orangefs_set_timeout(dentry);
inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
} else if (ret == -ENOENT) {
@@ -269,7 +269,6 @@ static int orangefs_symlink(struct inode *dir,
}
ref = new_op->downcall.resp.sym.refn;
- op_release(new_op);
inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0, &ref);
if (IS_ERR(inode)) {
@@ -307,6 +306,7 @@ static int orangefs_symlink(struct inode *dir,
mark_inode_dirty_sync(dir);
ret = 0;
out:
+ op_release(new_op);
return ret;
}
@@ -346,7 +346,6 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
}
ref = new_op->downcall.resp.mkdir.refn;
- op_release(new_op);
inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0, &ref);
if (IS_ERR(inode)) {
@@ -379,6 +378,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
orangefs_inode_setattr(dir, &iattr);
mark_inode_dirty_sync(dir);
out:
+ op_release(new_op);
return ret;
}
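Moving op_release() to the common out label means the request is freed on every path, so the early releases before orangefs_new_inode() are dropped. A generic sketch of that single-exit cleanup style, with userspace errno values standing in for the kernel's:

#include <errno.h>
#include <stdlib.h>

struct op { int payload; };

/* Single exit point: the request is released on success and on every error. */
static int do_create(int simulate_error)
{
	struct op *new_op = calloc(1, sizeof(*new_op));
	int ret;

	if (!new_op)
		return -ENOMEM;
	if (simulate_error) {
		ret = -EIO;
		goto out;
	}
	ret = 0;
out:
	free(new_op);
	return ret;
}

int main(void)
{
	return do_create(0) ? 1 : 0;
}
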
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index dd28079f518c..19739aaee675 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -323,7 +323,7 @@ static ssize_t sysfs_service_op_show(struct kobject *kobj,
/* Can't do a service_operation if the client is not running... */
rc = is_daemon_in_service();
if (rc) {
- pr_info("%s: Client not running :%d:\n",
+ pr_info_ratelimited("%s: Client not running :%d:\n",
__func__,
is_daemon_in_service());
goto out;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 1cc797a08a5b..d6a3346e2672 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -395,7 +395,6 @@ struct ovl_copy_up_ctx {
struct dentry *destdir;
struct qstr destname;
struct dentry *workdir;
- bool tmpfile;
bool origin;
bool indexed;
bool metacopy;
@@ -440,63 +439,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
return err;
}
-static int ovl_install_temp(struct ovl_copy_up_ctx *c, struct dentry *temp,
- struct dentry **newdentry)
-{
- int err;
- struct dentry *upper;
- struct inode *udir = d_inode(c->destdir);
-
- upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
- if (IS_ERR(upper))
- return PTR_ERR(upper);
-
- if (c->tmpfile)
- err = ovl_do_link(temp, udir, upper);
- else
- err = ovl_do_rename(d_inode(c->workdir), temp, udir, upper, 0);
-
- if (!err)
- *newdentry = dget(c->tmpfile ? upper : temp);
- dput(upper);
-
- return err;
-}
-
-static struct dentry *ovl_get_tmpfile(struct ovl_copy_up_ctx *c)
-{
- int err;
- struct dentry *temp;
- const struct cred *old_creds = NULL;
- struct cred *new_creds = NULL;
- struct ovl_cattr cattr = {
- /* Can't properly set mode on creation because of the umask */
- .mode = c->stat.mode & S_IFMT,
- .rdev = c->stat.rdev,
- .link = c->link
- };
-
- err = security_inode_copy_up(c->dentry, &new_creds);
- temp = ERR_PTR(err);
- if (err < 0)
- goto out;
-
- if (new_creds)
- old_creds = override_creds(new_creds);
-
- if (c->tmpfile)
- temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
- else
- temp = ovl_create_temp(c->workdir, &cattr);
-out:
- if (new_creds) {
- revert_creds(old_creds);
- put_cred(new_creds);
- }
-
- return temp;
-}
-
static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
{
int err;
@@ -548,51 +490,148 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
return err;
}
-static int ovl_copy_up_locked(struct ovl_copy_up_ctx *c)
+struct ovl_cu_creds {
+ const struct cred *old;
+ struct cred *new;
+};
+
+static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc)
+{
+ int err;
+
+ cc->old = cc->new = NULL;
+ err = security_inode_copy_up(dentry, &cc->new);
+ if (err < 0)
+ return err;
+
+ if (cc->new)
+ cc->old = override_creds(cc->new);
+
+ return 0;
+}
+
+static void ovl_revert_cu_creds(struct ovl_cu_creds *cc)
+{
+ if (cc->new) {
+ revert_creds(cc->old);
+ put_cred(cc->new);
+ }
+}
+
+/*
+ * Copyup using workdir to prepare temp file. Used when copying up directories,
+ * special files or when upper fs doesn't support O_TMPFILE.
+ */
+static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
{
- struct inode *udir = c->destdir->d_inode;
struct inode *inode;
- struct dentry *newdentry = NULL;
- struct dentry *temp;
+ struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir);
+ struct dentry *temp, *upper;
+ struct ovl_cu_creds cc;
int err;
+ struct ovl_cattr cattr = {
+ /* Can't properly set mode on creation because of the umask */
+ .mode = c->stat.mode & S_IFMT,
+ .rdev = c->stat.rdev,
+ .link = c->link
+ };
- temp = ovl_get_tmpfile(c);
+ err = ovl_lock_rename_workdir(c->workdir, c->destdir);
+ if (err)
+ return err;
+
+ err = ovl_prep_cu_creds(c->dentry, &cc);
+ if (err)
+ goto unlock;
+
+ temp = ovl_create_temp(c->workdir, &cattr);
+ ovl_revert_cu_creds(&cc);
+
+ err = PTR_ERR(temp);
if (IS_ERR(temp))
- return PTR_ERR(temp);
+ goto unlock;
err = ovl_copy_up_inode(c, temp);
if (err)
- goto out;
+ goto cleanup;
if (S_ISDIR(c->stat.mode) && c->indexed) {
err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp);
if (err)
- goto out;
+ goto cleanup;
}
- if (c->tmpfile) {
- inode_lock_nested(udir, I_MUTEX_PARENT);
- err = ovl_install_temp(c, temp, &newdentry);
- inode_unlock(udir);
- } else {
- err = ovl_install_temp(c, temp, &newdentry);
- }
+ upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+ err = PTR_ERR(upper);
+ if (IS_ERR(upper))
+ goto cleanup;
+
+ err = ovl_do_rename(wdir, temp, udir, upper, 0);
+ dput(upper);
if (err)
- goto out;
+ goto cleanup;
if (!c->metacopy)
ovl_set_upperdata(d_inode(c->dentry));
inode = d_inode(c->dentry);
- ovl_inode_update(inode, newdentry);
+ ovl_inode_update(inode, temp);
if (S_ISDIR(inode->i_mode))
ovl_set_flag(OVL_WHITEOUTS, inode);
+unlock:
+ unlock_rename(c->workdir, c->destdir);
-out:
- if (err && !c->tmpfile)
- ovl_cleanup(d_inode(c->workdir), temp);
- dput(temp);
return err;
+cleanup:
+ ovl_cleanup(wdir, temp);
+ dput(temp);
+ goto unlock;
+}
+
+/* Copyup using O_TMPFILE which does not require cross dir locking */
+static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
+{
+ struct inode *udir = d_inode(c->destdir);
+ struct dentry *temp, *upper;
+ struct ovl_cu_creds cc;
+ int err;
+
+ err = ovl_prep_cu_creds(c->dentry, &cc);
+ if (err)
+ return err;
+
+ temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
+ ovl_revert_cu_creds(&cc);
+
+ if (IS_ERR(temp))
+ return PTR_ERR(temp);
+
+ err = ovl_copy_up_inode(c, temp);
+ if (err)
+ goto out_dput;
+
+ inode_lock_nested(udir, I_MUTEX_PARENT);
+
+ upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+ err = PTR_ERR(upper);
+ if (!IS_ERR(upper)) {
+ err = ovl_do_link(temp, udir, upper);
+ dput(upper);
+ }
+ inode_unlock(udir);
+
+ if (err)
+ goto out_dput;
+
+ if (!c->metacopy)
+ ovl_set_upperdata(d_inode(c->dentry));
+ ovl_inode_update(d_inode(c->dentry), temp);
+
+ return 0;
+
+out_dput:
+ dput(temp);
+ return err;
}
/*
@@ -646,18 +685,10 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
}
/* Should we copyup with O_TMPFILE or with workdir? */
- if (S_ISREG(c->stat.mode) && ofs->tmpfile) {
- c->tmpfile = true;
- err = ovl_copy_up_locked(c);
- } else {
- err = ovl_lock_rename_workdir(c->workdir, c->destdir);
- if (!err) {
- err = ovl_copy_up_locked(c);
- unlock_rename(c->workdir, c->destdir);
- }
- }
-
-
+ if (S_ISREG(c->stat.mode) && ofs->tmpfile)
+ err = ovl_copy_up_tmpfile(c);
+ else
+ err = ovl_copy_up_workdir(c);
if (err)
goto out;
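The copy-up path is split into a workdir variant and an O_TMPFILE variant; the latter builds the whole upper file anonymously and only links it into place once data and metadata are complete. A userspace sketch of the same O_TMPFILE-then-linkat pattern (the /upper paths are made up and error handling is trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	/* Build the file anonymously in the target directory... */
	int fd = open("/upper/dir", O_TMPFILE | O_WRONLY, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "data", 4) != 4)
		perror("write");
	/* ...and only give it a name once the content is in place. */
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, path, AT_FDCWD, "/upper/dir/file", AT_SYMLINK_FOLLOW))
		perror("linkat");
	close(fd);
	return 0;
}
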
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 276914ae3c60..c6289147c787 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -414,13 +414,12 @@ static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl)
return 0;
- size = posix_acl_to_xattr(NULL, acl, NULL, 0);
+ size = posix_acl_xattr_size(acl->a_count);
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
- err = size;
+ err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
if (err < 0)
goto out_free;
@@ -463,6 +462,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
if (IS_ERR(upper))
goto out_unlock;
+ err = -ESTALE;
+ if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper)))
+ goto out_dput;
+
newdentry = ovl_create_temp(workdir, cattr);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
@@ -652,7 +655,6 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
struct dentry *new)
{
int err;
- bool locked = false;
struct inode *inode;
err = ovl_want_write(old);
@@ -663,13 +665,17 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
if (err)
goto out_drop_write;
+ err = ovl_copy_up(new->d_parent);
+ if (err)
+ goto out_drop_write;
+
if (ovl_is_metacopy_dentry(old)) {
err = ovl_set_redirect(old, false);
if (err)
goto out_drop_write;
}
- err = ovl_nlink_start(old, &locked);
+ err = ovl_nlink_start(old);
if (err)
goto out_drop_write;
@@ -682,7 +688,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
if (err)
iput(inode);
- ovl_nlink_end(old, locked);
+ ovl_nlink_end(old);
out_drop_write:
ovl_drop_write(old);
out:
@@ -807,7 +813,6 @@ static bool ovl_pure_upper(struct dentry *dentry)
static int ovl_do_remove(struct dentry *dentry, bool is_dir)
{
int err;
- bool locked = false;
const struct cred *old_cred;
struct dentry *upperdentry;
bool lower_positive = ovl_lower_positive(dentry);
@@ -828,7 +833,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
if (err)
goto out_drop_write;
- err = ovl_nlink_start(dentry, &locked);
+ err = ovl_nlink_start(dentry);
if (err)
goto out_drop_write;
@@ -844,7 +849,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
else
drop_nlink(dentry->d_inode);
}
- ovl_nlink_end(dentry, locked);
+ ovl_nlink_end(dentry);
/*
* Copy ctime
@@ -1008,7 +1013,6 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
unsigned int flags)
{
int err;
- bool locked = false;
struct dentry *old_upperdir;
struct dentry *new_upperdir;
struct dentry *olddentry;
@@ -1017,6 +1021,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
bool old_opaque;
bool new_opaque;
bool cleanup_whiteout = false;
+ bool update_nlink = false;
bool overwrite = !(flags & RENAME_EXCHANGE);
bool is_dir = d_is_dir(old);
bool new_is_dir = d_is_dir(new);
@@ -1074,10 +1079,12 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
err = ovl_copy_up(new);
if (err)
goto out_drop_write;
- } else {
- err = ovl_nlink_start(new, &locked);
+ } else if (d_inode(new)) {
+ err = ovl_nlink_start(new);
if (err)
goto out_drop_write;
+
+ update_nlink = true;
}
old_cred = ovl_override_creds(old->d_sb);
@@ -1206,7 +1213,8 @@ out_unlock:
unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
revert_creds(old_cred);
- ovl_nlink_end(new, locked);
+ if (update_nlink)
+ ovl_nlink_end(new);
out_drop_write:
ovl_drop_write(old);
out:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 3b7ed5d2279c..6bcc9dedc342 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -286,13 +286,22 @@ int ovl_permission(struct inode *inode, int mask)
if (err)
return err;
- old_cred = ovl_override_creds(inode->i_sb);
- if (!upperinode &&
- !special_file(realinode->i_mode) && mask & MAY_WRITE) {
+ /* No need to do any access on underlying for special files */
+ if (special_file(realinode->i_mode))
+ return 0;
+
+ /* No need to access underlying for execute */
+ mask &= ~MAY_EXEC;
+ if ((mask & (MAY_READ | MAY_WRITE)) == 0)
+ return 0;
+
+ /* Lower files get copied up, so turn write access into read */
+ if (!upperinode && mask & MAY_WRITE) {
mask &= ~(MAY_WRITE | MAY_APPEND);
- /* Make sure mounter can read file for copy up later */
mask |= MAY_READ;
}
+
+ old_cred = ovl_override_creds(inode->i_sb);
err = inode_permission(realinode, mask);
revert_creds(old_cred);
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 9c0ca6a7becf..efd372312ef1 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -422,8 +422,10 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name,
fh = ovl_encode_real_fh(real, is_upper);
err = PTR_ERR(fh);
- if (IS_ERR(fh))
+ if (IS_ERR(fh)) {
+ fh = NULL;
goto fail;
+ }
err = ovl_verify_fh(dentry, name, fh);
if (set && err == -ENODATA)
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index a3c0d9584312..5e45cb3630a0 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -271,8 +271,8 @@ bool ovl_test_flag(unsigned long flag, struct inode *inode);
bool ovl_inuse_trylock(struct dentry *dentry);
void ovl_inuse_unlock(struct dentry *dentry);
bool ovl_need_index(struct dentry *dentry);
-int ovl_nlink_start(struct dentry *dentry, bool *locked);
-void ovl_nlink_end(struct dentry *dentry, bool locked);
+int ovl_nlink_start(struct dentry *dentry);
+void ovl_nlink_end(struct dentry *dentry);
int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
int ovl_check_metacopy_xattr(struct dentry *dentry);
bool ovl_is_metacopy_dentry(struct dentry *dentry);
@@ -290,6 +290,16 @@ static inline unsigned int ovl_xino_bits(struct super_block *sb)
return ofs->xino_bits;
}
+static inline int ovl_inode_lock(struct inode *inode)
+{
+ return mutex_lock_interruptible(&OVL_I(inode)->lock);
+}
+
+static inline void ovl_inode_unlock(struct inode *inode)
+{
+ mutex_unlock(&OVL_I(inode)->lock);
+}
+
/* namei.c */
int ovl_check_fh_len(struct ovl_fh *fh, int fh_len);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 30adc9d408a0..0116735cc321 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -472,6 +472,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
{
char *p;
int err;
+ bool metacopy_opt = false, redirect_opt = false;
config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
if (!config->redirect_mode)
@@ -516,6 +517,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
config->redirect_mode = match_strdup(&args[0]);
if (!config->redirect_mode)
return -ENOMEM;
+ redirect_opt = true;
break;
case OPT_INDEX_ON:
@@ -548,6 +550,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
case OPT_METACOPY_ON:
config->metacopy = true;
+ metacopy_opt = true;
break;
case OPT_METACOPY_OFF:
@@ -572,13 +575,32 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
if (err)
return err;
- /* metacopy feature with upper requires redirect_dir=on */
- if (config->upperdir && config->metacopy && !config->redirect_dir) {
- pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=on\", falling back to metacopy=off.\n");
- config->metacopy = false;
- } else if (config->metacopy && !config->redirect_follow) {
- pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=follow\" on non-upper mount, falling back to metacopy=off.\n");
- config->metacopy = false;
+ /*
+ * This is to make the logic below simpler. It doesn't make any other
+ * difference, since config->redirect_dir is only used for upper.
+ */
+ if (!config->upperdir && config->redirect_follow)
+ config->redirect_dir = true;
+
+ /* Resolve metacopy -> redirect_dir dependency */
+ if (config->metacopy && !config->redirect_dir) {
+ if (metacopy_opt && redirect_opt) {
+ pr_err("overlayfs: conflicting options: metacopy=on,redirect_dir=%s\n",
+ config->redirect_mode);
+ return -EINVAL;
+ }
+ if (redirect_opt) {
+ /*
+ * There was an explicit redirect_dir=... that resulted
+ * in this conflict.
+ */
+ pr_info("overlayfs: disabling metacopy due to redirect_dir=%s\n",
+ config->redirect_mode);
+ config->metacopy = false;
+ } else {
+ /* Automatically enable redirect otherwise. */
+ config->redirect_follow = config->redirect_dir = true;
+ }
}
return 0;
@@ -1175,9 +1197,29 @@ out:
return err;
}
+static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
+{
+ unsigned int i;
+
+ if (!ofs->config.nfs_export && !(ofs->config.index && ofs->upper_mnt))
+ return true;
+
+ for (i = 0; i < ofs->numlowerfs; i++) {
+ /*
+ * We use uuid to associate an overlay lower file handle with a
+ * lower layer, so we can accept lower fs with null uuid as long
+ * as all lower layers with null uuid are on the same fs.
+ */
+ if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid))
+ return false;
+ }
+ return true;
+}
+
/* Get a unique fsid for the layer */
-static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb)
+static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
{
+ struct super_block *sb = path->mnt->mnt_sb;
unsigned int i;
dev_t dev;
int err;
@@ -1191,6 +1233,14 @@ static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb)
return i + 1;
}
+ if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
+ ofs->config.index = false;
+ ofs->config.nfs_export = false;
+ pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
+ uuid_is_null(&sb->s_uuid) ? "null" : "conflicting",
+ path->dentry);
+ }
+
err = get_anon_bdev(&dev);
if (err) {
pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n");
@@ -1225,7 +1275,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
struct vfsmount *mnt;
int fsid;
- err = fsid = ovl_get_fsid(ofs, stack[i].mnt->mnt_sb);
+ err = fsid = ovl_get_fsid(ofs, &stack[i]);
if (err < 0)
goto out;
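The option parsing now records whether metacopy=on and redirect_dir=... were given explicitly, so a genuine conflict is rejected instead of being silently downgraded. A simplified standalone mirror of that resolution; it ignores the redirect_follow handling:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * metacopy needs redirect_dir: an explicit conflict is an error, an implicit
 * one is resolved by disabling metacopy or enabling redirects automatically.
 */
static int resolve(bool metacopy_opt, bool redirect_opt,
		   bool *metacopy, bool *redirect_dir)
{
	if (*metacopy && !*redirect_dir) {
		if (metacopy_opt && redirect_opt)
			return -EINVAL;		/* both requested explicitly */
		if (redirect_opt)
			*metacopy = false;	/* explicit redirect_dir=... wins */
		else
			*redirect_dir = true;	/* auto-enable redirect otherwise */
	}
	return 0;
}

int main(void)
{
	bool metacopy = true, redirect_dir = false;
	int err = resolve(true, false, &metacopy, &redirect_dir);

	printf("err=%d metacopy=%d redirect_dir=%d\n", err, metacopy, redirect_dir);
	return 0;
}
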
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index ace4fe4c39a9..7c01327b1852 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -65,8 +65,7 @@ struct super_block *ovl_same_sb(struct super_block *sb)
*/
int ovl_can_decode_fh(struct super_block *sb)
{
- if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry ||
- uuid_is_null(&sb->s_uuid))
+ if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry)
return 0;
return sb->s_export_op->encode_fh ? -1 : FILEID_INO32_GEN;
@@ -522,13 +521,13 @@ bool ovl_already_copied_up(struct dentry *dentry, int flags)
int ovl_copy_up_start(struct dentry *dentry, int flags)
{
- struct ovl_inode *oi = OVL_I(d_inode(dentry));
+ struct inode *inode = d_inode(dentry);
int err;
- err = mutex_lock_interruptible(&oi->lock);
+ err = ovl_inode_lock(inode);
if (!err && ovl_already_copied_up_locked(dentry, flags)) {
err = 1; /* Already copied up */
- mutex_unlock(&oi->lock);
+ ovl_inode_unlock(inode);
}
return err;
@@ -536,7 +535,7 @@ int ovl_copy_up_start(struct dentry *dentry, int flags)
void ovl_copy_up_end(struct dentry *dentry)
{
- mutex_unlock(&OVL_I(d_inode(dentry))->lock);
+ ovl_inode_unlock(d_inode(dentry));
}
bool ovl_check_origin_xattr(struct dentry *dentry)
@@ -739,14 +738,14 @@ fail:
* Operations that change overlay inode and upper inode nlink need to be
* synchronized with copy up for persistent nlink accounting.
*/
-int ovl_nlink_start(struct dentry *dentry, bool *locked)
+int ovl_nlink_start(struct dentry *dentry)
{
- struct ovl_inode *oi = OVL_I(d_inode(dentry));
+ struct inode *inode = d_inode(dentry);
const struct cred *old_cred;
int err;
- if (!d_inode(dentry))
- return 0;
+ if (WARN_ON(!inode))
+ return -ENOENT;
/*
* With inodes index is enabled, we store the union overlay nlink
@@ -768,11 +767,11 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked)
return err;
}
- err = mutex_lock_interruptible(&oi->lock);
+ err = ovl_inode_lock(inode);
if (err)
return err;
- if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
+ if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, inode))
goto out;
old_cred = ovl_override_creds(dentry->d_sb);
@@ -787,27 +786,24 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked)
out:
if (err)
- mutex_unlock(&oi->lock);
- else
- *locked = true;
+ ovl_inode_unlock(inode);
return err;
}
-void ovl_nlink_end(struct dentry *dentry, bool locked)
+void ovl_nlink_end(struct dentry *dentry)
{
- if (locked) {
- if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) &&
- d_inode(dentry)->i_nlink == 0) {
- const struct cred *old_cred;
+ struct inode *inode = d_inode(dentry);
- old_cred = ovl_override_creds(dentry->d_sb);
- ovl_cleanup_index(dentry);
- revert_creds(old_cred);
- }
+ if (ovl_test_flag(OVL_INDEX, inode) && inode->i_nlink == 0) {
+ const struct cred *old_cred;
- mutex_unlock(&OVL_I(d_inode(dentry))->lock);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ ovl_cleanup_index(dentry);
+ revert_creds(old_cred);
}
+
+ ovl_inode_unlock(inode);
}
int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7e9f07bf260d..ce3465479447 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2905,6 +2905,21 @@ static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns,
}
#endif /* CONFIG_LIVEPATCH */
+#ifdef CONFIG_STACKLEAK_METRICS
+static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+{
+ unsigned long prev_depth = THREAD_SIZE -
+ (task->prev_lowest_stack & (THREAD_SIZE - 1));
+ unsigned long depth = THREAD_SIZE -
+ (task->lowest_stack & (THREAD_SIZE - 1));
+
+ seq_printf(m, "previous stack depth: %lu\nstack depth: %lu\n",
+ prev_depth, depth);
+ return 0;
+}
+#endif /* CONFIG_STACKLEAK_METRICS */
+
/*
* Thread groups
*/
@@ -3006,6 +3021,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_LIVEPATCH
ONE("patch_state", S_IRUSR, proc_pid_patch_state),
#endif
+#ifdef CONFIG_STACKLEAK_METRICS
+ ONE("stack_depth", S_IRUGO, proc_stack_depth),
+#endif
};
static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
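CONFIG_STACKLEAK_METRICS exposes the lowest stack address the task has touched; the reported depth is just the offset of that address from the top of the THREAD_SIZE-aligned stack. A tiny sketch of the arithmetic, with an assumed 16 KiB THREAD_SIZE and a made-up address:

#include <stdio.h>

#define THREAD_SIZE	(16 * 1024UL)	/* assumed kernel stack size */

/* Depth = distance from the top of the stack down to the lowest touched byte. */
static unsigned long stack_depth(unsigned long lowest_stack)
{
	return THREAD_SIZE - (lowest_stack & (THREAD_SIZE - 1));
}

int main(void)
{
	/* A hypothetical lowest_stack 0x300 bytes above the stack's base. */
	printf("stack depth: %lu\n", stack_depth(0xffffc90000100300UL));
	return 0;
}
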
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index fc5306a31a1d..5792f9e39466 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -516,6 +516,9 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
*/
s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+ /* procfs dentries and inodes don't require IO to create */
+ s->s_shrink.seeks = 0;
+
pde_get(&proc_root);
root_inode = proc_get_inode(s, &proc_root);
if (!root_inode) {
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index d297fe4472a9..bbcc185062bb 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -22,7 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index d06694757201..8468baee951d 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -10,9 +10,6 @@
#include <linux/seqlock.h>
#include <linux/time.h>
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
static int loadavg_proc_show(struct seq_file *m, void *v)
{
unsigned long avnrun[3];
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index edda898714eb..568d90e17c17 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -38,6 +38,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
long cached;
long available;
unsigned long pages[NR_LRU_LISTS];
+ unsigned long sreclaimable, sunreclaim;
int lru;
si_meminfo(&i);
@@ -53,6 +54,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
available = si_mem_available();
+ sreclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE);
+ sunreclaim = global_node_page_state(NR_SLAB_UNRECLAIMABLE);
show_val_kb(m, "MemTotal: ", i.totalram);
show_val_kb(m, "MemFree: ", i.freeram);
@@ -94,14 +97,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "Mapped: ",
global_node_page_state(NR_FILE_MAPPED));
show_val_kb(m, "Shmem: ", i.sharedram);
- show_val_kb(m, "Slab: ",
- global_node_page_state(NR_SLAB_RECLAIMABLE) +
- global_node_page_state(NR_SLAB_UNRECLAIMABLE));
-
- show_val_kb(m, "SReclaimable: ",
- global_node_page_state(NR_SLAB_RECLAIMABLE));
- show_val_kb(m, "SUnreclaim: ",
- global_node_page_state(NR_SLAB_UNRECLAIMABLE));
+ show_val_kb(m, "KReclaimable: ", sreclaimable +
+ global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE));
+ show_val_kb(m, "Slab: ", sreclaimable + sunreclaim);
+ show_val_kb(m, "SReclaimable: ", sreclaimable);
+ show_val_kb(m, "SUnreclaim: ", sunreclaim);
seq_printf(m, "KernelStack: %8lu kB\n",
global_zone_page_state(NR_KERNEL_STACK_KB));
show_val_kb(m, "PageTables: ",
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 792c78a49174..6c517b11acf8 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5ea1d64cb0b4..47c3764c469b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -521,7 +521,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
if (!page)
return;
- if (radix_tree_exceptional_entry(page))
+ if (xa_is_value(page))
mss->swap += PAGE_SIZE;
else
put_page(page);
@@ -713,6 +713,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
smaps_walk.private = mss;
#ifdef CONFIG_SHMEM
+ /* In case of smaps_rollup, reset the value from previous vma */
+ mss->check_shmem_swap = false;
if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
/*
* For shared or readonly shmem mappings we know that all
@@ -728,7 +730,7 @@ static void smap_gather_stats(struct vm_area_struct *vma,
if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
!(vma->vm_flags & VM_WRITE)) {
- mss->swap = shmem_swapped;
+ mss->swap += shmem_swapped;
} else {
mss->check_shmem_swap = true;
smaps_walk.pte_hole = smaps_pte_hole;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index cbde728f8ac6..3fe90443c1bb 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -16,7 +16,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
@@ -24,6 +24,8 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
+#include <linux/mem_encrypt.h>
+#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"
@@ -98,7 +100,8 @@ static int pfn_is_ram(unsigned long pfn)
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf)
+ u64 *ppos, int userbuf,
+ bool encrypted)
{
unsigned long pfn, offset;
size_t nr_bytes;
@@ -120,8 +123,15 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
if (pfn_is_ram(pfn) == 0)
memset(buf, 0, nr_bytes);
else {
- tmp = copy_oldmem_page(pfn, buf, nr_bytes,
- offset, userbuf);
+ if (encrypted)
+ tmp = copy_oldmem_page_encrypted(pfn, buf,
+ nr_bytes,
+ offset,
+ userbuf);
+ else
+ tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+ offset, userbuf);
+
if (tmp < 0)
return tmp;
}
@@ -155,7 +165,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
*/
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
- return read_from_oldmem(buf, count, ppos, 0);
+ return read_from_oldmem(buf, count, ppos, 0, false);
}
/*
@@ -163,7 +173,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
*/
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
- return read_from_oldmem(buf, count, ppos, 0);
+ return read_from_oldmem(buf, count, ppos, 0, sme_active());
}
/*
@@ -173,10 +183,21 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot)
{
+ prot = pgprot_encrypted(prot);
return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
+ * Architectures which support memory encryption override this.
+ */
+ssize_t __weak
+copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+ unsigned long offset, int userbuf)
+{
+ return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
+}
+
+/*
* Copy to either kernel or user space
*/
static int copy_to(void *target, void *src, size_t size, int userbuf)
@@ -351,7 +372,8 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
m->offset + m->size - *fpos,
buflen);
start = m->paddr + *fpos - m->offset;
- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
+ tmp = read_from_oldmem(buffer, tsz, &start,
+ userbuf, sme_active());
if (tmp < 0)
return tmp;
buflen -= tsz;
@@ -401,7 +423,7 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
if (rc < 0) {
unlock_page(page);
put_page(page);
- return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ return vmf_error(rc);
}
SetPageUptodate(page);
}
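copy_oldmem_page_encrypted() follows the usual __weak pattern: the generic definition simply falls back to the unencrypted copy, and architectures with memory encryption supply a strong override. A minimal userspace illustration of that linkage trick, assuming GCC on an ELF target:

#include <stdio.h>

/* Weak default: no memory encryption, behave exactly like the plain copy. */
__attribute__((weak)) long copy_page_encrypted(char *buf, long count)
{
	(void)buf;
	return count;
}

int main(void)
{
	char buf[16];

	/* With no strong override linked in, the weak fallback is used. */
	printf("%ld\n", copy_page_encrypted(buf, (long)sizeof(buf)));
	return 0;
}
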
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 503086f7f7c1..0d19d191ae70 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -141,7 +141,6 @@ config PSTORE_RAM
tristate "Log panic/oops to a RAM buffer"
depends on PSTORE
depends on HAS_IOMEM
- depends on HAVE_MEMBLOCK
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 5fcb845b9fec..8cf2218b46a7 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -482,12 +482,10 @@ static struct file_system_type pstore_fs_type = {
.kill_sb = pstore_kill_sb,
};
-static int __init init_pstore_fs(void)
+int __init pstore_init_fs(void)
{
int err;
- pstore_choose_compression();
-
/* Create a convenient mount point for people to access pstore */
err = sysfs_create_mount_point(fs_kobj, "pstore");
if (err)
@@ -500,14 +498,9 @@ static int __init init_pstore_fs(void)
out:
return err;
}
-module_init(init_pstore_fs)
-static void __exit exit_pstore_fs(void)
+void __exit pstore_exit_fs(void)
{
unregister_filesystem(&pstore_fs_type);
sysfs_remove_mount_point(fs_kobj, "pstore");
}
-module_exit(exit_pstore_fs)
-
-MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
-MODULE_LICENSE("GPL");
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index fb767e28aeb2..7062ea4bc57c 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -37,7 +37,8 @@ extern bool pstore_is_mounted(void);
extern void pstore_record_init(struct pstore_record *record,
struct pstore_info *psi);
-/* Called during module_init() */
-extern void __init pstore_choose_compression(void);
+/* Called during pstore init/exit. */
+int __init pstore_init_fs(void);
+void __exit pstore_exit_fs(void);
#endif
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 15e99d5a681d..b821054ca3ed 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -274,36 +274,56 @@ static int pstore_decompress(void *in, void *out,
static void allocate_buf_for_compression(void)
{
+ struct crypto_comp *ctx;
+ int size;
+ char *buf;
+
+ /* Skip if not built-in or compression backend not selected yet. */
if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
return;
+ /* Skip if no pstore backend yet or compression init already done. */
+ if (!psinfo || tfm)
+ return;
+
if (!crypto_has_comp(zbackend->name, 0, 0)) {
- pr_err("No %s compression\n", zbackend->name);
+ pr_err("Unknown compression: %s\n", zbackend->name);
return;
}
- big_oops_buf_sz = zbackend->zbufsize(psinfo->bufsize);
- if (big_oops_buf_sz <= 0)
+ size = zbackend->zbufsize(psinfo->bufsize);
+ if (size <= 0) {
+ pr_err("Invalid compression size for %s: %d\n",
+ zbackend->name, size);
return;
+ }
- big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
- if (!big_oops_buf) {
- pr_err("allocate compression buffer error!\n");
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Failed %d byte compression buffer allocation for: %s\n",
+ size, zbackend->name);
return;
}
- tfm = crypto_alloc_comp(zbackend->name, 0, 0);
- if (IS_ERR_OR_NULL(tfm)) {
- kfree(big_oops_buf);
- big_oops_buf = NULL;
- pr_err("crypto_alloc_comp() failed!\n");
+ ctx = crypto_alloc_comp(zbackend->name, 0, 0);
+ if (IS_ERR_OR_NULL(ctx)) {
+ kfree(buf);
+ pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
+ PTR_ERR(ctx));
return;
}
+
+ /* A non-NULL big_oops_buf indicates compression is available. */
+ tfm = ctx;
+ big_oops_buf_sz = size;
+ big_oops_buf = buf;
+
+ pr_info("Using compression: %s\n", zbackend->name);
}
static void free_buf_for_compression(void)
{
- if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && !IS_ERR_OR_NULL(tfm))
+ if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
crypto_free_comp(tfm);
kfree(big_oops_buf);
big_oops_buf = NULL;
@@ -774,14 +794,43 @@ void __init pstore_choose_compression(void)
for (step = zbackends; step->name; step++) {
if (!strcmp(compress, step->name)) {
zbackend = step;
- pr_info("using %s compression\n", zbackend->name);
return;
}
}
}
+static int __init pstore_init(void)
+{
+ int ret;
+
+ pstore_choose_compression();
+
+ /*
+ * Check if any pstore backends registered earlier but did not
+ * initialize compression because crypto was not ready. If so,
+ * initialize compression now.
+ */
+ allocate_buf_for_compression();
+
+ ret = pstore_init_fs();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+late_initcall(pstore_init);
+
+static void __exit pstore_exit(void)
+{
+ pstore_exit_fs();
+}
+module_exit(pstore_exit)
+
module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "Pstore compression to use");
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");
+
+MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_LICENSE("GPL");
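allocate_buf_for_compression() is now idempotent, so it can run both when a backend registers early and again from the new late_initcall() once the crypto API is available; the psinfo/tfm check turns the second call into a no-op. A stripped-down sketch of that guard, with malloc standing in for the real crypto and buffer setup:

#include <stdlib.h>

static void *big_oops_buf;	/* non-NULL once the compression buffer exists */

/*
 * Idempotent setup: may run when a backend registers early and again from the
 * late initcall; the second call is a no-op.
 */
static void setup_compression(size_t size)
{
	if (big_oops_buf)
		return;
	big_oops_buf = malloc(size);
}

int main(void)
{
	setup_compression(4096);
	setup_compression(4096);
	free(big_oops_buf);
	return 0;
}
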
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index f4fd2e72add4..ffcff6516e89 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -587,9 +587,16 @@ static int ramoops_init_przs(const char *name,
goto fail;
for (i = 0; i < *cnt; i++) {
+ char *label;
+
+ if (*cnt == 1)
+ label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
+ else
+ label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
+ name, i, *cnt - 1);
prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
- &cxt->ecc_info,
- cxt->memtype, flags);
+ &cxt->ecc_info,
+ cxt->memtype, flags, label);
if (IS_ERR(prz_ar[i])) {
err = PTR_ERR(prz_ar[i]);
dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
@@ -619,6 +626,8 @@ static int ramoops_init_prz(const char *name,
struct persistent_ram_zone **prz,
phys_addr_t *paddr, size_t sz, u32 sig)
{
+ char *label;
+
if (!sz)
return 0;
@@ -629,8 +638,9 @@ static int ramoops_init_prz(const char *name,
return -ENOMEM;
}
+ label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
- cxt->memtype, 0);
+ cxt->memtype, 0, label);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
@@ -962,7 +972,7 @@ static int __init ramoops_init(void)
return ret;
}
-late_initcall(ramoops_init);
+postcore_initcall(ramoops_init);
static void __exit ramoops_exit(void)
{
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 0792595ebcfb..12e21f789194 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -438,11 +438,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
}
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
- unsigned int memtype)
+ unsigned int memtype, char *label)
{
void *va;
- if (!request_mem_region(start, size, "persistent_ram")) {
+ if (!request_mem_region(start, size, label ?: "ramoops")) {
pr_err("request mem region (0x%llx@0x%llx) failed\n",
(unsigned long long)size, (unsigned long long)start);
return NULL;
@@ -470,7 +470,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
if (pfn_valid(start >> PAGE_SHIFT))
prz->vaddr = persistent_ram_vmap(start, size, memtype);
else
- prz->vaddr = persistent_ram_iomap(start, size, memtype);
+ prz->vaddr = persistent_ram_iomap(start, size, memtype,
+ prz->label);
if (!prz->vaddr) {
pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -541,12 +542,13 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
prz->ecc_info.par = NULL;
persistent_ram_free_old(prz);
+ kfree(prz->label);
kfree(prz);
}
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype, u32 flags)
+ unsigned int memtype, u32 flags, char *label)
{
struct persistent_ram_zone *prz;
int ret = -ENOMEM;
@@ -560,6 +562,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
/* Initialize general buffer state. */
raw_spin_lock_init(&prz->buffer_lock);
prz->flags = flags;
+ prz->label = label;
ret = persistent_ram_buffer_map(start, size, prz, memtype);
if (ret)
diff --git a/fs/read_write.c b/fs/read_write.c
index 8a2737f0d61d..603794b207eb 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -331,7 +331,7 @@ COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned i
}
#endif
-#ifdef __ARCH_WANT_SYS_LLSEEK
+#if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
unsigned long, offset_low, loff_t __user *, result,
unsigned int, whence)
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index a39a562c1c10..bd29c58ccbd8 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -26,14 +26,5 @@ ifeq ($(CONFIG_REISERFS_FS_POSIX_ACL),y)
reiserfs-objs += xattr_acl.o
endif
-# gcc -O2 (the kernel default) is overaggressive on ppc32 when many inline
-# functions are used. This causes the compiler to advance the stack
-# pointer out of the available stack space, corrupting kernel space,
-# and causing a panic. Since this behavior only affects ppc32, this ifeq
-# will work around it. If any other architecture displays this behavior,
-# add it here.
-ccflags-$(CONFIG_PPC32) := $(call cc-ifversion, -lt, 0400, -O1)
-
TAGS:
etags *.c
-
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 48cdfc81fe10..32d8986c26fb 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -185,6 +185,7 @@ struct reiserfs_dentry_buf {
struct dir_context ctx;
struct dentry *xadir;
int count;
+ int err;
struct dentry *dentries[8];
};
@@ -207,6 +208,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
dentry = lookup_one_len(name, dbuf->xadir, namelen);
if (IS_ERR(dentry)) {
+ dbuf->err = PTR_ERR(dentry);
return PTR_ERR(dentry);
} else if (d_really_is_negative(dentry)) {
/* A directory entry exists, but no file? */
@@ -215,6 +217,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
"not found for file %pd.\n",
dentry, dbuf->xadir);
dput(dentry);
+ dbuf->err = -EIO;
return -EIO;
}
@@ -262,6 +265,10 @@ static int reiserfs_for_each_xattr(struct inode *inode,
err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
if (err)
break;
+ if (buf.err) {
+ err = buf.err;
+ break;
+ }
if (!buf.count)
break;
for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
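Because reiserfs_readdir_inode() may not propagate the actor's return value, the dentry-collection callback now also records its first failure in the context struct and the caller checks it after each pass. A self-contained sketch of that record-the-error-in-the-context pattern:

#include <errno.h>
#include <stdio.h>

struct walk_ctx {
	int count;
	int err;	/* first error seen by the callback, 0 otherwise */
};

/*
 * The callback records its failure so the caller can see it even if the
 * walker swallows the return value.
 */
static int visit(struct walk_ctx *ctx, const char *name)
{
	if (!name[0]) {
		ctx->err = -EIO;
		return ctx->err;
	}
	ctx->count++;
	return 0;
}

int main(void)
{
	struct walk_ctx ctx = { 0, 0 };
	const char *names[] = { "user.a", "", "user.b" };

	for (int i = 0; i < 3 && !visit(&ctx, names[i]); i++)
		;
	printf("count=%d err=%d\n", ctx.count, ctx.err);
	return 0;
}
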
diff --git a/fs/select.c b/fs/select.c
index 4a6b6e4b21cb..22b3bf89f051 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1120,7 +1120,7 @@ int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *
ts.tv_sec = ts.tv_nsec = 0;
if (timeval) {
- struct compat_timeval rtv;
+ struct old_timeval32 rtv;
rtv.tv_sec = ts.tv_sec;
rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -1128,7 +1128,7 @@ int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *
if (!copy_to_user(p, &rtv, sizeof(rtv)))
return ret;
} else {
- if (!compat_put_timespec64(&ts, p))
+ if (!put_old_timespec32(&ts, p))
return ret;
}
/*
@@ -1257,10 +1257,10 @@ out_nofds:
static int do_compat_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp)
+ struct old_timeval32 __user *tvp)
{
struct timespec64 end_time, *to = NULL;
- struct compat_timeval tv;
+ struct old_timeval32 tv;
int ret;
if (tvp) {
@@ -1282,7 +1282,7 @@ static int do_compat_select(int n, compat_ulong_t __user *inp,
COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
- struct compat_timeval __user *, tvp)
+ struct old_timeval32 __user *, tvp)
{
return do_compat_select(n, inp, outp, exp, tvp);
}
@@ -1307,7 +1307,7 @@ COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
static long do_compat_pselect(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
+ struct old_timespec32 __user *tsp, compat_sigset_t __user *sigmask,
compat_size_t sigsetsize)
{
sigset_t ksigmask, sigsaved;
@@ -1315,7 +1315,7 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
int ret;
if (tsp) {
- if (compat_get_timespec64(&ts, tsp))
+ if (get_old_timespec32(&ts, tsp))
return -EFAULT;
to = &end_time;
@@ -1355,7 +1355,7 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
- struct compat_timespec __user *, tsp, void __user *, sig)
+ struct old_timespec32 __user *, tsp, void __user *, sig)
{
compat_size_t sigsetsize = 0;
compat_uptr_t up = 0;
@@ -1373,7 +1373,7 @@ COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
}
COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
- unsigned int, nfds, struct compat_timespec __user *, tsp,
+ unsigned int, nfds, struct old_timespec32 __user *, tsp,
const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
sigset_t ksigmask, sigsaved;
@@ -1381,7 +1381,7 @@ COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
int ret;
if (tsp) {
- if (compat_get_timespec64(&ts, tsp))
+ if (get_old_timespec32(&ts, tsp))
return -EFAULT;
to = &end_time;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 4fcd1498acf5..757afc7c5895 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -79,7 +79,7 @@ static __poll_t signalfd_poll(struct file *file, poll_table *wait)
* Copied from copy_siginfo_to_user() in kernel/signal.c
*/
static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
- siginfo_t const *kinfo)
+ kernel_siginfo_t const *kinfo)
{
struct signalfd_siginfo new;
@@ -163,7 +163,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
return sizeof(*uinfo);
}
-static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
+static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
int nonblock)
{
ssize_t ret;
@@ -215,7 +215,7 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
struct signalfd_siginfo __user *siginfo;
int nonblock = file->f_flags & O_NONBLOCK;
ssize_t ret, total = 0;
- siginfo_t info;
+ kernel_siginfo_t info;
count /= sizeof(struct signalfd_siginfo);
if (!count)
diff --git a/fs/stat.c b/fs/stat.c
index f8e6fb2c3657..adbfcd86c81b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -280,6 +280,8 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
#endif /* __ARCH_WANT_OLD_STAT */
+#ifdef __ARCH_WANT_NEW_STAT
+
#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
@@ -378,6 +380,7 @@ SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
return error;
}
+#endif
static int do_readlinkat(int dfd, const char __user *pathname,
char __user *buf, int bufsiz)
diff --git a/fs/super.c b/fs/super.c
index f3a8c008e164..ca53a08497ed 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -442,7 +442,7 @@ void generic_shutdown_super(struct super_block *sb)
sync_filesystem(sb);
sb->s_flags &= ~SB_ACTIVE;
- fsnotify_unmount_inodes(sb);
+ fsnotify_sb_delete(sb);
cgroup_writeback_umount();
evict_inodes(sb);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index d69ad801eb80..803ca070d42e 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -561,29 +561,29 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
- const struct compat_itimerspec __user *, utmr,
- struct compat_itimerspec __user *, otmr)
+ const struct old_itimerspec32 __user *, utmr,
+ struct old_itimerspec32 __user *, otmr)
{
struct itimerspec64 new, old;
int ret;
- if (get_compat_itimerspec64(&new, utmr))
+ if (get_old_itimerspec32(&new, utmr))
return -EFAULT;
ret = do_timerfd_settime(ufd, flags, &new, &old);
if (ret)
return ret;
- if (otmr && put_compat_itimerspec64(&old, otmr))
+ if (otmr && put_old_itimerspec32(&old, otmr))
return -EFAULT;
return ret;
}
COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
- struct compat_itimerspec __user *, otmr)
+ struct old_itimerspec32 __user *, otmr)
{
struct itimerspec64 kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
if (ret)
return ret;
- return put_compat_itimerspec64(&kotmr, otmr) ? -EFAULT : 0;
+ return put_old_itimerspec32(&kotmr, otmr) ? -EFAULT : 0;
}
#endif
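
The timerfd compat handlers above now use the old_itimerspec32 helpers, but the native path and the userspace view are unchanged. A minimal sketch of a one-shot timerfd user (not from this patch):

/* Arm a one-second one-shot timer and wait for its expiration. */
#include <sys/timerfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec its = {
		.it_value = { .tv_sec = 1, .tv_nsec = 0 },	/* first expiry */
	};
	uint64_t expirations;
	int tfd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (tfd < 0 || timerfd_settime(tfd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}
	if (read(tfd, &expirations, sizeof(expirations)) == sizeof(expirations))
		printf("timer expired %llu time(s)\n",
		       (unsigned long long)expirations);
	close(tfd);
	return 0;
}
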
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index fcda0fc97b90..ec85aeaed54a 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -175,8 +175,8 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
{
struct udf_sb_info *sbi = UDF_SB(sb);
int alloc_count = 0;
- int bit, block, block_group, group_start;
- int nr_groups, bitmap_nr;
+ int bit, block, block_group;
+ int bitmap_nr;
struct buffer_head *bh;
__u32 part_len;
@@ -189,10 +189,8 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
block_count = part_len - first_block;
do {
- nr_groups = udf_compute_nr_groups(sb, partition);
block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
block_group = block >> (sb->s_blocksize_bits + 3);
- group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
if (bitmap_nr < 0)
@@ -652,12 +650,6 @@ void udf_free_blocks(struct super_block *sb, struct inode *inode,
} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
udf_table_free_blocks(sb, map->s_uspace.s_table,
bloc, offset, count);
- } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
- udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
- bloc, offset, count);
- } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
- udf_table_free_blocks(sb, map->s_fspace.s_table,
- bloc, offset, count);
}
if (inode) {
@@ -684,16 +676,6 @@ inline int udf_prealloc_blocks(struct super_block *sb,
map->s_uspace.s_table,
partition, first_block,
block_count);
- else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
- allocated = udf_bitmap_prealloc_blocks(sb,
- map->s_fspace.s_bitmap,
- partition, first_block,
- block_count);
- else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
- allocated = udf_table_prealloc_blocks(sb,
- map->s_fspace.s_table,
- partition, first_block,
- block_count);
else
return 0;
@@ -717,14 +699,6 @@ inline udf_pblk_t udf_new_block(struct super_block *sb,
block = udf_table_new_block(sb,
map->s_uspace.s_table,
partition, goal, err);
- else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
- block = udf_bitmap_new_block(sb,
- map->s_fspace.s_bitmap,
- partition, goal, err);
- else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
- block = udf_table_new_block(sb,
- map->s_fspace.s_table,
- partition, goal, err);
else {
*err = -EIO;
return 0;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 6f515651a2c2..8f2f56d9a1bb 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -290,12 +290,8 @@ static void udf_free_partition(struct udf_part_map *map)
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
iput(map->s_uspace.s_table);
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
- iput(map->s_fspace.s_table);
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
udf_sb_free_bitmap(map->s_uspace.s_bitmap);
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
- udf_sb_free_bitmap(map->s_fspace.s_bitmap);
if (map->s_partition_type == UDF_SPARABLE_MAP15)
for (i = 0; i < 4; i++)
brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
@@ -613,14 +609,11 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
struct udf_options uopt;
struct udf_sb_info *sbi = UDF_SB(sb);
int error = 0;
- struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
+
+ if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
+ return -EACCES;
sync_filesystem(sb);
- if (lvidiu) {
- int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
- if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY))
- return -EACCES;
- }
uopt.flags = sbi->s_flags;
uopt.uid = sbi->s_uid;
@@ -988,12 +981,62 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
return bitmap;
}
+static int check_partition_desc(struct super_block *sb,
+ struct partitionDesc *p,
+ struct udf_part_map *map)
+{
+ bool umap, utable, fmap, ftable;
+ struct partitionHeaderDesc *phd;
+
+ switch (le32_to_cpu(p->accessType)) {
+ case PD_ACCESS_TYPE_READ_ONLY:
+ case PD_ACCESS_TYPE_WRITE_ONCE:
+ case PD_ACCESS_TYPE_REWRITABLE:
+ case PD_ACCESS_TYPE_NONE:
+ goto force_ro;
+ }
+
+ /* No Partition Header Descriptor? */
+ if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
+ strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
+ goto force_ro;
+
+ phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
+ utable = phd->unallocSpaceTable.extLength;
+ umap = phd->unallocSpaceBitmap.extLength;
+ ftable = phd->freedSpaceTable.extLength;
+ fmap = phd->freedSpaceBitmap.extLength;
+
+ /* No allocation info? */
+ if (!utable && !umap && !ftable && !fmap)
+ goto force_ro;
+
+ /* We don't support blocks that require erasing before overwrite */
+ if (ftable || fmap)
+ goto force_ro;
+ /* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */
+ if (utable && umap)
+ goto force_ro;
+
+ if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
+ map->s_partition_type == UDF_VIRTUAL_MAP20)
+ goto force_ro;
+
+ return 0;
+force_ro:
+ if (!sb_rdonly(sb))
+ return -EACCES;
+ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
+ return 0;
+}
+
static int udf_fill_partdesc_info(struct super_block *sb,
struct partitionDesc *p, int p_index)
{
struct udf_part_map *map;
struct udf_sb_info *sbi = UDF_SB(sb);
struct partitionHeaderDesc *phd;
+ int err;
map = &sbi->s_partmaps[p_index];
@@ -1013,8 +1056,16 @@ static int udf_fill_partdesc_info(struct super_block *sb,
p_index, map->s_partition_type,
map->s_partition_root, map->s_partition_len);
- if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
- strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
+ err = check_partition_desc(sb, p, map);
+ if (err)
+ return err;
+
+ /*
+ * Skip loading allocation info if we cannot ever write to the fs.
+ * This matters for correctness as we may have decided to force a
+ * read-only mount to avoid allocation info we don't support.
+ */
+ if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
return 0;
phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
@@ -1050,40 +1101,6 @@ static int udf_fill_partdesc_info(struct super_block *sb,
p_index, bitmap->s_extPosition);
}
- if (phd->partitionIntegrityTable.extLength)
- udf_debug("partitionIntegrityTable (part %d)\n", p_index);
-
- if (phd->freedSpaceTable.extLength) {
- struct kernel_lb_addr loc = {
- .logicalBlockNum = le32_to_cpu(
- phd->freedSpaceTable.extPosition),
- .partitionReferenceNum = p_index,
- };
- struct inode *inode;
-
- inode = udf_iget_special(sb, &loc);
- if (IS_ERR(inode)) {
- udf_debug("cannot load freedSpaceTable (part %d)\n",
- p_index);
- return PTR_ERR(inode);
- }
- map->s_fspace.s_table = inode;
- map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
- udf_debug("freedSpaceTable (part %d) @ %lu\n",
- p_index, map->s_fspace.s_table->i_ino);
- }
-
- if (phd->freedSpaceBitmap.extLength) {
- struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
- if (!bitmap)
- return -ENOMEM;
- map->s_fspace.s_bitmap = bitmap;
- bitmap->s_extPosition = le32_to_cpu(
- phd->freedSpaceBitmap.extPosition);
- map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
- udf_debug("freedSpaceBitmap (part %d) @ %u\n",
- p_index, bitmap->s_extPosition);
- }
return 0;
}
@@ -1257,6 +1274,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
ret = -EACCES;
goto out_bh;
}
+ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
ret = udf_load_vat(sb, i, type1_idx);
if (ret < 0)
goto out_bh;
@@ -2155,10 +2173,12 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
UDF_MAX_READ_VERSION);
ret = -EINVAL;
goto error_out;
- } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
- !sb_rdonly(sb)) {
- ret = -EACCES;
- goto error_out;
+ } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
+ if (!sb_rdonly(sb)) {
+ ret = -EACCES;
+ goto error_out;
+ }
+ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
}
sbi->s_udfrev = minUDFWriteRev;
@@ -2176,10 +2196,12 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
}
if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
- UDF_PART_FLAG_READ_ONLY &&
- !sb_rdonly(sb)) {
- ret = -EACCES;
- goto error_out;
+ UDF_PART_FLAG_READ_ONLY) {
+ if (!sb_rdonly(sb)) {
+ ret = -EACCES;
+ goto error_out;
+ }
+ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
}
if (udf_find_fileset(sb, &fileset, &rootdir)) {
@@ -2433,10 +2455,6 @@ static unsigned int udf_count_free(struct super_block *sb)
accum += udf_count_free_bitmap(sb,
map->s_uspace.s_bitmap);
}
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
- accum += udf_count_free_bitmap(sb,
- map->s_fspace.s_bitmap);
- }
if (accum)
return accum;
@@ -2444,11 +2462,6 @@ static unsigned int udf_count_free(struct super_block *sb)
accum += udf_count_free_table(sb,
map->s_uspace.s_table);
}
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
- accum += udf_count_free_table(sb,
- map->s_fspace.s_table);
- }
-
return accum;
}
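
The net effect of the UDF changes is a single policy: when an RW-incompatible feature is found (for example freed-space accounting, certain partition access types, a too-new minimum write revision, or a virtual/VAT partition), a read-write mount fails with -EACCES, while a read-only mount proceeds with UDF_FLAG_RW_INCOMPAT set so a later remount to read-write is refused. A condensed sketch of that decision (illustrative only; the kernel keeps the flag as a bit number tested via UDF_QUERY_FLAG(), not the plain bitmask used here):

/* Simplified stand-in for the superblock flag state. */
#include <errno.h>
#include <stdbool.h>

#define DEMO_FLAG_RW_INCOMPAT	(1u << 19)

/* Called whenever an RW-incompatible feature is detected at mount time. */
int demo_handle_rw_incompat(unsigned int *sb_flags, bool mounting_ro)
{
	if (!mounting_ro)
		return -EACCES;			/* refuse a read-write mount */
	*sb_flags |= DEMO_FLAG_RW_INCOMPAT;	/* remember it for remount */
	return 0;				/* read-only mount proceeds */
}

/* Remount to read-write must be refused once the flag is set. */
int demo_handle_remount(unsigned int sb_flags, bool remount_ro)
{
	if (!remount_ro && (sb_flags & DEMO_FLAG_RW_INCOMPAT))
		return -EACCES;
	return 0;
}
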
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 9424d7cab790..3d83be54c474 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -30,11 +30,11 @@
#define UDF_FLAG_LASTBLOCK_SET 16
#define UDF_FLAG_BLOCKSIZE_SET 17
#define UDF_FLAG_INCONSISTENT 18
+#define UDF_FLAG_RW_INCOMPAT 19 /* Set when we find an RW-incompatible
+ * feature */
#define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001
#define UDF_PART_FLAG_UNALLOC_TABLE 0x0002
-#define UDF_PART_FLAG_FREED_BITMAP 0x0004
-#define UDF_PART_FLAG_FREED_TABLE 0x0008
#define UDF_PART_FLAG_READ_ONLY 0x0010
#define UDF_PART_FLAG_WRITE_ONCE 0x0020
#define UDF_PART_FLAG_REWRITABLE 0x0040
@@ -50,8 +50,6 @@
#define UDF_INVALID_MODE ((umode_t)-1)
-#pragma pack(1) /* XXX(hch): Why? This file just defines in-core structures */
-
#define MF_DUPLICATE_MD 0x01
#define MF_MIRROR_FE_LOADED 0x02
@@ -93,10 +91,6 @@ struct udf_part_map {
struct udf_bitmap *s_bitmap;
struct inode *s_table;
} s_uspace;
- union {
- struct udf_bitmap *s_bitmap;
- struct inode *s_table;
- } s_fspace;
__u32 s_partition_root;
__u32 s_partition_len;
__u16 s_partition_type;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index bfa0ec69f924..356d2b8568c1 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1026,7 +1026,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
struct userfaultfd_ctx *fork_nctx = NULL;
/* always take the fd_wqh lock before the fault_pending_wqh lock */
- spin_lock(&ctx->fd_wqh.lock);
+ spin_lock_irq(&ctx->fd_wqh.lock);
__add_wait_queue(&ctx->fd_wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -1112,13 +1112,13 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
ret = -EAGAIN;
break;
}
- spin_unlock(&ctx->fd_wqh.lock);
+ spin_unlock_irq(&ctx->fd_wqh.lock);
schedule();
- spin_lock(&ctx->fd_wqh.lock);
+ spin_lock_irq(&ctx->fd_wqh.lock);
}
__remove_wait_queue(&ctx->fd_wqh, &wait);
__set_current_state(TASK_RUNNING);
- spin_unlock(&ctx->fd_wqh.lock);
+ spin_unlock_irq(&ctx->fd_wqh.lock);
if (!ret && msg->event == UFFD_EVENT_FORK) {
ret = resolve_userfault_fork(ctx, fork_nctx, msg);
diff --git a/fs/utimes.c b/fs/utimes.c
index 69d4b6ba1bfb..bdcf2daf39c1 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -8,35 +8,6 @@
#include <linux/compat.h>
#include <asm/unistd.h>
-#ifdef __ARCH_WANT_SYS_UTIME
-
-/*
- * sys_utime() can be implemented in user-level using sys_utimes().
- * Is this for backwards compatibility? If so, why not move it
- * into the appropriate arch directory (for those architectures that
- * need it).
- */
-
-/* If times==NULL, set access and modification to current time,
- * must be owner or have write permission.
- * Else, update from *times, must be owner or super user.
- */
-SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
-{
- struct timespec64 tv[2];
-
- if (times) {
- if (get_user(tv[0].tv_sec, &times->actime) ||
- get_user(tv[1].tv_sec, &times->modtime))
- return -EFAULT;
- tv[0].tv_nsec = 0;
- tv[1].tv_nsec = 0;
- }
- return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0);
-}
-
-#endif
-
static bool nsec_valid(long nsec)
{
if (nsec == UTIME_OMIT || nsec == UTIME_NOW)
@@ -166,7 +137,7 @@ out:
}
SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
- struct timespec __user *, utimes, int, flags)
+ struct __kernel_timespec __user *, utimes, int, flags)
{
struct timespec64 tstimes[2];
@@ -184,6 +155,13 @@ SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags);
}
+#ifdef __ARCH_WANT_SYS_UTIME
+/*
+ * futimesat(), utimes() and utime() are older versions of utimensat()
+ * that are provided for compatibility with traditional C libraries.
+ * On modern architectures, we always use libc wrappers around
+ * utimensat() instead.
+ */
static long do_futimesat(int dfd, const char __user *filename,
struct timeval __user *utimes)
{
@@ -225,13 +203,29 @@ SYSCALL_DEFINE2(utimes, char __user *, filename,
return do_futimesat(AT_FDCWD, filename, utimes);
}
-#ifdef CONFIG_COMPAT
+SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
+{
+ struct timespec64 tv[2];
+
+ if (times) {
+ if (get_user(tv[0].tv_sec, &times->actime) ||
+ get_user(tv[1].tv_sec, &times->modtime))
+ return -EFAULT;
+ tv[0].tv_nsec = 0;
+ tv[1].tv_nsec = 0;
+ }
+ return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0);
+}
+#endif
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
/*
* Not all architectures have sys_utime, so implement this in terms
* of sys_utimes.
*/
+#ifdef __ARCH_WANT_SYS_UTIME32
COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
- struct compat_utimbuf __user *, t)
+ struct old_utimbuf32 __user *, t)
{
struct timespec64 tv[2];
@@ -244,14 +238,15 @@ COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
}
return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
}
+#endif
-COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct compat_timespec __user *, t, int, flags)
+COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags)
{
struct timespec64 tv[2];
if (t) {
- if (compat_get_timespec64(&tv[0], &t[0]) ||
- compat_get_timespec64(&tv[1], &t[1]))
+ if (get_old_timespec32(&tv[0], &t[0]) ||
+ get_old_timespec32(&tv[1], &t[1]))
return -EFAULT;
if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT)
@@ -260,8 +255,9 @@ COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filena
return do_utimes(dfd, filename, t ? tv : NULL, flags);
}
+#ifdef __ARCH_WANT_SYS_UTIME32
static long do_compat_futimesat(unsigned int dfd, const char __user *filename,
- struct compat_timeval __user *t)
+ struct old_timeval32 __user *t)
{
struct timespec64 tv[2];
@@ -282,13 +278,14 @@ static long do_compat_futimesat(unsigned int dfd, const char __user *filename,
COMPAT_SYSCALL_DEFINE3(futimesat, unsigned int, dfd,
const char __user *, filename,
- struct compat_timeval __user *, t)
+ struct old_timeval32 __user *, t)
{
return do_compat_futimesat(dfd, filename, t);
}
-COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct compat_timeval __user *, t)
+COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct old_timeval32 __user *, t)
{
return do_compat_futimesat(AT_FDCWD, filename, t);
}
#endif
+#endif
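
The utimensat() syscall itself now takes __kernel_timespec, and the legacy utime/utimes entry points move behind __ARCH_WANT_SYS_UTIME / __ARCH_WANT_SYS_UTIME32; glibc callers are unaffected. A minimal sketch of the modern interface (not from this patch), updating only the modification time:

/* Set mtime to "now" and leave atime untouched via UTIME_OMIT. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <time.h>

int main(int argc, char **argv)
{
	struct timespec times[2] = {
		[0] = { .tv_nsec = UTIME_OMIT },	/* leave atime alone */
		[1] = { .tv_nsec = UTIME_NOW },		/* mtime = now */
	};

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	if (utimensat(AT_FDCWD, argv[1], times, 0) < 0) {
		perror("utimensat");
		return 1;
	}
	return 0;
}
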
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index c6299f82a6e4..844ed87b1900 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -191,6 +191,128 @@ xfs_attr_calc_size(
return nblks;
}
+STATIC int
+xfs_attr_try_sf_addname(
+ struct xfs_inode *dp,
+ struct xfs_da_args *args)
+{
+
+ struct xfs_mount *mp = dp->i_mount;
+ int error, error2;
+
+ error = xfs_attr_shortform_addname(args);
+ if (error == -ENOSPC)
+ return error;
+
+ /*
+ * Commit the shortform mods, and we're done.
+ * NOTE: this is also the error path (EEXIST, etc).
+ */
+ if (!error && (args->flags & ATTR_KERNOTIME) == 0)
+ xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
+
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(args->trans);
+
+ error2 = xfs_trans_commit(args->trans);
+ args->trans = NULL;
+ return error ? error : error2;
+}
+
+/*
+ * Set the attribute specified in @args.
+ */
+int
+xfs_attr_set_args(
+ struct xfs_da_args *args,
+ struct xfs_buf **leaf_bp)
+{
+ struct xfs_inode *dp = args->dp;
+ int error;
+
+ /*
+ * If the attribute list is non-existent or a shortform list,
+ * upgrade it to a single-leaf-block attribute list.
+ */
+ if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
+ (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+ dp->i_d.di_anextents == 0)) {
+
+ /*
+ * Build initial attribute list (if required).
+ */
+ if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+ xfs_attr_shortform_create(args);
+
+ /*
+ * Try to add the attr to the attribute list in the inode.
+ */
+ error = xfs_attr_try_sf_addname(dp, args);
+ if (error != -ENOSPC)
+ return error;
+
+ /*
+ * It won't fit in the shortform, transform to a leaf block.
+ * GROT: another possible req'mt for a double-split btree op.
+ */
+ error = xfs_attr_shortform_to_leaf(args, leaf_bp);
+ if (error)
+ return error;
+
+ /*
+ * Prevent the leaf buffer from being unlocked so that a
+ * concurrent AIL push cannot grab the half-baked leaf
+ * buffer and run into problems with the write verifier.
+ */
+ xfs_trans_bhold(args->trans, *leaf_bp);
+
+ error = xfs_defer_finish(&args->trans);
+ if (error)
+ return error;
+
+ /*
+ * Commit the leaf transformation. We'll need another
+ * (linked) transaction to add the new attribute to the
+ * leaf.
+ */
+ error = xfs_trans_roll_inode(&args->trans, dp);
+ if (error)
+ return error;
+ xfs_trans_bjoin(args->trans, *leaf_bp);
+ *leaf_bp = NULL;
+ }
+
+ if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
+ error = xfs_attr_leaf_addname(args);
+ else
+ error = xfs_attr_node_addname(args);
+ return error;
+}
+
+/*
+ * Remove the attribute specified in @args.
+ */
+int
+xfs_attr_remove_args(
+ struct xfs_da_args *args)
+{
+ struct xfs_inode *dp = args->dp;
+ int error;
+
+ if (!xfs_inode_hasattr(dp)) {
+ error = -ENOATTR;
+ } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
+ error = xfs_attr_shortform_remove(args);
+ } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+ error = xfs_attr_leaf_removename(args);
+ } else {
+ error = xfs_attr_node_removename(args);
+ }
+
+ return error;
+}
+
int
xfs_attr_set(
struct xfs_inode *dp,
@@ -204,7 +326,7 @@ xfs_attr_set(
struct xfs_da_args args;
struct xfs_trans_res tres;
int rsvd = (flags & ATTR_ROOT) != 0;
- int error, err2, local;
+ int error, local;
XFS_STATS_INC(mp, xs_attr_set);
@@ -255,93 +377,17 @@ xfs_attr_set(
error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
- if (error) {
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- xfs_trans_cancel(args.trans);
- return error;
- }
+ if (error)
+ goto out_trans_cancel;
xfs_trans_ijoin(args.trans, dp, 0);
-
- /*
- * If the attribute list is non-existent or a shortform list,
- * upgrade it to a single-leaf-block attribute list.
- */
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
-
- /*
- * Build initial attribute list (if required).
- */
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
- xfs_attr_shortform_create(&args);
-
- /*
- * Try to add the attr to the attribute list in
- * the inode.
- */
- error = xfs_attr_shortform_addname(&args);
- if (error != -ENOSPC) {
- /*
- * Commit the shortform mods, and we're done.
- * NOTE: this is also the error path (EEXIST, etc).
- */
- ASSERT(args.trans != NULL);
-
- /*
- * If this is a synchronous mount, make sure that
- * the transaction goes to disk before returning
- * to the user.
- */
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(args.trans);
-
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_trans_ichgtime(args.trans, dp,
- XFS_ICHGTIME_CHG);
- }
- err2 = xfs_trans_commit(args.trans);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
- return error ? error : err2;
- }
-
- /*
- * It won't fit in the shortform, transform to a leaf block.
- * GROT: another possible req'mt for a double-split btree op.
- */
- error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
- if (error)
- goto out;
- /*
- * Prevent the leaf buffer from being unlocked so that a
- * concurrent AIL push cannot grab the half-baked leaf
- * buffer and run into problems with the write verifier.
- */
- xfs_trans_bhold(args.trans, leaf_bp);
- error = xfs_defer_finish(&args.trans);
- if (error)
- goto out;
-
- /*
- * Commit the leaf transformation. We'll need another (linked)
- * transaction to add the new attribute to the leaf, which
- * means that we have to hold & join the leaf buffer here too.
- */
- error = xfs_trans_roll_inode(&args.trans, dp);
- if (error)
- goto out;
- xfs_trans_bjoin(args.trans, leaf_bp);
- leaf_bp = NULL;
- }
-
- if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
- error = xfs_attr_leaf_addname(&args);
- else
- error = xfs_attr_node_addname(&args);
+ error = xfs_attr_set_args(&args, &leaf_bp);
if (error)
- goto out;
+ goto out_release_leaf;
+ if (!args.trans) {
+ /* shortform attribute has already been committed */
+ goto out_unlock;
+ }
/*
* If this is a synchronous mount, make sure that the
@@ -358,17 +404,17 @@ xfs_attr_set(
*/
xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
error = xfs_trans_commit(args.trans);
+out_unlock:
xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
return error;
-out:
+out_release_leaf:
if (leaf_bp)
xfs_trans_brelse(args.trans, leaf_bp);
+out_trans_cancel:
if (args.trans)
xfs_trans_cancel(args.trans);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- return error;
+ goto out_unlock;
}
/*
@@ -423,17 +469,7 @@ xfs_attr_remove(
*/
xfs_trans_ijoin(args.trans, dp, 0);
- if (!xfs_inode_hasattr(dp)) {
- error = -ENOATTR;
- } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
- ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
- error = xfs_attr_shortform_remove(&args);
- } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
- error = xfs_attr_leaf_removename(&args);
- } else {
- error = xfs_attr_node_removename(&args);
- }
-
+ error = xfs_attr_remove_args(&args);
if (error)
goto out;
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 033ff8c478e2..bdf52a333f3f 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -140,7 +140,9 @@ int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
unsigned char *value, int *valuelenp, int flags);
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
unsigned char *value, int valuelen, int flags);
+int xfs_attr_set_args(struct xfs_da_args *args, struct xfs_buf **leaf_bp);
int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_remove_args(struct xfs_da_args *args);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
int flags, struct attrlist_cursor_kern *cursor);
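
The new xfs_attr_set_args()/xfs_attr_remove_args() helpers only refactor the transaction handling; the extended attribute interface seen by applications stays the same. A minimal sketch of that userspace view (not from this patch):

/* Set and read back a user.* extended attribute on the given path. */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	char value[64];
	ssize_t len;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	if (setxattr(argv[1], "user.demo", "hello", strlen("hello"), 0) < 0) {
		perror("setxattr");
		return 1;
	}
	len = getxattr(argv[1], "user.demo", value, sizeof(value) - 1);
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	value[len] = '\0';
	printf("user.demo = %s\n", value);
	return 0;
}
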
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index a47670332326..74d7228e755b 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1019,6 +1019,34 @@ xfs_bmap_add_attrfork_local(
return -EFSCORRUPTED;
}
+/* Set an inode attr fork off based on the format */
+int
+xfs_bmap_set_attrforkoff(
+ struct xfs_inode *ip,
+ int size,
+ int *version)
+{
+ switch (ip->i_d.di_format) {
+ case XFS_DINODE_FMT_DEV:
+ ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+ break;
+ case XFS_DINODE_FMT_LOCAL:
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
+ if (!ip->i_d.di_forkoff)
+ ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
+ else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
+ *version = 2;
+ break;
+ default:
+ ASSERT(0);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* Convert inode from non-attributed to attributed.
* Must not be in a transaction, ip must not be locked.
@@ -1070,26 +1098,9 @@ xfs_bmap_add_attrfork(
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
- switch (ip->i_d.di_format) {
- case XFS_DINODE_FMT_DEV:
- ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
- break;
- case XFS_DINODE_FMT_LOCAL:
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
- if (!ip->i_d.di_forkoff)
- ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
- else if (mp->m_flags & XFS_MOUNT_ATTR2)
- version = 2;
- break;
- default:
- ASSERT(0);
- error = -EINVAL;
+ error = xfs_bmap_set_attrforkoff(ip, size, &version);
+ if (error)
goto trans_cancel;
- }
-
ASSERT(ip->i_afp == NULL);
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
ip->i_afp->if_flags = XFS_IFEXTENTS;
@@ -4081,8 +4092,7 @@ xfs_bmapi_allocate(
* extents to real extents when we're about to write the data.
*/
if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
- (bma->flags & XFS_BMAPI_PREALLOC) &&
- xfs_sb_version_hasextflgbit(&mp->m_sb))
+ (bma->flags & XFS_BMAPI_PREALLOC))
bma->got.br_state = XFS_EXT_UNWRITTEN;
if (bma->wasdel)
@@ -5245,8 +5255,7 @@ __xfs_bunmapi(
* unmapping part of it. But we can't really
* get rid of part of a realtime extent.
*/
- if (del.br_state == XFS_EXT_UNWRITTEN ||
- !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+ if (del.br_state == XFS_EXT_UNWRITTEN) {
/*
* This piece is unwritten, or we're not
* using unwritten extents. Skip over it.
@@ -5296,10 +5305,9 @@ __xfs_bunmapi(
del.br_blockcount -= mod;
del.br_startoff += mod;
del.br_startblock += mod;
- } else if ((del.br_startoff == start &&
- (del.br_state == XFS_EXT_UNWRITTEN ||
- tp->t_blk_res == 0)) ||
- !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+ } else if (del.br_startoff == start &&
+ (del.br_state == XFS_EXT_UNWRITTEN ||
+ tp->t_blk_res == 0)) {
/*
* Can't make it unwritten. There isn't
* a full extent here so just skip it.
@@ -6114,11 +6122,7 @@ xfs_bmap_validate_extent(
XFS_FSB_TO_AGNO(mp, endfsb))
return __this_address;
}
- if (irec->br_state != XFS_EXT_NORM) {
- if (whichfork != XFS_DATA_FORK)
- return __this_address;
- if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
- return __this_address;
- }
+ if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
+ return __this_address;
return NULL;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index b6e9b639e731..488dc8860fd7 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -183,6 +183,7 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
xfs_filblks_t len, struct xfs_owner_info *oinfo,
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index afbe336600e1..9995d5ae380b 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -287,6 +287,8 @@ static inline bool xfs_sb_good_v4_features(struct xfs_sb *sbp)
{
if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
return false;
+ if (!(sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT))
+ return false;
/* check for unknown features in the fs */
if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
@@ -357,12 +359,6 @@ static inline bool xfs_sb_version_haslogv2(struct xfs_sb *sbp)
(sbp->sb_versionnum & XFS_SB_VERSION_LOGV2BIT);
}
-static inline bool xfs_sb_version_hasextflgbit(struct xfs_sb *sbp)
-{
- return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
- (sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT);
-}
-
static inline bool xfs_sb_version_hassector(struct xfs_sb *sbp)
{
return (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 081f46e30556..b5a82acd7dfe 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -1115,7 +1115,8 @@ xfs_fs_geometry(
geo->version = XFS_FSOP_GEOM_VERSION;
geo->flags = XFS_FSOP_GEOM_FLAGS_NLINK |
- XFS_FSOP_GEOM_FLAGS_DIRV2;
+ XFS_FSOP_GEOM_FLAGS_DIRV2 |
+ XFS_FSOP_GEOM_FLAGS_EXTFLG;
if (xfs_sb_version_hasattr(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_ATTR;
if (xfs_sb_version_hasquota(sbp))
@@ -1124,8 +1125,6 @@ xfs_fs_geometry(
geo->flags |= XFS_FSOP_GEOM_FLAGS_IALIGN;
if (xfs_sb_version_hasdalign(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_DALIGN;
- if (xfs_sb_version_hasextflgbit(sbp))
- geo->flags |= XFS_FSOP_GEOM_FLAGS_EXTFLG;
if (xfs_sb_version_hassector(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_SECTOR;
if (xfs_sb_version_hasasciici(sbp))
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 9f08dd9bf1d5..4fc0a5ea7673 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -29,6 +29,8 @@
#include "xfs_ag_resv.h"
#include "xfs_trans_space.h"
#include "xfs_quota.h"
+#include "xfs_attr.h"
+#include "xfs_reflink.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -692,13 +694,14 @@ xrep_findroot_block(
struct xrep_find_ag_btree *fab,
uint64_t owner,
xfs_agblock_t agbno,
- bool *found_it)
+ bool *done_with_block)
{
struct xfs_mount *mp = ri->sc->mp;
struct xfs_buf *bp;
struct xfs_btree_block *btblock;
xfs_daddr_t daddr;
- int error;
+ int block_level;
+ int error = 0;
daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.agno, agbno);
@@ -717,36 +720,111 @@ xrep_findroot_block(
return error;
}
+ /*
+ * Read the buffer into memory so that we can see if it's a match for
+ * our btree type. We have no clue if it is beforehand, and we want to
+ * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
+ * will cause needless disk reads in subsequent calls to this function)
+ * and logging metadata verifier failures.
+ *
+ * Therefore, pass in NULL buffer ops. If the buffer was already in
+ * memory from some other caller it will already have b_ops assigned.
+ * If it was in memory from a previous unsuccessful findroot_block
+ * call, the buffer won't have b_ops but it should be clean and ready
+ * for us to try to verify if the read call succeeds. The same applies
+ * if the buffer wasn't in memory at all.
+ *
+ * Note: If we never match a btree type with this buffer, it will be
+ * left in memory with NULL b_ops. This shouldn't be a problem unless
+ * the buffer gets written.
+ */
error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
mp->m_bsize, 0, &bp, NULL);
if (error)
return error;
- /*
- * Does this look like a block matching our fs and higher than any
- * other block we've found so far? If so, reattach buffer verifiers
- * so the AIL won't complain if the buffer is also dirty.
- */
+ /* Ensure the block magic matches the btree type we're looking for. */
btblock = XFS_BUF_TO_BLOCK(bp);
if (be32_to_cpu(btblock->bb_magic) != fab->magic)
goto out;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
- !uuid_equal(&btblock->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
- goto out;
- bp->b_ops = fab->buf_ops;
- /* Ignore this block if it's lower in the tree than we've seen. */
- if (fab->root != NULLAGBLOCK &&
- xfs_btree_get_level(btblock) < fab->height)
- goto out;
+ /*
+ * If the buffer already has ops applied and they're not the ones for
+ * this btree type, we know this block doesn't match the btree and we
+ * can bail out.
+ *
+ * If the buffer ops match ours, someone else has already validated
+ * the block for us, so we can move on to checking if this is a root
+ * block candidate.
+ *
+ * If the buffer does not have ops, nobody has successfully validated
+ * the contents and the buffer cannot be dirty. If the magic, uuid,
+ * and structure match this btree type then we'll move on to checking
+ * if it's a root block candidate. If there is no match, bail out.
+ */
+ if (bp->b_ops) {
+ if (bp->b_ops != fab->buf_ops)
+ goto out;
+ } else {
+ ASSERT(!xfs_trans_buf_is_dirty(bp));
+ if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
+ &mp->m_sb.sb_meta_uuid))
+ goto out;
+ fab->buf_ops->verify_read(bp);
+ if (bp->b_error) {
+ bp->b_error = 0;
+ goto out;
+ }
- /* Make sure we pass the verifiers. */
- bp->b_ops->verify_read(bp);
- if (bp->b_error)
+ /*
+ * Some read verifiers will (re)set b_ops, so we must be
+ * careful not to blow away any such assignment.
+ */
+ if (!bp->b_ops)
+ bp->b_ops = fab->buf_ops;
+ }
+
+ /*
+ * This block passes the magic/uuid and verifier tests for this btree
+ * type. We don't need the caller to try the other tree types.
+ */
+ *done_with_block = true;
+
+ /*
+ * Compare this btree block's level to the height of the current
+ * candidate root block.
+ *
+ * If the level matches the root we found previously, throw away both
+ * blocks because there can't be two candidate roots.
+ *
+ * If level is lower in the tree than the root we found previously,
+ * ignore this block.
+ */
+ block_level = xfs_btree_get_level(btblock);
+ if (block_level + 1 == fab->height) {
+ fab->root = NULLAGBLOCK;
goto out;
- fab->root = agbno;
- fab->height = xfs_btree_get_level(btblock) + 1;
- *found_it = true;
+ } else if (block_level < fab->height) {
+ goto out;
+ }
+
+ /*
+ * This is the highest block in the tree that we've found so far.
+ * Update the btree height to reflect what we've learned from this
+ * block.
+ */
+ fab->height = block_level + 1;
+
+ /*
+ * If this block doesn't have sibling pointers, then it's the new root
+ * block candidate. Otherwise, the root will be found farther up the
+ * tree.
+ */
+ if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
+ btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
+ fab->root = agbno;
+ else
+ fab->root = NULLAGBLOCK;
trace_xrep_findroot_block(mp, ri->sc->sa.agno, agbno,
be32_to_cpu(btblock->bb_magic), fab->height - 1);
@@ -768,7 +846,7 @@ xrep_findroot_rmap(
struct xrep_findroot *ri = priv;
struct xrep_find_ag_btree *fab;
xfs_agblock_t b;
- bool found_it;
+ bool done;
int error = 0;
/* Ignore anything that isn't AG metadata. */
@@ -777,16 +855,16 @@ xrep_findroot_rmap(
/* Otherwise scan each block + btree type. */
for (b = 0; b < rec->rm_blockcount; b++) {
- found_it = false;
+ done = false;
for (fab = ri->btree_info; fab->buf_ops; fab++) {
if (rec->rm_owner != fab->rmap_owner)
continue;
error = xrep_findroot_block(ri, fab,
rec->rm_owner, rec->rm_startblock + b,
- &found_it);
+ &done);
if (error)
return error;
- if (found_it)
+ if (done)
break;
}
}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 4bfae1e61d30..1b2344d00525 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -412,19 +412,6 @@ xchk_validate_inputs(
goto out;
}
- error = -EOPNOTSUPP;
- /*
- * We won't scrub any filesystem that doesn't have the ability
- * to record unwritten extents. The option was made default in
- * 2003, removed from mkfs in 2007, and cannot be disabled in
- * v5, so if we find a filesystem without this flag it's either
- * really old or totally unsupported. Avoid it either way.
- * We also don't support v1-v3 filesystems, which aren't
- * mountable.
- */
- if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
- goto out;
-
/*
* We only want to repair read-write v5+ filesystems. Defer the check
* for ops->repair until after our scrub confirms that we need to
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 49f5f5896a43..338b9d9984e0 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -917,7 +917,7 @@ xfs_vm_writepage(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = {
- .io_type = XFS_IO_INVALID,
+ .io_type = XFS_IO_HOLE,
};
int ret;
@@ -933,7 +933,7 @@ xfs_vm_writepages(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = {
- .io_type = XFS_IO_INVALID,
+ .io_type = XFS_IO_HOLE,
};
int ret;
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 9af867951a10..494b4338446e 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -12,21 +12,19 @@ extern struct bio_set xfs_ioend_bioset;
* Types of I/O for bmap clustering and I/O completion tracking.
*/
enum {
- XFS_IO_INVALID, /* initial state */
+ XFS_IO_HOLE, /* covers region without any block allocation */
XFS_IO_DELALLOC, /* covers delalloc region */
XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
XFS_IO_OVERWRITE, /* covers already allocated extent */
XFS_IO_COW, /* covers copy-on-write extent */
- XFS_IO_HOLE, /* covers region without any block allocation */
};
#define XFS_IO_TYPES \
- { XFS_IO_INVALID, "invalid" }, \
- { XFS_IO_DELALLOC, "delalloc" }, \
- { XFS_IO_UNWRITTEN, "unwritten" }, \
- { XFS_IO_OVERWRITE, "overwrite" }, \
- { XFS_IO_COW, "CoW" }, \
- { XFS_IO_HOLE, "hole" }
+ { XFS_IO_HOLE, "hole" }, \
+ { XFS_IO_DELALLOC, "delalloc" }, \
+ { XFS_IO_UNWRITTEN, "unwritten" }, \
+ { XFS_IO_OVERWRITE, "overwrite" }, \
+ { XFS_IO_COW, "CoW" }
/*
* Structure for buffered I/O completions.
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 6de8d90041ff..5d263dfdb3bc 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -406,10 +406,10 @@ xfs_getbmap_report_one(
struct xfs_bmbt_irec *got)
{
struct kgetbmap *p = out + bmv->bmv_entries;
- bool shared = false, trimmed = false;
+ bool shared = false;
int error;
- error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
+ error = xfs_reflink_trim_around_shared(ip, got, &shared);
if (error)
return error;
@@ -1043,44 +1043,6 @@ out_trans_cancel:
}
static int
-xfs_adjust_extent_unmap_boundaries(
- struct xfs_inode *ip,
- xfs_fileoff_t *startoffset_fsb,
- xfs_fileoff_t *endoffset_fsb)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_bmbt_irec imap;
- int nimap, error;
- xfs_extlen_t mod = 0;
-
- nimap = 1;
- error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
- if (error)
- return error;
-
- if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
- div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
- if (mod)
- *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
- }
-
- nimap = 1;
- error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
- if (error)
- return error;
-
- if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
- mod++;
- if (mod && mod != mp->m_sb.sb_rextsize)
- *endoffset_fsb -= mod;
- }
-
- return 0;
-}
-
-static int
xfs_flush_unmap_range(
struct xfs_inode *ip,
xfs_off_t offset,
@@ -1133,19 +1095,8 @@ xfs_free_file_space(
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
/*
- * Need to zero the stuff we're not freeing, on disk. If it's a RT file
- * and we can't use unwritten extents then we actually need to ensure
- * to zero the whole extent, otherwise we just need to take of block
- * boundaries, and xfs_bunmapi will handle the rest.
+ * Need to zero the stuff we're not freeing, on disk.
*/
- if (XFS_IS_REALTIME_INODE(ip) &&
- !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
- error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
- &endoffset_fsb);
- if (error)
- return error;
- }
-
if (endoffset_fsb > startoffset_fsb) {
while (!done) {
error = xfs_unmap_extent(ip, startoffset_fsb,
@@ -1824,6 +1775,12 @@ xfs_swap_extents(
if (error)
goto out_unlock;
+ if (xfs_inode_has_cow_data(tip)) {
+ error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
+ if (error)
+ return error;
+ }
+
/*
* Extent "swapping" with rmap requires a permanent reservation and
* a block reservation because it's really just a remap operation
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index e839907e8492..b21ea2ba768d 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -37,6 +37,32 @@ static kmem_zone_t *xfs_buf_zone;
#define xb_to_gfp(flags) \
((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
+/*
+ * Locking orders
+ *
+ * xfs_buf_ioacct_inc:
+ * xfs_buf_ioacct_dec:
+ * b_sema (caller holds)
+ * b_lock
+ *
+ * xfs_buf_stale:
+ * b_sema (caller holds)
+ * b_lock
+ * lru_lock
+ *
+ * xfs_buf_rele:
+ * b_lock
+ * pag_buf_lock
+ * lru_lock
+ *
+ * xfs_buftarg_wait_rele
+ * lru_lock
+ * b_lock (trylock due to inversion)
+ *
+ * xfs_buftarg_isolate
+ * lru_lock
+ * b_lock (trylock due to inversion)
+ */
static inline int
xfs_buf_is_vmapped(
@@ -749,6 +775,30 @@ _xfs_buf_read(
return xfs_buf_submit(bp);
}
+/*
+ * If the caller passed in an ops structure and the buffer doesn't have ops
+ * assigned, set the ops and use them to verify the contents. If the contents
+ * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no
+ * recorded errors and is already in XBF_DONE state.
+ */
+int
+xfs_buf_ensure_ops(
+ struct xfs_buf *bp,
+ const struct xfs_buf_ops *ops)
+{
+ ASSERT(bp->b_flags & XBF_DONE);
+ ASSERT(bp->b_error == 0);
+
+ if (!ops || bp->b_ops)
+ return 0;
+
+ bp->b_ops = ops;
+ bp->b_ops->verify_read(bp);
+ if (bp->b_error)
+ bp->b_flags &= ~XBF_DONE;
+ return bp->b_error;
+}
+
xfs_buf_t *
xfs_buf_read_map(
struct xfs_buftarg *target,
@@ -762,26 +812,32 @@ xfs_buf_read_map(
flags |= XBF_READ;
bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (bp) {
- trace_xfs_buf_read(bp, flags, _RET_IP_);
+ if (!bp)
+ return NULL;
- if (!(bp->b_flags & XBF_DONE)) {
- XFS_STATS_INC(target->bt_mount, xb_get_read);
- bp->b_ops = ops;
- _xfs_buf_read(bp, flags);
- } else if (flags & XBF_ASYNC) {
- /*
- * Read ahead call which is already satisfied,
- * drop the buffer
- */
- xfs_buf_relse(bp);
- return NULL;
- } else {
- /* We do not want read in the flags */
- bp->b_flags &= ~XBF_READ;
- }
+ trace_xfs_buf_read(bp, flags, _RET_IP_);
+
+ if (!(bp->b_flags & XBF_DONE)) {
+ XFS_STATS_INC(target->bt_mount, xb_get_read);
+ bp->b_ops = ops;
+ _xfs_buf_read(bp, flags);
+ return bp;
}
+ xfs_buf_ensure_ops(bp, ops);
+
+ if (flags & XBF_ASYNC) {
+ /*
+ * Read ahead call which is already satisfied,
+ * drop the buffer
+ */
+ xfs_buf_relse(bp);
+ return NULL;
+ }
+
+ /* We do not want read in the flags */
+ bp->b_flags &= ~XBF_READ;
+ ASSERT(bp->b_ops != NULL || ops == NULL);
return bp;
}
@@ -1006,8 +1062,18 @@ xfs_buf_rele(
ASSERT(atomic_read(&bp->b_hold) > 0);
- release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
+ /*
+ * We grab the b_lock here first to serialise racing xfs_buf_rele()
+ * calls. The pag_buf_lock being taken on the last reference only
+ * serialises against racing lookups in xfs_buf_find(). IOWs, the second
+ * to last reference we drop here is not serialised against the last
+ * reference until we take bp->b_lock. Hence if we don't grab b_lock
+ * first, the last "release" reference can win the race to the lock and
+ * free the buffer before the second-to-last reference is processed,
+ * leading to a use-after-free scenario.
+ */
spin_lock(&bp->b_lock);
+ release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
if (!release) {
/*
* Drop the in-flight state if the buffer is already on the LRU
@@ -1989,6 +2055,13 @@ xfs_buf_delwri_submit_buffers(
* is only safely useable for callers that can track I/O completion by higher
* level means, e.g. AIL pushing as the @buffer_list is consumed in this
* function.
+ *
+ * Note: this function will skip buffers it would block on, and in doing so
+ * leaves them on @buffer_list so they can be retried on a later pass. As such,
+ * it is up to the caller to ensure that the buffer list is fully submitted or
+ * cancelled appropriately when they are finished with the list. Failure to
+ * cancel or resubmit the list until it is empty will result in leaked buffers
+ * at unmount time.
*/
int
xfs_buf_delwri_submit_nowait(
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 4e3171acd0f8..b9f5511ea998 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -385,4 +385,6 @@ extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
+int xfs_buf_ensure_ops(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
+
#endif /* __XFS_BUF_H__ */
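
xfs_buf_ensure_ops() captures the pattern also relied on by the scrub/repair changes: a buffer that was read without verifier ops gets them attached lazily, is verified, and loses its up-to-date state if verification fails. A stripped-down illustration with stand-in types (assumptions throughout; this is not the XFS implementation):

/* Stand-in types only; the real xfs_buf and xfs_buf_ops live in the
 * kernel. This just illustrates "attach ops lazily, verify, and drop
 * the up-to-date flag on failure". */
#include <stddef.h>

#define BUF_DONE	0x1	/* buffer contents are valid / up to date */

struct buf;

struct buf_ops {
	void (*verify_read)(struct buf *bp);
};

struct buf {
	unsigned int		flags;
	int			error;
	const struct buf_ops	*ops;
};

/* Attach @ops if the buffer has none, run the read verifier, and clear
 * the done flag when verification fails. */
int buf_ensure_ops(struct buf *bp, const struct buf_ops *ops)
{
	if (!ops || bp->ops)
		return 0;

	bp->ops = ops;
	bp->ops->verify_read(bp);
	if (bp->error)
		bp->flags &= ~BUF_DONE;
	return bp->error;
}
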
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 7c00b8bedfe3..093c2b8d7e20 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -470,20 +470,13 @@ xfs_fs_goingdown(
*/
void
xfs_do_force_shutdown(
- xfs_mount_t *mp,
+ struct xfs_mount *mp,
int flags,
char *fname,
int lnnum)
{
- int logerror;
-
- logerror = flags & SHUTDOWN_LOG_IO_ERROR;
+ bool logerror = flags & SHUTDOWN_LOG_IO_ERROR;
- if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- xfs_notice(mp,
- "%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
- __func__, flags, lnnum, fname, __return_address);
- }
/*
* No need to duplicate efforts.
*/
@@ -499,27 +492,34 @@ xfs_do_force_shutdown(
if (xfs_log_force_umount(mp, logerror))
return;
+ if (flags & SHUTDOWN_FORCE_UMOUNT) {
+ xfs_alert(mp,
+"User initiated shutdown received. Shutting down filesystem");
+ return;
+ }
+
+ xfs_notice(mp,
+"%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
+ __func__, flags, lnnum, fname, __return_address);
+
if (flags & SHUTDOWN_CORRUPT_INCORE) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
- "Corruption of in-memory data detected. Shutting down filesystem");
+"Corruption of in-memory data detected. Shutting down filesystem");
if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
xfs_stack_trace();
- } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- if (logerror) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
- "Log I/O Error Detected. Shutting down filesystem");
- } else if (flags & SHUTDOWN_DEVICE_REQ) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "All device paths lost. Shutting down filesystem");
- } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "I/O Error Detected. Shutting down filesystem");
- }
- }
- if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- xfs_alert(mp,
- "Please umount the filesystem and rectify the problem(s)");
+ } else if (logerror) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
+ "Log I/O Error Detected. Shutting down filesystem");
+ } else if (flags & SHUTDOWN_DEVICE_REQ) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
+ "All device paths lost. Shutting down filesystem");
+ } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
+ "I/O Error Detected. Shutting down filesystem");
}
+
+ xfs_alert(mp,
+ "Please unmount the filesystem and rectify the problem(s)");
}
/*
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 0ef5ece5634c..6e2c08f30f60 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -604,14 +604,6 @@ xfs_ioc_space(
uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
int error;
- /*
- * Only allow the sys admin to reserve space unless
- * unwritten extents are enabled.
- */
- if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
return -EPERM;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6320aca39f39..27c93b5f029d 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -62,6 +62,21 @@ xfs_bmbt_to_iomap(
iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}
+static void
+xfs_hole_to_iomap(
+ struct xfs_inode *ip,
+ struct iomap *iomap,
+ xfs_fileoff_t offset_fsb,
+ xfs_fileoff_t end_fsb)
+{
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
+ iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
+ iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
+ iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
+ iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+}
+
xfs_extlen_t
xfs_eof_alignment(
struct xfs_inode *ip,
@@ -502,6 +517,7 @@ xfs_file_iomap_begin_delay(
struct inode *inode,
loff_t offset,
loff_t count,
+ unsigned flags,
struct iomap *iomap)
{
struct xfs_inode *ip = XFS_I(inode);
@@ -538,15 +554,23 @@ xfs_file_iomap_begin_delay(
goto out_unlock;
}
+ end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
+
eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got);
- if (!eof && got.br_startoff <= offset_fsb) {
- if (xfs_is_reflink_inode(ip)) {
- bool shared;
+ if (eof)
+ got.br_startoff = end_fsb; /* fake hole until the end */
- end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
- maxbytes_fsb);
+ if (got.br_startoff <= offset_fsb) {
+ /*
+ * For reflink files we may need a delalloc reservation when
+ * overwriting shared extents. This includes zeroing of
+ * existing extents that contain data.
+ */
+ if (xfs_is_reflink_inode(ip) &&
+ ((flags & IOMAP_WRITE) ||
+ got.br_state != XFS_EXT_UNWRITTEN)) {
xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
- error = xfs_reflink_reserve_cow(ip, &got, &shared);
+ error = xfs_reflink_reserve_cow(ip, &got);
if (error)
goto out_unlock;
}
@@ -555,6 +579,11 @@ xfs_file_iomap_begin_delay(
goto done;
}
+ if (flags & IOMAP_ZERO) {
+ xfs_hole_to_iomap(ip, iomap, offset_fsb, got.br_startoff);
+ goto out_unlock;
+ }
+
error = xfs_qm_dqattach_locked(ip, false);
if (error)
goto out_unlock;
@@ -1003,16 +1032,17 @@ xfs_file_iomap_begin(
struct xfs_bmbt_irec imap;
xfs_fileoff_t offset_fsb, end_fsb;
int nimaps = 1, error = 0;
- bool shared = false, trimmed = false;
+ bool shared = false;
unsigned lockmode;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
+ if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
/* Reserve delalloc blocks for regular writeback. */
- return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
+ return xfs_file_iomap_begin_delay(inode, offset, length, flags,
+ iomap);
}
/*
@@ -1038,8 +1068,7 @@ xfs_file_iomap_begin(
if (flags & IOMAP_REPORT) {
/* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
- &trimmed);
+ error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
if (error)
goto out_unlock;
}
@@ -1065,7 +1094,7 @@ xfs_file_iomap_begin(
if (error)
goto out_unlock;
} else {
- error = xfs_reflink_reserve_cow(ip, &imap, &shared);
+ error = xfs_reflink_reserve_cow(ip, &imap);
if (error)
goto out_unlock;
}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 42ea7bab9144..8eaeec9d58ed 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -182,8 +182,7 @@ int
xfs_reflink_trim_around_shared(
struct xfs_inode *ip,
struct xfs_bmbt_irec *irec,
- bool *shared,
- bool *trimmed)
+ bool *shared)
{
xfs_agnumber_t agno;
xfs_agblock_t agbno;
@@ -209,7 +208,7 @@ xfs_reflink_trim_around_shared(
if (error)
return error;
- *shared = *trimmed = false;
+ *shared = false;
if (fbno == NULLAGBLOCK) {
/* No shared blocks at all. */
return 0;
@@ -222,8 +221,6 @@ xfs_reflink_trim_around_shared(
*/
irec->br_blockcount = flen;
*shared = true;
- if (flen != aglen)
- *trimmed = true;
return 0;
} else {
/*
@@ -233,7 +230,6 @@ xfs_reflink_trim_around_shared(
* start of the shared region.
*/
irec->br_blockcount = fbno - agbno;
- *trimmed = true;
return 0;
}
}
@@ -241,7 +237,7 @@ xfs_reflink_trim_around_shared(
/*
* Trim the passed in imap to the next shared/unshared extent boundary, and
* if imap->br_startoff points to a shared extent reserve space for it in the
- * COW fork. In this case *shared is set to true, else to false.
+ * COW fork.
*
* Note that imap will always contain the block numbers for the existing blocks
* in the data fork, as the upper layers need them for read-modify-write
@@ -250,14 +246,14 @@ xfs_reflink_trim_around_shared(
int
xfs_reflink_reserve_cow(
struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap,
- bool *shared)
+ struct xfs_bmbt_irec *imap)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got;
int error = 0;
- bool eof = false, trimmed;
+ bool eof = false;
struct xfs_iext_cursor icur;
+ bool shared;
/*
* Search the COW fork extent list first. This serves two purposes:
@@ -273,18 +269,16 @@ xfs_reflink_reserve_cow(
if (!eof && got.br_startoff <= imap->br_startoff) {
trace_xfs_reflink_cow_found(ip, imap);
xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
-
- *shared = true;
return 0;
}
/* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
+ error = xfs_reflink_trim_around_shared(ip, imap, &shared);
if (error)
return error;
/* Not shared? Just report the (potentially capped) extent. */
- if (!*shared)
+ if (!shared)
return 0;
/*
@@ -368,7 +362,6 @@ xfs_find_trim_cow_extent(
xfs_filblks_t count_fsb = imap->br_blockcount;
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec got;
- bool trimmed;
*found = false;
@@ -376,9 +369,13 @@ xfs_find_trim_cow_extent(
* If we don't find an overlapping extent, trim the range we need to
* allocate to fit the hole we found.
*/
- if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) ||
- got.br_startoff > offset_fsb)
- return xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
+ if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
+ got.br_startoff = offset_fsb + count_fsb;
+ if (got.br_startoff > offset_fsb) {
+ xfs_trim_extent(imap, imap->br_startoff,
+ got.br_startoff - imap->br_startoff);
+ return xfs_reflink_trim_around_shared(ip, imap, shared);
+ }
*shared = true;
if (isnullstartblock(got.br_startblock)) {
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index c585ad9552b2..7f47202b5639 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -10,10 +10,10 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t aglen,
xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
- struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
+ struct xfs_bmbt_irec *irec, bool *shared);
extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap, bool *shared);
+ struct xfs_bmbt_irec *imap);
extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode);
extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 4e4423153071..cc509743facd 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -29,30 +29,30 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
char *desc;
int endpoint;
} xstats[] = {
- { "extent_alloc", XFSSTAT_END_EXTENT_ALLOC },
- { "abt", XFSSTAT_END_ALLOC_BTREE },
- { "blk_map", XFSSTAT_END_BLOCK_MAPPING },
- { "bmbt", XFSSTAT_END_BLOCK_MAP_BTREE },
- { "dir", XFSSTAT_END_DIRECTORY_OPS },
- { "trans", XFSSTAT_END_TRANSACTIONS },
- { "ig", XFSSTAT_END_INODE_OPS },
- { "log", XFSSTAT_END_LOG_OPS },
- { "push_ail", XFSSTAT_END_TAIL_PUSHING },
- { "xstrat", XFSSTAT_END_WRITE_CONVERT },
- { "rw", XFSSTAT_END_READ_WRITE_OPS },
- { "attr", XFSSTAT_END_ATTRIBUTE_OPS },
- { "icluster", XFSSTAT_END_INODE_CLUSTER },
- { "vnodes", XFSSTAT_END_VNODE_OPS },
- { "buf", XFSSTAT_END_BUF },
- { "abtb2", XFSSTAT_END_ABTB_V2 },
- { "abtc2", XFSSTAT_END_ABTC_V2 },
- { "bmbt2", XFSSTAT_END_BMBT_V2 },
- { "ibt2", XFSSTAT_END_IBT_V2 },
- { "fibt2", XFSSTAT_END_FIBT_V2 },
- { "rmapbt", XFSSTAT_END_RMAP_V2 },
- { "refcntbt", XFSSTAT_END_REFCOUNT },
+ { "extent_alloc", xfsstats_offset(xs_abt_lookup) },
+ { "abt", xfsstats_offset(xs_blk_mapr) },
+ { "blk_map", xfsstats_offset(xs_bmbt_lookup) },
+ { "bmbt", xfsstats_offset(xs_dir_lookup) },
+ { "dir", xfsstats_offset(xs_trans_sync) },
+ { "trans", xfsstats_offset(xs_ig_attempts) },
+ { "ig", xfsstats_offset(xs_log_writes) },
+ { "log", xfsstats_offset(xs_try_logspace)},
+ { "push_ail", xfsstats_offset(xs_xstrat_quick)},
+ { "xstrat", xfsstats_offset(xs_write_calls) },
+ { "rw", xfsstats_offset(xs_attr_get) },
+ { "attr", xfsstats_offset(xs_iflush_count)},
+ { "icluster", xfsstats_offset(vn_active) },
+ { "vnodes", xfsstats_offset(xb_get) },
+ { "buf", xfsstats_offset(xs_abtb_2) },
+ { "abtb2", xfsstats_offset(xs_abtc_2) },
+ { "abtc2", xfsstats_offset(xs_bmbt_2) },
+ { "bmbt2", xfsstats_offset(xs_ibt_2) },
+ { "ibt2", xfsstats_offset(xs_fibt_2) },
+ { "fibt2", xfsstats_offset(xs_rmap_2) },
+ { "rmapbt", xfsstats_offset(xs_refcbt_2) },
+ { "refcntbt", xfsstats_offset(xs_qm_dqreclaims)},
/* we print both series of quota information together */
- { "qm", XFSSTAT_END_QM },
+ { "qm", xfsstats_offset(xs_xstrat_bytes)},
};
/* Loop over all stats groups */
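The table now pairs each group name with the array index where the group ends. A simplified sketch of how that {desc, endpoint} table is consumed follows; it is illustrative, not the verbatim xfs_stats_format() body, and buf/buflen/len are assumed local variables:

	/* Simplified sketch of the consumer loop, for illustration only. */
	int i, j = 0;

	for (i = 0; i < ARRAY_SIZE(xstats); i++) {
		/* group name first ... */
		len += scnprintf(buf + len, buflen - len, "%s", xstats[i].desc);
		/* ... then every counter up to this group's endpoint index */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, buflen - len, " %u",
					 counter_val(stats, j));
		len += scnprintf(buf + len, buflen - len, "\n");
	}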
@@ -104,6 +104,10 @@ void xfs_stats_clearall(struct xfsstats __percpu *stats)
#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA
+
+#define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
+#define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)
+
static int xqm_proc_show(struct seq_file *m, void *v)
{
/* maximum; incore; ratio free to inuse; freelist */
@@ -119,7 +123,7 @@ static int xqmstat_proc_show(struct seq_file *m, void *v)
int j;
seq_printf(m, "qm");
- for (j = XFSSTAT_END_IBT_V2; j < XFSSTAT_END_XQMSTAT; j++)
+ for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
seq_putc(m, '\n');
return 0;
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index 130db070e4d8..34d704f703d2 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -41,17 +41,14 @@ enum {
* XFS global statistics
*/
struct __xfsstats {
-# define XFSSTAT_END_EXTENT_ALLOC 4
uint32_t xs_allocx;
uint32_t xs_allocb;
uint32_t xs_freex;
uint32_t xs_freeb;
-# define XFSSTAT_END_ALLOC_BTREE (XFSSTAT_END_EXTENT_ALLOC+4)
uint32_t xs_abt_lookup;
uint32_t xs_abt_compare;
uint32_t xs_abt_insrec;
uint32_t xs_abt_delrec;
-# define XFSSTAT_END_BLOCK_MAPPING (XFSSTAT_END_ALLOC_BTREE+7)
uint32_t xs_blk_mapr;
uint32_t xs_blk_mapw;
uint32_t xs_blk_unmap;
@@ -59,21 +56,17 @@ struct __xfsstats {
uint32_t xs_del_exlist;
uint32_t xs_look_exlist;
uint32_t xs_cmp_exlist;
-# define XFSSTAT_END_BLOCK_MAP_BTREE (XFSSTAT_END_BLOCK_MAPPING+4)
uint32_t xs_bmbt_lookup;
uint32_t xs_bmbt_compare;
uint32_t xs_bmbt_insrec;
uint32_t xs_bmbt_delrec;
-# define XFSSTAT_END_DIRECTORY_OPS (XFSSTAT_END_BLOCK_MAP_BTREE+4)
uint32_t xs_dir_lookup;
uint32_t xs_dir_create;
uint32_t xs_dir_remove;
uint32_t xs_dir_getdents;
-# define XFSSTAT_END_TRANSACTIONS (XFSSTAT_END_DIRECTORY_OPS+3)
uint32_t xs_trans_sync;
uint32_t xs_trans_async;
uint32_t xs_trans_empty;
-# define XFSSTAT_END_INODE_OPS (XFSSTAT_END_TRANSACTIONS+7)
uint32_t xs_ig_attempts;
uint32_t xs_ig_found;
uint32_t xs_ig_frecycle;
@@ -81,13 +74,11 @@ struct __xfsstats {
uint32_t xs_ig_dup;
uint32_t xs_ig_reclaims;
uint32_t xs_ig_attrchg;
-# define XFSSTAT_END_LOG_OPS (XFSSTAT_END_INODE_OPS+5)
uint32_t xs_log_writes;
uint32_t xs_log_blocks;
uint32_t xs_log_noiclogs;
uint32_t xs_log_force;
uint32_t xs_log_force_sleep;
-# define XFSSTAT_END_TAIL_PUSHING (XFSSTAT_END_LOG_OPS+10)
uint32_t xs_try_logspace;
uint32_t xs_sleep_logspace;
uint32_t xs_push_ail;
@@ -98,22 +89,17 @@ struct __xfsstats {
uint32_t xs_push_ail_flushing;
uint32_t xs_push_ail_restarts;
uint32_t xs_push_ail_flush;
-# define XFSSTAT_END_WRITE_CONVERT (XFSSTAT_END_TAIL_PUSHING+2)
uint32_t xs_xstrat_quick;
uint32_t xs_xstrat_split;
-# define XFSSTAT_END_READ_WRITE_OPS (XFSSTAT_END_WRITE_CONVERT+2)
uint32_t xs_write_calls;
uint32_t xs_read_calls;
-# define XFSSTAT_END_ATTRIBUTE_OPS (XFSSTAT_END_READ_WRITE_OPS+4)
uint32_t xs_attr_get;
uint32_t xs_attr_set;
uint32_t xs_attr_remove;
uint32_t xs_attr_list;
-# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_ATTRIBUTE_OPS+3)
uint32_t xs_iflush_count;
uint32_t xs_icluster_flushcnt;
uint32_t xs_icluster_flushinode;
-# define XFSSTAT_END_VNODE_OPS (XFSSTAT_END_INODE_CLUSTER+8)
uint32_t vn_active; /* # vnodes not on free lists */
uint32_t vn_alloc; /* # times vn_alloc called */
uint32_t vn_get; /* # times vn_get called */
@@ -122,7 +108,6 @@ struct __xfsstats {
uint32_t vn_reclaim; /* # times vn_reclaim called */
uint32_t vn_remove; /* # times vn_remove called */
uint32_t vn_free; /* # times vn_free called */
-#define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9)
uint32_t xb_get;
uint32_t xb_create;
uint32_t xb_get_locked;
@@ -133,28 +118,19 @@ struct __xfsstats {
uint32_t xb_page_found;
uint32_t xb_get_read;
/* Version 2 btree counters */
-#define XFSSTAT_END_ABTB_V2 (XFSSTAT_END_BUF + __XBTS_MAX)
uint32_t xs_abtb_2[__XBTS_MAX];
-#define XFSSTAT_END_ABTC_V2 (XFSSTAT_END_ABTB_V2 + __XBTS_MAX)
uint32_t xs_abtc_2[__XBTS_MAX];
-#define XFSSTAT_END_BMBT_V2 (XFSSTAT_END_ABTC_V2 + __XBTS_MAX)
uint32_t xs_bmbt_2[__XBTS_MAX];
-#define XFSSTAT_END_IBT_V2 (XFSSTAT_END_BMBT_V2 + __XBTS_MAX)
uint32_t xs_ibt_2[__XBTS_MAX];
-#define XFSSTAT_END_FIBT_V2 (XFSSTAT_END_IBT_V2 + __XBTS_MAX)
uint32_t xs_fibt_2[__XBTS_MAX];
-#define XFSSTAT_END_RMAP_V2 (XFSSTAT_END_FIBT_V2 + __XBTS_MAX)
uint32_t xs_rmap_2[__XBTS_MAX];
-#define XFSSTAT_END_REFCOUNT (XFSSTAT_END_RMAP_V2 + __XBTS_MAX)
uint32_t xs_refcbt_2[__XBTS_MAX];
-#define XFSSTAT_END_XQMSTAT (XFSSTAT_END_REFCOUNT + 6)
uint32_t xs_qm_dqreclaims;
uint32_t xs_qm_dqreclaim_misses;
uint32_t xs_qm_dquot_dups;
uint32_t xs_qm_dqcachemisses;
uint32_t xs_qm_dqcachehits;
uint32_t xs_qm_dqwants;
-#define XFSSTAT_END_QM (XFSSTAT_END_XQMSTAT+2)
uint32_t xs_qm_dquot;
uint32_t xs_qm_dquot_unused;
/* Extra precision counters */
@@ -163,10 +139,12 @@ struct __xfsstats {
uint64_t xs_read_bytes;
};
+#define xfsstats_offset(f) (offsetof(struct __xfsstats, f)/sizeof(uint32_t))
+
struct xfsstats {
union {
struct __xfsstats s;
- uint32_t a[XFSSTAT_END_XQMSTAT];
+ uint32_t a[xfsstats_offset(xs_qm_dquot)];
};
};
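For intuition on the xfsstats_offset() macro above, a standalone user-space sketch with a cut-down stand-in struct (not the real struct __xfsstats) shows the arithmetic: the byte offset of a field divided by sizeof(uint32_t) yields that field's index in the flat counter array, so the field that follows four uint32_t counters lands at index 4, matching the old hard-coded XFSSTAT_END_EXTENT_ALLOC value.

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Cut-down stand-in for struct __xfsstats, for illustration only. */
	struct demo_stats {
		uint32_t xs_allocx;
		uint32_t xs_allocb;
		uint32_t xs_freex;
		uint32_t xs_freeb;
		uint32_t xs_abt_lookup;	/* first counter of the next group */
	};

	#define demo_offset(f) (offsetof(struct demo_stats, f) / sizeof(uint32_t))

	int main(void)
	{
		/* Prints 4: the "extent_alloc" group ends where xs_abt_lookup begins. */
		printf("%zu\n", demo_offset(xs_abt_lookup));
		return 0;
	}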
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 207ee302b1bb..d3e6cd063688 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -43,6 +43,7 @@
#include <linux/dax.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/magic.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
@@ -933,6 +934,32 @@ xfs_fs_alloc_inode(
return NULL;
}
+#ifdef DEBUG
+static void
+xfs_check_delalloc(
+ struct xfs_inode *ip,
+ int whichfork)
+{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_bmbt_irec got;
+ struct xfs_iext_cursor icur;
+
+ if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
+ return;
+ do {
+ if (isnullstartblock(got.br_startblock)) {
+ xfs_warn(ip->i_mount,
+ "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
+ ip->i_ino,
+ whichfork == XFS_DATA_FORK ? "data" : "cow",
+ got.br_startoff, got.br_blockcount);
+ }
+ } while (xfs_iext_next_extent(ifp, &icur, &got));
+}
+#else
+#define xfs_check_delalloc(ip, whichfork) do { } while (0)
+#endif
+
/*
* Now that the generic code is guaranteed not to be accessing
* the linux inode, we can inactivate and reclaim the inode.
@@ -951,7 +978,12 @@ xfs_fs_destroy_inode(
xfs_inactive(ip);
- ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
+ xfs_check_delalloc(ip, XFS_DATA_FORK);
+ xfs_check_delalloc(ip, XFS_COW_FORK);
+ ASSERT(0);
+ }
+
XFS_STATS_INC(ip->i_mount, vn_reclaim);
/*
@@ -1097,7 +1129,7 @@ xfs_fs_statfs(
xfs_extlen_t lsize;
int64_t ffree;
- statp->f_type = XFS_SB_MAGIC;
+ statp->f_type = XFS_SUPER_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
@@ -1650,7 +1682,7 @@ xfs_fs_fill_super(
* we must configure the block size in the superblock before we run the
* full mount process as the mount process can lookup and cache inodes.
*/
- sb->s_magic = XFS_SB_MAGIC;
+ sb->s_magic = XFS_SUPER_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
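Since both statfs()->f_type and sb->s_magic are now filled from the shared XFS_SUPER_MAGIC definition in <linux/magic.h>, a small Linux-specific user-space check (illustrative, not part of the patch) can confirm a mount is XFS:

	#include <stdio.h>
	#include <sys/vfs.h>
	#include <linux/magic.h>	/* XFS_SUPER_MAGIC */

	int main(int argc, char **argv)
	{
		struct statfs sfs;
		const char *path = argc > 1 ? argv[1] : ".";

		if (statfs(path, &sfs) != 0) {
			perror("statfs");
			return 1;
		}
		/* f_type mirrors sb->s_magic, i.e. XFS_SUPER_MAGIC on XFS. */
		printf("%s is %sXFS\n", path,
		       sfs.f_type == XFS_SUPER_MAGIC ? "" : "not ");
		return 0;
	}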
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index c3d278e96ad1..a0c5dbda18aa 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -220,6 +220,7 @@ void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_log_buf(struct xfs_trans *, struct xfs_buf *, uint,
uint);
void xfs_trans_dirty_buf(struct xfs_trans *, struct xfs_buf *);
+bool xfs_trans_buf_is_dirty(struct xfs_buf *bp);
void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
void xfs_extent_free_init_defer_op(void);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 55326f971cb3..d3a4e89bf4a0 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -531,17 +531,33 @@ xfsaild(
set_current_state(TASK_INTERRUPTIBLE);
/*
- * Check kthread_should_stop() after we set the task state
- * to guarantee that we either see the stop bit and exit or
- * the task state is reset to runnable such that it's not
- * scheduled out indefinitely and detects the stop bit at
- * next iteration.
- *
+ * Check kthread_should_stop() after we set the task state to
+ * guarantee that we either see the stop bit and exit or the
+ * task state is reset to runnable such that it's not scheduled
+ * out indefinitely and detects the stop bit at next iteration.
 * A memory barrier is included in the above task state set to
 * serialize against kthread_stop().
*/
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
+
+ /*
+ * The caller forces out the AIL before stopping the
+ * thread in the common case, which means the delwri
+ * queue is drained. In the shutdown case, the queue may
+ * still hold relogged buffers that haven't been
+ * submitted because they were pinned since added to the
+ * queue.
+ *
+ * Log I/O error processing stales the underlying buffer
+ * and clears the delwri state, expecting the buf to be
+ * removed on the next submission attempt. That won't
+ * happen if we're shutting down, so this is the last
+ * opportunity to release such buffers from the queue.
+ */
+ ASSERT(list_empty(&ailp->ail_buf_list) ||
+ XFS_FORCED_SHUTDOWN(ailp->ail_mount));
+ xfs_buf_delwri_cancel(&ailp->ail_buf_list);
break;
}
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 286a287ac57a..629f1479c9d2 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -264,11 +264,39 @@ xfs_trans_read_buf_map(
return -EIO;
}
+ /*
+ * Check if the caller is trying to read a buffer that is
+ * already attached to the transaction yet has no buffer ops
+ * assigned. Ops are usually attached when the buffer is
+ * attached to the transaction, or by the read caller in
+ * special circumstances. That didn't happen, which is not
+ * how this is supposed to go.
+ *
+ * If the buffer passes verification we'll let this go, but if
+ * not we have to shut down. Let the transaction cleanup code
+ * release this buffer when it kills the transaction.
+ */
+ ASSERT(bp->b_ops != NULL);
+ error = xfs_buf_ensure_ops(bp, ops);
+ if (error) {
+ xfs_buf_ioerror_alert(bp, __func__);
+
+ if (tp->t_flags & XFS_TRANS_DIRTY)
+ xfs_force_shutdown(tp->t_mountp,
+ SHUTDOWN_META_IO_ERROR);
+
+ /* bad CRC means corrupted metadata */
+ if (error == -EFSBADCRC)
+ error = -EFSCORRUPTED;
+ return error;
+ }
+
bip = bp->b_log_item;
bip->bli_recur++;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_trans_read_buf_recur(bip);
+ ASSERT(bp->b_ops != NULL || ops == NULL);
*bpp = bp;
return 0;
}
@@ -316,11 +344,25 @@ xfs_trans_read_buf_map(
_xfs_trans_bjoin(tp, bp, 1);
trace_xfs_trans_read_buf(bp->b_log_item);
}
+ ASSERT(bp->b_ops != NULL || ops == NULL);
*bpp = bp;
return 0;
}
+/* Has this buffer been dirtied by anyone? */
+bool
+xfs_trans_buf_is_dirty(
+ struct xfs_buf *bp)
+{
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+
+ if (!bip)
+ return false;
+ ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
+ return test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
+}
+
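The new helper only inspects the buffer's attached log item, so any code that already holds the buffer can ask whether a transaction has pending changes on it. A hypothetical caller, with the function name and context invented purely for illustration:

	/* Hypothetical example: leave buffers alone that a transaction has dirtied. */
	static void
	example_maybe_process(
		struct xfs_buf	*bp)
	{
		if (xfs_trans_buf_is_dirty(bp)) {
			/* a transaction owns pending changes; do not touch the buffer */
			return;
		}
		/* ... safe to act on the current buffer contents here ... */
	}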
/*
* Release a buffer previously joined to the transaction. If the buffer is
* modified within this transaction, decrement the recursion count but do not