Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/acl.c | 6
-rw-r--r--  fs/9p/fid.c | 19
-rw-r--r--  fs/9p/v9fs.h | 6
-rw-r--r--  fs/9p/vfs_file.c | 13
-rw-r--r--  fs/9p/vfs_inode.c | 29
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 22
-rw-r--r--  fs/9p/vfs_super.c | 2
-rw-r--r--  fs/adfs/adfs.h | 25
-rw-r--r--  fs/adfs/dir_f.c | 23
-rw-r--r--  fs/adfs/dir_fplus.c | 119
-rw-r--r--  fs/adfs/inode.c | 63
-rw-r--r--  fs/adfs/map.c | 2
-rw-r--r--  fs/adfs/super.c | 23
-rw-r--r--  fs/affs/file.c | 2
-rw-r--r--  fs/afs/cache.c | 12
-rw-r--r--  fs/afs/cell.c | 2
-rw-r--r--  fs/aio.c | 81
-rw-r--r--  fs/attr.c | 6
-rw-r--r--  fs/autofs4/autofs_i.h | 2
-rw-r--r--  fs/autofs4/dev-ioctl.c | 4
-rw-r--r--  fs/autofs4/expire.c | 84
-rw-r--r--  fs/autofs4/root.c | 64
-rw-r--r--  fs/autofs4/waitq.c | 6
-rw-r--r--  fs/befs/ChangeLog | 10
-rw-r--r--  fs/befs/befs_fs_types.h | 2
-rw-r--r--  fs/befs/btree.c | 2
-rw-r--r--  fs/befs/linuxvfs.c | 3
-rw-r--r--  fs/bfs/file.c | 1
-rw-r--r--  fs/binfmt_elf.c | 4
-rw-r--r--  fs/binfmt_flat.c | 2
-rw-r--r--  fs/bio-integrity.c | 3
-rw-r--r--  fs/bio.c | 14
-rw-r--r--  fs/block_dev.c | 35
-rw-r--r--  fs/btrfs/acl.c | 2
-rw-r--r--  fs/btrfs/btrfs_inode.h | 3
-rw-r--r--  fs/btrfs/compression.c | 17
-rw-r--r--  fs/btrfs/ctree.c | 159
-rw-r--r--  fs/btrfs/ctree.h | 23
-rw-r--r--  fs/btrfs/delayed-ref.c | 6
-rw-r--r--  fs/btrfs/dir-item.c | 45
-rw-r--r--  fs/btrfs/disk-io.c | 215
-rw-r--r--  fs/btrfs/extent-tree.c | 229
-rw-r--r--  fs/btrfs/extent_io.c | 5
-rw-r--r--  fs/btrfs/extent_io.h | 1
-rw-r--r--  fs/btrfs/extent_map.c | 2
-rw-r--r--  fs/btrfs/file-item.c | 5
-rw-r--r--  fs/btrfs/file.c | 388
-rw-r--r--  fs/btrfs/free-space-cache.c | 594
-rw-r--r--  fs/btrfs/free-space-cache.h | 2
-rw-r--r--  fs/btrfs/inode-map.c | 3
-rw-r--r--  fs/btrfs/inode.c | 394
-rw-r--r--  fs/btrfs/ioctl.c | 110
-rw-r--r--  fs/btrfs/ordered-data.c | 8
-rw-r--r--  fs/btrfs/relocation.c | 10
-rw-r--r--  fs/btrfs/root-tree.c | 24
-rw-r--r--  fs/btrfs/super.c | 24
-rw-r--r--  fs/btrfs/transaction.c | 16
-rw-r--r--  fs/btrfs/tree-log.c | 57
-rw-r--r--  fs/btrfs/volumes.c | 235
-rw-r--r--  fs/btrfs/volumes.h | 12
-rw-r--r--  fs/btrfs/xattr.c | 2
-rw-r--r--  fs/btrfs/zlib.c | 3
-rw-r--r--  fs/buffer.c | 53
-rw-r--r--  fs/cachefiles/interface.c | 2
-rw-r--r--  fs/ceph/addr.c | 4
-rw-r--r--  fs/ceph/caps.c | 2
-rw-r--r--  fs/ceph/debugfs.c | 6
-rw-r--r--  fs/ceph/dir.c | 24
-rw-r--r--  fs/ceph/file.c | 10
-rw-r--r--  fs/ceph/inode.c | 25
-rw-r--r--  fs/ceph/mds_client.c | 6
-rw-r--r--  fs/ceph/snap.c | 6
-rw-r--r--  fs/ceph/super.c | 11
-rw-r--r--  fs/ceph/super.h | 66
-rw-r--r--  fs/cifs/AUTHORS | 2
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 2
-rw-r--r--  fs/cifs/cifssmb.c | 2
-rw-r--r--  fs/cifs/connect.c | 4
-rw-r--r--  fs/cifs/dir.c | 2
-rw-r--r--  fs/cifs/file.c | 30
-rw-r--r--  fs/coda/sysctl.c | 17
-rw-r--r--  fs/configfs/dir.c | 2
-rw-r--r--  fs/devpts/inode.c | 2
-rw-r--r--  fs/direct-io.c | 7
-rw-r--r--  fs/dlm/lock.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 2
-rw-r--r--  fs/dlm/recover.c | 2
-rw-r--r--  fs/drop_caches.c | 24
-rw-r--r--  fs/ecryptfs/crypto.c | 2
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 30
-rw-r--r--  fs/ecryptfs/file.c | 9
-rw-r--r--  fs/ecryptfs/inode.c | 24
-rw-r--r--  fs/ecryptfs/keystore.c | 272
-rw-r--r--  fs/ecryptfs/main.c | 14
-rw-r--r--  fs/ecryptfs/mmap.c | 61
-rw-r--r--  fs/ecryptfs/read_write.c | 12
-rw-r--r--  fs/ecryptfs/super.c | 3
-rw-r--r--  fs/efs/inode.c | 1
-rw-r--r--  fs/eventpoll.c | 60
-rw-r--r--  fs/exofs/common.h | 22
-rw-r--r--  fs/exofs/dir.c | 33
-rw-r--r--  fs/exofs/exofs.h | 6
-rw-r--r--  fs/exofs/file.c | 16
-rw-r--r--  fs/exofs/inode.c | 52
-rw-r--r--  fs/exofs/super.c | 190
-rw-r--r--  fs/ext2/acl.c | 2
-rw-r--r--  fs/ext2/balloc.c | 6
-rw-r--r--  fs/ext2/ext2.h | 6
-rw-r--r--  fs/ext2/inode.c | 10
-rw-r--r--  fs/ext2/ioctl.c | 6
-rw-r--r--  fs/ext2/super.c | 2
-rw-r--r--  fs/ext2/xattr.c | 2
-rw-r--r--  fs/ext3/acl.c | 2
-rw-r--r--  fs/ext3/balloc.c | 10
-rw-r--r--  fs/ext3/inode.c | 9
-rw-r--r--  fs/ext3/ioctl.c | 6
-rw-r--r--  fs/ext3/resize.c | 2
-rw-r--r--  fs/ext3/super.c | 2
-rw-r--r--  fs/ext4/acl.c | 2
-rw-r--r--  fs/ext4/balloc.c | 5
-rw-r--r--  fs/ext4/ext4.h | 12
-rw-r--r--  fs/ext4/ext4_jbd2.h | 7
-rw-r--r--  fs/ext4/extents.c | 223
-rw-r--r--  fs/ext4/fsync.c | 16
-rw-r--r--  fs/ext4/ialloc.c | 8
-rw-r--r--  fs/ext4/inode.c | 434
-rw-r--r--  fs/ext4/ioctl.c | 15
-rw-r--r--  fs/ext4/mballoc.c | 36
-rw-r--r--  fs/ext4/mballoc.h | 2
-rw-r--r--  fs/ext4/migrate.c | 12
-rw-r--r--  fs/ext4/namei.c | 13
-rw-r--r--  fs/ext4/page-io.c | 16
-rw-r--r--  fs/ext4/resize.c | 12
-rw-r--r--  fs/ext4/super.c | 52
-rw-r--r--  fs/ext4/xattr.c | 4
-rw-r--r--  fs/fat/inode.c | 1
-rw-r--r--  fs/fcntl.c | 2
-rw-r--r--  fs/freevxfs/vxfs_fshead.c | 2
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 2
-rw-r--r--  fs/freevxfs/vxfs_olt.h | 2
-rw-r--r--  fs/freevxfs/vxfs_subr.c | 1
-rw-r--r--  fs/fs-writeback.c | 143
-rw-r--r--  fs/fuse/cuse.c | 12
-rw-r--r--  fs/fuse/dev.c | 27
-rw-r--r--  fs/fuse/dir.c | 38
-rw-r--r--  fs/fuse/file.c | 4
-rw-r--r--  fs/fuse/fuse_i.h | 1
-rw-r--r--  fs/fuse/inode.c | 1
-rw-r--r--  fs/generic_acl.c | 2
-rw-r--r--  fs/gfs2/aops.c | 3
-rw-r--r--  fs/gfs2/bmap.c | 2
-rw-r--r--  fs/gfs2/file.c | 2
-rw-r--r--  fs/gfs2/glock.c | 2
-rw-r--r--  fs/gfs2/log.c | 4
-rw-r--r--  fs/gfs2/lops.c | 12
-rw-r--r--  fs/gfs2/meta_io.c | 3
-rw-r--r--  fs/gfs2/super.c | 2
-rw-r--r--  fs/hfs/inode.c | 2
-rw-r--r--  fs/hfsplus/inode.c | 2
-rw-r--r--  fs/hfsplus/ioctl.c | 2
-rw-r--r--  fs/hpfs/file.c | 1
-rw-r--r--  fs/hugetlbfs/inode.c | 3
-rw-r--r--  fs/inode.c | 685
-rw-r--r--  fs/internal.h | 7
-rw-r--r--  fs/isofs/inode.c | 1
-rw-r--r--  fs/jbd/commit.c | 24
-rw-r--r--  fs/jbd/journal.c | 4
-rw-r--r--  fs/jbd/revoke.c | 2
-rw-r--r--  fs/jbd/transaction.c | 2
-rw-r--r--  fs/jbd2/commit.c | 24
-rw-r--r--  fs/jbd2/journal.c | 4
-rw-r--r--  fs/jbd2/revoke.c | 2
-rw-r--r--  fs/jbd2/transaction.c | 2
-rw-r--r--  fs/jffs2/TODO | 2
-rw-r--r--  fs/jffs2/acl.c | 2
-rw-r--r--  fs/jffs2/compr_zlib.c | 7
-rw-r--r--  fs/jffs2/readinode.c | 2
-rw-r--r--  fs/jffs2/summary.c | 4
-rw-r--r--  fs/jffs2/wbuf.c | 2
-rw-r--r--  fs/jffs2/xattr.c | 2
-rw-r--r--  fs/jfs/inode.c | 1
-rw-r--r--  fs/jfs/ioctl.c | 2
-rw-r--r--  fs/jfs/jfs_dmap.c | 4
-rw-r--r--  fs/jfs/jfs_extent.c | 6
-rw-r--r--  fs/jfs/jfs_imap.c | 14
-rw-r--r--  fs/jfs/jfs_logmgr.h | 2
-rw-r--r--  fs/jfs/jfs_metapage.c | 1
-rw-r--r--  fs/jfs/jfs_metapage.h | 2
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 2
-rw-r--r--  fs/jfs/resize.c | 4
-rw-r--r--  fs/jfs/super.c | 2
-rw-r--r--  fs/jfs/xattr.c | 2
-rw-r--r--  fs/locks.c | 12
-rw-r--r--  fs/logfs/compr.c | 2
-rw-r--r--  fs/logfs/dev_bdev.c | 2
-rw-r--r--  fs/logfs/dev_mtd.c | 2
-rw-r--r--  fs/logfs/dir.c | 2
-rw-r--r--  fs/logfs/file.c | 2
-rw-r--r--  fs/logfs/inode.c | 2
-rw-r--r--  fs/logfs/readwrite.c | 2
-rw-r--r--  fs/mbcache.c | 2
-rw-r--r--  fs/minix/Kconfig | 8
-rw-r--r--  fs/minix/inode.c | 1
-rw-r--r--  fs/minix/minix.h | 74
-rw-r--r--  fs/mpage.c | 8
-rw-r--r--  fs/namei.c | 55
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/ncpfs/inode.c | 2
-rw-r--r--  fs/nfs/callback_xdr.c | 2
-rw-r--r--  fs/nfs/dir.c | 89
-rw-r--r--  fs/nfs/file.c | 5
-rw-r--r--  fs/nfs/getroot.c | 4
-rw-r--r--  fs/nfs/inode.c | 10
-rw-r--r--  fs/nfs/internal.h | 27
-rw-r--r--  fs/nfs/namespace.c | 117
-rw-r--r--  fs/nfs/nfs3proc.c | 2
-rw-r--r--  fs/nfs/nfs4_fs.h | 5
-rw-r--r--  fs/nfs/nfs4filelayout.c | 352
-rw-r--r--  fs/nfs/nfs4filelayout.h | 4
-rw-r--r--  fs/nfs/nfs4filelayoutdev.c | 178
-rw-r--r--  fs/nfs/nfs4proc.c | 302
-rw-r--r--  fs/nfs/nfs4state.c | 3
-rw-r--r--  fs/nfs/nfs4xdr.c | 313
-rw-r--r--  fs/nfs/pagelist.c | 12
-rw-r--r--  fs/nfs/pnfs.c | 142
-rw-r--r--  fs/nfs/pnfs.h | 83
-rw-r--r--  fs/nfs/proc.c | 2
-rw-r--r--  fs/nfs/write.c | 227
-rw-r--r--  fs/nfs_common/nfsacl.c | 3
-rw-r--r--  fs/nfsd/export.c | 1
-rw-r--r--  fs/nfsd/nfs3xdr.c | 2
-rw-r--r--  fs/nfsd/nfs4idmap.c | 1
-rw-r--r--  fs/nfsd/nfs4proc.c | 4
-rw-r--r--  fs/nfsd/nfs4state.c | 164
-rw-r--r--  fs/nfsd/nfs4xdr.c | 5
-rw-r--r--  fs/nfsd/nfsctl.c | 35
-rw-r--r--  fs/nfsd/nfsxdr.c | 2
-rw-r--r--  fs/nfsd/state.h | 12
-rw-r--r--  fs/nfsd/vfs.c | 2
-rw-r--r--  fs/nilfs2/alloc.h | 2
-rw-r--r--  fs/nilfs2/btnode.c | 7
-rw-r--r--  fs/nilfs2/file.c | 11
-rw-r--r--  fs/nilfs2/gcinode.c | 1
-rw-r--r--  fs/nilfs2/inode.c | 1
-rw-r--r--  fs/nilfs2/ioctl.c | 2
-rw-r--r--  fs/nilfs2/mdt.c | 9
-rw-r--r--  fs/nilfs2/nilfs.h | 14
-rw-r--r--  fs/nilfs2/page.c | 5
-rw-r--r--  fs/nilfs2/page.h | 3
-rw-r--r--  fs/nilfs2/segbuf.c | 2
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 2
-rw-r--r--  fs/notify/inode_mark.c | 42
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c | 3
-rw-r--r--  fs/notify/inotify/inotify_user.c | 39
-rw-r--r--  fs/notify/mark.c | 3
-rw-r--r--  fs/notify/vfsmount_mark.c | 1
-rw-r--r--  fs/ntfs/aops.c | 4
-rw-r--r--  fs/ntfs/attrib.c | 4
-rw-r--r--  fs/ntfs/compress.c | 5
-rw-r--r--  fs/ntfs/inode.c | 8
-rw-r--r--  fs/ntfs/layout.h | 12
-rw-r--r--  fs/ntfs/logfile.c | 2
-rw-r--r--  fs/ntfs/logfile.h | 2
-rw-r--r--  fs/ntfs/mft.c | 8
-rw-r--r--  fs/ntfs/runlist.c | 2
-rw-r--r--  fs/ntfs/super.c | 14
-rw-r--r--  fs/ocfs2/acl.c | 3
-rw-r--r--  fs/ocfs2/alloc.c | 216
-rw-r--r--  fs/ocfs2/aops.c | 83
-rw-r--r--  fs/ocfs2/aops.h | 2
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 49
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 9
-rw-r--r--  fs/ocfs2/cluster/masklog.c | 20
-rw-r--r--  fs/ocfs2/cluster/masklog.h | 105
-rw-r--r--  fs/ocfs2/cluster/quorum.c | 4
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 12
-rw-r--r--  fs/ocfs2/dcache.c | 45
-rw-r--r--  fs/ocfs2/dir.c | 121
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 6
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 36
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 9
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 4
-rw-r--r--  fs/ocfs2/dlmglue.c | 246
-rw-r--r--  fs/ocfs2/export.c | 47
-rw-r--r--  fs/ocfs2/extent_map.c | 10
-rw-r--r--  fs/ocfs2/file.c | 220
-rw-r--r--  fs/ocfs2/heartbeat.c | 4
-rw-r--r--  fs/ocfs2/inode.c | 138
-rw-r--r--  fs/ocfs2/ioctl.c | 43
-rw-r--r--  fs/ocfs2/journal.c | 170
-rw-r--r--  fs/ocfs2/journal.h | 2
-rw-r--r--  fs/ocfs2/localalloc.c | 109
-rw-r--r--  fs/ocfs2/locks.c | 1
-rw-r--r--  fs/ocfs2/mmap.c | 7
-rw-r--r--  fs/ocfs2/namei.c | 177
-rw-r--r--  fs/ocfs2/ocfs2.h | 33
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 4
-rw-r--r--  fs/ocfs2/ocfs2_trace.h | 2739
-rw-r--r--  fs/ocfs2/quota_global.c | 47
-rw-r--r--  fs/ocfs2/quota_local.c | 16
-rw-r--r--  fs/ocfs2/refcounttree.c | 158
-rw-r--r--  fs/ocfs2/reservations.c | 57
-rw-r--r--  fs/ocfs2/reservations.h | 2
-rw-r--r--  fs/ocfs2/resize.c | 23
-rw-r--r--  fs/ocfs2/slot_map.c | 16
-rw-r--r--  fs/ocfs2/stackglue.h | 2
-rw-r--r--  fs/ocfs2/suballoc.c | 193
-rw-r--r--  fs/ocfs2/super.c | 91
-rw-r--r--  fs/ocfs2/symlink.c | 14
-rw-r--r--  fs/ocfs2/sysfile.c | 1
-rw-r--r--  fs/ocfs2/uptodate.c | 73
-rw-r--r--  fs/ocfs2/xattr.c | 159
-rw-r--r--  fs/omfs/file.c | 1
-rw-r--r--  fs/partitions/check.c | 7
-rw-r--r--  fs/proc/array.c | 4
-rw-r--r--  fs/proc/base.c | 180
-rw-r--r--  fs/proc/generic.c | 8
-rw-r--r--  fs/proc/inode.c | 2
-rw-r--r--  fs/proc/internal.h | 1
-rw-r--r--  fs/proc/root.c | 32
-rw-r--r--  fs/proc/task_mmu.c | 138
-rw-r--r--  fs/proc/task_nommu.c | 6
-rw-r--r--  fs/pstore/Kconfig | 2
-rw-r--r--  fs/pstore/platform.c | 15
-rw-r--r--  fs/qnx4/inode.c | 1
-rw-r--r--  fs/quota/dquot.c | 43
-rw-r--r--  fs/reiserfs/inode.c | 1
-rw-r--r--  fs/reiserfs/ioctl.c | 4
-rw-r--r--  fs/reiserfs/journal.c | 4
-rw-r--r--  fs/reiserfs/lock.c | 2
-rw-r--r--  fs/reiserfs/super.c | 4
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/reiserfs/xattr_acl.c | 2
-rw-r--r--  fs/squashfs/Kconfig | 12
-rw-r--r--  fs/squashfs/cache.c | 4
-rw-r--r--  fs/squashfs/decompressor.c | 34
-rw-r--r--  fs/squashfs/decompressor.h | 7
-rw-r--r--  fs/squashfs/dir.c | 9
-rw-r--r--  fs/squashfs/lzo_wrapper.c | 4
-rw-r--r--  fs/squashfs/namei.c | 12
-rw-r--r--  fs/squashfs/squashfs.h | 1
-rw-r--r--  fs/squashfs/squashfs_fs.h | 4
-rw-r--r--  fs/squashfs/super.c | 15
-rw-r--r--  fs/squashfs/xz_wrapper.c | 53
-rw-r--r--  fs/squashfs/zlib_wrapper.c | 10
-rw-r--r--  fs/super.c | 2
-rw-r--r--  fs/sync.c | 4
-rw-r--r--  fs/sysv/itree.c | 1
-rw-r--r--  fs/ubifs/Kconfig | 11
-rw-r--r--  fs/ubifs/budget.c | 2
-rw-r--r--  fs/ubifs/commit.c | 2
-rw-r--r--  fs/ubifs/debug.c | 65
-rw-r--r--  fs/ubifs/file.c | 11
-rw-r--r--  fs/ubifs/ioctl.c | 2
-rw-r--r--  fs/ubifs/lprops.c | 2
-rw-r--r--  fs/ubifs/lpt.c | 7
-rw-r--r--  fs/ubifs/lpt_commit.c | 4
-rw-r--r--  fs/ubifs/orphan.c | 2
-rw-r--r--  fs/ubifs/super.c | 4
-rw-r--r--  fs/ubifs/xattr.c | 4
-rw-r--r--  fs/udf/balloc.c | 9
-rw-r--r--  fs/udf/file.c | 1
-rw-r--r--  fs/udf/inode.c | 1
-rw-r--r--  fs/ufs/inode.c | 3
-rw-r--r--  fs/ufs/super.c | 6
-rw-r--r--  fs/ufs/truncate.c | 1
-rw-r--r--  fs/ufs/util.h | 2
-rw-r--r--  fs/utimes.c | 2
-rw-r--r--  fs/xattr.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 361
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 40
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 36
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 32
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 4
-rw-r--r--  fs/xfs/xfs_buf_item.c | 2
-rw-r--r--  fs/xfs/xfs_inode.c | 4
-rw-r--r--  fs/xfs/xfs_inode.h | 4
-rw-r--r--  fs/xfs/xfs_inode_item.c | 6
-rw-r--r--  fs/xfs/xfs_log_priv.h | 2
-rw-r--r--  fs/xfs/xfs_log_recover.c | 4
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 3
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 2
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 7
-rw-r--r--  fs/xfs/xfs_vnodeops.h | 1
392 files changed, 10337 insertions(+), 5450 deletions(-)
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 515455296378..535ab6eccb1a 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -262,7 +262,7 @@ static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name,
if (strcmp(name, "") != 0)
return -EINVAL;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
/*
* We allow set/get/list of acl when access=client is not specified
*/
@@ -312,7 +312,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
if (strcmp(name, "") != 0)
return -EINVAL;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
/*
* set the attribute on the remote. Without even looking at the
* xattr value. We leave it to the server to validate
@@ -323,7 +323,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
/* update the cached acl value */
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index cd63e002d826..0ee594569dcc 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -134,7 +134,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
struct v9fs_session_info *v9ses;
struct p9_fid *fid, *old_fid = NULL;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
access = v9ses->flags & V9FS_ACCESS_MASK;
fid = v9fs_fid_find(dentry, uid, any);
if (fid)
@@ -237,7 +237,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
int any, access;
struct v9fs_session_info *v9ses;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
access = v9ses->flags & V9FS_ACCESS_MASK;
switch (access) {
case V9FS_ACCESS_SINGLE:
@@ -286,9 +286,11 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
{
- int err;
+ int err, flags;
struct p9_fid *fid;
+ struct v9fs_session_info *v9ses;
+ v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_clone_with_uid(dentry, 0);
if (IS_ERR(fid))
goto error_out;
@@ -297,8 +299,17 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
* dirty pages. We always request for the open fid in read-write
* mode so that a partial page write which result in page
* read can work.
+ *
+ * we don't have a tsyncfs operation for older version
+ * of protocol. So make sure the write back fid is
+ * opened in O_SYNC mode.
*/
- err = p9_client_open(fid, O_RDWR);
+ if (!v9fs_proto_dotl(v9ses))
+ flags = O_RDWR | O_SYNC;
+ else
+ flags = O_RDWR;
+
+ err = p9_client_open(fid, flags);
if (err < 0) {
p9_client_clunk(fid);
fid = ERR_PTR(err);
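
The comment in the hunk above explains the fallback: legacy 9P has no tsyncfs operation, so the writeback fid is opened O_SYNC to keep flushed pages durable on the server. The same capability-gated flag selection, as a minimal userspace sketch; server_supports_fsync() is a made-up stand-in for v9fs_proto_dotl(), not part of any 9p API:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical capability probe standing in for v9fs_proto_dotl(). */
    static int server_supports_fsync(void) { return 0; }

    int main(void)
    {
        /* Without a server-side sync primitive, open O_SYNC so every
         * write is durable before it returns. */
        int flags = O_RDWR | (server_supports_fsync() ? 0 : O_SYNC);
        int fd = open("/tmp/demo", flags | O_CREAT, 0600);
        if (fd < 0) { perror("open"); return 1; }
        printf("opened with%s O_SYNC\n", (flags & O_SYNC) ? "" : "out");
        close(fd);
        return 0;
    }
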
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index bd8496db135b..9665c2b840e6 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -130,6 +130,7 @@ struct v9fs_inode {
#endif
unsigned int cache_validity;
struct p9_fid *writeback_fid;
+ struct mutex v_mutex;
struct inode vfs_inode;
};
@@ -173,6 +174,11 @@ static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode)
return (inode->i_sb->s_fs_info);
}
+static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry)
+{
+ return dentry->d_sb->s_fs_info;
+}
+
static inline int v9fs_proto_dotu(struct v9fs_session_info *v9ses)
{
return v9ses->flags & V9FS_PROTO_2000U;
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 78bcb97c3425..ffed55817f0c 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -90,7 +90,9 @@ int v9fs_file_open(struct inode *inode, struct file *file)
}
file->private_data = fid;
- if (v9ses->cache && !v9inode->writeback_fid) {
+ mutex_lock(&v9inode->v_mutex);
+ if (v9ses->cache && !v9inode->writeback_fid &&
+ ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
@@ -101,10 +103,12 @@ int v9fs_file_open(struct inode *inode, struct file *file)
fid = v9fs_writeback_fid(file->f_path.dentry);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
+ mutex_unlock(&v9inode->v_mutex);
goto out_error;
}
v9inode->writeback_fid = (void *) fid;
}
+ mutex_unlock(&v9inode->v_mutex);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache)
v9fs_cache_inode_set_cookie(inode, file);
@@ -504,9 +508,12 @@ v9fs_file_write(struct file *filp, const char __user * data,
if (!count)
goto out;
- return v9fs_file_write_internal(filp->f_path.dentry->d_inode,
+ retval = v9fs_file_write_internal(filp->f_path.dentry->d_inode,
filp->private_data,
- data, count, offset, 1);
+ data, count, &origin, 1);
+ /* update offset on successful write */
+ if (retval > 0)
+ *offset = origin;
out:
return retval;
}
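
The new v_mutex in the open path serializes lazy creation of the per-inode writeback fid: each opener re-checks the pointer under the lock, so two racing opens cannot clone two fids. A minimal sketch of that check-under-lock pattern, with a pthread mutex and invented names in place of the kernel types:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct inode_like {
        pthread_mutex_t v_mutex;
        void *writeback_fid;        /* created lazily, shared by openers */
    };

    /* Stand-in for v9fs_writeback_fid(); the real call clones a fid. */
    static void *clone_writeback_fid(void) { return malloc(1); }

    /* Re-check under the lock so concurrent callers allocate at most once. */
    static void *get_writeback_fid(struct inode_like *i)
    {
        pthread_mutex_lock(&i->v_mutex);
        if (!i->writeback_fid)
            i->writeback_fid = clone_writeback_fid();
        pthread_mutex_unlock(&i->v_mutex);
        return i->writeback_fid;
    }

    int main(void)
    {
        struct inode_like ino = { PTHREAD_MUTEX_INITIALIZER, NULL };
        void *a = get_writeback_fid(&ino), *b = get_writeback_fid(&ino);
        printf("same handle: %s\n", a == b ? "yes" : "no");
        free(ino.writeback_fid);
        return 0;
    }
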
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8a2c232f708a..7f6c67703195 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -221,6 +221,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
#endif
v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
+ mutex_init(&v9inode->v_mutex);
return &v9inode->vfs_inode;
}
@@ -650,7 +651,9 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
/* if we are opening a file, assign the open fid to the file */
if (nd && nd->flags & LOOKUP_OPEN) {
v9inode = V9FS_I(dentry->d_inode);
- if (v9ses->cache && !v9inode->writeback_fid) {
+ mutex_lock(&v9inode->v_mutex);
+ if (v9ses->cache && !v9inode->writeback_fid &&
+ ((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
@@ -661,10 +664,12 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
+ mutex_unlock(&v9inode->v_mutex);
goto error;
}
v9inode->writeback_fid = (void *) inode_fid;
}
+ mutex_unlock(&v9inode->v_mutex);
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
err = PTR_ERR(filp);
@@ -931,7 +936,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
return 0;
@@ -967,8 +972,12 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
struct p9_wstat wstat;
P9_DPRINTK(P9_DEBUG_VFS, "\n");
+ retval = inode_change_ok(dentry->d_inode, iattr);
+ if (retval)
+ return retval;
+
retval = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if(IS_ERR(fid))
return PTR_ERR(fid);
@@ -993,12 +1002,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (iattr->ia_valid & ATTR_GID)
wstat.n_gid = iattr->ia_gid;
}
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(dentry->d_inode)) {
- retval = vmtruncate(dentry->d_inode, iattr->ia_size);
- if (retval)
- return retval;
- }
+
/* Write all dirty data */
if (S_ISREG(dentry->d_inode->i_mode))
filemap_write_and_wait(dentry->d_inode->i_mapping);
@@ -1006,6 +1010,11 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
retval = p9_client_wstat(fid, &wstat);
if (retval < 0)
return retval;
+
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(dentry->d_inode))
+ truncate_setsize(dentry->d_inode, iattr->ia_size);
+
v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
@@ -1130,7 +1139,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
retval = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 67c138e94feb..ffbb113d5f33 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -245,7 +245,9 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
v9fs_set_create_acl(dentry, dacl, pacl);
v9inode = V9FS_I(inode);
- if (v9ses->cache && !v9inode->writeback_fid) {
+ mutex_lock(&v9inode->v_mutex);
+ if (v9ses->cache && !v9inode->writeback_fid &&
+ ((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
@@ -256,10 +258,12 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
+ mutex_unlock(&v9inode->v_mutex);
goto error;
}
v9inode->writeback_fid = (void *) inode_fid;
}
+ mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
@@ -391,7 +395,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
return 0;
@@ -448,17 +452,11 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
retval = -EPERM;
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(dentry->d_inode)) {
- retval = vmtruncate(dentry->d_inode, iattr->ia_size);
- if (retval)
- return retval;
- }
/* Write all dirty data */
if (S_ISREG(dentry->d_inode->i_mode))
filemap_write_and_wait(dentry->d_inode->i_mapping);
@@ -466,8 +464,12 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0)
return retval;
- v9fs_invalidate_inode_attr(dentry->d_inode);
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(dentry->d_inode))
+ truncate_setsize(dentry->d_inode, iattr->ia_size);
+
+ v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
mark_inode_dirty(dentry->d_inode);
if (iattr->ia_valid & ATTR_MODE) {
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 09fd08d1606f..f3eed3383e4f 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -262,7 +262,7 @@ static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
goto done;
}
- v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ v9ses = v9fs_dentry2v9ses(dentry);
if (v9fs_proto_dotl(v9ses)) {
res = p9_client_statfs(fid, &rs);
if (res == 0) {
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 2ff622f6f547..718ac1f440c6 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -50,6 +50,7 @@ struct adfs_sb_info {
gid_t s_gid; /* owner gid */
umode_t s_owner_mask; /* ADFS owner perm -> unix perm */
umode_t s_other_mask; /* ADFS other perm -> unix perm */
+ int s_ftsuffix; /* ,xyz hex filetype suffix option */
__u32 s_ids_per_zone; /* max. no ids in one zone */
__u32 s_idlen; /* length of ID in map */
@@ -79,6 +80,10 @@ struct adfs_dir {
int nr_buffers;
struct buffer_head *bh[4];
+
+ /* big directories need allocated buffers */
+ struct buffer_head **bh_fplus;
+
unsigned int pos;
unsigned int parent_id;
@@ -89,7 +94,7 @@ struct adfs_dir {
/*
* This is the overall maximum name length
*/
-#define ADFS_MAX_NAME_LEN 256
+#define ADFS_MAX_NAME_LEN (256 + 4) /* +4 for ,xyz hex filetype suffix */
struct object_info {
__u32 parent_id; /* parent object id */
__u32 file_id; /* object id */
@@ -97,10 +102,26 @@ struct object_info {
__u32 execaddr; /* execution address */
__u32 size; /* size */
__u8 attr; /* RISC OS attributes */
- unsigned char name_len; /* name length */
+ unsigned int name_len; /* name length */
char name[ADFS_MAX_NAME_LEN];/* file name */
+
+ /* RISC OS file type (12-bit: derived from loadaddr) */
+ __u16 filetype;
};
+/* RISC OS 12-bit filetype converts to ,xyz hex filename suffix */
+static inline int append_filetype_suffix(char *buf, __u16 filetype)
+{
+ if (filetype == 0xffff) /* no explicit 12-bit file type was set */
+ return 0;
+
+ *buf++ = ',';
+ *buf++ = hex_asc_lo(filetype >> 8);
+ *buf++ = hex_asc_lo(filetype >> 4);
+ *buf++ = hex_asc_lo(filetype >> 0);
+ return 4;
+}
+
struct adfs_dir_ops {
int (*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir);
int (*setpos)(struct adfs_dir *dir, unsigned int fpos);
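
For illustration, append_filetype_suffix() above turns a 12-bit RISC OS filetype into a 4-byte ",xyz" name suffix: filetype 0xfff (Text) becomes ",fff", while 0xffff means the object is untyped and nothing is appended. A standalone sketch of the same encoding; hex_lo() re-implements what the kernel's hex_asc_lo() provides:

    #include <stdio.h>
    #include <string.h>

    /* Hex digit for the low nibble, as the kernel's hex_asc_lo() does. */
    static char hex_lo(unsigned x) { return "0123456789abcdef"[x & 15]; }

    static int append_suffix(char *buf, unsigned filetype)
    {
        if (filetype == 0xffff)     /* untyped object: no suffix */
            return 0;
        *buf++ = ',';
        *buf++ = hex_lo(filetype >> 8);
        *buf++ = hex_lo(filetype >> 4);
        *buf++ = hex_lo(filetype);
        return 4;
    }

    int main(void)
    {
        char name[16] = "ReadMe";
        size_t len = strlen(name);
        len += append_suffix(name + len, 0xfff);   /* RISC OS Text */
        printf("%.*s\n", (int)len, name);          /* prints ReadMe,fff */
        return 0;
    }
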
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index bafc71222e25..4bbe853ee50a 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -52,7 +52,6 @@ static inline int adfs_readname(char *buf, char *ptr, int maxlen)
*buf++ = *ptr;
ptr++;
}
- *buf = '\0';
return buf - old_buf;
}
@@ -208,7 +207,8 @@ release_buffers:
* convert a disk-based directory entry to a Linux ADFS directory entry
*/
static inline void
-adfs_dir2obj(struct object_info *obj, struct adfs_direntry *de)
+adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj,
+ struct adfs_direntry *de)
{
obj->name_len = adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN);
obj->file_id = adfs_readval(de->dirinddiscadd, 3);
@@ -216,6 +216,23 @@ adfs_dir2obj(struct object_info *obj, struct adfs_direntry *de)
obj->execaddr = adfs_readval(de->direxec, 4);
obj->size = adfs_readval(de->dirlen, 4);
obj->attr = de->newdiratts;
+ obj->filetype = -1;
+
+ /*
+ * object is a file and is filetyped and timestamped?
+ * RISC OS 12-bit filetype is stored in load_address[19:8]
+ */
+ if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
+ (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
+ obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
+
+ /* optionally append the ,xyz hex filetype suffix */
+ if (ADFS_SB(dir->sb)->s_ftsuffix)
+ obj->name_len +=
+ append_filetype_suffix(
+ &obj->name[obj->name_len],
+ obj->filetype);
+ }
}
/*
@@ -260,7 +277,7 @@ __adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
if (!de.dirobname[0])
return -ENOENT;
- adfs_dir2obj(obj, &de);
+ adfs_dir2obj(dir, obj, &de);
return 0;
}
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index 1796bb352d05..d9e3bee4e653 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/buffer_head.h>
+#include <linux/slab.h>
#include "adfs.h"
#include "dir_fplus.h"
@@ -22,30 +23,53 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
dir->nr_buffers = 0;
+ /* start off using fixed bh set - only alloc for big dirs */
+ dir->bh_fplus = &dir->bh[0];
+
block = __adfs_block_map(sb, id, 0);
if (!block) {
adfs_error(sb, "dir object %X has a hole at offset 0", id);
goto out;
}
- dir->bh[0] = sb_bread(sb, block);
- if (!dir->bh[0])
+ dir->bh_fplus[0] = sb_bread(sb, block);
+ if (!dir->bh_fplus[0])
goto out;
dir->nr_buffers += 1;
- h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+ h = (struct adfs_bigdirheader *)dir->bh_fplus[0]->b_data;
size = le32_to_cpu(h->bigdirsize);
if (size != sz) {
- printk(KERN_WARNING "adfs: adfs_fplus_read: directory header size\n"
- " does not match directory size\n");
+ printk(KERN_WARNING "adfs: adfs_fplus_read:"
+ " directory header size %X\n"
+ " does not match directory size %X\n",
+ size, sz);
}
if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
h->bigdirversion[2] != 0 || size & 2047 ||
- h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME))
+ h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) {
+ printk(KERN_WARNING "adfs: dir object %X has"
+ " malformed dir header\n", id);
goto out;
+ }
size >>= sb->s_blocksize_bits;
+ if (size > sizeof(dir->bh)/sizeof(dir->bh[0])) {
+ /* this directory is too big for fixed bh set, must allocate */
+ struct buffer_head **bh_fplus =
+ kzalloc(size * sizeof(struct buffer_head *),
+ GFP_KERNEL);
+ if (!bh_fplus) {
+ adfs_error(sb, "not enough memory for"
+ " dir object %X (%d blocks)", id, size);
+ goto out;
+ }
+ dir->bh_fplus = bh_fplus;
+ /* copy over the pointer to the block that we've already read */
+ dir->bh_fplus[0] = dir->bh[0];
+ }
+
for (blk = 1; blk < size; blk++) {
block = __adfs_block_map(sb, id, blk);
if (!block) {
@@ -53,25 +77,44 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
goto out;
}
- dir->bh[blk] = sb_bread(sb, block);
- if (!dir->bh[blk])
+ dir->bh_fplus[blk] = sb_bread(sb, block);
+ if (!dir->bh_fplus[blk]) {
+ adfs_error(sb, "dir object %X failed read for"
+ " offset %d, mapped block %X",
+ id, blk, block);
goto out;
- dir->nr_buffers = blk;
+ }
+
+ dir->nr_buffers += 1;
}
- t = (struct adfs_bigdirtail *)(dir->bh[size - 1]->b_data + (sb->s_blocksize - 8));
+ t = (struct adfs_bigdirtail *)
+ (dir->bh_fplus[size - 1]->b_data + (sb->s_blocksize - 8));
if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
t->bigdirendmasseq != h->startmasseq ||
- t->reserved[0] != 0 || t->reserved[1] != 0)
+ t->reserved[0] != 0 || t->reserved[1] != 0) {
+ printk(KERN_WARNING "adfs: dir object %X has "
+ "malformed dir end\n", id);
goto out;
+ }
dir->parent_id = le32_to_cpu(h->bigdirparent);
dir->sb = sb;
return 0;
+
out:
- for (i = 0; i < dir->nr_buffers; i++)
- brelse(dir->bh[i]);
+ if (dir->bh_fplus) {
+ for (i = 0; i < dir->nr_buffers; i++)
+ brelse(dir->bh_fplus[i]);
+
+ if (&dir->bh[0] != dir->bh_fplus)
+ kfree(dir->bh_fplus);
+
+ dir->bh_fplus = NULL;
+ }
+
+ dir->nr_buffers = 0;
dir->sb = NULL;
return ret;
}
@@ -79,7 +122,8 @@ out:
static int
adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
{
- struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+ struct adfs_bigdirheader *h =
+ (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
int ret = -ENOENT;
if (fpos <= le32_to_cpu(h->bigdirentries)) {
@@ -102,21 +146,27 @@ dir_memcpy(struct adfs_dir *dir, unsigned int offset, void *to, int len)
partial = sb->s_blocksize - offset;
if (partial >= len)
- memcpy(to, dir->bh[buffer]->b_data + offset, len);
+ memcpy(to, dir->bh_fplus[buffer]->b_data + offset, len);
else {
char *c = (char *)to;
remainder = len - partial;
- memcpy(c, dir->bh[buffer]->b_data + offset, partial);
- memcpy(c + partial, dir->bh[buffer + 1]->b_data, remainder);
+ memcpy(c,
+ dir->bh_fplus[buffer]->b_data + offset,
+ partial);
+
+ memcpy(c + partial,
+ dir->bh_fplus[buffer + 1]->b_data,
+ remainder);
}
}
static int
adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
{
- struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
+ struct adfs_bigdirheader *h =
+ (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
struct adfs_bigdirentry bde;
unsigned int offset;
int i, ret = -ENOENT;
@@ -147,6 +197,24 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
if (obj->name[i] == '/')
obj->name[i] = '.';
+ obj->filetype = -1;
+
+ /*
+ * object is a file and is filetyped and timestamped?
+ * RISC OS 12-bit filetype is stored in load_address[19:8]
+ */
+ if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
+ (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
+ obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
+
+ /* optionally append the ,xyz hex filetype suffix */
+ if (ADFS_SB(dir->sb)->s_ftsuffix)
+ obj->name_len +=
+ append_filetype_suffix(
+ &obj->name[obj->name_len],
+ obj->filetype);
+ }
+
dir->pos += 1;
ret = 0;
out:
@@ -160,7 +228,7 @@ adfs_fplus_sync(struct adfs_dir *dir)
int i;
for (i = dir->nr_buffers - 1; i >= 0; i--) {
- struct buffer_head *bh = dir->bh[i];
+ struct buffer_head *bh = dir->bh_fplus[i];
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
err = -EIO;
@@ -174,8 +242,17 @@ adfs_fplus_free(struct adfs_dir *dir)
{
int i;
- for (i = 0; i < dir->nr_buffers; i++)
- brelse(dir->bh[i]);
+ if (dir->bh_fplus) {
+ for (i = 0; i < dir->nr_buffers; i++)
+ brelse(dir->bh_fplus[i]);
+
+ if (&dir->bh[0] != dir->bh_fplus)
+ kfree(dir->bh_fplus);
+
+ dir->bh_fplus = NULL;
+ }
+
+ dir->nr_buffers = 0;
dir->sb = NULL;
}
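
The bh_fplus handling above is a small-buffer optimization: dir->bh_fplus initially points at the fixed four-entry bh[] array and is heap-allocated only for directories spanning more blocks, with the free path keyed off that pointer comparison. A minimal sketch of the idiom, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    #define FIXED 4                       /* mirrors the 4-entry bh[] array */

    struct dir_like {
        void *fixed[FIXED];
        void **bufs;                      /* == fixed, or a heap array */
        int nr;
    };

    static int dir_grow(struct dir_like *d, int need)
    {
        d->bufs = d->fixed;               /* common case: no allocation */
        if (need > FIXED) {
            d->bufs = calloc(need, sizeof(*d->bufs));
            if (!d->bufs)
                return -1;
        }
        d->nr = need;
        return 0;
    }

    static void dir_free(struct dir_like *d)
    {
        if (d->bufs != d->fixed)          /* free only what dir_grow allocated */
            free(d->bufs);
        d->bufs = NULL;
        d->nr = 0;
    }

    int main(void)
    {
        struct dir_like d;
        dir_grow(&d, 2);                  /* stays on the fixed array */
        printf("small dir heap-allocated: %s\n", d.bufs == d.fixed ? "no" : "yes");
        dir_free(&d);
        dir_grow(&d, 64);                 /* big directory: allocates */
        printf("big dir heap-allocated: %s\n", d.bufs == d.fixed ? "no" : "yes");
        dir_free(&d);
        return 0;
    }
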
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 09fe40198d1c..d5250c5aae21 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -72,32 +72,18 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations adfs_aops = {
.readpage = adfs_readpage,
.writepage = adfs_writepage,
- .sync_page = block_sync_page,
.write_begin = adfs_write_begin,
.write_end = generic_write_end,
.bmap = _adfs_bmap
};
-static inline unsigned int
-adfs_filetype(struct inode *inode)
-{
- unsigned int type;
-
- if (ADFS_I(inode)->stamped)
- type = (ADFS_I(inode)->loadaddr >> 8) & 0xfff;
- else
- type = (unsigned int) -1;
-
- return type;
-}
-
/*
* Convert ADFS attributes and filetype to Linux permission.
*/
static umode_t
adfs_atts2mode(struct super_block *sb, struct inode *inode)
{
- unsigned int filetype, attr = ADFS_I(inode)->attr;
+ unsigned int attr = ADFS_I(inode)->attr;
umode_t mode, rmask;
struct adfs_sb_info *asb = ADFS_SB(sb);
@@ -106,9 +92,7 @@ adfs_atts2mode(struct super_block *sb, struct inode *inode)
return S_IFDIR | S_IXUGO | mode;
}
- filetype = adfs_filetype(inode);
-
- switch (filetype) {
+ switch (ADFS_I(inode)->filetype) {
case 0xfc0: /* LinkFS */
return S_IFLNK|S_IRWXUGO;
@@ -174,50 +158,48 @@ adfs_mode2atts(struct super_block *sb, struct inode *inode)
/*
* Convert an ADFS time to Unix time. ADFS has a 40-bit centi-second time
- * referenced to 1 Jan 1900 (til 2248)
+ * referenced to 1 Jan 1900 (til 2248) so we need to discard 2208988800 seconds
+ * of time to convert from RISC OS epoch to Unix epoch.
*/
static void
adfs_adfs2unix_time(struct timespec *tv, struct inode *inode)
{
unsigned int high, low;
+ /* 01 Jan 1970 00:00:00 (Unix epoch) as nanoseconds since
+ * 01 Jan 1900 00:00:00 (RISC OS epoch)
+ */
+ static const s64 nsec_unix_epoch_diff_risc_os_epoch =
+ 2208988800000000000LL;
+ s64 nsec;
if (ADFS_I(inode)->stamped == 0)
goto cur_time;
- high = ADFS_I(inode)->loadaddr << 24;
- low = ADFS_I(inode)->execaddr;
+ high = ADFS_I(inode)->loadaddr & 0xFF; /* top 8 bits of timestamp */
+ low = ADFS_I(inode)->execaddr; /* bottom 32 bits of timestamp */
- high |= low >> 8;
- low &= 255;
+ /* convert 40-bit centi-seconds to 32-bit seconds
+ * going via nanoseconds to retain precision
+ */
+ nsec = (((s64) high << 32) | (s64) low) * 10000000; /* cs to ns */
/* Files dated pre 01 Jan 1970 00:00:00. */
- if (high < 0x336e996a)
+ if (nsec < nsec_unix_epoch_diff_risc_os_epoch)
goto too_early;
- /* Files dated post 18 Jan 2038 03:14:05. */
- if (high >= 0x656e9969)
- goto too_late;
+ /* convert from RISC OS to Unix epoch */
+ nsec -= nsec_unix_epoch_diff_risc_os_epoch;
- /* discard 2208988800 (0x336e996a00) seconds of time */
- high -= 0x336e996a;
-
- /* convert 40-bit centi-seconds to 32-bit seconds */
- tv->tv_sec = (((high % 100) << 8) + low) / 100 + (high / 100 << 8);
- tv->tv_nsec = 0;
+ *tv = ns_to_timespec(nsec);
return;
cur_time:
- *tv = CURRENT_TIME_SEC;
+ *tv = CURRENT_TIME;
return;
too_early:
tv->tv_sec = tv->tv_nsec = 0;
return;
-
- too_late:
- tv->tv_sec = 0x7ffffffd;
- tv->tv_nsec = 0;
- return;
}
/*
@@ -279,7 +261,8 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->loadaddr = obj->loadaddr;
ADFS_I(inode)->execaddr = obj->execaddr;
ADFS_I(inode)->attr = obj->attr;
- ADFS_I(inode)->stamped = ((obj->loadaddr & 0xfff00000) == 0xfff00000);
+ ADFS_I(inode)->filetype = obj->filetype;
+ ADFS_I(inode)->stamped = ((obj->loadaddr & 0xfff00000) == 0xfff00000);
inode->i_mode = adfs_atts2mode(sb, inode);
adfs_adfs2unix_time(&inode->i_mtime, inode);
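
The arithmetic above is easy to check by hand: the RISC OS epoch (1 Jan 1900) precedes the Unix epoch (1 Jan 1970) by 2208988800 seconds, i.e. 0x336E996A00 centiseconds, matching the 0x336e996a threshold the removed 32-bit code compared against. A small standalone check of the widened conversion; the sample value is one second past the Unix epoch:

    #include <stdint.h>
    #include <stdio.h>

    /* 1900-01-01 .. 1970-01-01 in nanoseconds */
    #define EPOCH_DIFF_NS 2208988800000000000LL

    int main(void)
    {
        /* 40-bit centisecond count: top 8 bits from loadaddr,
         * low 32 bits from execaddr, exactly as the patch splits it. */
        uint32_t high = 0x33, low = 0x6e996a00 + 100; /* 1 s past Unix epoch */
        int64_t ns = (((int64_t)high << 32) | low) * 10000000; /* cs -> ns */

        if (ns < EPOCH_DIFF_NS) {
            puts("pre-1970 timestamp");
            return 0;
        }
        ns -= EPOCH_DIFF_NS;              /* RISC OS epoch -> Unix epoch */
        printf("unix time: %lld.%09lld s\n",
               (long long)(ns / 1000000000), (long long)(ns % 1000000000));
        return 0;
    }
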
diff --git a/fs/adfs/map.c b/fs/adfs/map.c
index d1a5932bb0f1..6935f05202ac 100644
--- a/fs/adfs/map.c
+++ b/fs/adfs/map.c
@@ -51,7 +51,7 @@ static DEFINE_RWLOCK(adfs_map_lock);
/*
* This is fun. We need to load up to 19 bits from the map at an
- * arbitary bit alignment. (We're limited to 19 bits by F+ version 2).
+ * arbitrary bit alignment. (We're limited to 19 bits by F+ version 2).
*/
#define GET_FRAG_ID(_map,_start,_idmask) \
({ \
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 06d7388b477b..c8bf36a1996a 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -138,17 +138,20 @@ static int adfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
seq_printf(seq, ",ownmask=%o", asb->s_owner_mask);
if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK)
seq_printf(seq, ",othmask=%o", asb->s_other_mask);
+ if (asb->s_ftsuffix != 0)
+ seq_printf(seq, ",ftsuffix=%u", asb->s_ftsuffix);
return 0;
}
-enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_err};
+enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_ftsuffix, Opt_err};
static const match_table_t tokens = {
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_ownmask, "ownmask=%o"},
{Opt_othmask, "othmask=%o"},
+ {Opt_ftsuffix, "ftsuffix=%u"},
{Opt_err, NULL}
};
@@ -189,6 +192,11 @@ static int parse_options(struct super_block *sb, char *options)
return -EINVAL;
asb->s_other_mask = option;
break;
+ case Opt_ftsuffix:
+ if (match_int(args, &option))
+ return -EINVAL;
+ asb->s_ftsuffix = option;
+ break;
default:
printk("ADFS-fs: unrecognised mount option \"%s\" "
"or missing value\n", p);
@@ -366,6 +374,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
asb->s_gid = 0;
asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
+ asb->s_ftsuffix = 0;
if (parse_options(sb, data))
goto error;
@@ -445,11 +454,13 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
root_obj.parent_id = root_obj.file_id = le32_to_cpu(dr->root);
root_obj.name_len = 0;
- root_obj.loadaddr = 0;
- root_obj.execaddr = 0;
+ /* Set root object date as 01 Jan 1987 00:00:00 */
+ root_obj.loadaddr = 0xfff0003f;
+ root_obj.execaddr = 0xec22c000;
root_obj.size = ADFS_NEWDIR_SIZE;
root_obj.attr = ADFS_NDA_DIRECTORY | ADFS_NDA_OWNER_READ |
ADFS_NDA_OWNER_WRITE | ADFS_NDA_PUBLIC_READ;
+ root_obj.filetype = -1;
/*
* If this is a F+ disk with variable length directories,
@@ -463,6 +474,12 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
asb->s_dir = &adfs_f_dir_ops;
asb->s_namelen = ADFS_F_NAME_LEN;
}
+ /*
+ * ,xyz hex filetype suffix may be added by driver
+ * to files that have valid RISC OS filetype
+ */
+ if (asb->s_ftsuffix)
+ asb->s_namelen += 4;
sb->s_d_op = &adfs_dentry_operations;
root = adfs_iget(sb, &root_obj);
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0a90dcd46de2..acf321b70fcd 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations affs_aops = {
.readpage = affs_readpage,
.writepage = affs_writepage,
- .sync_page = block_sync_page,
.write_begin = affs_write_begin,
.write_end = generic_write_end,
.bmap = _affs_bmap
@@ -786,7 +785,6 @@ out:
const struct address_space_operations affs_aops_ofs = {
.readpage = affs_readpage_ofs,
//.writepage = affs_writepage_ofs,
- //.sync_page = affs_sync_page_ofs,
.write_begin = affs_write_begin_ofs,
.write_end = affs_write_end_ofs
};
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
index 0fb315dd4d2a..577763c3d88b 100644
--- a/fs/afs/cache.c
+++ b/fs/afs/cache.c
@@ -98,7 +98,7 @@ static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
}
/*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
*/
static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
@@ -117,7 +117,7 @@ static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
}
/*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
*/
static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data,
const void *buffer,
@@ -150,7 +150,7 @@ static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
}
/*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
*/
static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
@@ -172,7 +172,7 @@ static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
}
/*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
*/
static
enum fscache_checkaux afs_vlocation_cache_check_aux(void *cookie_netfs_data,
@@ -283,7 +283,7 @@ static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
}
/*
- * provide new auxilliary cache data
+ * provide new auxiliary cache data
*/
static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
@@ -309,7 +309,7 @@ static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
}
/*
- * check that the auxilliary data indicates that the entry is still valid
+ * check that the auxiliary data indicates that the entry is still valid
*/
static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
const void *buffer,
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 0d5eeadf6121..3c090b7555ea 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -293,7 +293,7 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz,
if (!cell) {
/* this should not happen unless user tries to mount
* when root cell is not set. Return an impossibly
- * bizzare errno to alert the user. Things like
+ * bizarre errno to alert the user. Things like
* ENOENT might be "more appropriate" but they happen
* for other reasons.
*/
diff --git a/fs/aio.c b/fs/aio.c
index 7f54f43b8f7c..e29ec485af25 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -34,8 +34,6 @@
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
-#include <linux/mempool.h>
-#include <linux/hash.h>
#include <linux/compat.h>
#include <asm/kmap_types.h>
@@ -65,14 +63,6 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);
-#define AIO_BATCH_HASH_BITS 3 /* allocated on-stack, so don't go crazy */
-#define AIO_BATCH_HASH_SIZE (1 << AIO_BATCH_HASH_BITS)
-struct aio_batch_entry {
- struct hlist_node list;
- struct address_space *mapping;
-};
-mempool_t *abe_pool;
-
static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
@@ -86,8 +76,7 @@ static int __init aio_setup(void)
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */
- abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
- BUG_ON(!aio_wq || !abe_pool);
+ BUG_ON(!aio_wq);
pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
@@ -520,7 +509,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
ctx->reqs_active--;
if (unlikely(!ctx->reqs_active && ctx->dead))
- wake_up(&ctx->wait);
+ wake_up_all(&ctx->wait);
}
static void aio_fput_routine(struct work_struct *data)
@@ -1229,7 +1218,7 @@ static void io_destroy(struct kioctx *ioctx)
* by other CPUs at this point. Right now, we rely on the
* locking done by the above calls to ensure this consistency.
*/
- wake_up(&ioctx->wait);
+ wake_up_all(&ioctx->wait);
put_ioctx(ioctx); /* once for the lookup */
}
@@ -1525,57 +1514,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
return 0;
}
-static void aio_batch_add(struct address_space *mapping,
- struct hlist_head *batch_hash)
-{
- struct aio_batch_entry *abe;
- struct hlist_node *pos;
- unsigned bucket;
-
- bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
- hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
- if (abe->mapping == mapping)
- return;
- }
-
- abe = mempool_alloc(abe_pool, GFP_KERNEL);
-
- /*
- * we should be using igrab here, but
- * we don't want to hammer on the global
- * inode spinlock just to take an extra
- * reference on a file that we must already
- * have a reference to.
- *
- * When we're called, we always have a reference
- * on the file, so we must always have a reference
- * on the inode, so ihold() is safe here.
- */
- ihold(mapping->host);
- abe->mapping = mapping;
- hlist_add_head(&abe->list, &batch_hash[bucket]);
- return;
-}
-
-static void aio_batch_free(struct hlist_head *batch_hash)
-{
- struct aio_batch_entry *abe;
- struct hlist_node *pos, *n;
- int i;
-
- for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
- hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
- blk_run_address_space(abe->mapping);
- iput(abe->mapping->host);
- hlist_del(&abe->list);
- mempool_free(abe, abe_pool);
- }
- }
-}
-
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- struct iocb *iocb, struct hlist_head *batch_hash,
- bool compat)
+ struct iocb *iocb, bool compat)
{
struct kiocb *req;
struct file *file;
@@ -1666,11 +1606,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
;
}
spin_unlock_irq(&ctx->ctx_lock);
- if (req->ki_opcode == IOCB_CMD_PREAD ||
- req->ki_opcode == IOCB_CMD_PREADV ||
- req->ki_opcode == IOCB_CMD_PWRITE ||
- req->ki_opcode == IOCB_CMD_PWRITEV)
- aio_batch_add(file->f_mapping, batch_hash);
aio_put_req(req); /* drop extra ref to req */
return 0;
@@ -1687,7 +1622,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
struct kioctx *ctx;
long ret = 0;
int i;
- struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
+ struct blk_plug plug;
if (unlikely(nr < 0))
return -EINVAL;
@@ -1704,6 +1639,8 @@ long do_io_submit(aio_context_t ctx_id, long nr,
return -EINVAL;
}
+ blk_start_plug(&plug);
+
/*
* AKPM: should this return a partial result if some of the IOs were
* successfully submitted?
@@ -1722,11 +1659,11 @@ long do_io_submit(aio_context_t ctx_id, long nr,
break;
}
- ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
+ ret = io_submit_one(ctx, user_iocb, &tmp, compat);
if (ret)
break;
}
- aio_batch_free(batch_hash);
+ blk_finish_plug(&plug);
put_ioctx(ctx);
return i ? i : ret;
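
With the batch hash gone, io_submit batching rides on block-layer plugging: requests queued between blk_start_plug() and blk_finish_plug() are held on a per-task list and dispatched together, with no per-mapping bookkeeping or extra inode references. A toy analogue of that plug/unplug shape; this is only the pattern, not the block-layer API:

    #include <stdio.h>

    /* Toy analogue of blk_start_plug()/blk_finish_plug(): collect work
     * while "plugged", flush it in one batch when the plug is removed. */
    struct plug { int queued; };

    static void start_plug(struct plug *p) { p->queued = 0; }
    static void submit(struct plug *p, int req) { p->queued++; (void)req; }
    static void finish_plug(struct plug *p)
    {
        printf("dispatching %d requests in one batch\n", p->queued);
        p->queued = 0;
    }

    int main(void)
    {
        struct plug plug;
        start_plug(&plug);
        for (int i = 0; i < 8; i++)   /* io_submit loop analogue */
            submit(&plug, i);
        finish_plug(&plug);           /* single flush, like blk_finish_plug */
        return 0;
    }
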
diff --git a/fs/attr.c b/fs/attr.c
index 7ca41811afa1..91dbe2a107f2 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -59,7 +59,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
/* Also check the setgid bit! */
if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
@@ -69,7 +69,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
}
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(inode_newsize_ok);
* setattr_copy must be called with i_mutex held.
*
* setattr_copy updates the inode's metadata with that specified
- * in attr. Noticably missing is inode size update, which is more complex
+ * in attr. Noticeably missing is inode size update, which is more complex
* as it requires pagecache updates.
*
* The inode is not marked as dirty after this operation. The rationale is
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 54f923792728..475f9c597cb7 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -61,8 +61,6 @@ do { \
current->pid, __func__, ##args); \
} while (0)
-extern spinlock_t autofs4_lock;
-
/* Unified info structure. This is pointed to by both the dentry and
inode structures. Each file in the filesystem has an instance of this
structure. It holds a reference to the dentry, so dentries are never
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 1442da4860e5..509fe1eb66ae 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -372,6 +372,10 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
return -EBUSY;
} else {
struct file *pipe = fget(pipefd);
+ if (!pipe) {
+ err = -EBADF;
+ goto out;
+ }
if (!pipe->f_op || !pipe->f_op->write) {
err = -EPIPE;
fput(pipe);
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index f43100b9662b..450f529a4eae 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -87,18 +87,70 @@ done:
}
/*
+ * Calculate and dget next entry in the subdirs list under root.
+ */
+static struct dentry *get_next_positive_subdir(struct dentry *prev,
+ struct dentry *root)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
+ struct list_head *next;
+ struct dentry *p, *q;
+
+ spin_lock(&sbi->lookup_lock);
+
+ if (prev == NULL) {
+ spin_lock(&root->d_lock);
+ prev = dget_dlock(root);
+ next = prev->d_subdirs.next;
+ p = prev;
+ goto start;
+ }
+
+ p = prev;
+ spin_lock(&p->d_lock);
+again:
+ next = p->d_u.d_child.next;
+start:
+ if (next == &root->d_subdirs) {
+ spin_unlock(&p->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ dput(prev);
+ return NULL;
+ }
+
+ q = list_entry(next, struct dentry, d_u.d_child);
+
+ spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
+ /* Negative dentry - try next */
+ if (!simple_positive(q)) {
+ spin_unlock(&p->d_lock);
+ p = q;
+ goto again;
+ }
+ dget_dlock(q);
+ spin_unlock(&q->d_lock);
+ spin_unlock(&p->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+
+ dput(prev);
+
+ return q;
+}
+
+/*
* Calculate and dget next entry in top down tree traversal.
*/
static struct dentry *get_next_positive_dentry(struct dentry *prev,
struct dentry *root)
{
+ struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
struct list_head *next;
struct dentry *p, *ret;
if (prev == NULL)
return dget(root);
- spin_lock(&autofs4_lock);
+ spin_lock(&sbi->lookup_lock);
relock:
p = prev;
spin_lock(&p->d_lock);
@@ -110,7 +162,7 @@ again:
if (p == root) {
spin_unlock(&p->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
dput(prev);
return NULL;
}
@@ -140,7 +192,7 @@ again:
dget_dlock(ret);
spin_unlock(&ret->d_lock);
spin_unlock(&p->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
dput(prev);
@@ -290,11 +342,8 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(root);
/* No point expiring a pending mount */
- if (ino->flags & AUTOFS_INF_PENDING) {
- spin_unlock(&sbi->fs_lock);
- return NULL;
- }
- managed_dentry_set_transit(root);
+ if (ino->flags & AUTOFS_INF_PENDING)
+ goto out;
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
struct autofs_info *ino = autofs4_dentry_ino(root);
ino->flags |= AUTOFS_INF_EXPIRING;
@@ -302,7 +351,7 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
spin_unlock(&sbi->fs_lock);
return root;
}
- managed_dentry_clear_transit(root);
+out:
spin_unlock(&sbi->fs_lock);
dput(root);
@@ -336,13 +385,12 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
timeout = sbi->exp_timeout;
dentry = NULL;
- while ((dentry = get_next_positive_dentry(dentry, root))) {
+ while ((dentry = get_next_positive_subdir(dentry, root))) {
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
/* No point expiring a pending mount */
if (ino->flags & AUTOFS_INF_PENDING)
- goto cont;
- managed_dentry_set_transit(dentry);
+ goto next;
/*
* Case 1: (i) indirect mount or top level pseudo direct mount
@@ -402,8 +450,6 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
}
}
next:
- managed_dentry_clear_transit(dentry);
-cont:
spin_unlock(&sbi->fs_lock);
}
return NULL;
@@ -415,13 +461,13 @@ found:
ino->flags |= AUTOFS_INF_EXPIRING;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
- spin_lock(&autofs4_lock);
+ spin_lock(&sbi->lookup_lock);
spin_lock(&expired->d_parent->d_lock);
spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
spin_unlock(&expired->d_lock);
spin_unlock(&expired->d_parent->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
return expired;
}
@@ -484,8 +530,6 @@ int autofs4_expire_run(struct super_block *sb,
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
ino->flags &= ~AUTOFS_INF_EXPIRING;
- if (!d_unhashed(dentry))
- managed_dentry_clear_transit(dentry);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
@@ -513,9 +557,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_EXPIRING;
spin_lock(&dentry->d_lock);
- if (ret)
- __managed_dentry_clear_transit(dentry);
- else {
+ if (!ret) {
if ((IS_ROOT(dentry) ||
(autofs_type_indirect(sbi->type) &&
IS_ROOT(dentry->d_parent))) &&
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index e6f84d26f4cf..f55ae23b137e 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -23,8 +23,6 @@
#include "autofs_i.h"
-DEFINE_SPINLOCK(autofs4_lock);
-
static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
@@ -125,15 +123,15 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
* autofs file system so just let the libfs routines handle
* it.
*/
- spin_lock(&autofs4_lock);
+ spin_lock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
return -ENOENT;
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
out:
return dcache_dir_open(inode, file);
@@ -171,7 +169,6 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
const unsigned char *str = name->name;
struct list_head *p, *head;
- spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->active_list;
list_for_each(p, head) {
@@ -204,14 +201,12 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
dget_dlock(active);
spin_unlock(&active->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return active;
}
next:
spin_unlock(&active->d_lock);
}
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return NULL;
}
@@ -226,7 +221,6 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
const unsigned char *str = name->name;
struct list_head *p, *head;
- spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->expiring_list;
list_for_each(p, head) {
@@ -259,14 +253,12 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
dget_dlock(expiring);
spin_unlock(&expiring->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return expiring;
}
next:
spin_unlock(&expiring->d_lock);
}
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return NULL;
}
@@ -275,17 +267,16 @@ static int autofs4_mount_wait(struct dentry *dentry)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
- int status;
+ int status = 0;
if (ino->flags & AUTOFS_INF_PENDING) {
DPRINTK("waiting for mount name=%.*s",
dentry->d_name.len, dentry->d_name.name);
status = autofs4_wait(sbi, dentry, NFY_MOUNT);
DPRINTK("mount wait done status=%d", status);
- ino->last_used = jiffies;
- return status;
}
- return 0;
+ ino->last_used = jiffies;
+ return status;
}
static int do_expire_wait(struct dentry *dentry)
@@ -319,9 +310,12 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
*/
if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
struct dentry *parent = dentry->d_parent;
+ struct autofs_info *ino;
struct dentry *new = d_lookup(parent, &dentry->d_name);
if (!new)
return NULL;
+ ino = autofs4_dentry_ino(new);
+ ino->last_used = jiffies;
dput(path->dentry);
path->dentry = new;
}
@@ -338,18 +332,6 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
DPRINTK("dentry=%p %.*s",
dentry, dentry->d_name.len, dentry->d_name.name);
- /*
- * Someone may have manually umounted this or it was a submount
- * that has gone away.
- */
- spin_lock(&dentry->d_lock);
- if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
- if (!(dentry->d_flags & DCACHE_MANAGE_TRANSIT) &&
- (dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
- __managed_dentry_set_transit(path->dentry);
- }
- spin_unlock(&dentry->d_lock);
-
/* The daemon never triggers a mount. */
if (autofs4_oz_mode(sbi))
return NULL;
@@ -418,18 +400,17 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
done:
if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
/*
- * Any needed mounting has been completed and the path updated
- * so turn this into a normal dentry so we don't continually
- * call ->d_automount() and ->d_manage().
- */
- spin_lock(&dentry->d_lock);
- __managed_dentry_clear_transit(dentry);
- /*
+ * Any needed mounting has been completed and the path
+ * updated so clear DCACHE_NEED_AUTOMOUNT so we don't
+ * call ->d_automount() on rootless multi-mounts since
+ * it can lead to an incorrect ELOOP error return.
+ *
* Only clear DMANAGED_AUTOMOUNT for rootless multi-mounts and
* symlinks as in all other cases the dentry will be covered by
* an actual mount so ->d_automount() won't be called during
* the follow.
*/
+ spin_lock(&dentry->d_lock);
if ((!d_mountpoint(dentry) &&
!list_empty(&dentry->d_subdirs)) ||
(dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
@@ -455,6 +436,8 @@ int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
/* The daemon never waits. */
if (autofs4_oz_mode(sbi)) {
+ if (rcu_walk)
+ return 0;
if (!d_mountpoint(dentry))
return -EISDIR;
return 0;
@@ -612,12 +595,12 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
dir->i_mtime = CURRENT_TIME;
- spin_lock(&autofs4_lock);
- autofs4_add_expiring(dentry);
+ spin_lock(&sbi->lookup_lock);
+ __autofs4_add_expiring(dentry);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
return 0;
}
@@ -629,7 +612,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
* set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves
* of the directory tree. There is no need to clear the automount flag
* following a mount or restore it after an expire because these mounts
- * are always covered. However, it is neccessary to ensure that these
+ * are always covered. However, it is necessary to ensure that these
* flags are clear on non-empty directories to avoid unnecessary calls
* during path walks.
*/
@@ -686,20 +669,17 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
if (!autofs4_oz_mode(sbi))
return -EACCES;
- spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&autofs4_lock);
return -ENOTEMPTY;
}
__autofs4_add_expiring(dentry);
- spin_unlock(&sbi->lookup_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->lookup_lock);
if (sbi->version < 5)
autofs_clear_leaf_automount_flags(dentry);
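
The fs/autofs4 hunks above all serve one change: the global autofs4_lock is gone, replaced by the per-superblock sbi->lookup_lock (and sbi->fs_lock in waitq.c). The nesting the patch settles on is lookup_lock outermost, d_lock innermost, as in this illustrative sketch (not part of the patch; struct autofs_sb_info comes from fs/autofs4/autofs_i.h):

#include <linux/spinlock.h>
#include <linux/dcache.h>
#include "autofs_i.h"	/* struct autofs_sb_info */

/* Illustrative only: the lock order used by the lookup/expire list walks. */
static void demo_list_walk(struct autofs_sb_info *sbi, struct dentry *dentry)
{
	spin_lock(&sbi->lookup_lock);	/* per-super list lock, taken first */
	spin_lock(&dentry->d_lock);	/* per-dentry lock, nested inside */
	/* inspect or move entries on sbi->active_list / sbi->expiring_list */
	spin_unlock(&dentry->d_lock);
	spin_unlock(&sbi->lookup_lock);
}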
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 56010056b2e6..25435987d6ae 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -197,12 +197,12 @@ rename_retry:
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
- spin_lock(&autofs4_lock);
+ spin_lock(&sbi->fs_lock);
for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
len += tmp->d_name.len + 1;
if (!len || --len > NAME_MAX) {
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->fs_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
@@ -218,7 +218,7 @@ rename_retry:
p -= tmp->d_name.len;
strncpy(p, tmp->d_name.name, tmp->d_name.len);
}
- spin_unlock(&autofs4_lock);
+ spin_unlock(&sbi->fs_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
diff --git a/fs/befs/ChangeLog b/fs/befs/ChangeLog
index ce8c787916be..75a461cfaca6 100644
--- a/fs/befs/ChangeLog
+++ b/fs/befs/ChangeLog
@@ -24,7 +24,7 @@ Version 0.9 (2002-03-14)
Version 0.64 (2002-02-07)
==========
-* Did the string comparision really right this time (btree.c) [WD]
+* Did the string comparison really right this time (btree.c) [WD]
* Fixed up some places where I assumed that a long int could hold
a pointer value. (btree.c) [WD]
@@ -114,7 +114,7 @@ Version 0.6 (2001-12-15)
More flexible. Will soon be controllable at mount time
(see TODO). [WD]
-* Rewrote datastream positon lookups.
+* Rewrote datastream position lookups.
(datastream.c) [WD]
* Moved the TODO list to its own file.
@@ -150,7 +150,7 @@ Version 0.50 (2001-11-13)
* Anton also told me that the blocksize is not allowed to be larger than
the page size in linux, which is 4k i386. Oops. Added a test for
(blocksize > PAGE_SIZE), and refuse to mount in that case. What this
- practicaly means is that 8k blocksize volumes won't work without a major
+ practically means is that 8k blocksize volumes won't work without a major
restructuring of the driver (or an alpha or other 64bit hardware). [WD]
* Cleaned up the befs_count_blocks() function. Much smarter now.
@@ -183,7 +183,7 @@ Version 0.45 (2001-10-29)
structures into the generic pointer fields of the public structures
with kmalloc(). put_super and put_inode free them. This allows us not
to have to touch the definitions of the public structures in
- include/linux/fs.h. Also, befs_inode_info is huge (becuase of the
+ include/linux/fs.h. Also, befs_inode_info is huge (because of the
symlink string). (super.c, inode.c, befs_fs.h) [WD]
* Fixed a thinko that was corrupting file reads after the first block_run
@@ -404,7 +404,7 @@ Version 0.4 (2001-10-28)
* Fixed compile errors on 2.4.1 kernel (WD)
Resolve rejected patches
- Accomodate changed NLS interface (util.h)
+ Accommodate changed NLS interface (util.h)
Needed to include <linux/slab.h> in most files
Makefile changes
fs/Config.in changes
diff --git a/fs/befs/befs_fs_types.h b/fs/befs/befs_fs_types.h
index 7893eaa1e58c..eb557d9dc8be 100644
--- a/fs/befs/befs_fs_types.h
+++ b/fs/befs/befs_fs_types.h
@@ -234,7 +234,7 @@ typedef struct {
} PACKED befs_btree_super;
/*
- * Header stucture of each btree node
+ * Header structure of each btree node
*/
typedef struct {
fs64 left;
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 4202db7496cb..a66c9b1136e0 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -5,7 +5,7 @@
*
* Licensed under the GNU GPL. See the file COPYING for details.
*
- * 2002-02-05: Sergey S. Kostyliov added binary search withing
+ * 2002-02-05: Sergey S. Kostyliov added binary search within
* btree nodes.
*
* Many thanks to:
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index b1d0c794747b..54b8c28bebc8 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = {
static const struct address_space_operations befs_aops = {
.readpage = befs_readpage,
- .sync_page = block_sync_page,
.bmap = befs_bmap,
};
@@ -735,7 +734,7 @@ parse_options(char *options, befs_mount_options * opts)
/* This function has the responsibility of getting the
* filesystem ready for unmounting.
- * Basicly, we free everything that we allocated in
+ * Basically, we free everything that we allocated in
* befs_read_inode
*/
static void
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index eb67edd0f8ea..f20e8a71062f 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations bfs_aops = {
.readpage = bfs_readpage,
.writepage = bfs_writepage,
- .sync_page = block_sync_page,
.write_begin = bfs_write_begin,
.write_end = generic_write_end,
.bmap = bfs_bmap,
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d5b640ba6cb1..f34078d702d3 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -570,7 +570,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long elf_entry;
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
- unsigned long reloc_func_desc = 0;
+ unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
unsigned long def_flags = 0;
struct {
@@ -1906,7 +1906,7 @@ static int elf_core_dump(struct coredump_params *cprm)
segs = current->mm->map_count;
segs += elf_core_extra_phdrs();
- gate_vma = get_gate_vma(current);
+ gate_vma = get_gate_vma(current->mm);
if (gate_vma != NULL)
segs++;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 811384bec8de..397d3057d336 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -717,7 +717,7 @@ static int load_flat_file(struct linux_binprm * bprm,
* help simplify all this mumbo jumbo
*
* We've got two different sections of relocation entries.
- * The first is the GOT which resides at the begining of the data segment
+ * The first is the GOT which resides at the beginning of the data segment
* and is terminated with a -1. This one can be relocated in place.
* The second is the extra relocation entries tacked after the image's
* data segment. These require a little more processing as the entry is
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index e49cce234c65..9c5e6b2cd11a 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -761,6 +761,9 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
+ if (bs->bio_integrity_pool)
+ return 0;
+
bs->bio_integrity_pool =
mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
diff --git a/fs/bio.c b/fs/bio.c
index 4bd454fa844e..840a0d755248 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -43,7 +43,7 @@ static mempool_t *bio_split_pool __read_mostly;
* unsigned short
*/
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
@@ -111,7 +111,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
if (!slab)
goto out_unlock;
- printk("bio: create slab <%s> at %d\n", bslab->name, entry);
+ printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
bslab->slab = slab;
bslab->slab_ref = 1;
bslab->slab_size = sz;
@@ -1436,7 +1436,7 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
* preferred way to end I/O on a bio; it takes care of clearing
* BIO_UPTODATE on error. @error is 0 on success, and one of the
* established -Exxxx (-EIO, for instance) error values in case
- * something went wrong. Noone should call bi_end_io() directly on a
+ * something went wrong. No one should call bi_end_io() directly on a
* bio unless they own it and thus know that it has an end_io
* function.
**/
@@ -1636,9 +1636,6 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
if (!bs->bio_pool)
goto bad;
- if (bioset_integrity_create(bs, pool_size))
- goto bad;
-
if (!biovec_create_pools(bs, pool_size))
return bs;
@@ -1656,12 +1653,10 @@ static void __init biovec_init_slabs(void)
int size;
struct biovec_slab *bvs = bvec_slabs + i;
-#ifndef CONFIG_BLK_DEV_INTEGRITY
if (bvs->nr_vecs <= BIO_INLINE_VECS) {
bvs->slab = NULL;
continue;
}
-#endif
size = bvs->nr_vecs * sizeof(struct bio_vec);
bvs->slab = kmem_cache_create(bvs->name, size, 0,
@@ -1684,6 +1679,9 @@ static int __init init_bio(void)
if (!fs_bio_set)
panic("bio: can't allocate bios\n");
+ if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
+ panic("bio: can't create integrity pool\n");
+
bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
sizeof(struct bio_pair));
if (!bio_split_pool)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 889287019599..5147bdd3b8e1 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -55,11 +55,13 @@ EXPORT_SYMBOL(I_BDEV);
static void bdev_inode_switch_bdi(struct inode *inode,
struct backing_dev_info *dst)
{
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
inode->i_data.backing_dev_info = dst;
if (inode->i_state & I_DIRTY)
list_move(&inode->i_wb_list, &dst->wb.b_dirty);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
}
static sector_t max_block(struct block_device *bdev)
@@ -651,7 +653,7 @@ void bd_forget(struct inode *inode)
* @whole: whole block device containing @bdev, may equal @bdev
* @holder: holder trying to claim @bdev
*
- * Test whther @bdev can be claimed by @holder.
+ * Test whether @bdev can be claimed by @holder.
*
* CONTEXT:
* spin_lock(&bdev_lock).
@@ -1087,6 +1089,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (!disk)
goto out;
+ disk_block_events(disk);
mutex_lock_nested(&bdev->bd_mutex, for_part);
if (!bdev->bd_openers) {
bdev->bd_disk = disk;
@@ -1108,10 +1111,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
*/
disk_put_part(bdev->bd_part);
bdev->bd_part = NULL;
- module_put(disk->fops->owner);
- put_disk(disk);
bdev->bd_disk = NULL;
mutex_unlock(&bdev->bd_mutex);
+ disk_unblock_events(disk);
+ module_put(disk->fops->owner);
+ put_disk(disk);
goto restart;
}
if (ret)
@@ -1148,9 +1152,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
}
} else {
- module_put(disk->fops->owner);
- put_disk(disk);
- disk = NULL;
if (bdev->bd_contains == bdev) {
if (bdev->bd_disk->fops->open) {
ret = bdev->bd_disk->fops->open(bdev, mode);
@@ -1160,11 +1161,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (bdev->bd_invalidated)
rescan_partitions(bdev->bd_disk, bdev);
}
+ /* only one opener holds refs to the module and disk */
+ module_put(disk->fops->owner);
+ put_disk(disk);
}
bdev->bd_openers++;
if (for_part)
bdev->bd_part_count++;
mutex_unlock(&bdev->bd_mutex);
+ disk_unblock_events(disk);
return 0;
out_clear:
@@ -1177,10 +1182,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_contains = NULL;
out_unlock_bdev:
mutex_unlock(&bdev->bd_mutex);
- out:
- if (disk)
- module_put(disk->fops->owner);
+ disk_unblock_events(disk);
+ module_put(disk->fops->owner);
put_disk(disk);
+ out:
bdput(bdev);
return ret;
@@ -1446,14 +1451,13 @@ int blkdev_put(struct block_device *bdev, fmode_t mode)
if (bdev_free) {
if (bdev->bd_write_holder) {
disk_unblock_events(bdev->bd_disk);
- bdev->bd_write_holder = false;
- } else
disk_check_events(bdev->bd_disk);
+ bdev->bd_write_holder = false;
+ }
}
mutex_unlock(&bdev->bd_mutex);
- } else
- disk_check_events(bdev->bd_disk);
+ }
return __blkdev_put(bdev, mode, 0);
}
@@ -1527,7 +1531,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
static const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.writepage = blkdev_writepage,
- .sync_page = block_sync_page,
.write_begin = blkdev_write_begin,
.write_end = blkdev_write_end,
.writepages = generic_writepages,
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 9c949348510b..de34bfad9ec3 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -170,7 +170,7 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
int ret;
struct posix_acl *acl = NULL;
- if (!is_owner_or_cap(dentry->d_inode))
+ if (!inode_owner_or_capable(dentry->d_inode))
return -EPERM;
if (!IS_POSIXACL(dentry->d_inode))
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ccc991c542df..57c3bb2884ce 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -136,9 +136,8 @@ struct btrfs_inode {
* items we think we'll end up using, and reserved_extents is the number
* of extent items we've reserved metadata for.
*/
- spinlock_t accounting_lock;
atomic_t outstanding_extents;
- int reserved_extents;
+ atomic_t reserved_extents;
/*
* ordered_data_close is set by truncate when a file that used
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 4d2110eafe29..41d1d7c70e29 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -340,6 +340,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+ if (!cb)
+ return -ENOMEM;
atomic_set(&cb->pending_bios, 0);
cb->errors = 0;
cb->inode = inode;
@@ -354,6 +356,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+ if (!bio) {
+ kfree(cb);
+ return -ENOMEM;
+ }
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
atomic_inc(&cb->pending_bios);
@@ -657,8 +663,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
atomic_inc(&cb->pending_bios);
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- btrfs_lookup_bio_sums(root, inode, comp_bio,
- sums);
+ ret = btrfs_lookup_bio_sums(root, inode,
+ comp_bio, sums);
+ BUG_ON(ret);
}
sums += (comp_bio->bi_size + root->sectorsize - 1) /
root->sectorsize;
@@ -683,8 +690,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
BUG_ON(ret);
- if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
- btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+ BUG_ON(ret);
+ }
ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
BUG_ON(ret);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b5baff0dccfe..84d7ca1fe0ba 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -147,10 +147,11 @@ noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
struct extent_buffer *eb;
- spin_lock(&root->node_lock);
- eb = root->node;
+
+ rcu_read_lock();
+ eb = rcu_dereference(root->node);
extent_buffer_get(eb);
- spin_unlock(&root->node_lock);
+ rcu_read_unlock();
return eb;
}
@@ -165,14 +166,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
while (1) {
eb = btrfs_root_node(root);
btrfs_tree_lock(eb);
-
- spin_lock(&root->node_lock);
- if (eb == root->node) {
- spin_unlock(&root->node_lock);
+ if (eb == root->node)
break;
- }
- spin_unlock(&root->node_lock);
-
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
}
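
The ctree.c hunks in this file convert root->node from spinlock-protected to RCU-protected. In isolation the pattern looks like the following sketch (illustrative names, not btrfs code): readers dereference the pointer and take their reference inside an RCU read-side section, and writers publish replacements with rcu_assign_pointer().

#include <linux/rcupdate.h>

struct demo_node;			/* opaque; only the pointer matters here */

struct demo_root {
	struct demo_node __rcu *node;	/* published pointer, RCU-protected */
};

/* Reader side, mirroring btrfs_root_node(): pin the node under RCU. */
static struct demo_node *demo_get_node(struct demo_root *root)
{
	struct demo_node *n;

	rcu_read_lock();
	n = rcu_dereference(root->node);
	/* the real code takes a reference (extent_buffer_get) right here,
	 * before dropping the RCU read lock, so 'n' stays valid afterwards */
	rcu_read_unlock();
	return n;
}

/* Writer side, mirroring __btrfs_cow_block(): publish the replacement. */
static void demo_set_node(struct demo_root *root, struct demo_node *new)
{
	rcu_assign_pointer(root->node, new);
}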
@@ -458,10 +453,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
parent_start = 0;
- spin_lock(&root->node_lock);
- root->node = cow;
extent_buffer_get(cow);
- spin_unlock(&root->node_lock);
+ rcu_assign_pointer(root->node, cow);
btrfs_free_tree_block(trans, root, buf, parent_start,
last_ref);
@@ -542,6 +535,9 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
ret = __btrfs_cow_block(trans, root, buf, parent,
parent_slot, cow_ret, search_start, 0);
+
+ trace_btrfs_cow_block(root, buf, *cow_ret);
+
return ret;
}
@@ -686,6 +682,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
if (!cur) {
cur = read_tree_block(root, blocknr,
blocksize, gen);
+ if (!cur)
+ return -EIO;
} else if (!uptodate) {
btrfs_read_buffer(cur, gen);
}
@@ -732,122 +730,6 @@ static inline unsigned int leaf_data_end(struct btrfs_root *root,
return btrfs_item_offset_nr(leaf, nr - 1);
}
-/*
- * extra debugging checks to make sure all the items in a key are
- * well formed and in the proper order
- */
-static int check_node(struct btrfs_root *root, struct btrfs_path *path,
- int level)
-{
- struct extent_buffer *parent = NULL;
- struct extent_buffer *node = path->nodes[level];
- struct btrfs_disk_key parent_key;
- struct btrfs_disk_key node_key;
- int parent_slot;
- int slot;
- struct btrfs_key cpukey;
- u32 nritems = btrfs_header_nritems(node);
-
- if (path->nodes[level + 1])
- parent = path->nodes[level + 1];
-
- slot = path->slots[level];
- BUG_ON(nritems == 0);
- if (parent) {
- parent_slot = path->slots[level + 1];
- btrfs_node_key(parent, &parent_key, parent_slot);
- btrfs_node_key(node, &node_key, 0);
- BUG_ON(memcmp(&parent_key, &node_key,
- sizeof(struct btrfs_disk_key)));
- BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
- btrfs_header_bytenr(node));
- }
- BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
- if (slot != 0) {
- btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
- btrfs_node_key(node, &node_key, slot);
- BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
- }
- if (slot < nritems - 1) {
- btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
- btrfs_node_key(node, &node_key, slot);
- BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
- }
- return 0;
-}
-
-/*
- * extra checking to make sure all the items in a leaf are
- * well formed and in the proper order
- */
-static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
- int level)
-{
- struct extent_buffer *leaf = path->nodes[level];
- struct extent_buffer *parent = NULL;
- int parent_slot;
- struct btrfs_key cpukey;
- struct btrfs_disk_key parent_key;
- struct btrfs_disk_key leaf_key;
- int slot = path->slots[0];
-
- u32 nritems = btrfs_header_nritems(leaf);
-
- if (path->nodes[level + 1])
- parent = path->nodes[level + 1];
-
- if (nritems == 0)
- return 0;
-
- if (parent) {
- parent_slot = path->slots[level + 1];
- btrfs_node_key(parent, &parent_key, parent_slot);
- btrfs_item_key(leaf, &leaf_key, 0);
-
- BUG_ON(memcmp(&parent_key, &leaf_key,
- sizeof(struct btrfs_disk_key)));
- BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
- btrfs_header_bytenr(leaf));
- }
- if (slot != 0 && slot < nritems - 1) {
- btrfs_item_key(leaf, &leaf_key, slot);
- btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
- if (comp_keys(&leaf_key, &cpukey) <= 0) {
- btrfs_print_leaf(root, leaf);
- printk(KERN_CRIT "slot %d offset bad key\n", slot);
- BUG_ON(1);
- }
- if (btrfs_item_offset_nr(leaf, slot - 1) !=
- btrfs_item_end_nr(leaf, slot)) {
- btrfs_print_leaf(root, leaf);
- printk(KERN_CRIT "slot %d offset bad\n", slot);
- BUG_ON(1);
- }
- }
- if (slot < nritems - 1) {
- btrfs_item_key(leaf, &leaf_key, slot);
- btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
- BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
- if (btrfs_item_offset_nr(leaf, slot) !=
- btrfs_item_end_nr(leaf, slot + 1)) {
- btrfs_print_leaf(root, leaf);
- printk(KERN_CRIT "slot %d offset bad\n", slot);
- BUG_ON(1);
- }
- }
- BUG_ON(btrfs_item_offset_nr(leaf, 0) +
- btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
- return 0;
-}
-
-static noinline int check_block(struct btrfs_root *root,
- struct btrfs_path *path, int level)
-{
- return 0;
- if (level == 0)
- return check_leaf(root, path, level);
- return check_node(root, path, level);
-}
/*
* search for key in the extent_buffer. The items start at offset p,
@@ -1046,9 +928,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto enospc;
}
- spin_lock(&root->node_lock);
- root->node = child;
- spin_unlock(&root->node_lock);
+ rcu_assign_pointer(root->node, child);
add_root_to_dirty_list(root);
btrfs_tree_unlock(child);
@@ -1188,7 +1068,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
}
/* double check we haven't messed things up */
- check_block(root, path, level);
if (orig_ptr !=
btrfs_node_blockptr(path->nodes[level], path->slots[level]))
BUG();
@@ -1798,12 +1677,6 @@ cow_done:
if (!cow)
btrfs_unlock_up_safe(p, level + 1);
- ret = check_block(root, p, level);
- if (ret) {
- ret = -1;
- goto done;
- }
-
ret = bin_search(b, key, level, &slot);
if (level != 0) {
@@ -2130,10 +2003,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(c);
- spin_lock(&root->node_lock);
old = root->node;
- root->node = c;
- spin_unlock(&root->node_lock);
+ rcu_assign_pointer(root->node, c);
/* the super has an extra ref to root->node */
free_extent_buffer(old);
@@ -3840,7 +3711,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
unsigned long ptr;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
if (!ret) {
leaf = path->nodes[0];
@@ -4217,6 +4089,7 @@ find_next_key:
}
btrfs_set_path_blocking(path);
cur = read_node_slot(root, cur, slot);
+ BUG_ON(!cur);
btrfs_tree_lock(cur);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7f78cc78fdd0..3458b5725540 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -28,6 +28,7 @@
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/kobject.h>
+#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include "extent_io.h"
#include "extent_map.h"
@@ -40,6 +41,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
+extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;
#define BTRFS_MAGIC "_BHRfS_M"
@@ -782,9 +784,6 @@ struct btrfs_free_cluster {
/* first extent starting offset */
u64 window_start;
- /* if this cluster simply points at a bitmap in the block group */
- bool points_to_bitmap;
-
struct btrfs_block_group_cache *block_group;
/*
* when a cluster is allocated from a block group, we put the
@@ -1283,6 +1282,9 @@ struct btrfs_root {
#define BTRFS_INODE_NODUMP (1 << 8)
#define BTRFS_INODE_NOATIME (1 << 9)
#define BTRFS_INODE_DIRSYNC (1 << 10)
+#define BTRFS_INODE_COMPRESS (1 << 11)
+
+#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
/* some macros to generate set/get funcs for the struct fields. This
* assumes there is a lefoo_to_cpu for every type, so let's make a simple
@@ -2157,6 +2159,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
u64 root_objectid, u64 owner, u64 offset);
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int reserve, int sinfo);
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
@@ -2227,10 +2231,12 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
u64 start, u64 end);
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes);
+ u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type);
+int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
+int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int level, int *slot);
@@ -2355,6 +2361,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node);
+void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
+
/* dir-item.c */
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
@@ -2392,6 +2400,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
+int verify_dir_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_dir_item *dir_item);
/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -2528,7 +2539,7 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
-void btrfs_orphan_cleanup(struct btrfs_root *root);
+int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending,
u64 *bytes_to_reserve);
@@ -2536,7 +2547,7 @@ void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_cont_expand(struct inode *inode, loff_t size);
+int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
int btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index e807b143b857..bce28f653899 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -483,6 +483,8 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&head_ref->cluster);
mutex_init(&head_ref->mutex);
+ trace_btrfs_delayed_ref_head(ref, head_ref, action);
+
existing = tree_insert(&delayed_refs->root, &ref->rb_node);
if (existing) {
@@ -537,6 +539,8 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
}
full_ref->level = level;
+ trace_btrfs_delayed_tree_ref(ref, full_ref, action);
+
existing = tree_insert(&delayed_refs->root, &ref->rb_node);
if (existing) {
@@ -591,6 +595,8 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
full_ref->objectid = owner;
full_ref->offset = offset;
+ trace_btrfs_delayed_data_ref(ref, full_ref, action);
+
existing = tree_insert(&delayed_refs->root, &ref->rb_node);
if (existing) {
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index f0cad5ae5be7..c62f02f6ae69 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -151,7 +151,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
ret = PTR_ERR(dir_item);
if (ret == -EEXIST)
goto second_insert;
- goto out;
+ goto out_free;
}
leaf = path->nodes[0];
@@ -170,7 +170,7 @@ second_insert:
/* FIXME, use some real flag for selecting the extra index */
if (root == root->fs_info->tree_root) {
ret = 0;
- goto out;
+ goto out_free;
}
btrfs_release_path(root, path);
@@ -180,7 +180,7 @@ second_insert:
name, name_len);
if (IS_ERR(dir_item)) {
ret2 = PTR_ERR(dir_item);
- goto out;
+ goto out_free;
}
leaf = path->nodes[0];
btrfs_cpu_key_to_disk(&disk_key, location);
@@ -192,7 +192,9 @@ second_insert:
name_ptr = (unsigned long)(dir_item + 1);
write_extent_buffer(leaf, name, name_ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
-out:
+
+out_free:
+
btrfs_free_path(path);
if (ret)
return ret;
@@ -377,6 +379,9 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
+ if (verify_dir_item(root, leaf, dir_item))
+ return NULL;
+
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
while (cur < total_len) {
this_len = sizeof(*dir_item) +
@@ -429,3 +434,35 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
}
return ret;
}
+
+int verify_dir_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_dir_item *dir_item)
+{
+ u16 namelen = BTRFS_NAME_LEN;
+ u8 type = btrfs_dir_type(leaf, dir_item);
+
+ if (type >= BTRFS_FT_MAX) {
+ printk(KERN_CRIT "btrfs: invalid dir item type: %d\n",
+ (int)type);
+ return 1;
+ }
+
+ if (type == BTRFS_FT_XATTR)
+ namelen = XATTR_NAME_MAX;
+
+ if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
+ printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n",
+ (unsigned)btrfs_dir_data_len(leaf, dir_item));
+ return 1;
+ }
+
+ /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
+ if (btrfs_dir_data_len(leaf, dir_item) > BTRFS_MAX_XATTR_SIZE(root)) {
+ printk(KERN_CRIT "btrfs: invalid dir item data len: %u\n",
+ (unsigned)btrfs_dir_data_len(leaf, dir_item));
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 100b07f021b4..8f1d44ba332f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -29,6 +29,7 @@
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
+#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -198,7 +199,7 @@ u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
void btrfs_csum_final(u32 crc, char *result)
{
- *(__le32 *)result = ~cpu_to_le32(crc);
+ put_unaligned_le32(~crc, result);
}
/*
@@ -323,6 +324,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
int num_copies = 0;
int mirror_num = 0;
+ clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
while (1) {
ret = read_extent_buffer_pages(io_tree, eb, start, 1,
@@ -331,6 +333,14 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
!verify_parent_transid(io_tree, eb, parent_transid))
return ret;
+ /*
+ * This buffer's crc is fine, but its contents are corrupted, so
+ * there is no reason to read the other copies; they won't be
+ * any less wrong.
+ */
+ if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
+ return ret;
+
num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
eb->start, eb->len);
if (num_copies == 1)
@@ -419,6 +429,73 @@ static int check_tree_block_fsid(struct btrfs_root *root,
return ret;
}
+#define CORRUPT(reason, eb, root, slot) \
+ printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
+ "root=%llu, slot=%d\n", reason, \
+ (unsigned long long)btrfs_header_bytenr(eb), \
+ (unsigned long long)root->objectid, slot)
+
+static noinline int check_leaf(struct btrfs_root *root,
+ struct extent_buffer *leaf)
+{
+ struct btrfs_key key;
+ struct btrfs_key leaf_key;
+ u32 nritems = btrfs_header_nritems(leaf);
+ int slot;
+
+ if (nritems == 0)
+ return 0;
+
+ /* Check the 0 item */
+ if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
+ BTRFS_LEAF_DATA_SIZE(root)) {
+ CORRUPT("invalid item offset size pair", leaf, root, 0);
+ return -EIO;
+ }
+
+ /*
+ * Check to make sure each item's keys are in the correct order and their
+ * offsets make sense. We only have to loop through nritems-1 because
+ * we check the current slot against the next slot, which verifies the
+ * next slot's offset+size makes sense and that the current slot's
+ * offset is correct.
+ */
+ for (slot = 0; slot < nritems - 1; slot++) {
+ btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
+ btrfs_item_key_to_cpu(leaf, &key, slot + 1);
+
+ /* Make sure the keys are in the right order */
+ if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
+ CORRUPT("bad key order", leaf, root, slot);
+ return -EIO;
+ }
+
+ /*
+ * Make sure the offset and ends are right, remember that the
+ * item data starts at the end of the leaf and grows towards the
+ * front.
+ */
+ if (btrfs_item_offset_nr(leaf, slot) !=
+ btrfs_item_end_nr(leaf, slot + 1)) {
+ CORRUPT("slot offset bad", leaf, root, slot);
+ return -EIO;
+ }
+
+ /*
+ * Check to make sure that we don't point outside of the leaf,
+ * just in case all the items are consistent with each other, but
+ * all point outside of the leaf.
+ */
+ if (btrfs_item_end_nr(leaf, slot) >
+ BTRFS_LEAF_DATA_SIZE(root)) {
+ CORRUPT("slot end outside of leaf", leaf, root, slot);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
@@ -485,8 +562,20 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
btrfs_set_buffer_lockdep_class(eb, found_level);
ret = csum_tree_block(root, eb, 1);
- if (ret)
+ if (ret) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /*
+ * If this is a leaf block and it is corrupt, set the corrupt bit so
+ * that we don't try and read the other copies of this block, just
+ * return -EIO.
+ */
+ if (found_level == 0 && check_leaf(root, eb)) {
+ set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = -EIO;
+ }
end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
end = eb->start + end - 1;
@@ -847,7 +936,6 @@ static const struct address_space_operations btree_aops = {
.writepages = btree_writepages,
.releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage,
- .sync_page = block_sync_page,
#ifdef CONFIG_MIGRATION
.migratepage = btree_migratepage,
#endif
@@ -1160,7 +1248,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
root, fs_info, location->objectid);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ kfree(root);
+ return ERR_PTR(-ENOMEM);
+ }
ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
if (ret == 0) {
l = path->nodes[0];
@@ -1184,8 +1275,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
root->commit_root = btrfs_root_node(root);
BUG_ON(!root->node);
out:
- if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
+ if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
root->ref_cows = 1;
+ btrfs_check_and_init_root_item(&root->root_item);
+ }
return root;
}
@@ -1331,82 +1424,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
}
/*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
- struct btrfs_device *device;
- struct btrfs_fs_info *info;
-
- info = (struct btrfs_fs_info *)bdi->unplug_io_data;
- list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
- if (!device->bdev)
- continue;
-
- bdi = blk_get_backing_dev_info(device->bdev);
- if (bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, page);
- }
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
- struct inode *inode;
- struct extent_map_tree *em_tree;
- struct extent_map *em;
- struct address_space *mapping;
- u64 offset;
-
- /* the generic O_DIRECT read code does this */
- if (1 || !page) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- /*
- * page->mapping may change at any time. Get a consistent copy
- * and use that for everything below
- */
- smp_mb();
- mapping = page->mapping;
- if (!mapping)
- return;
-
- inode = mapping->host;
-
- /*
- * don't do the expensive searching for a small number of
- * devices
- */
- if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- offset = page_offset(page);
-
- em_tree = &BTRFS_I(inode)->extent_tree;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
- read_unlock(&em_tree->lock);
- if (!em) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- free_extent_map(em);
- __unplug_io_fn(bdi, page);
- return;
- }
- offset = offset - em->start;
- btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
- em->block_start + offset, page);
- free_extent_map(em);
-}
-
-/*
* If this fails, caller must call bdi_destroy() to get rid of the
* bdi again.
*/
@@ -1420,8 +1437,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
return err;
bdi->ra_pages = default_backing_dev_info.ra_pages;
- bdi->unplug_io_fn = btrfs_unplug_io_fn;
- bdi->unplug_io_data = info;
bdi->congested_fn = btrfs_congested_fn;
bdi->congested_data = info;
return 0;
@@ -1632,6 +1647,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
goto fail_bdi;
}
+ fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS;
+
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
@@ -1762,6 +1779,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+ /*
+ * In the long term, we'll store the compression type in the super
+ * block, and it'll be used for per file compression control.
+ */
+ fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
+
ret = btrfs_parse_options(tree_root, options);
if (ret) {
err = ret;
@@ -1967,6 +1990,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->metadata_alloc_profile = (u64)-1;
fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
+ ret = btrfs_init_space_info(fs_info);
+ if (ret) {
+ printk(KERN_ERR "Failed to initial space info: %d\n", ret);
+ goto fail_block_groups;
+ }
+
ret = btrfs_read_block_groups(extent_root);
if (ret) {
printk(KERN_ERR "Failed to read block groups: %d\n", ret);
@@ -2058,9 +2087,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (!(sb->s_flags & MS_RDONLY)) {
down_read(&fs_info->cleanup_work_sem);
- btrfs_orphan_cleanup(fs_info->fs_root);
- btrfs_orphan_cleanup(fs_info->tree_root);
+ err = btrfs_orphan_cleanup(fs_info->fs_root);
+ if (!err)
+ err = btrfs_orphan_cleanup(fs_info->tree_root);
up_read(&fs_info->cleanup_work_sem);
+ if (err) {
+ close_ctree(tree_root);
+ return ERR_PTR(err);
+ }
}
return tree_root;
@@ -2435,8 +2469,12 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
root_objectid = gang[ret - 1]->root_key.objectid + 1;
for (i = 0; i < ret; i++) {
+ int err;
+
root_objectid = gang[i]->root_key.objectid;
- btrfs_orphan_cleanup(gang[i]);
+ err = btrfs_orphan_cleanup(gang[i]);
+ if (err)
+ return err;
}
root_objectid++;
}
@@ -2947,7 +2985,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
break;
/* opt_discard */
- ret = btrfs_error_discard_extent(root, start, end + 1 - start);
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_error_discard_extent(root, start,
+ end + 1 - start,
+ NULL);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
btrfs_error_unpin_extent_range(root, start, end);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b3089b5c2df..f619c3cb13b7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -36,8 +36,6 @@
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc);
-static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -442,7 +440,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
* allocate blocks for the tree root we can't do the fast caching since
* we likely hold important locks.
*/
- if (!trans->transaction->in_commit &&
+ if (trans && (!trans->transaction->in_commit) &&
(root && root != root->fs_info->tree_root)) {
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
@@ -471,7 +469,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
if (load_cache_only)
return 0;
- caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
+ caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
BUG_ON(!caching_ctl);
INIT_LIST_HEAD(&caching_ctl->list);
@@ -1740,39 +1738,45 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
return ret;
}
-static void btrfs_issue_discard(struct block_device *bdev,
+static int btrfs_issue_discard(struct block_device *bdev,
u64 start, u64 len)
{
- blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
+ return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes)
+ u64 num_bytes, u64 *actual_bytes)
{
int ret;
- u64 map_length = num_bytes;
+ u64 discarded_bytes = 0;
struct btrfs_multi_bio *multi = NULL;
- if (!btrfs_test_opt(root, DISCARD))
- return 0;
/* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
- bytenr, &map_length, &multi, 0);
+ ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
+ bytenr, &num_bytes, &multi, 0);
if (!ret) {
struct btrfs_bio_stripe *stripe = multi->stripes;
int i;
- if (map_length > num_bytes)
- map_length = num_bytes;
for (i = 0; i < multi->num_stripes; i++, stripe++) {
- btrfs_issue_discard(stripe->dev->bdev,
- stripe->physical,
- map_length);
+ ret = btrfs_issue_discard(stripe->dev->bdev,
+ stripe->physical,
+ stripe->length);
+ if (!ret)
+ discarded_bytes += stripe->length;
+ else if (ret != -EOPNOTSUPP)
+ break;
}
kfree(multi);
}
+ if (discarded_bytes && ret == -EOPNOTSUPP)
+ ret = 0;
+
+ if (actual_bytes)
+ *actual_bytes = discarded_bytes;
+
return ret;
}
@@ -3996,6 +4000,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
u64 to_reserve;
int nr_extents;
+ int reserved_extents;
int ret;
if (btrfs_transaction_in_commit(root->fs_info))
@@ -4003,25 +4008,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
num_bytes = ALIGN(num_bytes, root->sectorsize);
- spin_lock(&BTRFS_I(inode)->accounting_lock);
nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
- if (nr_extents > BTRFS_I(inode)->reserved_extents) {
- nr_extents -= BTRFS_I(inode)->reserved_extents;
+ reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+
+ if (nr_extents > reserved_extents) {
+ nr_extents -= reserved_extents;
to_reserve = calc_trans_metadata_size(root, nr_extents);
} else {
nr_extents = 0;
to_reserve = 0;
}
- spin_unlock(&BTRFS_I(inode)->accounting_lock);
+
to_reserve += calc_csum_metadata_size(inode, num_bytes);
ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
if (ret)
return ret;
- spin_lock(&BTRFS_I(inode)->accounting_lock);
- BTRFS_I(inode)->reserved_extents += nr_extents;
+ atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
atomic_inc(&BTRFS_I(inode)->outstanding_extents);
- spin_unlock(&BTRFS_I(inode)->accounting_lock);
block_rsv_add_bytes(block_rsv, to_reserve, 1);
@@ -4036,20 +4040,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 to_free;
int nr_extents;
+ int reserved_extents;
num_bytes = ALIGN(num_bytes, root->sectorsize);
atomic_dec(&BTRFS_I(inode)->outstanding_extents);
WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
- spin_lock(&BTRFS_I(inode)->accounting_lock);
- nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
- if (nr_extents < BTRFS_I(inode)->reserved_extents) {
- nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
- BTRFS_I(inode)->reserved_extents -= nr_extents;
- } else {
- nr_extents = 0;
- }
- spin_unlock(&BTRFS_I(inode)->accounting_lock);
+ reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+ do {
+ int old, new;
+
+ nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
+ if (nr_extents >= reserved_extents) {
+ nr_extents = 0;
+ break;
+ }
+ old = reserved_extents;
+ nr_extents = reserved_extents - nr_extents;
+ new = reserved_extents - nr_extents;
+ old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
+ reserved_extents, new);
+ if (likely(old == reserved_extents))
+ break;
+ reserved_extents = old;
+ } while (1);
to_free = calc_csum_metadata_size(inode, num_bytes);
if (nr_extents > 0)
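
The do/while above is the usual lock-free shrink idiom: read the counter, compute the smaller target, and let atomic_cmpxchg() detect a racing update, retrying with the fresh value. Reduced to its essentials (illustrative sketch, plain atomic_t instead of the btrfs inode fields):

#include <linux/atomic.h>

/*
 * Shrink *reserved down to 'floor' without a spinlock; returns how many
 * units were released (the nr_extents value computed in the loop above).
 */
static int demo_release_down_to(atomic_t *reserved, int floor)
{
	int cur = atomic_read(reserved);
	int old;

	while (floor < cur) {
		old = atomic_cmpxchg(reserved, cur, floor);
		if (old == cur)
			return cur - floor;	/* our update won */
		cur = old;			/* lost the race; retry */
	}
	return 0;				/* nothing above the floor */
}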
@@ -4223,8 +4237,8 @@ int btrfs_pin_extent(struct btrfs_root *root,
* update size of reserved extents. this function may return -EAGAIN
* if 'reserve' is true or 'sinfo' is false.
*/
-static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int reserve, int sinfo)
+int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int reserve, int sinfo)
{
int ret = 0;
if (sinfo) {
@@ -4363,7 +4377,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
if (ret)
break;
- ret = btrfs_discard_extent(root, start, end + 1 - start);
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_discard_extent(root, start,
+ end + 1 - start, NULL);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
unpin_extent_range(root, start, end);
@@ -4704,10 +4720,10 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
btrfs_add_free_space(cache, buf->start, buf->len);
- ret = update_reserved_bytes(cache, buf->len, 0, 0);
+ ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
if (ret == -EAGAIN) {
/* block group became read-only */
- update_reserved_bytes(cache, buf->len, 0, 1);
+ btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
goto out;
}
@@ -4744,6 +4760,11 @@ pin:
}
}
out:
+ /*
+ * Deleting the buffer, clear the corrupt flag since it doesn't matter
+ * anymore.
+ */
+ clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
btrfs_put_block_group(cache);
}
@@ -5191,7 +5212,7 @@ checks:
search_start - offset);
BUG_ON(offset > search_start);
- ret = update_reserved_bytes(block_group, num_bytes, 1,
+ ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
(data & BTRFS_BLOCK_GROUP_DATA));
if (ret == -EAGAIN) {
btrfs_add_free_space(block_group, offset, num_bytes);
@@ -5397,6 +5418,8 @@ again:
dump_space_info(sinfo, num_bytes, 1);
}
+ trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
+
return ret;
}
@@ -5412,12 +5435,15 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
return -ENOSPC;
}
- ret = btrfs_discard_extent(root, start, len);
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
- update_reserved_bytes(cache, len, 0, 1);
+ btrfs_update_reserved_bytes(cache, len, 0, 1);
btrfs_put_block_group(cache);
+ trace_btrfs_reserved_extent_free(root, start, len);
+
return ret;
}
@@ -5444,7 +5470,8 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -5614,7 +5641,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
put_caching_control(caching_ctl);
}
- ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
+ ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
BUG_ON(ret);
btrfs_put_block_group(block_group);
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
@@ -6047,6 +6074,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
if (reada && level == 1)
reada_walk_down(trans, root, wc, path);
next = read_tree_block(root, bytenr, blocksize, generation);
+ if (!next)
+ return -EIO;
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
}
@@ -6438,10 +6467,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
wc = kzalloc(sizeof(*wc), GFP_NOFS);
- BUG_ON(!wc);
+ if (!wc) {
+ btrfs_free_path(path);
+ return -ENOMEM;
+ }
btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
@@ -6899,7 +6932,11 @@ static noinline int get_new_locations(struct inode *reloc_inode,
}
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ if (exts != *extents)
+ kfree(exts);
+ return -ENOMEM;
+ }
cur_pos = extent_key->objectid - offset;
last_byte = extent_key->objectid + extent_key->offset;
@@ -6941,6 +6978,10 @@ static noinline int get_new_locations(struct inode *reloc_inode,
struct disk_extent *old = exts;
max *= 2;
exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
+ if (!exts) {
+ ret = -ENOMEM;
+ goto out;
+ }
memcpy(exts, old, sizeof(*exts) * nr);
if (old != *extents)
kfree(old);
@@ -7423,7 +7464,8 @@ static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
int ret;
new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
- BUG_ON(!new_extent);
+ if (!new_extent)
+ return -ENOMEM;
ref = btrfs_lookup_leaf_ref(root, leaf->start);
BUG_ON(!ref);
@@ -7609,7 +7651,8 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
BUG_ON(!reloc_root);
- btrfs_orphan_cleanup(reloc_root);
+ ret = btrfs_orphan_cleanup(reloc_root);
+ BUG_ON(ret);
return 0;
}
@@ -7627,7 +7670,8 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
return 0;
root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
- BUG_ON(!root_item);
+ if (!root_item)
+ return -ENOMEM;
ret = btrfs_copy_root(trans, root, root->commit_root,
&eb, BTRFS_TREE_RELOC_OBJECTID);
@@ -7653,7 +7697,7 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
&root_key);
- BUG_ON(!reloc_root);
+ BUG_ON(IS_ERR(reloc_root));
reloc_root->last_trans = trans->transid;
reloc_root->commit_root = NULL;
reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
@@ -7906,6 +7950,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
eb = read_tree_block(found_root, block_start,
block_size, 0);
+ if (!eb) {
+ ret = -EIO;
+ goto out;
+ }
btrfs_tree_lock(eb);
BUG_ON(level != btrfs_header_level(eb));
@@ -8621,6 +8669,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
BUG_ON(!block_group);
BUG_ON(!block_group->ro);
+ /*
+ * Free the reserved super bytes from this block group before
+ * removing it.
+ */
+ free_excluded_extents(root, block_group);
+
memcpy(&key, &block_group->key, sizeof(key));
if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
@@ -8724,13 +8778,84 @@ out:
return ret;
}
+int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_space_info *space_info;
+ int ret;
+
+ ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0,
+ &space_info);
+ if (ret)
+ return ret;
+
+ ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0,
+ &space_info);
+ if (ret)
+ return ret;
+
+ ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0,
+ &space_info);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
return unpin_extent_range(root, start, end);
}
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
- u64 num_bytes)
+ u64 num_bytes, u64 *actual_bytes)
+{
+ return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
+}
+
+int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
- return btrfs_discard_extent(root, bytenr, num_bytes);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_group_cache *cache = NULL;
+ u64 group_trimmed;
+ u64 start;
+ u64 end;
+ u64 trimmed = 0;
+ int ret = 0;
+
+ cache = btrfs_lookup_block_group(fs_info, range->start);
+
+ while (cache) {
+ if (cache->key.objectid >= (range->start + range->len)) {
+ btrfs_put_block_group(cache);
+ break;
+ }
+
+ start = max(range->start, cache->key.objectid);
+ end = min(range->start + range->len,
+ cache->key.objectid + cache->key.offset);
+
+ if (end - start >= range->minlen) {
+ if (!block_group_cache_done(cache)) {
+ ret = cache_block_group(cache, NULL, root, 0);
+ if (!ret)
+ wait_block_group_cache_done(cache);
+ }
+ ret = btrfs_trim_block_group(cache,
+ &group_trimmed,
+ start,
+ end,
+ range->minlen);
+
+ trimmed += group_trimmed;
+ if (ret) {
+ btrfs_put_block_group(cache);
+ break;
+ }
+ }
+
+ cache = next_block_group(fs_info->tree_root, cache);
+ }
+
+ range->len = trimmed;
+ return ret;
}
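
btrfs_trim_fs() above is the FITRIM backend: it iterates the block groups that overlap the requested range, loads the free-space cache where needed, trims free extents of at least range->minlen via btrfs_trim_block_group(), and reports the total discarded back through range->len. From userspace the standard way to reach it is the FITRIM ioctl, sketched below (needs CAP_SYS_ADMIN; fstrim(8) does the same thing):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;	/* whole filesystem */
	range.minlen = 0;	/* 0: no minimum free-extent size */
	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	/* the kernel writes back how many bytes were actually discarded */
	printf("%llu bytes trimmed\n", (unsigned long long)range.len);
	return 0;
}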
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 714adc4ac4c2..20ddb28602a8 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2188,10 +2188,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
unsigned long nr_written = 0;
if (wbc->sync_mode == WB_SYNC_ALL)
- write_flags = WRITE_SYNC_PLUG;
+ write_flags = WRITE_SYNC;
else
write_flags = WRITE;
+ trace___extent_writepage(page, inode, wbc);
+
WARN_ON(!PageLocked(page));
pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
if (page->index > end_index ||
@@ -3690,6 +3692,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
"wanted %lu %lu\n", (unsigned long long)eb->start,
eb->len, start, min_len);
WARN_ON(1);
+ return -EINVAL;
}
p = extent_buffer_page(eb, i);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 9318dfefd59c..f62c5442835d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -31,6 +31,7 @@
#define EXTENT_BUFFER_UPTODATE 0
#define EXTENT_BUFFER_BLOCKING 1
#define EXTENT_BUFFER_DIRTY 2
+#define EXTENT_BUFFER_CORRUPT 3
/* these are flags for extent_clear_unlock_delalloc */
#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2b6c12e983b3..a24a3f2fa13e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -243,7 +243,7 @@ out:
* Insert @em into @tree or perform a simple forward/backward merge with
* existing mappings. The extent_map struct passed in will be inserted
* into the tree directly, with an additional reference taken, or a
- * reference dropped if the merge attempt was successfull.
+ * reference dropped if the merge attempt was successful.
*/
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em)
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 4f19a3e1bf32..a6a9d4e8b491 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -48,7 +48,8 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
file_key.objectid = objectid;
file_key.offset = pos;
btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
@@ -169,6 +170,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
if (bio->bi_size > PAGE_CACHE_SIZE * 8)
path->reada = 2;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f447b783bb84..e621ea54a3fd 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -45,14 +45,14 @@
* and be replaced with calls into generic code.
*/
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
- int write_bytes,
+ size_t write_bytes,
struct page **prepared_pages,
struct iov_iter *i)
{
size_t copied = 0;
+ size_t total_copied = 0;
int pg = 0;
int offset = pos & (PAGE_CACHE_SIZE - 1);
- int total_copied = 0;
while (write_bytes > 0) {
size_t count = min_t(size_t,
@@ -88,9 +88,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
total_copied += copied;
/* Return to btrfs_file_aio_write to fault page */
- if (unlikely(copied == 0)) {
+ if (unlikely(copied == 0))
break;
- }
if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
offset += copied;
@@ -109,8 +108,6 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
size_t i;
for (i = 0; i < num_pages; i++) {
- if (!pages[i])
- break;
/* page checked is some magic around finding pages that
* have been modified without going through btrfs_set_page_dirty
* clear it here
@@ -130,13 +127,12 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
-static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct file *file,
- struct page **pages,
- size_t num_pages,
- loff_t pos,
- size_t write_bytes)
+static noinline int dirty_and_release_pages(struct btrfs_root *root,
+ struct file *file,
+ struct page **pages,
+ size_t num_pages,
+ loff_t pos,
+ size_t write_bytes)
{
int err = 0;
int i;
@@ -154,7 +150,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
NULL);
- BUG_ON(err);
+ if (err)
+ return err;
for (i = 0; i < num_pages; i++) {
struct page *p = pages[i];
@@ -162,13 +159,14 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
ClearPageChecked(p);
set_page_dirty(p);
}
- if (end_pos > isize) {
+
+ /*
+ * we've only changed i_size in ram, and we haven't updated
+ * the disk i_size. There is no need to log the inode
+ * at this time.
+ */
+ if (end_pos > isize)
i_size_write(inode, end_pos);
- /* we've only changed i_size in ram, and we haven't updated
- * the disk i_size. There is no need to log the inode
- * at this time.
- */
- }
return 0;
}
@@ -610,6 +608,8 @@ again:
key.offset = split;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
if (ret > 0 && path->slots[0] > 0)
path->slots[0]--;
@@ -819,12 +819,11 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
if (start_pos > inode->i_size) {
- err = btrfs_cont_expand(inode, start_pos);
+ err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
if (err)
return err;
}
- memset(pages, 0, num_pages * sizeof(struct page *));
again:
for (i = 0; i < num_pages; i++) {
pages[i] = grab_cache_page(inode->i_mapping, index + i);
@@ -896,156 +895,71 @@ fail:
}
-static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+static noinline ssize_t __btrfs_buffered_write(struct file *file,
+ struct iov_iter *i,
+ loff_t pos)
{
- struct file *file = iocb->ki_filp;
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
- struct iov_iter i;
- loff_t *ppos = &iocb->ki_pos;
- loff_t start_pos;
- ssize_t num_written = 0;
- ssize_t err = 0;
- size_t count;
- size_t ocount;
- int ret = 0;
- int nrptrs;
unsigned long first_index;
unsigned long last_index;
- int will_write;
- int buffered = 0;
- int copied = 0;
- int dirty_pages = 0;
-
- will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
- (file->f_flags & O_DIRECT));
-
- start_pos = pos;
-
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
- mutex_lock(&inode->i_mutex);
-
- err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
- if (err)
- goto out;
- count = ocount;
-
- current->backing_dev_info = inode->i_mapping->backing_dev_info;
- err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
- if (err)
- goto out;
-
- if (count == 0)
- goto out;
-
- err = file_remove_suid(file);
- if (err)
- goto out;
-
- /*
- * If BTRFS flips readonly due to some impossible error
- * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
- * although we have opened a file as writable, we have
- * to stop this write operation to ensure FS consistency.
- */
- if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- err = -EROFS;
- goto out;
- }
-
- file_update_time(file);
- BTRFS_I(inode)->sequence++;
-
- if (unlikely(file->f_flags & O_DIRECT)) {
- num_written = generic_file_direct_write(iocb, iov, &nr_segs,
- pos, ppos, count,
- ocount);
- /*
- * the generic O_DIRECT will update in-memory i_size after the
- * DIOs are done. But our endio handlers that update the on
- * disk i_size never update past the in memory i_size. So we
- * need one more update here to catch any additions to the
- * file
- */
- if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
- btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
- mark_inode_dirty(inode);
- }
-
- if (num_written < 0) {
- ret = num_written;
- num_written = 0;
- goto out;
- } else if (num_written == count) {
- /* pick up pos changes done by the generic code */
- pos = *ppos;
- goto out;
- }
- /*
- * We are going to do buffered for the rest of the range, so we
- * need to make sure to invalidate the buffered pages when we're
- * done.
- */
- buffered = 1;
- pos += num_written;
- }
+ size_t num_written = 0;
+ int nrptrs;
+ int ret = 0;
- iov_iter_init(&i, iov, nr_segs, count, num_written);
- nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
+ nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
(sizeof(struct page *)));
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* generic_write_checks can change our pos */
- start_pos = pos;
+ if (!pages)
+ return -ENOMEM;
first_index = pos >> PAGE_CACHE_SHIFT;
- last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
+ last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;
- while (iov_iter_count(&i) > 0) {
+ while (iov_iter_count(i) > 0) {
size_t offset = pos & (PAGE_CACHE_SIZE - 1);
- size_t write_bytes = min(iov_iter_count(&i),
+ size_t write_bytes = min(iov_iter_count(i),
nrptrs * (size_t)PAGE_CACHE_SIZE -
offset);
size_t num_pages = (write_bytes + offset +
PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ size_t dirty_pages;
+ size_t copied;
WARN_ON(num_pages > nrptrs);
- memset(pages, 0, sizeof(struct page *) * nrptrs);
/*
* Fault pages before locking them in prepare_pages
* to avoid recursive lock
*/
- if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
+ if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
ret = -EFAULT;
- goto out;
+ break;
}
ret = btrfs_delalloc_reserve_space(inode,
num_pages << PAGE_CACHE_SHIFT);
if (ret)
- goto out;
+ break;
+ /*
+ * This is going to set up the pages array with the number of
+ * pages we want, so we don't really need to worry about the
+ * contents of pages from loop to loop
+ */
ret = prepare_pages(root, file, pages, num_pages,
pos, first_index, last_index,
write_bytes);
if (ret) {
btrfs_delalloc_release_space(inode,
num_pages << PAGE_CACHE_SHIFT);
- goto out;
+ break;
}
copied = btrfs_copy_from_user(pos, num_pages,
- write_bytes, pages, &i);
+ write_bytes, pages, i);
/*
* if we have trouble faulting in the pages, fall
@@ -1061,6 +975,13 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
+ /*
+ * If we had a short copy we need to release the excess delalloc
+ * bytes we reserved. We need to increment outstanding_extents
+ * because btrfs_delalloc_release_space will decrement it, but
+ * we still have an outstanding extent for the chunk we actually
+ * managed to copy.
+ */
if (num_pages > dirty_pages) {
if (copied > 0)
atomic_inc(
@@ -1071,39 +992,157 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
}
if (copied > 0) {
- dirty_and_release_pages(NULL, root, file, pages,
- dirty_pages, pos, copied);
+ ret = dirty_and_release_pages(root, file, pages,
+ dirty_pages, pos,
+ copied);
+ if (ret) {
+ btrfs_delalloc_release_space(inode,
+ dirty_pages << PAGE_CACHE_SHIFT);
+ btrfs_drop_pages(pages, num_pages);
+ break;
+ }
}
btrfs_drop_pages(pages, num_pages);
- if (copied > 0) {
- if (will_write) {
- filemap_fdatawrite_range(inode->i_mapping, pos,
- pos + copied - 1);
- } else {
- balance_dirty_pages_ratelimited_nr(
- inode->i_mapping,
- dirty_pages);
- if (dirty_pages <
- (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
- btrfs_btree_balance_dirty(root, 1);
- btrfs_throttle(root);
- }
- }
+ cond_resched();
+
+ balance_dirty_pages_ratelimited_nr(inode->i_mapping,
+ dirty_pages);
+ if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
+ btrfs_btree_balance_dirty(root, 1);
+ btrfs_throttle(root);
pos += copied;
num_written += copied;
+ }
- cond_resched();
+ kfree(pages);
+
+ return num_written ? num_written : ret;
+}
+
+static ssize_t __btrfs_direct_write(struct kiocb *iocb,
+ const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos,
+ loff_t *ppos, size_t count, size_t ocount)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = fdentry(file)->d_inode;
+ struct iov_iter i;
+ ssize_t written;
+ ssize_t written_buffered;
+ loff_t endbyte;
+ int err;
+
+ written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
+ count, ocount);
+
+ /*
+ * the generic O_DIRECT will update in-memory i_size after the
+ * DIOs are done. But our endio handlers that update the on
+ * disk i_size never update past the in memory i_size. So we
+ * need one more update here to catch any additions to the
+ * file
+ */
+ if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
+ btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+ mark_inode_dirty(inode);
}
+
+ if (written < 0 || written == count)
+ return written;
+
+ pos += written;
+ count -= written;
+ iov_iter_init(&i, iov, nr_segs, count, written);
+ written_buffered = __btrfs_buffered_write(file, &i, pos);
+ if (written_buffered < 0) {
+ err = written_buffered;
+ goto out;
+ }
+ endbyte = pos + written_buffered - 1;
+ err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
+ if (err)
+ goto out;
+ written += written_buffered;
+ *ppos = pos + written_buffered;
+ invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
+ endbyte >> PAGE_CACHE_SHIFT);
out:
- mutex_unlock(&inode->i_mutex);
- if (ret)
- err = ret;
+ return written ? written : err;
+}
- kfree(pages);
- *ppos = pos;
+static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
+ const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = fdentry(file)->d_inode;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ loff_t *ppos = &iocb->ki_pos;
+ ssize_t num_written = 0;
+ ssize_t err = 0;
+ size_t count, ocount;
+
+ vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+ mutex_lock(&inode->i_mutex);
+
+ err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+ count = ocount;
+
+ current->backing_dev_info = inode->i_mapping->backing_dev_info;
+ err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+
+ if (count == 0) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+
+ err = file_remove_suid(file);
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+
+ /*
+ * If BTRFS flips readonly due to some impossible error
+ * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
+ * although we have opened a file as writable, we have
+ * to stop this write operation to ensure FS consistency.
+ */
+ if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ mutex_unlock(&inode->i_mutex);
+ err = -EROFS;
+ goto out;
+ }
+
+ file_update_time(file);
+ BTRFS_I(inode)->sequence++;
+
+ if (unlikely(file->f_flags & O_DIRECT)) {
+ num_written = __btrfs_direct_write(iocb, iov, nr_segs,
+ pos, ppos, count, ocount);
+ } else {
+ struct iov_iter i;
+
+ iov_iter_init(&i, iov, nr_segs, count, num_written);
+
+ num_written = __btrfs_buffered_write(file, &i, pos);
+ if (num_written > 0)
+ *ppos = pos + num_written;
+ }
+
+ mutex_unlock(&inode->i_mutex);
/*
* we want to make sure fsync finds this change
@@ -1118,43 +1157,12 @@ out:
* one running right now.
*/
BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
-
- if (num_written > 0 && will_write) {
- struct btrfs_trans_handle *trans;
-
- err = btrfs_wait_ordered_range(inode, start_pos, num_written);
- if (err)
+ if (num_written > 0 || num_written == -EIOCBQUEUED) {
+ err = generic_write_sync(file, pos, num_written);
+ if (err < 0 && num_written > 0)
num_written = err;
-
- if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- num_written = PTR_ERR(trans);
- goto done;
- }
- mutex_lock(&inode->i_mutex);
- ret = btrfs_log_dentry_safe(trans, root,
- file->f_dentry);
- mutex_unlock(&inode->i_mutex);
- if (ret == 0) {
- ret = btrfs_sync_log(trans, root);
- if (ret == 0)
- btrfs_end_transaction(trans, root);
- else
- btrfs_commit_transaction(trans, root);
- } else if (ret != BTRFS_NO_LOG_SYNC) {
- btrfs_commit_transaction(trans, root);
- } else {
- btrfs_end_transaction(trans, root);
- }
- }
- if (file->f_flags & O_DIRECT && buffered) {
- invalidate_mapping_pages(inode->i_mapping,
- start_pos >> PAGE_CACHE_SHIFT,
- (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
- }
}
-done:
+out:
current->backing_dev_info = NULL;
return num_written ? num_written : err;
}
@@ -1197,6 +1205,7 @@ int btrfs_sync_file(struct file *file, int datasync)
int ret = 0;
struct btrfs_trans_handle *trans;
+ trace_btrfs_sync_file(file, datasync);
/* we wait first, since the writeback may change the inode */
root->log_batch++;
@@ -1324,7 +1333,8 @@ static long btrfs_fallocate(struct file *file, int mode,
goto out;
if (alloc_start > inode->i_size) {
- ret = btrfs_cont_expand(inode, alloc_start);
+ ret = btrfs_cont_expand(inode, i_size_read(inode),
+ alloc_start);
if (ret)
goto out;
}
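
The rewritten write path now splits cleanly in two: __btrfs_buffered_write() owns the copy/dirty loop, and __btrfs_direct_write() handles O_DIRECT with the usual fall-back-to-buffered tail. A condensed sketch of that fallback control flow, with error handling elided:

    written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
                                        count, ocount);
    if (written < 0 || written == count)
            return written;                      /* fully handled as O_DIRECT */

    /* finish the tail through the page cache ... */
    pos += written;
    count -= written;
    iov_iter_init(&i, iov, nr_segs, count, written);
    written_buffered = __btrfs_buffered_write(file, &i, pos);

    /* ... then flush it and drop the now-stale cached pages */
    endbyte = pos + written_buffered - 1;
    filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
    invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
                             endbyte >> PAGE_CACHE_SHIFT);

Syncing likewise moves to generic_write_sync() after i_mutex is dropped, replacing the hand-rolled tree-log commit the old function carried.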
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index a0390657451b..f561c953205b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -24,6 +24,7 @@
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
+#include "extent_io.h"
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
@@ -81,6 +82,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
return ERR_PTR(-ENOENT);
}
+ inode->i_mapping->flags &= ~__GFP_FS;
+
spin_lock(&block_group->lock);
if (!root->fs_info->closing) {
block_group->inode = igrab(inode);
@@ -222,6 +225,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
u64 num_entries;
u64 num_bitmaps;
u64 generation;
+ u64 used = btrfs_block_group_used(&block_group->item);
u32 cur_crc = ~(u32)0;
pgoff_t index = 0;
unsigned long first_page_offset;
@@ -393,7 +397,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
break;
need_loop = 1;
- e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+ e = kmem_cache_zalloc(btrfs_free_space_cachep,
+ GFP_NOFS);
if (!e) {
kunmap(page);
unlock_page(page);
@@ -405,7 +410,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
e->bytes = le64_to_cpu(entry->bytes);
if (!e->bytes) {
kunmap(page);
- kfree(e);
+ kmem_cache_free(btrfs_free_space_cachep, e);
unlock_page(page);
page_cache_release(page);
goto free_cache;
@@ -420,7 +425,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
if (!e->bitmap) {
kunmap(page);
- kfree(e);
+ kmem_cache_free(
+ btrfs_free_space_cachep, e);
unlock_page(page);
page_cache_release(page);
goto free_cache;
@@ -465,6 +471,17 @@ next:
index++;
}
+ spin_lock(&block_group->tree_lock);
+ if (block_group->free_space != (block_group->key.offset - used -
+ block_group->bytes_super)) {
+ spin_unlock(&block_group->tree_lock);
+ printk(KERN_ERR "block group %llu has the wrong amount of free "
+ "space\n", block_group->key.objectid);
+ ret = 0;
+ goto free_cache;
+ }
+ spin_unlock(&block_group->tree_lock);
+
ret = 1;
out:
kfree(checksums);
@@ -493,8 +510,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
struct list_head *pos, *n;
struct page *page;
struct extent_state *cached_state = NULL;
+ struct btrfs_free_cluster *cluster = NULL;
+ struct extent_io_tree *unpin = NULL;
struct list_head bitmap_list;
struct btrfs_key key;
+ u64 start, end, len;
u64 bytes = 0;
u32 *crc, *checksums;
pgoff_t index = 0, last_index = 0;
@@ -503,6 +523,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
int entries = 0;
int bitmaps = 0;
int ret = 0;
+ bool next_page = false;
root = root->fs_info->tree_root;
@@ -549,6 +570,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
*/
first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
+ /* Get the cluster for this block_group if it exists */
+ if (!list_empty(&block_group->cluster_list))
+ cluster = list_entry(block_group->cluster_list.next,
+ struct btrfs_free_cluster,
+ block_group_list);
+
+ /*
+ * We shouldn't have switched the pinned extents yet so this is the
+ * right one
+ */
+ unpin = root->fs_info->pinned_extents;
+
/*
* Lock all pages first so we can lock the extent safely.
*
@@ -578,6 +611,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
0, &cached_state, GFP_NOFS);
+ /*
+ * When searching for pinned extents, we need to start at our start
+ * offset.
+ */
+ start = block_group->key.objectid;
+
/* Write out the extent entries */
do {
struct btrfs_free_space_entry *entry;
@@ -585,6 +624,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
unsigned long offset = 0;
unsigned long start_offset = 0;
+ next_page = false;
+
if (index == 0) {
start_offset = first_page_offset;
offset = start_offset;
@@ -596,7 +637,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
entry = addr + start_offset;
memset(addr, 0, PAGE_CACHE_SIZE);
- while (1) {
+ while (node && !next_page) {
struct btrfs_free_space *e;
e = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -612,12 +653,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
entry->type = BTRFS_FREE_SPACE_EXTENT;
}
node = rb_next(node);
- if (!node)
- break;
+ if (!node && cluster) {
+ node = rb_first(&cluster->root);
+ cluster = NULL;
+ }
offset += sizeof(struct btrfs_free_space_entry);
if (offset + sizeof(struct btrfs_free_space_entry) >=
PAGE_CACHE_SIZE)
+ next_page = true;
+ entry++;
+ }
+
+ /*
+ * We want to add any pinned extents to our free space cache
+ * so we don't leak the space
+ */
+ while (!next_page && (start < block_group->key.objectid +
+ block_group->key.offset)) {
+ ret = find_first_extent_bit(unpin, start, &start, &end,
+ EXTENT_DIRTY);
+ if (ret) {
+ ret = 0;
+ break;
+ }
+
+ /* This pinned extent is out of our range */
+ if (start >= block_group->key.objectid +
+ block_group->key.offset)
break;
+
+ len = block_group->key.objectid +
+ block_group->key.offset - start;
+ len = min(len, end + 1 - start);
+
+ entries++;
+ entry->offset = cpu_to_le64(start);
+ entry->bytes = cpu_to_le64(len);
+ entry->type = BTRFS_FREE_SPACE_EXTENT;
+
+ start = end + 1;
+ offset += sizeof(struct btrfs_free_space_entry);
+ if (offset + sizeof(struct btrfs_free_space_entry) >=
+ PAGE_CACHE_SIZE)
+ next_page = true;
entry++;
}
*crc = ~(u32)0;
@@ -648,7 +726,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
page_cache_release(page);
index++;
- } while (node);
+ } while (node || next_page);
/* Write out the bitmaps */
list_for_each_safe(pos, n, &bitmap_list) {
@@ -1187,7 +1265,7 @@ static void free_bitmap(struct btrfs_block_group_cache *block_group,
{
unlink_free_space(block_group, bitmap_info);
kfree(bitmap_info->bitmap);
- kfree(bitmap_info);
+ kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
block_group->total_bitmaps--;
recalculate_thresholds(block_group);
}
@@ -1285,9 +1363,22 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
* If we are below the extents threshold then we can add this as an
* extent, and don't have to deal with the bitmap
*/
- if (block_group->free_extents < block_group->extents_thresh &&
- info->bytes > block_group->sectorsize * 4)
- return 0;
+ if (block_group->free_extents < block_group->extents_thresh) {
+ /*
+ * If this block group has some small extents we don't want to
+ * use up all of our free slots in the cache with them, we want
+ * to reserve them for larger extents, however if we have plenty
+ * of cache left then go ahead and add them, no sense in adding
+ * the overhead of a bitmap if we don't have to.
+ */
+ if (info->bytes <= block_group->sectorsize * 4) {
+ if (block_group->free_extents * 2 <=
+ block_group->extents_thresh)
+ return 0;
+ } else {
+ return 0;
+ }
+ }
/*
* some block groups are so tiny they can't be enveloped by a bitmap, so
@@ -1342,8 +1433,8 @@ new_bitmap:
/* no pre-allocated info, allocate a new one */
if (!info) {
- info = kzalloc(sizeof(struct btrfs_free_space),
- GFP_NOFS);
+ info = kmem_cache_zalloc(btrfs_free_space_cachep,
+ GFP_NOFS);
if (!info) {
spin_lock(&block_group->tree_lock);
ret = -ENOMEM;
@@ -1365,7 +1456,7 @@ out:
if (info) {
if (info->bitmap)
kfree(info->bitmap);
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
}
return ret;
@@ -1398,7 +1489,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
else
__unlink_free_space(block_group, right_info);
info->bytes += right_info->bytes;
- kfree(right_info);
+ kmem_cache_free(btrfs_free_space_cachep, right_info);
merged = true;
}
@@ -1410,7 +1501,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
__unlink_free_space(block_group, left_info);
info->offset = left_info->offset;
info->bytes += left_info->bytes;
- kfree(left_info);
+ kmem_cache_free(btrfs_free_space_cachep, left_info);
merged = true;
}
@@ -1423,7 +1514,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *info;
int ret = 0;
- info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+ info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
if (!info)
return -ENOMEM;
@@ -1450,7 +1541,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
link:
ret = link_free_space(block_group, info);
if (ret)
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
out:
spin_unlock(&block_group->tree_lock);
@@ -1520,7 +1611,7 @@ again:
kfree(info->bitmap);
block_group->total_bitmaps--;
}
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
goto out_lock;
}
@@ -1556,7 +1647,7 @@ again:
/* the hole we're creating ends at the end
* of the info struct, just free the info
*/
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
}
spin_unlock(&block_group->tree_lock);
@@ -1629,30 +1720,28 @@ __btrfs_return_cluster_to_free_space(
{
struct btrfs_free_space *entry;
struct rb_node *node;
- bool bitmap;
spin_lock(&cluster->lock);
if (cluster->block_group != block_group)
goto out;
- bitmap = cluster->points_to_bitmap;
cluster->block_group = NULL;
cluster->window_start = 0;
list_del_init(&cluster->block_group_list);
- cluster->points_to_bitmap = false;
-
- if (bitmap)
- goto out;
node = rb_first(&cluster->root);
while (node) {
+ bool bitmap;
+
entry = rb_entry(node, struct btrfs_free_space, offset_index);
node = rb_next(&entry->offset_index);
rb_erase(&entry->offset_index, &cluster->root);
- BUG_ON(entry->bitmap);
- try_merge_free_space(block_group, entry, false);
+
+ bitmap = (entry->bitmap != NULL);
+ if (!bitmap)
+ try_merge_free_space(block_group, entry, false);
tree_insert_offset(&block_group->free_space_offset,
- entry->offset, &entry->offset_index, 0);
+ entry->offset, &entry->offset_index, bitmap);
}
cluster->root = RB_ROOT;
@@ -1689,7 +1778,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
unlink_free_space(block_group, info);
if (info->bitmap)
kfree(info->bitmap);
- kfree(info);
+ kmem_cache_free(btrfs_free_space_cachep, info);
if (need_resched()) {
spin_unlock(&block_group->tree_lock);
cond_resched();
@@ -1722,7 +1811,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
entry->offset += bytes;
entry->bytes -= bytes;
if (!entry->bytes)
- kfree(entry);
+ kmem_cache_free(btrfs_free_space_cachep, entry);
else
link_free_space(block_group, entry);
}
@@ -1775,50 +1864,24 @@ int btrfs_return_cluster_to_free_space(
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
+ struct btrfs_free_space *entry,
u64 bytes, u64 min_start)
{
- struct btrfs_free_space *entry;
int err;
u64 search_start = cluster->window_start;
u64 search_bytes = bytes;
u64 ret = 0;
- spin_lock(&block_group->tree_lock);
- spin_lock(&cluster->lock);
-
- if (!cluster->points_to_bitmap)
- goto out;
-
- if (cluster->block_group != block_group)
- goto out;
-
- /*
- * search_start is the beginning of the bitmap, but at some point it may
- * be a good idea to point to the actual start of the free area in the
- * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
- * to 1 to make sure we get the bitmap entry
- */
- entry = tree_search_offset(block_group,
- offset_to_bitmap(block_group, search_start),
- 1, 0);
- if (!entry || !entry->bitmap)
- goto out;
-
search_start = min_start;
search_bytes = bytes;
err = search_bitmap(block_group, entry, &search_start,
&search_bytes);
if (err)
- goto out;
+ return 0;
ret = search_start;
bitmap_clear_bits(block_group, entry, ret, bytes);
- if (entry->bytes == 0)
- free_bitmap(block_group, entry);
-out:
- spin_unlock(&cluster->lock);
- spin_unlock(&block_group->tree_lock);
return ret;
}
@@ -1836,10 +1899,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
struct rb_node *node;
u64 ret = 0;
- if (cluster->points_to_bitmap)
- return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
- min_start);
-
spin_lock(&cluster->lock);
if (bytes > cluster->max_size)
goto out;
@@ -1852,9 +1911,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
goto out;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
-
while(1) {
- if (entry->bytes < bytes || entry->offset < min_start) {
+ if (entry->bytes < bytes ||
+ (!entry->bitmap && entry->offset < min_start)) {
struct rb_node *node;
node = rb_next(&entry->offset_index);
@@ -1864,10 +1923,27 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
offset_index);
continue;
}
- ret = entry->offset;
- entry->offset += bytes;
- entry->bytes -= bytes;
+ if (entry->bitmap) {
+ ret = btrfs_alloc_from_bitmap(block_group,
+ cluster, entry, bytes,
+ min_start);
+ if (ret == 0) {
+ struct rb_node *node;
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ break;
+ entry = rb_entry(node, struct btrfs_free_space,
+ offset_index);
+ continue;
+ }
+ } else {
+
+ ret = entry->offset;
+
+ entry->offset += bytes;
+ entry->bytes -= bytes;
+ }
if (entry->bytes == 0)
rb_erase(&entry->offset_index, &cluster->root);
@@ -1884,7 +1960,12 @@ out:
block_group->free_space -= bytes;
if (entry->bytes == 0) {
block_group->free_extents--;
- kfree(entry);
+ if (entry->bitmap) {
+ kfree(entry->bitmap);
+ block_group->total_bitmaps--;
+ recalculate_thresholds(block_group);
+ }
+ kmem_cache_free(btrfs_free_space_cachep, entry);
}
spin_unlock(&block_group->tree_lock);
@@ -1904,12 +1985,13 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
unsigned long found_bits;
unsigned long start = 0;
unsigned long total_found = 0;
+ int ret;
bool found = false;
i = offset_to_bit(entry->offset, block_group->sectorsize,
max_t(u64, offset, entry->offset));
- search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
- total_bits = bytes_to_bits(bytes, block_group->sectorsize);
+ search_bits = bytes_to_bits(bytes, block_group->sectorsize);
+ total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
again:
found_bits = 0;
@@ -1926,7 +2008,7 @@ again:
}
if (!found_bits)
- return -1;
+ return -ENOSPC;
if (!found) {
start = i;
@@ -1950,189 +2032,208 @@ again:
cluster->window_start = start * block_group->sectorsize +
entry->offset;
- cluster->points_to_bitmap = true;
+ rb_erase(&entry->offset_index, &block_group->free_space_offset);
+ ret = tree_insert_offset(&cluster->root, entry->offset,
+ &entry->offset_index, 1);
+ BUG_ON(ret);
return 0;
}
/*
- * here we try to find a cluster of blocks in a block group. The goal
- * is to find at least bytes free and up to empty_size + bytes free.
- * We might not find them all in one contiguous area.
- *
- * returns zero and sets up cluster if things worked out, otherwise
- * it returns -enospc
+ * This searches the block group for just extents to fill the cluster with.
*/
-int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_free_cluster *cluster,
- u64 offset, u64 bytes, u64 empty_size)
+static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ u64 offset, u64 bytes, u64 min_bytes)
{
+ struct btrfs_free_space *first = NULL;
struct btrfs_free_space *entry = NULL;
+ struct btrfs_free_space *prev = NULL;
+ struct btrfs_free_space *last;
struct rb_node *node;
- struct btrfs_free_space *next;
- struct btrfs_free_space *last = NULL;
- u64 min_bytes;
u64 window_start;
u64 window_free;
- u64 max_extent = 0;
- bool found_bitmap = false;
- int ret;
+ u64 max_extent;
+ u64 max_gap = 128 * 1024;
- /* for metadata, allow allocates with more holes */
- if (btrfs_test_opt(root, SSD_SPREAD)) {
- min_bytes = bytes + empty_size;
- } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
- /*
- * we want to do larger allocations when we are
- * flushing out the delayed refs, it helps prevent
- * making more work as we go along.
- */
- if (trans->transaction->delayed_refs.flushing)
- min_bytes = max(bytes, (bytes + empty_size) >> 1);
- else
- min_bytes = max(bytes, (bytes + empty_size) >> 4);
- } else
- min_bytes = max(bytes, (bytes + empty_size) >> 2);
-
- spin_lock(&block_group->tree_lock);
- spin_lock(&cluster->lock);
-
- /* someone already found a cluster, hooray */
- if (cluster->block_group) {
- ret = 0;
- goto out;
- }
-again:
- entry = tree_search_offset(block_group, offset, found_bitmap, 1);
- if (!entry) {
- ret = -ENOSPC;
- goto out;
- }
+ entry = tree_search_offset(block_group, offset, 0, 1);
+ if (!entry)
+ return -ENOSPC;
/*
- * If found_bitmap is true, we exhausted our search for extent entries,
- * and we just want to search all of the bitmaps that we can find, and
- * ignore any extent entries we find.
+ * We don't want bitmaps, so just move along until we find a normal
+ * extent entry.
*/
- while (entry->bitmap || found_bitmap ||
- (!entry->bitmap && entry->bytes < min_bytes)) {
- struct rb_node *node = rb_next(&entry->offset_index);
-
- if (entry->bitmap && entry->bytes > bytes + empty_size) {
- ret = btrfs_bitmap_cluster(block_group, entry, cluster,
- offset, bytes + empty_size,
- min_bytes);
- if (!ret)
- goto got_it;
- }
-
- if (!node) {
- ret = -ENOSPC;
- goto out;
- }
+ while (entry->bitmap) {
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ return -ENOSPC;
entry = rb_entry(node, struct btrfs_free_space, offset_index);
}
- /*
- * We already searched all the extent entries from the passed in offset
- * to the end and didn't find enough space for the cluster, and we also
- * didn't find any bitmaps that met our criteria, just go ahead and exit
- */
- if (found_bitmap) {
- ret = -ENOSPC;
- goto out;
- }
-
- cluster->points_to_bitmap = false;
window_start = entry->offset;
window_free = entry->bytes;
- last = entry;
max_extent = entry->bytes;
+ first = entry;
+ last = entry;
+ prev = entry;
- while (1) {
- /* out window is just right, lets fill it */
- if (window_free >= bytes + empty_size)
- break;
-
- node = rb_next(&last->offset_index);
- if (!node) {
- if (found_bitmap)
- goto again;
- ret = -ENOSPC;
- goto out;
- }
- next = rb_entry(node, struct btrfs_free_space, offset_index);
+ while (window_free <= min_bytes) {
+ node = rb_next(&entry->offset_index);
+ if (!node)
+ return -ENOSPC;
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
- /*
- * we found a bitmap, so if this search doesn't result in a
- * cluster, we know to go and search again for the bitmaps and
- * start looking for space there
- */
- if (next->bitmap) {
- if (!found_bitmap)
- offset = next->offset;
- found_bitmap = true;
- last = next;
+ if (entry->bitmap)
continue;
- }
-
/*
* we haven't filled the empty size and the window is
* very large. reset and try again
*/
- if (next->offset - (last->offset + last->bytes) > 128 * 1024 ||
- next->offset - window_start > (bytes + empty_size) * 2) {
- entry = next;
+ if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
+ entry->offset - window_start > (min_bytes * 2)) {
+ first = entry;
window_start = entry->offset;
window_free = entry->bytes;
last = entry;
max_extent = entry->bytes;
} else {
- last = next;
- window_free += next->bytes;
+ last = entry;
+ window_free += entry->bytes;
if (entry->bytes > max_extent)
max_extent = entry->bytes;
}
+ prev = entry;
}
- cluster->window_start = entry->offset;
+ cluster->window_start = first->offset;
+
+ node = &first->offset_index;
/*
* now we've found our entries, pull them out of the free space
* cache and put them into the cluster rbtree
- *
- * The cluster includes an rbtree, but only uses the offset index
- * of each free space cache entry.
*/
- while (1) {
+ do {
+ int ret;
+
+ entry = rb_entry(node, struct btrfs_free_space, offset_index);
node = rb_next(&entry->offset_index);
- if (entry->bitmap && node) {
- entry = rb_entry(node, struct btrfs_free_space,
- offset_index);
+ if (entry->bitmap)
continue;
- } else if (entry->bitmap && !node) {
- break;
- }
rb_erase(&entry->offset_index, &block_group->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 0);
BUG_ON(ret);
+ } while (node && entry != last);
- if (!node || entry == last)
- break;
+ cluster->max_size = max_extent;
+
+ return 0;
+}
+
+/*
+ * This specifically looks for bitmaps that may work in the cluster, we assume
+ * that we have already failed to find extents that will work.
+ */
+static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ u64 offset, u64 bytes, u64 min_bytes)
+{
+ struct btrfs_free_space *entry;
+ struct rb_node *node;
+ int ret = -ENOSPC;
+
+ if (block_group->total_bitmaps == 0)
+ return -ENOSPC;
+ entry = tree_search_offset(block_group,
+ offset_to_bitmap(block_group, offset),
+ 0, 1);
+ if (!entry)
+ return -ENOSPC;
+
+ node = &entry->offset_index;
+ do {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ node = rb_next(&entry->offset_index);
+ if (!entry->bitmap)
+ continue;
+ if (entry->bytes < min_bytes)
+ continue;
+ ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+ bytes, min_bytes);
+ } while (ret && node);
+
+ return ret;
+}
+
+/*
+ * here we try to find a cluster of blocks in a block group. The goal
+ * is to find at least bytes free and up to empty_size + bytes free.
+ * We might not find them all in one contiguous area.
+ *
+ * returns zero and sets up cluster if things worked out, otherwise
+ * it returns -ENOSPC
+ */
+int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_free_cluster *cluster,
+ u64 offset, u64 bytes, u64 empty_size)
+{
+ u64 min_bytes;
+ int ret;
+
+ /* for metadata, allow allocates with more holes */
+ if (btrfs_test_opt(root, SSD_SPREAD)) {
+ min_bytes = bytes + empty_size;
+ } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
+ /*
+ * we want to do larger allocations when we are
+ * flushing out the delayed refs, it helps prevent
+ * making more work as we go along.
+ */
+ if (trans->transaction->delayed_refs.flushing)
+ min_bytes = max(bytes, (bytes + empty_size) >> 1);
+ else
+ min_bytes = max(bytes, (bytes + empty_size) >> 4);
+ } else
+ min_bytes = max(bytes, (bytes + empty_size) >> 2);
+
+ spin_lock(&block_group->tree_lock);
+
+ /*
+ * If we know we don't have enough space to make a cluster, don't even
+ * bother doing all the work to try and find one.
+ */
+ if (block_group->free_space < min_bytes) {
+ spin_unlock(&block_group->tree_lock);
+ return -ENOSPC;
}
- cluster->max_size = max_extent;
-got_it:
- ret = 0;
- atomic_inc(&block_group->count);
- list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
- cluster->block_group = block_group;
+ spin_lock(&cluster->lock);
+
+ /* someone already found a cluster, hooray */
+ if (cluster->block_group) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
+ min_bytes);
+ if (ret)
+ ret = setup_cluster_bitmap(block_group, cluster, offset,
+ bytes, min_bytes);
+
+ if (!ret) {
+ atomic_inc(&block_group->count);
+ list_add_tail(&cluster->block_group_list,
+ &block_group->cluster_list);
+ cluster->block_group = block_group;
+ }
out:
spin_unlock(&cluster->lock);
spin_unlock(&block_group->tree_lock);
@@ -2149,8 +2250,99 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
spin_lock_init(&cluster->refill_lock);
cluster->root = RB_ROOT;
cluster->max_size = 0;
- cluster->points_to_bitmap = false;
INIT_LIST_HEAD(&cluster->block_group_list);
cluster->block_group = NULL;
}
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+ u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+ struct btrfs_free_space *entry = NULL;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ u64 bytes = 0;
+ u64 actually_trimmed;
+ int ret = 0;
+
+ *trimmed = 0;
+
+ while (start < end) {
+ spin_lock(&block_group->tree_lock);
+
+ if (block_group->free_space < minlen) {
+ spin_unlock(&block_group->tree_lock);
+ break;
+ }
+
+ entry = tree_search_offset(block_group, start, 0, 1);
+ if (!entry)
+ entry = tree_search_offset(block_group,
+ offset_to_bitmap(block_group,
+ start),
+ 1, 1);
+
+ if (!entry || entry->offset >= end) {
+ spin_unlock(&block_group->tree_lock);
+ break;
+ }
+
+ if (entry->bitmap) {
+ ret = search_bitmap(block_group, entry, &start, &bytes);
+ if (!ret) {
+ if (start >= end) {
+ spin_unlock(&block_group->tree_lock);
+ break;
+ }
+ bytes = min(bytes, end - start);
+ bitmap_clear_bits(block_group, entry,
+ start, bytes);
+ if (entry->bytes == 0)
+ free_bitmap(block_group, entry);
+ } else {
+ start = entry->offset + BITS_PER_BITMAP *
+ block_group->sectorsize;
+ spin_unlock(&block_group->tree_lock);
+ ret = 0;
+ continue;
+ }
+ } else {
+ start = entry->offset;
+ bytes = min(entry->bytes, end - start);
+ unlink_free_space(block_group, entry);
+ kfree(entry);
+ }
+
+ spin_unlock(&block_group->tree_lock);
+
+ if (bytes >= minlen) {
+ int update_ret;
+ update_ret = btrfs_update_reserved_bytes(block_group,
+ bytes, 1, 1);
+
+ ret = btrfs_error_discard_extent(fs_info->extent_root,
+ start,
+ bytes,
+ &actually_trimmed);
+
+ btrfs_add_free_space(block_group,
+ start, bytes);
+ if (!update_ret)
+ btrfs_update_reserved_bytes(block_group,
+ bytes, 0, 1);
+
+ if (ret)
+ break;
+ *trimmed += actually_trimmed;
+ }
+ start += bytes;
+ bytes = 0;
+
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ cond_resched();
+ }
+
+ return ret;
+}
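
Two themes run through this file's changes: free-space entries now come from a dedicated slab cache (btrfs_free_space_cachep, declared in the inode.c hunk below), and cluster setup is split into extent and bitmap passes. A sketch of the slab-cache lifecycle the kzalloc()/kfree() conversions assume; the create/destroy calls are module init/exit boilerplate not shown in this diff:

    /* module init (assumed, not part of this hunk) */
    btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
                            sizeof(struct btrfs_free_space), 0,
                            SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
    if (!btrfs_free_space_cachep)
            return -ENOMEM;

    /* per-entry allocate/free, as used throughout the file */
    info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
    /* ... */
    kmem_cache_free(btrfs_free_space_cachep, info);

    /* module exit (assumed) */
    kmem_cache_destroy(btrfs_free_space_cachep);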
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index e49ca5c321b5..65c3b935289f 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -68,4 +68,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
int btrfs_return_cluster_to_free_space(
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster);
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+ u64 *trimmed, u64 start, u64 end, u64 minlen);
#endif
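
btrfs_trim_block_group() is the per-block-group worker behind the discard support added in this series. For context, a hedged sketch of how the feature is typically exercised from userspace through the generic FITRIM ioctl (the standard VFS interface, not something introduced by this header; the mount point is an example path):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>            /* FITRIM, struct fstrim_range */

    int main(void)
    {
            struct fstrim_range range = {
                    .start  = 0,
                    .len    = (__u64)-1,     /* whole filesystem */
                    .minlen = 4096,          /* ignore tiny free extents */
            };
            int fd = open("/mnt", O_RDONLY);

            if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
                    perror("fstrim");
                    return 1;
            }
            /* the kernel writes the number of bytes trimmed back into len */
            printf("trimmed %llu bytes\n", (unsigned long long)range.len);
            return 0;
    }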
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c56eb5909172..c05a08f4c411 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -30,7 +30,8 @@ int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
int slot;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
search_key.type = -1;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 512c3d1da083..5cc64ab9c485 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -50,6 +50,7 @@
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
+#include "free-space-cache.h"
struct btrfs_iget_args {
u64 ino;
@@ -70,6 +71,7 @@ static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
+struct kmem_cache *btrfs_free_space_cachep;
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -82,7 +84,8 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
};
-static void btrfs_truncate(struct inode *inode);
+static int btrfs_setsize(struct inode *inode, loff_t newsize);
+static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
@@ -109,6 +112,7 @@ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
u64 start, size_t size, size_t compressed_size,
+ int compress_type,
struct page **compressed_pages)
{
struct btrfs_key key;
@@ -123,12 +127,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
size_t cur_size = size;
size_t datasize;
unsigned long offset;
- int compress_type = BTRFS_COMPRESS_NONE;
- if (compressed_size && compressed_pages) {
- compress_type = root->fs_info->compress_type;
+ if (compressed_size && compressed_pages)
cur_size = compressed_size;
- }
path = btrfs_alloc_path();
if (!path)
@@ -218,7 +219,7 @@ fail:
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 start, u64 end,
- size_t compressed_size,
+ size_t compressed_size, int compress_type,
struct page **compressed_pages)
{
u64 isize = i_size_read(inode);
@@ -251,7 +252,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
inline_len = min_t(u64, isize, actual_end);
ret = insert_inline_extent(trans, root, inode, start,
inline_len, compressed_size,
- compressed_pages);
+ compress_type, compressed_pages);
BUG_ON(ret);
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
@@ -288,6 +289,7 @@ static noinline int add_async_extent(struct async_cow *cow,
struct async_extent *async_extent;
async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
+ BUG_ON(!async_extent);
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->compressed_size = compressed_size;
@@ -382,9 +384,11 @@ again:
*/
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
(btrfs_test_opt(root, COMPRESS) ||
- (BTRFS_I(inode)->force_compress))) {
+ (BTRFS_I(inode)->force_compress) ||
+ (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
WARN_ON(pages);
pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+ BUG_ON(!pages);
if (BTRFS_I(inode)->force_compress)
compress_type = BTRFS_I(inode)->force_compress;
@@ -427,12 +431,13 @@ again:
* to make an uncompressed inline extent.
*/
ret = cow_file_range_inline(trans, root, inode,
- start, end, 0, NULL);
+ start, end, 0, 0, NULL);
} else {
/* try making a compressed inline extent */
ret = cow_file_range_inline(trans, root, inode,
start, end,
- total_compressed, pages);
+ total_compressed,
+ compress_type, pages);
}
if (ret == 0) {
/*
@@ -786,7 +791,7 @@ static noinline int cow_file_range(struct inode *inode,
if (start == 0) {
/* lets try to make an inline extent */
ret = cow_file_range_inline(trans, root, inode,
- start, end, 0, NULL);
+ start, end, 0, 0, NULL);
if (ret == 0) {
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
@@ -1254,7 +1259,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
else if (!btrfs_test_opt(root, COMPRESS) &&
- !(BTRFS_I(inode)->force_compress))
+ !(BTRFS_I(inode)->force_compress) &&
+ !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
ret = cow_file_range(inode, locked_page, start, end,
page_started, nr_written, 1);
else
@@ -1461,8 +1467,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
if (bio_flags & EXTENT_BIO_COMPRESSED) {
return btrfs_submit_compressed_read(inode, bio,
mirror_num, bio_flags);
- } else if (!skip_sum)
- btrfs_lookup_bio_sums(root, inode, bio, NULL);
+ } else if (!skip_sum) {
+ ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
+ if (ret)
+ return ret;
+ }
goto mapit;
} else if (!skip_sum) {
/* csum items have already been cloned */
@@ -1785,6 +1794,8 @@ out:
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
+ trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
+
ClearPagePrivate2(page);
return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
@@ -1895,10 +1906,10 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
else
rw = READ;
- BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
+ ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
failrec->last_mirror,
failrec->bio_flags, 0);
- return 0;
+ return ret;
}
/*
@@ -2210,8 +2221,6 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
insert = 1;
#endif
insert = 1;
- } else {
- WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved);
}
if (!BTRFS_I(inode)->orphan_meta_reserved) {
@@ -2282,7 +2291,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
* this cleans up any orphans that may be left on the list from the last use
* of this root.
*/
-void btrfs_orphan_cleanup(struct btrfs_root *root)
+int btrfs_orphan_cleanup(struct btrfs_root *root)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
@@ -2292,10 +2301,13 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
int ret = 0, nr_unlink = 0, nr_truncate = 0;
if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
- return;
+ return 0;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
path->reada = -1;
key.objectid = BTRFS_ORPHAN_OBJECTID;
@@ -2304,18 +2316,16 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0) {
- printk(KERN_ERR "Error searching slot for orphan: %d"
- "\n", ret);
- break;
- }
+ if (ret < 0)
+ goto out;
/*
* if ret == 0 means we found what we were searching for, which
- * is weird, but possible, so only screw with path if we didnt
+ * is weird, but possible, so only screw with path if we didn't
* find the key and see if we have stuff that matches
*/
if (ret > 0) {
+ ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
@@ -2343,7 +2353,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
- BUG_ON(IS_ERR(inode));
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
/*
* add this inode to the orphan list so btrfs_orphan_del does
@@ -2361,7 +2374,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
*/
if (is_bad_inode(inode)) {
trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
btrfs_orphan_del(trans, inode);
btrfs_end_transaction(trans, root);
iput(inode);
@@ -2370,17 +2386,22 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
/* if we have links, this was a truncate, lets do that */
if (inode->i_nlink) {
+ if (!S_ISREG(inode->i_mode)) {
+ WARN_ON(1);
+ iput(inode);
+ continue;
+ }
nr_truncate++;
- btrfs_truncate(inode);
+ ret = btrfs_truncate(inode);
} else {
nr_unlink++;
}
/* this will do delete_inode and everything for us */
iput(inode);
+ if (ret)
+ goto out;
}
- btrfs_free_path(path);
-
root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
if (root->orphan_block_rsv)
@@ -2389,14 +2410,20 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
if (root->orphan_block_rsv || root->orphan_item_inserted) {
trans = btrfs_join_transaction(root, 1);
- BUG_ON(IS_ERR(trans));
- btrfs_end_transaction(trans, root);
+ if (!IS_ERR(trans))
+ btrfs_end_transaction(trans, root);
}
if (nr_unlink)
printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
if (nr_truncate)
printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
+
+out:
+ if (ret)
+ printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
+ btrfs_free_path(path);
+ return ret;
}
/*
@@ -2635,10 +2662,10 @@ failed:
* recovery code. It remove a link in a directory with a given name, and
* also drops the back refs in the inode to the directory
*/
-int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *dir, struct inode *inode,
- const char *name, int name_len)
+static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *dir, struct inode *inode,
+ const char *name, int name_len)
{
struct btrfs_path *path;
int ret = 0;
@@ -2710,12 +2737,25 @@ err:
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
btrfs_update_inode(trans, root, dir);
- btrfs_drop_nlink(inode);
- ret = btrfs_update_inode(trans, root, inode);
out:
return ret;
}
+int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *dir, struct inode *inode,
+ const char *name, int name_len)
+{
+ int ret;
+ ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
+ if (!ret) {
+ btrfs_drop_nlink(inode);
+ ret = btrfs_update_inode(trans, root, inode);
+ }
+ return ret;
+}
+
+
/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
struct btrfs_path *path)
@@ -3537,7 +3577,13 @@ out:
return ret;
}
-int btrfs_cont_expand(struct inode *inode, loff_t size)
+/*
+ * This function puts in dummy file extents for the area we're creating a hole
+ * for. So if we are truncating this file to a larger size we need to insert
+ * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
+ * the range between oldsize and size
+ */
+int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3545,7 +3591,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
u64 mask = root->sectorsize - 1;
- u64 hole_start = (inode->i_size + mask) & ~mask;
+ u64 hole_start = (oldsize + mask) & ~mask;
u64 block_end = (size + mask) & ~mask;
u64 last_byte;
u64 cur_offset;
@@ -3590,13 +3636,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
err = btrfs_drop_extents(trans, inode, cur_offset,
cur_offset + hole_size,
&hint_byte, 1);
- BUG_ON(err);
+ if (err)
+ break;
err = btrfs_insert_file_extent(trans, root,
inode->i_ino, cur_offset, 0,
0, hole_size, 0, hole_size,
0, 0, 0);
- BUG_ON(err);
+ if (err)
+ break;
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
@@ -3616,81 +3664,41 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
return err;
}
-static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
+static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_trans_handle *trans;
- unsigned long nr;
+ loff_t oldsize = i_size_read(inode);
int ret;
- if (attr->ia_size == inode->i_size)
+ if (newsize == oldsize)
return 0;
- if (attr->ia_size > inode->i_size) {
- unsigned long limit;
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (attr->ia_size > inode->i_sb->s_maxbytes)
- return -EFBIG;
- if (limit != RLIM_INFINITY && attr->ia_size > limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
- }
-
- trans = btrfs_start_transaction(root, 5);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- btrfs_set_trans_block_group(trans, inode);
-
- ret = btrfs_orphan_add(trans, inode);
- BUG_ON(ret);
-
- nr = trans->blocks_used;
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
-
- if (attr->ia_size > inode->i_size) {
- ret = btrfs_cont_expand(inode, attr->ia_size);
+ if (newsize > oldsize) {
+ i_size_write(inode, newsize);
+ btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+ truncate_pagecache(inode, oldsize, newsize);
+ ret = btrfs_cont_expand(inode, oldsize, newsize);
if (ret) {
- btrfs_truncate(inode);
+ btrfs_setsize(inode, oldsize);
return ret;
}
- i_size_write(inode, attr->ia_size);
- btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+ mark_inode_dirty(inode);
+ } else {
- trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
- btrfs_set_trans_block_group(trans, inode);
- trans->block_rsv = root->orphan_block_rsv;
- BUG_ON(!trans->block_rsv);
+ /*
+ * We're truncating a file that used to have good data down to
+ * zero. Make sure it gets into the ordered flush list so that
+ * any new writes get down to disk quickly.
+ */
+ if (newsize == 0)
+ BTRFS_I(inode)->ordered_data_close = 1;
- ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
- if (inode->i_nlink > 0) {
- ret = btrfs_orphan_del(trans, inode);
- BUG_ON(ret);
- }
- nr = trans->blocks_used;
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
- return 0;
+ /* we don't support swapfiles, so vmtruncate shouldn't fail */
+ truncate_setsize(inode, newsize);
+ ret = btrfs_truncate(inode);
}
- /*
- * We're truncating a file that used to have good data down to
- * zero. Make sure it gets into the ordered flush list so that
- * any new writes get down to disk quickly.
- */
- if (attr->ia_size == 0)
- BTRFS_I(inode)->ordered_data_close = 1;
-
- /* we don't support swapfiles, so vmtruncate shouldn't fail */
- ret = vmtruncate(inode, attr->ia_size);
- BUG_ON(ret);
-
- return 0;
+ return ret;
}
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
@@ -3707,7 +3715,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
- err = btrfs_setattr_size(inode, attr);
+ err = btrfs_setsize(inode, attr->ia_size);
if (err)
return err;
}
@@ -3730,6 +3738,8 @@ void btrfs_evict_inode(struct inode *inode)
unsigned long nr;
int ret;
+ trace_btrfs_inode_evict(inode);
+
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
root == root->fs_info->tree_root))
@@ -4072,7 +4082,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
btrfs_read_locked_inode(inode);
-
inode_tree_add(inode);
unlock_new_inode(inode);
if (new)
@@ -4147,8 +4156,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
if (!IS_ERR(inode) && root != sub_root) {
down_read(&root->fs_info->cleanup_work_sem);
if (!(inode->i_sb->s_flags & MS_RDONLY))
- btrfs_orphan_cleanup(sub_root);
+ ret = btrfs_orphan_cleanup(sub_root);
up_read(&root->fs_info->cleanup_work_sem);
+ if (ret)
+ inode = ERR_PTR(ret);
}
return inode;
@@ -4282,6 +4293,9 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
while (di_cur < di_total) {
struct btrfs_key location;
+ if (verify_dir_item(root, leaf, di))
+ break;
+
name_len = btrfs_dir_name_len(leaf, di);
if (name_len <= sizeof(tmp_name)) {
name_ptr = tmp_name;
@@ -4517,6 +4531,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
return ERR_PTR(-ENOMEM);
if (dir) {
+ trace_btrfs_inode_request(dir);
+
ret = btrfs_set_inode_index(dir, index);
if (ret) {
iput(inode);
@@ -4585,12 +4601,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if ((mode & S_IFREG)) {
if (btrfs_test_opt(root, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root, NODATACOW))
+ if (btrfs_test_opt(root, NODATACOW) ||
+ (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
}
insert_inode_hash(inode);
inode_tree_add(inode);
+
+ trace_btrfs_inode_new(inode);
+
return inode;
fail:
if (dir)
@@ -4809,7 +4829,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
/* do not allow sys_link's with other subvols of the same device */
if (root->objectid != BTRFS_I(inode)->root->objectid)
- return -EPERM;
+ return -EXDEV;
+
+ if (inode->i_nlink == ~0U)
+ return -EMLINK;
btrfs_inc_nlink(inode);
inode->i_ctime = CURRENT_TIME;
@@ -5265,6 +5288,9 @@ insert:
}
write_unlock(&em_tree->lock);
out:
+
+ trace_btrfs_get_extent(root, em);
+
if (path)
btrfs_free_path(path);
if (trans) {
@@ -5748,6 +5774,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
kfree(dip->csums);
kfree(dip);
+
+ /* If we had a csum failure make sure to clear the uptodate flag */
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
dio_end_io(bio, err);
}
@@ -5849,6 +5879,10 @@ out_done:
kfree(dip->csums);
kfree(dip);
+
+ /* If we had an error make sure to clear the uptodate flag */
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
dio_end_io(bio, err);
}
@@ -5922,9 +5956,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
__btrfs_submit_bio_start_direct_io,
__btrfs_submit_bio_done);
goto err;
- } else if (!skip_sum)
- btrfs_lookup_bio_sums_dio(root, inode, bio,
+ } else if (!skip_sum) {
+ ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
file_offset, csums);
+ if (ret)
+ goto err;
+ }
ret = btrfs_map_bio(root, rw, bio, 0, 1);
err:
@@ -5948,6 +5985,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int nr_pages = 0;
u32 *csums = dip->csums;
int ret = 0;
+ int write = rw & REQ_WRITE;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
if (!bio)
@@ -5984,7 +6022,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
goto out_err;
}
- if (!skip_sum)
+ /* Writes use the ordered csums */
+ if (!write && !skip_sum)
csums = csums + nr_pages;
start_sector += submit_len >> 9;
file_offset += submit_len;
@@ -6052,7 +6091,8 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
}
dip->csums = NULL;
- if (!skip_sum) {
+ /* Writes use the ordered csum stuff, so we don't need dip->csums */
+ if (!write && !skip_sum) {
dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
if (!dip->csums) {
kfree(dip);
@@ -6474,28 +6514,42 @@ out:
return ret;
}
-static void btrfs_truncate(struct inode *inode)
+static int btrfs_truncate(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
+ int err = 0;
struct btrfs_trans_handle *trans;
unsigned long nr;
u64 mask = root->sectorsize - 1;
- if (!S_ISREG(inode->i_mode)) {
- WARN_ON(1);
- return;
- }
-
ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
if (ret)
- return;
+ return ret;
btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+ trans = btrfs_start_transaction(root, 5);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ btrfs_set_trans_block_group(trans, inode);
+
+ ret = btrfs_orphan_add(trans, inode);
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
+
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+
+ /* Now start a transaction for the truncate */
trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = root->orphan_block_rsv;
@@ -6522,29 +6576,38 @@ static void btrfs_truncate(struct inode *inode)
while (1) {
if (!trans) {
trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = root->orphan_block_rsv;
}
ret = btrfs_block_rsv_check(trans, root,
root->orphan_block_rsv, 0, 5);
- if (ret) {
- BUG_ON(ret != -EAGAIN);
+ if (ret == -EAGAIN) {
ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
+ if (ret)
+ return ret;
trans = NULL;
continue;
+ } else if (ret) {
+ err = ret;
+ break;
}
ret = btrfs_truncate_inode_items(trans, root, inode,
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
- if (ret != -EAGAIN)
+ if (ret != -EAGAIN) {
+ err = ret;
break;
+ }
ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
+ if (ret) {
+ err = ret;
+ break;
+ }
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
@@ -6554,16 +6617,27 @@ static void btrfs_truncate(struct inode *inode)
if (ret == 0 && inode->i_nlink > 0) {
ret = btrfs_orphan_del(trans, inode);
- BUG_ON(ret);
+ if (ret)
+ err = ret;
+ } else if (ret && inode->i_nlink > 0) {
+ /*
+ * Failed to do the truncate; remove ourselves from the
+ * in-memory orphan list.
+ */
+ ret = btrfs_orphan_del(NULL, inode);
}
ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
+ if (ret && !err)
+ err = ret;
nr = trans->blocks_used;
ret = btrfs_end_transaction_throttle(trans, root);
- BUG_ON(ret);
+ if (ret && !err)
+ err = ret;
btrfs_btree_balance_dirty(root, nr);
+
+ return err;
}
/*
@@ -6630,9 +6704,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->index_cnt = (u64)-1;
ei->last_unlink_trans = 0;
- spin_lock_init(&ei->accounting_lock);
atomic_set(&ei->outstanding_extents, 0);
- ei->reserved_extents = 0;
+ atomic_set(&ei->reserved_extents, 0);
ei->ordered_data_close = 0;
ei->orphan_meta_reserved = 0;
@@ -6668,7 +6741,7 @@ void btrfs_destroy_inode(struct inode *inode)
WARN_ON(!list_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
- WARN_ON(BTRFS_I(inode)->reserved_extents);
+ WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));
/*
* This can happen where we create an inode, but somebody else also
@@ -6760,6 +6833,8 @@ void btrfs_destroy_cachep(void)
kmem_cache_destroy(btrfs_transaction_cachep);
if (btrfs_path_cachep)
kmem_cache_destroy(btrfs_path_cachep);
+ if (btrfs_free_space_cachep)
+ kmem_cache_destroy(btrfs_free_space_cachep);
}
int btrfs_init_cachep(void)
@@ -6788,6 +6863,12 @@ int btrfs_init_cachep(void)
if (!btrfs_path_cachep)
goto fail;
+ btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
+ sizeof(struct btrfs_free_space), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_free_space_cachep)
+ goto fail;
+
return 0;
fail:
btrfs_destroy_cachep();
@@ -6806,6 +6887,26 @@ static int btrfs_getattr(struct vfsmount *mnt,
return 0;
}
+/*
+ * When a file is moved, it inherits the COW and compression flags of its
+ * new directory.
+ */
+static void fixup_inode_flags(struct inode *dir, struct inode *inode)
+{
+ struct btrfs_inode *b_dir = BTRFS_I(dir);
+ struct btrfs_inode *b_inode = BTRFS_I(inode);
+
+ if (b_dir->flags & BTRFS_INODE_NODATACOW)
+ b_inode->flags |= BTRFS_INODE_NODATACOW;
+ else
+ b_inode->flags &= ~BTRFS_INODE_NODATACOW;
+
+ if (b_dir->flags & BTRFS_INODE_COMPRESS)
+ b_inode->flags |= BTRFS_INODE_COMPRESS;
+ else
+ b_inode->flags &= ~BTRFS_INODE_COMPRESS;
+}
+
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
@@ -6854,8 +6955,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
* should cover the worst case number of items we'll modify.
*/
trans = btrfs_start_transaction(root, 20);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_notrans;
+ }
btrfs_set_trans_block_group(trans, new_dir);
@@ -6908,11 +7011,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else {
- btrfs_inc_nlink(old_dentry->d_inode);
- ret = btrfs_unlink_inode(trans, root, old_dir,
- old_dentry->d_inode,
- old_dentry->d_name.name,
- old_dentry->d_name.len);
+ ret = __btrfs_unlink_inode(trans, root, old_dir,
+ old_dentry->d_inode,
+ old_dentry->d_name.name,
+ old_dentry->d_name.len);
+ if (!ret)
+ ret = btrfs_update_inode(trans, root, old_inode);
}
BUG_ON(ret);
@@ -6939,6 +7043,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
+ fixup_inode_flags(new_dir, old_inode);
+
ret = btrfs_add_link(trans, new_dir, old_inode,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
@@ -6952,7 +7058,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
out_fail:
btrfs_end_transaction_throttle(trans, root);
-
+out_notrans:
if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&root->fs_info->subvol_sem);
@@ -7340,7 +7446,6 @@ static const struct address_space_operations btrfs_aops = {
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readpages = btrfs_readpages,
- .sync_page = block_sync_page,
.direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
@@ -7356,7 +7461,6 @@ static const struct address_space_operations btrfs_symlink_aops = {
};
static const struct inode_operations btrfs_file_inode_operations = {
- .truncate = btrfs_truncate,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.setxattr = btrfs_setxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 5fdb2abc4fa7..cfc264fefdb0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -40,6 +40,7 @@
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/blkdev.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -138,6 +139,24 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
return 0;
}
+static int check_flags(unsigned int flags)
+{
+ if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
+ FS_NOATIME_FL | FS_NODUMP_FL | \
+ FS_SYNC_FL | FS_DIRSYNC_FL | \
+ FS_NOCOMP_FL | FS_COMPR_FL | \
+ FS_NOCOW_FL | FS_COW_FL))
+ return -EOPNOTSUPP;
+
+ if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
+ return -EINVAL;
+
+ if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL))
+ return -EINVAL;
+
+ return 0;
+}
+
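For context, a hedged user-space sketch of the path that reaches check_flags(): FS_IOC_SETFLAGS lands in btrfs_ioctl_setflags() below, and either mutually exclusive pair comes back as -EINVAL. The helper name is ours, purely illustrative:

#include <sys/ioctl.h>
#include <linux/fs.h>

/* illustrative: request per-file compression on an open fd */
static int set_compress(int fd)
{
	int flags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return -1;
	flags |= FS_COMPR_FL;		/* ask for compression */
	flags &= ~FS_NOCOMP_FL;		/* FS_COMPR_FL|FS_NOCOMP_FL -> -EINVAL */
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}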
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
struct inode *inode = file->f_path.dentry->d_inode;
@@ -153,12 +172,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
- if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
- FS_NOATIME_FL | FS_NODUMP_FL | \
- FS_SYNC_FL | FS_DIRSYNC_FL))
- return -EOPNOTSUPP;
+ ret = check_flags(flags);
+ if (ret)
+ return ret;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
mutex_lock(&inode->i_mutex);
@@ -201,6 +219,22 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
else
ip->flags &= ~BTRFS_INODE_DIRSYNC;
+ /*
+ * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
+ * flag may be changed automatically if the compression code finds it
+ * cannot make things smaller.
+ */
+ if (flags & FS_NOCOMP_FL) {
+ ip->flags &= ~BTRFS_INODE_COMPRESS;
+ ip->flags |= BTRFS_INODE_NOCOMPRESS;
+ } else if (flags & FS_COMPR_FL) {
+ ip->flags |= BTRFS_INODE_COMPRESS;
+ ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ }
+ if (flags & FS_NOCOW_FL)
+ ip->flags |= BTRFS_INODE_NODATACOW;
+ else if (flags & FS_COW_FL)
+ ip->flags &= ~BTRFS_INODE_NODATACOW;
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
@@ -213,9 +247,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
btrfs_end_transaction(trans, root);
mnt_drop_write(file->f_path.mnt);
+
+ ret = 0;
out_unlock:
mutex_unlock(&inode->i_mutex);
- return 0;
+ return ret;
}
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
@@ -225,6 +261,49 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
return put_user(inode->i_generation, arg);
}
+static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
+{
+ struct btrfs_root *root = fdentry(file)->d_sb->s_fs_info;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_device *device;
+ struct request_queue *q;
+ struct fstrim_range range;
+ u64 minlen = ULLONG_MAX;
+ u64 num_devices = 0;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
+ if (!device->bdev)
+ continue;
+ q = bdev_get_queue(device->bdev);
+ if (blk_queue_discard(q)) {
+ num_devices++;
+ minlen = min((u64)q->limits.discard_granularity,
+ minlen);
+ }
+ }
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ if (!num_devices)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&range, arg, sizeof(range)))
+ return -EFAULT;
+
+ range.minlen = max(range.minlen, minlen);
+ ret = btrfs_trim_fs(root, &range);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(arg, &range, sizeof(range)))
+ return -EFAULT;
+
+ return 0;
+}
+
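This is the kernel side of what fstrim(8) drives; a minimal sketch of the caller (the wrapper name is ours, the structure and ioctl number are the standard ones from <linux/fs.h>):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FITRIM, struct fstrim_range */

/* illustrative: discard all free space on a mounted filesystem */
static int trim_whole_fs(int mount_fd)
{
	struct fstrim_range range = {
		.start	= 0,
		.len	= UINT64_MAX,	/* whole filesystem */
		.minlen	= 0,	/* raised to the queue minimum, per above */
	};

	return ioctl(mount_fd, FITRIM, &range);
}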
static noinline int create_subvol(struct btrfs_root *root,
struct dentry *dentry,
char *name, int namelen,
@@ -294,6 +373,10 @@ static noinline int create_subvol(struct btrfs_root *root,
inode_item->nbytes = cpu_to_le64(root->leafsize);
inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
+ root_item.flags = 0;
+ root_item.byte_limit = 0;
+ inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT);
+
btrfs_set_root_bytenr(&root_item, leaf->start);
btrfs_set_root_generation(&root_item, trans->transid);
btrfs_set_root_level(&root_item, 0);
@@ -409,7 +492,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
if (ret)
goto fail;
- btrfs_orphan_cleanup(pending_snapshot->snap);
+ ret = btrfs_orphan_cleanup(pending_snapshot->snap);
+ if (ret)
+ goto fail;
parent = dget_parent(dentry);
inode = btrfs_lookup_dentry(parent->d_inode, dentry);
@@ -1077,7 +1162,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
if (flags & ~BTRFS_SUBVOL_RDONLY)
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
down_write(&root->fs_info->subvol_sem);
@@ -2348,12 +2433,17 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root;
struct btrfs_trans_handle *trans;
u64 transid;
+ int ret;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
transid = trans->transid;
- btrfs_commit_transaction_async(trans, root, 0);
+ ret = btrfs_commit_transaction_async(trans, root, 0);
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
if (argp)
if (copy_to_user(argp, &transid, sizeof(transid)))
@@ -2388,6 +2478,8 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_setflags(file, argp);
case FS_IOC_GETVERSION:
return btrfs_ioctl_getversion(file, argp);
+ case FITRIM:
+ return btrfs_ioctl_fitrim(file, argp);
case BTRFS_IOC_SNAP_CREATE:
return btrfs_ioctl_snap_create(file, argp, 0);
case BTRFS_IOC_SNAP_CREATE_V2:
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 083a55477375..a1c940425307 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -202,6 +202,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
INIT_LIST_HEAD(&entry->list);
INIT_LIST_HEAD(&entry->root_extent_list);
+ trace_btrfs_ordered_extent_add(inode, entry);
+
spin_lock(&tree->lock);
node = tree_insert(&tree->tree, file_offset,
&entry->rb_node);
@@ -387,6 +389,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
struct list_head *cur;
struct btrfs_ordered_sum *sum;
+ trace_btrfs_ordered_extent_put(entry->inode, entry);
+
if (atomic_dec_and_test(&entry->refs)) {
while (!list_empty(&entry->list)) {
cur = entry->list.next;
@@ -420,6 +424,8 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
spin_lock(&root->fs_info->ordered_extent_lock);
list_del_init(&entry->root_extent_list);
+ trace_btrfs_ordered_extent_remove(inode, entry);
+
/*
* we have no more ordered extents for this inode and
* no dirty pages. We can safely remove it from the
@@ -585,6 +591,8 @@ void btrfs_start_ordered_extent(struct inode *inode,
u64 start = entry->file_offset;
u64 end = start + entry->len - 1;
+ trace_btrfs_ordered_extent_start(inode, entry);
+
/*
* pages in the range can be dirty, clean or writeback. We
* start IO on any dirty ones so the wait doesn't stall waiting
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 31ade5802ae8..199a80134312 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1724,6 +1724,7 @@ again:
eb = read_tree_block(dest, old_bytenr, blocksize,
old_ptr_gen);
+ BUG_ON(!eb);
btrfs_tree_lock(eb);
if (cow) {
ret = btrfs_cow_block(trans, dest, eb, parent,
@@ -2345,7 +2346,7 @@ struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
root = next->root;
BUG_ON(!root);
- /* no other choice for non-refernce counted tree */
+ /* no other choice for a non-reference-counted tree */
if (!root->ref_cows)
return root;
@@ -2513,6 +2514,10 @@ static int do_relocation(struct btrfs_trans_handle *trans,
blocksize = btrfs_level_size(root, node->level);
generation = btrfs_node_ptr_generation(upper->eb, slot);
eb = read_tree_block(root, bytenr, blocksize, generation);
+ if (!eb) {
+ err = -EIO;
+ goto next;
+ }
btrfs_tree_lock(eb);
btrfs_set_lock_blocking(eb);
@@ -2670,6 +2675,7 @@ static int get_tree_block_key(struct reloc_control *rc,
BUG_ON(block->key_ready);
eb = read_tree_block(rc->extent_root, block->bytenr,
block->key.objectid, block->key.offset);
+ BUG_ON(!eb);
WARN_ON(btrfs_header_level(eb) != block->level);
if (block->level == 0)
btrfs_item_key_to_cpu(eb, &block->key, 0);
@@ -4209,7 +4215,7 @@ out:
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
else
- btrfs_orphan_cleanup(fs_root);
+ err = btrfs_orphan_cleanup(fs_root);
}
return err;
}
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 6a1086e83ffc..6928bff62daa 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -88,7 +88,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
search_key.offset = (u64)-1;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret < 0)
goto out;
@@ -332,7 +333,8 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *leaf;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
ret = btrfs_search_slot(trans, root, key, path, -1, 1);
if (ret < 0)
goto out;
@@ -471,3 +473,21 @@ again:
btrfs_free_path(path);
return 0;
}
+
+/*
+ * Old btrfs forgets to init root_item->flags and root_item->byte_limit
+ * for subvolumes. To work around this problem, we steal a bit from
+ * root_item->inode_item->flags, and use it to indicate if those fields
+ * have been properly initialized.
+ */
+void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
+{
+ u64 inode_flags = le64_to_cpu(root_item->inode.flags);
+
+ if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) {
+ inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT;
+ root_item->inode.flags = cpu_to_le64(inode_flags);
+ root_item->flags = 0;
+ root_item->byte_limit = 0;
+ }
+}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d39a9895d932..58e7de9cc90c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -52,6 +52,9 @@
#include "export.h"
#include "compression.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/btrfs.h>
+
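These two lines follow the standard tracepoint pattern, sketched below: exactly one compilation unit defines CREATE_TRACE_POINTS before including the event header, which expands the TRACE_EVENT() declarations in <trace/events/btrfs.h> into definitions; every other file includes the header bare and only gets the trace_btrfs_*() call stubs used throughout this series.

/* in exactly one .c file (here, super.c): */
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>

/* in every other caller: */
#include <trace/events/btrfs.h>
/* ... trace_btrfs_sync_fs(wait); ... */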
static const struct super_operations btrfs_super_ops;
static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
@@ -620,6 +623,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
struct btrfs_root *root = btrfs_sb(sb);
int ret;
+ trace_btrfs_sync_fs(wait);
+
if (!wait) {
filemap_flush(root->fs_info->btree_inode->i_mapping);
return 0;
@@ -639,6 +644,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
struct btrfs_root *root = btrfs_sb(vfs->mnt_sb);
struct btrfs_fs_info *info = root->fs_info;
+ char *compress_type;
if (btrfs_test_opt(root, DEGRADED))
seq_puts(seq, ",degraded");
@@ -657,8 +663,16 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (info->thread_pool_size != min_t(unsigned long,
num_online_cpus() + 2, 8))
seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
- if (btrfs_test_opt(root, COMPRESS))
- seq_puts(seq, ",compress");
+ if (btrfs_test_opt(root, COMPRESS)) {
+ if (info->compress_type == BTRFS_COMPRESS_ZLIB)
+ compress_type = "zlib";
+ else
+ compress_type = "lzo";
+ if (btrfs_test_opt(root, FORCE_COMPRESS))
+ seq_printf(seq, ",compress-force=%s", compress_type);
+ else
+ seq_printf(seq, ",compress=%s", compress_type);
+ }
if (btrfs_test_opt(root, NOSSD))
seq_puts(seq, ",nossd");
if (btrfs_test_opt(root, SSD_SPREAD))
@@ -673,6 +687,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",discard");
if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl");
+ if (btrfs_test_opt(root, SPACE_CACHE))
+ seq_puts(seq, ",space_cache");
+ if (btrfs_test_opt(root, CLEAR_CACHE))
+ seq_puts(seq, ",clear_cache");
+ if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+ seq_puts(seq, ",user_subvol_rm_allowed");
return 0;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3d73c8d93bbb..5b158da7e0bb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -57,7 +57,8 @@ static noinline int join_transaction(struct btrfs_root *root)
if (!cur_trans) {
cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
GFP_NOFS);
- BUG_ON(!cur_trans);
+ if (!cur_trans)
+ return -ENOMEM;
root->fs_info->generation++;
cur_trans->num_writers = 1;
cur_trans->num_joined = 0;
@@ -195,7 +196,12 @@ again:
wait_current_trans(root);
ret = join_transaction(root);
- BUG_ON(ret);
+ if (ret < 0) {
+ kmem_cache_free(btrfs_trans_handle_cachep, h);
+ if (type != TRANS_JOIN_NOLOCK)
+ mutex_unlock(&root->fs_info->trans_mutex);
+ return ERR_PTR(ret);
+ }
cur_trans = root->fs_info->running_transaction;
cur_trans->use_count++;
@@ -970,6 +976,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
record_root_in_trans(trans, root);
btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
+ btrfs_check_and_init_root_item(new_root_item);
root_flags = btrfs_root_flags(new_root_item);
if (pending->readonly)
@@ -1156,7 +1163,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
struct btrfs_transaction *cur_trans;
ac = kmalloc(sizeof(*ac), GFP_NOFS);
- BUG_ON(!ac);
+ if (!ac)
+ return -ENOMEM;
INIT_DELAYED_WORK(&ac->work, do_async_commit);
ac->root = root;
@@ -1389,6 +1397,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
put_transaction(cur_trans);
put_transaction(cur_trans);
+ trace_btrfs_transaction_commit(root);
+
mutex_unlock(&root->fs_info->trans_mutex);
if (current->journal_info == trans)
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a4bbb854dfd2..c50271ad3157 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -799,12 +799,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
struct inode *dir;
int ret;
struct btrfs_inode_ref *ref;
- struct btrfs_dir_item *di;
struct inode *inode;
char *name;
int namelen;
unsigned long ref_ptr;
unsigned long ref_end;
+ int search_done = 0;
/*
* it is possible that we didn't log all the parent directories
@@ -845,7 +845,10 @@ again:
* existing back reference, and we don't want to create
* dangling pointers in the directory.
*/
-conflict_again:
+
+ if (search_done)
+ goto insert;
+
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
if (ret == 0) {
char *victim_name;
@@ -886,37 +889,21 @@ conflict_again:
ret = btrfs_unlink_inode(trans, root, dir,
inode, victim_name,
victim_name_len);
- kfree(victim_name);
- btrfs_release_path(root, path);
- goto conflict_again;
}
kfree(victim_name);
ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
}
BUG_ON(ret);
- }
- btrfs_release_path(root, path);
-
- /* look for a conflicting sequence number */
- di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
- btrfs_inode_ref_index(eb, ref),
- name, namelen, 0);
- if (di && !IS_ERR(di)) {
- ret = drop_one_dir_item(trans, root, path, dir, di);
- BUG_ON(ret);
- }
- btrfs_release_path(root, path);
-
- /* look for a conflicting name */
- di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
- name, namelen, 0);
- if (di && !IS_ERR(di)) {
- ret = drop_one_dir_item(trans, root, path, dir, di);
- BUG_ON(ret);
+ /*
+ * NOTE: we have searched the root tree and checked the
+ * corresponding ref; there is no need to check it again.
+ */
+ search_done = 1;
}
btrfs_release_path(root, path);
+insert:
/* insert our name */
ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
btrfs_inode_ref_index(eb, ref));
@@ -1286,6 +1273,8 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
+ if (verify_dir_item(root, eb, di))
+ return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
BUG_ON(ret);
@@ -1412,6 +1401,11 @@ again:
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
+ if (verify_dir_item(root, eb, di)) {
+ ret = -EIO;
+ goto out;
+ }
+
name_len = btrfs_dir_name_len(eb, di);
name = kmalloc(name_len, GFP_NOFS);
if (!name) {
@@ -1821,7 +1815,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
int orig_level;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
level = btrfs_header_level(log->node);
orig_level = level;
@@ -3107,9 +3102,11 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
.stage = 0,
};
- fs_info->log_root_recovering = 1;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
+ fs_info->log_root_recovering = 1;
trans = btrfs_start_transaction(fs_info->tree_root, 0);
BUG_ON(IS_ERR(trans));
@@ -3117,7 +3114,8 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
wc.trans = trans;
wc.pin = 1;
- walk_log_tree(trans, log_root_tree, &wc);
+ ret = walk_log_tree(trans, log_root_tree, &wc);
+ BUG_ON(ret);
again:
key.objectid = BTRFS_TREE_LOG_OBJECTID;
@@ -3141,8 +3139,7 @@ again:
log = btrfs_read_fs_root_no_radix(log_root_tree,
&found_key);
- BUG_ON(!log);
-
+ BUG_ON(IS_ERR(log));
tmp_key.objectid = found_key.offset;
tmp_key.type = BTRFS_ROOT_ITEM_KEY;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index dd13eb81ee40..309a57b9fc85 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -33,17 +33,6 @@
#include "volumes.h"
#include "async-thread.h"
-struct map_lookup {
- u64 type;
- int io_align;
- int io_width;
- int stripe_len;
- int sector_size;
- int num_stripes;
- int sub_stripes;
- struct btrfs_bio_stripe stripes[];
-};
-
static int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_device *device);
@@ -162,7 +151,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
struct bio *cur;
int again = 0;
unsigned long num_run;
- unsigned long num_sync_run;
unsigned long batch_run = 0;
unsigned long limit;
unsigned long last_waited = 0;
@@ -173,11 +161,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
- /* we want to make sure that every time we switch from the sync
- * list to the normal list, we unplug
- */
- num_sync_run = 0;
-
loop:
spin_lock(&device->io_lock);
@@ -223,15 +206,6 @@ loop_lock:
spin_unlock(&device->io_lock);
- /*
- * if we're doing the regular priority list, make sure we unplug
- * for any high prio bios we've sent down
- */
- if (pending_bios == &device->pending_bios && num_sync_run > 0) {
- num_sync_run = 0;
- blk_run_backing_dev(bdi, NULL);
- }
-
while (pending) {
rmb();
@@ -259,19 +233,11 @@ loop_lock:
BUG_ON(atomic_read(&cur->bi_cnt) == 0);
- if (cur->bi_rw & REQ_SYNC)
- num_sync_run++;
-
submit_bio(cur->bi_rw, cur);
num_run++;
batch_run++;
- if (need_resched()) {
- if (num_sync_run) {
- blk_run_backing_dev(bdi, NULL);
- num_sync_run = 0;
- }
+ if (need_resched())
cond_resched();
- }
/*
* we made progress, there is more work to do and the bdi
@@ -304,13 +270,8 @@ loop_lock:
* against it before looping
*/
last_waited = ioc->last_waited;
- if (need_resched()) {
- if (num_sync_run) {
- blk_run_backing_dev(bdi, NULL);
- num_sync_run = 0;
- }
+ if (need_resched())
cond_resched();
- }
continue;
}
spin_lock(&device->io_lock);
@@ -323,22 +284,6 @@ loop_lock:
}
}
- if (num_sync_run) {
- num_sync_run = 0;
- blk_run_backing_dev(bdi, NULL);
- }
- /*
- * IO has already been through a long path to get here. Checksumming,
- * async helper threads, perhaps compression. We've done a pretty
- * good job of collecting a batch of IO and should just unplug
- * the device right away.
- *
- * This will help anyone who is waiting on the IO, they might have
- * already unplugged, but managed to do so before the bio they
- * cared about found its way down here.
- */
- blk_run_backing_dev(bdi, NULL);
-
cond_resched();
if (again)
goto loop;
@@ -1923,6 +1868,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
BUG_ON(ret);
+ trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
+
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
BUG_ON(ret);
@@ -2650,6 +2597,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
*num_bytes = chunk_bytes_by_type(type, calc_size,
map->num_stripes, sub_stripes);
+ trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);
+
em = alloc_extent_map(GFP_NOFS);
if (!em) {
ret = -ENOMEM;
@@ -2758,6 +2707,7 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
item_size);
BUG_ON(ret);
}
+
kfree(chunk);
return 0;
}
@@ -2955,14 +2905,17 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret,
- int mirror_num, struct page *unplug_page)
+ int mirror_num)
{
struct extent_map *em;
struct map_lookup *map;
struct extent_map_tree *em_tree = &map_tree->map_tree;
u64 offset;
u64 stripe_offset;
+ u64 stripe_end_offset;
u64 stripe_nr;
+ u64 stripe_nr_orig;
+ u64 stripe_nr_end;
int stripes_allocated = 8;
int stripes_required = 1;
int stripe_index;
@@ -2971,7 +2924,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
int max_errors = 0;
struct btrfs_multi_bio *multi = NULL;
- if (multi_ret && !(rw & REQ_WRITE))
+ if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
stripes_allocated = 1;
again:
if (multi_ret) {
@@ -2987,11 +2940,6 @@ again:
em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock);
- if (!em && unplug_page) {
- kfree(multi);
- return 0;
- }
-
if (!em) {
printk(KERN_CRIT "unable to find logical %llu len %llu\n",
(unsigned long long)logical,
@@ -3017,7 +2965,15 @@ again:
max_errors = 1;
}
}
- if (multi_ret && (rw & REQ_WRITE) &&
+ if (rw & REQ_DISCARD) {
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_DUP |
+ BTRFS_BLOCK_GROUP_RAID10)) {
+ stripes_required = map->num_stripes;
+ }
+ }
+ if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
stripes_allocated < stripes_required) {
stripes_allocated = map->num_stripes;
free_extent_map(em);
@@ -3037,23 +2993,37 @@ again:
/* stripe_offset is the offset of this block in its stripe*/
stripe_offset = offset - stripe_offset;
- if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_DUP)) {
+ if (rw & REQ_DISCARD)
+ *length = min_t(u64, em->len - offset, *length);
+ else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_DUP)) {
/* we limit the length of each bio to what fits in a stripe */
*length = min_t(u64, em->len - offset,
- map->stripe_len - stripe_offset);
+ map->stripe_len - stripe_offset);
} else {
*length = em->len - offset;
}
- if (!multi_ret && !unplug_page)
+ if (!multi_ret)
goto out;
num_stripes = 1;
stripe_index = 0;
- if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (unplug_page || (rw & REQ_WRITE))
+ stripe_nr_orig = stripe_nr;
+ stripe_nr_end = (offset + *length + map->stripe_len - 1) &
+ (~(map->stripe_len - 1));
+ do_div(stripe_nr_end, map->stripe_len);
+ stripe_end_offset = stripe_nr_end * map->stripe_len -
+ (offset + *length);
+ if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+ if (rw & REQ_DISCARD)
+ num_stripes = min_t(u64, map->num_stripes,
+ stripe_nr_end - stripe_nr_orig);
+ stripe_index = do_div(stripe_nr, map->num_stripes);
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
+ if (rw & (REQ_WRITE | REQ_DISCARD))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -3064,7 +3034,7 @@ again:
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (rw & REQ_WRITE)
+ if (rw & (REQ_WRITE | REQ_DISCARD))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -3075,8 +3045,12 @@ again:
stripe_index = do_div(stripe_nr, factor);
stripe_index *= map->sub_stripes;
- if (unplug_page || (rw & REQ_WRITE))
+ if (rw & REQ_WRITE)
num_stripes = map->sub_stripes;
+ else if (rw & REQ_DISCARD)
+ num_stripes = min_t(u64, map->sub_stripes *
+ (stripe_nr_end - stripe_nr_orig),
+ map->num_stripes);
else if (mirror_num)
stripe_index += mirror_num - 1;
else {
@@ -3094,24 +3068,101 @@ again:
}
BUG_ON(stripe_index >= map->num_stripes);
- for (i = 0; i < num_stripes; i++) {
- if (unplug_page) {
- struct btrfs_device *device;
- struct backing_dev_info *bdi;
-
- device = map->stripes[stripe_index].dev;
- if (device->bdev) {
- bdi = blk_get_backing_dev_info(device->bdev);
- if (bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, unplug_page);
- }
- } else {
+ if (rw & REQ_DISCARD) {
+ for (i = 0; i < num_stripes; i++) {
multi->stripes[i].physical =
map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len;
multi->stripes[i].dev = map->stripes[stripe_index].dev;
+
+ if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+ u64 stripes;
+ u32 last_stripe = 0;
+ int j;
+
+ div_u64_rem(stripe_nr_end - 1,
+ map->num_stripes,
+ &last_stripe);
+
+ for (j = 0; j < map->num_stripes; j++) {
+ u32 test;
+
+ div_u64_rem(stripe_nr_end - 1 - j,
+ map->num_stripes, &test);
+ if (test == stripe_index)
+ break;
+ }
+ stripes = stripe_nr_end - 1 - j;
+ do_div(stripes, map->num_stripes);
+ multi->stripes[i].length = map->stripe_len *
+ (stripes - stripe_nr + 1);
+
+ if (i == 0) {
+ multi->stripes[i].length -=
+ stripe_offset;
+ stripe_offset = 0;
+ }
+ if (stripe_index == last_stripe)
+ multi->stripes[i].length -=
+ stripe_end_offset;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+ u64 stripes;
+ int j;
+ int factor = map->num_stripes /
+ map->sub_stripes;
+ u32 last_stripe = 0;
+
+ div_u64_rem(stripe_nr_end - 1,
+ factor, &last_stripe);
+ last_stripe *= map->sub_stripes;
+
+ for (j = 0; j < factor; j++) {
+ u32 test;
+
+ div_u64_rem(stripe_nr_end - 1 - j,
+ factor, &test);
+
+ if (test ==
+ stripe_index / map->sub_stripes)
+ break;
+ }
+ stripes = stripe_nr_end - 1 - j;
+ do_div(stripes, factor);
+ multi->stripes[i].length = map->stripe_len *
+ (stripes - stripe_nr + 1);
+
+ if (i < map->sub_stripes) {
+ multi->stripes[i].length -=
+ stripe_offset;
+ if (i == map->sub_stripes - 1)
+ stripe_offset = 0;
+ }
+ if (stripe_index >= last_stripe &&
+ stripe_index <= (last_stripe +
+ map->sub_stripes - 1)) {
+ multi->stripes[i].length -=
+ stripe_end_offset;
+ }
+ } else
+ multi->stripes[i].length = *length;
+
+ stripe_index++;
+ if (stripe_index == map->num_stripes) {
+ /* This could only happen for RAID0/10 */
+ stripe_index = 0;
+ stripe_nr++;
+ }
+ }
+ } else {
+ for (i = 0; i < num_stripes; i++) {
+ multi->stripes[i].physical =
+ map->stripes[stripe_index].physical +
+ stripe_offset +
+ stripe_nr * map->stripe_len;
+ multi->stripes[i].dev =
+ map->stripes[stripe_index].dev;
+ stripe_index++;
}
- stripe_index++;
}
if (multi_ret) {
*multi_ret = multi;
@@ -3128,7 +3179,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
struct btrfs_multi_bio **multi_ret, int mirror_num)
{
return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
- mirror_num, NULL);
+ mirror_num);
}
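The discard mapping above is dense, so a worked example with assumed geometry may help: RAID0 over num_stripes = 2 devices, stripe_len = 64K, discarding 192K starting at logical offset 96K.

/*
 *   stripe_nr         = 96K / 64K                       = 1
 *   stripe_offset     = 96K - 1 * 64K                   = 32K
 *   stripe_nr_end     = roundup(96K + 192K, 64K) / 64K  = 5
 *   stripe_end_offset = 5 * 64K - (96K + 192K)          = 32K
 *   num_stripes       = min(2, 5 - 1)                   = 2
 *
 * The device stripe where the range starts covers logical stripes
 * 1 and 3: 2 * 64K minus the 32K head = 96K. The other device stripe
 * covers logical stripes 2 and 4: 2 * 64K minus the 32K tail = 96K.
 * Total trimmed: 96K + 96K = 192K, matching *length.
 */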
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3196,14 +3247,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
return 0;
}
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
- u64 logical, struct page *page)
-{
- u64 length = PAGE_CACHE_SIZE;
- return __btrfs_map_block(map_tree, READ, logical, &length,
- NULL, 0, page);
-}
-
static void end_bio_multi_stripe(struct bio *bio, int err)
{
struct btrfs_multi_bio *multi = bio->bi_private;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 7fb59d45fe8c..cc2eadaf7a27 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -126,6 +126,7 @@ struct btrfs_fs_devices {
struct btrfs_bio_stripe {
struct btrfs_device *dev;
u64 physical;
+ u64 length; /* only used for discard mappings */
};
struct btrfs_multi_bio {
@@ -145,6 +146,17 @@ struct btrfs_device_info {
u64 max_avail;
};
+struct map_lookup {
+ u64 type;
+ int io_align;
+ int io_width;
+ int stripe_len;
+ int sector_size;
+ int num_stripes;
+ int sub_stripes;
+ struct btrfs_bio_stripe stripes[];
+};
+
/* Used to sort the devices by max_avail(descending sort) */
int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index d779cefcfd7d..a5303b871b13 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -242,6 +242,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
break;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ if (verify_dir_item(root, leaf, di))
+ continue;
name_len = btrfs_dir_name_len(leaf, di);
total_size += name_len + 1;
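Three hunks in this series (btrfs_real_readdir, the log-replay loops, and btrfs_listxattr just above) converge on the same guard. Sketched generically, with the caveat that verify_dir_item() itself lives in dir-item.c, outside this diff:

while (ptr < ptr_end) {
	di = (struct btrfs_dir_item *)ptr;
	/* refuse to trust name/data lengths from a corrupted leaf */
	if (verify_dir_item(root, leaf, di))
		break;	/* or return -EIO / continue, per call site */
	name_len = btrfs_dir_name_len(leaf, di);
	/* ... consume the entry ... */
	ptr += sizeof(*di) + name_len + btrfs_dir_data_len(leaf, di);
}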
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index f5ec2d44150d..faccd47c6c46 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -57,7 +57,8 @@ static struct list_head *zlib_alloc_workspace(void)
if (!workspace)
return ERR_PTR(-ENOMEM);
- workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+ workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize(
+ MAX_WBITS, MAX_MEM_LEVEL));
workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
if (!workspace->def_strm.workspace ||
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76e2caf..a08bb8e61c6f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
}
EXPORT_SYMBOL(init_buffer);
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
{
- struct block_device *bd;
- struct buffer_head *bh
- = container_of(word, struct buffer_head, b_state);
-
- smp_mb();
- bd = bh->b_bdev;
- if (bd)
- blk_run_address_space(bd->bd_inode->i_mapping);
io_schedule();
return 0;
}
void __lock_buffer(struct buffer_head *bh)
{
- wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+ wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
*/
void __wait_on_buffer(struct buffer_head * bh)
{
- wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+ wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
@@ -749,10 +741,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
struct buffer_head *bh;
struct list_head tmp;
- struct address_space *mapping, *prev_mapping = NULL;
+ struct address_space *mapping;
int err = 0, err2;
+ struct blk_plug plug;
INIT_LIST_HEAD(&tmp);
+ blk_start_plug(&plug);
spin_lock(lock);
while (!list_empty(list)) {
@@ -775,7 +769,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* still in flight on potentially older
* contents.
*/
- write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+ write_dirty_buffer(bh, WRITE_SYNC);
/*
* Kick off IO for the previous mapping. Note
@@ -783,16 +777,16 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* wait_on_buffer() will do that for us
* through sync_buffer().
*/
- if (prev_mapping && prev_mapping != mapping)
- blk_run_address_space(prev_mapping);
- prev_mapping = mapping;
-
brelse(bh);
spin_lock(lock);
}
}
}
+ spin_unlock(lock);
+ blk_finish_plug(&plug);
+ spin_lock(lock);
+
while (!list_empty(&tmp)) {
bh = BH_ENTRY(tmp.prev);
get_bh(bh);
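The conversion above swaps per-device unplug calls for on-stack block plugging; the general idiom, sketched:

struct blk_plug plug;

blk_start_plug(&plug);
/* submit a batch of bios; they accumulate on the plug list */
blk_finish_plug(&plug);	/* flush the batch to the device queues */

If the task sleeps while plugged (as in io_schedule()), the scheduler flushes the plug automatically, which is why the new sleep_on_buffer() above no longer has to kick the queue itself.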
@@ -1144,7 +1138,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
* inode list.
*
* mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and the global inode_lock.
+ * mapping->tree_lock and mapping->host->i_lock.
*/
void mark_buffer_dirty(struct buffer_head *bh)
{
@@ -1614,14 +1608,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
* prevents this contention from occurring.
*
* If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed. The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
*/
static int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1622,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE);
+ WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page));
@@ -3138,17 +3126,6 @@ out:
}
EXPORT_SYMBOL(try_to_free_buffers);
-void block_sync_page(struct page *page)
-{
- struct address_space *mapping;
-
- smp_mb();
- mapping = page_mapping(page);
- if (mapping)
- blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
/*
* There are no bdflush tunables left. But distributions are
* still running obsolete flush daemons, so we terminate them here.
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 37fe101a4e0d..1064805e653b 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -197,7 +197,7 @@ struct fscache_object *cachefiles_grab_object(struct fscache_object *_object)
}
/*
- * update the auxilliary data for an object object on disk
+ * update the auxiliary data for an object on disk
*/
static void cachefiles_update_object(struct fscache_object *_object)
{
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 561438b6a50c..e159c529fd2b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -24,7 +24,7 @@
* context needs to be associated with the osd write during writeback.
*
* Similarly, struct ceph_inode_info maintains a set of counters to
- * count dirty pages on the inode. In the absense of snapshots,
+ * count dirty pages on the inode. In the absence of snapshots,
* i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
*
* When a snapshot is taken (that is, when the client receives
@@ -92,7 +92,7 @@ static int ceph_set_page_dirty(struct page *page)
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
if (ci->i_wrbuffer_ref == 0)
- igrab(inode);
+ ihold(inode);
++ci->i_wrbuffer_ref;
dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6b61ded701e1..5323c330bbf3 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -765,7 +765,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
if (touch) {
struct rb_node *q;
- /* touch this + preceeding caps */
+ /* touch this + preceding caps */
__touch_cap(cap);
for (q = rb_first(&ci->i_caps); q != p;
q = rb_next(q)) {
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 08f65faac112..0dba6915712b 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -210,8 +210,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_congestion_kb)
goto out;
- dout("a\n");
-
snprintf(name, sizeof(name), "../../bdi/%s",
dev_name(fsc->backing_dev_info.dev));
fsc->debugfs_bdi =
@@ -221,7 +219,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_bdi)
goto out;
- dout("b\n");
fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
0600,
fsc->client->debugfs_dir,
@@ -230,7 +227,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_mdsmap)
goto out;
- dout("ca\n");
fsc->debugfs_mdsc = debugfs_create_file("mdsc",
0600,
fsc->client->debugfs_dir,
@@ -239,7 +235,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_mdsc)
goto out;
- dout("da\n");
fsc->debugfs_caps = debugfs_create_file("caps",
0400,
fsc->client->debugfs_dir,
@@ -248,7 +243,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_caps)
goto out;
- dout("ea\n");
fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
0600,
fsc->client->debugfs_dir,
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index ebafa65a29b6..1a867a3601ae 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -161,7 +161,7 @@ more:
filp->f_pos = di->offset;
err = filldir(dirent, dentry->d_name.name,
dentry->d_name.len, di->offset,
- dentry->d_inode->i_ino,
+ ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
dentry->d_inode->i_mode >> 12);
if (last) {
@@ -245,15 +245,17 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
dout("readdir off 0 -> '.'\n");
if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
- inode->i_ino, inode->i_mode >> 12) < 0)
+ ceph_translate_ino(inode->i_sb, inode->i_ino),
+ inode->i_mode >> 12) < 0)
return 0;
filp->f_pos = 1;
off = 1;
}
if (filp->f_pos == 1) {
+ ino_t ino = filp->f_dentry->d_parent->d_inode->i_ino;
dout("readdir off 1 -> '..'\n");
if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
- filp->f_dentry->d_parent->d_inode->i_ino,
+ ceph_translate_ino(inode->i_sb, ino),
inode->i_mode >> 12) < 0)
return 0;
filp->f_pos = 2;
@@ -377,7 +379,8 @@ more:
if (filldir(dirent,
rinfo->dir_dname[off - fi->offset],
rinfo->dir_dname_len[off - fi->offset],
- pos, ino, ftype) < 0) {
+ pos,
+ ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
dout("filldir stopping us...\n");
return 0;
}
@@ -1024,14 +1027,13 @@ out_touch:
}
/*
- * When a dentry is released, clear the dir I_COMPLETE if it was part
- * of the current dir gen or if this is in the snapshot namespace.
+ * Release our ceph_dentry_info.
*/
-static void ceph_dentry_release(struct dentry *dentry)
+static void ceph_d_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
- dout("dentry_release %p\n", dentry);
+ dout("d_release %p\n", dentry);
if (di) {
ceph_dentry_lru_del(dentry);
if (di->lease_session)
@@ -1256,14 +1258,14 @@ const struct inode_operations ceph_dir_iops = {
const struct dentry_operations ceph_dentry_ops = {
.d_revalidate = ceph_d_revalidate,
- .d_release = ceph_dentry_release,
+ .d_release = ceph_d_release,
};
const struct dentry_operations ceph_snapdir_dentry_ops = {
.d_revalidate = ceph_snapdir_d_revalidate,
- .d_release = ceph_dentry_release,
+ .d_release = ceph_d_release,
};
const struct dentry_operations ceph_snap_dentry_ops = {
- .d_release = ceph_dentry_release,
+ .d_release = ceph_d_release,
};
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 7d0e4a82d898..159b512d5a27 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -564,11 +564,19 @@ more:
* start_request so that a tid has been assigned.
*/
spin_lock(&ci->i_unsafe_lock);
- list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
+ list_add_tail(&req->r_unsafe_item,
+ &ci->i_unsafe_writes);
spin_unlock(&ci->i_unsafe_lock);
ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
}
+
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ if (ret < 0 && req->r_safe_callback) {
+ spin_lock(&ci->i_unsafe_lock);
+ list_del_init(&req->r_unsafe_item);
+ spin_unlock(&ci->i_unsafe_lock);
+ ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
+ }
}
if (file->f_flags & O_DIRECT)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 193bfa5e9cbd..b54c97da1c43 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -36,6 +36,13 @@ static void ceph_vmtruncate_work(struct work_struct *work);
/*
* find or create an inode, given the ceph ino number
*/
+static int ceph_set_ino_cb(struct inode *inode, void *data)
+{
+ ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
+ inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
+ return 0;
+}
+
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
struct inode *inode;
@@ -1030,9 +1037,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
dout("fill_trace doing d_move %p -> %p\n",
req->r_old_dentry, dn);
- /* d_move screws up d_subdirs order */
- ceph_i_clear(dir, CEPH_I_COMPLETE);
-
d_move(req->r_old_dentry, dn);
dout(" src %p '%.*s' dst %p '%.*s'\n",
req->r_old_dentry,
@@ -1044,12 +1048,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
rehashing bug in vfs_rename_dir */
ceph_invalidate_dentry_lease(dn);
- /* take overwritten dentry's readdir offset */
- dout("dn %p gets %p offset %lld (old offset %lld)\n",
- req->r_old_dentry, dn, ceph_dentry(dn)->offset,
+ /*
+ * d_move() puts the renamed dentry at the end of
+ * d_subdirs. We need to assign it an appropriate
+ * directory offset so readdir behaves correctly while the
+ * directory holds I_COMPLETE.
+ */
+ ceph_set_dentry_offset(req->r_old_dentry);
+ dout("dn %p gets new offset %lld\n", req->r_old_dentry,
ceph_dentry(req->r_old_dentry)->offset);
- ceph_dentry(req->r_old_dentry)->offset =
- ceph_dentry(dn)->offset;
dn = req->r_old_dentry; /* use old_dentry */
in = dn->d_inode;
@@ -1809,7 +1816,7 @@ int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
if (!err) {
generic_fillattr(inode, stat);
- stat->ino = inode->i_ino;
+ stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
if (ceph_snap(inode) != CEPH_NOSNAP)
stat->dev = ceph_snap(inode);
else
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a1ee8fa3a8e7..f60b07b0feb0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3215,9 +3215,15 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc = fsc->mdsc;
+ dout("mdsc_destroy %p\n", mdsc);
ceph_mdsc_stop(mdsc);
+
+ /* flush out any connection work with references to us */
+ ceph_msgr_flush();
+
fsc->mdsc = NULL;
kfree(mdsc);
+ dout("mdsc_destroy %p done\n", mdsc);
}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f40b9139e437..e86ec1155f8f 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -342,7 +342,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
num = 0;
snapc->seq = realm->seq;
if (parent) {
- /* include any of parent's snaps occuring _after_ my
+ /* include any of parent's snaps occurring _after_ my
parent became my parent */
for (i = 0; i < parent->cached_context->num_snaps; i++)
if (parent->cached_context->snaps[i] >=
@@ -463,8 +463,8 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
capsnap, snapc);
- igrab(inode);
-
+ ihold(inode);
+
atomic_set(&capsnap->nref, 1);
capsnap->ci = ci;
INIT_LIST_HEAD(&capsnap->ci_item);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 9c5085465a63..f2f77fd3c14c 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -131,6 +131,7 @@ enum {
Opt_rbytes,
Opt_norbytes,
Opt_noasyncreaddir,
+ Opt_ino32,
};
static match_table_t fsopt_tokens = {
@@ -150,6 +151,7 @@ static match_table_t fsopt_tokens = {
{Opt_rbytes, "rbytes"},
{Opt_norbytes, "norbytes"},
{Opt_noasyncreaddir, "noasyncreaddir"},
+ {Opt_ino32, "ino32"},
{-1, NULL}
};
@@ -225,6 +227,9 @@ static int parse_fsopt_token(char *c, void *private)
case Opt_noasyncreaddir:
fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
break;
+ case Opt_ino32:
+ fsopt->flags |= CEPH_MOUNT_OPT_INO32;
+ break;
default:
BUG_ON(token);
}
@@ -288,7 +293,7 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
fsopt->sb_flags = flags;
fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
- fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
+ fsopt->rsize = CEPH_RSIZE_DEFAULT;
fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
@@ -348,7 +353,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
if (opt->name)
seq_printf(m, ",name=%s", opt->name);
- if (opt->secret)
+ if (opt->key)
seq_puts(m, ",secret=<hidden>");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
@@ -370,7 +375,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
if (fsopt->wsize)
seq_printf(m, ",wsize=%d", fsopt->wsize);
- if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
+ if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
seq_printf(m, ",rsize=%d", fsopt->rsize);
if (fsopt->congestion_kb != default_congestion_kb())
seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 20b907d76ae2..619fe719968f 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -27,6 +27,7 @@
#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
+#define CEPH_MOUNT_OPT_INO32 (1<<8) /* 32 bit inos */
#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES)
@@ -35,6 +36,7 @@
#define ceph_test_mount_opt(fsc, opt) \
(!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
+#define CEPH_RSIZE_DEFAULT (512*1024) /* readahead */
#define CEPH_MAX_READDIR_DEFAULT 1024
#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
@@ -319,6 +321,16 @@ static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
return container_of(inode, struct ceph_inode_info, vfs_inode);
}
+static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
+{
+ return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
+}
+
+static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
+{
+ return (struct ceph_fs_client *)sb->s_fs_info;
+}
+
static inline struct ceph_vino ceph_vino(struct inode *inode)
{
return ceph_inode(inode)->i_vino;
@@ -327,19 +339,49 @@ static inline struct ceph_vino ceph_vino(struct inode *inode)
/*
* ino_t is <64 bits on many architectures, blech.
*
- * don't include snap in ino hash, at least for now.
+ * i_ino (kernel inode) st_ino (userspace)
+ * i386 32 32
+ * x86_64+ino32 64 32
+ * x86_64 64 64
+ */
+static inline u32 ceph_ino_to_ino32(ino_t ino)
+{
+ ino ^= ino >> (sizeof(ino) * 8 - 32);
+ if (!ino)
+ ino = 1;
+ return ino;
+}
+
+/*
+ * kernel i_ino value
*/
static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
{
ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */
#if BITS_PER_LONG == 32
- ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
- if (!ino)
- ino = 1;
+ ino = ceph_ino_to_ino32(ino);
#endif
return ino;
}
+/*
+ * user-visible ino (stat, filldir)
+ */
+#if BITS_PER_LONG == 32
+static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino)
+{
+ return ino;
+}
+#else
+static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino)
+{
+ if (ceph_test_mount_opt(ceph_sb_to_client(sb), INO32))
+ ino = ceph_ino_to_ino32(ino);
+ return ino;
+}
+#endif
+
+
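To make the fold concrete, a worked example assuming a 64-bit ino_t (so the shift is sizeof(ino) * 8 - 32 = 32):

/*
 *   ino               = 0x123456789abcdef0
 *   ino >> 32         = 0x0000000012345678
 *   ino ^= ino >> 32 -> 0x1234567888888888
 *   returned u32      =         0x88888888
 *
 * The !ino check remaps a zero value to 1 before the truncating
 * return, so a hashed inode number is not mistaken for "no inode".
 */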
/* for printf-style formatting */
#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
@@ -428,13 +470,6 @@ static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
return ((loff_t)frag << 32) | (loff_t)off;
}
-static inline int ceph_set_ino_cb(struct inode *inode, void *data)
-{
- ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
- inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
- return 0;
-}
-
/*
* caps helpers
*/
@@ -503,15 +538,6 @@ extern void ceph_reservation_status(struct ceph_fs_client *client,
int *total, int *avail, int *used,
int *reserved, int *min);
-static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
-{
- return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
-}
-
-static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
-{
- return (struct ceph_fs_client *)sb->s_fs_info;
-}
/*
diff --git a/fs/cifs/AUTHORS b/fs/cifs/AUTHORS
index 7f7fa3c302af..ea940b1db77b 100644
--- a/fs/cifs/AUTHORS
+++ b/fs/cifs/AUTHORS
@@ -35,7 +35,7 @@ Adrian Bunk (kcalloc cleanups)
Miklos Szeredi
Kazeon team for various fixes especially for 2.4 version.
Asser Ferno (Change Notify support)
-Shaggy (Dave Kleikamp) for inumerable small fs suggestions and some good cleanup
+Shaggy (Dave Kleikamp) for innumerable small fs suggestions and some good cleanup
Gunter Kukkukk (testing and suggestions for support of old servers)
Igor Mammedov (DFS support)
Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 0a265ad9e426..2b68ac57d97d 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -53,7 +53,7 @@ void cifs_dfs_release_automount_timer(void)
*
* Extracts sharename form full UNC.
* i.e. strips from UNC trailing path that is not part of share
- * name and fixup missing '\' in the begining of DFS node refferal
+ * name and fix up missing '\' in the beginning of DFS node referral
* if necessary.
* Returns pointer to share name on success or ERR_PTR on error.
* Caller is responsible for freeing returned string.
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 904aa47e3515..2644a5d6cc67 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -5247,7 +5247,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
* Samba server ignores set of file size to zero due to bugs in some
* older clients, but we should be precise - we use SetFileSize to
* set file size and do not want to truncate file size to zero
- * accidently as happened on one Samba server beta by putting
+ * accidentally as happened on one Samba server beta by putting
* zero instead of -1 here
*/
data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 8d6c17ab593d..6e2b2addfc78 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1572,7 +1572,7 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
return false;
}
- /* now check if signing mode is acceptible */
+ /* now check if signing mode is acceptable */
if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
(server->secMode & SECMODE_SIGN_REQUIRED))
return false;
@@ -2933,7 +2933,7 @@ mount_fail_check:
if (mount_data != mount_data_global)
kfree(mount_data);
/* If find_unc succeeded then rc == 0 so we can not end */
- /* up accidently freeing someone elses tcon struct */
+ /* up accidentally freeing someone else's tcon struct */
if (tcon)
cifs_put_tcon(tcon);
else if (pSesInfo)
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index dd5f22918c33..9ea65cf36714 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -189,7 +189,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
- negotation. EREMOTE indicates DFS junction, which is not
+ negotiation. EREMOTE indicates DFS junction, which is not
handled in posix open */
if (rc == 0) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e964b1cd5dd0..c27d236738fc 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1569,34 +1569,6 @@ int cifs_fsync(struct file *file, int datasync)
return rc;
}
-/* static void cifs_sync_page(struct page *page)
-{
- struct address_space *mapping;
- struct inode *inode;
- unsigned long index = page->index;
- unsigned int rpages = 0;
- int rc = 0;
-
- cFYI(1, "sync page %p", page);
- mapping = page->mapping;
- if (!mapping)
- return 0;
- inode = mapping->host;
- if (!inode)
- return; */
-
-/* fill in rpages then
- result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-
-/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
-
-#if 0
- if (rc < 0)
- return rc;
- return 0;
-#endif
-} */
-
/*
* As file closes, flush all cached write data for this inode checking
* for write behind errors.
@@ -2510,7 +2482,6 @@ const struct address_space_operations cifs_addr_ops = {
.set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
- /* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
@@ -2528,6 +2499,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
- /* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index c6405ce3c50e..af56ad56a89a 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -13,7 +13,6 @@
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *fs_table_header;
-#endif
static ctl_table coda_table[] = {
{
@@ -40,7 +39,6 @@ static ctl_table coda_table[] = {
{}
};
-#ifdef CONFIG_SYSCTL
static ctl_table fs_table[] = {
{
.procname = "coda",
@@ -49,22 +47,27 @@ static ctl_table fs_table[] = {
},
{}
};
-#endif
void coda_sysctl_init(void)
{
-#ifdef CONFIG_SYSCTL
if ( !fs_table_header )
fs_table_header = register_sysctl_table(fs_table);
-#endif
}
void coda_sysctl_clean(void)
{
-#ifdef CONFIG_SYSCTL
if ( fs_table_header ) {
unregister_sysctl_table(fs_table_header);
fs_table_header = NULL;
}
-#endif
}
+
+#else
+void coda_sysctl_init(void)
+{
+}
+
+void coda_sysctl_clean(void)
+{
+}
+#endif
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 90ff3cb10de3..3313dd19f543 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -990,7 +990,7 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
* This describes these functions and their helpers.
*
* Allow another kernel system to depend on a config_item. If this
- * happens, the item cannot go away until the dependant can live without
+ * happens, the item cannot go away until the dependent can live without
* it. The idea is to give client modules as simple an interface as
* possible. When a system asks them to depend on an item, they just
* call configfs_depend_item(). If the item is live and the client
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index c6bd815dc794..2f27e578d466 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -502,7 +502,7 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty)
mutex_lock(&root->d_inode->i_mutex);
dentry = d_alloc_name(root, s);
- if (!IS_ERR(dentry)) {
+ if (dentry) {
d_add(dentry, inode);
fsnotify_create(root->d_inode, dentry);
} else {
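
The fix above matters because d_alloc_name() reports failure with NULL, not with an ERR_PTR, so the old IS_ERR() test could never trigger. A small sketch of the two error conventions, with the ERR_PTR helpers re-implemented for userspace and both allocators invented for illustration:

#include <stdio.h>
#include <errno.h>

/* Kernel-style ERR_PTR convention, reimplemented for illustration. */
#define MAX_ERRNO       4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_returns_null(void)   { return NULL; }            /* like d_alloc_name() */
static void *alloc_returns_errptr(void) { return ERR_PTR(-ENOMEM); }

int main(void)
{
        void *p = alloc_returns_null();

        /* Wrong test: IS_ERR(NULL) is false, so a NULL failure slips through. */
        printf("IS_ERR(NULL) = %d, NULL check = %d\n", IS_ERR(p), p == NULL);

        p = alloc_returns_errptr();
        printf("IS_ERR(ERR_PTR(-ENOMEM)) = %d\n", IS_ERR(p));
        return 0;
}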
diff --git a/fs/direct-io.c b/fs/direct-io.c
index dcb5577cde1d..ac5f164170e3 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
((rw & READ) || (dio->result == dio->size)))
ret = -EIOCBQUEUED;
- if (ret != -EIOCBQUEUED) {
- /* All IO is now issued, send it on its way */
- blk_run_address_space(inode->i_mapping);
+ if (ret != -EIOCBQUEUED)
dio_await_completion(dio);
- }
/*
* Sync will always be dropping the final ref and completing the
@@ -1176,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct dio *dio;
if (rw & WRITE)
- rw = WRITE_ODIRECT_PLUG;
+ rw = WRITE_ODIRECT;
if (bdev)
bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 04b8c449303f..56d6bfcc1e48 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -519,7 +519,7 @@ static void toss_rsb(struct kref *kref)
}
}
-/* When all references to the rsb are gone it's transfered to
+/* When all references to the rsb are gone it's transferred to
the tossed list for later disposal. */
static void put_rsb(struct dlm_rsb *r)
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index bffa1e73b9a9..5e2c71f05e46 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -810,7 +810,7 @@ static int tcp_accept_from_sock(struct connection *con)
/*
* Add it to the active queue in case we got data
- * beween processing the accept adding the socket
+ * between processing the accept adding the socket
* to the read_sockets list
*/
if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index eda43f362616..14638235f7b2 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -304,7 +304,7 @@ static void set_master_lkbs(struct dlm_rsb *r)
}
/*
- * Propogate the new master nodeid to locks
+ * Propagate the new master nodeid to locks
* The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
* The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which
* rsb's to consider.
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 2195c213ab2f..98b77c89494c 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -8,6 +8,7 @@
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
+#include "internal.h"
/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;
@@ -16,20 +17,23 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
struct inode *inode, *toput_inode = NULL;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
- continue;
- if (inode->i_mapping->nrpages == 0)
+ spin_lock(&inode->i_lock);
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ (inode->i_mapping->nrpages == 0)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_sb_list_lock);
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(toput_inode);
}
@@ -45,7 +49,11 @@ static void drop_slab(void)
int drop_caches_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec_minmax(table, write, buffer, length, ppos);
+ int ret;
+
+ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+ if (ret)
+ return ret;
if (write) {
if (sysctl_drop_caches & 1)
iterate_supers(drop_pagecache_sb, NULL);
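
The drop_pagecache_sb() rework is the standard walk-with-reference pattern: hold the list lock only while scanning, take the per-inode lock to check state and grab a reference, then drop both locks for the expensive invalidate. A pthread analogue under invented types (the kernel additionally keeps the previous inode pinned via toput_inode so the list position stays valid; that detail is omitted here):

#include <pthread.h>
#include <stdio.h>

struct item {
        pthread_mutex_t lock;   /* like inode->i_lock */
        int refcount;           /* like inode->i_count */
        int busy;               /* stand-in for I_FREEING|I_WILL_FREE|I_NEW */
        struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* like inode_sb_list_lock */

static void walk(struct item *head)
{
        struct item *it;

        pthread_mutex_lock(&list_lock);
        for (it = head; it; it = it->next) {
                pthread_mutex_lock(&it->lock);
                if (it->busy) {                 /* skip items being torn down */
                        pthread_mutex_unlock(&it->lock);
                        continue;
                }
                it->refcount++;                 /* like __iget() */
                pthread_mutex_unlock(&it->lock);
                pthread_mutex_unlock(&list_lock);

                /* ... expensive work on it, no locks held ... */

                pthread_mutex_lock(&list_lock); /* reacquire and continue */
        }
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        struct item a = { PTHREAD_MUTEX_INITIALIZER, 0, 0, NULL };

        walk(&a);
        printf("refcount after walk: %d\n", a.refcount);
        return 0;
}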
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index bfd8b680e648..d2a70a4561f9 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -266,7 +266,6 @@ void ecryptfs_destroy_mount_crypt_stat(
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
list_del(&auth_tok->mount_crypt_stat_list);
- mount_crypt_stat->num_global_auth_toks--;
if (auth_tok->global_auth_tok_key
&& !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID))
key_put(auth_tok->global_auth_tok_key);
@@ -1389,6 +1388,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
rc = -ENOMEM;
goto out;
}
+ /* Zeroed page ensures the in-header unencrypted i_size is set to 0 */
rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat,
ecryptfs_dentry);
if (unlikely(rc)) {
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index e00753496e3e..bd3cafd0949d 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -233,7 +233,7 @@ ecryptfs_get_key_payload_data(struct key *key)
struct ecryptfs_key_sig {
struct list_head crypt_stat_list;
- char keysig[ECRYPTFS_SIG_SIZE_HEX];
+ char keysig[ECRYPTFS_SIG_SIZE_HEX + 1];
};
struct ecryptfs_filename {
@@ -257,19 +257,18 @@ struct ecryptfs_filename {
struct ecryptfs_crypt_stat {
#define ECRYPTFS_STRUCT_INITIALIZED 0x00000001
#define ECRYPTFS_POLICY_APPLIED 0x00000002
-#define ECRYPTFS_NEW_FILE 0x00000004
-#define ECRYPTFS_ENCRYPTED 0x00000008
-#define ECRYPTFS_SECURITY_WARNING 0x00000010
-#define ECRYPTFS_ENABLE_HMAC 0x00000020
-#define ECRYPTFS_ENCRYPT_IV_PAGES 0x00000040
-#define ECRYPTFS_KEY_VALID 0x00000080
-#define ECRYPTFS_METADATA_IN_XATTR 0x00000100
-#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200
-#define ECRYPTFS_KEY_SET 0x00000400
-#define ECRYPTFS_ENCRYPT_FILENAMES 0x00000800
-#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000
-#define ECRYPTFS_ENCFN_USE_FEK 0x00002000
-#define ECRYPTFS_UNLINK_SIGS 0x00004000
+#define ECRYPTFS_ENCRYPTED 0x00000004
+#define ECRYPTFS_SECURITY_WARNING 0x00000008
+#define ECRYPTFS_ENABLE_HMAC 0x00000010
+#define ECRYPTFS_ENCRYPT_IV_PAGES 0x00000020
+#define ECRYPTFS_KEY_VALID 0x00000040
+#define ECRYPTFS_METADATA_IN_XATTR 0x00000080
+#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000100
+#define ECRYPTFS_KEY_SET 0x00000200
+#define ECRYPTFS_ENCRYPT_FILENAMES 0x00000400
+#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00000800
+#define ECRYPTFS_ENCFN_USE_FEK 0x00001000
+#define ECRYPTFS_UNLINK_SIGS 0x00002000
u32 flags;
unsigned int file_version;
size_t iv_bytes;
@@ -297,7 +296,6 @@ struct ecryptfs_inode_info {
struct inode vfs_inode;
struct inode *wii_inode;
struct file *lower_file;
- struct mutex lower_file_mutex;
struct ecryptfs_crypt_stat crypt_stat;
};
@@ -333,7 +331,6 @@ struct ecryptfs_global_auth_tok {
u32 flags;
struct list_head mount_crypt_stat_list;
struct key *global_auth_tok_key;
- struct ecryptfs_auth_tok *global_auth_tok;
unsigned char sig[ECRYPTFS_SIG_SIZE_HEX + 1];
};
@@ -380,7 +377,6 @@ struct ecryptfs_mount_crypt_stat {
u32 flags;
struct list_head global_auth_tok_list;
struct mutex global_auth_tok_list_mutex;
- size_t num_global_auth_toks;
size_t global_default_cipher_key_size;
size_t global_default_fn_cipher_key_bytes;
unsigned char global_default_cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 7d1050e254f9..cedc913d11ba 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -273,7 +273,14 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
static int
ecryptfs_fsync(struct file *file, int datasync)
{
- return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
+ int rc = 0;
+
+ rc = generic_file_fsync(file, datasync);
+ if (rc)
+ goto out;
+ rc = vfs_fsync(ecryptfs_file_to_lower(file), datasync);
+out:
+ return rc;
}
static int ecryptfs_fasync(int fd, struct file *file, int flag)
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index b592938a84bc..f99051b7adab 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -143,26 +143,6 @@ out:
}
/**
- * grow_file
- * @ecryptfs_dentry: the eCryptfs dentry
- *
- * This is the code which will grow the file to its correct size.
- */
-static int grow_file(struct dentry *ecryptfs_dentry)
-{
- struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode;
- char zero_virt[] = { 0x00 };
- int rc = 0;
-
- rc = ecryptfs_write(ecryptfs_inode, zero_virt, 0, 1);
- i_size_write(ecryptfs_inode, 0);
- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
- ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat.flags |=
- ECRYPTFS_NEW_FILE;
- return rc;
-}
-
-/**
* ecryptfs_initialize_file
*
* Cause the file to be changed from a basic empty file to an ecryptfs
@@ -181,7 +161,6 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
goto out;
}
- crypt_stat->flags |= ECRYPTFS_NEW_FILE;
ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
rc = ecryptfs_new_file_context(ecryptfs_dentry);
if (rc) {
@@ -202,9 +181,6 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
goto out;
}
- rc = grow_file(ecryptfs_dentry);
- if (rc)
- printk(KERN_ERR "Error growing file; rc = [%d]\n", rc);
out:
return rc;
}
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index c1436cff6f2d..03e609c45012 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -65,6 +65,24 @@ static int process_request_key_err(long err_code)
return rc;
}
+static int process_find_global_auth_tok_for_sig_err(int err_code)
+{
+ int rc = err_code;
+
+ switch (err_code) {
+ case -ENOENT:
+ ecryptfs_printk(KERN_WARNING, "Missing auth tok\n");
+ break;
+ case -EINVAL:
+ ecryptfs_printk(KERN_WARNING, "Invalid auth tok\n");
+ break;
+ default:
+ rc = process_request_key_err(err_code);
+ break;
+ }
+ return rc;
+}
+
/**
* ecryptfs_parse_packet_length
* @data: Pointer to memory containing length at offset
@@ -403,27 +421,120 @@ out:
return rc;
}
+/**
+ * ecryptfs_verify_version
+ * @version: The version number to confirm
+ *
+ * Returns zero on good version; non-zero otherwise
+ */
+static int ecryptfs_verify_version(u16 version)
+{
+ int rc = 0;
+ unsigned char major;
+ unsigned char minor;
+
+ major = ((version >> 8) & 0xFF);
+ minor = (version & 0xFF);
+ if (major != ECRYPTFS_VERSION_MAJOR) {
+ ecryptfs_printk(KERN_ERR, "Major version number mismatch. "
+ "Expected [%d]; got [%d]\n",
+ ECRYPTFS_VERSION_MAJOR, major);
+ rc = -EINVAL;
+ goto out;
+ }
+ if (minor != ECRYPTFS_VERSION_MINOR) {
+ ecryptfs_printk(KERN_ERR, "Minor version number mismatch. "
+ "Expected [%d]; got [%d]\n",
+ ECRYPTFS_VERSION_MINOR, minor);
+ rc = -EINVAL;
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/**
+ * ecryptfs_verify_auth_tok_from_key
+ * @auth_tok_key: key containing the authentication token
+ * @auth_tok: authentication token
+ *
+ * Returns zero on valid auth tok; -EINVAL otherwise
+ */
+static int
+ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
+ struct ecryptfs_auth_tok **auth_tok)
+{
+ int rc = 0;
+
+ (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+ if (ecryptfs_verify_version((*auth_tok)->version)) {
+ printk(KERN_ERR "Data structure version mismatch. Userspace "
+ "tools must match eCryptfs kernel module with major "
+ "version [%d] and minor version [%d]\n",
+ ECRYPTFS_VERSION_MAJOR, ECRYPTFS_VERSION_MINOR);
+ rc = -EINVAL;
+ goto out;
+ }
+ if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD
+ && (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) {
+ printk(KERN_ERR "Invalid auth_tok structure "
+ "returned from key query\n");
+ rc = -EINVAL;
+ goto out;
+ }
+out:
+ return rc;
+}
+
static int
ecryptfs_find_global_auth_tok_for_sig(
- struct ecryptfs_global_auth_tok **global_auth_tok,
+ struct key **auth_tok_key,
+ struct ecryptfs_auth_tok **auth_tok,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat, char *sig)
{
struct ecryptfs_global_auth_tok *walker;
int rc = 0;
- (*global_auth_tok) = NULL;
+ (*auth_tok_key) = NULL;
+ (*auth_tok) = NULL;
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_for_each_entry(walker,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
- if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX) == 0) {
- rc = key_validate(walker->global_auth_tok_key);
- if (!rc)
- (*global_auth_tok) = walker;
+ if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX))
+ continue;
+
+ if (walker->flags & ECRYPTFS_AUTH_TOK_INVALID) {
+ rc = -EINVAL;
goto out;
}
+
+ rc = key_validate(walker->global_auth_tok_key);
+ if (rc) {
+ if (rc == -EKEYEXPIRED)
+ goto out;
+ goto out_invalid_auth_tok;
+ }
+
+ down_write(&(walker->global_auth_tok_key->sem));
+ rc = ecryptfs_verify_auth_tok_from_key(
+ walker->global_auth_tok_key, auth_tok);
+ if (rc)
+ goto out_invalid_auth_tok_unlock;
+
+ (*auth_tok_key) = walker->global_auth_tok_key;
+ key_get(*auth_tok_key);
+ goto out;
}
- rc = -EINVAL;
+ rc = -ENOENT;
+ goto out;
+out_invalid_auth_tok_unlock:
+ up_write(&(walker->global_auth_tok_key->sem));
+out_invalid_auth_tok:
+ printk(KERN_WARNING "Invalidating auth tok with sig = [%s]\n", sig);
+ walker->flags |= ECRYPTFS_AUTH_TOK_INVALID;
+ key_put(walker->global_auth_tok_key);
+ walker->global_auth_tok_key = NULL;
out:
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
return rc;
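
ecryptfs_verify_version(), moved up in this file, just unpacks a u16 into a major byte and a minor byte and compares each against the module's constants. The packing is easy to check in isolation (version numbers below are made up):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical version numbers, for illustration only. */
#define VERSION_MAJOR   0
#define VERSION_MINOR   4

static uint16_t pack(uint8_t major, uint8_t minor)
{
        return ((uint16_t)major << 8) | minor;
}

int main(void)
{
        uint16_t v = pack(VERSION_MAJOR, VERSION_MINOR);

        printf("packed=0x%04x major=%u minor=%u\n",
               v, (v >> 8) & 0xFF, v & 0xFF);
        return 0;
}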
@@ -451,14 +562,11 @@ ecryptfs_find_auth_tok_for_sig(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
char *sig)
{
- struct ecryptfs_global_auth_tok *global_auth_tok;
int rc = 0;
- (*auth_tok_key) = NULL;
- (*auth_tok) = NULL;
- if (ecryptfs_find_global_auth_tok_for_sig(&global_auth_tok,
- mount_crypt_stat, sig)) {
-
+ rc = ecryptfs_find_global_auth_tok_for_sig(auth_tok_key, auth_tok,
+ mount_crypt_stat, sig);
+ if (rc == -ENOENT) {
/* if the flag ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY is set in the
* mount_crypt_stat structure, we prevent to use auth toks that
* are not inserted through the ecryptfs_add_global_auth_tok
@@ -470,8 +578,7 @@ ecryptfs_find_auth_tok_for_sig(
rc = ecryptfs_keyring_auth_tok_for_sig(auth_tok_key, auth_tok,
sig);
- } else
- (*auth_tok) = global_auth_tok->global_auth_tok;
+ }
return rc;
}
@@ -531,6 +638,16 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
}
s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
(*packet_size) = 0;
+ rc = ecryptfs_find_auth_tok_for_sig(
+ &auth_tok_key,
+ &s->auth_tok, mount_crypt_stat,
+ mount_crypt_stat->global_default_fnek_sig);
+ if (rc) {
+ printk(KERN_ERR "%s: Error attempting to find auth tok for "
+ "fnek sig [%s]; rc = [%d]\n", __func__,
+ mount_crypt_stat->global_default_fnek_sig, rc);
+ goto out;
+ }
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(
&s->desc.tfm,
&s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name);
@@ -616,16 +733,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
goto out_free_unlock;
}
dest[s->i++] = s->cipher_code;
- rc = ecryptfs_find_auth_tok_for_sig(
- &auth_tok_key,
- &s->auth_tok, mount_crypt_stat,
- mount_crypt_stat->global_default_fnek_sig);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to find auth tok for "
- "fnek sig [%s]; rc = [%d]\n", __func__,
- mount_crypt_stat->global_default_fnek_sig, rc);
- goto out_free_unlock;
- }
/* TODO: Support other key modules than passphrase for
* filename encryption */
if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
@@ -765,8 +872,10 @@ out_free_unlock:
out_unlock:
mutex_unlock(s->tfm_mutex);
out:
- if (auth_tok_key)
+ if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
+ }
kfree(s);
return rc;
}
@@ -879,6 +988,15 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
__func__, s->cipher_code);
goto out;
}
+ rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key,
+ &s->auth_tok, mount_crypt_stat,
+ s->fnek_sig_hex);
+ if (rc) {
+ printk(KERN_ERR "%s: Error attempting to find auth tok for "
+ "fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex,
+ rc);
+ goto out;
+ }
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->desc.tfm,
&s->tfm_mutex,
s->cipher_string);
@@ -925,15 +1043,6 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
* >= ECRYPTFS_MAX_IV_BYTES. */
memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES);
s->desc.info = s->iv;
- rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key,
- &s->auth_tok, mount_crypt_stat,
- s->fnek_sig_hex);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to find auth tok for "
- "fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex,
- rc);
- goto out_free_unlock;
- }
/* TODO: Support other key modules than passphrase for
* filename encryption */
if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
@@ -1002,8 +1111,10 @@ out:
(*filename_size) = 0;
(*filename) = NULL;
}
- if (auth_tok_key)
+ if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
+ }
kfree(s);
return rc;
}
@@ -1520,38 +1631,6 @@ out:
return rc;
}
-/**
- * ecryptfs_verify_version
- * @version: The version number to confirm
- *
- * Returns zero on good version; non-zero otherwise
- */
-static int ecryptfs_verify_version(u16 version)
-{
- int rc = 0;
- unsigned char major;
- unsigned char minor;
-
- major = ((version >> 8) & 0xFF);
- minor = (version & 0xFF);
- if (major != ECRYPTFS_VERSION_MAJOR) {
- ecryptfs_printk(KERN_ERR, "Major version number mismatch. "
- "Expected [%d]; got [%d]\n",
- ECRYPTFS_VERSION_MAJOR, major);
- rc = -EINVAL;
- goto out;
- }
- if (minor != ECRYPTFS_VERSION_MINOR) {
- ecryptfs_printk(KERN_ERR, "Minor version number mismatch. "
- "Expected [%d]; got [%d]\n",
- ECRYPTFS_VERSION_MINOR, minor);
- rc = -EINVAL;
- goto out;
- }
-out:
- return rc;
-}
-
int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
struct ecryptfs_auth_tok **auth_tok,
char *sig)
@@ -1563,31 +1642,16 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
printk(KERN_ERR "Could not find key with description: [%s]\n",
sig);
rc = process_request_key_err(PTR_ERR(*auth_tok_key));
+ (*auth_tok_key) = NULL;
goto out;
}
- (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key);
- if (ecryptfs_verify_version((*auth_tok)->version)) {
- printk(KERN_ERR
- "Data structure version mismatch. "
- "Userspace tools must match eCryptfs "
- "kernel module with major version [%d] "
- "and minor version [%d]\n",
- ECRYPTFS_VERSION_MAJOR,
- ECRYPTFS_VERSION_MINOR);
- rc = -EINVAL;
- goto out_release_key;
- }
- if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD
- && (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) {
- printk(KERN_ERR "Invalid auth_tok structure "
- "returned from key query\n");
- rc = -EINVAL;
- goto out_release_key;
- }
-out_release_key:
+ down_write(&(*auth_tok_key)->sem);
+ rc = ecryptfs_verify_auth_tok_from_key(*auth_tok_key, auth_tok);
if (rc) {
+ up_write(&(*auth_tok_key)->sem);
key_put(*auth_tok_key);
(*auth_tok_key) = NULL;
+ goto out;
}
out:
return rc;
@@ -1809,6 +1873,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
find_next_matching_auth_tok:
found_auth_tok = 0;
if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
auth_tok_key = NULL;
}
@@ -1895,8 +1960,10 @@ found_matching_auth_tok:
out_wipe_list:
wipe_auth_tok_list(&auth_tok_list);
out:
- if (auth_tok_key)
+ if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
+ }
return rc;
}
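
The recurring shape in this patch: whoever hands out an auth_tok now also hands out its key with key->sem held for write, and every exit path pairs up_write() with key_put(). A pthread rwlock sketch of that acquire/release discipline, with all types invented (a real key refcount would be atomic, so decrementing outside the lock is a simplification):

#include <pthread.h>
#include <stdio.h>

struct key {
        pthread_rwlock_t sem;   /* like key->sem */
        int refcount;
        int payload;            /* stand-in for the auth_tok */
};

/* Return the key write-locked and referenced, like the reworked
 * ecryptfs_find_global_auth_tok_for_sig(). */
static void key_get_locked(struct key *k, int **payload)
{
        pthread_rwlock_wrlock(&k->sem); /* down_write() */
        k->refcount++;                  /* key_get() */
        *payload = &k->payload;
}

static void key_put_unlocked(struct key *k)
{
        pthread_rwlock_unlock(&k->sem); /* up_write() */
        k->refcount--;                  /* key_put(); atomic in real code */
}

int main(void)
{
        struct key k = { PTHREAD_RWLOCK_INITIALIZER, 0, 42 };
        int *tok;

        key_get_locked(&k, &tok);
        printf("payload=%d refs=%d\n", *tok, k.refcount);
        key_put_unlocked(&k);
        return 0;
}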
@@ -2324,7 +2391,7 @@ ecryptfs_generate_key_packet_set(char *dest_base,
size_t max)
{
struct ecryptfs_auth_tok *auth_tok;
- struct ecryptfs_global_auth_tok *global_auth_tok;
+ struct key *auth_tok_key = NULL;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
@@ -2343,21 +2410,16 @@ ecryptfs_generate_key_packet_set(char *dest_base,
list_for_each_entry(key_sig, &crypt_stat->keysig_list,
crypt_stat_list) {
memset(key_rec, 0, sizeof(*key_rec));
- rc = ecryptfs_find_global_auth_tok_for_sig(&global_auth_tok,
+ rc = ecryptfs_find_global_auth_tok_for_sig(&auth_tok_key,
+ &auth_tok,
mount_crypt_stat,
key_sig->keysig);
if (rc) {
- printk(KERN_ERR "Error attempting to get the global "
- "auth_tok; rc = [%d]\n", rc);
+ printk(KERN_WARNING "Unable to retrieve auth tok with "
+ "sig = [%s]\n", key_sig->keysig);
+ rc = process_find_global_auth_tok_for_sig_err(rc);
goto out_free;
}
- if (global_auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID) {
- printk(KERN_WARNING
- "Skipping invalid auth tok with sig = [%s]\n",
- global_auth_tok->sig);
- continue;
- }
- auth_tok = global_auth_tok->global_auth_tok;
if (auth_tok->token_type == ECRYPTFS_PASSWORD) {
rc = write_tag_3_packet((dest_base + (*len)),
&max, auth_tok,
@@ -2395,6 +2457,9 @@ ecryptfs_generate_key_packet_set(char *dest_base,
rc = -EINVAL;
goto out_free;
}
+ up_write(&(auth_tok_key->sem));
+ key_put(auth_tok_key);
+ auth_tok_key = NULL;
}
if (likely(max > 0)) {
dest_base[(*len)] = 0x00;
@@ -2407,6 +2472,11 @@ out_free:
out:
if (rc)
(*len) = 0;
+ if (auth_tok_key) {
+ up_write(&(auth_tok_key->sem));
+ key_put(auth_tok_key);
+ }
+
mutex_unlock(&crypt_stat->keysig_list_mutex);
return rc;
}
@@ -2424,6 +2494,7 @@ int ecryptfs_add_keysig(struct ecryptfs_crypt_stat *crypt_stat, char *sig)
return -ENOMEM;
}
memcpy(new_key_sig->keysig, sig, ECRYPTFS_SIG_SIZE_HEX);
+ new_key_sig->keysig[ECRYPTFS_SIG_SIZE_HEX] = '\0';
/* Caller must hold keysig_list_mutex */
list_add(&new_key_sig->crypt_stat_list, &crypt_stat->keysig_list);
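
The +1 on keysig (see the ecryptfs_key_sig change in ecryptfs_kernel.h above) exists because the signature is now printed with %s; without an explicit terminator, printing an ECRYPTFS_SIG_SIZE_HEX-byte buffer would read past its end. A minimal demonstration, with an illustrative size rather than the real constant:

#include <stdio.h>
#include <string.h>

#define SIG_SIZE_HEX 16 /* illustrative size, not the eCryptfs constant */

int main(void)
{
        const char sig[] = "0123456789abcdef";  /* 16 hex chars */
        char keysig[SIG_SIZE_HEX + 1];          /* +1 for the terminator */

        memcpy(keysig, sig, SIG_SIZE_HEX);
        keysig[SIG_SIZE_HEX] = '\0';            /* now safe to pass to %s */
        printf("sig = [%s]\n", keysig);
        return 0;
}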
@@ -2453,7 +2524,6 @@ ecryptfs_add_global_auth_tok(struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_add(&new_auth_tok->mount_crypt_stat_list,
&mount_crypt_stat->global_auth_tok_list);
- mount_crypt_stat->num_global_auth_toks++;
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
out:
return rc;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 758323a0f09a..fdb2eb0ad09e 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -122,7 +122,6 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
int rc = 0;
- mutex_lock(&inode_info->lower_file_mutex);
if (!inode_info->lower_file) {
struct dentry *lower_dentry;
struct vfsmount *lower_mnt =
@@ -138,7 +137,6 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
inode_info->lower_file = NULL;
}
}
- mutex_unlock(&inode_info->lower_file_mutex);
return rc;
}
@@ -241,14 +239,14 @@ static int ecryptfs_init_global_auth_toks(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
struct ecryptfs_global_auth_tok *global_auth_tok;
+ struct ecryptfs_auth_tok *auth_tok;
int rc = 0;
list_for_each_entry(global_auth_tok,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
rc = ecryptfs_keyring_auth_tok_for_sig(
- &global_auth_tok->global_auth_tok_key,
- &global_auth_tok->global_auth_tok,
+ &global_auth_tok->global_auth_tok_key, &auth_tok,
global_auth_tok->sig);
if (rc) {
printk(KERN_ERR "Could not find valid key in user "
@@ -256,8 +254,10 @@ static int ecryptfs_init_global_auth_toks(
"option: [%s]\n", global_auth_tok->sig);
global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID;
goto out;
- } else
+ } else {
global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID;
+ up_write(&(global_auth_tok->global_auth_tok_key)->sem);
+ }
}
out:
return rc;
@@ -276,7 +276,7 @@ static void ecryptfs_init_mount_crypt_stat(
/**
* ecryptfs_parse_options
* @sb: The ecryptfs super block
- * @options: The options pased to the kernel
+ * @options: The options passed to the kernel
*
* Parse mount options:
* debug=N - ecryptfs_verbosity level for debug output
@@ -840,7 +840,7 @@ static int __init ecryptfs_init(void)
}
rc = ecryptfs_init_messaging();
if (rc) {
- printk(KERN_ERR "Failure occured while attempting to "
+ printk(KERN_ERR "Failure occurred while attempting to "
"initialize the communications channel to "
"ecryptfsd\n");
goto out_destroy_kthread;
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index cc64fca89f8d..6a44148c5fb9 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -62,6 +62,18 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
int rc;
+ /*
+ * Refuse to write the page out if we are called from reclaim context
+ * since our writepage() path may potentially allocate memory when
+ * calling into the lower fs vfs_write() which may in turn invoke
+ * us again.
+ */
+ if (current->flags & PF_MEMALLOC) {
+ redirty_page_for_writepage(wbc, page);
+ rc = 0;
+ goto out;
+ }
+
rc = ecryptfs_encrypt_page(page);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting "
@@ -70,8 +82,8 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
goto out;
}
SetPageUptodate(page);
- unlock_page(page);
out:
+ unlock_page(page);
return rc;
}
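
The PF_MEMALLOC guard stops writepage recursion: encrypting and writing through the lower filesystem can itself allocate memory, and if that allocation triggers reclaim we could re-enter writepage. Redirtying and returning 0 defers the page to a safe writeback context. The shape of the guard, with the task flag reduced to a plain parameter for illustration:

#include <stdio.h>

#define PF_MEMALLOC 0x1 /* stand-in for the task flag */

static int writepage(unsigned task_flags, int *page_dirty)
{
        if (task_flags & PF_MEMALLOC) {
                *page_dirty = 1;        /* redirty_page_for_writepage() */
                return 0;               /* retry from a safe context later */
        }
        *page_dirty = 0;                /* actually encrypt + write out */
        return 0;
}

int main(void)
{
        int dirty;

        writepage(PF_MEMALLOC, &dirty);
        printf("from reclaim: page left dirty = %d\n", dirty);
        writepage(0, &dirty);
        printf("normal writeback: page left dirty = %d\n", dirty);
        return 0;
}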
@@ -193,11 +205,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
int rc = 0;
- if (!crypt_stat
- || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
- || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
- ecryptfs_printk(KERN_DEBUG,
- "Passing through unencrypted page\n");
+ if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
PAGE_CACHE_SIZE,
page->mapping->host);
@@ -295,8 +303,7 @@ static int ecryptfs_write_begin(struct file *file,
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
- if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
- || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
+ if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(
page, index, 0, PAGE_CACHE_SIZE, mapping->host);
if (rc) {
@@ -374,6 +381,11 @@ static int ecryptfs_write_begin(struct file *file,
&& (pos != 0))
zero_user(page, 0, PAGE_CACHE_SIZE);
out:
+ if (unlikely(rc)) {
+ unlock_page(page);
+ page_cache_release(page);
+ *pagep = NULL;
+ }
return rc;
}
@@ -486,13 +498,8 @@ static int ecryptfs_write_end(struct file *file,
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc;
+ int need_unlock_page = 1;
- if (crypt_stat->flags & ECRYPTFS_NEW_FILE) {
- ecryptfs_printk(KERN_DEBUG, "ECRYPTFS_NEW_FILE flag set in "
- "crypt_stat at memory location [%p]\n", crypt_stat);
- crypt_stat->flags &= ~(ECRYPTFS_NEW_FILE);
- } else
- ecryptfs_printk(KERN_DEBUG, "Not a new file\n");
ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
@@ -512,26 +519,26 @@ static int ecryptfs_write_end(struct file *file,
"zeros in page with index = [0x%.16lx]\n", index);
goto out;
}
- rc = ecryptfs_encrypt_page(page);
- if (rc) {
- ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
- "index [0x%.16lx])\n", index);
- goto out;
- }
+ set_page_dirty(page);
+ unlock_page(page);
+ need_unlock_page = 0;
if (pos + copied > i_size_read(ecryptfs_inode)) {
i_size_write(ecryptfs_inode, pos + copied);
ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
"[0x%.16llx]\n",
(unsigned long long)i_size_read(ecryptfs_inode));
+ balance_dirty_pages_ratelimited(mapping);
+ rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
+ if (rc) {
+ printk(KERN_ERR "Error writing inode size to metadata; "
+ "rc = [%d]\n", rc);
+ goto out;
+ }
}
- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
- if (rc)
- printk(KERN_ERR "Error writing inode size to metadata; "
- "rc = [%d]\n", rc);
- else
- rc = copied;
+ rc = copied;
out:
- unlock_page(page);
+ if (need_unlock_page)
+ unlock_page(page);
page_cache_release(page);
return rc;
}
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index db184ef15d3d..85d430963116 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -44,15 +44,11 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
ssize_t rc;
inode_info = ecryptfs_inode_to_private(ecryptfs_inode);
- mutex_lock(&inode_info->lower_file_mutex);
BUG_ON(!inode_info->lower_file);
- inode_info->lower_file->f_pos = offset;
fs_save = get_fs();
set_fs(get_ds());
- rc = vfs_write(inode_info->lower_file, data, size,
- &inode_info->lower_file->f_pos);
+ rc = vfs_write(inode_info->lower_file, data, size, &offset);
set_fs(fs_save);
- mutex_unlock(&inode_info->lower_file_mutex);
mark_inode_dirty_sync(ecryptfs_inode);
return rc;
}
@@ -234,15 +230,11 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
mm_segment_t fs_save;
ssize_t rc;
- mutex_lock(&inode_info->lower_file_mutex);
BUG_ON(!inode_info->lower_file);
- inode_info->lower_file->f_pos = offset;
fs_save = get_fs();
set_fs(get_ds());
- rc = vfs_read(inode_info->lower_file, data, size,
- &inode_info->lower_file->f_pos);
+ rc = vfs_read(inode_info->lower_file, data, size, &offset);
set_fs(fs_save);
- mutex_unlock(&inode_info->lower_file_mutex);
return rc;
}
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 3042fe123a34..bacc882e1ae4 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -55,7 +55,6 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
if (unlikely(!inode_info))
goto out;
ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
- mutex_init(&inode_info->lower_file_mutex);
inode_info->lower_file = NULL;
inode = &inode_info->vfs_inode;
out:
@@ -198,7 +197,7 @@ static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt)
const struct super_operations ecryptfs_sops = {
.alloc_inode = ecryptfs_alloc_inode,
.destroy_inode = ecryptfs_destroy_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = generic_drop_inode,
.statfs = ecryptfs_statfs,
.remount_fs = NULL,
.evict_inode = ecryptfs_evict_inode,
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index a8e7797b9477..9c13412e6c99 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations efs_aops = {
.readpage = efs_readpage,
- .sync_page = block_sync_page,
.bmap = _efs_bmap
};
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ff12f7ac73ef..f9cfd168fbe2 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -181,7 +181,7 @@ struct eventpoll {
/*
* This is a single linked list that chains all the "struct epitem" that
- * happened while transfering ready events to userspace w/out
+ * happened while transferring ready events to userspace w/out
* holding ->lock.
*/
struct epitem *ovflist;
@@ -316,6 +316,19 @@ static void ep_nested_calls_init(struct nested_calls *ncalls)
}
/**
+ * ep_events_available - Checks if ready events might be available.
+ *
+ * @ep: Pointer to the eventpoll context.
+ *
+ * Returns: a value different from zero if ready events are available,
+ * or zero otherwise.
+ */
+static inline int ep_events_available(struct eventpoll *ep)
+{
+ return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+}
+
+/**
* ep_call_nested - Perform a bound (possibly) nested call, by checking
* that the recursion limit is not exceeded, and that
* the same nested call (by the meaning of same cookie) is
@@ -593,7 +606,7 @@ static void ep_free(struct eventpoll *ep)
* We do not need to hold "ep->mtx" here because the epoll file
* is on the way to be removed and no one has references to it
* anymore. The only hit might come from eventpoll_release_file() but
- * holding "epmutex" is sufficent here.
+ * holding "epmutex" is sufficient here.
*/
mutex_lock(&epmutex);
@@ -707,7 +720,7 @@ void eventpoll_release_file(struct file *file)
/*
* We don't want to get "file->f_lock" because it is not
* necessary. It is not necessary because we're in the "struct file"
- * cleanup path, and this means that noone is using this file anymore.
+ * cleanup path, and this means that no one is using this file anymore.
* So, for example, epoll_ctl() cannot hit here since if we reach this
* point, the file counter already went to zero and fget() would fail.
* The only hit might come from ep_free() but by holding the mutex
@@ -1099,7 +1112,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
* Trigger mode, we need to insert back inside
* the ready list, so that the next call to
* epoll_wait() will check again the events
- * availability. At this point, noone can insert
+ * availability. At this point, no one can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
* ep_scan_ready_list() holding "mtx" and the
@@ -1135,12 +1148,29 @@ static inline struct timespec ep_set_mstimeout(long ms)
return timespec_add_safe(now, ts);
}
+/**
+ * ep_poll - Retrieves ready events, and delivers them to the caller-supplied
+ * event buffer.
+ *
+ * @ep: Pointer to the eventpoll context.
+ * @events: Pointer to the userspace buffer where the ready events should be
+ * stored.
+ * @maxevents: Size (in terms of number of events) of the caller event buffer.
+ * @timeout: Maximum timeout for the ready events fetch operation, in
+ * milliseconds. If the @timeout is zero, the function will not block,
+ * while if the @timeout is less than zero, the function will block
+ * until at least one event has been retrieved (or an error
+ * occurred).
+ *
+ * Returns: the number of ready events fetched, or an error code in
+ * case of error.
+ */
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
- int res, eavail, timed_out = 0;
+ int res = 0, eavail, timed_out = 0;
unsigned long flags;
- long slack;
+ long slack = 0;
wait_queue_t wait;
ktime_t expires, *to = NULL;
@@ -1151,14 +1181,19 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
to = &expires;
*to = timespec_to_ktime(end_time);
} else if (timeout == 0) {
+ /*
+ * Avoid the unnecessary trip to the wait queue loop, if the
+ * caller specified a non-blocking operation.
+ */
timed_out = 1;
+ spin_lock_irqsave(&ep->lock, flags);
+ goto check_events;
}
-retry:
+fetch_events:
spin_lock_irqsave(&ep->lock, flags);
- res = 0;
- if (list_empty(&ep->rdllist)) {
+ if (!ep_events_available(ep)) {
/*
* We don't have any available event to return to the caller.
* We need to sleep here, and we will be wake up by
@@ -1174,7 +1209,7 @@ retry:
* to TASK_INTERRUPTIBLE before doing the checks.
*/
set_current_state(TASK_INTERRUPTIBLE);
- if (!list_empty(&ep->rdllist) || timed_out)
+ if (ep_events_available(ep) || timed_out)
break;
if (signal_pending(current)) {
res = -EINTR;
@@ -1191,8 +1226,9 @@ retry:
set_current_state(TASK_RUNNING);
}
+check_events:
/* Is it worth to try to dig for events ? */
- eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+ eavail = ep_events_available(ep);
spin_unlock_irqrestore(&ep->lock, flags);
@@ -1203,7 +1239,7 @@ retry:
*/
if (!res && eavail &&
!(res = ep_send_events(ep, events, maxevents)) && !timed_out)
- goto retry;
+ goto fetch_events;
return res;
}
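
The restructured ep_poll() matches the userspace contract: a zero timeout never blocks (it jumps straight to check_events), a negative timeout blocks until an event or a signal. A small demonstration against a pipe, using only standard Linux epoll calls:

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
        int pfd[2], epfd, n;
        struct epoll_event ev = { .events = EPOLLIN }, out;

        if (pipe(pfd) < 0 || (epfd = epoll_create1(0)) < 0)
                return 1;
        ev.data.fd = pfd[0];
        epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev);

        /* timeout == 0: non-blocking poll, no events yet -> returns 0 */
        n = epoll_wait(epfd, &out, 1, 0);
        printf("empty pipe, timeout 0: %d events\n", n);

        write(pfd[1], "x", 1);

        /* timeout < 0 would block forever; with data queued it returns 1 */
        n = epoll_wait(epfd, &out, 1, -1);
        printf("after write, timeout -1: %d events\n", n);
        return 0;
}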
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index f0d520312d8b..3bbd46956d77 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -53,10 +53,14 @@
#define EXOFS_ROOT_ID 0x10002 /* object ID for root directory */
/* exofs Application specific page/attribute */
+/* Inode attrs */
# define EXOFS_APAGE_FS_DATA (OSD_APAGE_APP_DEFINED_FIRST + 3)
# define EXOFS_ATTR_INODE_DATA 1
# define EXOFS_ATTR_INODE_FILE_LAYOUT 2
# define EXOFS_ATTR_INODE_DIR_LAYOUT 3
+/* Partition attrs */
+# define EXOFS_APAGE_SB_DATA (0xF0000000U + 3)
+# define EXOFS_ATTR_SB_STATS 1
/*
* The maximum number of files we can have is limited by the size of the
@@ -86,8 +90,8 @@ enum {
*/
enum {EXOFS_FSCB_VER = 1, EXOFS_DT_VER = 1};
struct exofs_fscb {
- __le64 s_nextid; /* Highest object ID used */
- __le64 s_numfiles; /* Number of files on fs */
+ __le64 s_nextid; /* Only used after mkfs */
+ __le64 s_numfiles; /* Only used after mkfs */
__le32 s_version; /* == EXOFS_FSCB_VER */
__le16 s_magic; /* Magic signature */
__le16 s_newfs; /* Non-zero if this is a new fs */
@@ -98,10 +102,20 @@ struct exofs_fscb {
} __packed;
/*
+ * This struct is set on the FS partition's attributes.
+ * [EXOFS_APAGE_SB_DATA, EXOFS_ATTR_SB_STATS] and is written together
+ * with the create command, to atomically persist the sb writeable information.
+ */
+struct exofs_sb_stats {
+ __le64 s_nextid; /* Highest object ID used */
+ __le64 s_numfiles; /* Number of files on fs */
+} __packed;
+
+/*
* Describes the raid used in the FS. It is part of the device table.
* This here is taken from the pNFS-objects definition. In exofs we
* use one raid policy through-out the filesystem. (NOTE: the funny
- * alignment at begining. We take care of it at exofs_device_table.
+ * alignment at beginning. We take care of it at exofs_device_table.
*/
struct exofs_dt_data_map {
__le32 cb_num_comps;
@@ -122,7 +136,7 @@ struct exofs_dt_device_info {
u8 systemid[OSD_SYSTEMID_LEN];
__le64 long_name_offset; /* If !0 then offset-in-file */
__le32 osdname_len; /* */
- u8 osdname[44]; /* Embbeded, Ususally an asci uuid */
+ u8 osdname[44]; /* Embedded, usually an ascii uuid */
} __packed;
/*
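
The exofs_sb_stats structure added above is on-disk data, so its fields are fixed-width little-endian (__le64) independent of host byte order. A userspace sketch of the same convention, with glibc's htole64()/le64toh() standing in for cpu_to_le64()/le64_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

struct sb_stats {
        uint64_t s_nextid;      /* stored little-endian on disk */
        uint64_t s_numfiles;
} __attribute__((packed));

int main(void)
{
        struct sb_stats ess;

        ess.s_nextid = htole64(0x10005);        /* cpu_to_le64() equivalent */
        ess.s_numfiles = htole64(3);
        printf("sizeof(struct sb_stats) = %zu (two packed __le64 fields)\n",
               sizeof(ess));
        printf("s_nextid back on cpu: 0x%llx\n",
               (unsigned long long)le64toh(ess.s_nextid));
        return 0;
}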
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index dcc941d82d67..d0941c6a1f72 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -124,7 +124,7 @@ out:
Ebadsize:
EXOFS_ERR("ERROR [exofs_check_page]: "
- "size of directory #%lu is not a multiple of chunk size",
+ "size of directory(0x%lx) is not a multiple of chunk size\n",
dir->i_ino
);
goto fail;
@@ -142,8 +142,8 @@ Espan:
goto bad_entry;
bad_entry:
EXOFS_ERR(
- "ERROR [exofs_check_page]: bad entry in directory #%lu: %s - "
- "offset=%lu, inode=%llu, rec_len=%d, name_len=%d",
+ "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
+ "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)),
rec_len, p->name_len);
@@ -151,8 +151,8 @@ bad_entry:
Eend:
p = (struct exofs_dir_entry *)(kaddr + offs);
EXOFS_ERR("ERROR [exofs_check_page]: "
- "entry in directory #%lu spans the page boundary"
- "offset=%lu, inode=%llu",
+ "entry in directory(0x%lx) spans the page boundary"
+ "offset=%lu, inode=0x%llx\n",
dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)));
fail:
@@ -261,9 +261,8 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct page *page = exofs_get_page(inode, n);
if (IS_ERR(page)) {
- EXOFS_ERR("ERROR: "
- "bad page in #%lu",
- inode->i_ino);
+ EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
+ inode->i_ino);
filp->f_pos += PAGE_CACHE_SIZE - offset;
return PTR_ERR(page);
}
@@ -283,7 +282,8 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
for (; (char *)de <= limit; de = exofs_next_entry(de)) {
if (de->rec_len == 0) {
EXOFS_ERR("ERROR: "
- "zero-length directory entry");
+ "zero-length entry in directory(0x%lx)\n",
+ inode->i_ino);
exofs_put_page(page);
return -EIO;
}
@@ -342,9 +342,9 @@ struct exofs_dir_entry *exofs_find_entry(struct inode *dir,
kaddr += exofs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
- EXOFS_ERR(
- "ERROR: exofs_find_entry: "
- "zero-length directory entry");
+ EXOFS_ERR("ERROR: zero-length entry in "
+ "directory(0x%lx)\n",
+ dir->i_ino);
exofs_put_page(page);
goto out;
}
@@ -472,7 +472,8 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
}
if (de->rec_len == 0) {
EXOFS_ERR("ERROR: exofs_add_link: "
- "zero-length directory entry");
+ "zero-length entry in directory(0x%lx)\n",
+ inode->i_ino);
err = -EIO;
goto out_unlock;
}
@@ -491,7 +492,8 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
exofs_put_page(page);
}
- EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=%p", dentry, inode);
+ EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=0x%lx\n",
+ dentry, inode->i_ino);
return -EINVAL;
got_it:
@@ -542,7 +544,8 @@ int exofs_delete_entry(struct exofs_dir_entry *dir, struct page *page)
while (de < dir) {
if (de->rec_len == 0) {
EXOFS_ERR("ERROR: exofs_delete_entry:"
- "zero-length directory entry");
+ "zero-length entry in directory(0x%lx)\n",
+ inode->i_ino);
err = -EIO;
goto out;
}
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 2dc925fa1010..c965806c2821 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -77,7 +77,7 @@ struct exofs_layout {
* our extension to the in-memory superblock
*/
struct exofs_sb_info {
- struct exofs_fscb s_fscb; /* Written often, pre-allocate*/
+ struct exofs_sb_stats s_ess; /* Written often, pre-allocate*/
int s_timeout; /* timeout for OSD operations */
uint64_t s_nextid; /* highest object ID used */
uint32_t s_numfiles; /* number of files on fs */
@@ -256,6 +256,8 @@ static inline int exofs_oi_read(struct exofs_i_info *oi,
}
/* inode.c */
+unsigned exofs_max_io_pages(struct exofs_layout *layout,
+ unsigned expected_pages);
int exofs_setattr(struct dentry *, struct iattr *);
int exofs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -279,7 +281,7 @@ int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *,
struct inode *);
/* super.c */
-int exofs_sync_fs(struct super_block *sb, int wait);
+int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
/*********************
* operation vectors *
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index b905c79b4f0a..45ca323d8363 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -45,22 +45,8 @@ static int exofs_release_file(struct inode *inode, struct file *filp)
static int exofs_file_fsync(struct file *filp, int datasync)
{
int ret;
- struct inode *inode = filp->f_mapping->host;
- struct super_block *sb;
-
- if (!(inode->i_state & I_DIRTY))
- return 0;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
- return 0;
-
- ret = sync_inode_metadata(inode, 1);
-
- /* This is a good place to write the sb */
- /* TODO: Sechedule an sb-sync on create */
- sb = inode->i_sb;
- if (sb->s_dirt)
- exofs_sync_fs(sb, 1);
+ ret = sync_inode_metadata(filp->f_mapping->host, 1);
return ret;
}
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index a7555238c41a..8472c098445d 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -43,6 +43,17 @@ enum { BIO_MAX_PAGES_KMALLOC =
PAGE_SIZE / sizeof(struct page *),
};
+unsigned exofs_max_io_pages(struct exofs_layout *layout,
+ unsigned expected_pages)
+{
+ unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
+
+ /* TODO: easily support bio chaining */
+ pages = min_t(unsigned, pages,
+ layout->group_width * BIO_MAX_PAGES_KMALLOC);
+ return pages;
+}
+
struct page_collect {
struct exofs_sb_info *sbi;
struct inode *inode;
@@ -97,8 +108,7 @@ static void _pcol_reset(struct page_collect *pcol)
static int pcol_try_alloc(struct page_collect *pcol)
{
- unsigned pages = min_t(unsigned, pcol->expected_pages,
- MAX_PAGES_KMALLOC);
+ unsigned pages;
if (!pcol->ios) { /* First time allocate io_state */
int ret = exofs_get_io_state(&pcol->sbi->layout, &pcol->ios);
@@ -108,8 +118,7 @@ static int pcol_try_alloc(struct page_collect *pcol)
}
/* TODO: easily support bio chaining */
- pages = min_t(unsigned, pages,
- pcol->sbi->layout.group_width * BIO_MAX_PAGES_KMALLOC);
+ pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
for (; pages; pages >>= 1) {
pcol->pages = kmalloc(pages * sizeof(struct page *),
@@ -350,8 +359,10 @@ static int readpage_strip(void *data, struct page *page)
if (!pcol->read_4_write)
unlock_page(page);
- EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
- " splitting\n", inode->i_ino, page->index);
+ EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
+ "read_4_write=%d index=0x%lx end_index=0x%lx "
+ "splitting\n", inode->i_ino, len,
+ pcol->read_4_write, page->index, end_index);
return read_exec(pcol);
}
@@ -722,11 +733,28 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
/* read modify write */
if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+ loff_t i_size = i_size_read(mapping->host);
+ pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ size_t rlen;
+
+ if (page->index < end_index)
+ rlen = PAGE_CACHE_SIZE;
+ else if (page->index == end_index)
+ rlen = i_size & ~PAGE_CACHE_MASK;
+ else
+ rlen = 0;
+
+ if (!rlen) {
+ clear_highpage(page);
+ SetPageUptodate(page);
+ goto out;
+ }
+
ret = _readpage(page, true);
if (ret) {
/*SetPageError was done by _readpage. Is it ok?*/
unlock_page(page);
- EXOFS_DBGMSG("__readpage_filler failed\n");
+ EXOFS_DBGMSG("__readpage failed\n");
}
}
out:
@@ -795,7 +823,6 @@ const struct address_space_operations exofs_aops = {
.direct_IO = NULL, /* TODO: Should be trivial to do */
/* With these NULL has special meaning or default is not exported */
- .sync_page = NULL,
.get_xip_mem = NULL,
.migratepage = NULL,
.launder_page = NULL,
@@ -1030,6 +1057,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
}
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &exofs_file_inode_operations;
inode->i_fop = &exofs_file_operations;
@@ -1073,6 +1101,7 @@ int __exofs_wait_obj_created(struct exofs_i_info *oi)
}
return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
+
/*
* Callback function from exofs_new_inode(). The important thing is that we
* set the obj_created flag so that other methods know that the object exists on
@@ -1130,7 +1159,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
sbi = sb->s_fs_info;
- sb->s_dirt = 1;
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
inode->i_blkbits = EXOFS_BLKSHIFT;
@@ -1141,6 +1170,8 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
spin_unlock(&sbi->s_next_gen_lock);
insert_inode_hash(inode);
+ exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */
+
mark_inode_dirty(inode);
ret = exofs_get_io_state(&sbi->layout, &ios);
@@ -1271,7 +1302,8 @@ out:
int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- return exofs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+ /* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
+ return exofs_update_inode(inode, 1);
}
/*
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 8c6c4669b381..06065bd37fc3 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -48,6 +48,7 @@
* struct to hold what we get from mount options
*/
struct exofs_mountopt {
+ bool is_osdname;
const char *dev_name;
uint64_t pid;
int timeout;
@@ -56,7 +57,7 @@ struct exofs_mountopt {
/*
* exofs-specific mount-time options.
*/
-enum { Opt_pid, Opt_to, Opt_mkfs, Opt_format, Opt_err };
+enum { Opt_name, Opt_pid, Opt_to, Opt_err };
/*
* Our mount-time options. These should ideally be 64-bit unsigned, but the
@@ -64,6 +65,7 @@ enum { Opt_pid, Opt_to, Opt_mkfs, Opt_format, Opt_err };
* sufficient for most applications now.
*/
static match_table_t tokens = {
+ {Opt_name, "osdname=%s"},
{Opt_pid, "pid=%u"},
{Opt_to, "to=%u"},
{Opt_err, NULL}
@@ -94,6 +96,14 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
token = match_token(p, tokens, args);
switch (token) {
+ case Opt_name:
+ opts->dev_name = match_strdup(&args[0]);
+ if (unlikely(!opts->dev_name)) {
+ EXOFS_ERR("Error allocating dev_name");
+ return -ENOMEM;
+ }
+ opts->is_osdname = true;
+ break;
case Opt_pid:
if (0 == match_strlcpy(str, &args[0], sizeof(str)))
return -EINVAL;
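
The new osdname= token goes through the kernel's match_token()/match_strdup() helpers like the existing options. A rough userspace equivalent of this parser, using libc only (the helper names and structure here are invented, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opts {
        char *dev_name;
        int is_osdname;
        unsigned pid, timeout;
};

static int parse_options(char *options, struct opts *o)
{
        char *p, *save = NULL;

        for (p = strtok_r(options, ",", &save); p;
             p = strtok_r(NULL, ",", &save)) {
                if (!strncmp(p, "osdname=", 8)) {
                        o->dev_name = strdup(p + 8);
                        if (!o->dev_name)
                                return -1;      /* like -ENOMEM */
                        o->is_osdname = 1;
                } else if (sscanf(p, "pid=%u", &o->pid) == 1) {
                        ;
                } else if (sscanf(p, "to=%u", &o->timeout) == 1) {
                        ;
                } else {
                        return -1;              /* Opt_err */
                }
        }
        return 0;
}

int main(void)
{
        char line[] = "osdname=my-osd,pid=65536,to=30";
        struct opts o = { 0 };

        if (parse_options(line, &o))
                return 1;
        printf("osdname=%s pid=%u to=%u\n", o.dev_name, o.pid, o.timeout);
        free(o.dev_name);
        return 0;
}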
@@ -203,6 +213,101 @@ static void destroy_inodecache(void)
static const struct super_operations exofs_sops;
static const struct export_operations exofs_export_ops;
+static const struct osd_attr g_attr_sb_stats = ATTR_DEF(
+ EXOFS_APAGE_SB_DATA,
+ EXOFS_ATTR_SB_STATS,
+ sizeof(struct exofs_sb_stats));
+
+static int __sbi_read_stats(struct exofs_sb_info *sbi)
+{
+ struct osd_attr attrs[] = {
+ [0] = g_attr_sb_stats,
+ };
+ struct exofs_io_state *ios;
+ int ret;
+
+ ret = exofs_get_io_state(&sbi->layout, &ios);
+ if (unlikely(ret)) {
+ EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ return ret;
+ }
+
+ ios->cred = sbi->s_cred;
+
+ ios->in_attr = attrs;
+ ios->in_attr_len = ARRAY_SIZE(attrs);
+
+ ret = exofs_sbi_read(ios);
+ if (unlikely(ret)) {
+ EXOFS_ERR("Error reading super_block stats => %d\n", ret);
+ goto out;
+ }
+
+ ret = extract_attr_from_ios(ios, &attrs[0]);
+ if (ret) {
+ EXOFS_ERR("%s: extract_attr of sb_stats failed\n", __func__);
+ goto out;
+ }
+ if (attrs[0].len) {
+ struct exofs_sb_stats *ess;
+
+ if (unlikely(attrs[0].len != sizeof(*ess))) {
+ EXOFS_ERR("%s: Wrong version of exofs_sb_stats "
+ "size(%d) != expected(%zd)\n",
+ __func__, attrs[0].len, sizeof(*ess));
+ goto out;
+ }
+
+ ess = attrs[0].val_ptr;
+ sbi->s_nextid = le64_to_cpu(ess->s_nextid);
+ sbi->s_numfiles = le32_to_cpu(ess->s_numfiles);
+ }
+
+out:
+ exofs_put_io_state(ios);
+ return ret;
+}
+
+static void stats_done(struct exofs_io_state *ios, void *p)
+{
+ exofs_put_io_state(ios);
+ /* Nothing more to do; the io_state has been released */
+}
+
+/* Asynchronously write the stats attribute */
+int exofs_sbi_write_stats(struct exofs_sb_info *sbi)
+{
+ struct osd_attr attrs[] = {
+ [0] = g_attr_sb_stats,
+ };
+ struct exofs_io_state *ios;
+ int ret;
+
+ ret = exofs_get_io_state(&sbi->layout, &ios);
+ if (unlikely(ret)) {
+ EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ return ret;
+ }
+
+ sbi->s_ess.s_nextid = cpu_to_le64(sbi->s_nextid);
+ sbi->s_ess.s_numfiles = cpu_to_le64(sbi->s_numfiles);
+ attrs[0].val_ptr = &sbi->s_ess;
+
+ ios->cred = sbi->s_cred;
+ ios->done = stats_done;
+ ios->private = sbi;
+ ios->out_attr = attrs;
+ ios->out_attr_len = ARRAY_SIZE(attrs);
+
+ ret = exofs_sbi_write(ios);
+ if (unlikely(ret)) {
+ EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
+ exofs_put_io_state(ios);
+ }
+
+ return ret;
+}
+
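+
The write side is deliberately asynchronous: the io_state carries a completion callback (stats_done) that releases it when the OSD write finishes, so callers such as exofs_new_inode() never block on the attribute update. The callback shape, reduced to a synchronous userspace sketch with invented names:

#include <stdio.h>
#include <stdlib.h>

struct io_state {
        void (*done)(struct io_state *ios, void *p);
        void *private;
};

static void stats_done(struct io_state *ios, void *p)
{
        printf("write completed, releasing io_state\n");
        free(ios);                      /* like exofs_put_io_state() */
}

/* Pretend submission layer: here it "completes" synchronously. */
static void submit(struct io_state *ios)
{
        ios->done(ios, ios->private);
}

int main(void)
{
        struct io_state *ios = malloc(sizeof(*ios));

        if (!ios)
                return 1;
        ios->done = stats_done;
        ios->private = NULL;
        submit(ios);                    /* caller returns without waiting */
        return 0;
}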
/*
* Write the superblock to the OSD
*/
@@ -213,18 +318,25 @@ int exofs_sync_fs(struct super_block *sb, int wait)
struct exofs_io_state *ios;
int ret = -ENOMEM;
- lock_super(sb);
+ fscb = kmalloc(sizeof(*fscb), GFP_KERNEL);
+ if (unlikely(!fscb))
+ return -ENOMEM;
+
sbi = sb->s_fs_info;
- fscb = &sbi->s_fscb;
+ /* NOTE: We no longer dirty the super_block anywhere in exofs. The
+ * reason we write the fscb here on unmount is so we can stay backwards
+ * compatible with fscb->s_version == 1. (What we are not compatible
+ * with is a newer-version FS that crashed and is then mounted by an
+ * older version.) Otherwise the exofs_fscb is read-only from mkfs time. All
+ * the writeable info is set in exofs_sbi_write_stats() above.
+ */
ret = exofs_get_io_state(&sbi->layout, &ios);
- if (ret)
+ if (unlikely(ret))
goto out;
- /* Note: We only write the changing part of the fscb. .i.e upto the
- * the fscb->s_dev_table_oid member. There is no read-modify-write
- * here.
- */
+ lock_super(sb);
+
ios->length = offsetof(struct exofs_fscb, s_dev_table_oid);
memset(fscb, 0, ios->length);
fscb->s_nextid = cpu_to_le64(sbi->s_nextid);
@@ -239,16 +351,17 @@ int exofs_sync_fs(struct super_block *sb, int wait)
ios->cred = sbi->s_cred;
ret = exofs_sbi_write(ios);
- if (unlikely(ret)) {
+ if (unlikely(ret))
EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
- goto out;
- }
- sb->s_dirt = 0;
+ else
+ sb->s_dirt = 0;
+
+ unlock_super(sb);
out:
EXOFS_DBGMSG("s_nextid=0x%llx ret=%d\n", _LLU(sbi->s_nextid), ret);
exofs_put_io_state(ios);
- unlock_super(sb);
+ kfree(fscb);
return ret;
}
@@ -292,13 +405,14 @@ static void exofs_put_super(struct super_block *sb)
int num_pend;
struct exofs_sb_info *sbi = sb->s_fs_info;
- if (sb->s_dirt)
- exofs_write_super(sb);
-
/* make sure there are no pending commands */
for (num_pend = atomic_read(&sbi->s_curr_pending); num_pend > 0;
num_pend = atomic_read(&sbi->s_curr_pending)) {
wait_queue_head_t wq;
+
+ printk(KERN_NOTICE "%s: !!Pending operations in flight. "
+ "This is a BUG. please report to osd-dev@open-osd.org\n",
+ __func__);
init_waitqueue_head(&wq);
wait_event_timeout(wq,
(atomic_read(&sbi->s_curr_pending) == 0),
@@ -390,6 +504,23 @@ static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
return 0;
}
+static unsigned __ra_pages(struct exofs_layout *layout)
+{
+ const unsigned _MIN_RA = 32; /* min 128K read-ahead */
+ unsigned ra_pages = layout->group_width * layout->stripe_unit /
+ PAGE_SIZE;
+ unsigned max_io_pages = exofs_max_io_pages(layout, ~0);
+
+ ra_pages *= 2; /* two stripes */
+ if (ra_pages < _MIN_RA)
+ ra_pages = roundup(_MIN_RA, ra_pages / 2);
+
+ if (ra_pages > max_io_pages)
+ ra_pages = max_io_pages;
+
+ return ra_pages;
+}
+
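To make the sizing above concrete, one worked example under assumed layout values (illustrative numbers, not from the patch):

	/*
	 * Assume group_width = 4, stripe_unit = 64 KiB, PAGE_SIZE = 4 KiB:
	 *
	 *   one stripe = 4 * 65536 / 4096     = 64 pages
	 *   ra_pages   = 64 * 2 (two stripes) = 128 pages = 512 KiB
	 *
	 * 128 >= _MIN_RA (32), so only the max_io_pages clamp applies. A
	 * tiny layout with one stripe = 4 pages (ra_pages = 8) would take
	 * roundup(32, 4) = 32 pages instead, i.e. the 128 KiB minimum
	 * rounded up to a whole number of stripes.
	 */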
/* @odi is valid only as long as @fscb_dev is valid */
static int exofs_devs_2_odi(struct exofs_dt_device_info *dt_dev,
struct osd_dev_info *odi)
@@ -495,7 +626,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
}
od = osduld_info_lookup(&odi);
- if (unlikely(IS_ERR(od))) {
+ if (IS_ERR(od)) {
ret = PTR_ERR(od);
EXOFS_ERR("ERROR: device requested is not found "
"osd_name-%s =>%d\n", odi.osdname, ret);
@@ -558,9 +689,17 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_bdi;
/* use mount options to fill superblock */
- od = osduld_path_lookup(opts->dev_name);
+ if (opts->is_osdname) {
+ struct osd_dev_info odi = {.systemid_len = 0};
+
+ odi.osdname_len = strlen(opts->dev_name);
+ odi.osdname = (u8 *)opts->dev_name;
+ od = osduld_info_lookup(&odi);
+ } else {
+ od = osduld_path_lookup(opts->dev_name);
+ }
if (IS_ERR(od)) {
- ret = PTR_ERR(od);
+ ret = -EINVAL;
goto free_sbi;
}
@@ -594,6 +733,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
sb->s_magic = le16_to_cpu(fscb.s_magic);
+ /* NOTE: we read below to be backward compatible with old versions */
sbi->s_nextid = le64_to_cpu(fscb.s_nextid);
sbi->s_numfiles = le32_to_cpu(fscb.s_numfiles);
@@ -604,7 +744,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
ret = -EINVAL;
goto free_sbi;
}
- if (le32_to_cpu(fscb.s_version) != EXOFS_FSCB_VER) {
+ if (le32_to_cpu(fscb.s_version) > EXOFS_FSCB_VER) {
EXOFS_ERR("ERROR: Bad FSCB version expected-%d got-%d\n",
EXOFS_FSCB_VER, le32_to_cpu(fscb.s_version));
ret = -EINVAL;
@@ -622,7 +762,10 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
+ __sbi_read_stats(sbi);
+
/* set up operation vectors */
+ sbi->bdi.ra_pages = __ra_pages(&sbi->layout);
sb->s_bdi = &sbi->bdi;
sb->s_fs_info = sbi;
sb->s_op = &exofs_sops;
@@ -652,6 +795,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
_exofs_print_device("Mounting", opts->dev_name, sbi->layout.s_ods[0],
sbi->layout.s_pid);
+ if (opts->is_osdname)
+ kfree(opts->dev_name);
return 0;
free_sbi:
@@ -660,6 +805,8 @@ free_bdi:
EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
opts->dev_name, sbi->layout.s_pid, ret);
exofs_free_sbi(sbi);
+ if (opts->is_osdname)
+ kfree(opts->dev_name);
return ret;
}
@@ -677,7 +824,8 @@ static struct dentry *exofs_mount(struct file_system_type *type,
if (ret)
return ERR_PTR(ret);
- opts.dev_name = dev_name;
+ if (!opts.dev_name)
+ opts.dev_name = dev_name;
return mount_nodev(type, flags, &opts, exofs_fill_super);
}
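Taken together with the parse_options() hunk above, the OSD can now be named either by its character-device path or by OSD name; hypothetical invocations of both forms (device path, pid and name are illustrative only):

	mount -t exofs -o pid=0x10000 /dev/osd0 /mnt/exofs
	mount -t exofs -o pid=0x10000,osdname=my-osd dummy /mnt/exofs

When osdname= is given, the positional device argument is ignored, which is why exofs_mount() above only falls back to the passed-in dev_name when the option left opts.dev_name unset.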
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 7b4180554a62..abea5a17c764 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -406,7 +406,7 @@ ext2_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
return -EINVAL;
if (!test_opt(dentry->d_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(dentry->d_inode))
+ if (!inode_owner_or_capable(dentry->d_inode))
return -EPERM;
if (value) {
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 0d06f4e75699..8f44cef1b3ef 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -850,7 +850,7 @@ static int find_next_reservable_window(
rsv_window_remove(sb, my_rsv);
/*
- * Let's book the whole avaliable window for now. We will check the
+ * Let's book the whole available window for now. We will check the
* disk bitmap later and then, if there are free blocks then we adjust
* the window size if it's larger than requested.
* Otherwise, we will remove this node from the tree next time
@@ -1357,9 +1357,9 @@ retry_alloc:
goto allocated;
}
/*
- * We may end up a bogus ealier ENOSPC error due to
+ * We may end up with a bogus earlier ENOSPC error because the
* filesystem is "full" of reservations, but
- * there maybe indeed free blocks avaliable on disk
+ * there may indeed be free blocks available on disk
* In this case, we just forget about the reservations
* just do block allocation as without reservations.
*/
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 1b48c3370872..645be9e7ee47 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -174,3 +174,9 @@ ext2_group_first_block_no(struct super_block *sb, unsigned long group_no)
return group_no * (ext2_fsblk_t)EXT2_BLOCKS_PER_GROUP(sb) +
le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block);
}
+
+#define ext2_set_bit __test_and_set_bit_le
+#define ext2_clear_bit __test_and_clear_bit_le
+#define ext2_test_bit test_bit_le
+#define ext2_find_first_zero_bit find_first_zero_bit_le
+#define ext2_find_next_zero_bit find_next_zero_bit_le
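These wrappers route ext2's bitmap helpers to the generic little-endian bitops. A minimal sketch of the usage pattern they support (illustrative only, not the actual fs/ext2/balloc.c logic; the __ variants are non-atomic, so the caller is assumed to hold the lock serializing bitmap updates):

	static unsigned long claim_first_free(void *bitmap, unsigned long nbits)
	{
		unsigned long bit = ext2_find_first_zero_bit(bitmap, nbits);

		if (bit < nbits)
			ext2_set_bit(bit, bitmap);
		return bit;	/* == nbits when the bitmap is full */
	}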
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 40ad210a5049..788e09a07f7e 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -305,7 +305,7 @@ static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
return ind->bh->b_blocknr;
/*
- * It is going to be refered from inode itself? OK, just put it into
+ * It is going to be referred from inode itself? OK, just put it into
* the same cylinder group then.
*/
bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
@@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
.writepage = ext2_writepage,
- .sync_page = block_sync_page,
.write_begin = ext2_write_begin,
.write_end = ext2_write_end,
.bmap = ext2_bmap,
@@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
.writepage = ext2_nobh_writepage,
- .sync_page = block_sync_page,
.write_begin = ext2_nobh_write_begin,
.write_end = nobh_write_end,
.bmap = ext2_bmap,
@@ -915,7 +913,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
*
* When we do truncate() we may have to clean the ends of several indirect
* blocks but leave the blocks themselves alive. Block is partially
- * truncated if some data below the new i_size is refered from it (and
+ * truncated if some data below the new i_size is referred from it (and
* it is on the path to the first completely truncated data block, indeed).
* We have to free the top of that path along with everything to the right
* of the path. Since no allocation past the truncation point is possible
@@ -992,7 +990,7 @@ no_top:
* @p: array of block numbers
* @q: points immediately past the end of array
*
- * We are freeing all blocks refered from that array (numbers are
+ * We are freeing all blocks referred from that array (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
@@ -1032,7 +1030,7 @@ static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
* @q: pointer immediately past the end of array
* @depth: depth of the branches to free
*
- * We are freeing all blocks refered from these branches (numbers are
+ * We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index e7431309bdca..f81e250ac5c4 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -39,7 +39,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
return ret;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
ret = -EACCES;
goto setflags_out;
}
@@ -89,7 +89,7 @@ setflags_out:
case EXT2_IOC_GETVERSION:
return put_user(inode->i_generation, (int __user *) arg);
case EXT2_IOC_SETVERSION:
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
ret = mnt_want_write(filp->f_path.mnt);
if (ret)
@@ -115,7 +115,7 @@ setflags_out:
if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
return -ENOTTY;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7731695e65d9..0a78dae7e2cb 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1382,7 +1382,7 @@ static struct dentry *ext2_mount(struct file_system_type *fs_type,
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index c2e4dce984d2..529970617a21 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -35,7 +35,7 @@
* +------------------+
*
* The block header is followed by multiple entry descriptors. These entry
- * descriptors are variable in size, and alligned to EXT2_XATTR_PAD
+ * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
* byte boundaries. The entry descriptors are sorted by attribute name,
* so that two extended attribute blocks can be compared efficiently.
*
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index e4fa49e6c539..9d021c0d472a 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -435,7 +435,7 @@ ext3_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
return -EINVAL;
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 153242187fce..fe52297e31ad 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -590,7 +590,7 @@ do_more:
BUFFER_TRACE(debug_bh, "Deleted!");
if (!bh2jh(bitmap_bh)->b_committed_data)
BUFFER_TRACE(debug_bh,
- "No commited data in bitmap");
+ "No committed data in bitmap");
BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
__brelse(debug_bh);
}
@@ -1063,7 +1063,7 @@ static int find_next_reservable_window(
rsv_window_remove(sb, my_rsv);
/*
- * Let's book the whole avaliable window for now. We will check the
+ * Let's book the whole available window for now. We will check the
* disk bitmap later and then, if there are free blocks then we adjust
* the window size if it's larger than requested.
* Otherwise, we will remove this node from the tree next time
@@ -1456,7 +1456,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
*
* ext3_should_retry_alloc() is called when ENOSPC is returned, and if
* it is profitable to retry the operation, this function will wait
- * for the current or commiting transaction to complete, and then
+ * for the current or committing transaction to complete, and then
* return TRUE.
*
* if the total number of retries exceed three times, return FALSE.
@@ -1632,9 +1632,9 @@ retry_alloc:
goto allocated;
}
/*
- * We may end up a bogus ealier ENOSPC error due to
+ * We may end up with a bogus earlier ENOSPC error because the
* filesystem is "full" of reservations, but
- * there maybe indeed free blocks avaliable on disk
+ * there may indeed be free blocks available on disk
* In this case, we just forget about the reservations
* just do block allocation as without reservations.
*/
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 7f5db46308a5..68b2e43d7c35 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_ordered_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_ordered_write_end,
.bmap = ext3_bmap,
@@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_writeback_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_writeback_write_end,
.bmap = ext3_bmap,
@@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_journalled_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_journalled_write_end,
.set_page_dirty = ext3_journalled_set_page_dirty,
@@ -2058,7 +2055,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
*
* When we do truncate() we may have to clean the ends of several
* indirect blocks but leave the blocks themselves alive. Block is
- * partially truncated if some data below the new i_size is refered
+ * partially truncated if some data below the new i_size is referred
* from it (and it is on the path to the first completely truncated
* data block, indeed). We have to free the top of that path along
* with everything to the right of the path. Since no allocation
@@ -2187,7 +2184,7 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
* @first: array of block numbers
* @last: points immediately past the end of array
*
- * We are freeing all blocks refered from that array (numbers are stored as
+ * We are freeing all blocks referred from that array (numbers are stored as
* little-endian 32-bit) and updating @inode->i_blocks appropriately.
*
* We accumulate contiguous runs of blocks to free. Conveniently, if these
@@ -2275,7 +2272,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
* @last: pointer immediately past the end of array
* @depth: depth of the branches to free
*
- * We are freeing all blocks refered from these branches (numbers are
+ * We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index fc080dd561f7..f4090bd2f345 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -38,7 +38,7 @@ long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
unsigned int oldflags;
unsigned int jflag;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
@@ -123,7 +123,7 @@ flags_out:
__u32 generation;
int err;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
err = mnt_want_write(filp->f_path.mnt);
@@ -192,7 +192,7 @@ setversion_out:
if (err)
return err;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EACCES;
goto setrsvsz_out;
}
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 108b142e11ed..7916e4ce166a 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -1009,7 +1009,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
- " upto "E3FSBLK" blocks\n",
+ " up to "E3FSBLK" blocks\n",
o_blocks_count, n_blocks_count);
if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 071689f86e18..3c6a9e0eadc1 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2925,7 +2925,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index e0270d1f8d82..21eacd7b7d79 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -433,7 +433,7 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
return -EINVAL;
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index adf96b822781..1c67139ad4b4 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -21,6 +21,8 @@
#include "ext4_jbd2.h"
#include "mballoc.h"
+#include <trace/events/ext4.h>
+
/*
* balloc.c contains the blocks allocation and deallocation routines
*/
@@ -342,6 +344,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
* We do it here so the bitmap uptodate bit
* get set with buffer lock held.
*/
+ trace_ext4_read_block_bitmap_load(sb, block_group);
set_bitmap_uptodate(bh);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
@@ -544,7 +547,7 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
*
* ext4_should_retry_alloc() is called when ENOSPC is returned, and if
* it is profitable to retry the operation, this function will wait
- * for the current or commiting transaction to complete, and then
+ * for the current or committing transaction to complete, and then
* return TRUE.
*
* if the total number of retries exceed three times, return FALSE.
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 3aa0b72b3b94..4daaf2b753f4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -923,14 +923,14 @@ struct ext4_inode_info {
#define test_opt2(sb, opt) (EXT4_SB(sb)->s_mount_opt2 & \
EXT4_MOUNT2_##opt)
-#define ext4_set_bit ext2_set_bit
+#define ext4_set_bit __test_and_set_bit_le
#define ext4_set_bit_atomic ext2_set_bit_atomic
-#define ext4_clear_bit ext2_clear_bit
+#define ext4_clear_bit __test_and_clear_bit_le
#define ext4_clear_bit_atomic ext2_clear_bit_atomic
-#define ext4_test_bit ext2_test_bit
-#define ext4_find_first_zero_bit ext2_find_first_zero_bit
-#define ext4_find_next_zero_bit ext2_find_next_zero_bit
-#define ext4_find_next_bit ext2_find_next_bit
+#define ext4_test_bit test_bit_le
+#define ext4_find_first_zero_bit find_first_zero_bit_le
+#define ext4_find_next_zero_bit find_next_zero_bit_le
+#define ext4_find_next_bit find_next_bit_le
/*
* Maximal mount counts between two filesystem checks
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index d8b992e658c1..e25e99bf7ee1 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -202,13 +202,6 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
return 1;
}
-static inline void ext4_journal_release_buffer(handle_t *handle,
- struct buffer_head *bh)
-{
- if (ext4_handle_valid(handle))
- jbd2_journal_release_buffer(handle, bh);
-}
-
static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
{
return ext4_journal_start_sb(inode->i_sb, nblocks);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7516fb9c0bd5..4890d6f3ad15 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -44,6 +44,8 @@
#include "ext4_jbd2.h"
#include "ext4_extents.h"
+#include <trace/events/ext4.h>
+
static int ext4_ext_truncate_extend_restart(handle_t *handle,
struct inode *inode,
int needed)
@@ -664,6 +666,8 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
if (unlikely(!bh))
goto err;
if (!bh_uptodate_or_lock(bh)) {
+ trace_ext4_ext_load_extent(inode, block,
+ path[ppos].p_block);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
goto err;
@@ -1034,7 +1038,7 @@ cleanup:
for (i = 0; i < depth; i++) {
if (!ablocks[i])
continue;
- ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
+ ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
EXT4_FREE_BLOCKS_METADATA);
}
}
@@ -1725,7 +1729,7 @@ repeat:
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
- ext_debug("next leaf isnt full(%d)\n",
+ ext_debug("next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
path = npath;
goto repeat;
@@ -2059,7 +2063,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
if (err)
return err;
ext_debug("index is empty, remove it, free block %llu\n", leaf);
- ext4_free_blocks(handle, inode, 0, leaf, 1,
+ ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
return err;
}
@@ -2156,7 +2160,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
num = le32_to_cpu(ex->ee_block) + ee_len - from;
start = ext4_ext_pblock(ex) + ee_len - num;
ext_debug("free last %u blocks starting %llu\n", num, start);
- ext4_free_blocks(handle, inode, 0, start, num, flags);
+ ext4_free_blocks(handle, inode, NULL, start, num, flags);
} else if (from == le32_to_cpu(ex->ee_block)
&& to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
@@ -2529,7 +2533,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
/*
* This function is called by ext4_ext_map_blocks() if someone tries to write
* to an uninitialized extent. It may result in splitting the uninitialized
- * extent into multiple extents (upto three - one initialized and two
+ * extent into multiple extents (up to three - one initialized and two
* uninitialized).
* There are three possibilities:
* a> There is no split required: Entire extent should be initialized
@@ -3108,14 +3112,13 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
{
int i, depth;
struct ext4_extent_header *eh;
- struct ext4_extent *ex, *last_ex;
+ struct ext4_extent *last_ex;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
return 0;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
- ex = path[depth].p_ext;
if (unlikely(!eh->eh_entries)) {
EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
@@ -3171,7 +3174,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
path, flags);
/*
* Flag the inode(non aio case) or end_io struct (aio case)
- * that this IO needs to convertion to written when IO is
+ * that this IO needs conversion to written when the IO is
* completed
*/
if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
@@ -3295,9 +3298,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct ext4_ext_path *path = NULL;
- struct ext4_extent_header *eh;
struct ext4_extent newex, *ex;
- ext4_fsblk_t newblock;
+ ext4_fsblk_t newblock = 0;
int err = 0, depth, ret;
unsigned int allocated = 0;
struct ext4_allocation_request ar;
@@ -3305,6 +3307,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
+ trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* check in cache */
if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
@@ -3352,7 +3355,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
err = -EIO;
goto out2;
}
- eh = path[depth].p_hdr;
ex = path[depth].p_ext;
if (ex) {
@@ -3458,10 +3460,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ext4_ext_mark_uninitialized(&newex);
/*
* io_end structure was created for every IO write to an
- * uninitialized extent. To avoid unecessary conversion,
+ * uninitialized extent. To avoid unnecessary conversion,
* here we flag the IO that really needs the conversion.
* For the non-async direct IO case, flag the inode state
- * that we need to perform convertion when IO is done.
+ * that we need to perform conversion when IO is done.
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
@@ -3485,7 +3487,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free() */
ext4_discard_preallocations(inode);
- ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex),
+ ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
ext4_ext_get_actual_len(&newex), 0);
goto out2;
}
@@ -3525,6 +3527,8 @@ out2:
ext4_ext_drop_refs(path);
kfree(path);
}
+ trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
+ newblock, map->m_len, err ? err : allocated);
return err ? err : allocated;
}
@@ -3658,6 +3662,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return -EOPNOTSUPP;
+ trace_ext4_fallocate_enter(inode, offset, len, mode);
map.m_lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
@@ -3673,6 +3678,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
ret = inode_newsize_ok(inode, (len + offset));
if (ret) {
mutex_unlock(&inode->i_mutex);
+ trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
return ret;
}
retry:
@@ -3717,6 +3723,8 @@ retry:
goto retry;
}
mutex_unlock(&inode->i_mutex);
+ trace_ext4_fallocate_exit(inode, offset, max_blocks,
+ ret > 0 ? ret2 : ret);
return ret > 0 ? ret2 : ret;
}
@@ -3775,6 +3783,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
}
return ret > 0 ? ret2 : ret;
}
+
/*
* Callback function called for each extent to gather FIEMAP information.
*/
@@ -3782,38 +3791,162 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
struct ext4_ext_cache *newex, struct ext4_extent *ex,
void *data)
{
- struct fiemap_extent_info *fieinfo = data;
- unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
__u64 logical;
__u64 physical;
__u64 length;
+ loff_t size;
__u32 flags = 0;
- int error;
+ int ret = 0;
+ struct fiemap_extent_info *fieinfo = data;
+ unsigned char blksize_bits;
- logical = (__u64)newex->ec_block << blksize_bits;
+ blksize_bits = inode->i_sb->s_blocksize_bits;
+ logical = (__u64)newex->ec_block << blksize_bits;
if (newex->ec_start == 0) {
- pgoff_t offset;
- struct page *page;
+ /*
+ * If no extent in the extent tree contains block @newex->ec_start,
+ * the block may lie in 1) a hole or 2) a delayed extent.
+ *
+ * Holes and delayed extents are processed as follows:
+ * 1. Look up dirty pages in the pagecache for the specified range.
+ * If no page is found, there is no delayed extent; return
+ * EXT_CONTINUE.
+ * 2. Find the first mapped buffer.
+ * 3. Check whether the mapped buffer is both inside the request
+ * range and delayed. If not, there is no delayed extent; return.
+ * 4. A delayed extent has been found; collect it.
+ */
+ ext4_lblk_t end = 0;
+ pgoff_t last_offset;
+ pgoff_t offset;
+ pgoff_t index;
+ struct page **pages = NULL;
struct buffer_head *bh = NULL;
+ struct buffer_head *head = NULL;
+ unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
+
+ pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (pages == NULL)
+ return -ENOMEM;
offset = logical >> PAGE_SHIFT;
- page = find_get_page(inode->i_mapping, offset);
- if (!page || !page_has_buffers(page))
- return EXT_CONTINUE;
+repeat:
+ last_offset = offset;
+ head = NULL;
+ ret = find_get_pages_tag(inode->i_mapping, &offset,
+ PAGECACHE_TAG_DIRTY, nr_pages, pages);
+
+ if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
+ /* First time, try to find a mapped buffer. */
+ if (ret == 0) {
+out:
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ /* just a hole. */
+ kfree(pages);
+ return EXT_CONTINUE;
+ }
- bh = page_buffers(page);
+ /* Try to find the 1st mapped buffer. */
+ end = ((__u64)pages[0]->index << PAGE_SHIFT) >>
+ blksize_bits;
+ if (!page_has_buffers(pages[0]))
+ goto out;
+ head = page_buffers(pages[0]);
+ if (!head)
+ goto out;
- if (!bh)
- return EXT_CONTINUE;
+ bh = head;
+ do {
+ if (buffer_mapped(bh)) {
+ /* get the 1st mapped buffer. */
+ if (end > newex->ec_block +
+ newex->ec_len)
+ /* The buffer is out of
+ * the request range.
+ */
+ goto out;
+ goto found_mapped_buffer;
+ }
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
- if (buffer_delay(bh)) {
- flags |= FIEMAP_EXTENT_DELALLOC;
- page_cache_release(page);
+ /* No mapped buffer found. */
+ goto out;
} else {
- page_cache_release(page);
- return EXT_CONTINUE;
+ /* Find contiguous delayed buffers. */
+ if (ret > 0 && pages[0]->index == last_offset)
+ head = page_buffers(pages[0]);
+ bh = head;
}
+
+found_mapped_buffer:
+ if (bh != NULL && buffer_delay(bh)) {
+ /* 1st or contiguous delayed buffer found. */
+ if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
+ /*
+ * 1st delayed buffer found, record
+ * the start of extent.
+ */
+ flags |= FIEMAP_EXTENT_DELALLOC;
+ newex->ec_block = end;
+ logical = (__u64)end << blksize_bits;
+ }
+ /* Find contiguous delayed buffers. */
+ do {
+ if (!buffer_delay(bh))
+ goto found_delayed_extent;
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
+
+ for (index = 1; index < ret; index++) {
+ if (!page_has_buffers(pages[index])) {
+ bh = NULL;
+ break;
+ }
+ head = page_buffers(pages[index]);
+ if (!head) {
+ bh = NULL;
+ break;
+ }
+ if (pages[index]->index !=
+ pages[0]->index + index) {
+ /* Blocks are not contiguous. */
+ bh = NULL;
+ break;
+ }
+ bh = head;
+ do {
+ if (!buffer_delay(bh))
+ /* Delayed-extent ends. */
+ goto found_delayed_extent;
+ bh = bh->b_this_page;
+ end++;
+ } while (bh != head);
+ }
+ } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
+ /* a hole found. */
+ goto out;
+
+found_delayed_extent:
+ newex->ec_len = min(end - newex->ec_block,
+ (ext4_lblk_t)EXT_INIT_MAX_LEN);
+ if (ret == nr_pages && bh != NULL &&
+ newex->ec_len < EXT_INIT_MAX_LEN &&
+ buffer_delay(bh)) {
+ /* No complete extent collected yet; keep scanning. */
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ goto repeat;
+ }
+
+ for (index = 0; index < ret; index++)
+ page_cache_release(pages[index]);
+ kfree(pages);
}
physical = (__u64)newex->ec_start << blksize_bits;
@@ -3822,32 +3955,16 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
if (ex && ext4_ext_is_uninitialized(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
- /*
- * If this extent reaches EXT_MAX_BLOCK, it must be last.
- *
- * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
- * this also indicates no more allocated blocks.
- *
- * XXX this might miss a single-block extent at EXT_MAX_BLOCK
- */
- if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
- newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
- loff_t size = i_size_read(inode);
- loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);
-
+ size = i_size_read(inode);
+ if (logical + length >= size)
flags |= FIEMAP_EXTENT_LAST;
- if ((flags & FIEMAP_EXTENT_DELALLOC) &&
- logical+length > size)
- length = (size - logical + bs - 1) & ~(bs-1);
- }
- error = fiemap_fill_next_extent(fieinfo, logical, physical,
+ ret = fiemap_fill_next_extent(fieinfo, logical, physical,
length, flags);
- if (error < 0)
- return error;
- if (error == 1)
+ if (ret < 0)
+ return ret;
+ if (ret == 1)
return EXT_BREAK;
-
return EXT_CONTINUE;
}
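Stripped of the page and buffer bookkeeping, the rewritten callback's control flow is roughly the following (a paraphrase for orientation, not compilable code):

	/*
	 * if newex->ec_start == 0 (block not in the extent tree):
	 *         look up dirty pages in the requested range;
	 *         no pages, or no mapped buffer  ->  hole: EXT_CONTINUE;
	 *         walk buffers while buffer_delay(), growing [ec_block, end);
	 *         ec_len = min(end - ec_block, EXT_INIT_MAX_LEN);
	 * report (logical, physical, length, flags) via fiemap_fill_next_extent();
	 * if logical + length >= i_size  ->  set FIEMAP_EXTENT_LAST.
	 */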
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 7829b287822a..4673bc05274f 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -101,7 +101,7 @@ extern int ext4_flush_completed_IO(struct inode *inode)
* to the work-to-be schedule is freed.
*
* Thus we need to keep the io structure still valid here after
- * convertion finished. The io structure has a flag to
+ * conversion finished. The io structure has a flag to
* avoid double converting from both fsync and background work
* queue work.
*/
@@ -164,20 +164,20 @@ int ext4_sync_file(struct file *file, int datasync)
J_ASSERT(ext4_journal_current_handle() == NULL);
- trace_ext4_sync_file(file, datasync);
+ trace_ext4_sync_file_enter(file, datasync);
if (inode->i_sb->s_flags & MS_RDONLY)
return 0;
ret = ext4_flush_completed_IO(inode);
if (ret < 0)
- return ret;
+ goto out;
if (!journal) {
ret = generic_file_fsync(file, datasync);
if (!ret && !list_empty(&inode->i_dentry))
ext4_sync_parent(inode);
- return ret;
+ goto out;
}
/*
@@ -194,8 +194,10 @@ int ext4_sync_file(struct file *file, int datasync)
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
- if (ext4_should_journal_data(inode))
- return ext4_force_commit(inode->i_sb);
+ if (ext4_should_journal_data(inode)) {
+ ret = ext4_force_commit(inode->i_sb);
+ goto out;
+ }
commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
if (jbd2_log_start_commit(journal, commit_tid)) {
@@ -215,5 +217,7 @@ int ext4_sync_file(struct file *file, int datasync)
ret = jbd2_log_wait_commit(journal, commit_tid);
} else if (journal->j_flags & JBD2_BARRIER)
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ out:
+ trace_ext4_sync_file_exit(inode, ret);
return ret;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 78b79e1bd7ed..21bb2f61e502 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -152,6 +152,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
* We do it here so the bitmap uptodate bit
* get set with buffer lock held.
*/
+ trace_ext4_load_inode_bitmap(sb, block_group);
set_bitmap_uptodate(bh);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
@@ -649,7 +650,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
*group = parent_group + flex_size;
if (*group > ngroups)
*group = 0;
- return find_group_orlov(sb, parent, group, mode, 0);
+ return find_group_orlov(sb, parent, group, mode, NULL);
}
/*
@@ -1054,6 +1055,11 @@ got:
}
}
+ if (ext4_handle_valid(handle)) {
+ ei->i_sync_tid = handle->h_transaction->t_tid;
+ ei->i_datasync_tid = handle->h_transaction->t_tid;
+ }
+
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
ext4_std_error(sb, err);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9f7f9e49914f..ad8e303c0d29 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -173,7 +173,7 @@ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
BUG_ON(EXT4_JOURNAL(inode) == NULL);
jbd_debug(2, "restarting handle %p\n", handle);
up_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
+ ret = ext4_journal_restart(handle, nblocks);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
@@ -720,7 +720,7 @@ allocated:
return ret;
failed_out:
for (i = 0; i < index; i++)
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
return ret;
}
@@ -823,20 +823,20 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
return err;
failed:
/* Allocation failed, free what we already allocated */
- ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
for (i = 1; i <= n ; i++) {
/*
* branch[i].bh is newly allocated, so there is no
* need to revoke the block, which is why we don't
* need to set EXT4_FREE_BLOCKS_METADATA.
*/
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
EXT4_FREE_BLOCKS_FORGET);
}
for (i = n+1; i < indirect_blks; i++)
- ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
- ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
return err;
}
@@ -924,7 +924,7 @@ err_out:
ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
EXT4_FREE_BLOCKS_FORGET);
}
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
blks, 0);
return err;
@@ -973,6 +973,7 @@ static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
int count = 0;
ext4_fsblk_t first_block = 0;
+ trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
depth = ext4_block_to_path(inode, map->m_lblk, offsets,
@@ -1058,6 +1059,8 @@ cleanup:
partial--;
}
out:
+ trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
+ map->m_pblk, map->m_len, err);
return err;
}
@@ -2060,7 +2063,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
- int commit_write = 0, redirty_page = 0;
+ int commit_write = 0, skip_page = 0;
struct page *page = pvec.pages[i];
index = page->index;
@@ -2086,14 +2089,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
* If the page does not have buffers (for
* whatever reason), try to create them using
* __block_write_begin. If this fails,
- * redirty the page and move on.
+ * skip the page and move on.
*/
if (!page_has_buffers(page)) {
if (__block_write_begin(page, 0, len,
noalloc_get_block_write)) {
- redirty_page:
- redirty_page_for_writepage(mpd->wbc,
- page);
+ skip_page:
unlock_page(page);
continue;
}
@@ -2104,7 +2105,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
block_start = 0;
do {
if (!bh)
- goto redirty_page;
+ goto skip_page;
if (map && (cur_logical >= map->m_lblk) &&
(cur_logical <= (map->m_lblk +
(map->m_len - 1)))) {
@@ -2120,22 +2121,23 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
clear_buffer_unwritten(bh);
}
- /* redirty page if block allocation undone */
+ /* skip page if block allocation undone */
if (buffer_delay(bh) || buffer_unwritten(bh))
- redirty_page = 1;
+ skip_page = 1;
bh = bh->b_this_page;
block_start += bh->b_size;
cur_logical++;
pblock++;
} while (bh != page_bufs);
- if (redirty_page)
- goto redirty_page;
+ if (skip_page)
+ goto skip_page;
if (commit_write)
/* mark the buffer_heads as dirty & uptodate */
block_commit_write(page, 0, len);
+ clear_page_dirty_for_io(page);
/*
* Delalloc doesn't support data journalling,
* but eventually maybe we'll lift this
@@ -2165,8 +2167,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
return ret;
}
-static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
- sector_t logical, long blk_cnt)
+static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
int nr_pages, i;
pgoff_t index, end;
@@ -2174,9 +2175,8 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
- index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
- end = (logical + blk_cnt - 1) >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ index = mpd->first_page;
+ end = mpd->next_page - 1;
while (index <= end) {
nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
if (nr_pages == 0)
@@ -2279,9 +2279,8 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
err = blks;
/*
* If get block returns EAGAIN or ENOSPC and there
- * appears to be free blocks we will call
- * ext4_writepage() for all of the pages which will
- * just redirty the pages.
+ * appear to be free blocks, we will just let
+ * mpage_da_submit_io() unlock all of the pages.
*/
if (err == -EAGAIN)
goto submit_io;
@@ -2312,8 +2311,10 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
ext4_print_free_blocks(mpd->inode);
}
/* invalidate all the pages */
- ext4_da_block_invalidatepages(mpd, next,
- mpd->b_size >> mpd->inode->i_blkbits);
+ ext4_da_block_invalidatepages(mpd);
+
+ /* Mark this page range as having been completed */
+ mpd->io_done = 1;
return;
}
BUG_ON(blks == 0);
@@ -2438,102 +2439,6 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
}
/*
- * __mpage_da_writepage - finds extent of pages and blocks
- *
- * @page: page to consider
- * @wbc: not used, we just follow rules
- * @data: context
- *
- * The function finds extents of pages and scan them for all blocks.
- */
-static int __mpage_da_writepage(struct page *page,
- struct writeback_control *wbc,
- struct mpage_da_data *mpd)
-{
- struct inode *inode = mpd->inode;
- struct buffer_head *bh, *head;
- sector_t logical;
-
- /*
- * Can we merge this page to current extent?
- */
- if (mpd->next_page != page->index) {
- /*
- * Nope, we can't. So, we map non-allocated blocks
- * and start IO on them
- */
- if (mpd->next_page != mpd->first_page) {
- mpage_da_map_and_submit(mpd);
- /*
- * skip rest of the page in the page_vec
- */
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return MPAGE_DA_EXTENT_TAIL;
- }
-
- /*
- * Start next extent of pages ...
- */
- mpd->first_page = page->index;
-
- /*
- * ... and blocks
- */
- mpd->b_size = 0;
- mpd->b_state = 0;
- mpd->b_blocknr = 0;
- }
-
- mpd->next_page = page->index + 1;
- logical = (sector_t) page->index <<
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
- if (!page_has_buffers(page)) {
- mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
- (1 << BH_Dirty) | (1 << BH_Uptodate));
- if (mpd->io_done)
- return MPAGE_DA_EXTENT_TAIL;
- } else {
- /*
- * Page with regular buffer heads, just add all dirty ones
- */
- head = page_buffers(page);
- bh = head;
- do {
- BUG_ON(buffer_locked(bh));
- /*
- * We need to try to allocate
- * unmapped blocks in the same page.
- * Otherwise we won't make progress
- * with the page in ext4_writepage
- */
- if (ext4_bh_delay_or_unwritten(NULL, bh)) {
- mpage_add_bh_to_extent(mpd, logical,
- bh->b_size,
- bh->b_state);
- if (mpd->io_done)
- return MPAGE_DA_EXTENT_TAIL;
- } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
- /*
- * mapped dirty buffer. We need to update
- * the b_state because we look at
- * b_state in mpage_da_map_blocks. We don't
- * update b_size because if we find an
- * unmapped buffer_head later we need to
- * use the b_state flag of that buffer_head.
- */
- if (mpd->b_size == 0)
- mpd->b_state = bh->b_state & BH_FLAGS;
- }
- logical++;
- } while ((bh = bh->b_this_page) != head);
- }
-
- return 0;
-}
-
-/*
* This is a special get_blocks_t callback which is used by
* ext4_da_write_begin(). It will either return mapped block or
* reserve space for a single block.
@@ -2597,7 +2502,6 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
* for partial write.
*/
set_buffer_new(bh);
- set_buffer_mapped(bh);
}
return 0;
}
@@ -2684,7 +2588,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
* because we should have holes filled from ext4_page_mkwrite(). We even don't
* need to file the inode to the transaction's list in ordered mode because if
* we are writing back data added by write(), the inode is already there and if
- * we are writing back data modified via mmap(), noone guarantees in which
+ * we are writing back data modified via mmap(), no one guarantees in which
* transaction the data will hit the disk. In case we are journaling data, we
* cannot start transaction directly because transaction start ranks above page
* lock so we have to do some magic.
@@ -2786,7 +2690,7 @@ static int ext4_writepage(struct page *page,
/*
* This is called via ext4_da_writepages() to
- * calulate the total number of credits to reserve to fit
+ * calculate the total number of credits to reserve to fit
* a single extent allocation into a single transaction,
* ext4_da_writpeages() will loop calling this before
* the block allocation.
@@ -2811,27 +2715,27 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
/*
* write_cache_pages_da - walk the list of dirty pages of the given
- * address space and call the callback function (which usually writes
- * the pages).
- *
- * This is a forked version of write_cache_pages(). Differences:
- * Range cyclic is ignored.
- * no_nrwrite_index_update is always presumed true
+ * address space and accumulate pages that need writing, and call
+ * mpage_da_map_and_submit to map a single contiguous memory region
+ * and then write them.
*/
static int write_cache_pages_da(struct address_space *mapping,
struct writeback_control *wbc,
struct mpage_da_data *mpd,
pgoff_t *done_index)
{
- int ret = 0;
- int done = 0;
- struct pagevec pvec;
- unsigned nr_pages;
- pgoff_t index;
- pgoff_t end; /* Inclusive */
- long nr_to_write = wbc->nr_to_write;
- int tag;
-
+ struct buffer_head *bh, *head;
+ struct inode *inode = mapping->host;
+ struct pagevec pvec;
+ unsigned int nr_pages;
+ sector_t logical;
+ pgoff_t index, end;
+ long nr_to_write = wbc->nr_to_write;
+ int i, tag, ret = 0;
+
+ memset(mpd, 0, sizeof(struct mpage_da_data));
+ mpd->wbc = wbc;
+ mpd->inode = inode;
pagevec_init(&pvec, 0);
index = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
@@ -2842,13 +2746,11 @@ static int write_cache_pages_da(struct address_space *mapping,
tag = PAGECACHE_TAG_DIRTY;
*done_index = index;
- while (!done && (index <= end)) {
- int i;
-
+ while (index <= end) {
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
- break;
+ return 0;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -2860,60 +2762,100 @@ static int write_cache_pages_da(struct address_space *mapping,
* mapping. However, page->index will not change
* because we have a reference on the page.
*/
- if (page->index > end) {
- done = 1;
- break;
- }
+ if (page->index > end)
+ goto out;
*done_index = page->index + 1;
+ /*
+ * If we can't merge this page, and we have
+ * accumulated a contiguous region, write it
+ */
+ if ((mpd->next_page != page->index) &&
+ (mpd->next_page != mpd->first_page)) {
+ mpage_da_map_and_submit(mpd);
+ goto ret_extent_tail;
+ }
+
lock_page(page);
/*
- * Page truncated or invalidated. We can freely skip it
- * then, even for data integrity operations: the page
- * has disappeared concurrently, so there could be no
- * real expectation of this data interity operation
- * even if there is now a new, dirty page at the same
- * pagecache address.
+ * If the page is no longer dirty, or its
+ * mapping no longer corresponds to inode we
+ * are writing (which means it has been
+ * truncated or invalidated), or the page is
+ * already under writeback and we are not
+ * doing a data integrity writeback, skip the page
*/
- if (unlikely(page->mapping != mapping)) {
-continue_unlock:
+ if (!PageDirty(page) ||
+ (PageWriteback(page) &&
+ (wbc->sync_mode == WB_SYNC_NONE)) ||
+ unlikely(page->mapping != mapping)) {
unlock_page(page);
continue;
}
- if (!PageDirty(page)) {
- /* someone wrote it for us */
- goto continue_unlock;
- }
-
- if (PageWriteback(page)) {
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
- else
- goto continue_unlock;
- }
+ if (PageWriteback(page))
+ wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
- if (!clear_page_dirty_for_io(page))
- goto continue_unlock;
- ret = __mpage_da_writepage(page, wbc, mpd);
- if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
- ret = 0;
- } else {
- done = 1;
- break;
- }
+ if (mpd->next_page != page->index)
+ mpd->first_page = page->index;
+ mpd->next_page = page->index + 1;
+ logical = (sector_t) page->index <<
+ (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+ if (!page_has_buffers(page)) {
+ mpage_add_bh_to_extent(mpd, logical,
+ PAGE_CACHE_SIZE,
+ (1 << BH_Dirty) | (1 << BH_Uptodate));
+ if (mpd->io_done)
+ goto ret_extent_tail;
+ } else {
+ /*
+ * Page with regular buffer heads,
+ * just add all dirty ones
+ */
+ head = page_buffers(page);
+ bh = head;
+ do {
+ BUG_ON(buffer_locked(bh));
+ /*
+ * We need to try to allocate
+ * unmapped blocks in the same page.
+ * Otherwise we won't make progress
+ * with the page in ext4_writepage
+ */
+ if (ext4_bh_delay_or_unwritten(NULL, bh)) {
+ mpage_add_bh_to_extent(mpd, logical,
+ bh->b_size,
+ bh->b_state);
+ if (mpd->io_done)
+ goto ret_extent_tail;
+ } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+ /*
+ * mapped dirty buffer. We need
+ * to update the b_state
+ * because we look at b_state
+ * in mpage_da_map_blocks. We
+ * don't update b_size because
+ * if we find an unmapped
+ * buffer_head later we need to
+ * use the b_state flag of that
+ * buffer_head.
+ */
+ if (mpd->b_size == 0)
+ mpd->b_state = bh->b_state & BH_FLAGS;
+ }
+ logical++;
+ } while ((bh = bh->b_this_page) != head);
}
if (nr_to_write > 0) {
nr_to_write--;
if (nr_to_write == 0 &&
- wbc->sync_mode == WB_SYNC_NONE) {
+ wbc->sync_mode == WB_SYNC_NONE)
/*
* We stop writing back only if we are
* not doing integrity sync. In case of
@@ -2924,14 +2866,18 @@ continue_unlock:
* pages, but have not synced all of the
* old dirty pages.
*/
- done = 1;
- break;
- }
+ goto out;
}
}
pagevec_release(&pvec);
cond_resched();
}
+ return 0;
+ret_extent_tail:
+ ret = MPAGE_DA_EXTENT_TAIL;
+out:
+ pagevec_release(&pvec);
+ cond_resched();
return ret;
}
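The net effect is that this fork of write_cache_pages() now owns the whole pipeline: pages stay dirty until mpage_da_submit_io() calls clear_page_dirty_for_io() just before writing, and the per-call mpd state is reset here rather than in the caller. In outline (a paraphrase of the loop above):

	/*
	 * for each batch of tagged dirty pages in [index, end]:
	 *         page does not extend the current extent, and one has been
	 *                 accumulated  ->  mpage_da_map_and_submit(mpd) and
	 *                 return MPAGE_DA_EXTENT_TAIL;
	 *         lock the page; skip it if clean, truncated, or under
	 *                 non-integrity writeback;
	 *         add the page (or just its delayed/unwritten buffers) to mpd;
	 *         stop once nr_to_write is exhausted on a non-integrity sync.
	 */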
@@ -2945,7 +2891,6 @@ static int ext4_da_writepages(struct address_space *mapping,
struct mpage_da_data mpd;
struct inode *inode = mapping->host;
int pages_written = 0;
- long pages_skipped;
unsigned int max_pages;
int range_cyclic, cycled = 1, io_done = 0;
int needed_blocks, ret = 0;
@@ -3028,11 +2973,6 @@ static int ext4_da_writepages(struct address_space *mapping,
wbc->nr_to_write = desired_nr_to_write;
}
- mpd.wbc = wbc;
- mpd.inode = mapping->host;
-
- pages_skipped = wbc->pages_skipped;
-
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
@@ -3059,22 +2999,10 @@ retry:
}
/*
- * Now call __mpage_da_writepage to find the next
+ * Now call write_cache_pages_da() to find the next
* contiguous region of logical blocks that need
- * blocks to be allocated by ext4. We don't actually
- * submit the blocks for I/O here, even though
- * write_cache_pages thinks it will, and will set the
- * pages as clean for write before calling
- * __mpage_da_writepage().
+ * blocks to be allocated by ext4 and submit them.
*/
- mpd.b_size = 0;
- mpd.b_state = 0;
- mpd.b_blocknr = 0;
- mpd.first_page = 0;
- mpd.next_page = 0;
- mpd.io_done = 0;
- mpd.pages_written = 0;
- mpd.retval = 0;
ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
/*
* If we have a contiguous extent of pages and we
@@ -3096,7 +3024,6 @@ retry:
* and try again
*/
jbd2_journal_force_commit_nested(sbi->s_journal);
- wbc->pages_skipped = pages_skipped;
ret = 0;
} else if (ret == MPAGE_DA_EXTENT_TAIL) {
/*
@@ -3104,7 +3031,6 @@ retry:
* rest of the pages
*/
pages_written += mpd.pages_written;
- wbc->pages_skipped = pages_skipped;
ret = 0;
io_done = 1;
} else if (wbc->nr_to_write)
@@ -3122,11 +3048,6 @@ retry:
wbc->range_end = mapping->writeback_index - 1;
goto retry;
}
- if (pages_skipped != wbc->pages_skipped)
- ext4_msg(inode->i_sb, KERN_CRIT,
- "This should not happen leaving %s "
- "with nr_to_write = %ld ret = %d",
- __func__, wbc->nr_to_write, ret);
/* Update index */
wbc->range_cyclic = range_cyclic;
@@ -3383,7 +3304,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
* the pages by calling redirty_page_for_writepage() but that
* would be ugly in the extreme. So instead we would need to
* replicate parts of the code in the above functions,
- * simplifying them becuase we wouldn't actually intend to
+ * simplifying them because we wouldn't actually intend to
* write out the pages, but rather only collect contiguous
* logical block extents, call the multi-block allocator, and
* then update the buffer heads with the block allocations.
@@ -3460,6 +3381,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
static int ext4_readpage(struct file *file, struct page *page)
{
+ trace_ext4_readpage(page);
return mpage_readpage(page, ext4_get_block);
}
@@ -3494,6 +3416,8 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+ trace_ext4_invalidatepage(page, offset);
+
/*
* free any io_end structure allocated for buffers to be discarded
*/
@@ -3515,6 +3439,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+ trace_ext4_releasepage(page);
+
WARN_ON(PageChecked(page));
if (!page_has_buffers(page))
return 0;
@@ -3768,7 +3694,7 @@ retry:
*
* The unwritten extents will be converted to written when DIO is completed.
* For async direct IO, since the IO may still be pending on return, we
- * set up an end_io call back function, which will do the convertion
+ * set up an end_io call back function, which will do the conversion
* when async direct IO completed.
*
* If the O_DIRECT write will extend the file then add this inode to the
@@ -3791,7 +3717,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
* We could direct write to holes and fallocate.
*
* Allocated blocks to fill the hole are marked as uninitialized
- * to prevent paralel buffered read to expose the stale data
+ * to prevent a parallel buffered read from exposing the stale data
* before DIO completes the data IO.
*
* As to previously fallocated extents, ext4 get_block
@@ -3852,7 +3778,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
int err;
/*
* for non AIO case, since the IO is already
- * completed, we could do the convertion right here
+ * completed, we could do the conversion right here
*/
err = ext4_convert_unwritten_extents(inode,
offset, ret);
@@ -3873,11 +3799,16 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+ trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
-
- return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+ ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+ else
+ ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+ trace_ext4_direct_IO_exit(inode, offset,
+ iov_length(iov, nr_segs), rw, ret);
+ return ret;
}
/*
@@ -3903,7 +3834,6 @@ static const struct address_space_operations ext4_ordered_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_ordered_write_end,
.bmap = ext4_bmap,
@@ -3919,7 +3849,6 @@ static const struct address_space_operations ext4_writeback_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_writeback_write_end,
.bmap = ext4_bmap,
@@ -3935,7 +3864,6 @@ static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
@@ -3951,7 +3879,6 @@ static const struct address_space_operations ext4_da_aops = {
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_da_writepages,
- .sync_page = block_sync_page,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
.bmap = ext4_bmap,
@@ -4098,7 +4025,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
*
* When we do truncate() we may have to clean the ends of several
* indirect blocks but leave the blocks themselves alive. Block is
- * partially truncated if some data below the new i_size is refered
+ * partially truncated if some data below the new i_size is referred
* from it (and it is on the path to the first completely truncated
* data block, indeed). We have to free the top of that path along
* with everything to the right of the path. Since no allocation
@@ -4177,6 +4104,9 @@ no_top:
*
* We release `count' blocks on disk, but (last - first) may be greater
* than `count' because there can be holes in there.
+ *
+ * Return 0 on success, 1 on invalid block range
+ * and < 0 on fatal error.
*/
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bh,
@@ -4203,33 +4133,32 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
if (bh) {
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, bh);
- if (unlikely(err)) {
- ext4_std_error(inode->i_sb, err);
- return 1;
- }
+ if (unlikely(err))
+ goto out_err;
}
err = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(err)) {
- ext4_std_error(inode->i_sb, err);
- return 1;
- }
+ if (unlikely(err))
+ goto out_err;
err = ext4_truncate_restart_trans(handle, inode,
blocks_for_truncate(inode));
- if (unlikely(err)) {
- ext4_std_error(inode->i_sb, err);
- return 1;
- }
+ if (unlikely(err))
+ goto out_err;
if (bh) {
BUFFER_TRACE(bh, "retaking write access");
- ext4_journal_get_write_access(handle, bh);
+ err = ext4_journal_get_write_access(handle, bh);
+ if (unlikely(err))
+ goto out_err;
}
}
for (p = first; p < last; p++)
*p = 0;
- ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
+ ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
return 0;
+out_err:
+ ext4_std_error(inode->i_sb, err);
+ return err;
}
/**
@@ -4240,7 +4169,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
* @first: array of block numbers
* @last: points immediately past the end of array
*
- * We are freeing all blocks refered from that array (numbers are stored as
+ * We are freeing all blocks referred from that array (numbers are stored as
* little-endian 32-bit) and updating @inode->i_blocks appropriately.
*
* We accumulate contiguous runs of blocks to free. Conveniently, if these
@@ -4263,7 +4192,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
ext4_fsblk_t nr; /* Current block # */
__le32 *p; /* Pointer into inode/ind
for current block */
- int err;
+ int err = 0;
if (this_bh) { /* For indirect block */
BUFFER_TRACE(this_bh, "get_write_access");
@@ -4285,9 +4214,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
} else if (nr == block_to_free + count) {
count++;
} else {
- if (ext4_clear_blocks(handle, inode, this_bh,
- block_to_free, count,
- block_to_free_p, p))
+ err = ext4_clear_blocks(handle, inode, this_bh,
+ block_to_free, count,
+ block_to_free_p, p);
+ if (err)
break;
block_to_free = nr;
block_to_free_p = p;
@@ -4296,9 +4226,12 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
}
}
- if (count > 0)
- ext4_clear_blocks(handle, inode, this_bh, block_to_free,
- count, block_to_free_p, p);
+ if (!err && count > 0)
+ err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
+ count, block_to_free_p, p);
+ if (err < 0)
+ /* fatal error */
+ return;
if (this_bh) {
BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
@@ -4328,7 +4261,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
* @last: pointer immediately past the end of array
* @depth: depth of the branches to free
*
- * We are freeing all blocks refered from these branches (numbers are
+ * We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
@@ -4416,7 +4349,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
* transaction where the data blocks are
* actually freed.
*/
- ext4_free_blocks(handle, inode, 0, nr, 1,
+ ext4_free_blocks(handle, inode, NULL, nr, 1,
EXT4_FREE_BLOCKS_METADATA|
EXT4_FREE_BLOCKS_FORGET);
@@ -4500,6 +4433,8 @@ void ext4_truncate(struct inode *inode)
ext4_lblk_t last_block;
unsigned blocksize = inode->i_sb->s_blocksize;
+ trace_ext4_truncate_enter(inode);
+
if (!ext4_can_truncate(inode))
return;
@@ -4510,6 +4445,7 @@ void ext4_truncate(struct inode *inode)
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ext4_ext_truncate(inode);
+ trace_ext4_truncate_exit(inode);
return;
}
@@ -4639,6 +4575,7 @@ out_stop:
ext4_orphan_del(handle, inode);
ext4_journal_stop(handle);
+ trace_ext4_truncate_exit(inode);
}
/*
@@ -4770,6 +4707,7 @@ make_io:
* has in-inode xattrs, or we don't have this inode in memory.
* Read the block from disk.
*/
+ trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ_META, bh);
@@ -4875,7 +4813,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
return inode;
ei = EXT4_I(inode);
- iloc.bh = 0;
+ iloc.bh = NULL;
ret = __ext4_get_inode_loc(inode, &iloc, 0);
if (ret < 0)
@@ -5540,7 +5478,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
}
/*
- * Calulate the total number of credits to reserve to fit
+ * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
* which may include multiple chunks of block allocations.
*
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index eb3bc2fe647e..808c554e773f 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -38,7 +38,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
unsigned int oldflags;
unsigned int jflag;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
@@ -146,7 +146,7 @@ flags_out:
__u32 generation;
int err;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
err = mnt_want_write(filp->f_path.mnt);
@@ -298,7 +298,7 @@ mext_out:
case EXT4_IOC_MIGRATE:
{
int err;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
err = mnt_want_write(filp->f_path.mnt);
@@ -320,7 +320,7 @@ mext_out:
case EXT4_IOC_ALLOC_DA_BLKS:
{
int err;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
err = mnt_want_write(filp->f_path.mnt);
@@ -334,16 +334,22 @@ mext_out:
case FITRIM:
{
struct super_block *sb = inode->i_sb;
+ struct request_queue *q = bdev_get_queue(sb->s_bdev);
struct fstrim_range range;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
if (copy_from_user(&range, (struct fstrim_range *)arg,
sizeof(range)))
return -EFAULT;
+ range.minlen = max((unsigned int)range.minlen,
+ q->limits.discard_granularity);
ret = ext4_trim_fs(sb, &range);
if (ret < 0)
return ret;
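The FITRIM case above rejects devices without discard support and clamps range.minlen to the queue's discard granularity. A minimal userspace caller, sketched under the assumption that <linux/fs.h> provides FITRIM and struct fstrim_range (it does on kernels of this vintage), might look like this; CAP_SYS_ADMIN is required, matching the capable() check above.

/*
 * Issue FITRIM against a mounted ext4 directory.  A backing device
 * without discard support gets EOPNOTSUPP back, matching the
 * blk_queue_discard() check in the hunk above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd = open(argc > 1 ? argv[1] : "/", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;		/* trim the whole filesystem */
	range.minlen = 0;		/* kernel clamps to granularity */
	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n",
		       (unsigned long long)range.len);
	close(fd);
	return 0;
}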
@@ -421,6 +427,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
}
case EXT4_IOC_MOVE_EXT:
+ case FITRIM:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index d1fe09aea73d..d8a16eecf1d5 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -92,7 +92,7 @@
* between CPUs. It is possible to get scheduled at this point.
*
* The locality group prealloc space is used looking at whether we have
- * enough free space (pa_free) withing the prealloc space.
+ * enough free space (pa_free) within the prealloc space.
*
* If we can't allocate blocks via inode prealloc or/and locality group
* prealloc then we look at the buddy cache. The buddy cache is represented
@@ -432,9 +432,10 @@ static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
}
/* at order 0 we see each particular block */
- *max = 1 << (e4b->bd_blkbits + 3);
- if (order == 0)
+ if (order == 0) {
+ *max = 1 << (e4b->bd_blkbits + 3);
return EXT4_MB_BITMAP(e4b);
+ }
bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
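A quick sanity check of the order-0 branch above: the order-0 "buddy" is the block bitmap itself, one bit per block, so with 4KiB blocks (bd_blkbits = 12) the computed *max is 1 << (12 + 3) = 32768, the number of blocks in a group. A throwaway userspace check:

#include <stdio.h>

int main(void)
{
	int bd_blkbits = 12;			/* 4KiB blocks */
	int max = 1 << (bd_blkbits + 3);	/* bits in the order-0 map */

	printf("order-0 bitmap covers %d blocks per group\n", max);
	return 0;
}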
@@ -616,7 +617,6 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
grp = ext4_get_group_info(sb, e4b->bd_group);
- buddy = mb_find_buddy(e4b, 0, &max);
list_for_each(cur, &grp->bb_prealloc_list) {
ext4_group_t groupnr;
struct ext4_prealloc_space *pa;
@@ -635,7 +635,12 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
#define mb_check_buddy(e4b)
#endif
-/* FIXME!! need more doc */
+/*
+ * Divide blocks started from @first with length @len into
+ * smaller chunks with power of 2 blocks.
+ * Clear the bits in the bitmap that the blocks of the chunk(s) cover,
+ * then increase bb_counters[] for the corresponding chunk size.
+ */
static void ext4_mb_mark_free_simple(struct super_block *sb,
void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
struct ext4_group_info *grp)
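The comment added above describes a greedy buddy decomposition: carve [first, first + len) into power-of-two chunks, each no larger than its own alignment allows. A hedged userspace model of that idea follows; the names and the exact loop are illustrative, not the kernel's ffs()/fls() based implementation.

#include <stdio.h>

static void mark_free_chunks(unsigned int first, unsigned int len)
{
	while (len) {
		/* largest power of two that keeps `first` aligned ... */
		unsigned int chunk = first ? (first & -first) : ~0u;

		/* ... and that still fits inside the remaining run */
		while (chunk > len)
			chunk >>= 1;

		printf("chunk of %u block(s) at %u\n", chunk, first);
		first += chunk;
		len -= chunk;
	}
}

int main(void)
{
	mark_free_chunks(5, 11);	/* prints 1@5, 2@6, 8@8 */
	return 0;
}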
@@ -2381,7 +2386,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
* kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
* So a two level scheme suffices for now. */
- sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
+ sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
if (sbi->s_group_info == NULL) {
printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
return -ENOMEM;
@@ -3208,7 +3213,7 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
cur_distance = abs(goal_block - cpa->pa_pstart);
new_distance = abs(goal_block - pa->pa_pstart);
- if (cur_distance < new_distance)
+ if (cur_distance <= new_distance)
return cpa;
/* drop the previous reference */
@@ -3907,7 +3912,8 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
struct super_block *sb = ac->ac_sb;
ext4_group_t ngroups, i;
- if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+ if (!mb_enable_debug ||
+ (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
return;
printk(KERN_ERR "EXT4-fs: Can't allocate:"
@@ -4753,7 +4759,8 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
* bitmap. Then issue a TRIM command on this extent and free the extent in
* the group buddy bitmap. This is done until whole group is scanned.
*/
-ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
+static ext4_grpblk_t
+ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks)
{
void *bitmap;
@@ -4863,10 +4870,15 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
break;
}
- if (len >= EXT4_BLOCKS_PER_GROUP(sb))
- len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
- else
+ /*
+	 * For every group except the last one, last_block is always
+	 * EXT4_BLOCKS_PER_GROUP(sb); we only need to change it for the
+	 * last group, in which case first_block + len <
+	 * EXT4_BLOCKS_PER_GROUP(sb).
+ */
+ if (first_block + len < EXT4_BLOCKS_PER_GROUP(sb))
last_block = first_block + len;
+ len -= last_block - first_block;
if (e4b.bd_info->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, &e4b, first_block,
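To see the windowing the new comment describes, here is a hedged userspace walk of a trim request across groups; BLOCKS_PER_GROUP and the function name are invented for the sketch, and only the final group ends up with a shortened last_block.

#include <stdio.h>

#define BLOCKS_PER_GROUP 32768u	/* 4KiB blocks, default ext4 geometry */

static void walk_trim_range(unsigned long long start,
			    unsigned long long len)
{
	while (len) {
		unsigned long long group = start / BLOCKS_PER_GROUP;
		unsigned int first_block = start % BLOCKS_PER_GROUP;
		unsigned int last_block = BLOCKS_PER_GROUP;

		if (first_block + len < BLOCKS_PER_GROUP)
			last_block = first_block + len;	/* final group */

		printf("group %llu: trim [%u, %u)\n",
		       group, first_block, last_block);
		len -= last_block - first_block;
		start += last_block - first_block;
	}
}

int main(void)
{
	walk_trim_range(40000, 60000);
	return 0;
}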
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index b619322c76f0..22bd4d7f289b 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -169,7 +169,7 @@ struct ext4_allocation_context {
/* original request */
struct ext4_free_extent ac_o_ex;
- /* goal request (after normalization) */
+ /* goal request (normalized ac_o_ex) */
struct ext4_free_extent ac_g_ex;
/* the best found extent */
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b0a126f23c20..92816b4e0f16 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -263,7 +263,7 @@ static int free_dind_blocks(handle_t *handle,
for (i = 0; i < max_entries; i++) {
if (tmp_idata[i]) {
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0,
+ ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(tmp_idata[i]), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -271,7 +271,7 @@ static int free_dind_blocks(handle_t *handle,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1,
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
return 0;
@@ -302,7 +302,7 @@ static int free_tind_blocks(handle_t *handle,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1,
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
return 0;
@@ -315,7 +315,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
/* ei->i_data[EXT4_IND_BLOCK] */
if (i_data[0]) {
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0,
+ ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(i_data[0]), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -428,7 +428,7 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
}
put_bh(bh);
extend_credit_for_blkdel(handle, inode);
- ext4_free_blocks(handle, inode, 0, block, 1,
+ ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
return retval;
}
@@ -517,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
* start with one credit accounted for
* superblock modification.
*
- * For the tmp_inode we already have commited the
+ * For the tmp_inode we already have committed the
 * transaction that created the inode. Later, as and
 * when we add extents, we extend the journal
*/
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e781b7ea5630..67fd0b025858 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -40,6 +40,7 @@
#include "xattr.h"
#include "acl.h"
+#include <trace/events/ext4.h>
/*
* define how far ahead to read directories while searching them.
*/
@@ -2183,6 +2184,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
struct ext4_dir_entry_2 *de;
handle_t *handle;
+ trace_ext4_unlink_enter(dir, dentry);
/* Initialize quotas before so that eventual writes go
* in separate transaction */
dquot_initialize(dir);
@@ -2228,6 +2230,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
end_unlink:
ext4_journal_stop(handle);
brelse(bh);
+ trace_ext4_unlink_exit(dentry, retval);
return retval;
}
@@ -2402,6 +2405,10 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!new_inode && new_dir != old_dir &&
EXT4_DIR_LINK_MAX(new_dir))
goto end_rename;
+ BUFFER_TRACE(dir_bh, "get_write_access");
+ retval = ext4_journal_get_write_access(handle, dir_bh);
+ if (retval)
+ goto end_rename;
}
if (!new_bh) {
retval = ext4_add_entry(handle, new_dentry, old_inode);
@@ -2409,7 +2416,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
goto end_rename;
} else {
BUFFER_TRACE(new_bh, "get write access");
- ext4_journal_get_write_access(handle, new_bh);
+ retval = ext4_journal_get_write_access(handle, new_bh);
+ if (retval)
+ goto end_rename;
new_de->inode = cpu_to_le32(old_inode->i_ino);
if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
EXT4_FEATURE_INCOMPAT_FILETYPE))
@@ -2470,8 +2479,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
ext4_update_dx_flag(old_dir);
if (dir_bh) {
- BUFFER_TRACE(dir_bh, "get_write_access");
- ext4_journal_get_write_access(handle, dir_bh);
PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 955cc309142f..b6dbd056fcb1 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -259,6 +259,11 @@ static void ext4_end_bio(struct bio *bio, int error)
bi_sector >> (inode->i_blkbits - 9));
}
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+ return;
+ }
+
/* Add the io_end to per-inode completed io list*/
spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
@@ -279,9 +284,9 @@ void ext4_io_submit(struct ext4_io_submit *io)
BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
bio_put(io->io_bio);
}
- io->io_bio = 0;
+ io->io_bio = NULL;
io->io_op = 0;
- io->io_end = 0;
+ io->io_end = NULL;
}
static int io_submit_init(struct ext4_io_submit *io,
@@ -310,8 +315,7 @@ static int io_submit_init(struct ext4_io_submit *io,
io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
io->io_bio = bio;
- io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE);
+ io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
io->io_next_block = bh->b_blocknr;
return 0;
}
@@ -381,8 +385,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
- set_page_writeback(page);
- ClearPageError(page);
io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
if (!io_page) {
@@ -393,6 +395,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
io_page->p_page = page;
atomic_set(&io_page->p_count, 1);
get_page(page);
+ set_page_writeback(page);
+ ClearPageError(page);
for (bh = head = page_buffers(page), block_start = 0;
bh != head || !block_start;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 3ecc6e45d2f9..80bbc9c60c24 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -230,7 +230,7 @@ static int setup_new_group_blocks(struct super_block *sb,
}
/* Zero out all of the reserved backup group descriptor table blocks */
- ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+ ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
block, sbi->s_itb_per_group);
err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
GFP_NOFS);
@@ -248,7 +248,7 @@ static int setup_new_group_blocks(struct super_block *sb,
/* Zero out all of the inode table blocks */
block = input->inode_table;
- ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+ ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
block, sbi->s_itb_per_group);
err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
if (err)
@@ -499,12 +499,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
return err;
exit_inode:
- /* ext4_journal_release_buffer(handle, iloc.bh); */
+ /* ext4_handle_release_buffer(handle, iloc.bh); */
brelse(iloc.bh);
exit_dindj:
- /* ext4_journal_release_buffer(handle, dind); */
+ /* ext4_handle_release_buffer(handle, dind); */
exit_sbh:
- /* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
+ /* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
exit_dind:
brelse(dind);
exit_bh:
@@ -586,7 +586,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
/*
int j;
for (j = 0; j < i; j++)
- ext4_journal_release_buffer(handle, primary[j]);
+ ext4_handle_release_buffer(handle, primary[j]);
*/
goto exit_bh;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 203f9e4a70be..056474b7b8e0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -54,9 +54,9 @@
static struct proc_dir_entry *ext4_proc_root;
static struct kset *ext4_kset;
-struct ext4_lazy_init *ext4_li_info;
-struct mutex ext4_li_mtx;
-struct ext4_features *ext4_feat;
+static struct ext4_lazy_init *ext4_li_info;
+static struct mutex ext4_li_mtx;
+static struct ext4_features *ext4_feat;
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
unsigned long journal_devnum);
@@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data);
+static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
@@ -594,7 +595,7 @@ __acquires(bitlock)
vaf.fmt = fmt;
vaf.va = &args;
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
sb->s_id, function, line, grp);
if (ino)
printk(KERN_CONT "inode %lu: ", ino);
@@ -616,7 +617,7 @@ __acquires(bitlock)
* filesystem will have already been marked read/only and the
* journal has been aborted. We return 1 as a hint to callers
* who might what to use the return value from
- * ext4_grp_locked_error() to distinguish beween the
+ * ext4_grp_locked_error() to distinguish between the
* ERRORS_CONT and ERRORS_RO case, and perhaps return more
* aggressively from the ext4 function in question, with a
* more appropriate error code.
@@ -997,13 +998,10 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (test_opt(sb, OLDALLOC))
seq_puts(seq, ",oldalloc");
#ifdef CONFIG_EXT4_FS_XATTR
- if (test_opt(sb, XATTR_USER) &&
- !(def_mount_opts & EXT4_DEFM_XATTR_USER))
+ if (test_opt(sb, XATTR_USER))
seq_puts(seq, ",user_xattr");
- if (!test_opt(sb, XATTR_USER) &&
- (def_mount_opts & EXT4_DEFM_XATTR_USER)) {
+ if (!test_opt(sb, XATTR_USER))
seq_puts(seq, ",nouser_xattr");
- }
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
@@ -1041,8 +1039,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
!(def_mount_opts & EXT4_DEFM_NODELALLOC))
seq_puts(seq, ",nodelalloc");
- if (test_opt(sb, MBLK_IO_SUBMIT))
- seq_puts(seq, ",mblk_io_submit");
+ if (!test_opt(sb, MBLK_IO_SUBMIT))
+ seq_puts(seq, ",nomblk_io_submit");
if (sbi->s_stripe)
seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
/*
@@ -1451,7 +1449,7 @@ static int parse_options(char *options, struct super_block *sb,
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
- args[0].to = args[0].from = 0;
+ args[0].to = args[0].from = NULL;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
@@ -1771,7 +1769,7 @@ set_qf_format:
return 0;
if (option < 0 || option > (1 << 30))
return 0;
- if (!is_power_of_2(option)) {
+ if (option && !is_power_of_2(option)) {
ext4_msg(sb, KERN_ERR,
"EXT4-fs: inode_readahead_blks"
" must be a power of 2");
@@ -2120,6 +2118,13 @@ static void ext4_orphan_cleanup(struct super_block *sb,
return;
}
+ /* Check if feature set would not allow a r/w mount */
+ if (!ext4_feature_set_ok(sb, 0)) {
+ ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
+ "unknown ROCOMPAT features");
+ return;
+ }
+
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, "
@@ -2412,7 +2417,7 @@ static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
if (parse_strtoul(buf, 0x40000000, &t))
return -EINVAL;
- if (!is_power_of_2(t))
+ if (t && !is_power_of_2(t))
return -EINVAL;
sbi->s_inode_readahead_blks = t;
@@ -3095,14 +3100,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sb, NO_UID32);
+ /* xattr user namespace & acls are now defaulted on */
#ifdef CONFIG_EXT4_FS_XATTR
- if (def_mount_opts & EXT4_DEFM_XATTR_USER)
- set_opt(sb, XATTR_USER);
+ set_opt(sb, XATTR_USER);
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
- if (def_mount_opts & EXT4_DEFM_ACL)
- set_opt(sb, POSIX_ACL);
+ set_opt(sb, POSIX_ACL);
#endif
+ set_opt(sb, MBLK_IO_SUBMIT);
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
set_opt(sb, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3516,7 +3521,7 @@ no_journal:
* concurrency isn't really necessary. Limit it to 1.
*/
EXT4_SB(sb)->dio_unwritten_wq =
- alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM, 1);
+ alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!EXT4_SB(sb)->dio_unwritten_wq) {
printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
goto failed_mount_wq;
@@ -3531,17 +3536,16 @@ no_journal:
if (IS_ERR(root)) {
ext4_msg(sb, KERN_ERR, "get root inode failed");
ret = PTR_ERR(root);
+ root = NULL;
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
- iput(root);
ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
goto failed_mount4;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
ext4_msg(sb, KERN_ERR, "get root dentry failed");
- iput(root);
ret = -ENOMEM;
goto failed_mount4;
}
@@ -3657,6 +3661,8 @@ cantfind_ext4:
goto failed_mount;
failed_mount4:
+ iput(root);
+ sb->s_root = NULL;
ext4_msg(sb, KERN_ERR, "mount failed");
destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
failed_mount_wq:
@@ -4618,7 +4624,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fc32176eee39..b545ca1c459c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -735,7 +735,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
int offset = (char *)s->here - bs->bh->b_data;
unlock_buffer(bs->bh);
- jbd2_journal_release_buffer(handle, bs->bh);
+ ext4_handle_release_buffer(handle, bs->bh);
if (ce) {
mb_cache_entry_release(ce);
ce = NULL;
@@ -833,7 +833,7 @@ inserted:
new_bh = sb_getblk(sb, block);
if (!new_bh) {
getblk_failed:
- ext4_free_blocks(handle, inode, 0, block, 1,
+ ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA);
error = -EIO;
goto cleanup;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0e277ec4b612..8d68690bdcf1 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -236,7 +236,6 @@ static const struct address_space_operations fat_aops = {
.readpages = fat_readpages,
.writepage = fat_writepage,
.writepages = fat_writepages,
- .sync_page = block_sync_page,
.write_begin = fat_write_begin,
.write_end = fat_write_end,
.direct_IO = fat_direct_IO,
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6c82e5bac039..22764c7c8382 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -159,7 +159,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
/* O_NOATIME can only be set by the owner or superuser */
if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
/* required for strict SunOS emulation */
diff --git a/fs/freevxfs/vxfs_fshead.c b/fs/freevxfs/vxfs_fshead.c
index 78948b4b1894..c9a6a94e58e9 100644
--- a/fs/freevxfs/vxfs_fshead.c
+++ b/fs/freevxfs/vxfs_fshead.c
@@ -164,7 +164,7 @@ vxfs_read_fshead(struct super_block *sbp)
goto out_free_pfp;
}
if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) {
- printk(KERN_ERR "vxfs: structual list inode is of wrong type (%x)\n",
+ printk(KERN_ERR "vxfs: structural list inode is of wrong type (%x)\n",
VXFS_INO(infp->vsi_stilist)->vii_mode & VXFS_TYPE_MASK);
goto out_iput_stilist;
}
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 6c5131d592f0..3360f1e678ad 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -162,7 +162,7 @@ vxfs_find_entry(struct inode *ip, struct dentry *dp, struct page **ppp)
/**
* vxfs_inode_by_name - find inode number for dentry
* @dip: directory to search in
- * @dp: dentry we seach for
+ * @dp: dentry we search for
*
* Description:
* vxfs_inode_by_name finds out the inode number of
diff --git a/fs/freevxfs/vxfs_olt.h b/fs/freevxfs/vxfs_olt.h
index d8324296486f..b7b3af502615 100644
--- a/fs/freevxfs/vxfs_olt.h
+++ b/fs/freevxfs/vxfs_olt.h
@@ -60,7 +60,7 @@ enum {
*
* The Object Location Table header is placed at the beginning of each
 * OLT extent. It is used to find certain filesystem-wide metadata, e.g.
- * the inital inode list, the fileset header or the device configuration.
+ * the initial inode list, the fileset header or the device configuration.
*/
struct vxfs_olt {
u_int32_t olt_magic; /* magic number */
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 1429f3ae1e86..5d318c44f855 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -44,7 +44,6 @@ static sector_t vxfs_bmap(struct address_space *, sector_t);
const struct address_space_operations vxfs_aops = {
.readpage = vxfs_readpage,
.bmap = vxfs_bmap,
- .sync_page = block_sync_page,
};
inline void
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 59c6e4956786..34591ee804b5 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -144,7 +144,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
*
* Description:
* This does WB_SYNC_NONE opportunistic writeback. The IO is only
- * started when this function returns, we make no guarentees on
+ * started when this function returns, we make no guarantees on
* completion. Caller need not hold sb s_umount semaphore.
*
*/
@@ -176,6 +176,17 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
}
/*
+ * Remove the inode from the writeback list it is on.
+ */
+void inode_wb_list_del(struct inode *inode)
+{
+ spin_lock(&inode_wb_list_lock);
+ list_del_init(&inode->i_wb_list);
+ spin_unlock(&inode_wb_list_lock);
+}
+
+
+/*
* Redirty an inode: set its when-it-was dirtied timestamp and move it to the
* furthest end of its superblock's dirty-inode list.
*
@@ -188,6 +199,7 @@ static void redirty_tail(struct inode *inode)
{
struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
+ assert_spin_locked(&inode_wb_list_lock);
if (!list_empty(&wb->b_dirty)) {
struct inode *tail;
@@ -205,14 +217,17 @@ static void requeue_io(struct inode *inode)
{
struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
+ assert_spin_locked(&inode_wb_list_lock);
list_move(&inode->i_wb_list, &wb->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
{
/*
- * Prevent speculative execution through spin_unlock(&inode_lock);
+ * Prevent speculative execution through
+ * spin_unlock(&inode_wb_list_lock);
*/
+
smp_mb();
wake_up_bit(&inode->i_state, __I_SYNC);
}
@@ -286,6 +301,7 @@ static void move_expired_inodes(struct list_head *delaying_queue,
*/
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
+ assert_spin_locked(&inode_wb_list_lock);
list_splice_init(&wb->b_more_io, &wb->b_io);
move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
@@ -306,25 +322,25 @@ static void inode_wait_for_writeback(struct inode *inode)
wait_queue_head_t *wqh;
wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
- while (inode->i_state & I_SYNC) {
- spin_unlock(&inode_lock);
+ while (inode->i_state & I_SYNC) {
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
}
}
/*
- * Write out an inode's dirty pages. Called under inode_lock. Either the
- * caller has ref on the inode (either via __iget or via syscall against an fd)
- * or the inode has I_WILL_FREE set (via generic_forget_inode)
+ * Write out an inode's dirty pages. Called under inode_wb_list_lock and
+ * inode->i_lock. Either the caller has an active reference on the inode or
+ * the inode has I_WILL_FREE set.
*
* If `wait' is set, wait on the writeout.
*
* The whole writeout design is quite complex and fragile. We want to avoid
* starvation of particular inodes when others are being redirtied, prevent
* livelocks, etc.
- *
- * Called under inode_lock.
*/
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
@@ -333,6 +349,9 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
unsigned dirty;
int ret;
+ assert_spin_locked(&inode_wb_list_lock);
+ assert_spin_locked(&inode->i_lock);
+
if (!atomic_read(&inode->i_count))
WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
else
@@ -363,7 +382,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
/* Set I_SYNC, reset I_DIRTY_PAGES */
inode->i_state |= I_SYNC;
inode->i_state &= ~I_DIRTY_PAGES;
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
ret = do_writepages(mapping, wbc);
@@ -383,10 +403,10 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* due to delalloc, clear dirty metadata flags right before
* write_inode()
*/
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
int err = write_inode(inode, wbc);
@@ -394,7 +414,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
ret = err;
}
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
inode->i_state &= ~I_SYNC;
if (!(inode->i_state & I_FREEING)) {
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
@@ -506,7 +527,9 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
 * kind does not need periodic writeout yet, and for the latter
* kind writeout is handled by the freer.
*/
+ spin_lock(&inode->i_lock);
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
requeue_io(inode);
continue;
}
@@ -515,10 +538,13 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
* Was this inode dirtied after sync_sb_inodes was called?
* This keeps sync from extra jobs and livelock.
*/
- if (inode_dirtied_after(inode, wbc->wb_start))
+ if (inode_dirtied_after(inode, wbc->wb_start)) {
+ spin_unlock(&inode->i_lock);
return 1;
+ }
__iget(inode);
+
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
if (wbc->pages_skipped != pages_skipped) {
@@ -528,10 +554,11 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
*/
redirty_tail(inode);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
iput(inode);
cond_resched();
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
if (wbc->nr_to_write <= 0) {
wbc->more_io = 1;
return 1;
@@ -550,7 +577,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
if (!wbc->wb_start)
wbc->wb_start = jiffies; /* livelock avoidance */
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
if (!wbc->for_kupdate || list_empty(&wb->b_io))
queue_io(wb, wbc->older_than_this);
@@ -568,7 +595,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
if (ret)
break;
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_wb_list_lock);
/* Leave any unwritten inodes on b_io */
}
@@ -577,11 +604,11 @@ static void __writeback_inodes_sb(struct super_block *sb,
{
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
if (!wbc->for_kupdate || list_empty(&wb->b_io))
queue_io(wb, wbc->older_than_this);
writeback_sb_inodes(sb, wb, wbc, true);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_wb_list_lock);
}
/*
@@ -720,13 +747,15 @@ static long wb_writeback(struct bdi_writeback *wb,
* become available for writeback. Otherwise
* we'll just busyloop.
*/
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
if (!list_empty(&wb->b_more_io)) {
inode = wb_inode(wb->b_more_io.prev);
trace_wbc_writeback_wait(&wbc, wb->bdi);
+ spin_lock(&inode->i_lock);
inode_wait_for_writeback(inode);
+ spin_unlock(&inode->i_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_wb_list_lock);
}
return wrote;
@@ -992,7 +1021,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
{
struct super_block *sb = inode->i_sb;
struct backing_dev_info *bdi = NULL;
- bool wakeup_bdi = false;
/*
* Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -1016,7 +1044,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (unlikely(block_dump))
block_dump___mark_inode_dirty(inode);
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
if ((inode->i_state & flags) != flags) {
const int was_dirty = inode->i_state & I_DIRTY;
@@ -1028,7 +1056,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* superblock list, based upon its state.
*/
if (inode->i_state & I_SYNC)
- goto out;
+ goto out_unlock_inode;
/*
* Only add valid (hashed) inodes to the superblock's
@@ -1036,16 +1064,17 @@ void __mark_inode_dirty(struct inode *inode, int flags)
*/
if (!S_ISBLK(inode->i_mode)) {
if (inode_unhashed(inode))
- goto out;
+ goto out_unlock_inode;
}
if (inode->i_state & I_FREEING)
- goto out;
+ goto out_unlock_inode;
/*
* If the inode was already on b_dirty/b_io/b_more_io, don't
* reposition it (that would break b_dirty time-ordering).
*/
if (!was_dirty) {
+ bool wakeup_bdi = false;
bdi = inode_to_bdi(inode);
if (bdi_cap_writeback_dirty(bdi)) {
@@ -1062,15 +1091,20 @@ void __mark_inode_dirty(struct inode *inode, int flags)
wakeup_bdi = true;
}
+ spin_unlock(&inode->i_lock);
+ spin_lock(&inode_wb_list_lock);
inode->dirtied_when = jiffies;
list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+ spin_unlock(&inode_wb_list_lock);
+
+ if (wakeup_bdi)
+ bdi_wakeup_thread_delayed(bdi);
+ return;
}
}
-out:
- spin_unlock(&inode_lock);
+out_unlock_inode:
+ spin_unlock(&inode->i_lock);
- if (wakeup_bdi)
- bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);
@@ -1101,7 +1135,7 @@ static void wait_sb_inodes(struct super_block *sb)
*/
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
/*
* Data integrity sync. Must wait for all pages under writeback,
@@ -1111,22 +1145,25 @@ static void wait_sb_inodes(struct super_block *sb)
* we still have to wait for that writeout.
*/
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- struct address_space *mapping;
+ struct address_space *mapping = inode->i_mapping;
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
- continue;
- mapping = inode->i_mapping;
- if (mapping->nrpages == 0)
+ spin_lock(&inode->i_lock);
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ (mapping->nrpages == 0)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_sb_list_lock);
+
/*
- * We hold a reference to 'inode' so it couldn't have
- * been removed from s_inodes list while we dropped the
- * inode_lock. We cannot iput the inode now as we can
- * be holding the last reference and we cannot iput it
- * under inode_lock. So we keep the reference and iput
- * it later.
+ * We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the
+ * inode_sb_list_lock. We cannot iput the inode now as we can
+ * be holding the last reference and we cannot iput it under
+ * inode_sb_list_lock. So we keep the reference and iput it
+ * later.
*/
iput(old_inode);
old_inode = inode;
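The comment above captures a recurring pattern in this series: pin the object with a reference while the list lock is held, drop the lock for anything that can block, and defer the final put so it never runs under the lock. A hedged pthreads analogy, with all types and names invented for illustration:

#include <pthread.h>
#include <stdio.h>

struct item {
	int refcount;		/* protected by list_lock here */
	const char *name;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item items[] = { { 0, "a" }, { 0, "b" } };

static void put_item(struct item *it)
{
	pthread_mutex_lock(&list_lock);
	if (--it->refcount == 0)
		printf("freeing %s\n", it->name);	/* last reference */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct item *old = NULL;

	for (unsigned i = 0; i < 2; i++) {
		pthread_mutex_lock(&list_lock);
		items[i].refcount++;		/* __iget() analogue */
		pthread_mutex_unlock(&list_lock);

		printf("working on %s unlocked\n", items[i].name);

		if (old)
			put_item(old);		/* deferred iput() */
		old = &items[i];
	}
	if (old)
		put_item(old);
	return 0;
}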
@@ -1135,9 +1172,9 @@ static void wait_sb_inodes(struct super_block *sb)
cond_resched();
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(old_inode);
}
@@ -1271,9 +1308,11 @@ int write_inode_now(struct inode *inode, int sync)
wbc.nr_to_write = 0;
might_sleep();
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
ret = writeback_single_inode(inode, &wbc);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
if (sync)
inode_sync_wait(inode);
return ret;
@@ -1295,9 +1334,11 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
- spin_lock(&inode_lock);
+ spin_lock(&inode_wb_list_lock);
+ spin_lock(&inode->i_lock);
ret = writeback_single_inode(inode, wbc);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_wb_list_lock);
return ret;
}
EXPORT_SYMBOL(sync_inode);
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 7c39b885f969..b6cca47f7b07 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -305,7 +305,7 @@ static void cuse_gendev_release(struct device *dev)
static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
struct cuse_conn *cc = fc_to_cc(fc);
- struct cuse_init_out *arg = &req->misc.cuse_init_out;
+ struct cuse_init_out *arg = req->out.args[0].value;
struct page *page = req->pages[0];
struct cuse_devinfo devinfo = { };
struct device *dev;
@@ -384,6 +384,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
dev_set_uevent_suppress(dev, 0);
kobject_uevent(&dev->kobj, KOBJ_ADD);
out:
+ kfree(arg);
__free_page(page);
return;
@@ -405,6 +406,7 @@ static int cuse_send_init(struct cuse_conn *cc)
struct page *page;
struct fuse_conn *fc = &cc->fc;
struct cuse_init_in *arg;
+ void *outarg;
BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
@@ -419,6 +421,10 @@ static int cuse_send_init(struct cuse_conn *cc)
if (!page)
goto err_put_req;
+ outarg = kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL);
+ if (!outarg)
+ goto err_free_page;
+
arg = &req->misc.cuse_init_in;
arg->major = FUSE_KERNEL_VERSION;
arg->minor = FUSE_KERNEL_MINOR_VERSION;
@@ -429,7 +435,7 @@ static int cuse_send_init(struct cuse_conn *cc)
req->in.args[0].value = arg;
req->out.numargs = 2;
req->out.args[0].size = sizeof(struct cuse_init_out);
- req->out.args[0].value = &req->misc.cuse_init_out;
+ req->out.args[0].value = outarg;
req->out.args[1].size = CUSE_INIT_INFO_MAX;
req->out.argvar = 1;
req->out.argpages = 1;
@@ -440,6 +446,8 @@ static int cuse_send_init(struct cuse_conn *cc)
return 0;
+err_free_page:
+ __free_page(page);
err_put_req:
fuse_put_request(fc, req);
err:
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index cf8d28d1fbad..640fc229df10 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -737,14 +737,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (WARN_ON(PageMlocked(oldpage)))
goto out_fallback_unlock;
- remove_from_page_cache(oldpage);
- page_cache_release(oldpage);
-
- err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
+ err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
if (err) {
- printk(KERN_WARNING "fuse_try_move_page: failed to add page");
- goto out_fallback_unlock;
+ unlock_page(newpage);
+ return err;
}
+
page_cache_get(newpage);
if (!(buf->flags & PIPE_BUF_FLAG_LRU))
@@ -1910,6 +1908,21 @@ __acquires(fc->lock)
kfree(dequeue_forget(fc, 1, NULL));
}
+static void end_polls(struct fuse_conn *fc)
+{
+ struct rb_node *p;
+
+ p = rb_first(&fc->polled_files);
+
+ while (p) {
+ struct fuse_file *ff;
+ ff = rb_entry(p, struct fuse_file, polled_node);
+ wake_up_interruptible_all(&ff->poll_wait);
+
+ p = rb_next(p);
+ }
+}
+
/*
* Abort all requests.
*
@@ -1937,6 +1950,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
fc->blocked = 0;
end_io_requests(fc);
end_queued_requests(fc);
+ end_polls(fc);
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1953,6 +1967,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
fc->connected = 0;
fc->blocked = 0;
end_queued_requests(fc);
+ end_polls(fc);
wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
fuse_conn_put(fc);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8bd0ef9286c3..c6ba49bd95b3 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -158,10 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
{
struct inode *inode;
- if (nd && nd->flags & LOOKUP_RCU)
- return -ECHILD;
-
- inode = entry->d_inode;
+ inode = ACCESS_ONCE(entry->d_inode);
if (inode && is_bad_inode(inode))
return 0;
else if (fuse_dentry_time(entry) < get_jiffies_64()) {
@@ -177,6 +174,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
if (!inode)
return 0;
+ if (nd->flags & LOOKUP_RCU)
+ return -ECHILD;
+
fc = get_fuse_conn(inode);
req = fuse_get_req(fc);
if (IS_ERR(req))
@@ -970,6 +970,14 @@ static int fuse_access(struct inode *inode, int mask)
return err;
}
+static int fuse_perm_getattr(struct inode *inode, int flags)
+{
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+
+ return fuse_do_getattr(inode, NULL, NULL);
+}
+
/*
* Check permission. The two basic access models of FUSE are:
*
@@ -989,9 +997,6 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
bool refreshed = false;
int err = 0;
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-
if (!fuse_allow_task(fc, current))
return -EACCES;
@@ -1000,9 +1005,15 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
*/
if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
- err = fuse_update_attributes(inode, NULL, NULL, &refreshed);
- if (err)
- return err;
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ if (fi->i_time < get_jiffies_64()) {
+ refreshed = true;
+
+ err = fuse_perm_getattr(inode, flags);
+ if (err)
+ return err;
+ }
}
if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
@@ -1012,7 +1023,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
attributes. This is also needed, because the root
node will at first have no permissions */
if (err == -EACCES && !refreshed) {
- err = fuse_do_getattr(inode, NULL, NULL);
+ err = fuse_perm_getattr(inode, flags);
if (!err)
err = generic_permission(inode, mask,
flags, NULL);
@@ -1023,13 +1034,16 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
noticed immediately, only after the attribute
timeout has expired */
} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+
err = fuse_access(inode, mask);
} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
if (!(inode->i_mode & S_IXUGO)) {
if (refreshed)
return -EACCES;
- err = fuse_do_getattr(inode, NULL, NULL);
+ err = fuse_perm_getattr(inode, flags);
if (!err && !(inode->i_mode & S_IXUGO))
return -EACCES;
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9e0832dbb1e3..82a66466a24c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -222,7 +222,7 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
- wake_up_interruptible_sync(&ff->poll_wait);
+ wake_up_interruptible_all(&ff->poll_wait);
inarg->fh = ff->fh;
inarg->flags = flags;
@@ -523,7 +523,7 @@ static int fuse_readpage(struct file *file, struct page *page)
goto out;
/*
- * Page writeback can extend beyond the liftime of the
+ * Page writeback can extend beyond the lifetime of the
* page-cache page, so make sure we read a properly synced
* page.
*/
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index d4286947bc2c..b788becada76 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -272,7 +272,6 @@ struct fuse_req {
struct fuse_init_in init_in;
struct fuse_init_out init_out;
struct cuse_init_in cuse_init_in;
- struct cuse_init_out cuse_init_out;
struct {
struct fuse_read_in in;
u64 attr_ver;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 051b1a084528..cc6ec4b2f0ff 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -870,7 +870,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
fc->bdi.name = "fuse";
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
- fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does its own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 06c48a891832..8f26d1a58912 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -74,7 +74,7 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
return -EINVAL;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
acl = posix_acl_from_xattr(value, size);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index aad77e4f61b5..c71995b111bf 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1117,7 +1117,6 @@ static const struct address_space_operations gfs2_writeback_aops = {
.writepages = gfs2_writeback_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.bmap = gfs2_bmap,
@@ -1133,7 +1132,6 @@ static const struct address_space_operations gfs2_ordered_aops = {
.writepage = gfs2_ordered_writepage,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.set_page_dirty = gfs2_set_page_dirty,
@@ -1151,7 +1149,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
.writepages = gfs2_jdata_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.set_page_dirty = gfs2_set_page_dirty,
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index ef3dc4b9fae2..74add2ddcc3f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1136,7 +1136,7 @@ void gfs2_trim_blocks(struct inode *inode)
* earlier versions of GFS2 have a bug in the stuffed file reading
* code which will result in a buffer overrun if the size is larger
* than the max stuffed file size. In order to prevent this from
- * occuring, such files are unstuffed, but in other cases we can
+ * occurring, such files are unstuffed, but in other cases we can
* just update the inode size directly.
*
* Returns: 0 on success, or -ve on error
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4074b952b059..b2682e073eee 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -221,7 +221,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
goto out_drop_write;
error = -EACCES;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
goto out;
error = 0;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index e2431313491f..f07643e21bfa 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1123,7 +1123,7 @@ void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
* @number: the lock number
* @glops: the glock operations for the type of glock
* @state: the state to acquire the glock in
- * @flags: modifier flags for the aquisition
+ * @flags: modifier flags for the acquisition
* @gh: the struct gfs2_holder
*
* Returns: errno
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e7ed31f858dd..5b102c1887fd 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -121,7 +121,7 @@ __acquires(&sdp->sd_ail_lock)
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
} else {
unlock_buffer(bh);
brelse(bh);
@@ -647,7 +647,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
lock_buffer(bh);
if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
} else {
unlock_buffer(bh);
brelse(bh);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index e919abf25ecd..51d27f00ebb4 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -204,7 +204,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
}
gfs2_log_unlock(sdp);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
n = 0;
@@ -214,7 +214,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
gfs2_log_unlock(sdp);
lock_buffer(bd2->bd_bh);
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
if (++n >= num)
break;
@@ -356,7 +356,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
sdp->sd_log_num_revoke--;
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
bh = gfs2_log_get_buf(sdp);
mh = (struct gfs2_meta_header *)bh->b_data;
@@ -373,7 +373,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
}
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -575,7 +575,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
ptr = bh_log_ptr(bh);
get_bh(bh);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
while(!list_empty(list)) {
bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
@@ -601,7 +601,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
} else {
bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
}
- submit_bh(WRITE_SYNC_PLUG, bh1);
+ submit_bh(WRITE_SYNC, bh1);
gfs2_log_lock(sdp);
ptr += 2;
}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 01d97f486553..675349b5a133 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
struct buffer_head *bh, *head;
int nr_underway = 0;
int write_op = REQ_META |
- (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
+ (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page));
@@ -94,7 +94,6 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
const struct address_space_operations gfs2_meta_aops = {
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
- .sync_page = block_sync_page,
};
/**
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ec73ed70bae1..a4e23d68a398 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -657,7 +657,7 @@ out:
* @sdp: the file system
*
* This function flushes data and meta data for all machines by
- * aquiring the transaction log exclusively. All journals are
+ * acquiring the transaction log exclusively. All journals are
* ensured to be in a clean state as well.
*
* Returns: errno
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index dffb4e996643..fff16c968e67 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -150,7 +150,6 @@ static int hfs_writepages(struct address_space *mapping,
const struct address_space_operations hfs_btree_aops = {
.readpage = hfs_readpage,
.writepage = hfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
@@ -160,7 +159,6 @@ const struct address_space_operations hfs_btree_aops = {
const struct address_space_operations hfs_aops = {
.readpage = hfs_readpage,
.writepage = hfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index a8df651747f0..b248a6cfcad9 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -146,7 +146,6 @@ static int hfsplus_writepages(struct address_space *mapping,
const struct address_space_operations hfsplus_btree_aops = {
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
- .sync_page = block_sync_page,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
@@ -156,7 +155,6 @@ const struct address_space_operations hfsplus_btree_aops = {
const struct address_space_operations hfsplus_aops = {
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
- .sync_page = block_sync_page,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 508ce662ce12..fbaa6690c8e0 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -47,7 +47,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
if (err)
goto out;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EACCES;
goto out_drop_write;
}
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 2dbae20450f8..9b9eb6933e43 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -119,7 +119,6 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations hpfs_aops = {
.readpage = hpfs_readpage,
.writepage = hpfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hpfs_write_begin,
.write_end = generic_write_end,
.bmap = _hpfs_bmap
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9885082b470f..b9eeb1cd03ff 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -332,8 +332,7 @@ static void truncate_huge_page(struct page *page)
{
cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
ClearPageUptodate(page);
- remove_from_page_cache(page);
- put_page(page);
+ delete_from_page_cache(page);
}
static void truncate_hugepages(struct inode *inode, loff_t lstart)
diff --git a/fs/inode.c b/fs/inode.c
index 16fefd373fc2..33c963d08ab4 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -25,6 +25,39 @@
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/ima.h>
+#include <linux/cred.h>
+#include "internal.h"
+
+/*
+ * inode locking rules.
+ *
+ * inode->i_lock protects:
+ * inode->i_state, inode->i_hash, __iget()
+ * inode_lru_lock protects:
+ * inode_lru, inode->i_lru
+ * inode_sb_list_lock protects:
+ * sb->s_inodes, inode->i_sb_list
+ * inode_wb_list_lock protects:
+ * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
+ * inode_hash_lock protects:
+ * inode_hashtable, inode->i_hash
+ *
+ * Lock ordering:
+ *
+ * inode_sb_list_lock
+ * inode->i_lock
+ * inode_lru_lock
+ *
+ * inode_wb_list_lock
+ * inode->i_lock
+ *
+ * inode_hash_lock
+ * inode_sb_list_lock
+ * inode->i_lock
+ *
+ * iunique_lock
+ * inode_hash_lock
+ */
/*
* This is needed for the following functions:
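The ordering rules above mean any path needing both a list lock and the per-inode lock must take the list lock first. A minimal sketch of that nesting (hypothetical helper; it mirrors __insert_inode_hash() further down in this patch):

static void example_hash_insert(struct inode *inode, struct hlist_head *b)
{
	spin_lock(&inode_hash_lock);	/* outer lock, per the rules above */
	spin_lock(&inode->i_lock);	/* nests inside inode_hash_lock */
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}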
@@ -59,6 +92,8 @@
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
/*
* Each inode can be on two separate lists. One is
@@ -73,15 +108,10 @@ static unsigned int i_hash_shift __read_mostly;
*/
static LIST_HEAD(inode_lru);
-static struct hlist_head *inode_hashtable __read_mostly;
+static DEFINE_SPINLOCK(inode_lru_lock);
-/*
- * A simple spinlock to protect the list manipulations.
- *
- * NOTE! You also have to own the lock if you change
- * the i_state of an inode while it is in use..
- */
-DEFINE_SPINLOCK(inode_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
/*
* iprune_sem provides exclusion between the icache shrinking and the
@@ -95,6 +125,14 @@ DEFINE_SPINLOCK(inode_lock);
static DECLARE_RWSEM(iprune_sem);
/*
+ * Empty aops. Can be used for the cases where the user does not
+ * define any of the address_space operations.
+ */
+const struct address_space_operations empty_aops = {
+};
+EXPORT_SYMBOL(empty_aops);
+
+/*
* Statistics gathering..
*/
struct inodes_stat_t inodes_stat;
@@ -136,15 +174,6 @@ int proc_nr_inodes(ctl_table *table, int write,
}
#endif
-static void wake_up_inode(struct inode *inode)
-{
- /*
- * Prevent speculative execution through spin_unlock(&inode_lock);
- */
- smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
-}
-
/**
 * inode_init_always - perform inode structure initialisation
* @sb: superblock inode belongs to
@@ -155,7 +184,6 @@ static void wake_up_inode(struct inode *inode)
*/
int inode_init_always(struct super_block *sb, struct inode *inode)
{
- static const struct address_space_operations empty_aops;
static const struct inode_operations empty_iops;
static const struct file_operations empty_fops;
struct address_space *const mapping = &inode->i_data;
@@ -335,7 +363,7 @@ static void init_once(void *foo)
}
/*
- * inode_lock must be held
+ * inode->i_lock must be held
*/
void __iget(struct inode *inode)
{
@@ -353,23 +381,22 @@ EXPORT_SYMBOL(ihold);
static void inode_lru_list_add(struct inode *inode)
{
+ spin_lock(&inode_lru_lock);
if (list_empty(&inode->i_lru)) {
list_add(&inode->i_lru, &inode_lru);
inodes_stat.nr_unused++;
}
+ spin_unlock(&inode_lru_lock);
}
static void inode_lru_list_del(struct inode *inode)
{
+ spin_lock(&inode_lru_lock);
if (!list_empty(&inode->i_lru)) {
list_del_init(&inode->i_lru);
inodes_stat.nr_unused--;
}
-}
-
-static inline void __inode_sb_list_add(struct inode *inode)
-{
- list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+ spin_unlock(&inode_lru_lock);
}
/**
@@ -378,15 +405,17 @@ static inline void __inode_sb_list_add(struct inode *inode)
*/
void inode_sb_list_add(struct inode *inode)
{
- spin_lock(&inode_lock);
- __inode_sb_list_add(inode);
- spin_unlock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
+ list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+ spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);
-static inline void __inode_sb_list_del(struct inode *inode)
+static inline void inode_sb_list_del(struct inode *inode)
{
+ spin_lock(&inode_sb_list_lock);
list_del_init(&inode->i_sb_list);
+ spin_unlock(&inode_sb_list_lock);
}
static unsigned long hash(struct super_block *sb, unsigned long hashval)
@@ -411,24 +440,15 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
+ spin_lock(&inode->i_lock);
hlist_add_head(&inode->i_hash, b);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
/**
- * __remove_inode_hash - remove an inode from the hash
- * @inode: inode to unhash
- *
- * Remove an inode from the superblock.
- */
-static void __remove_inode_hash(struct inode *inode)
-{
- hlist_del_init(&inode->i_hash);
-}
-
-/**
* remove_inode_hash - remove an inode from the hash
* @inode: inode to unhash
*
@@ -436,9 +456,11 @@ static void __remove_inode_hash(struct inode *inode)
*/
void remove_inode_hash(struct inode *inode)
{
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
+ spin_lock(&inode->i_lock);
hlist_del_init(&inode->i_hash);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(remove_inode_hash);
@@ -455,10 +477,29 @@ void end_writeback(struct inode *inode)
}
EXPORT_SYMBOL(end_writeback);
+/*
+ * Free the inode passed in, removing it from the lists it is still connected
+ * to. We remove any pages still attached to the inode and wait for any IO that
+ * is still in progress before finally destroying the inode.
+ *
+ * An inode must already be marked I_FREEING so that we avoid the inode being
+ * moved back onto lists if we race with other code that manipulates the lists
+ * (e.g. writeback_single_inode). The caller is responsible for setting this.
+ *
+ * An inode must already be removed from the LRU list before being evicted from
+ * the cache. This should occur atomically with setting the I_FREEING state
+ * flag, so no inodes here should ever be on the LRU when being evicted.
+ */
static void evict(struct inode *inode)
{
const struct super_operations *op = inode->i_sb->s_op;
+ BUG_ON(!(inode->i_state & I_FREEING));
+ BUG_ON(!list_empty(&inode->i_lru));
+
+ inode_wb_list_del(inode);
+ inode_sb_list_del(inode);
+
if (op->evict_inode) {
op->evict_inode(inode);
} else {
@@ -470,6 +511,15 @@ static void evict(struct inode *inode)
bd_forget(inode);
if (S_ISCHR(inode->i_mode) && inode->i_cdev)
cd_forget(inode);
+
+ remove_inode_hash(inode);
+
+ spin_lock(&inode->i_lock);
+ wake_up_bit(&inode->i_state, __I_NEW);
+ BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
+ spin_unlock(&inode->i_lock);
+
+ destroy_inode(inode);
}
/*
@@ -488,14 +538,6 @@ static void dispose_list(struct list_head *head)
list_del_init(&inode->i_lru);
evict(inode);
-
- spin_lock(&inode_lock);
- __remove_inode_hash(inode);
- __inode_sb_list_del(inode);
- spin_unlock(&inode_lock);
-
- wake_up_inode(inode);
- destroy_inode(inode);
}
}
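Taken together, the new evict() contract reads roughly like this caller-side sketch (illustrative fragment; iput_final() below follows the same sequence):

	spin_lock(&inode->i_lock);
	inode->i_state |= I_FREEING;	/* evict() BUGs if this is unset */
	inode_lru_list_del(inode);	/* i_lru must be empty on entry */
	spin_unlock(&inode->i_lock);
	evict(inode);	/* unhashes, wakes __I_NEW waiters, destroys */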
@@ -513,25 +555,23 @@ void evict_inodes(struct super_block *sb)
struct inode *inode, *next;
LIST_HEAD(dispose);
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
if (atomic_read(&inode->i_count))
continue;
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
inode->i_state |= I_FREEING;
-
- /*
- * Move the inode off the IO lists and LRU once I_FREEING is
- * set so that it won't get moved back on there if it is dirty.
- */
- list_move(&inode->i_lru, &dispose);
- list_del_init(&inode->i_wb_list);
- if (!(inode->i_state & (I_DIRTY | I_SYNC)))
- inodes_stat.nr_unused--;
+ inode_lru_list_del(inode);
+ spin_unlock(&inode->i_lock);
+ list_add(&inode->i_lru, &dispose);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
dispose_list(&dispose);
@@ -560,31 +600,30 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
struct inode *inode, *next;
LIST_HEAD(dispose);
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
if (inode->i_state & I_DIRTY && !kill_dirty) {
+ spin_unlock(&inode->i_lock);
busy = 1;
continue;
}
if (atomic_read(&inode->i_count)) {
+ spin_unlock(&inode->i_lock);
busy = 1;
continue;
}
inode->i_state |= I_FREEING;
-
- /*
- * Move the inode off the IO lists and LRU once I_FREEING is
- * set so that it won't get moved back on there if it is dirty.
- */
- list_move(&inode->i_lru, &dispose);
- list_del_init(&inode->i_wb_list);
- if (!(inode->i_state & (I_DIRTY | I_SYNC)))
- inodes_stat.nr_unused--;
+ inode_lru_list_del(inode);
+ spin_unlock(&inode->i_lock);
+ list_add(&inode->i_lru, &dispose);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
dispose_list(&dispose);
@@ -606,7 +645,7 @@ static int can_unuse(struct inode *inode)
/*
* Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lock by dispose_list().
+ * temporary list and then are freed outside inode_lru_lock by dispose_list().
*
* Any inodes which are pinned purely because of attached pagecache have their
* pagecache removed. If the inode has metadata buffers attached to
@@ -627,7 +666,7 @@ static void prune_icache(int nr_to_scan)
unsigned long reap = 0;
down_read(&iprune_sem);
- spin_lock(&inode_lock);
+ spin_lock(&inode_lru_lock);
for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
struct inode *inode;
@@ -637,53 +676,67 @@ static void prune_icache(int nr_to_scan)
inode = list_entry(inode_lru.prev, struct inode, i_lru);
/*
+ * we are inverting the inode_lru_lock/inode->i_lock here,
+ * so use a trylock. If we fail to get the lock, just move the
+ * inode to the back of the list so we don't spin on it.
+ */
+ if (!spin_trylock(&inode->i_lock)) {
+ list_move(&inode->i_lru, &inode_lru);
+ continue;
+ }
+
+ /*
* Referenced or dirty inodes are still in use. Give them
 * another pass through the LRU as we cannot reclaim them now.
*/
if (atomic_read(&inode->i_count) ||
(inode->i_state & ~I_REFERENCED)) {
list_del_init(&inode->i_lru);
+ spin_unlock(&inode->i_lock);
inodes_stat.nr_unused--;
continue;
}
/* recently referenced inodes get one more pass */
if (inode->i_state & I_REFERENCED) {
- list_move(&inode->i_lru, &inode_lru);
inode->i_state &= ~I_REFERENCED;
+ list_move(&inode->i_lru, &inode_lru);
+ spin_unlock(&inode->i_lock);
continue;
}
if (inode_has_buffers(inode) || inode->i_data.nrpages) {
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_lru_lock);
if (remove_inode_buffers(inode))
reap += invalidate_mapping_pages(&inode->i_data,
0, -1);
iput(inode);
- spin_lock(&inode_lock);
+ spin_lock(&inode_lru_lock);
if (inode != list_entry(inode_lru.next,
struct inode, i_lru))
continue; /* wrong inode or list_empty */
- if (!can_unuse(inode))
+ /* avoid lock inversions with trylock */
+ if (!spin_trylock(&inode->i_lock))
+ continue;
+ if (!can_unuse(inode)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
}
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
+ spin_unlock(&inode->i_lock);
- /*
- * Move the inode off the IO lists and LRU once I_FREEING is
- * set so that it won't get moved back on there if it is dirty.
- */
list_move(&inode->i_lru, &freeable);
- list_del_init(&inode->i_wb_list);
inodes_stat.nr_unused--;
}
if (current_is_kswapd())
__count_vm_events(KSWAPD_INODESTEAL, reap);
else
__count_vm_events(PGINODESTEAL, reap);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_lru_lock);
dispose_list(&freeable);
up_read(&iprune_sem);
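The trylock dance above is the standard answer to walking a list whose lock now nests inside the per-object lock; condensed to a sketch (assuming the locks as named in this patch, with the function wrapper hypothetical):

static void example_lru_walk(void)
{
	struct inode *inode, *next;

	spin_lock(&inode_lru_lock);
	list_for_each_entry_safe(inode, next, &inode_lru, i_lru) {
		if (!spin_trylock(&inode->i_lock)) {
			/* inverted order: never spin, rotate and retry later */
			list_move(&inode->i_lru, &inode_lru);
			continue;
		}
		/* ... inspect inode->i_state under i_lock ... */
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&inode_lru_lock);
}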
@@ -732,15 +785,21 @@ static struct inode *find_inode(struct super_block *sb,
repeat:
hlist_for_each_entry(inode, node, head, i_hash) {
- if (inode->i_sb != sb)
+ spin_lock(&inode->i_lock);
+ if (inode->i_sb != sb) {
+ spin_unlock(&inode->i_lock);
continue;
- if (!test(inode, data))
+ }
+ if (!test(inode, data)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
__wait_on_freeing_inode(inode);
goto repeat;
}
__iget(inode);
+ spin_unlock(&inode->i_lock);
return inode;
}
return NULL;
@@ -758,15 +817,21 @@ static struct inode *find_inode_fast(struct super_block *sb,
repeat:
hlist_for_each_entry(inode, node, head, i_hash) {
- if (inode->i_ino != ino)
+ spin_lock(&inode->i_lock);
+ if (inode->i_ino != ino) {
+ spin_unlock(&inode->i_lock);
continue;
- if (inode->i_sb != sb)
+ }
+ if (inode->i_sb != sb) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
__wait_on_freeing_inode(inode);
goto repeat;
}
__iget(inode);
+ spin_unlock(&inode->i_lock);
return inode;
}
return NULL;
@@ -826,19 +891,26 @@ struct inode *new_inode(struct super_block *sb)
{
struct inode *inode;
- spin_lock_prefetch(&inode_lock);
+ spin_lock_prefetch(&inode_sb_list_lock);
inode = alloc_inode(sb);
if (inode) {
- spin_lock(&inode_lock);
- __inode_sb_list_add(inode);
+ spin_lock(&inode->i_lock);
inode->i_state = 0;
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ inode_sb_list_add(inode);
}
return inode;
}
EXPORT_SYMBOL(new_inode);
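new_inode() now publishes i_state under i_lock before the inode joins the sb list; from a caller's side nothing changes. A typical (hypothetical) pseudo-filesystem allocation:

	struct inode *inode = new_inode(sb);

	if (!inode)
		return -ENOMEM;
	inode->i_ino = get_next_ino();	/* or an fs-specific number */
	inode_init_owner(inode, dir, mode);
	/* ... hook up i_op/i_fop, then d_instantiate() ... */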
+/**
+ * unlock_new_inode - clear the I_NEW state and wake up any waiters
+ * @inode: new inode to unlock
+ *
+ * Called when the inode is fully initialised to clear the new state of the
+ * inode and wake up anyone waiting for the inode to finish initialisation.
+ */
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -858,51 +930,67 @@ void unlock_new_inode(struct inode *inode)
}
}
#endif
- /*
- * This is special! We do not need the spinlock when clearing I_NEW,
- * because we're guaranteed that nobody else tries to do anything about
- * the state of the inode when it is locked, as we just created it (so
- * there can be no old holders that haven't tested I_NEW).
- * However we must emit the memory barrier so that other CPUs reliably
- * see the clearing of I_NEW after the other inode initialisation has
- * completed.
- */
- smp_mb();
+ spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
inode->i_state &= ~I_NEW;
- wake_up_inode(inode);
+ wake_up_bit(&inode->i_state, __I_NEW);
+ spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
-/*
- * This is called without the inode lock held.. Be careful.
+/**
+ * iget5_locked - obtain an inode from a mounted file system
+ * @sb: super block of file system
+ * @hashval: hash value (usually inode number) to get
+ * @test: callback used for comparisons between inodes
+ * @set: callback used to initialize a new struct inode
+ * @data: opaque data pointer to pass to @test and @set
*
- * We no longer cache the sb_flags in i_flags - see fs.h
- * -- rmk@arm.uk.linux.org
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * and if present return it with an increased reference count. This is
+ * a generalized version of iget_locked() for file systems where the inode
+ * number is not sufficient for unique identification of an inode.
+ *
+ * If the inode is not in cache, allocate a new inode and return it locked,
+ * hashed, and with the I_NEW flag set. The file system gets to fill it in
+ * before unlocking it via unlock_new_inode().
+ *
+ * Note both @test and @set are called with the inode_hash_lock held, so can't
+ * sleep.
*/
-static struct inode *get_new_inode(struct super_block *sb,
- struct hlist_head *head,
- int (*test)(struct inode *, void *),
- int (*set)(struct inode *, void *),
- void *data)
+struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data)
{
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
+ spin_lock(&inode_hash_lock);
+ inode = find_inode(sb, head, test, data);
+ spin_unlock(&inode_hash_lock);
+
+ if (inode) {
+ wait_on_inode(inode);
+ return inode;
+ }
+
inode = alloc_inode(sb);
if (inode) {
struct inode *old;
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
old = find_inode(sb, head, test, data);
if (!old) {
if (set(inode, data))
goto set_failed;
- hlist_add_head(&inode->i_hash, head);
- __inode_sb_list_add(inode);
+ spin_lock(&inode->i_lock);
inode->i_state = I_NEW;
- spin_unlock(&inode_lock);
+ hlist_add_head(&inode->i_hash, head);
+ spin_unlock(&inode->i_lock);
+ inode_sb_list_add(inode);
+ spin_unlock(&inode_hash_lock);
/* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
@@ -915,7 +1003,7 @@ static struct inode *get_new_inode(struct super_block *sb,
* us. Use the old inode instead of the one we just
* allocated.
*/
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_hash_lock);
destroy_inode(inode);
inode = old;
wait_on_inode(inode);
@@ -923,33 +1011,53 @@ static struct inode *get_new_inode(struct super_block *sb,
return inode;
set_failed:
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_hash_lock);
destroy_inode(inode);
return NULL;
}
+EXPORT_SYMBOL(iget5_locked);
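A sketch of how a filesystem would drive the now-exported iget5_locked(); MYFS_I(), the myfs_* names and the objid field are all hypothetical:

struct myfs_key { u64 objid; };

static int myfs_test(struct inode *inode, void *data)
{
	struct myfs_key *key = data;

	return MYFS_I(inode)->objid == key->objid;
}

static int myfs_set(struct inode *inode, void *data)
{
	struct myfs_key *key = data;

	/* must not sleep: called with inode_hash_lock held */
	MYFS_I(inode)->objid = key->objid;
	return 0;
}

static struct inode *myfs_iget(struct super_block *sb, u64 objid)
{
	struct myfs_key key = { .objid = objid };
	struct inode *inode;

	inode = iget5_locked(sb, (unsigned long)objid,
			     myfs_test, myfs_set, &key);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* ... read the object from disk, fill the inode in ... */
		unlock_new_inode(inode);
	}
	return inode;
}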
-/*
- * get_new_inode_fast is the fast path version of get_new_inode, see the
- * comment at iget_locked for details.
+/**
+ * iget_locked - obtain an inode from a mounted file system
+ * @sb: super block of file system
+ * @ino: inode number to get
+ *
+ * Search for the inode specified by @ino in the inode cache and if present
+ * return it with an increased reference count. This is for file systems
+ * where the inode number is sufficient for unique identification of an inode.
+ *
+ * If the inode is not in cache, allocate a new inode and return it locked,
+ * hashed, and with the I_NEW flag set. The file system gets to fill it in
+ * before unlocking it via unlock_new_inode().
*/
-static struct inode *get_new_inode_fast(struct super_block *sb,
- struct hlist_head *head, unsigned long ino)
+struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
+ struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
+ spin_lock(&inode_hash_lock);
+ inode = find_inode_fast(sb, head, ino);
+ spin_unlock(&inode_hash_lock);
+ if (inode) {
+ wait_on_inode(inode);
+ return inode;
+ }
+
inode = alloc_inode(sb);
if (inode) {
struct inode *old;
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
/* We released the lock, so.. */
old = find_inode_fast(sb, head, ino);
if (!old) {
inode->i_ino = ino;
- hlist_add_head(&inode->i_hash, head);
- __inode_sb_list_add(inode);
+ spin_lock(&inode->i_lock);
inode->i_state = I_NEW;
- spin_unlock(&inode_lock);
+ hlist_add_head(&inode->i_hash, head);
+ spin_unlock(&inode->i_lock);
+ inode_sb_list_add(inode);
+ spin_unlock(&inode_hash_lock);
/* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
@@ -962,13 +1070,14 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
* us. Use the old inode instead of the one we just
* allocated.
*/
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_hash_lock);
destroy_inode(inode);
inode = old;
wait_on_inode(inode);
}
return inode;
}
+EXPORT_SYMBOL(iget_locked);
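The caller contract is unchanged: a cached inode comes back fully set up, a new one comes back locked with I_NEW and must be completed. A minimal sketch (wrapper name and error value illustrative):

static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* cache hit, fully initialised */
	/* ... fill the inode from disk ... */
	unlock_new_inode(inode);
	return inode;
}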
/*
* search the inode cache for a matching inode number.
@@ -983,10 +1092,14 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino)
struct hlist_node *node;
struct inode *inode;
+ spin_lock(&inode_hash_lock);
hlist_for_each_entry(inode, node, b, i_hash) {
- if (inode->i_ino == ino && inode->i_sb == sb)
+ if (inode->i_ino == ino && inode->i_sb == sb) {
+ spin_unlock(&inode_hash_lock);
return 0;
+ }
}
+ spin_unlock(&inode_hash_lock);
return 1;
}
@@ -1016,7 +1129,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
static unsigned int counter;
ino_t res;
- spin_lock(&inode_lock);
spin_lock(&iunique_lock);
do {
if (counter <= max_reserved)
@@ -1024,7 +1136,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
res = counter++;
} while (!test_inode_iunique(sb, res));
spin_unlock(&iunique_lock);
- spin_unlock(&inode_lock);
return res;
}
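iunique() no longer needs a global inode lock; a caller just seeds i_ino for a synthetic inode, e.g. (floor value hypothetical):

	inode->i_ino = iunique(sb, MYFS_MAX_RESERVED_INO);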
@@ -1032,116 +1143,50 @@ EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
- spin_lock(&inode_lock);
- if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
__iget(inode);
- else
+ spin_unlock(&inode->i_lock);
+ } else {
+ spin_unlock(&inode->i_lock);
/*
 * Handle the case where s_op->clear_inode has not been
* called yet, and somebody is calling igrab
* while the inode is getting freed.
*/
inode = NULL;
- spin_unlock(&inode_lock);
+ }
return inode;
}
EXPORT_SYMBOL(igrab);
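igrab() keeps its NULL-on-teardown semantics; only the lock protecting the I_FREEING test changed. The usual caller pattern, as a sketch:

static int example_use(struct inode *candidate)
{
	struct inode *inode = igrab(candidate);

	if (!inode)
		return -ESTALE;	/* candidate was already being freed */
	/* ... inode is safe to use here ... */
	iput(inode);	/* balance the reference igrab() took */
	return 0;
}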
/**
- * ifind - internal function, you want ilookup5() or iget5().
- * @sb: super block of file system to search
- * @head: the head of the list to search
- * @test: callback used for comparisons between inodes
- * @data: opaque data pointer to pass to @test
- * @wait: if true wait for the inode to be unlocked, if false do not
- *
- * ifind() searches for the inode specified by @data in the inode
- * cache. This is a generalized version of ifind_fast() for file systems where
- * the inode number is not sufficient for unique identification of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
- *
- * Note, @test is called with the inode_lock held, so can't sleep.
- */
-static struct inode *ifind(struct super_block *sb,
- struct hlist_head *head, int (*test)(struct inode *, void *),
- void *data, const int wait)
-{
- struct inode *inode;
-
- spin_lock(&inode_lock);
- inode = find_inode(sb, head, test, data);
- if (inode) {
- spin_unlock(&inode_lock);
- if (likely(wait))
- wait_on_inode(inode);
- return inode;
- }
- spin_unlock(&inode_lock);
- return NULL;
-}
-
-/**
- * ifind_fast - internal function, you want ilookup() or iget().
- * @sb: super block of file system to search
- * @head: head of the list to search
- * @ino: inode number to search for
- *
- * ifind_fast() searches for the inode @ino in the inode cache. This is for
- * file systems where the inode number is sufficient for unique identification
- * of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
- */
-static struct inode *ifind_fast(struct super_block *sb,
- struct hlist_head *head, unsigned long ino)
-{
- struct inode *inode;
-
- spin_lock(&inode_lock);
- inode = find_inode_fast(sb, head, ino);
- if (inode) {
- spin_unlock(&inode_lock);
- wait_on_inode(inode);
- return inode;
- }
- spin_unlock(&inode_lock);
- return NULL;
-}
-
-/**
* ilookup5_nowait - search for an inode in the inode cache
* @sb: super block of file system to search
* @hashval: hash value (usually inode number) to search for
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
*
- * ilookup5() uses ifind() to search for the inode specified by @hashval and
- * @data in the inode cache. This is a generalized version of ilookup() for
- * file systems where the inode number is not sufficient for unique
- * identification of an inode.
- *
+ * Search for the inode specified by @hashval and @data in the inode cache.
* If the inode is in the cache, the inode is returned with an incremented
- * reference count. Note, the inode lock is not waited upon so you have to be
- * very careful what you do with the returned inode. You probably should be
- * using ilookup5() instead.
+ * reference count.
*
- * Otherwise NULL is returned.
+ * Note: I_NEW is not waited upon so you have to be very careful what you do
+ * with the returned inode. You probably should be using ilookup5() instead.
*
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note2: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *inode;
+
+ spin_lock(&inode_hash_lock);
+ inode = find_inode(sb, head, test, data);
+ spin_unlock(&inode_hash_lock);
- return ifind(sb, head, test, data, 0);
+ return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
@@ -1152,24 +1197,24 @@ EXPORT_SYMBOL(ilookup5_nowait);
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
*
- * ilookup5() uses ifind() to search for the inode specified by @hashval and
- * @data in the inode cache. This is a generalized version of ilookup() for
- * file systems where the inode number is not sufficient for unique
- * identification of an inode.
- *
- * If the inode is in the cache, the inode lock is waited upon and the inode is
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * and if the inode is in the cache, return the inode with an incremented
+ * reference count. Waits on I_NEW before returning the inode.
*
- * Otherwise NULL is returned.
+ * This is a generalized version of ilookup() for file systems where the
+ * inode number is not sufficient for unique identification of an inode.
*
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note: @test is called with the inode_hash_lock held, so can't sleep.
*/
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
- struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
- return ifind(sb, head, test, data, 1);
+ if (inode)
+ wait_on_inode(inode);
+ return inode;
}
EXPORT_SYMBOL(ilookup5);
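The split makes the relationship explicit: ilookup5() is ilookup5_nowait() plus a wait for I_NEW to clear. Reusing the hypothetical myfs_test/key from the earlier sketch:

	/* may return an inode still marked I_NEW */
	inode = ilookup5_nowait(sb, hashval, myfs_test, &key);

	/* waits for I_NEW to clear before returning */
	inode = ilookup5(sb, hashval, myfs_test, &key);

Both return NULL on a miss and take a reference the caller must drop with iput().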
@@ -1178,91 +1223,23 @@ EXPORT_SYMBOL(ilookup5);
* @sb: super block of file system to search
* @ino: inode number to search for
*
- * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
- * This is for file systems where the inode number is sufficient for unique
- * identification of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
+ * Search for the inode @ino in the inode cache, and if the inode is in the
+ * cache, return it with an incremented reference count.
*/
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
struct hlist_head *head = inode_hashtable + hash(sb, ino);
-
- return ifind_fast(sb, head, ino);
-}
-EXPORT_SYMBOL(ilookup);
-
-/**
- * iget5_locked - obtain an inode from a mounted file system
- * @sb: super block of file system
- * @hashval: hash value (usually inode number) to get
- * @test: callback used for comparisons between inodes
- * @set: callback used to initialize a new struct inode
- * @data: opaque data pointer to pass to @test and @set
- *
- * iget5_locked() uses ifind() to search for the inode specified by @hashval
- * and @data in the inode cache and if present it is returned with an increased
- * reference count. This is a generalized version of iget_locked() for file
- * systems where the inode number is not sufficient for unique identification
- * of an inode.
- *
- * If the inode is not in cache, get_new_inode() is called to allocate a new
- * inode and this is returned locked, hashed, and with the I_NEW flag set. The
- * file system gets to fill it in before unlocking it via unlock_new_inode().
- *
- * Note both @test and @set are called with the inode_lock held, so can't sleep.
- */
-struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
- int (*test)(struct inode *, void *),
- int (*set)(struct inode *, void *), void *data)
-{
- struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
- inode = ifind(sb, head, test, data, 1);
- if (inode)
- return inode;
- /*
- * get_new_inode() will do the right thing, re-trying the search
- * in case it had to block at any point.
- */
- return get_new_inode(sb, head, test, set, data);
-}
-EXPORT_SYMBOL(iget5_locked);
-
-/**
- * iget_locked - obtain an inode from a mounted file system
- * @sb: super block of file system
- * @ino: inode number to get
- *
- * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
- * the inode cache and if present it is returned with an increased reference
- * count. This is for file systems where the inode number is sufficient for
- * unique identification of an inode.
- *
- * If the inode is not in cache, get_new_inode_fast() is called to allocate a
- * new inode and this is returned locked, hashed, and with the I_NEW flag set.
- * The file system gets to fill it in before unlocking it via
- * unlock_new_inode().
- */
-struct inode *iget_locked(struct super_block *sb, unsigned long ino)
-{
- struct hlist_head *head = inode_hashtable + hash(sb, ino);
- struct inode *inode;
+ spin_lock(&inode_hash_lock);
+ inode = find_inode_fast(sb, head, ino);
+ spin_unlock(&inode_hash_lock);
- inode = ifind_fast(sb, head, ino);
if (inode)
- return inode;
- /*
- * get_new_inode_fast() will do the right thing, re-trying the search
- * in case it had to block at any point.
- */
- return get_new_inode_fast(sb, head, ino);
+ wait_on_inode(inode);
+ return inode;
}
-EXPORT_SYMBOL(iget_locked);
+EXPORT_SYMBOL(ilookup);
int insert_inode_locked(struct inode *inode)
{
@@ -1270,27 +1247,33 @@ int insert_inode_locked(struct inode *inode)
ino_t ino = inode->i_ino;
struct hlist_head *head = inode_hashtable + hash(sb, ino);
- inode->i_state |= I_NEW;
while (1) {
struct hlist_node *node;
struct inode *old = NULL;
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
hlist_for_each_entry(old, node, head, i_hash) {
if (old->i_ino != ino)
continue;
if (old->i_sb != sb)
continue;
- if (old->i_state & (I_FREEING|I_WILL_FREE))
+ spin_lock(&old->i_lock);
+ if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+ spin_unlock(&old->i_lock);
continue;
+ }
break;
}
if (likely(!node)) {
+ spin_lock(&inode->i_lock);
+ inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
return 0;
}
__iget(old);
- spin_unlock(&inode_lock);
+ spin_unlock(&old->i_lock);
+ spin_unlock(&inode_hash_lock);
wait_on_inode(old);
if (unlikely(!inode_unhashed(old))) {
iput(old);
@@ -1307,29 +1290,34 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
struct super_block *sb = inode->i_sb;
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
- inode->i_state |= I_NEW;
-
while (1) {
struct hlist_node *node;
struct inode *old = NULL;
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
hlist_for_each_entry(old, node, head, i_hash) {
if (old->i_sb != sb)
continue;
if (!test(old, data))
continue;
- if (old->i_state & (I_FREEING|I_WILL_FREE))
+ spin_lock(&old->i_lock);
+ if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+ spin_unlock(&old->i_lock);
continue;
+ }
break;
}
if (likely(!node)) {
+ spin_lock(&inode->i_lock);
+ inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
return 0;
}
__iget(old);
- spin_unlock(&inode_lock);
+ spin_unlock(&old->i_lock);
+ spin_unlock(&inode_hash_lock);
wait_on_inode(old);
if (unlikely(!inode_unhashed(old))) {
iput(old);
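Both loops implement the same protocol; from a filesystem's create path it looks like this fragment (error handling illustrative):

	inode->i_ino = ino;
	err = insert_inode_locked(inode);
	if (err) {
		/* an alias with this number is live in the hash */
		iput(inode);
		return err;	/* -EBUSY */
	}
	/* hashed with I_NEW set; finish setup, then: */
	unlock_new_inode(inode);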
@@ -1374,47 +1362,35 @@ static void iput_final(struct inode *inode)
const struct super_operations *op = inode->i_sb->s_op;
int drop;
+ WARN_ON(inode->i_state & I_NEW);
+
if (op && op->drop_inode)
drop = op->drop_inode(inode);
else
drop = generic_drop_inode(inode);
+ if (!drop && (sb->s_flags & MS_ACTIVE)) {
+ inode->i_state |= I_REFERENCED;
+ if (!(inode->i_state & (I_DIRTY|I_SYNC)))
+ inode_lru_list_add(inode);
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
if (!drop) {
- if (sb->s_flags & MS_ACTIVE) {
- inode->i_state |= I_REFERENCED;
- if (!(inode->i_state & (I_DIRTY|I_SYNC))) {
- inode_lru_list_add(inode);
- }
- spin_unlock(&inode_lock);
- return;
- }
- WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_WILL_FREE;
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
write_inode_now(inode, 1);
- spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state &= ~I_WILL_FREE;
- __remove_inode_hash(inode);
}
- WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
-
- /*
- * Move the inode off the IO lists and LRU once I_FREEING is
- * set so that it won't get moved back on there if it is dirty.
- */
inode_lru_list_del(inode);
- list_del_init(&inode->i_wb_list);
+ spin_unlock(&inode->i_lock);
- __inode_sb_list_del(inode);
- spin_unlock(&inode_lock);
evict(inode);
- remove_inode_hash(inode);
- wake_up_inode(inode);
- BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
- destroy_inode(inode);
}
/**
@@ -1431,7 +1407,7 @@ void iput(struct inode *inode)
if (inode) {
BUG_ON(inode->i_state & I_CLEAR);
- if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
+ if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
iput_final(inode);
}
}
@@ -1610,9 +1586,8 @@ EXPORT_SYMBOL(inode_wait);
* to recheck inode state.
*
* It doesn't matter if I_NEW is not set initially, a call to
- * wake_up_inode() after removing from the hash list will DTRT.
- *
- * This is called with inode_lock held.
+ * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
+ * will DTRT.
*/
static void __wait_on_freeing_inode(struct inode *inode)
{
@@ -1620,10 +1595,11 @@ static void __wait_on_freeing_inode(struct inode *inode)
DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
wq = bit_waitqueue(&inode->i_state, __I_NEW);
prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
schedule();
finish_wait(wq, &wait.wait);
- spin_lock(&inode_lock);
+ spin_lock(&inode_hash_lock);
}
static __initdata unsigned long ihash_entries;
@@ -1733,3 +1709,22 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
+
+/**
+ * inode_owner_or_capable - check current task permissions to inode
+ * @inode: inode being checked
+ *
+ * Return true if current either has CAP_FOWNER in the inode's
+ * user namespace, or owns the file.
+ */
+bool inode_owner_or_capable(const struct inode *inode)
+{
+ struct user_namespace *ns = inode_userns(inode);
+
+ if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
+ return true;
+ if (ns_capable(ns, CAP_FOWNER))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(inode_owner_or_capable);
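The is_owner_or_cap() conversions scattered through this series all reduce to the same guard, e.g. in a SETFLAGS-style ioctl (sketch):

	if (!inode_owner_or_capable(inode))
		return -EACCES;	/* neither owner nor CAP_FOWNER-capable */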
diff --git a/fs/internal.h b/fs/internal.h
index 8318059b42c6..b29c46e4e32f 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -125,6 +125,13 @@ extern long do_handle_open(int mountdirfd,
/*
* inode.c
*/
+extern spinlock_t inode_sb_list_lock;
+
+/*
+ * fs-writeback.c
+ */
+extern void inode_wb_list_del(struct inode *inode);
+
extern int get_nr_dirty_inodes(void);
extern void evict_inodes(struct super_block *);
extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index a0f3833c0dbf..3db5ba4568fc 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1158,7 +1158,6 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations isofs_aops = {
.readpage = isofs_readpage,
- .sync_page = block_sync_page,
.bmap = _isofs_bmap
};
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 34a4861c14b8..69b180459463 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -20,6 +20,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
+#include <linux/blkdev.h>
/*
* Default IO end handler for temporary BJ_IO buffer_heads.
@@ -294,7 +295,7 @@ void journal_commit_transaction(journal_t *journal)
int first_tag = 0;
int tag_flag;
int i;
- int write_op = WRITE_SYNC;
+ struct blk_plug plug;
/*
* First job: lock down the current transaction and wait for
@@ -327,13 +328,6 @@ void journal_commit_transaction(journal_t *journal)
spin_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;
- /*
- * Use plugged writes here, since we want to submit several before
- * we unplug the device. We don't do explicit unplugging in here,
- * instead we rely on sync_buffer() doing the unplug for us.
- */
- if (commit_transaction->t_synchronous_commit)
- write_op = WRITE_SYNC_PLUG;
spin_lock(&commit_transaction->t_handle_lock);
while (commit_transaction->t_updates) {
DEFINE_WAIT(wait);
@@ -368,7 +362,7 @@ void journal_commit_transaction(journal_t *journal)
* we do not require it to remember exactly which old buffers it
* has reserved. This is consistent with the existing behaviour
* that multiple journal_get_write_access() calls to the same
- * buffer are perfectly permissable.
+ * buffer are perfectly permissible.
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
@@ -418,8 +412,10 @@ void journal_commit_transaction(journal_t *journal)
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
*/
+ blk_start_plug(&plug);
err = journal_submit_data_buffers(journal, commit_transaction,
- write_op);
+ WRITE_SYNC);
+ blk_finish_plug(&plug);
/*
* Wait for all previously submitted IO to complete.
@@ -480,7 +476,9 @@ void journal_commit_transaction(journal_t *journal)
err = 0;
}
- journal_write_revoke_records(journal, commit_transaction, write_op);
+ blk_start_plug(&plug);
+
+ journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
/*
* If we found any dirty or locked buffers, then we should have
@@ -650,7 +648,7 @@ start_journal_io:
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
- submit_bh(write_op, bh);
+ submit_bh(WRITE_SYNC, bh);
}
cond_resched();
@@ -661,6 +659,8 @@ start_journal_io:
}
}
+ blk_finish_plug(&plug);
+
/* Lo and behold: we have just managed to send a transaction to
the log. Before we can commit it, wait for the IO so far to
complete. Control buffers being written are on the
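On-stack plugging is what replaces the old WRITE_SYNC_PLUG hint: batch submissions between blk_start_plug() and blk_finish_plug() and the block layer coalesces them. Reduced to a sketch (helper name hypothetical):

static void example_submit_batch(struct buffer_head **bhs, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bh(WRITE_SYNC, bhs[i]);	/* queued on the plug */
	blk_finish_plug(&plug);			/* issue the whole batch */
}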
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index eb11601f2e00..b3713afaaa9e 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -770,7 +770,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+ printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
@@ -831,7 +831,7 @@ journal_t * journal_init_inode (struct inode *inode)
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+ printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index d29018307e2e..305a90763154 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -71,7 +71,7 @@
* switching hash tables under them. For operations on the lists of entries in
* the hash table j_revoke_lock is used.
*
- * Finally, also replay code uses the hash tables but at this moment noone else
+ * Finally, also replay code uses the hash tables but at this moment no one else
* can touch them (filesystem isn't mounted yet) and hence no locking is
* needed.
*/
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 5b2e4c30a2a1..60d2319651b2 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1392,7 +1392,7 @@ int journal_stop(handle_t *handle)
* by 30x or more...
*
* We try and optimize the sleep time against what the underlying disk
- * can do, instead of having a static sleep time. This is usefull for
+ * can do, instead of having a static sleep time. This is useful for
* the case where our storage is so fast that it is more optimal to go
* ahead and force a flush and wait for the transaction to be committed
* than it is to wait for an arbitrary amount of time for new writers to
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index f3ad1598b201..20af62f4304b 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -137,9 +137,9 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER &&
!JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
- ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
+ ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
else
- ret = submit_bh(WRITE_SYNC_PLUG, bh);
+ ret = submit_bh(WRITE_SYNC, bh);
*cbh = bh;
return ret;
@@ -329,7 +329,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
int tag_bytes = journal_tag_bytes(journal);
struct buffer_head *cbh = NULL; /* For transactional checksums */
__u32 crc32_sum = ~0;
- int write_op = WRITE_SYNC;
+ struct blk_plug plug;
/*
* First job: lock down the current transaction and wait for
@@ -363,13 +363,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
write_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;
- /*
- * Use plugged writes here, since we want to submit several before
- * we unplug the device. We don't do explicit unplugging in here,
- * instead we rely on sync_buffer() doing the unplug for us.
- */
- if (commit_transaction->t_synchronous_commit)
- write_op = WRITE_SYNC_PLUG;
trace_jbd2_commit_locking(journal, commit_transaction);
stats.run.rs_wait = commit_transaction->t_max_wait;
stats.run.rs_locked = jiffies;
@@ -410,7 +403,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* we do not require it to remember exactly which old buffers it
* has reserved. This is consistent with the existing behaviour
* that multiple jbd2_journal_get_write_access() calls to the same
- * buffer are perfectly permissable.
+ * buffer are perfectly permissible.
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
@@ -469,8 +462,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (err)
jbd2_journal_abort(journal, err);
+ blk_start_plug(&plug);
jbd2_journal_write_revoke_records(journal, commit_transaction,
- write_op);
+ WRITE_SYNC);
+ blk_finish_plug(&plug);
jbd_debug(3, "JBD: commit phase 2\n");
@@ -497,6 +492,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
err = 0;
descriptor = NULL;
bufs = 0;
+ blk_start_plug(&plug);
while (commit_transaction->t_buffers) {
/* Find the next buffer to be journaled... */
@@ -658,7 +654,7 @@ start_journal_io:
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
- submit_bh(write_op, bh);
+ submit_bh(WRITE_SYNC, bh);
}
cond_resched();
stats.run.rs_blocks_logged += bufs;
@@ -699,6 +695,8 @@ start_journal_io:
__jbd2_journal_abort_hard(journal);
}
+ blk_finish_plug(&plug);
+
/* Lo and behold: we have just managed to send a transaction to
the log. Before we can commit it, wait for the IO so far to
complete. Control buffers being written are on the
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 90407b8fece7..aba8ebaec25c 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -917,7 +917,7 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+ printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
@@ -983,7 +983,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+ printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 9ad321fd63fd..69fd93588118 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -71,7 +71,7 @@
* switching hash tables under them. For operations on the lists of entries in
* the hash table j_revoke_lock is used.
*
- * Finally, also replay code uses the hash tables but at this moment noone else
+ * Finally, also replay code uses the hash tables but at this moment no one else
* can touch them (filesystem isn't mounted yet) and hence no locking is
* needed.
*/
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 1d1191050f99..05fa77a23711 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1403,7 +1403,7 @@ int jbd2_journal_stop(handle_t *handle)
/*
* Once we drop t_updates, if it goes to zero the transaction
- * could start commiting on us and eventually disappear. So
+ * could start committing on us and eventually disappear. So
* once we do this, we must not dereference transaction
* pointer again.
*/
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO
index 5d3ea4070f01..ca28964abd4b 100644
--- a/fs/jffs2/TODO
+++ b/fs/jffs2/TODO
@@ -11,7 +11,7 @@
- checkpointing (do we need this? scan is quite fast)
- make the scan code populate real inodes so read_inode just after
mount doesn't have to read the flash twice for large files.
- Make this a per-inode option, changable with chattr, so you can
+ Make this a per-inode option, changeable with chattr, so you can
decide which inodes should be in-core immediately after mount.
- test, test, test
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 95b79672150a..828a0e1ea438 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -402,7 +402,7 @@ static int jffs2_acl_setxattr(struct dentry *dentry, const char *name,
if (name[0] != '\0')
return -EINVAL;
- if (!is_owner_or_cap(dentry->d_inode))
+ if (!inode_owner_or_capable(dentry->d_inode))
return -EPERM;
if (value) {
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index fd05a0b9431d..5a001020c542 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -40,12 +40,13 @@ static z_stream inf_strm, def_strm;
static int __init alloc_workspaces(void)
{
- def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+ def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
+ MAX_MEM_LEVEL));
if (!def_strm.workspace) {
- printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize());
+ printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
return -ENOMEM;
}
- D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize()));
+ D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)));
inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
if (!inf_strm.workspace) {
printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index d32ee9412cb9..2ab1a0d91210 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -24,7 +24,7 @@
*
* Returns: 0 if the data CRC is correct;
* 1 - if incorrect;
- * error code if an error occured.
+ * error code if an error occurred.
*/
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 800171dca53b..e537fb0e0184 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -121,7 +121,7 @@ int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
temp->nodetype = ri->nodetype;
temp->inode = ri->ino;
temp->version = ri->version;
- temp->offset = cpu_to_je32(ofs); /* relative offset from the begining of the jeb */
+ temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */
temp->totlen = ri->totlen;
temp->next = NULL;
@@ -139,7 +139,7 @@ int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *r
temp->nodetype = rd->nodetype;
temp->totlen = rd->totlen;
- temp->offset = cpu_to_je32(ofs); /* relative from the begining of the jeb */
+ temp->offset = cpu_to_je32(ofs); /* relative from the beginning of the jeb */
temp->pino = rd->pino;
temp->version = rd->version;
temp->ino = rd->ino;
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 07ee1546b2fa..4515bea0268f 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1116,7 +1116,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
/*
* On NAND we try to mark this block bad. If the block was erased more
- * than MAX_ERASE_FAILURES we mark it finaly bad.
+ * than MAX_ERASE_FAILURES we mark it finally bad.
* Don't care about failures. This block remains on the erase-pending
* or badblock list as long as nobody manipulates the flash with
* a bootloader or something like that.
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 4f9cc0482949..3e93cdd19005 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -31,7 +31,7 @@
* is used to release xattr name/value pair and detach from c->xattrindex.
* reclaim_xattr_datum(c)
* is used to reclaim xattr name/value pairs on the xattr name/value pair cache when
- * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold
+ * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold
* is hard coded as 32KiB.
* do_verify_xattr_datum(c, xd)
 * is used to load the xdatum information without the name/value pair from the medium.
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9978803ceedc..eddbb373209e 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -352,7 +352,6 @@ const struct address_space_operations jfs_aops = {
.readpages = jfs_readpages,
.writepage = jfs_writepage,
.writepages = jfs_writepages,
- .sync_page = block_sync_page,
.write_begin = jfs_write_begin,
.write_end = nobh_write_end,
.bmap = jfs_bmap,
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index afe222bf300f..6f98a1866776 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -72,7 +72,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (err)
return err;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EACCES;
goto setflags_out;
}
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index c92ea3b3ea5e..4496872cf4e7 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -1649,7 +1649,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
}
/* search the tree within the dmap control page for
- * sufficent free space. if sufficient free space is found,
+ * sufficient free space. if sufficient free space is found,
* dbFindLeaf() returns the index of the leaf at which
* free space was found.
*/
@@ -2744,7 +2744,7 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
/* check which (leafno or buddy) is the left buddy.
* the left buddy gets to claim the blocks resulting
* from the join while the right gets to claim none.
- * the left buddy is also eligable to participate in
+ * the left buddy is also eligible to participate in
* a join at the next higher level while the right
* is not.
*
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 5d3bbd10f8db..e5fe8506ed16 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -126,7 +126,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
/* allocate the disk blocks for the extent. initially, extBalloc()
* will try to allocate disk blocks for the requested size (xlen).
- * if this fails (xlen contiguous free blocks not avaliable), it'll
+ * if this fails (xlen contiguous free blocks not available), it'll
* try to allocate a smaller number of blocks (producing a smaller
* extent), with this smaller number of blocks consisting of the
* requested number of blocks rounded down to the next smaller
@@ -481,7 +481,7 @@ int extFill(struct inode *ip, xad_t * xp)
*
* initially, we will try to allocate disk blocks for the
* requested size (nblocks). if this fails (nblocks
- * contiguous free blocks not avaliable), we'll try to allocate
+ * contiguous free blocks not available), we'll try to allocate
* a smaller number of blocks (producing a smaller extent), with
* this smaller number of blocks consisting of the requested
* number of blocks rounded down to the next smaller power of 2
@@ -575,7 +575,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
* to a new set of blocks. If moving the extent, we initially
* will try to allocate disk blocks for the requested size
* (newnblks). if this fails (new contiguous free blocks not
- * avaliable), we'll try to allocate a smaller number of
+ * available), we'll try to allocate a smaller number of
* blocks (producing a smaller extent), with this smaller
* number of blocks consisting of the requested number of
* blocks rounded down to the next smaller power of 2
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 3a09423b6c22..ed53a4740168 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -1069,7 +1069,7 @@ int diFree(struct inode *ip)
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
/* in preparation for removing the iag from the
- * ag extent free list, read the iags preceeding
+ * ag extent free list, read the iags preceding
* and following the iag on the ag extent free
* list.
*/
@@ -1095,7 +1095,7 @@ int diFree(struct inode *ip)
int inofreefwd = le32_to_cpu(iagp->inofreefwd);
/* in preparation for removing the iag from the
- * ag inode free list, read the iags preceeding
+ * ag inode free list, read the iags preceding
* and following the iag on the ag inode free
* list. before reading these iags, we must make
* sure that we already don't have them in hand
@@ -1681,7 +1681,7 @@ diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
* try to allocate a new extent of free inodes.
*/
if (addext) {
- /* if free space is not avaliable for this new extent, try
+ /* if free space is not available for this new extent, try
* below to allocate a free and existing (already backed)
* inode from the ag.
*/
@@ -2036,7 +2036,7 @@ static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
/* check if this is the last free inode within the iag.
* if so, it will have to be removed from the ag free
- * inode list, so get the iags preceeding and following
+ * inode list, so get the iags preceding and following
* it on the list.
*/
if (iagp->nfreeinos == cpu_to_le32(1)) {
@@ -2208,7 +2208,7 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
/* check if this is the last free extent within the
* iag. if so, the iag must be removed from the ag
- * free extent list, so get the iags preceeding and
+ * free extent list, so get the iags preceding and
* following the iag on this list.
*/
if (iagp->nfreeexts == cpu_to_le32(1)) {
@@ -2504,7 +2504,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
}
- /* get the next avaliable iag number */
+ /* get the next available iag number */
iagno = imap->im_nextiag;
/* make sure that we have not exceeded the maximum inode
@@ -2615,7 +2615,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
duplicateIXtree(sb, blkno, xlen, &xaddr);
- /* update the next avaliable iag number */
+ /* update the next available iag number */
imap->im_nextiag += 1;
/* Add the iag to the iag free list so we don't lose the iag
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 9236bc49ae7f..e38c21598850 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -288,7 +288,7 @@ struct lrd {
/*
* SYNCPT: log sync point
*
- * replay log upto syncpt address specified;
+ * replay log up to syncpt address specified;
*/
struct {
__le32 sync; /* 4: syncpt address (0 = here) */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 48b44bd8267b..6740d34cd82b 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -583,7 +583,6 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset)
const struct address_space_operations jfs_metapage_aops = {
.readpage = metapage_readpage,
.writepage = metapage_writepage,
- .sync_page = block_sync_page,
.releasepage = metapage_releasepage,
.invalidatepage = metapage_invalidatepage,
.set_page_dirty = __set_page_dirty_nobuffers,
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index d94f8d9e87d7..a78beda85f68 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -75,7 +75,7 @@ extern void grab_metapage(struct metapage *);
extern void force_metapage(struct metapage *);
/*
- * hold_metapage and put_metapage are used in conjuction. The page lock
+ * hold_metapage and put_metapage are used in conjunction. The page lock
* is not dropped between the two, so no other threads can get or release
* the metapage
*/
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 9466957ec841..f6cc0c09ec63 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -636,7 +636,7 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
* the inode of the page and available to all anonymous
* transactions until txCommit() time at which point
* they are transferred to the transaction tlock list of
- * the commiting transaction of the inode)
+ * the committing transaction of the inode)
*/
if (xtid == 0) {
tlck->tid = tid;
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 1aba0039f1c9..8ea5efb5a34e 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -57,7 +57,7 @@
* 2. compute new FSCKSize from new LVSize;
* 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
* assert(new FSSize >= old FSSize),
- * i.e., file system must not be shrinked;
+ * i.e., file system must not be shrunk;
*/
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
@@ -182,7 +182,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
*/
newFSSize = newLVSize - newLogSize - newFSCKSize;
- /* file system cannot be shrinked */
+ /* file system cannot be shrunk */
if (newFSSize < bmp->db_mapsize) {
rc = -EINVAL;
goto out;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index eeca48a031ab..06c8a67cbe76 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -644,7 +644,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 3fa4c32272df..24838f1eeee5 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -678,7 +678,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
struct posix_acl *acl;
int rc;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
/*
diff --git a/fs/locks.c b/fs/locks.c
index 822c3d1843af..0a4f50dfadfb 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -414,17 +414,7 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
- switch (l->l_type) {
- case F_RDLCK:
- case F_WRLCK:
- case F_UNLCK:
- fl->fl_type = l->l_type;
- break;
- default:
- return -EINVAL;
- }
-
- return (0);
+ return assign_type(fl, l->l_type);
}
#endif
diff --git a/fs/logfs/compr.c b/fs/logfs/compr.c
index 44bbfd249abc..961f02b86d97 100644
--- a/fs/logfs/compr.c
+++ b/fs/logfs/compr.c
@@ -81,7 +81,7 @@ error:
int __init logfs_compr_init(void)
{
- size_t size = max(zlib_deflate_workspacesize(),
+ size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
stream.workspace = vmalloc(size);
if (!stream.workspace)
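Note: zlib_deflate_workspacesize() now takes explicit window-bits and
memory-level bounds; passing MAX_WBITS/MAX_MEM_LEVEL preserves the old
worst-case allocation. A minimal sketch of the init-time sizing pattern,
assuming the 2.6.39-era <linux/zlib.h> API:

    #include <linux/zlib.h>
    #include <linux/vmalloc.h>

    static z_stream stream;

    static int __init compr_workspace_init(void)
    {
        /* deflate needs more room than inflate; size for the max of both */
        size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
                          zlib_inflate_workspacesize());

        stream.workspace = vmalloc(size);
        if (!stream.workspace)
            return -ENOMEM;
        return 0;
    }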
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 723bc5bca09a..1adc8d455f0e 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -39,7 +39,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
bio.bi_end_io = request_complete;
submit_bio(rw, &bio);
- generic_unplug_device(bdev_get_queue(bdev));
wait_for_completion(&complete);
return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
@@ -168,7 +167,6 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
}
len = PAGE_ALIGN(len);
__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
- generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 7466e9dcc8c5..339e17e9133d 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -60,7 +60,7 @@ static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
* asynchronous properties. So just to prevent the first implementor of such
* a thing from breaking logfs in 2350, we do the usual pointless dance to
* declare a completion variable and wait for completion before returning
- * from mtd_erase(). What an excercise in futility!
+ * from mtd_erase(). What an exercise in futility!
*/
static void logfs_erase_callback(struct erase_info *ei)
{
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index f9ddf0c388c8..9ed89d1663f8 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -92,7 +92,7 @@ static int beyond_eof(struct inode *inode, loff_t bix)
* so short names (len <= 9) don't even occupy the complete 32bit name
* space. A prime >256 ensures short names quickly spread the 32bit
* name space. Add about 26 for the estimated amount of information
- * of each character and pick a prime nearby, preferrably a bit-sparse
+ * of each character and pick a prime nearby, preferably a bit-sparse
* one.
*/
static u32 hash_32(const char *s, int len, u32 seed)
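Note: the function below is a plain multiplicative string hash; a prime
multiplier a little above 256 pushes each earlier byte past a full byte
boundary, so even very short names spread across the 32-bit space. A
runnable userspace sketch (293 is an assumed multiplier matching the
comment's description; this hunk does not show logfs' actual constant):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t hash32(const char *s, int len, uint32_t seed)
    {
        uint32_t hash = seed;
        int i;

        for (i = 0; i < len; i++)
            hash = hash * 293 + (unsigned char)s[i];
        return hash;
    }

    int main(void)
    {
        /* distinct short names land far apart in the 32-bit space */
        printf("%08x %08x\n", hash32("foo", 3, 0), hash32("bar", 3, 0));
        return 0;
    }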
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index e86376b87af1..c2ad7028def4 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -196,7 +196,7 @@ long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (IS_RDONLY(inode))
return -EROFS;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
err = get_user(flags, (int __user *)arg);
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index 03b8c240aeda..edfea7a3a747 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -293,7 +293,7 @@ static int logfs_write_inode(struct inode *inode, struct writeback_control *wbc)
return ret;
}
-/* called with inode_lock held */
+/* called with inode->i_lock held */
static int logfs_drop_inode(struct inode *inode)
{
struct logfs_super *super = logfs_super(inode->i_sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index ee99a9f5dfd3..9e22085231b3 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1616,7 +1616,7 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
err = logfs_write_buf(inode, page, flags);
if (!err && shrink_level(gc_level) == 0) {
/* Rewrite cannot mark the inode dirty but has to
- * write it immediatly.
+ * write it immediately.
* Q: Can't we just create an alias for the inode
* instead? And if not, why not?
*/
diff --git a/fs/mbcache.c b/fs/mbcache.c
index a25444ab2baf..2f174be06555 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -542,7 +542,7 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
* mb_cache_entry_find_first()
*
* Find the first cache entry on a given device with a certain key in
- * an additional index. Additonal matches can be found with
+ * an additional index. Additional matches can be found with
* mb_cache_entry_find_next(). Returns NULL if no match was found. The
* returned cache entry is locked for shared access ("multiple readers").
*
diff --git a/fs/minix/Kconfig b/fs/minix/Kconfig
index 0fd7ca994264..6624684dd5de 100644
--- a/fs/minix/Kconfig
+++ b/fs/minix/Kconfig
@@ -15,3 +15,11 @@ config MINIX_FS
module will be called minix. Note that the file system of your root
partition (the one containing the directory /) cannot be compiled as
a module.
+
+config MINIX_FS_NATIVE_ENDIAN
+ def_bool MINIX_FS
+ depends on H8300 || M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU)
+
+config MINIX_FS_BIG_ENDIAN_16BIT_INDEXED
+ def_bool MINIX_FS
+ depends on M68K && MMU
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index ae0b83f476a6..adcdc0a4e182 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -399,7 +399,6 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations minix_aops = {
.readpage = minix_readpage,
.writepage = minix_writepage,
- .sync_page = block_sync_page,
.write_begin = minix_write_begin,
.write_end = generic_write_end,
.bmap = minix_bmap
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 407b1c84911e..341e2122879a 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -88,4 +88,78 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
return list_entry(inode, struct minix_inode_info, vfs_inode);
}
+#if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
+ defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
+
+#error Minix file system byte order broken
+
+#elif defined(CONFIG_MINIX_FS_NATIVE_ENDIAN)
+
+/*
+ * big-endian 32 or 64 bit indexed bitmaps on big-endian system or
+ * little-endian bitmaps on little-endian system
+ */
+
+#define minix_test_and_set_bit(nr, addr) \
+ __test_and_set_bit((nr), (unsigned long *)(addr))
+#define minix_set_bit(nr, addr) \
+ __set_bit((nr), (unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr, addr) \
+ __test_and_clear_bit((nr), (unsigned long *)(addr))
+#define minix_test_bit(nr, addr) \
+ test_bit((nr), (unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr, size) \
+ find_first_zero_bit((unsigned long *)(addr), (size))
+
+#elif defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
+
+/*
+ * big-endian 16bit indexed bitmaps
+ */
+
+static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
+{
+ const unsigned short *p = vaddr, *addr = vaddr;
+ unsigned short num;
+
+ if (!size)
+ return 0;
+
+ size = (size >> 4) + ((size & 15) > 0);
+ while (*p++ == 0xffff) {
+ if (--size == 0)
+ return (p - addr) << 4;
+ }
+
+ num = *--p;
+ return ((p - addr) << 4) + ffz(num);
+}
+
+#define minix_test_and_set_bit(nr, addr) \
+ __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_set_bit(nr, addr) \
+ __set_bit((nr) ^ 16, (unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr, addr) \
+ __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
+
+static inline int minix_test_bit(int nr, const void *vaddr)
+{
+ const unsigned short *p = vaddr;
+ return (p[nr >> 4] & (1U << (nr & 15))) != 0;
+}
+
+#else
+
+/*
+ * little-endian bitmaps
+ */
+
+#define minix_test_and_set_bit __test_and_set_bit_le
+#define minix_set_bit __set_bit_le
+#define minix_test_and_clear_bit __test_and_clear_bit_le
+#define minix_test_bit test_bit_le
+#define minix_find_first_zero_bit find_first_zero_bit_le
+
+#endif
+
#endif /* FS_MINIX_H */
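Note: the `(nr) ^ 16` in the big-endian 16-bit variant swaps which 16-bit
half of each 32-bit long a bit number addresses, matching the on-disk
layout of big-endian 16-bit words. The scan itself is easy to check in
userspace; below is a runnable rendering of minix_find_first_zero_bit()
with ffz() open-coded (illustrative, not kernel code):

    #include <stdio.h>

    static int find_first_zero_bit16(const unsigned short *addr, unsigned size)
    {
        const unsigned short *p = addr;
        unsigned short num;
        int bit = 0;

        if (!size)
            return 0;

        size = (size >> 4) + ((size & 15) > 0);  /* round up to 16-bit words */
        while (*p++ == 0xffff) {                 /* skip fully-set words */
            if (--size == 0)
                return (p - addr) << 4;          /* every bit is set */
        }

        num = *--p;
        while (num & 1) {                        /* open-coded ffz() */
            num >>= 1;
            bit++;
        }
        return ((p - addr) << 4) + bit;
    }

    int main(void)
    {
        unsigned short map[2] = { 0xffff, 0x0007 };     /* bits 0..18 set */
        printf("%d\n", find_first_zero_bit16(map, 32)); /* prints 19 */
        return 0;
    }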
diff --git a/fs/mpage.c b/fs/mpage.c
index d78455a81ec9..0afc809e46e0 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -364,6 +364,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
sector_t last_block_in_bio = 0;
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
map_bh.b_state = 0;
map_bh.b_size = 0;
@@ -385,6 +388,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
BUG_ON(!list_empty(pages));
if (bio)
mpage_bio_submit(READ, bio);
+ blk_finish_plug(&plug);
return 0;
}
EXPORT_SYMBOL(mpage_readpages);
@@ -666,8 +670,11 @@ int
mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block)
{
+ struct blk_plug plug;
int ret;
+ blk_start_plug(&plug);
+
if (!get_block)
ret = generic_writepages(mapping, wbc);
else {
@@ -682,6 +689,7 @@ mpage_writepages(struct address_space *mapping,
if (mpd.bio)
mpage_bio_submit(WRITE, mpd.bio);
}
+ blk_finish_plug(&plug);
return ret;
}
EXPORT_SYMBOL(mpage_writepages);
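Note: blk_start_plug()/blk_finish_plug() is the on-stack plugging API that
replaces the per-queue unplugging being removed throughout this series
(block_sync_page, generic_unplug_device). A minimal sketch of the pattern,
assuming the 2.6.39-era block layer with the two-argument submit_bio():

    #include <linux/blkdev.h>
    #include <linux/bio.h>
    #include <linux/fs.h>

    /* Batch several bios behind one plug: nothing is pushed to the
     * driver until blk_finish_plug() runs (or the task schedules). */
    static void submit_batch(struct bio **bios, int nr)
    {
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* I/O collects on a per-task list */
        for (i = 0; i < nr; i++)
            submit_bio(READ, bios[i]);
        blk_finish_plug(&plug);         /* flush the whole batch at once */
    }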
diff --git a/fs/namei.c b/fs/namei.c
index 5a9a6c3094da..e6cd6113872c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -70,7 +70,7 @@
* name indicated by the symlink. The old code always complained that the
* name already exists, due to not following the symlink even if its target
* is nonexistent. The new semantics affects also mknod() and link() when
- * the name is a symlink pointing to a non-existant name.
+ * the name is a symlink pointing to a non-existent name.
*
* I don't know which semantics is the right one, since I have no access
* to standards. But I found by trial that HP-UX 9.0 has the full "new"
@@ -183,6 +183,9 @@ static int acl_permission_check(struct inode *inode, int mask, unsigned int flag
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+ if (current_user_ns() != inode_userns(inode))
+ goto other_perms;
+
if (current_fsuid() == inode->i_uid)
mode >>= 6;
else {
@@ -196,6 +199,7 @@ static int acl_permission_check(struct inode *inode, int mask, unsigned int flag
mode >>= 3;
}
+other_perms:
/*
* If the DACs are ok we don't need any capability check.
*/
@@ -237,7 +241,7 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
* Executable DACs are overridable if at least one exec bit is set.
*/
if (!(mask & MAY_EXEC) || execute_ok(inode))
- if (capable(CAP_DAC_OVERRIDE))
+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
return 0;
/*
@@ -245,7 +249,7 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
*/
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
- if (capable(CAP_DAC_READ_SEARCH))
+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
return 0;
return -EACCES;
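Note: the recurring substitution in this file is capable() ->
ns_capable(inode_userns(inode), ...): the former always tests against the
initial user namespace, the latter against the namespace that owns the
inode, so a user who is root inside a container can override DACs only on
that container's inodes. A hedged one-liner of the shape these hunks share
(inode_userns() is introduced by this same series):

    #include <linux/fs.h>
    #include <linux/capability.h>

    static int can_override_dac(struct inode *inode)
    {
        /* capable(CAP_DAC_OVERRIDE) would consult init_user_ns only */
        return ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE);
    }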
@@ -654,6 +658,7 @@ static inline int handle_reval_path(struct nameidata *nd)
static inline int exec_permission(struct inode *inode, unsigned int flags)
{
int ret;
+ struct user_namespace *ns = inode_userns(inode);
if (inode->i_op->permission) {
ret = inode->i_op->permission(inode, MAY_EXEC, flags);
@@ -666,7 +671,8 @@ static inline int exec_permission(struct inode *inode, unsigned int flags)
if (ret == -ECHILD)
return ret;
- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
+ if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
+ ns_capable(ns, CAP_DAC_READ_SEARCH))
goto ok;
return ret;
@@ -986,6 +992,12 @@ int follow_down_one(struct path *path)
return 0;
}
+static inline bool managed_dentry_might_block(struct dentry *dentry)
+{
+ return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
+ dentry->d_op->d_manage(dentry, true) < 0);
+}
+
/*
* Skip to top of mountpoint pile in rcuwalk mode. We abort the rcu-walk if we
* meet a managed dentry and we're not walking to "..". True is returned to
@@ -994,19 +1006,26 @@ int follow_down_one(struct path *path)
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
struct inode **inode, bool reverse_transit)
{
- while (d_mountpoint(path->dentry)) {
+ for (;;) {
struct vfsmount *mounted;
- if (unlikely(path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) &&
- !reverse_transit &&
- path->dentry->d_op->d_manage(path->dentry, true) < 0)
+ /*
+ * Don't forget we might have a non-mountpoint managed dentry
+ * that wants to block transit.
+ */
+ *inode = path->dentry->d_inode;
+ if (!reverse_transit &&
+ unlikely(managed_dentry_might_block(path->dentry)))
return false;
+
+ if (!d_mountpoint(path->dentry))
+ break;
+
mounted = __lookup_mnt(path->mnt, path->dentry, 1);
if (!mounted)
break;
path->mnt = mounted;
path->dentry = mounted->mnt_root;
nd->seq = read_seqcount_begin(&path->dentry->d_seq);
- *inode = path->dentry->d_inode;
}
if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
@@ -1644,13 +1663,16 @@ static int path_lookupat(int dfd, const char *name,
err = -ECHILD;
}
- if (!err)
+ if (!err) {
err = handle_reval_path(nd);
+ if (err)
+ path_put(&nd->path);
+ }
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!nd->inode->i_op->lookup) {
path_put(&nd->path);
- return -ENOTDIR;
+ err = -ENOTDIR;
}
}
@@ -1842,11 +1864,15 @@ static inline int check_sticky(struct inode *dir, struct inode *inode)
if (!(dir->i_mode & S_ISVTX))
return 0;
+ if (current_user_ns() != inode_userns(inode))
+ goto other_userns;
if (inode->i_uid == fsuid)
return 0;
if (dir->i_uid == fsuid)
return 0;
- return !capable(CAP_FOWNER);
+
+other_userns:
+ return !ns_capable(inode_userns(inode), CAP_FOWNER);
}
/*
@@ -2026,7 +2052,7 @@ static int may_open(struct path *path, int acc_mode, int flag)
}
/* O_NOATIME can only be set by the owner or superuser */
- if (flag & O_NOATIME && !is_owner_or_cap(inode))
+ if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
/*
@@ -2440,7 +2466,8 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
if (error)
return error;
- if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
+ if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
+ !ns_capable(inode_userns(dir), CAP_MKNOD))
return -EPERM;
if (!dir->i_op->mknod)
diff --git a/fs/namespace.c b/fs/namespace.c
index 9263995bf6a1..7dba2ed03429 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2701,7 +2701,7 @@ void __init mnt_init(void)
if (!mount_hashtable)
panic("Failed to allocate mount hash table\n");
- printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
+ printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
for (u = 0; u < HASH_SIZE; u++)
INIT_LIST_HEAD(&mount_hashtable[u]);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 00a1d1c3d3a4..0250e4ce4893 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -596,7 +596,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
/* server->priv.data = NULL; */
server->m = data;
- /* Althought anything producing this is buggy, it happens
+ /* Although anything producing this is buggy, it happens
now because of PATH_MAX changes.. */
if (server->m.time_out < 1) {
server->m.time_out = 10;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 14e0f9371d14..00ecf62ce7c1 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -241,7 +241,7 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
args->cbl_layout_type = ntohl(*p++);
/* Despite the spec's xdr, iomode really belongs in the FILE switch,
- * as it is unuseable and ignored with the other types.
+ * as it is unusable and ignored with the other types.
*/
iomode = ntohl(*p++);
args->cbl_layoutchanged = ntohl(*p++);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index abdf38d5971d..7237672216c8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -44,6 +44,7 @@
/* #define NFS_DEBUG_VERBOSE 1 */
static int nfs_opendir(struct inode *, struct file *);
+static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, void *, filldir_t);
static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *);
static int nfs_create(struct inode *, struct dentry *, int, struct nameidata *);
@@ -64,7 +65,7 @@ const struct file_operations nfs_dir_operations = {
.read = generic_read_dir,
.readdir = nfs_readdir,
.open = nfs_opendir,
- .release = nfs_release,
+ .release = nfs_closedir,
.fsync = nfs_fsync_dir,
};
@@ -133,13 +134,35 @@ const struct inode_operations nfs4_dir_inode_operations = {
#endif /* CONFIG_NFS_V4 */
+static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct rpc_cred *cred)
+{
+ struct nfs_open_dir_context *ctx;
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx != NULL) {
+ ctx->duped = 0;
+ ctx->dir_cookie = 0;
+ ctx->dup_cookie = 0;
+ ctx->cred = get_rpccred(cred);
+ } else
+ ctx = ERR_PTR(-ENOMEM);
+ return ctx;
+}
+
+static void put_nfs_open_dir_context(struct nfs_open_dir_context *ctx)
+{
+ put_rpccred(ctx->cred);
+ kfree(ctx);
+}
+
/*
* Open file
*/
static int
nfs_opendir(struct inode *inode, struct file *filp)
{
- int res;
+ int res = 0;
+ struct nfs_open_dir_context *ctx;
+ struct rpc_cred *cred;
dfprintk(FILE, "NFS: open dir(%s/%s)\n",
filp->f_path.dentry->d_parent->d_name.name,
@@ -147,8 +170,15 @@ nfs_opendir(struct inode *inode, struct file *filp)
nfs_inc_stats(inode, NFSIOS_VFSOPEN);
- /* Call generic open code in order to cache credentials */
- res = nfs_open(inode, filp);
+ cred = rpc_lookup_cred();
+ if (IS_ERR(cred))
+ return PTR_ERR(cred);
+ ctx = alloc_nfs_open_dir_context(cred);
+ if (IS_ERR(ctx)) {
+ res = PTR_ERR(ctx);
+ goto out;
+ }
+ filp->private_data = ctx;
if (filp->f_path.dentry == filp->f_path.mnt->mnt_root) {
/* This is a mountpoint, so d_revalidate will never
* have been called, so we need to refresh the
@@ -156,9 +186,18 @@ nfs_opendir(struct inode *inode, struct file *filp)
*/
__nfs_revalidate_inode(NFS_SERVER(inode), inode);
}
+out:
+ put_rpccred(cred);
return res;
}
+static int
+nfs_closedir(struct inode *inode, struct file *filp)
+{
+ put_nfs_open_dir_context(filp->private_data);
+ return 0;
+}
+
struct nfs_cache_array_entry {
u64 cookie;
u64 ino;
@@ -284,19 +323,20 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri
{
loff_t diff = desc->file->f_pos - desc->current_index;
unsigned int index;
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
if (diff < 0)
goto out_eof;
if (diff >= array->size) {
if (array->eof_index >= 0)
goto out_eof;
- desc->current_index += array->size;
return -EAGAIN;
}
index = (unsigned int)diff;
*desc->dir_cookie = array->array[index].cookie;
desc->cache_entry_index = index;
+ ctx->duped = 0;
return 0;
out_eof:
desc->eof = 1;
@@ -307,10 +347,18 @@ static
int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
{
int i;
+ loff_t new_pos;
int status = -EAGAIN;
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
for (i = 0; i < array->size; i++) {
if (array->array[i].cookie == *desc->dir_cookie) {
+ new_pos = desc->current_index + i;
+ if (new_pos < desc->file->f_pos) {
+ ctx->dup_cookie = *desc->dir_cookie;
+ ctx->duped = 1;
+ }
+ desc->file->f_pos = new_pos;
desc->cache_entry_index = i;
return 0;
}
@@ -342,6 +390,7 @@ int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc)
if (status == -EAGAIN) {
desc->last_cookie = array->last_cookie;
+ desc->current_index += array->size;
desc->page_index++;
}
nfs_readdir_release_array(desc->page);
@@ -354,7 +403,8 @@ static
int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
struct nfs_entry *entry, struct file *file, struct inode *inode)
{
- struct rpc_cred *cred = nfs_file_cred(file);
+ struct nfs_open_dir_context *ctx = file->private_data;
+ struct rpc_cred *cred = ctx->cred;
unsigned long timestamp, gencount;
int error;
@@ -693,6 +743,20 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
int i = 0;
int res = 0;
struct nfs_cache_array *array = NULL;
+ struct nfs_open_dir_context *ctx = file->private_data;
+
+ if (ctx->duped != 0 && ctx->dup_cookie == *desc->dir_cookie) {
+ if (printk_ratelimit()) {
+ pr_notice("NFS: directory %s/%s contains a readdir loop. "
+ "Please contact your server vendor. "
+ "Offending cookie: %llu\n",
+ file->f_dentry->d_parent->d_name.name,
+ file->f_dentry->d_name.name,
+ *desc->dir_cookie);
+ }
+ res = -ELOOP;
+ goto out;
+ }
array = nfs_readdir_get_array(desc->page);
if (IS_ERR(array)) {
@@ -785,6 +849,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct inode *inode = dentry->d_inode;
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
+ struct nfs_open_dir_context *dir_ctx = filp->private_data;
int res;
dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
@@ -801,7 +866,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
memset(desc, 0, sizeof(*desc));
desc->file = filp;
- desc->dir_cookie = &nfs_file_open_context(filp)->dir_cookie;
+ desc->dir_cookie = &dir_ctx->dir_cookie;
desc->decode = NFS_PROTO(inode)->decode_dirent;
desc->plus = NFS_USE_READDIRPLUS(inode);
@@ -853,6 +918,7 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ struct nfs_open_dir_context *dir_ctx = filp->private_data;
dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
dentry->d_parent->d_name.name,
@@ -872,7 +938,8 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
}
if (offset != filp->f_pos) {
filp->f_pos = offset;
- nfs_file_open_context(filp)->dir_cookie = 0;
+ dir_ctx->dir_cookie = 0;
+ dir_ctx->duped = 0;
}
out:
mutex_unlock(&inode->i_mutex);
@@ -1068,7 +1135,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
if (fhandle == NULL || fattr == NULL)
goto out_error;
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_bad;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
@@ -1224,7 +1291,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
nfs_block_sillyrename(parent);
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
if (error == -ENOENT)
goto no_entry;
if (error < 0) {
@@ -1562,7 +1629,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
if (dentry->d_inode)
goto out;
if (fhandle->size == 0) {
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_error;
}
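Note: the -ELOOP guard added to nfs_do_filldir() works together with
nfs_readdir_search_for_cookie(): a cookie that maps to an earlier f_pos is
remembered as a suspected duplicate, and consuming it a second time means
the server is replaying entries. A toy userspace model of that state
machine (illustrative only; field names mirror the patch):

    #include <stdio.h>

    struct dir_ctx {
        unsigned long long dir_cookie;  /* resume point for READDIR */
        unsigned long long dup_cookie;  /* cookie seen at an earlier pos */
        int duped;
    };

    /* returns -1 (the -ELOOP analogue) on a replayed duplicate cookie */
    static int check_cookie(struct dir_ctx *ctx, unsigned long long cookie,
                            long long new_pos, long long cur_pos)
    {
        if (ctx->duped && ctx->dup_cookie == cookie)
            return -1;
        if (new_pos < cur_pos) {        /* cookie maps before current offset */
            ctx->dup_cookie = cookie;
            ctx->duped = 1;
        }
        return 0;
    }

    int main(void)
    {
        struct dir_ctx ctx = { 0, 0, 0 };
        check_cookie(&ctx, 42, 3, 7);                 /* marks 42 suspicious */
        printf("%d\n", check_cookie(&ctx, 42, 3, 7)); /* prints -1: loop */
        return 0;
    }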
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index d85a534b15cd..2f093ed16980 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -301,7 +301,7 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
* disk, but it retrieves and clears ctx->error after synching, despite
* the two being set at the same time in nfs_context_set_write_error().
* This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occured, and hence cause it to
+ * nfs_file_write() that a write error occurred, and hence cause it to
* fall back to doing a synchronous write.
*/
static int
@@ -326,6 +326,9 @@ nfs_file_fsync(struct file *file, int datasync)
ret = xchg(&ctx->error, 0);
if (!ret && status < 0)
ret = status;
+ if (!ret && !datasync)
+ /* application has asked for meta-data sync */
+ ret = pnfs_layoutcommit_inode(inode, true);
return ret;
}
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 1084792bc0fe..dcb61548887f 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -222,6 +222,10 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh,
goto out;
}
+ if (fattr->valid & NFS_ATTR_FATTR_FSID &&
+ !nfs_fsid_equal(&server->fsid, &fattr->fsid))
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
+
inode = nfs_fhget(sb, mntfh, fattr);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 01768e5e2c9b..57bb31ad7a5e 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -254,7 +254,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
struct inode *inode = ERR_PTR(-ENOENT);
unsigned long hash;
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
+ nfs_attr_check_mountpoint(sb, fattr);
+
+ if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0)
goto out_no_inode;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
goto out_no_inode;
@@ -298,8 +300,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
/* Deal with crossing mountpoints */
- if ((fattr->valid & NFS_ATTR_FATTR_FSID)
- && !nfs_fsid_equal(&NFS_SB(sb)->fsid, &fattr->fsid)) {
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
+ fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
inode->i_op = &nfs_referral_inode_operations;
else
@@ -639,7 +641,6 @@ struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cr
ctx->mode = f_mode;
ctx->flags = 0;
ctx->error = 0;
- ctx->dir_cookie = 0;
nfs_init_lock_context(&ctx->lock_context);
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);
@@ -1471,6 +1472,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
nfsi->delegation_state = 0;
init_rwsem(&nfsi->rwsem);
nfsi->layout = NULL;
+ atomic_set(&nfsi->commits_outstanding, 0);
#endif
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 72e0bddf7a2f..ce118ce885dd 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -39,6 +39,12 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
return 0;
}
+static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
+{
+ if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid))
+ fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
+}
+
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
@@ -214,6 +220,7 @@ extern const u32 nfs41_maxwrite_overhead;
/* nfs4proc.c */
#ifdef CONFIG_NFS_V4
extern struct rpc_procinfo nfs4_procedures[];
+void nfs_fixup_secinfo_attributes(struct nfs_fattr *, struct nfs_fh *);
#endif
extern int nfs4_init_ds_session(struct nfs_client *clp);
@@ -276,11 +283,25 @@ extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
/* write.c */
+extern void nfs_commit_free(struct nfs_write_data *p);
extern int nfs_initiate_write(struct nfs_write_data *data,
struct rpc_clnt *clnt,
const struct rpc_call_ops *call_ops,
int how);
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
+extern int nfs_initiate_commit(struct nfs_write_data *data,
+ struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how);
+extern void nfs_init_commit(struct nfs_write_data *data,
+ struct list_head *head,
+ struct pnfs_layout_segment *lseg);
+void nfs_retry_commit(struct list_head *page_list,
+ struct pnfs_layout_segment *lseg);
+void nfs_commit_clear_lock(struct nfs_inode *nfsi);
+void nfs_commitdata_release(void *data);
+void nfs_commit_release_pages(struct nfs_write_data *data);
+
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -296,12 +317,14 @@ extern int nfs4_init_client(struct nfs_client *clp,
rpc_authflavor_t authflavour,
int noresvport);
extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
-extern int _nfs4_call_sync(struct nfs_server *server,
+extern int _nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply);
-extern int _nfs4_call_sync_session(struct nfs_server *server,
+extern int _nfs4_call_sync_session(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index bf1c68009ffd..9166fcb66da2 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -15,6 +15,7 @@
#include <linux/string.h>
#include <linux/sunrpc/clnt.h>
#include <linux/vfs.h>
+#include <linux/sunrpc/gss_api.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -27,7 +28,8 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ;
static struct vfsmount *nfs_do_submount(struct dentry *dentry,
struct nfs_fh *fh,
- struct nfs_fattr *fattr);
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t authflavor);
/*
* nfs_path - reconstruct the path given an arbitrary dentry
@@ -116,6 +118,102 @@ Elong:
return ERR_PTR(-ENAMETOOLONG);
}
+#ifdef CONFIG_NFS_V4
+static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors, struct inode *inode)
+{
+ struct gss_api_mech *mech;
+ struct xdr_netobj oid;
+ int i;
+ rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
+
+ for (i = 0; i < flavors->num_flavors; i++) {
+ struct nfs4_secinfo_flavor *flavor;
+ flavor = &flavors->flavors[i];
+
+ if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
+ pseudoflavor = flavor->flavor;
+ break;
+ } else if (flavor->flavor == RPC_AUTH_GSS) {
+ oid.len = flavor->gss.sec_oid4.len;
+ oid.data = flavor->gss.sec_oid4.data;
+ mech = gss_mech_get_by_OID(&oid);
+ if (!mech)
+ continue;
+ pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
+ gss_mech_put(mech);
+ break;
+ }
+ }
+
+ return pseudoflavor;
+}
+
+static rpc_authflavor_t nfs_negotiate_security(const struct dentry *parent, const struct dentry *dentry)
+{
+ int status = 0;
+ struct page *page;
+ struct nfs4_secinfo_flavors *flavors;
+ int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
+ rpc_authflavor_t flavor = RPC_AUTH_UNIX;
+
+ secinfo = NFS_PROTO(parent->d_inode)->secinfo;
+ if (secinfo != NULL) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ status = -ENOMEM;
+ goto out;
+ }
+ flavors = page_address(page);
+ status = secinfo(parent->d_inode, &dentry->d_name, flavors);
+ flavor = nfs_find_best_sec(flavors, dentry->d_inode);
+ put_page(page);
+ }
+
+ return flavor;
+
+out:
+ status = -ENOMEM;
+ return status;
+}
+
+static rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent,
+ struct dentry *dentry, struct path *path,
+ struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+ rpc_authflavor_t flavor;
+ struct rpc_clnt *clone;
+ struct rpc_auth *auth;
+ int err;
+
+ flavor = nfs_negotiate_security(parent, path->dentry);
+ if (flavor < 0)
+ goto out;
+ clone = rpc_clone_client(server->client);
+ auth = rpcauth_create(flavor, clone);
+ if (!auth) {
+ flavor = -EIO;
+ goto out_shutdown;
+ }
+ err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode,
+ &path->dentry->d_name,
+ fh, fattr);
+ if (err < 0)
+ flavor = err;
+out_shutdown:
+ rpc_shutdown_client(clone);
+out:
+ return flavor;
+}
+#else /* CONFIG_NFS_V4 */
+static inline rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server,
+ struct dentry *parent, struct dentry *dentry,
+ struct path *path, struct nfs_fh *fh,
+ struct nfs_fattr *fattr)
+{
+ return -EPERM;
+}
+#endif /* CONFIG_NFS_V4 */
+
/*
* nfs_d_automount - Handle crossing a mountpoint on the server
* @path - The mountpoint
@@ -136,6 +234,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
struct nfs_fh *fh = NULL;
struct nfs_fattr *fattr = NULL;
int err;
+ rpc_authflavor_t flavor = 1;
dprintk("--> nfs_d_automount()\n");
@@ -153,9 +252,16 @@ struct vfsmount *nfs_d_automount(struct path *path)
/* Look it up again to get its attributes */
parent = dget_parent(path->dentry);
- err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
+ err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode,
&path->dentry->d_name,
fh, fattr);
+ if (err == -EPERM) {
+ flavor = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr);
+ if (flavor < 0)
+ err = flavor;
+ else
+ err = 0;
+ }
dput(parent);
if (err != 0) {
mnt = ERR_PTR(err);
@@ -165,7 +271,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
mnt = nfs_do_refmount(path->dentry);
else
- mnt = nfs_do_submount(path->dentry, fh, fattr);
+ mnt = nfs_do_submount(path->dentry, fh, fattr, flavor);
if (IS_ERR(mnt))
goto out;
@@ -232,17 +338,20 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
* @dentry - parent directory
* @fh - filehandle for new root dentry
* @fattr - attributes for new root inode
+ * @authflavor - security flavor to use when performing the mount
*
*/
static struct vfsmount *nfs_do_submount(struct dentry *dentry,
struct nfs_fh *fh,
- struct nfs_fattr *fattr)
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t authflavor)
{
struct nfs_clone_mount mountdata = {
.sb = dentry->d_sb,
.dentry = dentry,
.fh = fh,
.fattr = fattr,
+ .authflavor = authflavor,
};
struct vfsmount *mnt = ERR_PTR(-ENOMEM);
char *page = (char *) __get_free_page(GFP_USER);
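Note: nfs_find_best_sec() above accepts the first server-listed flavor the
client can actually use: AUTH_NULL and AUTH_UNIX immediately, a GSS flavor
only if a mechanism is registered for its OID, with AUTH_UNIX as the
fallback. A toy model of that selection (the constants and pseudoflavor
arithmetic here are invented for illustration):

    #include <stdio.h>

    enum { AUTH_NULL = 0, AUTH_UNIX = 1, AUTH_GSS = 6 };

    static int mech_known(int oid) { return oid == 2; } /* pretend: krb5 only */

    static int pick_flavor(const int *flavors, const int *oids, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (flavors[i] == AUTH_NULL || flavors[i] == AUTH_UNIX)
                return flavors[i];              /* simple flavor: done */
            if (flavors[i] == AUTH_GSS && mech_known(oids[i]))
                return 390000 + oids[i];        /* fake pseudoflavor */
        }
        return AUTH_UNIX;                       /* nothing usable: fall back */
    }

    int main(void)
    {
        int flavors[] = { AUTH_GSS, AUTH_UNIX };
        int oids[]    = { 2,        0 };
        printf("%d\n", pick_flavor(flavors, oids, 2)); /* prints 390002 */
        return 0;
    }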
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d0c80d8b3f96..38053d823eb0 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -141,7 +141,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs3_proc_lookup(struct inode *dir, struct qstr *name,
+nfs3_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs3_diropargs arg = {
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c64be1cff080..e1c261ddd65d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -57,7 +57,8 @@ enum nfs4_session_state {
struct nfs4_minor_version_ops {
u32 minor_version;
- int (*call_sync)(struct nfs_server *server,
+ int (*call_sync)(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -262,6 +263,8 @@ extern int nfs4_proc_destroy_session(struct nfs4_session *);
extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
+extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
+ bool sync);
static inline bool
is_ds_only_client(struct nfs_client *clp)
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 428558464817..6f8192f4cfc7 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -154,6 +154,23 @@ static int filelayout_read_done_cb(struct rpc_task *task,
}
/*
+ * We reference the rpc_cred of the first WRITE that triggers the need for
+ * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
+ * rfc5661 is not clear about which credential should be used.
+ */
+static void
+filelayout_set_layoutcommit(struct nfs_write_data *wdata)
+{
+ if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds ||
+ wdata->res.verf->committed == NFS_FILE_SYNC)
+ return;
+
+ pnfs_set_layoutcommit(wdata);
+ dprintk("%s inode %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
+ (unsigned long) wdata->lseg->pls_end_pos);
+}
+
+/*
* Call ops for the async read/write cases
* In the case of dense layouts, the offset needs to be reset to its
* original value.
@@ -210,6 +227,38 @@ static int filelayout_write_done_cb(struct rpc_task *task,
return -EAGAIN;
}
+ filelayout_set_layoutcommit(data);
+ return 0;
+}
+
+/* Fake up some data that will cause nfs_commit_release to retry the writes. */
+static void prepare_to_resend_writes(struct nfs_write_data *data)
+{
+ struct nfs_page *first = nfs_list_entry(data->pages.next);
+
+ data->task.tk_status = 0;
+ memcpy(data->verf.verifier, first->wb_verf.verifier,
+ sizeof(first->wb_verf.verifier));
+ data->verf.verifier[0]++; /* ensure verifier mismatch */
+}
+
+static int filelayout_commit_done_cb(struct rpc_task *task,
+ struct nfs_write_data *data)
+{
+ int reset = 0;
+
+ if (filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, &reset) == -EAGAIN) {
+ dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
+ __func__, data->ds_clp, data->ds_clp->cl_session);
+ if (reset) {
+ prepare_to_resend_writes(data);
+ filelayout_set_lo_fail(data->lseg);
+ } else
+ nfs_restart_rpc(task, data->ds_clp);
+ return -EAGAIN;
+ }
+
return 0;
}
@@ -240,6 +289,16 @@ static void filelayout_write_release(void *data)
wdata->mds_ops->rpc_release(data);
}
+static void filelayout_commit_release(void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ nfs_commit_release_pages(wdata);
+ if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding))
+ nfs_commit_clear_lock(NFS_I(wdata->inode));
+ nfs_commitdata_release(wdata);
+}
+
struct rpc_call_ops filelayout_read_call_ops = {
.rpc_call_prepare = filelayout_read_prepare,
.rpc_call_done = filelayout_read_call_done,
@@ -252,6 +311,12 @@ struct rpc_call_ops filelayout_write_call_ops = {
.rpc_release = filelayout_write_release,
};
+struct rpc_call_ops filelayout_commit_call_ops = {
+ .rpc_call_prepare = filelayout_write_prepare,
+ .rpc_call_done = filelayout_write_call_done,
+ .rpc_release = filelayout_commit_release,
+};
+
static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_read_data *data)
{
@@ -320,10 +385,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
data->inode->i_ino, sync, (size_t) data->args.count, offset,
ntohl(ds->ds_ip_addr), ntohs(ds->ds_port));
- /* We can't handle commit to ds yet */
- if (!FILELAYOUT_LSEG(lseg)->commit_through_mds)
- data->args.stable = NFS_FILE_SYNC;
-
data->write_done_cb = filelayout_write_done_cb;
data->ds_clp = ds->ds_clp;
fh = nfs4_fl_select_ds_fh(lseg, j);
@@ -441,12 +502,33 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
struct nfs4_layoutget_res *lgr,
struct nfs4_deviceid *id)
{
- uint32_t *p = (uint32_t *)lgr->layout.buf;
+ struct xdr_stream stream;
+ struct xdr_buf buf = {
+ .pages = lgr->layoutp->pages,
+ .page_len = lgr->layoutp->len,
+ .buflen = lgr->layoutp->len,
+ .len = lgr->layoutp->len,
+ };
+ struct page *scratch;
+ __be32 *p;
uint32_t nfl_util;
int i;
dprintk("%s: set_layout_map Begin\n", __func__);
+ scratch = alloc_page(GFP_KERNEL);
+ if (!scratch)
+ return -ENOMEM;
+
+ xdr_init_decode(&stream, &buf, NULL);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ /* 20 = nfl_util (4), first_stripe_index (4), pattern_offset (8),
+ * num_fh (4) */
+ p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20);
+ if (unlikely(!p))
+ goto out_err;
+
memcpy(id, p, sizeof(*id));
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
print_deviceid(id);
@@ -468,32 +550,57 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
__func__, nfl_util, fl->num_fh, fl->first_stripe_index,
fl->pattern_offset);
+ if (!fl->num_fh)
+ goto out_err;
+
fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
GFP_KERNEL);
if (!fl->fh_array)
- return -ENOMEM;
+ goto out_err;
for (i = 0; i < fl->num_fh; i++) {
/* Do we want to use a mempool here? */
fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
- if (!fl->fh_array[i]) {
- filelayout_free_fh_array(fl);
- return -ENOMEM;
- }
+ if (!fl->fh_array[i])
+ goto out_err_free;
+
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free;
fl->fh_array[i]->size = be32_to_cpup(p++);
if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
printk(KERN_ERR "Too big fh %d received %d\n",
i, fl->fh_array[i]->size);
- filelayout_free_fh_array(fl);
- return -EIO;
+ goto out_err_free;
}
+
+ p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
+ if (unlikely(!p))
+ goto out_err_free;
memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
- p += XDR_QUADLEN(fl->fh_array[i]->size);
dprintk("DEBUG: %s: fh len %d\n", __func__,
fl->fh_array[i]->size);
}
+ __free_page(scratch);
return 0;
+
+out_err_free:
+ filelayout_free_fh_array(fl);
+out_err:
+ __free_page(scratch);
+ return -EIO;
+}
+
+static void
+filelayout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+
+ dprintk("--> %s\n", __func__);
+ nfs4_fl_put_deviceid(fl->dsaddr);
+ kfree(fl->commit_buckets);
+ _filelayout_free_lseg(fl);
}
static struct pnfs_layout_segment *
@@ -514,17 +621,28 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
_filelayout_free_lseg(fl);
return NULL;
}
- return &fl->generic_hdr;
-}
-static void
-filelayout_free_lseg(struct pnfs_layout_segment *lseg)
-{
- struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
-
- dprintk("--> %s\n", __func__);
- nfs4_fl_put_deviceid(fl->dsaddr);
- _filelayout_free_lseg(fl);
+ /* This assumes there is only one IOMODE_RW lseg. What
+ * we really want to do is have a layout_hdr level
+ * dictionary of <multipath_list4, fh> keys, each
+ * associated with a struct list_head, populated by calls
+ * to filelayout_write_pagelist().
+ */
+ if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
+ int i;
+ int size = (fl->stripe_type == STRIPE_SPARSE) ?
+ fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
+
+ fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL);
+ if (!fl->commit_buckets) {
+ filelayout_free_lseg(&fl->generic_hdr);
+ return NULL;
+ }
+ fl->number_of_buckets = size;
+ for (i = 0; i < size; i++)
+ INIT_LIST_HEAD(&fl->commit_buckets[i]);
+ }
+ return &fl->generic_hdr;
}
/*
@@ -552,6 +670,191 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
return (p_stripe == r_stripe);
}
+static bool filelayout_mark_pnfs_commit(struct pnfs_layout_segment *lseg)
+{
+ return !FILELAYOUT_LSEG(lseg)->commit_through_mds;
+}
+
+static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
+{
+ if (fl->stripe_type == STRIPE_SPARSE)
+ return nfs4_fl_calc_ds_index(&fl->generic_hdr, j);
+ else
+ return j;
+}
+
+struct list_head *filelayout_choose_commit_list(struct nfs_page *req)
+{
+ struct pnfs_layout_segment *lseg = req->wb_commit_lseg;
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+ u32 i, j;
+ struct list_head *list;
+
+ /* Note that we are calling nfs4_fl_calc_j_index on each page
+ * that ends up being committed to a data server. An attractive
+ * alternative is to add a field to nfs_write_data and nfs_page
+ * to store the value calculated in filelayout_write_pagelist
+ * and just use that here.
+ */
+ j = nfs4_fl_calc_j_index(lseg,
+ (loff_t)req->wb_index << PAGE_CACHE_SHIFT);
+ i = select_bucket_index(fl, j);
+ list = &fl->commit_buckets[i];
+ if (list_empty(list)) {
+ /* Non-empty buckets hold a reference on the lseg */
+ get_lseg(lseg);
+ }
+ return list;
+}
+
+static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+
+ if (flseg->stripe_type == STRIPE_SPARSE)
+ return i;
+ else
+ return nfs4_fl_calc_ds_index(lseg, i);
+}
+
+static struct nfs_fh *
+select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+
+ if (flseg->stripe_type == STRIPE_SPARSE) {
+ if (flseg->num_fh == 1)
+ i = 0;
+ else if (flseg->num_fh == 0)
+ /* Use the MDS OPEN fh set in nfs_read_rpcsetup */
+ return NULL;
+ }
+ return flseg->fh_array[i];
+}
+
+static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ u32 idx;
+ struct nfs_fh *fh;
+
+ idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ ds = nfs4_fl_prepare_ds(lseg, idx);
+ if (!ds) {
+ printk(KERN_ERR "%s: prepare_ds failed, use MDS\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ prepare_to_resend_writes(data);
+ data->mds_ops->rpc_release(data);
+ return -EAGAIN;
+ }
+ dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how);
+ data->write_done_cb = filelayout_commit_done_cb;
+ data->ds_clp = ds->ds_clp;
+ fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
+ if (fh)
+ data->args.fh = fh;
+ return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient,
+ &filelayout_commit_call_ops, how);
+}
+
+/*
+ * This is only useful while we are using whole file layouts.
+ */
+static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
+{
+ struct pnfs_layout_segment *lseg, *rv = NULL;
+
+ spin_lock(&inode->i_lock);
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
+ if (lseg->pls_range.iomode == IOMODE_RW)
+ rv = get_lseg(lseg);
+ spin_unlock(&inode->i_lock);
+ return rv;
+}
+
+static int alloc_ds_commits(struct inode *inode, struct list_head *list)
+{
+ struct pnfs_layout_segment *lseg;
+ struct nfs4_filelayout_segment *fl;
+ struct nfs_write_data *data;
+ int i, j;
+
+ /* Won't need this when non-whole file layout segments are supported;
+ * instead we will use a pnfs_layout_hdr structure */
+ lseg = find_only_write_lseg(inode);
+ if (!lseg)
+ return 0;
+ fl = FILELAYOUT_LSEG(lseg);
+ for (i = 0; i < fl->number_of_buckets; i++) {
+ if (list_empty(&fl->commit_buckets[i]))
+ continue;
+ data = nfs_commitdata_alloc();
+ if (!data)
+ goto out_bad;
+ data->ds_commit_index = i;
+ data->lseg = lseg;
+ list_add(&data->pages, list);
+ }
+ put_lseg(lseg);
+ return 0;
+
+out_bad:
+ for (j = i; j < fl->number_of_buckets; j++) {
+ if (list_empty(&fl->commit_buckets[i]))
+ continue;
+ nfs_retry_commit(&fl->commit_buckets[i], lseg);
+ put_lseg(lseg); /* associated with emptying bucket */
+ }
+ put_lseg(lseg);
+ /* Caller will clean up entries put on list */
+ return -ENOMEM;
+}
+
+/* This follows nfs_commit_list pretty closely */
+static int
+filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ int how)
+{
+ struct nfs_write_data *data, *tmp;
+ LIST_HEAD(list);
+
+ if (!list_empty(mds_pages)) {
+ data = nfs_commitdata_alloc();
+ if (!data)
+ goto out_bad;
+ data->lseg = NULL;
+ list_add(&data->pages, &list);
+ }
+
+ if (alloc_ds_commits(inode, &list))
+ goto out_bad;
+
+ list_for_each_entry_safe(data, tmp, &list, pages) {
+ list_del_init(&data->pages);
+ atomic_inc(&NFS_I(inode)->commits_outstanding);
+ if (!data->lseg) {
+ nfs_init_commit(data, mds_pages, NULL);
+ nfs_initiate_commit(data, NFS_CLIENT(inode),
+ data->mds_ops, how);
+ } else {
+ nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index], data->lseg);
+ filelayout_initiate_commit(data, how);
+ }
+ }
+ return 0;
+ out_bad:
+ list_for_each_entry_safe(data, tmp, &list, pages) {
+ nfs_retry_commit(&data->pages, data->lseg);
+ list_del_init(&data->pages);
+ nfs_commit_free(data);
+ }
+ nfs_retry_commit(mds_pages, NULL);
+ nfs_commit_clear_lock(NFS_I(inode));
+ return -ENOMEM;
+}
+
static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
@@ -559,6 +862,9 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.alloc_lseg = filelayout_alloc_lseg,
.free_lseg = filelayout_free_lseg,
.pg_test = filelayout_pg_test,
+ .mark_pnfs_commit = filelayout_mark_pnfs_commit,
+ .choose_commit_list = filelayout_choose_commit_list,
+ .commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
.write_pagelist = filelayout_write_pagelist,
};
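Note: the commit buckets exist because a COMMIT must be sent to the same
data server that took the WRITEs. filelayout_choose_commit_list() maps a
page to its stripe (offset, less the pattern offset, divided by the stripe
unit, modulo the stripe count) and from there to one bucket per data
server. A simplified model of the bucketing arithmetic (ignores
pattern_offset and the sparse/dense index mapping):

    #include <stdio.h>

    #define STRIPE_UNIT (64 * 1024)
    #define NBUCKETS 4

    int main(void)
    {
        long long offs[] = { 0, 65536, 131072, 262144, 4096 };
        int count[NBUCKETS] = { 0 };
        int i;

        for (i = 0; i < 5; i++) {
            /* stripe index selects the data server's bucket */
            int j = (int)((offs[i] / STRIPE_UNIT) % NBUCKETS);
            count[j]++;
        }
        for (i = 0; i < NBUCKETS; i++)
            printf("bucket %d: %d pages\n", i, count[i]);
        return 0;
    }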
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index ee0c907742b5..7c44579f5832 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -33,7 +33,7 @@
#include "pnfs.h"
/*
- * Field testing shows we need to support upto 4096 stripe indices.
+ * Field testing shows we need to support up to 4096 stripe indices.
* We store each index as a u8 (u32 on the wire) to keep the memory footprint
* reasonable. This in turn means we support a maximum of 256
* RFC 5661 multipath_list4 structures.
@@ -79,6 +79,8 @@ struct nfs4_filelayout_segment {
struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
unsigned int num_fh;
struct nfs_fh **fh_array;
+ struct list_head *commit_buckets; /* Sort commits to ds */
+ int number_of_buckets;
};
static inline struct nfs4_filelayout_segment *
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 68143c162e3b..de5350f2b249 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -261,7 +261,7 @@ out:
* Currently only support ipv4, and one multi-path address.
*/
static struct nfs4_pnfs_ds *
-decode_and_add_ds(__be32 **pp, struct inode *inode)
+decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
{
struct nfs4_pnfs_ds *ds = NULL;
char *buf;
@@ -269,25 +269,34 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
u32 ip_addr, port;
int nlen, rlen, i;
int tmp[2];
- __be32 *r_netid, *r_addr, *p = *pp;
+ __be32 *p;
/* r_netid */
+ p = xdr_inline_decode(streamp, 4);
+ if (unlikely(!p))
+ goto out_err;
nlen = be32_to_cpup(p++);
- r_netid = p;
- p += XDR_QUADLEN(nlen);
- /* r_addr */
- rlen = be32_to_cpup(p++);
- r_addr = p;
- p += XDR_QUADLEN(rlen);
- *pp = p;
+ p = xdr_inline_decode(streamp, nlen);
+ if (unlikely(!p))
+ goto out_err;
/* Check that netid is "tcp" */
- if (nlen != 3 || memcmp((char *)r_netid, "tcp", 3)) {
+ if (nlen != 3 || memcmp((char *)p, "tcp", 3)) {
dprintk("%s: ERROR: non ipv4 TCP r_netid\n", __func__);
goto out_err;
}
+ /* r_addr */
+ p = xdr_inline_decode(streamp, 4);
+ if (unlikely(!p))
+ goto out_err;
+ rlen = be32_to_cpup(p);
+
+ p = xdr_inline_decode(streamp, rlen);
+ if (unlikely(!p))
+ goto out_err;
+
/* ipv6 length plus port is legal */
if (rlen > INET6_ADDRSTRLEN + 8) {
dprintk("%s: Invalid address, length %d\n", __func__,
@@ -300,7 +309,7 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
goto out_err;
}
buf[rlen] = '\0';
- memcpy(buf, r_addr, rlen);
+ memcpy(buf, p, rlen);
/* replace the port dots with dashes for the in4_pton() delimiter */
for (i = 0; i < 2; i++) {
@@ -336,90 +345,154 @@ out_err:
static struct nfs4_file_layout_dsaddr*
decode_device(struct inode *ino, struct pnfs_device *pdev)
{
- int i, dummy;
+ int i;
u32 cnt, num;
u8 *indexp;
- __be32 *p = (__be32 *)pdev->area, *indicesp;
- struct nfs4_file_layout_dsaddr *dsaddr;
+ __be32 *p;
+ u8 *stripe_indices;
+ u8 max_stripe_index;
+ struct nfs4_file_layout_dsaddr *dsaddr = NULL;
+ struct xdr_stream stream;
+ struct xdr_buf buf = {
+ .pages = pdev->pages,
+ .page_len = pdev->pglen,
+ .buflen = pdev->pglen,
+ .len = pdev->pglen,
+ };
+ struct page *scratch;
+
+ /* set up xdr stream */
+ scratch = alloc_page(GFP_KERNEL);
+ if (!scratch)
+ goto out_err;
+
+ xdr_init_decode(&stream, &buf, NULL);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
/* Get the stripe count (number of stripe index) */
- cnt = be32_to_cpup(p++);
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_scratch;
+
+ cnt = be32_to_cpup(p);
dprintk("%s stripe count %d\n", __func__, cnt);
if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) {
printk(KERN_WARNING "%s: stripe count %d greater than "
"supported maximum %d\n", __func__,
cnt, NFS4_PNFS_MAX_STRIPE_CNT);
- goto out_err;
+ goto out_err_free_scratch;
+ }
+
+ /* read stripe indices */
+ stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL);
+ if (!stripe_indices)
+ goto out_err_free_scratch;
+
+ p = xdr_inline_decode(&stream, cnt << 2);
+ if (unlikely(!p))
+ goto out_err_free_stripe_indices;
+
+ indexp = &stripe_indices[0];
+ max_stripe_index = 0;
+ for (i = 0; i < cnt; i++) {
+ *indexp = be32_to_cpup(p++);
+ max_stripe_index = max(max_stripe_index, *indexp);
+ indexp++;
}
/* Check the multipath list count */
- indicesp = p;
- p += XDR_QUADLEN(cnt << 2);
- num = be32_to_cpup(p++);
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_stripe_indices;
+
+ num = be32_to_cpup(p);
dprintk("%s ds_num %u\n", __func__, num);
if (num > NFS4_PNFS_MAX_MULTI_CNT) {
printk(KERN_WARNING "%s: multipath count %d greater than "
"supported maximum %d\n", __func__,
num, NFS4_PNFS_MAX_MULTI_CNT);
- goto out_err;
+ goto out_err_free_stripe_indices;
}
+
+ /* validate stripe indices are all < num */
+ if (max_stripe_index >= num) {
+ printk(KERN_WARNING "%s: stripe index %u >= num ds %u\n",
+ __func__, max_stripe_index, num);
+ goto out_err_free_stripe_indices;
+ }
+
dsaddr = kzalloc(sizeof(*dsaddr) +
(sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
GFP_KERNEL);
if (!dsaddr)
- goto out_err;
-
- dsaddr->stripe_indices = kzalloc(sizeof(u8) * cnt, GFP_KERNEL);
- if (!dsaddr->stripe_indices)
- goto out_err_free;
+ goto out_err_free_stripe_indices;
dsaddr->stripe_count = cnt;
+ dsaddr->stripe_indices = stripe_indices;
+ stripe_indices = NULL;
dsaddr->ds_num = num;
memcpy(&dsaddr->deviceid, &pdev->dev_id, sizeof(pdev->dev_id));
- /* Go back an read stripe indices */
- p = indicesp;
- indexp = &dsaddr->stripe_indices[0];
- for (i = 0; i < dsaddr->stripe_count; i++) {
- *indexp = be32_to_cpup(p++);
- if (*indexp >= num)
- goto out_err_free;
- indexp++;
- }
- /* Skip already read multipath list count */
- p++;
-
for (i = 0; i < dsaddr->ds_num; i++) {
int j;
+ u32 mp_count;
+
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
- dummy = be32_to_cpup(p++); /* multipath count */
- if (dummy > 1) {
+ mp_count = be32_to_cpup(p); /* multipath count */
+ if (mp_count > 1) {
printk(KERN_WARNING
"%s: Multipath count %d not supported, "
"skipping all greater than 1\n", __func__,
- dummy);
+ mp_count);
}
- for (j = 0; j < dummy; j++) {
+ for (j = 0; j < mp_count; j++) {
if (j == 0) {
- dsaddr->ds_list[i] = decode_and_add_ds(&p, ino);
+ dsaddr->ds_list[i] = decode_and_add_ds(&stream,
+ ino);
if (dsaddr->ds_list[i] == NULL)
- goto out_err_free;
+ goto out_err_free_deviceid;
} else {
u32 len;
/* skip extra multipath */
- len = be32_to_cpup(p++);
- p += XDR_QUADLEN(len);
- len = be32_to_cpup(p++);
- p += XDR_QUADLEN(len);
- continue;
+
+ /* read len, skip */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
+ len = be32_to_cpup(p);
+
+ p = xdr_inline_decode(&stream, len);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
+
+ /* read len, skip */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
+ len = be32_to_cpup(p);
+
+ p = xdr_inline_decode(&stream, len);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
}
}
}
+
+ __free_page(scratch);
return dsaddr;
-out_err_free:
+out_err_free_deviceid:
nfs4_fl_free_deviceid(dsaddr);
+ /* stripe_indices was part of dsaddr */
+ goto out_err_free_scratch;
+out_err_free_stripe_indices:
+ kfree(stripe_indices);
+out_err_free_scratch:
+ __free_page(scratch);
out_err:
dprintk("%s ERROR: returning NULL\n", __func__);
return NULL;
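
For reference, the whole decoder above reduces to one bounded pattern:
ask the stream for exactly the bytes the next field needs, bail out on
NULL, and validate counters before allocating.  A minimal sketch of that
pattern, assuming <linux/sunrpc/xdr.h>; demo_decode_counted_array() is
illustrative and not part of the patch:

static int demo_decode_counted_array(struct xdr_stream *xdr, u32 max)
{
        __be32 *p;
        u32 cnt, i;

        p = xdr_inline_decode(xdr, 4);          /* the element count */
        if (unlikely(!p))
                return -EIO;                    /* short buffer, caught safely */
        cnt = be32_to_cpup(p);
        if (cnt > max)                          /* validate before allocating */
                return -EINVAL;
        p = xdr_inline_decode(xdr, cnt << 2);   /* all cnt 32-bit elements */
        if (unlikely(!p))
                return -EIO;
        for (i = 0; i < cnt; i++)
                (void)be32_to_cpup(p++);        /* consume each element */
        return 0;
}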
@@ -498,11 +571,6 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
goto out_free;
}
- /* set pdev->area */
- pdev->area = vmap(pages, max_pages, VM_MAP, PAGE_KERNEL);
- if (!pdev->area)
- goto out_free;
-
memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
pdev->layout_type = LAYOUT_NFSV4_1_FILES;
pdev->pages = pages;
@@ -521,8 +589,6 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
*/
dsaddr = decode_and_add_device(inode, pdev);
out_free:
- if (pdev->area != NULL)
- vunmap(pdev->area);
for (i = 0; i < max_pages; i++)
__free_page(pages[i]);
kfree(pages);
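
The vmap()/vunmap() pair removed above is unnecessary because the
decoder now consumes the reply through an xdr_stream.  A sketch of the
setup the new decode path relies on, assuming the sunrpc XDR API (the
local variable names are illustrative):

        struct xdr_stream stream;
        struct xdr_buf buf = {
                .pages    = pdev->pages,
                .page_len = pdev->pglen,
                .buflen   = pdev->pglen,
                .len      = pdev->pglen,
        };
        struct page *scratch = alloc_page(GFP_KERNEL);

        if (scratch) {
                xdr_init_decode(&stream, &buf, NULL);
                xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
                /* xdr_inline_decode() copies any item that straddles a
                 * page boundary into the scratch page, so no contiguous
                 * mapping of the reply pages is needed. */
        }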
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1d84e7088af9..dfd1e6d7e6c3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
@@ -71,7 +72,9 @@ static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
-static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
+static int _nfs4_proc_lookup(struct rpc_clnt *client, struct inode *dir,
+ const struct qstr *name, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
@@ -85,6 +88,8 @@ static int nfs4_map_errors(int err)
switch (err) {
case -NFS4ERR_RESOURCE:
return -EREMOTEIO;
+ case -NFS4ERR_WRONGSEC:
+ return -EPERM;
case -NFS4ERR_BADOWNER:
case -NFS4ERR_BADNAME:
return -EINVAL;
@@ -657,7 +662,8 @@ struct rpc_call_ops nfs41_call_priv_sync_ops = {
.rpc_call_done = nfs41_call_sync_done,
};
-static int nfs4_call_sync_sequence(struct nfs_server *server,
+static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -673,7 +679,7 @@ static int nfs4_call_sync_sequence(struct nfs_server *server,
.cache_reply = cache_reply,
};
struct rpc_task_setup task_setup = {
- .rpc_client = server->client,
+ .rpc_client = clnt,
.rpc_message = msg,
.callback_ops = &nfs41_call_sync_ops,
.callback_data = &data
@@ -692,13 +698,14 @@ static int nfs4_call_sync_sequence(struct nfs_server *server,
return ret;
}
-int _nfs4_call_sync_session(struct nfs_server *server,
+int _nfs4_call_sync_session(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
- return nfs4_call_sync_sequence(server, msg, args, res, cache_reply, 0);
+ return nfs4_call_sync_sequence(clnt, server, msg, args, res, cache_reply, 0);
}
#else
@@ -709,19 +716,28 @@ static int nfs4_sequence_done(struct rpc_task *task,
}
#endif /* CONFIG_NFS_V4_1 */
-int _nfs4_call_sync(struct nfs_server *server,
+int _nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
args->sa_session = res->sr_session = NULL;
- return rpc_call_sync(server->client, msg, 0);
+ return rpc_call_sync(clnt, msg, 0);
}
-#define nfs4_call_sync(server, msg, args, res, cache_reply) \
- (server)->nfs_client->cl_mvops->call_sync((server), (msg), &(args)->seq_args, \
- &(res)->seq_res, (cache_reply))
+static inline
+int nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
+ args, res, cache_reply);
+}
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
@@ -1831,7 +1847,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
return status;
@@ -2090,7 +2106,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
};
int status;
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
@@ -2160,7 +2176,7 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
};
nfs_fattr_init(info->fattr);
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -2176,15 +2192,43 @@ static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
return err;
}
+static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info, rpc_authflavor_t flavor)
+{
+ struct rpc_auth *auth;
+ int ret;
+
+ auth = rpcauth_create(flavor, server->client);
+ if (IS_ERR(auth)) {
+ ret = -EIO;
+ goto out;
+ }
+ ret = nfs4_lookup_root(server, fhandle, info);
+ if (ret < 0)
+ ret = -EAGAIN;
+out:
+ return ret;
+}
+
/*
* get the file handle for the "/" directory on the server
*/
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
- int status;
+ int i, len, status = 0;
+ rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS + 2];
+
+ flav_array[0] = RPC_AUTH_UNIX;
+ len = gss_mech_list_pseudoflavors(&flav_array[1]);
+ flav_array[1+len] = RPC_AUTH_NULL;
+ len += 2;
- status = nfs4_lookup_root(server, fhandle, info);
+ for (i = 0; i < len; i++) {
+ status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
+ if (status == 0)
+ break;
+ }
if (status == 0)
status = nfs4_server_capabilities(server, fhandle);
if (status == 0)
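
A quick sizing check on the probe list above: gss_mech_list_pseudoflavors()
fills in at most NFS_MAX_SECFLAVORS entries, and the array brackets them
with RPC_AUTH_UNIX in slot 0 and RPC_AUTH_NULL in the last slot, so
NFS_MAX_SECFLAVORS + 2 entries always suffice and len comes out as
1 + (GSS count) + 1.  The flavors are then tried strictly in that order,
and the first one whose root lookup succeeds ends the loop.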
@@ -2249,7 +2293,7 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
};
nfs_fattr_init(fattr);
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
@@ -2309,9 +2353,9 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
return status;
}
-static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *dirfh,
- const struct qstr *name, struct nfs_fh *fhandle,
- struct nfs_fattr *fattr)
+static int _nfs4_proc_lookupfh(struct rpc_clnt *clnt, struct nfs_server *server,
+ const struct nfs_fh *dirfh, const struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
struct nfs4_lookup_arg args = {
@@ -2333,7 +2377,7 @@ static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *d
nfs_fattr_init(fattr);
dprintk("NFS call lookupfh %s\n", name->name);
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply lookupfh: %d\n", status);
return status;
}
@@ -2345,7 +2389,7 @@ static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
struct nfs4_exception exception = { };
int err;
do {
- err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr);
+ err = _nfs4_proc_lookupfh(server->client, server, dirfh, name, fhandle, fattr);
/* FIXME: !!!! */
if (err == -NFS4ERR_MOVED) {
err = -EREMOTE;
@@ -2356,27 +2400,41 @@ static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
return err;
}
-static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
- struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
+ const struct qstr *name, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
{
int status;
dprintk("NFS call lookup %s\n", name->name);
- status = _nfs4_proc_lookupfh(NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
+ status = _nfs4_proc_lookupfh(clnt, NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
if (status == -NFS4ERR_MOVED)
status = nfs4_get_referral(dir, name, fattr, fhandle);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
-static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh)
+{
+ memset(fh, 0, sizeof(struct nfs_fh));
+ fattr->fsid.major = 1;
+ fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
+ NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT;
+ fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ fattr->nlink = 2;
+}
+
+static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
- _nfs4_proc_lookup(dir, name, fhandle, fattr),
+ _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr),
&exception);
+ if (err == -EPERM)
+ nfs_fixup_secinfo_attributes(fattr, fhandle);
} while (exception.retry);
return err;
}
@@ -2421,7 +2479,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
if (res.fattr == NULL)
return -ENOMEM;
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (!status) {
entry->mask = 0;
if (res.access & NFS4_ACCESS_READ)
@@ -2488,7 +2546,7 @@ static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
.rpc_resp = &res,
};
- return nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
+ return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
@@ -2577,7 +2635,7 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
if (res.dir_attr == NULL)
goto out;
- status = nfs4_call_sync(server, &msg, &args, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
@@ -2678,7 +2736,7 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
if (res.old_fattr == NULL || res.new_fattr == NULL)
goto out;
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
nfs_post_op_update_inode(old_dir, res.old_fattr);
@@ -2729,7 +2787,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
if (res.fattr == NULL || res.dir_attr == NULL)
goto out;
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
@@ -2792,8 +2850,8 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
- int status = nfs4_call_sync(NFS_SERVER(dir), &data->msg,
- &data->arg, &data->res, 1);
+ int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
+ &data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
nfs_post_op_update_inode(dir, data->res.dir_fattr);
@@ -2905,7 +2963,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
- status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0);
+ status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
if (status >= 0) {
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
status += args.pgbase;
@@ -2997,7 +3055,7 @@ static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
};
nfs_fattr_init(fsstat->fattr);
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
@@ -3028,7 +3086,7 @@ static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
@@ -3073,7 +3131,7 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle
}
nfs_fattr_init(pathconf->fattr);
- return nfs4_call_sync(server, &msg, &args, &res, 0);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -3195,12 +3253,9 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
}
-static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
-
- if (!nfs4_sequence_done(task, &data->res.seq_res))
- return -EAGAIN;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
@@ -3210,11 +3265,24 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
return 0;
}
+static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
+ return data->write_done_cb(task, data);
+}
+
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
-
- data->args.bitmask = server->cache_consistency_bitmask;
+
+ if (data->lseg) {
+ data->args.bitmask = NULL;
+ data->res.fattr = NULL;
+ } else
+ data->args.bitmask = server->cache_consistency_bitmask;
+ if (!data->write_done_cb)
+ data->write_done_cb = nfs4_commit_done_cb;
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
}
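
The assignments above encode an invariant worth spelling out; restated
as a comment-heavy fragment (the field names come from the patch, the
fragment itself is illustrative):

        if (data->lseg) {                  /* COMMIT aimed at a pNFS data server */
                data->args.bitmask = NULL; /* a DS holds no file metadata... */
                data->res.fattr = NULL;    /* ...so drop the piggy-backed GETATTR */
        }

The matching guards appear further down: nfs4_xdr_enc_commit() only
emits GETATTR when args->bitmask is set, and nfs4_xdr_dec_commit() only
decodes it when res->fattr is set.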
@@ -3452,7 +3520,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
}
- ret = nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
+ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
if (res.acl_len > args.acl_len)
@@ -3527,7 +3595,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
if (i < 0)
return i;
nfs_inode_return_delegation(inode);
- ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
/*
* Free each page after tx, so the only ref left is
@@ -3890,7 +3958,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_id.id;
arg.lock_owner.s_dev = server->s_dev;
- status = nfs4_call_sync(server, &msg, &arg, &res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
switch (status) {
case 0:
request->fl_type = F_UNLCK;
@@ -4618,12 +4686,46 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
nfs_fixup_referral_attributes(&fs_locations->fattr);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
+static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
+{
+ int status;
+ struct nfs4_secinfo_arg args = {
+ .dir_fh = NFS_FH(dir),
+ .name = name,
+ };
+ struct nfs4_secinfo_res res = {
+ .flavors = flavors,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ dprintk("NFS call secinfo %s\n", name->name);
+ status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
+ dprintk("NFS reply secinfo: %d\n", status);
+ return status;
+}
+
+int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ _nfs4_proc_secinfo(dir, name, flavors),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
#ifdef CONFIG_NFS_V4_1
/*
* Check the exchange flags returned by the server for invalid flags, having
@@ -5516,8 +5618,6 @@ static void nfs4_layoutget_release(void *calldata)
struct nfs4_layoutget *lgp = calldata;
dprintk("--> %s\n", __func__);
- if (lgp->res.layout.buf != NULL)
- free_page((unsigned long) lgp->res.layout.buf);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
@@ -5549,12 +5649,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
dprintk("--> %s\n", __func__);
- lgp->res.layout.buf = (void *)__get_free_page(GFP_NOFS);
- if (lgp->res.layout.buf == NULL) {
- nfs4_layoutget_release(lgp);
- return -ENOMEM;
- }
-
+ lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
@@ -5586,7 +5681,7 @@ _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
int status;
dprintk("--> %s\n", __func__);
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
@@ -5606,6 +5701,100 @@ int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
+static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->args.inode);
+
+ if (nfs4_setup_sequence(server, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void
+nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->args.inode);
+
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return;
+
+ switch (task->tk_status) { /* Just ignore these failures */
+ case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
+ case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
+ case -NFS4ERR_BADLAYOUT: /* no layout */
+ case -NFS4ERR_GRACE: /* loca_reclaim is always false */
+ task->tk_status = 0;
+ }
+
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ nfs_restart_rpc(task, server->nfs_client);
+ return;
+ }
+
+ if (task->tk_status == 0)
+ nfs_post_op_update_inode_force_wcc(data->args.inode,
+ data->res.fattr);
+}
+
+static void nfs4_layoutcommit_release(void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+
+ /* Matched by references in pnfs_set_layoutcommit */
+ put_lseg(data->lseg);
+ put_rpccred(data->cred);
+ kfree(data);
+}
+
+static const struct rpc_call_ops nfs4_layoutcommit_ops = {
+ .rpc_call_prepare = nfs4_layoutcommit_prepare,
+ .rpc_call_done = nfs4_layoutcommit_done,
+ .rpc_release = nfs4_layoutcommit_release,
+};
+
+int
+nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .task = &data->task,
+ .rpc_client = NFS_CLIENT(data->args.inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_layoutcommit_ops,
+ .callback_data = data,
+ .flags = RPC_TASK_ASYNC,
+ };
+ struct rpc_task *task;
+ int status = 0;
+
+ dprintk("NFS: %4d initiating layoutcommit call. sync %d "
+ "lbw: %llu inode %lu\n",
+ data->task.tk_pid, sync,
+ data->args.lastbytewritten,
+ data->args.inode->i_ino);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ if (!sync)
+ goto out;
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0)
+ goto out;
+ status = task->tk_status;
+out:
+ dprintk("%s: status %d\n", __func__, status);
+ rpc_put_task(task);
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
@@ -5741,6 +5930,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.init_client = nfs4_init_client,
+ .secinfo = nfs4_proc_secinfo,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index ab1bf5bb021f..a6804f704d9d 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -590,7 +590,8 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
state->owner = owner;
atomic_inc(&owner->so_count);
list_add(&state->inode_states, &nfsi->open_states);
- state->inode = igrab(inode);
+ ihold(inode);
+ state->inode = inode;
spin_unlock(&inode->i_lock);
/* Note: The reclaim code dictates that we add stateless
* and read-only stateids to the end of the list */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 0cf560f77884..dddfb5795d7b 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -46,6 +46,7 @@
#include <linux/kdev_t.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
@@ -112,7 +113,7 @@ static int nfs4_stat_to_errno(int);
#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
#define decode_restorefh_maxsz (op_decode_hdr_maxsz)
#define encode_fsinfo_maxsz (encode_getattr_maxsz)
-#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 11)
+#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 15)
#define encode_renew_maxsz (op_encode_hdr_maxsz + 3)
#define decode_renew_maxsz (op_decode_hdr_maxsz)
#define encode_setclientid_maxsz \
@@ -253,6 +254,8 @@ static int nfs4_stat_to_errno(int);
(encode_getattr_maxsz)
#define decode_fs_locations_maxsz \
(0)
+#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
+#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)))
#if defined(CONFIG_NFS_V4_1)
#define NFS4_MAX_MACHINE_NAME_LEN (64)
@@ -324,6 +327,18 @@ static int nfs4_stat_to_errno(int);
#define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \
decode_stateid_maxsz + \
XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE))
+#define encode_layoutcommit_maxsz (op_encode_hdr_maxsz + \
+ 2 /* offset */ + \
+ 2 /* length */ + \
+ 1 /* reclaim */ + \
+ encode_stateid_maxsz + \
+ 1 /* new offset (true) */ + \
+ 2 /* last byte written */ + \
+ 1 /* nt_timechanged (false) */ + \
+ 1 /* layoutupdate4 layout type */ + \
+ 1 /* NULL filelayout layoutupdate4 payload */)
+#define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3)
+
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
@@ -676,6 +691,14 @@ static int nfs4_stat_to_errno(int);
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_fs_locations_maxsz)
+#define NFS4_enc_secinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_secinfo_maxsz)
+#define NFS4_dec_secinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_secinfo_maxsz)
#if defined(CONFIG_NFS_V4_1)
#define NFS4_enc_exchange_id_sz \
(compound_encode_hdr_maxsz + \
@@ -727,6 +750,17 @@ static int nfs4_stat_to_errno(int);
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_layoutget_maxsz)
+#define NFS4_enc_layoutcommit_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz +\
+ encode_putfh_maxsz + \
+ encode_layoutcommit_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_layoutcommit_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_layoutcommit_maxsz + \
+ decode_getattr_maxsz)
+
const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
compound_encode_hdr_maxsz +
@@ -1620,6 +1654,18 @@ static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *state
hdr->replen += decode_delegreturn_maxsz;
}
+static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
+{
+ int len = name->len;
+ __be32 *p;
+
+ p = reserve_space(xdr, 8 + len);
+ *p++ = cpu_to_be32(OP_SECINFO);
+ xdr_encode_opaque(p, name->name, len);
+ hdr->nops++;
+ hdr->replen += decode_secinfo_maxsz;
+}
+
#if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 operations */
static void encode_exchange_id(struct xdr_stream *xdr,
@@ -1816,6 +1862,34 @@ encode_layoutget(struct xdr_stream *xdr,
hdr->nops++;
hdr->replen += decode_layoutget_maxsz;
}
+
+static int
+encode_layoutcommit(struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten,
+ NFS_SERVER(args->inode)->pnfs_curr_ld->id);
+
+ p = reserve_space(xdr, 48 + NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_LAYOUTCOMMIT);
+ /* Only whole file layouts */
+ p = xdr_encode_hyper(p, 0); /* offset */
+ p = xdr_encode_hyper(p, NFS4_MAX_UINT64); /* length */
+ *p++ = cpu_to_be32(0); /* reclaim */
+ p = xdr_encode_opaque_fixed(p, args->stateid.data, NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(1); /* newoffset = TRUE */
+ p = xdr_encode_hyper(p, args->lastbytewritten);
+ *p++ = cpu_to_be32(0); /* Never send time_modify_changed */
+ *p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */
+ *p++ = cpu_to_be32(0); /* no file layout payload */
+
+ hdr->nops++;
+ hdr->replen += decode_layoutcommit_maxsz;
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
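
The reserve_space() size above can be checked by summing the fixed-width
fields: opcode (4) + offset (8) + length (8) + reclaim (4) +
newoffset (4) + last byte written (8) + time-changed flag (4) +
layoutupdate4 type (4) + empty layoutupdate4 body length (4) = 48 bytes,
plus the NFS4_STATEID_SIZE (16-byte) stateid written by
xdr_encode_opaque_fixed().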
/*
@@ -2294,7 +2368,8 @@ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_commit(xdr, args, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
+ if (args->bitmask)
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2465,6 +2540,24 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
encode_nops(&hdr);
}
+/*
+ * Encode SECINFO request
+ */
+static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_secinfo_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_secinfo(xdr, args->name, &hdr);
+ encode_nops(&hdr);
+}
+
#if defined(CONFIG_NFS_V4_1)
/*
* EXCHANGE_ID request
@@ -2604,8 +2697,32 @@ static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
encode_layoutget(xdr, args, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
+ args->layout.pages, 0, args->layout.pglen);
+
encode_nops(&hdr);
}
+
+/*
+ * Encode LAYOUTCOMMIT request
+ */
+static int nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutcommit_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutcommit(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -2925,6 +3042,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
if (unlikely(!p))
goto out_overflow;
bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
+ return -be32_to_cpup(p);
}
return 0;
out_overflow:
@@ -3912,6 +4030,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
fattr->valid |= status;
status = decode_attr_error(xdr, bitmap);
+ if (status == -NFS4ERR_WRONGSEC) {
+ nfs_fixup_secinfo_attributes(fattr, fh);
+ status = 0;
+ }
if (status < 0)
goto xdr_error;
@@ -4680,6 +4802,73 @@ static int decode_delegreturn(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_DELEGRETURN);
}
+static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ flavor->gss.sec_oid4.len = be32_to_cpup(p);
+ if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN)
+ goto out_err;
+
+ p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len);
+ if (unlikely(!p))
+ goto out_overflow;
+ memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len);
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ flavor->gss.qop4 = be32_to_cpup(p++);
+ flavor->gss.service = be32_to_cpup(p);
+
+ return 0;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+out_err:
+ return -EINVAL;
+}
+
+static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
+{
+ struct nfs4_secinfo_flavor *sec_flavor;
+ int status;
+ __be32 *p;
+ int i;
+
+ status = decode_op_hdr(xdr, OP_SECINFO);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->flavors->num_flavors = be32_to_cpup(p);
+
+ for (i = 0; i < res->flavors->num_flavors; i++) {
+ sec_flavor = &res->flavors->flavors[i];
+ if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE)
+ break;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ sec_flavor->flavor = be32_to_cpup(p);
+
+ if (sec_flavor->flavor == RPC_AUTH_GSS) {
+ if (decode_secinfo_gss(xdr, sec_flavor))
+ break;
+ }
+ }
+
+ return 0;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
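
One plausible consumer of the decoded list, sketched for orientation;
only struct nfs4_secinfo_flavors and its fields come from this patch,
and demo_gss_to_pseudoflavor() is a hypothetical mapper from a GSS
triple to an RPC pseudoflavor:

static rpc_authflavor_t demo_gss_to_pseudoflavor(struct nfs4_secinfo_flavor *f);

static rpc_authflavor_t demo_pick_flavor(struct nfs4_secinfo_flavors *flavors)
{
        unsigned int i;

        for (i = 0; i < flavors->num_flavors; i++) {
                struct nfs4_secinfo_flavor *f = &flavors->flavors[i];

                if (f->flavor == RPC_AUTH_UNIX || f->flavor == RPC_AUTH_NULL)
                        return f->flavor;       /* usable without a GSS context */
                if (f->flavor == RPC_AUTH_GSS)
                        return demo_gss_to_pseudoflavor(f);
        }
        return RPC_AUTH_UNIX;   /* nothing recognized; fall back to AUTH_SYS */
}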
#if defined(CONFIG_NFS_V4_1)
static int decode_exchange_id(struct xdr_stream *xdr,
struct nfs41_exchange_id_res *res)
@@ -4950,6 +5139,9 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
__be32 *p;
int status;
u32 layout_count;
+ struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+ struct kvec *iov = rcvbuf->head;
+ u32 hdrlen, recvd;
status = decode_op_hdr(xdr, OP_LAYOUTGET);
if (status)
@@ -4966,17 +5158,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
return -EINVAL;
}
- p = xdr_inline_decode(xdr, 24);
+ p = xdr_inline_decode(xdr, 28);
if (unlikely(!p))
goto out_overflow;
p = xdr_decode_hyper(p, &res->range.offset);
p = xdr_decode_hyper(p, &res->range.length);
res->range.iomode = be32_to_cpup(p++);
res->type = be32_to_cpup(p++);
-
- status = decode_opaque_inline(xdr, &res->layout.len, (char **)&p);
- if (unlikely(status))
- return status;
+ res->layoutp->len = be32_to_cpup(p);
dprintk("%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n",
__func__,
@@ -4984,12 +5173,18 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
(unsigned long)res->range.length,
res->range.iomode,
res->type,
- res->layout.len);
+ res->layoutp->len);
- /* nfs4_proc_layoutget allocated a single page */
- if (res->layout.len > PAGE_SIZE)
- return -ENOMEM;
- memcpy(res->layout.buf, p, res->layout.len);
+ hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
+ recvd = req->rq_rcv_buf.len - hdrlen;
+ if (res->layoutp->len > recvd) {
+ dprintk("NFS: server cheating in layoutget reply: "
+ "layout len %u > recvd %u\n",
+ res->layoutp->len, recvd);
+ return -EINVAL;
+ }
+
+ xdr_read_pages(xdr, res->layoutp->len);
if (layout_count > 1) {
/* We only handle a length one array at the moment. Any
@@ -5006,6 +5201,35 @@ out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
+
+static int decode_layoutcommit(struct xdr_stream *xdr,
+ struct rpc_rqst *req,
+ struct nfs4_layoutcommit_res *res)
+{
+ __be32 *p;
+ __u32 sizechanged;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_LAYOUTCOMMIT);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ sizechanged = be32_to_cpup(p);
+
+ if (sizechanged) {
+ /* throw away new size */
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
#endif /* CONFIG_NFS_V4_1 */
/*
@@ -5723,8 +5947,9 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_commit(xdr, res);
if (status)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ if (res->fattr)
+ decode_getfattr(xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -5919,6 +6144,32 @@ out:
return status;
}
+/*
+ * Decode SECINFO response
+ */
+static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_secinfo_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_secinfo(xdr, res);
+ if (status)
+ goto out;
+out:
+ return status;
+}
+
#if defined(CONFIG_NFS_V4_1)
/*
* Decode EXCHANGE_ID response
@@ -6066,6 +6317,34 @@ static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
out:
return status;
}
+
+/*
+ * Decode LAYOUTCOMMIT response
+ */
+static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutcommit_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_layoutcommit(xdr, rqstp, res);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
+out:
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
/**
@@ -6180,10 +6459,6 @@ static struct {
{ NFS4ERR_SYMLINK, -ELOOP },
{ NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_DEADLOCK, -EDEADLK },
- { NFS4ERR_WRONGSEC, -EPERM }, /* FIXME: this needs
- * to be handled by a
- * middle-layer.
- */
{ -1, -EIO }
};
@@ -6258,6 +6533,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(SETACL, enc_setacl, dec_setacl),
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
+ PROC(SECINFO, enc_secinfo, dec_secinfo),
#if defined(CONFIG_NFS_V4_1)
PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
PROC(CREATE_SESSION, enc_create_session, dec_create_session),
@@ -6267,6 +6543,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
+ PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 23e794410669..c80add6e2213 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -135,14 +135,14 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
nfs_unlock_request(req);
}
-/**
+/*
* nfs_clear_request - Free up all resources allocated to the request
* @req:
*
* Release page and open context resources associated with a read/write
* request after it has completed.
*/
-void nfs_clear_request(struct nfs_page *req)
+static void nfs_clear_request(struct nfs_page *req)
{
struct page *page = req->wb_page;
struct nfs_open_context *ctx = req->wb_context;
@@ -223,6 +223,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_count = 0;
desc->pg_bsize = bsize;
desc->pg_base = 0;
+ desc->pg_moreio = 0;
desc->pg_inode = inode;
desc->pg_doio = doio;
desc->pg_ioflags = io_flags;
@@ -335,9 +336,11 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
while (!nfs_pageio_do_add_request(desc, req)) {
+ desc->pg_moreio = 1;
nfs_pageio_doio(desc);
if (desc->pg_error < 0)
return 0;
+ desc->pg_moreio = 0;
}
return 1;
}
@@ -395,6 +398,7 @@ int nfs_scan_list(struct nfs_inode *nfsi,
pgoff_t idx_end;
int found, i;
int res;
+ struct list_head *list;
res = 0;
if (npages == 0)
@@ -415,10 +419,10 @@ int nfs_scan_list(struct nfs_inode *nfsi,
idx_start = req->wb_index + 1;
if (nfs_set_page_tag_locked(req)) {
kref_get(&req->wb_kref);
- nfs_list_remove_request(req);
radix_tree_tag_clear(&nfsi->nfs_page_tree,
req->wb_index, tag);
- nfs_list_add_request(req, dst);
+ list = pnfs_choose_commit_list(req, dst);
+ nfs_list_add_request(req, list);
res++;
if (res == INT_MAX)
goto out;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index f38813a0a295..d9ab97269ce6 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -259,6 +259,7 @@ put_lseg(struct pnfs_layout_segment *lseg)
pnfs_free_lseg_list(&free_me);
}
}
+EXPORT_SYMBOL_GPL(put_lseg);
static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
@@ -471,6 +472,9 @@ send_layoutget(struct pnfs_layout_hdr *lo,
struct nfs_server *server = NFS_SERVER(ino);
struct nfs4_layoutget *lgp;
struct pnfs_layout_segment *lseg = NULL;
+ struct page **pages = NULL;
+ int i;
+ u32 max_resp_sz, max_pages;
dprintk("--> %s\n", __func__);
@@ -478,6 +482,21 @@ send_layoutget(struct pnfs_layout_hdr *lo,
lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
if (lgp == NULL)
return NULL;
+
+ /* allocate pages for xdr post processing */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ max_pages = max_resp_sz >> PAGE_SHIFT;
+
+ pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto out_err_free;
+
+ for (i = 0; i < max_pages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_err_free;
+ }
+
lgp->args.minlength = NFS4_MAX_UINT64;
lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
lgp->args.range.iomode = iomode;
@@ -486,6 +505,8 @@ send_layoutget(struct pnfs_layout_hdr *lo,
lgp->args.type = server->pnfs_curr_ld->id;
lgp->args.inode = ino;
lgp->args.ctx = get_nfs_open_context(ctx);
+ lgp->args.layout.pages = pages;
+ lgp->args.layout.pglen = max_pages * PAGE_SIZE;
lgp->lsegpp = &lseg;
/* Synchronously retrieve layout information from server and
@@ -496,7 +517,26 @@ send_layoutget(struct pnfs_layout_hdr *lo,
/* remember that LAYOUTGET failed and suspend trying */
set_bit(lo_fail_bit(iomode), &lo->plh_flags);
}
+
+ /* free xdr pages */
+ for (i = 0; i < max_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+
return lseg;
+
+out_err_free:
+ /* free any allocated xdr pages and lgp, which is unused on this path */
+ if (pages) {
+ for (i = 0; i < max_pages; i++) {
+ if (!pages[i])
+ break;
+ __free_page(pages[i]);
+ }
+ kfree(pages);
+ }
+ kfree(lgp);
+ return NULL;
}
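
For a concrete sense of the allocation above: with a negotiated
max_resp_sz of 64 KiB and 4 KiB pages, max_pages = 65536 >> PAGE_SHIFT
= 16, so sixteen pages are set up to receive the LAYOUTGET opaque body
that decode_layoutget() later consumes with xdr_read_pages().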
bool pnfs_roc(struct inode *ino)
@@ -945,3 +985,105 @@ pnfs_try_to_read_data(struct nfs_read_data *rdata,
dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
return trypnfs;
}
+
+/*
+ * Currently there is only one (whole file) write lseg.
+ */
+static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode)
+{
+ struct pnfs_layout_segment *lseg, *rv = NULL;
+
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
+ if (lseg->pls_range.iomode == IOMODE_RW)
+ rv = lseg;
+ return rv;
+}
+
+void
+pnfs_set_layoutcommit(struct nfs_write_data *wdata)
+{
+ struct nfs_inode *nfsi = NFS_I(wdata->inode);
+ loff_t end_pos = wdata->args.offset + wdata->res.count;
+
+ spin_lock(&nfsi->vfs_inode.i_lock);
+ if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
+ /* references matched in nfs4_layoutcommit_release */
+ get_lseg(wdata->lseg);
+ wdata->lseg->pls_lc_cred =
+ get_rpccred(wdata->args.context->state->owner->so_cred);
+ mark_inode_dirty_sync(wdata->inode);
+ dprintk("%s: Set layoutcommit for inode %lu ",
+ __func__, wdata->inode->i_ino);
+ }
+ if (end_pos > wdata->lseg->pls_end_pos)
+ wdata->lseg->pls_end_pos = end_pos;
+ spin_unlock(&nfsi->vfs_inode.i_lock);
+}
+EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
+
+/*
+ * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
+ * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
+ * data to disk to allow the server to recover the data if it crashes.
+ * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
+ * is off and a COMMIT is sent to a data server, or when WRITEs to a
+ * data server return NFS_DATA_SYNC.
+ */
+int
+pnfs_layoutcommit_inode(struct inode *inode, bool sync)
+{
+ struct nfs4_layoutcommit_data *data;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct pnfs_layout_segment *lseg;
+ struct rpc_cred *cred;
+ loff_t end_pos;
+ int status = 0;
+
+ dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
+
+ if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
+ return 0;
+
+ /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ mark_inode_dirty_sync(inode);
+ status = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock(&inode->i_lock);
+ if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
+ spin_unlock(&inode->i_lock);
+ kfree(data);
+ goto out;
+ }
+ /*
+ * There is currently only one (whole-file) write lseg; it took a
+ * reference in pnfs_set_layoutcommit, so it will be found here.
+ */
+ lseg = pnfs_list_write_lseg(inode);
+
+ end_pos = lseg->pls_end_pos;
+ cred = lseg->pls_lc_cred;
+ lseg->pls_end_pos = 0;
+ lseg->pls_lc_cred = NULL;
+
+ memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
+ sizeof(nfsi->layout->plh_stateid.data));
+ spin_unlock(&inode->i_lock);
+
+ data->args.inode = inode;
+ data->lseg = lseg;
+ data->cred = cred;
+ nfs_fattr_init(&data->fattr);
+ data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
+ data->res.fattr = &data->fattr;
+ data->args.lastbytewritten = end_pos - 1;
+ data->res.server = NFS_SERVER(inode);
+
+ status = nfs4_proc_layoutcommit(data, sync);
+out:
+ dprintk("<-- %s status %d\n", __func__, status);
+ return status;
+}
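
A hedged sketch of a call site for the new helper; the fsync-style
wrapper is illustrative, only pnfs_layoutcommit_inode() comes from this
patch:

static int demo_datasync(struct inode *inode)
{
        /* Push the layout driver's view of size and change attributes
         * to the MDS before reporting the data as stable.  sync=true
         * blocks until the LAYOUTCOMMIT reply (or error) comes back;
         * sync=false fires the RPC and returns immediately. */
        return pnfs_layoutcommit_inode(inode, true);
}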
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 6380b9405bcd..bc4827202e7a 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -43,6 +43,8 @@ struct pnfs_layout_segment {
atomic_t pls_refcount;
unsigned long pls_flags;
struct pnfs_layout_hdr *pls_layout;
+ struct rpc_cred *pls_lc_cred; /* LAYOUTCOMMIT credential */
+ loff_t pls_end_pos; /* LAYOUTCOMMIT write end */
};
enum pnfs_try_status {
@@ -74,6 +76,13 @@ struct pnfs_layoutdriver_type {
/* test for nfs page cache coalescing */
int (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
+ /* Returns true if layoutdriver wants to divert this request to
+ * driver's commit routine.
+ */
+ bool (*mark_pnfs_commit)(struct pnfs_layout_segment *lseg);
+ struct list_head * (*choose_commit_list) (struct nfs_page *req);
+ int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how);
+
/*
* Return PNFS_ATTEMPTED to indicate the layout code has attempted
* I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS
@@ -100,7 +109,6 @@ struct pnfs_device {
unsigned int layout_type;
unsigned int mincount;
struct page **pages;
- void *area;
unsigned int pgbase;
unsigned int pglen;
};
@@ -145,7 +153,8 @@ bool pnfs_roc(struct inode *ino);
void pnfs_roc_release(struct inode *ino);
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
-
+void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
+int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
static inline int lo_fail_bit(u32 iomode)
{
@@ -169,6 +178,51 @@ static inline int pnfs_enabled_sb(struct nfs_server *nfss)
return nfss->pnfs_curr_ld != NULL;
}
+static inline void
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+ if (lseg) {
+ struct pnfs_layoutdriver_type *ld;
+
+ ld = NFS_SERVER(req->wb_page->mapping->host)->pnfs_curr_ld;
+ if (ld->mark_pnfs_commit && ld->mark_pnfs_commit(lseg)) {
+ set_bit(PG_PNFS_COMMIT, &req->wb_flags);
+ req->wb_commit_lseg = get_lseg(lseg);
+ }
+ }
+}
+
+static inline int
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+{
+ if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags))
+ return PNFS_NOT_ATTEMPTED;
+ return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how);
+}
+
+static inline struct list_head *
+pnfs_choose_commit_list(struct nfs_page *req, struct list_head *mds)
+{
+ struct list_head *rv;
+
+ if (test_and_clear_bit(PG_PNFS_COMMIT, &req->wb_flags)) {
+ struct inode *inode = req->wb_commit_lseg->pls_layout->plh_inode;
+
+ set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags);
+ rv = NFS_SERVER(inode)->pnfs_curr_ld->choose_commit_list(req);
+ /* matched by ref taken when PG_PNFS_COMMIT is set */
+ put_lseg(req->wb_commit_lseg);
+ } else
+ rv = mds;
+ return rv;
+}
+
+static inline void pnfs_clear_request_commit(struct nfs_page *req)
+{
+ if (test_and_clear_bit(PG_PNFS_COMMIT, &req->wb_flags))
+ put_lseg(req->wb_commit_lseg);
+}
+
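
Illustrative wiring of the three new hooks by a layout driver; the
demo_* helpers and demo_commit_bucket are hypothetical, only the field
names come from struct pnfs_layoutdriver_type above:

static LIST_HEAD(demo_commit_bucket);

static bool demo_mark_pnfs_commit(struct pnfs_layout_segment *lseg)
{
        return true;    /* divert every request on this lseg */
}

static struct list_head *demo_choose_commit_list(struct nfs_page *req)
{
        return &demo_commit_bucket;     /* driver-private commit list */
}

static int demo_commit_pagelist(struct inode *inode,
                                struct list_head *mds_pages, int how)
{
        /* send COMMITs to the data servers; mds_pages holds whatever
         * was not diverted and still belongs to the MDS */
        return PNFS_ATTEMPTED;
}

static struct pnfs_layoutdriver_type demo_layout_type = {
        .mark_pnfs_commit   = demo_mark_pnfs_commit,
        .choose_commit_list = demo_choose_commit_list,
        .commit_pagelist    = demo_commit_pagelist,
};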
#else /* CONFIG_NFS_V4_1 */
static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
@@ -252,6 +306,31 @@ pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *ino)
pgio->pg_test = NULL;
}
+static inline void
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+}
+
+static inline int
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+{
+ return PNFS_NOT_ATTEMPTED;
+}
+
+static inline struct list_head *
+pnfs_choose_commit_list(struct nfs_page *req, struct list_head *mds)
+{
+ return mds;
+}
+
+static inline void pnfs_clear_request_commit(struct nfs_page *req)
+{
+}
+
+static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
+{
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b8ec170f2a0f..ac40b8535d7e 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -177,7 +177,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct inode *dir, struct qstr *name,
+nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_diropargs arg = {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 47a3ad63e0d5..af0c6279a4a7 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -59,6 +59,7 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
}
return p;
}
+EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
void nfs_commit_free(struct nfs_write_data *p)
{
@@ -66,6 +67,7 @@ void nfs_commit_free(struct nfs_write_data *p)
kfree(p->pagevec);
mempool_free(p, nfs_commit_mempool);
}
+EXPORT_SYMBOL_GPL(nfs_commit_free);
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
@@ -179,8 +181,8 @@ static int wb_priority(struct writeback_control *wbc)
if (wbc->for_reclaim)
return FLUSH_HIGHPRI | FLUSH_STABLE;
if (wbc->for_kupdate || wbc->for_background)
- return FLUSH_LOWPRI;
- return 0;
+ return FLUSH_LOWPRI | FLUSH_COND_STABLE;
+ return FLUSH_COND_STABLE;
}
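
The idea behind FLUSH_COND_STABLE is a conditionally stable flush: it
may be promoted to a stable WRITE only when the whole dirty set goes out
in a single RPC and nothing is already waiting on the commit list, which
is exactly what the desc->pg_moreio / ncommit / wsize checks added to
nfs_flush_multi() and nfs_flush_one() below enforce.  For example, a
16 KiB flush against a 32 KiB wsize with an empty commit list keeps the
flag and goes out as one NFS_FILE_SYNC WRITE, saving the follow-up
COMMIT; anything larger falls back to unstable WRITE plus COMMIT.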
/*
@@ -387,11 +389,8 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
spin_lock(&inode->i_lock);
error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
BUG_ON(error);
- if (!nfsi->npages) {
- igrab(inode);
- if (nfs_have_delegation(inode, FMODE_WRITE))
- nfsi->change_attr++;
- }
+ if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
+ nfsi->change_attr++;
set_bit(PG_MAPPED, &req->wb_flags);
SetPagePrivate(req->wb_page);
set_page_private(req->wb_page, (unsigned long)req);
@@ -421,11 +420,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
clear_bit(PG_MAPPED, &req->wb_flags);
radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
nfsi->npages--;
- if (!nfsi->npages) {
- spin_unlock(&inode->i_lock);
- iput(inode);
- } else
- spin_unlock(&inode->i_lock);
+ spin_unlock(&inode->i_lock);
nfs_release_request(req);
}
@@ -441,7 +436,7 @@ nfs_mark_request_dirty(struct nfs_page *req)
* Add a request to the inode's commit list.
*/
static void
-nfs_mark_request_commit(struct nfs_page *req)
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
{
struct inode *inode = req->wb_context->path.dentry->d_inode;
struct nfs_inode *nfsi = NFS_I(inode);
@@ -453,6 +448,7 @@ nfs_mark_request_commit(struct nfs_page *req)
NFS_PAGE_TAG_COMMIT);
nfsi->ncommit++;
spin_unlock(&inode->i_lock);
+ pnfs_mark_request_commit(req, lseg);
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
@@ -474,14 +470,18 @@ nfs_clear_request_commit(struct nfs_page *req)
static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
- return data->verf.committed != NFS_FILE_SYNC;
+ if (data->verf.committed == NFS_DATA_SYNC)
+ return data->lseg == NULL;
+ else
+ return data->verf.committed != NFS_FILE_SYNC;
}
static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req)
+int nfs_reschedule_unstable_write(struct nfs_page *req,
+ struct nfs_write_data *data)
{
if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
- nfs_mark_request_commit(req);
+ nfs_mark_request_commit(req, data->lseg);
return 1;
}
if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
@@ -492,7 +492,7 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
}
#else
static inline void
-nfs_mark_request_commit(struct nfs_page *req)
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
{
}
@@ -509,7 +509,8 @@ int nfs_write_need_commit(struct nfs_write_data *data)
}
static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req)
+int nfs_reschedule_unstable_write(struct nfs_page *req,
+ struct nfs_write_data *data)
{
return 0;
}
@@ -612,9 +613,11 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
}
if (nfs_clear_request_commit(req) &&
- radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
- req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
+ radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
+ req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) {
NFS_I(inode)->ncommit--;
+ pnfs_clear_request_commit(req);
+ }
/* Okay, the request matches. Update the region */
if (offset < req->wb_offset) {
@@ -762,11 +765,12 @@ int nfs_updatepage(struct file *file, struct page *page,
return status;
}
-static void nfs_writepage_release(struct nfs_page *req)
+static void nfs_writepage_release(struct nfs_page *req,
+ struct nfs_write_data *data)
{
struct page *page = req->wb_page;
- if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
+ if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
nfs_inode_remove_request(req);
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
@@ -863,7 +867,7 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
data->args.context = get_nfs_open_context(req->wb_context);
data->args.lock_context = req->wb_lock_context;
data->args.stable = NFS_UNSTABLE;
- if (how & FLUSH_STABLE) {
+ if (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
data->args.stable = NFS_DATA_SYNC;
if (!nfs_need_commit(NFS_I(inode)))
data->args.stable = NFS_FILE_SYNC;
@@ -912,6 +916,12 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
nfs_list_remove_request(req);
+ if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
+ (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
+ desc->pg_count > wsize))
+ desc->pg_ioflags &= ~FLUSH_COND_STABLE;
+
nbytes = desc->pg_count;
do {
size_t len = min(nbytes, wsize);
@@ -1002,6 +1012,10 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
if ((!lseg) && list_is_singular(&data->pages))
lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
+ if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
+ (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
+ desc->pg_ioflags &= ~FLUSH_COND_STABLE;
+
/* Set up the argument struct */
ret = nfs_write_rpcsetup(req, data, &nfs_write_full_ops, desc->pg_count, 0, lseg, desc->pg_ioflags);
out:
@@ -1074,7 +1088,7 @@ static void nfs_writeback_release_partial(void *calldata)
out:
if (atomic_dec_and_test(&req->wb_complete))
- nfs_writepage_release(req);
+ nfs_writepage_release(req, data);
nfs_writedata_release(calldata);
}
@@ -1141,7 +1155,7 @@ static void nfs_writeback_release_full(void *calldata)
if (nfs_write_need_commit(data)) {
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- nfs_mark_request_commit(req);
+ nfs_mark_request_commit(req, data->lseg);
dprintk(" marked for commit\n");
goto next;
}
@@ -1251,57 +1265,82 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
+ int ret;
+
if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
return 1;
- if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags,
- NFS_INO_COMMIT, nfs_wait_bit_killable,
- TASK_KILLABLE))
- return 1;
- return 0;
+ if (!may_wait)
+ return 0;
+ ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
+ NFS_INO_COMMIT,
+ nfs_wait_bit_killable,
+ TASK_KILLABLE);
+ return (ret < 0) ? ret : 1;
}
-static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
clear_bit(NFS_INO_COMMIT, &nfsi->flags);
smp_mb__after_clear_bit();
wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}
+EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
-
-static void nfs_commitdata_release(void *data)
+void nfs_commitdata_release(void *data)
{
struct nfs_write_data *wdata = data;
+ put_lseg(wdata->lseg);
put_nfs_open_context(wdata->args.context);
nfs_commit_free(wdata);
}
+EXPORT_SYMBOL_GPL(nfs_commitdata_release);
-/*
- * Set up the argument/result storage required for the RPC call.
- */
-static int nfs_commit_rpcsetup(struct list_head *head,
- struct nfs_write_data *data,
- int how)
+int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how)
{
- struct nfs_page *first = nfs_list_entry(head->next);
- struct inode *inode = first->wb_context->path.dentry->d_inode;
- int priority = flush_task_priority(how);
struct rpc_task *task;
+ int priority = flush_task_priority(how);
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
- .rpc_cred = first->wb_context->cred,
+ .rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
- .rpc_client = NFS_CLIENT(inode),
+ .rpc_client = clnt,
.rpc_message = &msg,
- .callback_ops = &nfs_commit_ops,
+ .callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
.priority = priority,
};
+ /* Set up the initial task struct. */
+ NFS_PROTO(data->inode)->commit_setup(data, &msg);
+
+ dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ if (how & FLUSH_SYNC)
+ rpc_wait_for_completion_task(task);
+ rpc_put_task(task);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs_initiate_commit);
+
+/*
+ * Set up the argument/result storage required for the RPC call.
+ */
+void nfs_init_commit(struct nfs_write_data *data,
+ struct list_head *head,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs_page *first = nfs_list_entry(head->next);
+ struct inode *inode = first->wb_context->path.dentry->d_inode;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
@@ -1309,7 +1348,9 @@ static int nfs_commit_rpcsetup(struct list_head *head,
list_splice_init(head, &data->pages);
data->inode = inode;
- data->cred = msg.rpc_cred;
+ data->cred = first->wb_context->cred;
+ data->lseg = lseg; /* reference transferred */
+ data->mds_ops = &nfs_commit_ops;
data->args.fh = NFS_FH(data->inode);
/* Note: we always request a commit of the entire inode */
@@ -1320,20 +1361,25 @@ static int nfs_commit_rpcsetup(struct list_head *head,
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
+}
+EXPORT_SYMBOL_GPL(nfs_init_commit);
- /* Set up the initial task struct. */
- NFS_PROTO(inode)->commit_setup(data, &msg);
-
- dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+void nfs_retry_commit(struct list_head *page_list,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs_page *req;
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- return PTR_ERR(task);
- if (how & FLUSH_SYNC)
- rpc_wait_for_completion_task(task);
- rpc_put_task(task);
- return 0;
+ while (!list_empty(page_list)) {
+ req = nfs_list_entry(page_list->next);
+ nfs_list_remove_request(req);
+ nfs_mark_request_commit(req, lseg);
+ dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ BDI_RECLAIMABLE);
+ nfs_clear_page_tag_locked(req);
+ }
}
+EXPORT_SYMBOL_GPL(nfs_retry_commit);
/*
* Commit dirty pages
@@ -1342,7 +1388,6 @@ static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
struct nfs_write_data *data;
- struct nfs_page *req;
data = nfs_commitdata_alloc();
@@ -1350,17 +1395,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
goto out_bad;
/* Set up the argument struct */
- return nfs_commit_rpcsetup(head, data, how);
+ nfs_init_commit(data, head, NULL);
+ return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
out_bad:
- while (!list_empty(head)) {
- req = nfs_list_entry(head->next);
- nfs_list_remove_request(req);
- nfs_mark_request_commit(req);
- dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
- BDI_RECLAIMABLE);
- nfs_clear_page_tag_locked(req);
- }
+ nfs_retry_commit(head, NULL);
nfs_commit_clear_lock(NFS_I(inode));
return -ENOMEM;
}
@@ -1380,10 +1418,9 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
return;
}
-static void nfs_commit_release(void *calldata)
+void nfs_commit_release_pages(struct nfs_write_data *data)
{
- struct nfs_write_data *data = calldata;
- struct nfs_page *req;
+ struct nfs_page *req;
int status = data->task.tk_status;
while (!list_empty(&data->pages)) {
@@ -1417,6 +1454,14 @@ static void nfs_commit_release(void *calldata)
next:
nfs_clear_page_tag_locked(req);
}
+}
+EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
+
+static void nfs_commit_release(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+
+ nfs_commit_release_pages(data);
nfs_commit_clear_lock(NFS_I(data->inode));
nfs_commitdata_release(calldata);
}
@@ -1433,23 +1478,30 @@ int nfs_commit_inode(struct inode *inode, int how)
{
LIST_HEAD(head);
int may_wait = how & FLUSH_SYNC;
- int res = 0;
+ int res;
- if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
+ res = nfs_commit_set_lock(NFS_I(inode), may_wait);
+ if (res <= 0)
goto out_mark_dirty;
spin_lock(&inode->i_lock);
res = nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&inode->i_lock);
if (res) {
- int error = nfs_commit_list(inode, &head, how);
+ int error;
+
+ error = pnfs_commit_list(inode, &head, how);
+ if (error == PNFS_NOT_ATTEMPTED)
+ error = nfs_commit_list(inode, &head, how);
if (error < 0)
return error;
- if (may_wait)
- wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
- nfs_wait_bit_killable,
- TASK_KILLABLE);
- else
+ if (!may_wait)
goto out_mark_dirty;
+ error = wait_on_bit(&NFS_I(inode)->flags,
+ NFS_INO_COMMIT,
+ nfs_wait_bit_killable,
+ TASK_KILLABLE);
+ if (error < 0)
+ return error;
} else
nfs_commit_clear_lock(NFS_I(inode));
return res;
@@ -1503,7 +1555,22 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- return nfs_commit_unstable_pages(inode, wbc);
+ int ret;
+
+ ret = nfs_commit_unstable_pages(inode, wbc);
+ if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
+ int status;
+ bool sync = true;
+
+ if (wbc->sync_mode == WB_SYNC_NONE || wbc->nonblocking ||
+ wbc->for_background)
+ sync = false;
+
+ status = pnfs_layoutcommit_inode(inode, sync);
+ if (status < 0)
+ return status;
+ }
+ return ret;
}
/*
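The write.c refactor above splits the old nfs_commit_rpcsetup() into nfs_init_commit(), which fills in the argument/result structs, and nfs_initiate_commit(), which fires the RPC; both are exported so a pNFS layout driver can reuse the metadata-server commit path. A minimal sketch of a caller driving the split API, modeled directly on nfs_commit_list() above (illustrative only, not part of the patch):

    /* Sketch: commit the nfs_page requests on "head"; lseg may be NULL
     * for the plain (non-pNFS) metadata-server path. */
    static int example_commit(struct inode *inode, struct list_head *head,
                              struct pnfs_layout_segment *lseg, int how)
    {
            struct nfs_write_data *data = nfs_commitdata_alloc();

            if (!data) {
                    nfs_retry_commit(head, lseg);   /* re-mark requests for commit */
                    nfs_commit_clear_lock(NFS_I(inode));
                    return -ENOMEM;
            }
            nfs_init_commit(data, head, lseg);      /* takes over the lseg reference */
            return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
    }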
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index 84c27d69d421..6940439bd609 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -117,7 +117,6 @@ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
* invoked in contexts where a memory allocation failure is
* fatal. Fortunately this fake ACL is small enough to
* construct on the stack. */
- memset(acl2, 0, sizeof(acl2));
posix_acl_init(acl2, 4);
/* Insert entries in canonical order: other orders seem
@@ -174,7 +173,7 @@ xdr_nfsace_decode(struct xdr_array2_desc *desc, void *elem)
return -EINVAL;
break;
case ACL_MASK:
- /* Solaris sometimes sets additonal bits in the mask */
+ /* Solaris sometimes sets additional bits in the mask */
entry->e_perm &= S_IRWXO;
break;
default:
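The memset() removed above is redundant because posix_acl_init() already initializes the fields that matter before the entries are filled in. Paraphrased from include/linux/posix_acl.h of this era (check the header for the authoritative version):

    static inline struct posix_acl *
    posix_acl_init(struct posix_acl *acl, int count)
    {
            atomic_set(&acl->a_refcount, 1);  /* one reference for the caller */
            acl->a_count = count;             /* number of ACL entries to follow */
            return acl;
    }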
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8b31e5f8795d..ad000aeb21a2 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -299,7 +299,6 @@ svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
#define EXPORT_HASHBITS 8
#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS)
-#define EXPORT_HASHMASK (EXPORT_HASHMAX -1)
static struct cache_head *export_table[EXPORT_HASHMAX];
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 7e84a852cdae..ad48faca20fc 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -702,7 +702,7 @@ nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
*p++ = htonl(resp->eof);
*p++ = htonl(resp->count); /* xdr opaque count */
xdr_ressize_check(rqstp, p);
- /* now update rqstp->rq_res to reflect data aswell */
+ /* now update rqstp->rq_res to reflect data as well */
rqstp->rq_res.page_len = resp->count;
if (resp->count & 3) {
/* need to pad the tail */
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 6d2c397d458b..55780a22fdbd 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -63,7 +63,6 @@ struct ent {
#define ENT_HASHBITS 8
#define ENT_HASHMAX (1 << ENT_HASHBITS)
-#define ENT_HASHMASK (ENT_HASHMAX - 1)
static void
ent_init(struct cache_head *cnew, struct cache_head *citm)
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index db52546143d1..5fcb1396a7e3 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -984,8 +984,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
void *);
enum nfsd4_op_flags {
ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
- ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */
- ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */
+ ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
+ ALLOWED_AS_FIRST_OP = 1 << 2, /* ops required first in compound */
};
struct nfsd4_operation {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7b566ec14e18..4b36ec3eb8ea 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -148,7 +148,7 @@ static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];
/* hash table for nfs4_file */
#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
-#define FILE_HASH_MASK (FILE_HASH_SIZE - 1)
+
/* hash table for (open)nfs4_stateid */
#define STATEID_HASH_BITS 10
#define STATEID_HASH_SIZE (1 << STATEID_HASH_BITS)
@@ -316,64 +316,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head client_lru;
static struct list_head close_lru;
-static void unhash_generic_stateid(struct nfs4_stateid *stp)
-{
- list_del(&stp->st_hash);
- list_del(&stp->st_perfile);
- list_del(&stp->st_perstateowner);
-}
-
-static void free_generic_stateid(struct nfs4_stateid *stp)
-{
- put_nfs4_file(stp->st_file);
- kmem_cache_free(stateid_slab, stp);
-}
-
-static void release_lock_stateid(struct nfs4_stateid *stp)
-{
- struct file *file;
-
- unhash_generic_stateid(stp);
- file = find_any_file(stp->st_file);
- if (file)
- locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
- free_generic_stateid(stp);
-}
-
-static void unhash_lockowner(struct nfs4_stateowner *sop)
-{
- struct nfs4_stateid *stp;
-
- list_del(&sop->so_idhash);
- list_del(&sop->so_strhash);
- list_del(&sop->so_perstateid);
- while (!list_empty(&sop->so_stateids)) {
- stp = list_first_entry(&sop->so_stateids,
- struct nfs4_stateid, st_perstateowner);
- release_lock_stateid(stp);
- }
-}
-
-static void release_lockowner(struct nfs4_stateowner *sop)
-{
- unhash_lockowner(sop);
- nfs4_put_stateowner(sop);
-}
-
-static void
-release_stateid_lockowners(struct nfs4_stateid *open_stp)
-{
- struct nfs4_stateowner *lock_sop;
-
- while (!list_empty(&open_stp->st_lockowners)) {
- lock_sop = list_entry(open_stp->st_lockowners.next,
- struct nfs4_stateowner, so_perstateid);
- /* list_del(&open_stp->st_lockowners); */
- BUG_ON(lock_sop->so_is_open_owner);
- release_lockowner(lock_sop);
- }
-}
-
/*
* We store the NONE, READ, WRITE, and BOTH bits separately in the
* st_{access,deny}_bmap field of the stateid, in order to track not
@@ -446,13 +388,71 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp)
return nfs4_access_to_omode(access);
}
-static void release_open_stateid(struct nfs4_stateid *stp)
+static void unhash_generic_stateid(struct nfs4_stateid *stp)
+{
+ list_del(&stp->st_hash);
+ list_del(&stp->st_perfile);
+ list_del(&stp->st_perstateowner);
+}
+
+static void free_generic_stateid(struct nfs4_stateid *stp)
{
int oflag = nfs4_access_bmap_to_omode(stp);
+ nfs4_file_put_access(stp->st_file, oflag);
+ put_nfs4_file(stp->st_file);
+ kmem_cache_free(stateid_slab, stp);
+}
+
+static void release_lock_stateid(struct nfs4_stateid *stp)
+{
+ struct file *file;
+
+ unhash_generic_stateid(stp);
+ file = find_any_file(stp->st_file);
+ if (file)
+ locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
+ free_generic_stateid(stp);
+}
+
+static void unhash_lockowner(struct nfs4_stateowner *sop)
+{
+ struct nfs4_stateid *stp;
+
+ list_del(&sop->so_idhash);
+ list_del(&sop->so_strhash);
+ list_del(&sop->so_perstateid);
+ while (!list_empty(&sop->so_stateids)) {
+ stp = list_first_entry(&sop->so_stateids,
+ struct nfs4_stateid, st_perstateowner);
+ release_lock_stateid(stp);
+ }
+}
+
+static void release_lockowner(struct nfs4_stateowner *sop)
+{
+ unhash_lockowner(sop);
+ nfs4_put_stateowner(sop);
+}
+
+static void
+release_stateid_lockowners(struct nfs4_stateid *open_stp)
+{
+ struct nfs4_stateowner *lock_sop;
+
+ while (!list_empty(&open_stp->st_lockowners)) {
+ lock_sop = list_entry(open_stp->st_lockowners.next,
+ struct nfs4_stateowner, so_perstateid);
+ /* list_del(&open_stp->st_lockowners); */
+ BUG_ON(lock_sop->so_is_open_owner);
+ release_lockowner(lock_sop);
+ }
+}
+
+static void release_open_stateid(struct nfs4_stateid *stp)
+{
unhash_generic_stateid(stp);
release_stateid_lockowners(stp);
- nfs4_file_put_access(stp->st_file, oflag);
free_generic_stateid(stp);
}
@@ -608,7 +608,8 @@ static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4
u32 maxrpc = nfsd_serv->sv_max_mesg;
new->maxreqs = numslots;
- new->maxresp_cached = slotsize + NFSD_MIN_HDR_SEQ_SZ;
+ new->maxresp_cached = min_t(u32, req->maxresp_cached,
+ slotsize + NFSD_MIN_HDR_SEQ_SZ);
new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
@@ -3054,7 +3055,7 @@ check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
if (ONE_STATEID(stateid) && (flags & RD_STATE))
return nfs_ok;
else if (locks_in_grace()) {
- /* Answer in remaining cases depends on existance of
+ /* Answer in remaining cases depends on existence of
* conflicting state; so we must wait out the grace period. */
return nfserr_grace;
} else if (flags & WR_STATE)
@@ -3674,7 +3675,7 @@ find_lockstateowner_str(struct inode *inode, clientid_t *clid,
/*
* Alloc a lock owner structure.
* Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
- * occured.
+ * occurred.
*
* strhashval = lock_ownerstr_hashval
*/
@@ -3735,6 +3736,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
+ stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
@@ -3749,6 +3751,17 @@ check_lock_length(u64 offset, u64 length)
LOFF_OVERFLOW(offset, length)));
}
+static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access)
+{
+ struct nfs4_file *fp = lock_stp->st_file;
+ int oflag = nfs4_access_to_omode(access);
+
+ if (test_bit(access, &lock_stp->st_access_bmap))
+ return;
+ nfs4_file_get_access(fp, oflag);
+ __set_bit(access, &lock_stp->st_access_bmap);
+}
+
/*
* LOCK operation
*/
@@ -3765,7 +3778,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct file_lock conflock;
__be32 status = 0;
unsigned int strhashval;
- unsigned int cmd;
int err;
dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
@@ -3847,22 +3859,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
switch (lock->lk_type) {
case NFS4_READ_LT:
case NFS4_READW_LT:
- if (find_readable_file(lock_stp->st_file)) {
- nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
- filp = find_readable_file(lock_stp->st_file);
- }
+ filp = find_readable_file(lock_stp->st_file);
+ if (filp)
+ get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
file_lock.fl_type = F_RDLCK;
- cmd = F_SETLK;
- break;
+ break;
case NFS4_WRITE_LT:
case NFS4_WRITEW_LT:
- if (find_writeable_file(lock_stp->st_file)) {
- nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
- filp = find_writeable_file(lock_stp->st_file);
- }
+ filp = find_writeable_file(lock_stp->st_file);
+ if (filp)
+ get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
file_lock.fl_type = F_WRLCK;
- cmd = F_SETLK;
- break;
+ break;
default:
status = nfserr_inval;
goto out;
@@ -3886,7 +3894,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* Note: locks.c uses the BKL to protect the inode's lock list.
*/
- err = vfs_lock_file(filp, cmd, &file_lock, &conflock);
+ err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
switch (-err) {
case 0: /* success! */
update_stateid(&lock_stp->st_stateid);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 615f0a9f0600..c6766af00d98 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
u32 dummy;
char *machine_name;
- int i, j;
+ int i;
int nr_secflavs;
READ_BUF(16);
@@ -1215,8 +1215,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy * 4);
- for (j = 0; j < dummy; ++j)
- READ32(dummy);
break;
case RPC_AUTH_GSS:
dprintk("RPC_AUTH_GSS callback secflavor "
@@ -1232,7 +1230,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
READ_BUF(4);
READ32(dummy);
READ_BUF(dummy);
- p += XDR_QUADLEN(dummy);
break;
default:
dprintk("Illegal callback secflavor\n");
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 33b3e2b06779..1f5eae40f34e 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -12,13 +12,14 @@
#include <linux/nfsd/syscall.h>
#include <linux/lockd/lockd.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/gss_api.h>
#include "idmap.h"
#include "nfsd.h"
#include "cache.h"
/*
- * We have a single directory with 9 nodes in it.
+ * We have a single directory with several nodes in it.
*/
enum {
NFSD_Root = 1,
@@ -42,6 +43,7 @@ enum {
NFSD_Versions,
NFSD_Ports,
NFSD_MaxBlkSize,
+ NFSD_SupportedEnctypes,
/*
* The below MUST come last. Otherwise we leave a hole in nfsd_files[]
* with !CONFIG_NFSD_V4 and simple_fill_super() goes oops
@@ -187,6 +189,34 @@ static struct file_operations export_features_operations = {
.release = single_release,
};
+#ifdef CONFIG_SUNRPC_GSS
+static int supported_enctypes_show(struct seq_file *m, void *v)
+{
+ struct gss_api_mech *k5mech;
+
+ k5mech = gss_mech_get_by_name("krb5");
+ if (k5mech == NULL)
+ goto out;
+ if (k5mech->gm_upcall_enctypes != NULL)
+ seq_printf(m, "%s", k5mech->gm_upcall_enctypes);
+ gss_mech_put(k5mech);
+out:
+ return 0;
+}
+
+static int supported_enctypes_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, supported_enctypes_show, NULL);
+}
+
+static struct file_operations supported_enctypes_ops = {
+ .open = supported_enctypes_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_SUNRPC_GSS */
+
extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1397,6 +1427,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
+#ifdef CONFIG_SUNRPC_GSS
+ [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
+#endif /* CONFIG_SUNRPC_GSS */
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
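The supported_krb5_enctypes node added above follows the stock single_open()/seq_file pattern for a read-only nfsd control file. A self-contained sketch of that pattern, with hypothetical example_* names and a placeholder payload:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            /* Emit the file body; a krb5 enctype list would go here. */
            seq_printf(m, "%s\n", "enctypes=18,17,16,23,3,1,2");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_ops = {
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };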
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 4ce005dbf3e6..65ec595e2226 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -451,7 +451,7 @@ nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p,
*p++ = htonl(resp->count);
xdr_ressize_check(rqstp, p);
- /* now update rqstp->rq_res to reflect data aswell */
+ /* now update rqstp->rq_res to reflect data as well */
rqstp->rq_res.page_len = resp->count;
if (resp->count & 3) {
/* need to pad the tail */
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 2d31224b07bf..6bd2f3c21f2b 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -367,16 +367,12 @@ struct nfs4_file {
struct list_head fi_delegations;
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
- /* One each for O_RDONLY, O_WRONLY: */
- atomic_t fi_access[2];
/*
- * Each open stateid contributes 1 to either fi_readers or
- * fi_writers, or both, depending on the open mode. A
- * delegation also takes an fi_readers reference. Lock
- * stateid's take none.
+ * Each open or lock stateid contributes 1 to either
+ * fi_access[O_RDONLY], fi_access[O_WRONLY], or both, depending
+ * on open or lock mode:
*/
- atomic_t fi_readers;
- atomic_t fi_writers;
+ atomic_t fi_access[2];
struct file *fi_deleg_file;
struct file_lock *fi_lease;
atomic_t fi_delegees;
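With fi_readers/fi_writers folded into fi_access[2], an access count is indexed directly by the open flag (O_RDONLY is 0 and O_WRONLY is 1 on Linux, so they address the two slots; O_RDWR bumps both). A hedged sketch of the take side, mirroring how nfs4_file_get_access() behaves in fs/nfsd/nfs4state.c (simplified; the real helper is not shown in this diff):

    static void example_get_access(struct nfs4_file *fp, int oflag)
    {
            if (oflag == O_RDWR) {
                    /* A read-write opener holds one of each. */
                    atomic_inc(&fp->fi_access[O_RDONLY]);
                    atomic_inc(&fp->fi_access[O_WRONLY]);
            } else
                    atomic_inc(&fp->fi_access[oflag]);
    }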
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index ff93025ae2f7..2e1cebde90df 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1749,8 +1749,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (host_err)
goto out_drop_write;
}
- if (host_err)
- goto out_drop_write;
host_err = vfs_rename(fdir, odentry, tdir, ndentry);
if (!host_err) {
host_err = commit_metadata(tfhp);
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 9af34a7e6e13..f5fde36b9e28 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -74,7 +74,7 @@ int nilfs_palloc_freev(struct inode *, __u64 *, size_t);
#define nilfs_set_bit_atomic ext2_set_bit_atomic
#define nilfs_clear_bit_atomic ext2_clear_bit_atomic
-#define nilfs_find_next_zero_bit ext2_find_next_zero_bit
+#define nilfs_find_next_zero_bit find_next_zero_bit_le
/*
* persistent object allocator cache
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 85f7baa15f5d..609cd223eea8 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -34,15 +34,10 @@
#include "page.h"
#include "btnode.h"
-
-static const struct address_space_operations def_btnode_aops = {
- .sync_page = block_sync_page,
-};
-
void nilfs_btnode_cache_init(struct address_space *btnc,
struct backing_dev_info *bdi)
{
- nilfs_mapping_init(btnc, bdi, &def_btnode_aops);
+ nilfs_mapping_init(btnc, bdi);
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 93589fccdd97..397e73258631 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -72,10 +72,9 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* check to see if the page is mapped already (no holes)
*/
- if (PageMappedToDisk(page)) {
- unlock_page(page);
+ if (PageMappedToDisk(page))
goto mapped;
- }
+
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
int fully_mapped = 1;
@@ -90,7 +89,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (fully_mapped) {
SetPageMappedToDisk(page);
- unlock_page(page);
goto mapped;
}
}
@@ -105,16 +103,17 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
- if (unlikely(ret)) {
+ if (ret != VM_FAULT_LOCKED) {
nilfs_transaction_abort(inode->i_sb);
return ret;
}
+ nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
nilfs_transaction_commit(inode->i_sb);
mapped:
SetPageChecked(page);
wait_on_page_writeback(page);
- return 0;
+ return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct nilfs_file_vm_ops = {
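The nilfs_page_mkwrite() changes above track the convention where block_page_mkwrite() returns VM_FAULT_LOCKED on success and leaves the page locked, so the fault handler must propagate that value rather than 0. A skeleton handler written against that convention (names prefixed example_ are placeholders, not nilfs code):

    static int example_get_block(struct inode *inode, sector_t iblock,
                                 struct buffer_head *bh_result, int create);

    static int example_page_mkwrite(struct vm_area_struct *vma,
                                    struct vm_fault *vmf)
    {
            int ret = block_page_mkwrite(vma, vmf, example_get_block);

            if (ret != VM_FAULT_LOCKED)
                    return ret;                     /* failed: page not locked */
            wait_on_page_writeback(vmf->page);      /* page is still locked here */
            return VM_FAULT_LOCKED;
    }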
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index caf9a6a3fb54..1c2a3e23f8b2 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -49,7 +49,6 @@
#include "ifile.h"
static const struct address_space_operations def_gcinode_aops = {
- .sync_page = block_sync_page,
};
/*
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index d5625be236a8..c0aa27490c02 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -280,7 +280,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
const struct address_space_operations nilfs_aops = {
.writepage = nilfs_writepage,
.readpage = nilfs_readpage,
- .sync_page = block_sync_page,
.writepages = nilfs_writepages,
.set_page_dirty = nilfs_set_page_dirty,
.readpages = nilfs_readpages,
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 95c04c2f2b3e..f2469ba6246b 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -113,7 +113,7 @@ static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp,
unsigned int flags, oldflags;
int ret;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *)argp))
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index a0babd2bff6a..a649b05f7069 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -399,7 +399,6 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
static const struct address_space_operations def_mdt_aops = {
.writepage = nilfs_mdt_write_page,
- .sync_page = block_sync_page,
};
static const struct inode_operations def_mdt_iops;
@@ -438,10 +437,6 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}
-static const struct address_space_operations shadow_map_aops = {
- .sync_page = block_sync_page,
-};
-
/**
* nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
* @inode: inode of the metadata file
@@ -455,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data);
- nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
+ nilfs_mapping_init(&shadow->frozen_data, bdi);
address_space_init_once(&shadow->frozen_btnodes);
- nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
+ nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
mi->mi_shadow = shadow;
return 0;
}
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 856e8e4e0b74..a8dd344303cb 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -114,19 +114,19 @@ enum {
* Macros to check inode numbers
*/
#define NILFS_MDT_INO_BITS \
- ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
- 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
- 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
+ ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
+ 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
+ 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
#define NILFS_SYS_INO_BITS \
- ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
+ ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
#define NILFS_MDT_INODE(sb, ino) \
- ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
+ ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
#define NILFS_VALID_INODE(sb, ino) \
- ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
+ ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
/**
* struct nilfs_transaction_info: context information for synchronization
@@ -285,7 +285,7 @@ extern void nilfs_destroy_inode(struct inode *);
extern void nilfs_error(struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
extern void nilfs_warning(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
+ __attribute__ ((format (printf, 3, 4)));
extern struct nilfs_super_block *
nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
extern int nilfs_store_magic_and_option(struct super_block *,
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index a585b35fd6bc..1168059c7efd 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -493,15 +493,14 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
}
void nilfs_mapping_init(struct address_space *mapping,
- struct backing_dev_info *bdi,
- const struct address_space_operations *aops)
+ struct backing_dev_info *bdi)
{
mapping->host = NULL;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->assoc_mapping = NULL;
mapping->backing_dev_info = bdi;
- mapping->a_ops = aops;
+ mapping->a_ops = &empty_aops;
}
/*
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 2a00953ebd5f..f06b79ad7493 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -62,8 +62,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_pages(struct address_space *);
void nilfs_mapping_init(struct address_space *mapping,
- struct backing_dev_info *bdi,
- const struct address_space_operations *aops);
+ struct backing_dev_info *bdi);
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 0f83e93935b2..2853ff20f85a 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -509,7 +509,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* Last BIO is always sent through the following
* submission.
*/
- rw |= REQ_SYNC | REQ_UNPLUG;
+ rw |= REQ_SYNC;
res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 6b1305dc26c0..9fde1c00a296 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -164,7 +164,7 @@ static int process_access_response(struct fsnotify_group *group,
fd, response);
/*
* make sure the response is valid, if invalid we do nothing and either
- * userspace can send a valid responce or we will clean it up after the
+ * userspace can send a valid response or we will clean it up after the
* timeout
*/
switch (response) {
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 4c29fcf557d1..07ea8d3e6ea2 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -22,13 +22,14 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
+#include "../internal.h"
+
/*
* Recalculate the mask of events relevant to a given inode locked.
*/
@@ -237,15 +238,14 @@ out:
* fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
* @list: list of inodes being unmounted (sb->s_inodes)
*
- * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
- * We temporarily drop inode_lock, however, and CAN block.
+ * Called during unmount with no locks held, so needs to be safe against
+ * concurrent modifiers. We temporarily drop inode_sb_list_lock and CAN block.
*/
void fsnotify_unmount_inodes(struct list_head *list)
{
struct inode *inode, *next_i, *need_iput = NULL;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
struct inode *need_iput_tmp;
@@ -254,8 +254,11 @@ void fsnotify_unmount_inodes(struct list_head *list)
* I_WILL_FREE, or I_NEW which is fine because by that point
* the inode cannot have any associated watches.
*/
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
/*
* If i_count is zero, the inode cannot have any watches and
@@ -263,8 +266,10 @@ void fsnotify_unmount_inodes(struct list_head *list)
* evict all inodes with zero i_count from icache which is
* unnecessarily violent and may in fact be illegal to do.
*/
- if (!atomic_read(&inode->i_count))
+ if (!atomic_read(&inode->i_count)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
need_iput_tmp = need_iput;
need_iput = NULL;
@@ -274,22 +279,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
__iget(inode);
else
need_iput_tmp = NULL;
+ spin_unlock(&inode->i_lock);
/* In case the dropping of a reference would nuke next_i. */
if ((&next_i->i_sb_list != list) &&
- atomic_read(&next_i->i_count) &&
- !(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
- __iget(next_i);
- need_iput = next_i;
+ atomic_read(&next_i->i_count)) {
+ spin_lock(&next_i->i_lock);
+ if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+ __iget(next_i);
+ need_iput = next_i;
+ }
+ spin_unlock(&next_i->i_lock);
}
/*
- * We can safely drop inode_lock here because we hold
+ * We can safely drop inode_sb_list_lock here because we hold
* references on both inode and next_i. Also no new inodes
- * will be added since the umount has begun. Finally,
- * iprune_mutex keeps shrink_icache_memory() away.
+ * will be added since the umount has begun.
*/
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
if (need_iput_tmp)
iput(need_iput_tmp);
@@ -301,7 +309,7 @@ void fsnotify_unmount_inodes(struct list_head *list)
iput(inode);
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
}
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index a91b69a6a291..e3cbd746f64a 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -194,10 +194,11 @@ static int idr_callback(int id, void *p, void *data)
static void inotify_free_group_priv(struct fsnotify_group *group)
{
- /* ideally the idr is empty and we won't hit the BUG in teh callback */
+ /* ideally the idr is empty and we won't hit the BUG in the callback */
idr_for_each(&group->inotify_data.idr, idr_callback, group);
idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr);
+ atomic_dec(&group->inotify_data.user->inotify_devs);
free_uid(group->inotify_data.user);
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index bd46e7c8a0ef..8445fbc8985c 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -290,7 +290,6 @@ static int inotify_fasync(int fd, struct file *file, int on)
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
- struct user_struct *user = group->inotify_data.user;
pr_debug("%s: group=%p\n", __func__, group);
@@ -299,8 +298,6 @@ static int inotify_release(struct inode *ignored, struct file *file)
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
fsnotify_put_group(group);
- atomic_dec(&user->inotify_devs);
-
return 0;
}
@@ -697,7 +694,7 @@ retry:
return ret;
}
-static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
+static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
struct fsnotify_group *group;
@@ -710,8 +707,14 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
- group->inotify_data.user = user;
group->inotify_data.fa = NULL;
+ group->inotify_data.user = get_current_user();
+
+ if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
+ inotify_max_user_instances) {
+ fsnotify_put_group(group);
+ return ERR_PTR(-EMFILE);
+ }
return group;
}
@@ -721,7 +724,6 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
- struct user_struct *user;
int ret;
/* Check the IN_* constants for consistency. */
@@ -731,31 +733,16 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
- user = get_current_user();
- if (unlikely(atomic_read(&user->inotify_devs) >=
- inotify_max_user_instances)) {
- ret = -EMFILE;
- goto out_free_uid;
- }
-
/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
- group = inotify_new_group(user, inotify_max_queued_events);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto out_free_uid;
- }
-
- atomic_inc(&user->inotify_devs);
+ group = inotify_new_group(inotify_max_queued_events);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
- if (ret >= 0)
- return ret;
+ if (ret < 0)
+ fsnotify_put_group(group);
- fsnotify_put_group(group);
- atomic_dec(&user->inotify_devs);
-out_free_uid:
- free_uid(user);
return ret;
}
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 325185e514bb..252ab1f6452b 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -24,7 +24,7 @@
* referencing this object. The object typically will live inside the kernel
* with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
* which can find this object holding the appropriate locks, can take a reference
- * and the object itself is guarenteed to survive until the reference is dropped.
+ * and the object itself is guaranteed to survive until the reference is dropped.
*
* LOCKING:
* There are 3 spinlocks involved with fsnotify inode marks and they MUST
@@ -91,7 +91,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
-#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 85eebff6d0d7..e86577d6c5c3 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -23,7 +23,6 @@
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index c3c2c7ac9020..0b1e885b8cf8 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1543,8 +1543,6 @@ err_out:
*/
const struct address_space_operations ntfs_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
#endif /* NTFS_RW */
@@ -1560,8 +1558,6 @@ const struct address_space_operations ntfs_aops = {
*/
const struct address_space_operations ntfs_mst_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
.set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index f5094ee224c1..f14fde2b03d6 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -197,7 +197,7 @@ err_out:
} else if (ctx_needs_reset) {
/*
* If there is no attribute list, restoring the search context
- * is acomplished simply by copying the saved context back over
+ * is accomplished simply by copying the saved context back over
* the caller supplied context. If there is an attribute list,
* things are more complicated as we need to deal with mapping
* of mft records and resulting potential changes in pointers.
@@ -1181,7 +1181,7 @@ not_found:
* for, i.e. if one wants to add the attribute to the mft record this is the
* correct place to insert its attribute list entry into.
*
- * When -errno != -ENOENT, an error occured during the lookup. @ctx->attr is
+ * When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is
* then undefined and in particular you should not rely on it not changing.
*/
int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index 6551c7cbad92..ee4144ce5d7c 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -501,7 +501,7 @@ int ntfs_read_compressed_block(struct page *page)
VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
vol->cluster_size_bits;
/*
- * The first vcn after the last wanted vcn (minumum alignment is again
+ * The first vcn after the last wanted vcn (minimum alignment is again
* PAGE_CACHE_SIZE).
*/
VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
@@ -698,8 +698,7 @@ lock_retry_remap:
"uptodate! Unplugging the disk queue "
"and rescheduling.");
get_bh(tbh);
- blk_run_address_space(mapping);
- schedule();
+ io_schedule();
put_bh(tbh);
if (unlikely(!buffer_uptodate(tbh)))
goto read_err;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index a627ed82c0a3..c05d6dcf77a4 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -54,7 +54,7 @@
*
* Return 1 if the attributes match and 0 if not.
*
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
* allowed to sleep.
*/
int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
@@ -98,7 +98,7 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
*
* Return 0 on success and -errno on error.
*
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
* allowed to sleep. (Hence the GFP_ATOMIC allocation.)
*/
static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
@@ -622,7 +622,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
*/
/* Everyone gets all permissions. */
vi->i_mode |= S_IRWXUGO;
- /* If read-only, noone gets write permissions. */
+ /* If read-only, no one gets write permissions. */
if (IS_RDONLY(vi))
vi->i_mode &= ~S_IWUGO;
if (m->flags & MFT_RECORD_IS_DIRECTORY) {
@@ -2529,7 +2529,7 @@ retry_truncate:
* specifies that the behaviour is unspecified thus we do not
* have to do anything. This means that in our implementation
* in the rare case that the file is mmap()ped and a write
- * occured into the mmap()ped region just beyond the file size
+ * occurred into the mmap()ped region just beyond the file size
* and writepage has not yet been called to write out the page
* (which would clear the area beyond the file size) and we now
* extend the file size to incorporate this dirty region
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 8b2549f672bf..faece7190866 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -286,7 +286,7 @@ typedef le16 MFT_RECORD_FLAGS;
* fragmented. Volume free space includes the empty part of the mft zone and
* when the volume's free 88% are used up, the mft zone is shrunk by a factor
* of 2, thus making more space available for more files/data. This process is
- * repeated everytime there is no more free space except for the mft zone until
+ * repeated every time there is no more free space except for the mft zone until
* there really is no more free space.
*/
@@ -1657,13 +1657,13 @@ typedef enum {
* pointed to by the Owner field was provided by a defaulting mechanism
* rather than explicitly provided by the original provider of the
* security descriptor. This may affect the treatment of the SID with
- * respect to inheritence of an owner.
+ * respect to inheritance of an owner.
*
* SE_GROUP_DEFAULTED - This boolean flag, when set, indicates that the SID in
* the Group field was provided by a defaulting mechanism rather than
* explicitly provided by the original provider of the security
* descriptor. This may affect the treatment of the SID with respect to
- * inheritence of a primary group.
+ * inheritance of a primary group.
*
* SE_DACL_PRESENT - This boolean flag, when set, indicates that the security
* descriptor contains a discretionary ACL. If this flag is set and the
@@ -1674,7 +1674,7 @@ typedef enum {
* pointed to by the Dacl field was provided by a defaulting mechanism
* rather than explicitly provided by the original provider of the
* security descriptor. This may affect the treatment of the ACL with
- * respect to inheritence of an ACL. This flag is ignored if the
+ * respect to inheritance of an ACL. This flag is ignored if the
* DaclPresent flag is not set.
*
* SE_SACL_PRESENT - This boolean flag, when set, indicates that the security
@@ -1686,7 +1686,7 @@ typedef enum {
* pointed to by the Sacl field was provided by a defaulting mechanism
* rather than explicitly provided by the original provider of the
* security descriptor. This may affect the treatment of the ACL with
- * respect to inheritence of an ACL. This flag is ignored if the
+ * respect to inheritance of an ACL. This flag is ignored if the
* SaclPresent flag is not set.
*
* SE_SELF_RELATIVE - This boolean flag, when set, indicates that the security
@@ -2283,7 +2283,7 @@ typedef struct {
// the key_length is zero, then the vcn immediately
// follows the INDEX_ENTRY_HEADER. Regardless of
// key_length, the address of the 8-byte boundary
- // alligned vcn of INDEX_ENTRY{_HEADER} *ie is given by
+ // aligned vcn of INDEX_ENTRY{_HEADER} *ie is given by
// (char*)ie + le16_to_cpu(ie*)->length) - sizeof(VCN),
// where sizeof(VCN) can be hardcoded as 8 if wanted. */
} __attribute__ ((__packed__)) INDEX_ENTRY;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 4dadcdf3d451..c71de292c5ad 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -669,7 +669,7 @@ err_out:
* of cases where we think that a volume is dirty when in fact it is clean.
* This should only affect volumes that have not been shutdown cleanly but did
* not have any pending, non-check-pointed i/o, i.e. they were completely idle
- * at least for the five seconds preceeding the unclean shutdown.
+ * at least for the five seconds preceding the unclean shutdown.
*
* This function assumes that the $LogFile journal has already been consistency
* checked by a call to ntfs_check_logfile() and in particular if the $LogFile
diff --git a/fs/ntfs/logfile.h b/fs/ntfs/logfile.h
index b5a6f08bd35c..aa2b6ac3f0a4 100644
--- a/fs/ntfs/logfile.h
+++ b/fs/ntfs/logfile.h
@@ -222,7 +222,7 @@ typedef struct {
/* 24*/ sle64 file_size; /* Usable byte size of the log file. If the
restart_area_offset + the offset of the
file_size are > 510 then corruption has
- occured. This is the very first check when
+ occurred. This is the very first check when
starting with the restart_area as if it
fails it means that some of the above values
will be corrupted by the multi sector
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 326e7475a22a..382857f9c7db 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -73,7 +73,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
vol->mft_record_size) {
page = ERR_PTR(-ENOENT);
- ntfs_error(vol->sb, "Attemt to read mft record 0x%lx, "
+ ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
"which is beyond the end of the mft. "
"This is probably a bug in the ntfs "
"driver.", ni->mft_no);
@@ -1442,7 +1442,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
// Note: It will need to be a special mft record and if none of
// those are available it gets rather complicated...
ntfs_error(vol->sb, "Not enough space in this mft record to "
- "accomodate extended mft bitmap attribute "
+ "accommodate extended mft bitmap attribute "
"extent. Cannot handle this yet.");
ret = -EOPNOTSUPP;
goto undo_alloc;
@@ -1879,7 +1879,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
// and we would then need to update all references to this mft
// record appropriately. This is rather complicated...
ntfs_error(vol->sb, "Not enough space in this mft record to "
- "accomodate extended mft data attribute "
+ "accommodate extended mft data attribute "
"extent. Cannot handle this yet.");
ret = -EOPNOTSUPP;
goto undo_alloc;
@@ -2357,7 +2357,7 @@ ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
}
#ifdef DEBUG
read_lock_irqsave(&mftbmp_ni->size_lock, flags);
- ntfs_debug("Status of mftbmp after initialized extention: "
+ ntfs_debug("Status of mftbmp after initialized extension: "
"allocated_size 0x%llx, data_size 0x%llx, "
"initialized_size 0x%llx.",
(long long)mftbmp_ni->allocated_size,
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 56a9a6d25a2a..eac7d6788a10 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -1243,7 +1243,7 @@ err_out:
* write.
*
* This is used when building the mapping pairs array of a runlist to compress
- * a given logical cluster number (lcn) or a specific run length to the minumum
+ * a given logical cluster number (lcn) or a specific run length to the minimum
* size possible.
*
* Return the number of bytes written on success. On error, i.e. the
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 29099a07b9fe..b52706da4645 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -458,7 +458,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
* the volume on boot and updates them.
*
* When remounting read-only, mark the volume clean if no volume errors
- * have occured.
+ * have occurred.
*/
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
static const char *es = ". Cannot remount read-write.";
@@ -1269,7 +1269,7 @@ static int check_windows_hibernation_status(ntfs_volume *vol)
"hibernated on the volume.");
return 0;
}
- /* A real error occured. */
+ /* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for "
"hiberfil.sys.");
return ret;
@@ -1370,7 +1370,7 @@ static bool load_and_init_quota(ntfs_volume *vol)
NVolSetQuotaOutOfDate(vol);
return true;
}
- /* A real error occured. */
+ /* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for $Quota.");
return false;
}
@@ -1454,7 +1454,7 @@ not_enabled:
NVolSetUsnJrnlStamped(vol);
return true;
}
- /* A real error occured. */
+ /* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for "
"$UsnJrnl.");
return false;
@@ -2292,7 +2292,7 @@ static void ntfs_put_super(struct super_block *sb)
ntfs_commit_inode(vol->mft_ino);
/*
- * If a read-write mount and no volume errors have occured, mark the
+ * If a read-write mount and no volume errors have occurred, mark the
* volume clean. Also, re-commit all affected inodes.
*/
if (!(sb->s_flags & MS_RDONLY)) {
@@ -2496,7 +2496,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
if (vol->nr_clusters & 63)
nr_free += 64 - (vol->nr_clusters & 63);
up_read(&vol->lcnbmp_lock);
- /* If errors occured we may well have gone below zero, fix this. */
+ /* If errors occurred we may well have gone below zero, fix this. */
if (nr_free < 0)
nr_free = 0;
ntfs_debug("Exiting.");
@@ -2561,7 +2561,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
}
ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
index - 1);
- /* If errors occured we may well have gone below zero, fix this. */
+ /* If errors occurred we may well have gone below zero, fix this. */
if (nr_free < 0)
nr_free = 0;
ntfs_debug("Exiting.");
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 704f6b1742f3..e913ad130fdd 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/string.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -497,7 +496,7 @@ static int ocfs2_xattr_set_acl(struct dentry *dentry, const char *name,
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
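The ocfs2 hunks that follow convert mlog(0, ...) debug messages into tracepoints (trace_ocfs2_*) declared in the new fs/ocfs2/ocfs2_trace.h, which this diff does not show. As a hedged reconstruction, a declaration for the simplest of them might look like this with the standard TRACE_EVENT() machinery:

    #include <linux/tracepoint.h>

    /* Hypothetical sketch; see fs/ocfs2/ocfs2_trace.h for the real thing.
     * The header must also be compiled once with CREATE_TRACE_POINTS set. */
    TRACE_EVENT(ocfs2_num_free_extents,
            TP_PROTO(int num_free),
            TP_ARGS(num_free),
            TP_STRUCT__entry(
                    __field(int, num_free)
            ),
            TP_fast_assign(
                    __entry->num_free = num_free;
            ),
            TP_printk("%d", __entry->num_free)
    );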
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index e4984e259cb6..48aa9c7401c7 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -30,7 +30,6 @@
#include <linux/swap.h>
#include <linux/quotaops.h>
-#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -50,6 +49,7 @@
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -886,8 +886,7 @@ static int ocfs2_validate_extent_block(struct super_block *sb,
struct ocfs2_extent_block *eb =
(struct ocfs2_extent_block *)bh->b_data;
- mlog(0, "Validating extent block %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -965,8 +964,6 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb,
struct buffer_head *eb_bh = NULL;
u64 last_eb_blk = 0;
- mlog_entry_void();
-
el = et->et_root_el;
last_eb_blk = ocfs2_et_get_last_eb_blk(et);
@@ -987,7 +984,7 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb,
bail:
brelse(eb_bh);
- mlog_exit(retval);
+ trace_ocfs2_num_free_extents(retval);
return retval;
}
@@ -1010,8 +1007,6 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
struct ocfs2_extent_block *eb;
- mlog_entry_void();
-
count = 0;
while (count < wanted) {
status = ocfs2_claim_metadata(handle,
@@ -1074,8 +1069,8 @@ bail:
brelse(bhs[i]);
bhs[i] = NULL;
}
+ mlog_errno(status);
}
- mlog_exit(status);
return status;
}
@@ -1173,8 +1168,6 @@ static int ocfs2_add_branch(handle_t *handle,
struct ocfs2_extent_list *el;
u32 new_cpos, root_end;
- mlog_entry_void();
-
BUG_ON(!last_eb_bh || !*last_eb_bh);
if (eb_bh) {
@@ -1200,8 +1193,11 @@ static int ocfs2_add_branch(handle_t *handle,
* from new_cpos).
*/
if (root_end > new_cpos) {
- mlog(0, "adjust the cluster end from %u to %u\n",
- root_end, new_cpos);
+ trace_ocfs2_adjust_rightmost_branch(
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci),
+ root_end, new_cpos);
+
status = ocfs2_adjust_rightmost_branch(handle, et);
if (status) {
mlog_errno(status);
@@ -1332,7 +1328,6 @@ bail:
kfree(new_eb_bhs);
}
- mlog_exit(status);
return status;
}
@@ -1353,8 +1348,6 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
struct ocfs2_extent_list *root_el;
struct ocfs2_extent_list *eb_el;
- mlog_entry_void();
-
status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
&new_eb_bh);
if (status < 0) {
@@ -1415,7 +1408,6 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
bail:
brelse(new_eb_bh);
- mlog_exit(status);
return status;
}
@@ -1446,8 +1438,6 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
struct buffer_head *bh = NULL;
struct buffer_head *lowest_bh = NULL;
- mlog_entry_void();
-
*target_bh = NULL;
el = et->et_root_el;
@@ -1503,7 +1493,6 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
bail:
brelse(bh);
- mlog_exit(status);
return status;
}
@@ -1540,7 +1529,10 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
* another tree level */
if (shift) {
BUG_ON(bh);
- mlog(0, "need to shift tree depth (current = %d)\n", depth);
+ trace_ocfs2_grow_tree(
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci),
+ depth);
/* ocfs2_shift_tree_depth will return us a buffer with
* the new extent block (so we can pass that to
@@ -1570,7 +1562,6 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
/* call ocfs2_add_branch to add the final part of the tree with
* the new data. */
- mlog(0, "add branch. bh = %p\n", bh);
ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
meta_ac);
if (ret < 0) {
@@ -1645,8 +1636,9 @@ static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
}
insert_index = i;
- mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n",
- insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count));
+ trace_ocfs2_rotate_leaf(insert_cpos, insert_index,
+ has_empty, next_free,
+ le16_to_cpu(el->l_count));
BUG_ON(insert_index < 0);
BUG_ON(insert_index >= le16_to_cpu(el->l_count));
@@ -2059,7 +2051,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
left_el = path_leaf_el(left_path);
right_el = path_leaf_el(right_path);
for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
- mlog(0, "Adjust records at index %u\n", i);
+ trace_ocfs2_complete_edge_insert(i);
/*
* One nice property of knowing that all of these
@@ -2389,7 +2381,9 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
goto out;
}
- mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos);
+ trace_ocfs2_rotate_tree_right(
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ insert_cpos, cpos);
/*
* What we want to do here is:
@@ -2418,8 +2412,10 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
* rotating subtrees.
*/
while (cpos && insert_cpos <= cpos) {
- mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n",
- insert_cpos, cpos);
+ trace_ocfs2_rotate_tree_right(
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci),
+ insert_cpos, cpos);
ret = ocfs2_find_path(et->et_ci, left_path, cpos);
if (ret) {
@@ -2461,10 +2457,10 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
start = ocfs2_find_subtree_root(et, left_path, right_path);
- mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
- start,
- (unsigned long long) right_path->p_node[start].bh->b_blocknr,
- right_path->p_tree_depth);
+ trace_ocfs2_rotate_subtree(start,
+ (unsigned long long)
+ right_path->p_node[start].bh->b_blocknr,
+ right_path->p_tree_depth);
ret = ocfs2_extend_rotate_transaction(handle, start,
orig_credits, right_path);
@@ -2964,8 +2960,7 @@ static int __ocfs2_rotate_tree_left(handle_t *handle,
subtree_root = ocfs2_find_subtree_root(et, left_path,
right_path);
- mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
- subtree_root,
+ trace_ocfs2_rotate_subtree(subtree_root,
(unsigned long long)
right_path->p_node[subtree_root].bh->b_blocknr,
right_path->p_tree_depth);
@@ -3989,9 +3984,11 @@ static int ocfs2_append_rec_to_path(handle_t *handle,
goto out;
}
- mlog(0, "Append may need a left path update. cpos: %u, "
- "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos),
- left_cpos);
+ trace_ocfs2_append_rec_to_path(
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci),
+ le32_to_cpu(insert_rec->e_cpos),
+ left_cpos);
/*
* No need to worry if the append is already in the
@@ -4522,7 +4519,7 @@ set_tail_append:
}
/*
- * Helper function called at the begining of an insert.
+ * Helper function called at the beginning of an insert.
*
* This computes a few things that are commonly used in the process of
* inserting into the btree:
@@ -4562,7 +4559,7 @@ static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et,
ocfs2_et_get_last_eb_blk(et),
&bh);
if (ret) {
- mlog_exit(ret);
+ mlog_errno(ret);
goto out;
}
eb = (struct ocfs2_extent_block *) bh->b_data;
@@ -4678,9 +4675,9 @@ int ocfs2_insert_extent(handle_t *handle,
struct ocfs2_insert_type insert = {0, };
struct ocfs2_extent_rec rec;
- mlog(0, "add %u clusters at position %u to owner %llu\n",
- new_clusters, cpos,
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
+ trace_ocfs2_insert_extent_start(
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ cpos, new_clusters);
memset(&rec, 0, sizeof(rec));
rec.e_cpos = cpu_to_le32(cpos);
@@ -4700,11 +4697,9 @@ int ocfs2_insert_extent(handle_t *handle,
goto bail;
}
- mlog(0, "Insert.appending: %u, Insert.Contig: %u, "
- "Insert.contig_index: %d, Insert.free_records: %d, "
- "Insert.tree_depth: %d\n",
- insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
- free_records, insert.ins_tree_depth);
+ trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig,
+ insert.ins_contig_index, free_records,
+ insert.ins_tree_depth);
if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
status = ocfs2_grow_tree(handle, et,
@@ -4726,7 +4721,6 @@ int ocfs2_insert_extent(handle_t *handle,
bail:
brelse(last_eb_bh);
- mlog_exit(status);
return status;
}
@@ -4746,7 +4740,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret)
{
- int status = 0;
+ int status = 0, err = 0;
int free_extents;
enum ocfs2_alloc_restarted reason = RESTART_NONE;
u32 bit_off, num_bits;
@@ -4773,14 +4767,14 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
* 2) we are so fragmented, we've needed to add metadata too
* many times. */
if (!free_extents && !meta_ac) {
- mlog(0, "we haven't reserved any metadata!\n");
+ err = -1;
status = -EAGAIN;
reason = RESTART_META;
goto leave;
} else if ((!free_extents)
&& (ocfs2_alloc_context_bits_left(meta_ac)
< ocfs2_extend_meta_needed(et->et_root_el))) {
- mlog(0, "filesystem is really fragmented...\n");
+ err = -2;
status = -EAGAIN;
reason = RESTART_META;
goto leave;
@@ -4805,9 +4799,9 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
}
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "Allocating %u clusters at block %u for owner %llu\n",
- num_bits, bit_off,
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
+ trace_ocfs2_add_clusters_in_btree(
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ bit_off, num_bits);
status = ocfs2_insert_extent(handle, et, *logical_offset, block,
num_bits, flags, meta_ac);
if (status < 0) {
@@ -4821,16 +4815,15 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
*logical_offset += num_bits;
if (clusters_to_add) {
- mlog(0, "need to alloc once more, wanted = %u\n",
- clusters_to_add);
+ err = clusters_to_add;
status = -EAGAIN;
reason = RESTART_TRANS;
}
leave:
- mlog_exit(status);
if (reason_ret)
*reason_ret = reason;
+ trace_ocfs2_add_clusters_in_btree_ret(status, reason, err);
return status;
}
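The new err variable folds the cause of a restart into the exit tracepoint: -1 means no metadata was reserved at all, -2 means the reserved metadata cannot cover the extent-tree growth on a badly fragmented volume, and a positive value is the number of clusters still wanted when the transaction must be restarted. A hypothetical decoder for reading trace output (not part of the patch):

	/* Hypothetical helper: interpret the 'err' field of
	 * trace_ocfs2_add_clusters_in_btree_ret. */
	static const char *ocfs2_add_clusters_err_str(int err)
	{
		if (err == -1)
			return "no metadata reserved";
		if (err == -2)
			return "reserved metadata too small (fragmented)";
		if (err > 0)
			return "restarting, clusters still wanted";
		return "none";
	}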
@@ -5039,7 +5032,7 @@ int ocfs2_split_extent(handle_t *handle,
ocfs2_et_get_last_eb_blk(et),
&last_eb_bh);
if (ret) {
- mlog_exit(ret);
+ mlog_errno(ret);
goto out;
}
@@ -5056,9 +5049,9 @@ int ocfs2_split_extent(handle_t *handle,
ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
- mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n",
- split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent,
- ctxt.c_split_covers_rec);
+ trace_ocfs2_split_extent(split_index, ctxt.c_contig_type,
+ ctxt.c_has_empty_extent,
+ ctxt.c_split_covers_rec);
if (ctxt.c_contig_type == CONTIG_NONE) {
if (ctxt.c_split_covers_rec)
@@ -5192,8 +5185,9 @@ int ocfs2_mark_extent_written(struct inode *inode,
{
int ret;
- mlog(0, "Inode %lu cpos %u, len %u, phys clusters %u\n",
- inode->i_ino, cpos, len, phys);
+ trace_ocfs2_mark_extent_written(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ cpos, len, phys);
if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
@@ -5512,11 +5506,10 @@ int ocfs2_remove_extent(handle_t *handle,
BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);
- mlog(0, "Owner %llu, remove (cpos %u, len %u). Existing index %d "
- "(cpos %u, len %u)\n",
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
- cpos, len, index,
- le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec));
+ trace_ocfs2_remove_extent(
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ cpos, len, index, le32_to_cpu(rec->e_cpos),
+ ocfs2_rec_clusters(el, rec));
if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
@@ -5795,9 +5788,6 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
- mlog_entry("start_blk = %llu, num_clusters = %u\n",
- (unsigned long long)start_blk, num_clusters);
-
BUG_ON(mutex_trylock(&tl_inode->i_mutex));
start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
@@ -5834,10 +5824,9 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
goto bail;
}
- mlog(0, "Log truncate of %u clusters starting at cluster %u to "
- "%llu (index = %d)\n", num_clusters, start_cluster,
- (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index);
-
+ trace_ocfs2_truncate_log_append(
+ (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index,
+ start_cluster, num_clusters);
if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
/*
* Move index back to the record we are coalescing with.
@@ -5846,9 +5835,10 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
index--;
num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
- mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n",
- index, le32_to_cpu(tl->tl_recs[index].t_start),
- num_clusters);
+ trace_ocfs2_truncate_log_append(
+ (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
+ index, le32_to_cpu(tl->tl_recs[index].t_start),
+ num_clusters);
} else {
tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
tl->tl_used = cpu_to_le16(index + 1);
@@ -5859,7 +5849,6 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
osb->truncated_clusters += num_clusters;
bail:
- mlog_exit(status);
return status;
}
@@ -5878,8 +5867,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
struct inode *tl_inode = osb->osb_tl_inode;
struct buffer_head *tl_bh = osb->osb_tl_bh;
- mlog_entry_void();
-
di = (struct ocfs2_dinode *) tl_bh->b_data;
tl = &di->id2.i_dealloc;
i = le16_to_cpu(tl->tl_used) - 1;
@@ -5915,8 +5902,9 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
/* if start_blk is not set, we ignore the record as
* invalid. */
if (start_blk) {
- mlog(0, "free record %d, start = %u, clusters = %u\n",
- i, le32_to_cpu(rec.t_start), num_clusters);
+ trace_ocfs2_replay_truncate_records(
+ (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
+ i, le32_to_cpu(rec.t_start), num_clusters);
status = ocfs2_free_clusters(handle, data_alloc_inode,
data_alloc_bh, start_blk,
@@ -5932,7 +5920,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
osb->truncated_clusters = 0;
bail:
- mlog_exit(status);
return status;
}
@@ -5949,8 +5936,6 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
- mlog_entry_void();
-
BUG_ON(mutex_trylock(&tl_inode->i_mutex));
di = (struct ocfs2_dinode *) tl_bh->b_data;
@@ -5962,8 +5947,9 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
tl = &di->id2.i_dealloc;
num_to_flush = le16_to_cpu(tl->tl_used);
- mlog(0, "Flush %u records from truncate log #%llu\n",
- num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno);
+ trace_ocfs2_flush_truncate_log(
+ (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
+ num_to_flush);
if (!num_to_flush) {
status = 0;
goto out;
@@ -6009,7 +5995,6 @@ out_mutex:
iput(data_alloc_inode);
out:
- mlog_exit(status);
return status;
}
@@ -6032,15 +6017,11 @@ static void ocfs2_truncate_log_worker(struct work_struct *work)
container_of(work, struct ocfs2_super,
osb_truncate_log_wq.work);
- mlog_entry_void();
-
status = ocfs2_flush_truncate_log(osb);
if (status < 0)
mlog_errno(status);
else
ocfs2_init_steal_slots(osb);
-
- mlog_exit(status);
}
#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
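OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL drives the worker above: the truncate log is flushed after roughly two seconds of delay. Re-arming follows the standard delayed-workqueue pattern; a sketch, assuming ocfs2_wq is the module's workqueue as used elsewhere in this patch:

	/* Sketch: schedule the next truncate-log flush; the exact
	 * call site in ocfs2 may differ. */
	queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq,
			   OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);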
@@ -6086,7 +6067,6 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
*tl_inode = inode;
*tl_bh = bh;
bail:
- mlog_exit(status);
return status;
}
@@ -6106,7 +6086,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
*tl_copy = NULL;
- mlog(0, "recover truncate log from slot %d\n", slot_num);
+ trace_ocfs2_begin_truncate_log_recovery(slot_num);
status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
if (status < 0) {
@@ -6123,8 +6103,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
tl = &di->id2.i_dealloc;
if (le16_to_cpu(tl->tl_used)) {
- mlog(0, "We'll have %u logs to recover\n",
- le16_to_cpu(tl->tl_used));
+ trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used));
*tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
if (!(*tl_copy)) {
@@ -6157,9 +6136,9 @@ bail:
if (status < 0 && (*tl_copy)) {
kfree(*tl_copy);
*tl_copy = NULL;
+ mlog_errno(status);
}
- mlog_exit(status);
return status;
}
@@ -6174,8 +6153,6 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_truncate_log *tl;
- mlog_entry_void();
-
if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
return -EINVAL;
@@ -6183,8 +6160,9 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
tl = &tl_copy->id2.i_dealloc;
num_recs = le16_to_cpu(tl->tl_used);
- mlog(0, "cleanup %u records from %llu\n", num_recs,
- (unsigned long long)le64_to_cpu(tl_copy->i_blkno));
+ trace_ocfs2_complete_truncate_log_recovery(
+ (unsigned long long)le64_to_cpu(tl_copy->i_blkno),
+ num_recs);
mutex_lock(&tl_inode->i_mutex);
for(i = 0; i < num_recs; i++) {
@@ -6219,7 +6197,6 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
bail_up:
mutex_unlock(&tl_inode->i_mutex);
- mlog_exit(status);
return status;
}
@@ -6228,8 +6205,6 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
int status;
struct inode *tl_inode = osb->osb_tl_inode;
- mlog_entry_void();
-
if (tl_inode) {
cancel_delayed_work(&osb->osb_truncate_log_wq);
flush_workqueue(ocfs2_wq);
@@ -6241,8 +6216,6 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
brelse(osb->osb_tl_bh);
iput(osb->osb_tl_inode);
}
-
- mlog_exit_void();
}
int ocfs2_truncate_log_init(struct ocfs2_super *osb)
@@ -6251,8 +6224,6 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
struct inode *tl_inode = NULL;
struct buffer_head *tl_bh = NULL;
- mlog_entry_void();
-
status = ocfs2_get_truncate_log_info(osb,
osb->slot_num,
&tl_inode,
@@ -6268,7 +6239,6 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
osb->osb_tl_bh = tl_bh;
osb->osb_tl_inode = tl_inode;
- mlog_exit(status);
return status;
}
@@ -6350,8 +6320,8 @@ static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
else
bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
head->free_bit);
- mlog(0, "Free bit: (bit %u, blkno %llu)\n",
- head->free_bit, (unsigned long long)head->free_blk);
+ trace_ocfs2_free_cached_blocks(
+ (unsigned long long)head->free_blk, head->free_bit);
ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
head->free_bit, bg_blkno, 1);
@@ -6404,8 +6374,7 @@ int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
return ret;
}
- mlog(0, "Insert clusters: (bit %u, blk %llu)\n",
- bit, (unsigned long long)blkno);
+ trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit);
item->free_blk = blkno;
item->free_bit = bit;
@@ -6480,8 +6449,8 @@ int ocfs2_run_deallocs(struct ocfs2_super *osb,
fl = ctxt->c_first_suballocator;
if (fl->f_first) {
- mlog(0, "Free items: (type %u, slot %d)\n",
- fl->f_inode_type, fl->f_slot);
+ trace_ocfs2_run_deallocs(fl->f_inode_type,
+ fl->f_slot);
ret2 = ocfs2_free_cached_blocks(osb,
fl->f_inode_type,
fl->f_slot,
@@ -6558,8 +6527,9 @@ int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
goto out;
}
- mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n",
- type, slot, bit, (unsigned long long)blkno);
+ trace_ocfs2_cache_block_dealloc(type, slot,
+ (unsigned long long)suballoc,
+ (unsigned long long)blkno, bit);
item->free_bg = suballoc;
item->free_blk = blkno;
@@ -7005,8 +6975,6 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
struct ocfs2_extent_tree et;
struct ocfs2_cached_dealloc_ctxt dealloc;
- mlog_entry_void();
-
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
@@ -7041,8 +7009,11 @@ start:
goto bail;
}
- mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n",
- OCFS2_I(inode)->ip_clusters, path->p_tree_depth);
+ trace_ocfs2_commit_truncate(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ new_highest_cpos,
+ OCFS2_I(inode)->ip_clusters,
+ path->p_tree_depth);
/*
* By now, el will point to the extent list on the bottom most
@@ -7136,7 +7107,6 @@ bail:
ocfs2_free_path(path);
- mlog_exit(status);
return status;
}
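A large part of the motivation for replacing mlog(0, ...) with tracepoints is the disabled-case cost: a tracepoint call site is guarded so that argument evaluation is skipped entirely unless the event is enabled, whereas mlog() always built its format arguments before testing the mask. Conceptually (names below are placeholders; the real macros live in include/linux/tracepoint.h and vary by kernel version):

	/* Conceptual expansion of trace_ocfs2_grow_tree(owner, depth):
	 * a cheap per-event enable test guards all argument evaluation. */
	if (unlikely(ocfs2_grow_tree_event_enabled))
		__record_ocfs2_grow_tree(owner, depth);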
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1fbb0e20131b..ac97bca282d2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -29,7 +29,6 @@
#include <linux/mpage.h>
#include <linux/quotaops.h>
-#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -45,6 +44,7 @@
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -59,8 +59,9 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
void *kaddr;
- mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
- (unsigned long long)iblock, bh_result, create);
+ trace_ocfs2_symlink_get_block(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)iblock, bh_result, create);
BUG_ON(ocfs2_inode_is_fast_symlink(inode));
@@ -123,7 +124,6 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
bail:
brelse(bh);
- mlog_exit(err);
return err;
}
@@ -136,8 +136,8 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
u64 p_blkno, count, past_eof;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
- (unsigned long long)iblock, bh_result, create);
+ trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)iblock, bh_result, create);
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
@@ -199,8 +199,9 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
}
past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
- mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
- (unsigned long long)past_eof);
+
+ trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)past_eof);
if (create && (iblock >= past_eof))
set_buffer_new(bh_result);
@@ -208,7 +209,6 @@ bail:
if (err < 0)
err = -EIO;
- mlog_exit(err);
return err;
}
@@ -278,7 +278,8 @@ static int ocfs2_readpage(struct file *file, struct page *page)
loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
int ret, unlock = 1;
- mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));
+ trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
+ (page ? page->index : 0));
ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
if (ret != 0) {
@@ -323,7 +324,6 @@ out_inode_unlock:
out:
if (unlock)
unlock_page(page);
- mlog_exit(ret);
return ret;
}
@@ -396,15 +396,11 @@ out_unlock:
*/
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
- int ret;
-
- mlog_entry("(0x%p)\n", page);
-
- ret = block_write_full_page(page, ocfs2_get_block, wbc);
+ trace_ocfs2_writepage(
+ (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
+ page->index);
- mlog_exit(ret);
-
- return ret;
+ return block_write_full_page(page, ocfs2_get_block, wbc);
}
/* Taken from ext3. We don't necessarily need the full blown
@@ -450,7 +446,8 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
int err = 0;
struct inode *inode = mapping->host;
- mlog_entry("(block = %llu)\n", (unsigned long long)block);
+ trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)block);
/* We don't need to lock journal system files, since they aren't
* accessed concurrently from multiple nodes.
@@ -484,8 +481,6 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
bail:
status = err ? 0 : p_blkno;
- mlog_exit((int)status);
-
return status;
}
@@ -616,9 +611,6 @@ static ssize_t ocfs2_direct_IO(int rw,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
- int ret;
-
- mlog_entry_void();
/*
* Fallback to buffered I/O if we see an inode without
@@ -631,13 +623,10 @@ static ssize_t ocfs2_direct_IO(int rw,
if (i_size_read(inode) <= offset)
return 0;
- ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
- iov, offset, nr_segs,
- ocfs2_direct_IO_get_blocks,
- ocfs2_dio_end_io, NULL, 0);
-
- mlog_exit(ret);
- return ret;
+ return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+ iov, offset, nr_segs,
+ ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io, NULL, 0);
}
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
@@ -1026,6 +1015,12 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
&cluster_start, &cluster_end);
+	/* treat the write as new if a hole/lseek spanned across
+ * the page boundary.
+ */
+ new = new | ((i_size_read(inode) <= page_offset(page)) &&
+ (page_offset(page) <= user_pos));
+
if (page == wc->w_target_page) {
map_from = user_pos & (PAGE_CACHE_SIZE - 1);
map_to = map_from + user_len;
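The added check treats the whole page as new when i_size sits at or before the start of this page and the write position lands at or past that same boundary — i.e. the file currently ends in a hole that spans the page boundary. Marking the write as new means those blocks get zero-filled instead of read from disk. The predicate, restated on its own:

	/* Restated for clarity: the page lies wholly beyond i_size,
	 * so a hole reaches across the page boundary up to user_pos. */
	bool hole_spans_page = i_size_read(inode) <= page_offset(page) &&
			       page_offset(page) <= user_pos;
	new |= hole_spans_page;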
@@ -1534,9 +1529,9 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = NULL;
- mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n",
- (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos,
- oi->ip_dyn_features);
+ trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
+ len, (unsigned long long)pos,
+ oi->ip_dyn_features);
/*
* Handle inodes which already have inline data 1st.
@@ -1739,6 +1734,13 @@ try_again:
di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
+ trace_ocfs2_write_begin_nolock(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (long long)i_size_read(inode),
+ le32_to_cpu(di->i_clusters),
+ pos, len, flags, mmap_page,
+ clusters_to_alloc, extents_to_split);
+
/*
* We set w_target_from, w_target_to here so that
* ocfs2_write_end() knows which range in the target page to
@@ -1751,12 +1753,6 @@ try_again:
* ocfs2_lock_allocators(). It greatly over-estimates
* the work to be done.
*/
- mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u,"
- " clusters_to_add = %u, extents_to_split = %u\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (long long)i_size_read(inode), le32_to_cpu(di->i_clusters),
- clusters_to_alloc, extents_to_split);
-
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
wc->w_di_bh);
ret = ocfs2_lock_allocators(inode, &et,
@@ -1938,8 +1934,8 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
kunmap_atomic(kaddr, KM_USER0);
- mlog(0, "Data written to inode at offset %llu. "
- "id_count = %u, copied = %u, i_dyn_features = 0x%x\n",
+ trace_ocfs2_write_end_inline(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)pos, *copied,
le16_to_cpu(di->id2.i_data.id_count),
le16_to_cpu(di->i_dyn_features));
@@ -2043,7 +2039,6 @@ const struct address_space_operations ocfs2_aops = {
.write_begin = ocfs2_write_begin,
.write_end = ocfs2_write_end,
.bmap = ocfs2_bmap,
- .sync_page = block_sync_page,
.direct_IO = ocfs2_direct_IO,
.invalidatepage = ocfs2_invalidatepage,
.releasepage = ocfs2_releasepage,
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index eceb456037c1..75cf3ad987a6 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -71,7 +71,7 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
/*
* Using a named enum representing lock types in terms of #N bit stored in
- * iocb->private, which is going to be used for communication bewteen
+ * iocb->private, which is going to be used for communication between
* ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
*/
enum ocfs2_iocb_lock_bits {
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index f9d5d3ffc75a..5d18ad10c27f 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -35,8 +35,8 @@
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
-
#include "buffer_head_io.h"
+#include "ocfs2_trace.h"
/*
* Bits on bh->b_state used by ocfs2.
@@ -55,8 +55,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
{
int ret = 0;
- mlog_entry("(bh->b_blocknr = %llu, ci=%p)\n",
- (unsigned long long)bh->b_blocknr, ci);
+ trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);
BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
BUG_ON(buffer_jbd(bh));
@@ -66,6 +65,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
* can get modified during recovery even if read-only. */
if (ocfs2_is_hard_readonly(osb)) {
ret = -EROFS;
+ mlog_errno(ret);
goto out;
}
@@ -91,11 +91,11 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
* uptodate. */
ret = -EIO;
put_bh(bh);
+ mlog_errno(ret);
}
ocfs2_metadata_cache_io_unlock(ci);
out:
- mlog_exit(ret);
return ret;
}
@@ -106,10 +106,10 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
unsigned int i;
struct buffer_head *bh;
- if (!nr) {
- mlog(ML_BH_IO, "No buffers will be read!\n");
+ trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
+
+ if (!nr)
goto bail;
- }
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
@@ -123,10 +123,8 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
bh = bhs[i];
if (buffer_jbd(bh)) {
- mlog(ML_BH_IO,
- "trying to sync read a jbd "
- "managed bh (blocknr = %llu), skipping\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_read_blocks_sync_jbd(
+ (unsigned long long)bh->b_blocknr);
continue;
}
@@ -186,8 +184,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
struct buffer_head *bh;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
- mlog_entry("(ci=%p, block=(%llu), nr=(%d), flags=%d)\n",
- ci, (unsigned long long)block, nr, flags);
+ trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
BUG_ON(!ci);
BUG_ON((flags & OCFS2_BH_READAHEAD) &&
@@ -207,7 +204,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
if (nr == 0) {
- mlog(ML_BH_IO, "No buffers will be read!\n");
status = 0;
goto bail;
}
@@ -251,8 +247,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
*/
if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
- mlog(ML_UPTODATE,
- "bh (%llu), owner %llu not uptodate\n",
+ trace_ocfs2_read_blocks_from_disk(
(unsigned long long)bh->b_blocknr,
(unsigned long long)ocfs2_metadata_cache_owner(ci));
/* We're using ignore_cache here to say
@@ -260,11 +255,10 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
ignore_cache = 1;
}
+ trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
+ ignore_cache, buffer_jbd(bh), buffer_dirty(bh));
+
if (buffer_jbd(bh)) {
- if (ignore_cache)
- mlog(ML_BH_IO, "trying to sync read a jbd "
- "managed bh (blocknr = %llu)\n",
- (unsigned long long)bh->b_blocknr);
continue;
}
@@ -272,9 +266,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
if (buffer_dirty(bh)) {
/* This should probably be a BUG, or
* at least return an error. */
- mlog(ML_BH_IO, "asking me to sync read a dirty "
- "buffer! (blocknr = %llu)\n",
- (unsigned long long)bh->b_blocknr);
continue;
}
@@ -367,14 +358,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
ocfs2_metadata_cache_io_unlock(ci);
- mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
- (unsigned long long)block, nr,
- ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
- flags);
+ trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
+ flags, ignore_cache);
bail:
- mlog_exit(status);
return status;
}
@@ -408,13 +396,12 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
int ret = 0;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
- mlog_entry_void();
-
BUG_ON(buffer_jbd(bh));
ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
ret = -EROFS;
+ mlog_errno(ret);
goto out;
}
@@ -434,9 +421,9 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
if (!buffer_uptodate(bh)) {
ret = -EIO;
put_bh(bh);
+ mlog_errno(ret);
}
out:
- mlog_exit(ret);
return ret;
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index b108e863d8f6..643720209a98 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -367,11 +367,7 @@ static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
static void o2hb_wait_on_io(struct o2hb_region *reg,
struct o2hb_bio_wait_ctxt *wc)
{
- struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
-
- blk_run_address_space(mapping);
o2hb_bio_wait_dec(wc, 1);
-
wait_for_completion(&wc->wc_io_complete);
}
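The deleted blk_run_address_space() call reflects the block layer's move to on-stack plugging: there is no longer a per-address_space queue to kick, so waiting on wc_io_complete alone is correct. Under the new scheme, request batching is expressed at submission time, roughly:

	/* Sketch of the on-stack plugging API that superseded explicit
	 * unplug calls such as blk_run_address_space(). */
	struct blk_plug plug;

	blk_start_plug(&plug);
	/* submit the heartbeat bios */
	blk_finish_plug(&plug);	/* flush the batched requests */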
@@ -1658,8 +1654,6 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg)
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
- mlog_entry_void();
-
ret = o2hb_read_slots(reg, reg->hr_blocks);
if (ret) {
mlog_errno(ret);
@@ -1681,7 +1675,6 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg)
}
out:
- mlog_exit(ret);
return ret;
}
@@ -2282,7 +2275,7 @@ void o2hb_free_hb_set(struct config_group *group)
kfree(hs);
}
-/* hb callback registration and issueing */
+/* hb callback registration and issuing */
static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
{
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 6c61771469af..07ac24fd9252 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -30,7 +30,7 @@
struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
EXPORT_SYMBOL_GPL(mlog_and_bits);
-struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(MLOG_INITIAL_NOT_MASK);
+struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0);
EXPORT_SYMBOL_GPL(mlog_not_bits);
static ssize_t mlog_mask_show(u64 mask, char *buf)
@@ -80,8 +80,6 @@ struct mlog_attribute {
}
static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
- define_mask(ENTRY),
- define_mask(EXIT),
define_mask(TCP),
define_mask(MSG),
define_mask(SOCKET),
@@ -93,27 +91,11 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(DLM_THREAD),
define_mask(DLM_MASTER),
define_mask(DLM_RECOVERY),
- define_mask(AIO),
- define_mask(JOURNAL),
- define_mask(DISK_ALLOC),
- define_mask(SUPER),
- define_mask(FILE_IO),
- define_mask(EXTENT_MAP),
define_mask(DLM_GLUE),
- define_mask(BH_IO),
- define_mask(UPTODATE),
- define_mask(NAMEI),
- define_mask(INODE),
define_mask(VOTE),
- define_mask(DCACHE),
define_mask(CONN),
define_mask(QUORUM),
- define_mask(EXPORT),
- define_mask(XATTR),
- define_mask(QUOTA),
- define_mask(REFCOUNT),
define_mask(BASTS),
- define_mask(RESERVATIONS),
define_mask(CLUSTER),
define_mask(ERROR),
define_mask(NOTICE),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 34d6544357d9..baa2b9ef7eef 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -82,41 +82,23 @@
/* bits that are frequently given and infrequently matched in the low word */
/* NOTE: If you add a flag, you need to also update masklog.c! */
-#define ML_ENTRY 0x0000000000000001ULL /* func call entry */
-#define ML_EXIT 0x0000000000000002ULL /* func call exit */
-#define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */
-#define ML_MSG 0x0000000000000008ULL /* net network messages */
-#define ML_SOCKET 0x0000000000000010ULL /* net socket lifetime */
-#define ML_HEARTBEAT 0x0000000000000020ULL /* hb all heartbeat tracking */
-#define ML_HB_BIO 0x0000000000000040ULL /* hb io tracing */
-#define ML_DLMFS 0x0000000000000080ULL /* dlm user dlmfs */
-#define ML_DLM 0x0000000000000100ULL /* dlm general debugging */
-#define ML_DLM_DOMAIN 0x0000000000000200ULL /* dlm domain debugging */
-#define ML_DLM_THREAD 0x0000000000000400ULL /* dlm domain thread */
-#define ML_DLM_MASTER 0x0000000000000800ULL /* dlm master functions */
-#define ML_DLM_RECOVERY 0x0000000000001000ULL /* dlm master functions */
-#define ML_AIO 0x0000000000002000ULL /* ocfs2 aio read and write */
-#define ML_JOURNAL 0x0000000000004000ULL /* ocfs2 journalling functions */
-#define ML_DISK_ALLOC 0x0000000000008000ULL /* ocfs2 disk allocation */
-#define ML_SUPER 0x0000000000010000ULL /* ocfs2 mount / umount */
-#define ML_FILE_IO 0x0000000000020000ULL /* ocfs2 file I/O */
-#define ML_EXTENT_MAP 0x0000000000040000ULL /* ocfs2 extent map caching */
-#define ML_DLM_GLUE 0x0000000000080000ULL /* ocfs2 dlm glue layer */
-#define ML_BH_IO 0x0000000000100000ULL /* ocfs2 buffer I/O */
-#define ML_UPTODATE 0x0000000000200000ULL /* ocfs2 caching sequence #'s */
-#define ML_NAMEI 0x0000000000400000ULL /* ocfs2 directory / namespace */
-#define ML_INODE 0x0000000000800000ULL /* ocfs2 inode manipulation */
-#define ML_VOTE 0x0000000001000000ULL /* ocfs2 node messaging */
-#define ML_DCACHE 0x0000000002000000ULL /* ocfs2 dcache operations */
-#define ML_CONN 0x0000000004000000ULL /* net connection management */
-#define ML_QUORUM 0x0000000008000000ULL /* net connection quorum */
-#define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */
-#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
-#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
-#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
-#define ML_BASTS 0x0000000100000000ULL /* dlmglue asts and basts */
-#define ML_RESERVATIONS 0x0000000200000000ULL /* ocfs2 alloc reservations */
-#define ML_CLUSTER 0x0000000400000000ULL /* cluster stack */
+#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
+#define ML_MSG 0x0000000000000002ULL /* net network messages */
+#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
+#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
+#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
+#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
+#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
+#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
+#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
+#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
+#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm master functions */
+#define ML_DLM_GLUE 0x0000000000000800ULL /* ocfs2 dlm glue layer */
+#define ML_VOTE 0x0000000000001000ULL /* ocfs2 node messaging */
+#define ML_CONN 0x0000000000002000ULL /* net connection management */
+#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
+#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
+#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
/* bits that are infrequently given and frequently matched in the high word */
#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
@@ -124,7 +106,6 @@
#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
-#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
#ifndef MLOG_MASK_PREFIX
#define MLOG_MASK_PREFIX 0
#endif
@@ -222,58 +203,6 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
} while (0)
-#if defined(CONFIG_OCFS2_DEBUG_MASKLOG)
-#define mlog_entry(fmt, args...) do { \
- mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \
-} while (0)
-
-#define mlog_entry_void() do { \
- mlog(ML_ENTRY, "ENTRY:\n"); \
-} while (0)
-
-/*
- * We disable this for sparse.
- */
-#if !defined(__CHECKER__)
-#define mlog_exit(st) do { \
- if (__builtin_types_compatible_p(typeof(st), unsigned long)) \
- mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), signed long)) \
- mlog(ML_EXIT, "EXIT: %ld\n", (signed long) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), unsigned int) \
- || __builtin_types_compatible_p(typeof(st), unsigned short) \
- || __builtin_types_compatible_p(typeof(st), unsigned char)) \
- mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), signed int) \
- || __builtin_types_compatible_p(typeof(st), signed short) \
- || __builtin_types_compatible_p(typeof(st), signed char)) \
- mlog(ML_EXIT, "EXIT: %d\n", (signed int) (st)); \
- else if (__builtin_types_compatible_p(typeof(st), long long)) \
- mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
- else \
- mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \
-} while (0)
-#else
-#define mlog_exit(st) do { \
- mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
-} while (0)
-#endif
-
-#define mlog_exit_ptr(ptr) do { \
- mlog(ML_EXIT, "EXIT: %p\n", ptr); \
-} while (0)
-
-#define mlog_exit_void() do { \
- mlog(ML_EXIT, "EXIT\n"); \
-} while (0)
-#else
-#define mlog_entry(...) do { } while (0)
-#define mlog_entry_void(...) do { } while (0)
-#define mlog_exit(...) do { } while (0)
-#define mlog_exit_ptr(...) do { } while (0)
-#define mlog_exit_void(...) do { } while (0)
-#endif /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */
-
#define mlog_bug_on_msg(cond, fmt, args...) do { \
if (cond) { \
mlog(ML_ERROR, "bug expression: " #cond "\n"); \
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index a87366750f23..8f9cea1597af 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -89,7 +89,7 @@ static void o2quo_fence_self(void)
};
}
-/* Indicate that a timeout occured on a hearbeat region write. The
+ * Indicate that a timeout occurred on a heartbeat region write. The
* other nodes in the cluster may consider us dead at that time so we
* want to "fence" ourselves so that we don't scribble on the disk
* after they think they've recovered us. This can't solve all
@@ -261,7 +261,7 @@ void o2quo_hb_still_up(u8 node)
spin_unlock(&qs->qs_lock);
}
-/* This is analagous to hb_up. as a node's connection comes up we delay the
+/* This is analogous to hb_up. as a node's connection comes up we delay the
* quorum decision until we see it heartbeating. the hold will be droped in
* hb_up or hb_down. it might be perpetuated by con_err until hb_down. if
* it's already heartbeating we we might be dropping a hold that conn_up got.
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 3b11cb1e38fc..db5ee4b4f47a 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -210,10 +210,6 @@ static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
sc->sc_tv_func_stop = ktime_get();
}
-static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
-{
- return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
-}
#else /* CONFIG_DEBUG_FS */
# define o2net_init_nst(a, b, c, d, e)
# define o2net_set_nst_sock_time(a)
@@ -227,10 +223,14 @@ static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
# define o2net_set_advance_stop_time(a)
# define o2net_set_func_start_time(a)
# define o2net_set_func_stop_time(a)
-# define o2net_get_func_run_time(a) (ktime_t)0
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_OCFS2_FS_STATS
+static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
+{
+ return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
+}
+
static void o2net_update_send_stats(struct o2net_send_tracking *nst,
struct o2net_sock_container *sc)
{
@@ -565,7 +565,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
* the work queue actually being up. */
if (!valid && o2net_wq) {
unsigned long delay;
- /* delay if we're withing a RECONNECT_DELAY of the
+ /* delay if we're within a RECONNECT_DELAY of the
* last attempt */
delay = (nn->nn_last_connect_attempt +
msecs_to_jiffies(o2net_reconnect_delay()))
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 7eb90403fc8a..e5ba34818332 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/namei.h>
-#define MLOG_MASK_PREFIX ML_DCACHE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -39,6 +38,7 @@
#include "file.h"
#include "inode.h"
#include "super.h"
+#include "ocfs2_trace.h"
void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
@@ -62,8 +62,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
inode = dentry->d_inode;
osb = OCFS2_SB(dentry->d_sb);
- mlog_entry("(0x%p, '%.*s')\n", dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
+ dentry->d_name.name);
/* For a negative dentry -
* check the generation number of the parent and compare with the
@@ -73,9 +73,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
unsigned long gen = (unsigned long) dentry->d_fsdata;
unsigned long pgen =
OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
- mlog(0, "negative dentry: %.*s parent gen: %lu "
- "dentry gen: %lu\n",
- dentry->d_name.len, dentry->d_name.name, pgen, gen);
+
+ trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
+ dentry->d_name.name,
+ pgen, gen);
if (gen != pgen)
goto bail;
goto valid;
@@ -90,8 +91,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
/* did we or someone else delete this inode? */
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
spin_unlock(&OCFS2_I(inode)->ip_lock);
- mlog(0, "inode (%llu) deleted, returning false\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_dentry_revalidate_delete(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
goto bail;
}
spin_unlock(&OCFS2_I(inode)->ip_lock);
@@ -101,10 +102,9 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
* inode nlink hits zero, it never goes back.
*/
if (inode->i_nlink == 0) {
- mlog(0, "Inode %llu orphaned, returning false "
- "dir = %d\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- S_ISDIR(inode->i_mode));
+ trace_ocfs2_dentry_revalidate_orphaned(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ S_ISDIR(inode->i_mode));
goto bail;
}
@@ -113,9 +113,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
* redo it.
*/
if (!dentry->d_fsdata) {
- mlog(0, "Inode %llu doesn't have dentry lock, "
- "returning false\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_dentry_revalidate_nofsdata(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
goto bail;
}
@@ -123,8 +122,7 @@ valid:
ret = 1;
bail:
- mlog_exit(ret);
-
+ trace_ocfs2_dentry_revalidate_ret(ret);
return ret;
}
@@ -181,8 +179,8 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
spin_lock(&dentry->d_lock);
if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
- mlog(0, "dentry found: %.*s\n",
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_find_local_alias(dentry->d_name.len,
+ dentry->d_name.name);
dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
@@ -240,9 +238,8 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
struct dentry *alias;
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
- mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n",
- dentry->d_name.len, dentry->d_name.name,
- (unsigned long long)parent_blkno, dl);
+ trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name,
+ (unsigned long long)parent_blkno, dl);
/*
* Negative dentry. We ignore these for now.
@@ -292,7 +289,9 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
(unsigned long long)parent_blkno,
(unsigned long long)dl->dl_parent_blkno);
- mlog(0, "Found: %s\n", dl->dl_lockres.l_name);
+ trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name,
+ (unsigned long long)parent_blkno,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
goto out_attach;
}
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f97b6f1c61dd..9fe5b8fd658f 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -43,7 +43,6 @@
#include <linux/quotaops.h>
#include <linux/sort.h>
-#define MLOG_MASK_PREFIX ML_NAMEI
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -61,6 +60,7 @@
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -322,21 +322,23 @@ static int ocfs2_check_dir_entry(struct inode * dir,
const char *error_msg = NULL;
const int rlen = le16_to_cpu(de->rec_len);
- if (rlen < OCFS2_DIR_REC_LEN(1))
+ if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
- else if (rlen % 4 != 0)
+ else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
- else if (rlen < OCFS2_DIR_REC_LEN(de->name_len))
+ else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
- else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ else if (unlikely(
+ ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
error_msg = "directory entry across blocks";
- if (error_msg != NULL)
+ if (unlikely(error_msg != NULL))
mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
"offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
(unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
de->name_len);
+
return error_msg == NULL ? 1 : 0;
}
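The unlikely() annotations added to ocfs2_check_dir_entry() are branch-prediction hints: each expands to __builtin_expect(), telling the compiler the corrupt-dirent paths are cold so the common case stays in the hot layout. In essence:

	/* What unlikely() boils down to (include/linux/compiler.h). */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* usage: a malformed rec_len is the exceptional case */
	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
		error_msg = "rec_len is smaller than minimal";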
@@ -367,8 +369,6 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
int de_len;
int ret = 0;
- mlog_entry_void();
-
de_buf = first_de;
dlimit = de_buf + bytes;
@@ -402,7 +402,7 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
}
bail:
- mlog_exit(ret);
+ trace_ocfs2_search_dirblock(ret);
return ret;
}
@@ -447,8 +447,7 @@ static int ocfs2_validate_dir_block(struct super_block *sb,
* We don't validate dirents here, that's handled
* in-place when the code walks them.
*/
- mlog(0, "Validating dirblock %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -706,8 +705,6 @@ static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
int num = 0;
int nblocks, i, err;
- mlog_entry_void();
-
sb = dir->i_sb;
nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
@@ -788,7 +785,7 @@ cleanup_and_exit:
for (; ra_ptr < ra_max; ra_ptr++)
brelse(bh_use[ra_ptr]);
- mlog_exit_ptr(ret);
+ trace_ocfs2_find_entry_el(ret);
return ret;
}
@@ -950,11 +947,9 @@ static int ocfs2_dx_dir_search(const char *name, int namelen,
goto out;
}
- mlog(0, "Dir %llu: name: \"%.*s\", lookup of hash: %u.0x%x "
- "returns: %llu\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- namelen, name, hinfo->major_hash, hinfo->minor_hash,
- (unsigned long long)phys);
+ trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
+ namelen, name, hinfo->major_hash,
+ hinfo->minor_hash, (unsigned long long)phys);
ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
if (ret) {
@@ -964,9 +959,9 @@ static int ocfs2_dx_dir_search(const char *name, int namelen,
dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
- mlog(0, "leaf info: num_used: %d, count: %d\n",
- le16_to_cpu(dx_leaf->dl_list.de_num_used),
- le16_to_cpu(dx_leaf->dl_list.de_count));
+ trace_ocfs2_dx_dir_search_leaf_info(
+ le16_to_cpu(dx_leaf->dl_list.de_num_used),
+ le16_to_cpu(dx_leaf->dl_list.de_count));
entry_list = &dx_leaf->dl_list;
@@ -1166,8 +1161,6 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
int i, status = -ENOENT;
ocfs2_journal_access_func access = ocfs2_journal_access_db;
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh);
-
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
access = ocfs2_journal_access_di;
@@ -1202,7 +1195,6 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
}
bail:
- mlog_exit(status);
return status;
}
@@ -1348,8 +1340,8 @@ static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
}
}
- mlog(0, "Dir %llu: delete entry at index: %d\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno, index);
+ trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
+ index);
ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
@@ -1632,8 +1624,6 @@ int __ocfs2_add_entry(handle_t *handle,
struct buffer_head *insert_bh = lookup->dl_leaf_bh;
char *data_start = insert_bh->b_data;
- mlog_entry_void();
-
if (!namelen)
return -EINVAL;
@@ -1765,8 +1755,9 @@ int __ocfs2_add_entry(handle_t *handle,
* from ever getting here. */
retval = -ENOSPC;
bail:
+ if (retval)
+ mlog_errno(retval);
- mlog_exit(retval);
return retval;
}
@@ -2028,8 +2019,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
struct inode *inode = filp->f_path.dentry->d_inode;
int lock_level = 0;
- mlog_entry("dirino=%llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
if (lock_level && error >= 0) {
@@ -2051,9 +2041,10 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
dirent, filldir, NULL);
ocfs2_inode_unlock(inode, lock_level);
+ if (error)
+ mlog_errno(error);
bail_nolock:
- mlog_exit(error);
return error;
}
@@ -2069,8 +2060,8 @@ int ocfs2_find_files_on_disk(const char *name,
{
int status = -ENOENT;
- mlog(0, "name=%.*s, blkno=%p, inode=%llu\n", namelen, name, blkno,
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_find_files_on_disk(namelen, name, blkno,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_find_entry(name, namelen, inode, lookup);
if (status)
@@ -2114,8 +2105,8 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
int ret;
struct ocfs2_dir_lookup_result lookup = { NULL, };
- mlog_entry("dir %llu, name '%.*s'\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
+ trace_ocfs2_check_dir_for_entry(
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
ret = -EEXIST;
if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0)
@@ -2125,7 +2116,8 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
bail:
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -2324,8 +2316,6 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
struct buffer_head *new_bh = NULL;
struct ocfs2_dir_entry *de;
- mlog_entry_void();
-
if (ocfs2_new_dir_wants_trailer(inode))
size = ocfs2_dir_trailer_blk_off(parent->i_sb);
@@ -2380,7 +2370,6 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
bail:
brelse(new_bh);
- mlog_exit(status);
return status;
}
@@ -2409,9 +2398,9 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
goto out;
}
- mlog(0, "Dir %llu, attach new index block: %llu\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- (unsigned long long)dr_blkno);
+ trace_ocfs2_dx_dir_attach_index(
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)dr_blkno);
dx_root_bh = sb_getblk(osb->sb, dr_blkno);
if (dx_root_bh == NULL) {
@@ -2511,11 +2500,10 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
dx_leaf->dl_list.de_count =
cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
- mlog(0,
- "Dir %llu, format dx_leaf: %llu, entry count: %u\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- (unsigned long long)bh->b_blocknr,
- le16_to_cpu(dx_leaf->dl_list.de_count));
+ trace_ocfs2_dx_dir_format_cluster(
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)bh->b_blocknr,
+ le16_to_cpu(dx_leaf->dl_list.de_count));
ocfs2_journal_dirty(handle, bh);
}
@@ -2759,12 +2747,11 @@ static void ocfs2_dx_dir_index_root_block(struct inode *dir,
ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
- mlog(0,
- "dir: %llu, major: 0x%x minor: 0x%x, index: %u, name: %.*s\n",
- (unsigned long long)dir->i_ino, hinfo.major_hash,
- hinfo.minor_hash,
- le16_to_cpu(dx_root->dr_entries.de_num_used),
- de->name_len, de->name);
+ trace_ocfs2_dx_dir_index_root_block(
+ (unsigned long long)dir->i_ino,
+ hinfo.major_hash, hinfo.minor_hash,
+ de->name_len, de->name,
+ le16_to_cpu(dx_root->dr_entries.de_num_used));
ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
dirent_blk);
@@ -3235,7 +3222,6 @@ static int ocfs2_do_extend_dir(struct super_block *sb,
bail:
if (did_quota && status < 0)
dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
- mlog_exit(status);
return status;
}
@@ -3270,8 +3256,6 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
struct ocfs2_extent_tree et;
struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
- mlog_entry_void();
-
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
/*
* This would be a code error as an inline directory should
@@ -3320,8 +3304,8 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
down_write(&OCFS2_I(dir)->ip_alloc_sem);
drop_alloc_sem = 1;
dir_i_size = i_size_read(dir);
- mlog(0, "extending dir %llu (i_size = %lld)\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size);
+ trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
+ dir_i_size);
/* dir->i_size is always block aligned. */
spin_lock(&OCFS2_I(dir)->ip_lock);
@@ -3436,7 +3420,6 @@ bail:
brelse(new_bh);
- mlog_exit(status);
return status;
}
@@ -3583,8 +3566,9 @@ next:
status = 0;
bail:
brelse(bh);
+ if (status)
+ mlog_errno(status);
- mlog_exit(status);
return status;
}
@@ -3815,9 +3799,9 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
- mlog(0, "DX Dir: %llu, rebalance leaf leaf_blkno: %llu insert: %u\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno,
- (unsigned long long)leaf_blkno, insert_hash);
+ trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)leaf_blkno,
+ insert_hash);
ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
@@ -3897,8 +3881,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
goto out_commit;
}
- mlog(0, "Split leaf (%u) at %u, insert major hash is %u\n",
- leaf_cpos, split_hash, insert_hash);
+ trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
/*
* We have to carefully order operations here. There are items
@@ -4355,8 +4338,8 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
unsigned int blocks_wanted = 1;
struct buffer_head *bh = NULL;
- mlog(0, "getting ready to insert namelen %d into dir %llu\n",
- namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno);
+ trace_ocfs2_prepare_dir_for_insert(
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
if (!namelen) {
ret = -EINVAL;
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 9f30491e5e88..29a886d1e82c 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -128,8 +128,8 @@ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
assert_spin_locked(&res->spinlock);
- mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n",
- lock->ml.type, lock->ml.convert_type, type);
+ mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",
+ lock->ml.type, lock->ml.convert_type, type);
spin_lock(&lock->spinlock);
@@ -353,7 +353,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
struct kvec vec[2];
size_t veclen = 1;
- mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+ mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
memset(&convert, 0, sizeof(struct dlm_convert_lock));
convert.node_idx = dlm->node_num;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7e38a072d720..7540a492eaba 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -188,7 +188,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
struct hlist_head *bucket;
struct hlist_node *list;
- mlog_entry("%.*s\n", len, name);
+ mlog(0, "%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
@@ -222,7 +222,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
{
struct dlm_lock_resource *res = NULL;
- mlog_entry("%.*s\n", len, name);
+ mlog(0, "%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
@@ -531,7 +531,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
unsigned int node;
struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
- mlog_entry("%p %u %p", msg, len, data);
+ mlog(0, "%p %u %p", msg, len, data);
if (!dlm_grab(dlm))
return 0;
@@ -926,9 +926,10 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
}
static int dlm_match_regions(struct dlm_ctxt *dlm,
- struct dlm_query_region *qr)
+ struct dlm_query_region *qr,
+ char *local, int locallen)
{
- char *local = NULL, *remote = qr->qr_regions;
+ char *remote = qr->qr_regions;
char *l, *r;
int localnr, i, j, foundit;
int status = 0;
@@ -957,13 +958,8 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
r += O2HB_MAX_REGION_NAME_LEN;
}
- local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC);
- if (!local) {
- status = -ENOMEM;
- goto bail;
- }
-
- localnr = o2hb_get_all_regions(local, O2NM_MAX_REGIONS);
+ localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN);
+ localnr = o2hb_get_all_regions(local, (u8)localnr);
/* compare local regions with remote */
l = local;
@@ -1012,8 +1008,6 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
}
bail:
- kfree(local);
-
return status;
}
@@ -1075,6 +1069,7 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
{
struct dlm_query_region *qr;
struct dlm_ctxt *dlm = NULL;
+ char *local = NULL;
int status = 0;
int locked = 0;
@@ -1083,6 +1078,13 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node,
qr->qr_domain);
+	/* buffer used in dlm_match_regions() */
+ local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL);
+ if (!local) {
+ status = -ENOMEM;
+ goto bail;
+ }
+
status = -EINVAL;
spin_lock(&dlm_domain_lock);
@@ -1112,13 +1114,15 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
goto bail;
}
- status = dlm_match_regions(dlm, qr);
+ status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
bail:
if (locked)
spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
+ kfree(local);
+
return status;
}
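The dlm_match_regions() rework is a locking fix as much as a cleanup: the old code allocated with GFP_ATOMIC inside a section where dlm_domain_lock (a spinlock) was held. Hoisting the allocation into dlm_query_region_handler(), before any lock is taken, permits GFP_KERNEL, which may sleep. The general shape of the pattern (identifiers illustrative):

	/* Allocate sleepable memory up front, then hand the buffer to
	 * code that runs under the spinlock. */
	buf = kmalloc(size, GFP_KERNEL);	/* may sleep: no locks held */
	if (!buf)
		return -ENOMEM;

	spin_lock(&lock);
	ret = do_work_under_lock(buf);		/* must not sleep */
	spin_unlock(&lock);

	kfree(buf);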
@@ -1553,7 +1557,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
struct domain_join_ctxt *ctxt;
enum dlm_query_join_response_code response = JOIN_DISALLOW;
- mlog_entry("%p", dlm);
+ mlog(0, "%p", dlm);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 7009292aac5a..8d39e0fd66f7 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -128,7 +128,7 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
int call_ast = 0, kick_thread = 0;
enum dlm_status status = DLM_NORMAL;
- mlog_entry("type=%d\n", lock->ml.type);
+ mlog(0, "type=%d\n", lock->ml.type);
spin_lock(&res->spinlock);
/* if called from dlm_create_lock_handler, need to
@@ -227,8 +227,8 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
enum dlm_status status = DLM_DENIED;
int lockres_changed = 1;
- mlog_entry("type=%d\n", lock->ml.type);
- mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
+ mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
+ lock->ml.type, res->lockname.len,
res->lockname.name, flags);
spin_lock(&res->spinlock);
@@ -308,8 +308,6 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
int tmpret, status = 0;
enum dlm_status ret;
- mlog_entry_void();
-
memset(&create, 0, sizeof(create));
create.node_idx = dlm->node_num;
create.requested_type = lock->ml.type;
@@ -477,8 +475,6 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
BUG_ON(!dlm);
- mlog_entry_void();
-
if (!dlm_grab(dlm))
return DLM_REJECTED;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 59f0f6bdfc62..fede57ed005f 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -426,8 +426,6 @@ static void dlm_mle_release(struct kref *kref)
struct dlm_master_list_entry *mle;
struct dlm_ctxt *dlm;
- mlog_entry_void();
-
mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
dlm = mle->dlm;
@@ -810,7 +808,7 @@ lookup:
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
mle = NULL;
- /* this is lame, but we cant wait on either
+ /* this is lame, but we can't wait on either
* the mle or lockres waitqueue here */
if (mig)
msleep(100);
@@ -845,7 +843,7 @@ lookup:
/* finally add the lockres to its hash bucket */
__dlm_insert_lockres(dlm, res);
- /* since this lockres is new it doesnt not require the spinlock */
+ /* since this lockres is new it doesn't require the spinlock */
dlm_lockres_grab_inflight_ref_new(dlm, res);
/* if this node does not become the master make sure to drop
@@ -3120,8 +3118,6 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
*oldmle = NULL;
- mlog_entry_void();
-
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
@@ -3261,7 +3257,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
struct hlist_node *list;
unsigned int i;
- mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
+ mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
assert_spin_locked(&dlm->spinlock);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index aaaffbcbe916..f1beb6fc254d 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -727,7 +727,6 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
if (destroy)
dlm_destroy_recovery_area(dlm, dead_node);
- mlog_exit(status);
return status;
}
@@ -1496,9 +1495,9 @@ leave:
kfree(buf);
if (item)
kfree(item);
+ mlog_errno(ret);
}
- mlog_exit(ret);
return ret;
}
@@ -1567,7 +1566,6 @@ leave:
dlm_lockres_put(res);
}
kfree(data);
- mlog_exit(ret);
}
@@ -1986,7 +1984,6 @@ leave:
dlm_lock_put(newlock);
}
- mlog_exit(ret);
return ret;
}
@@ -2083,8 +2080,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
struct hlist_head *bucket;
struct dlm_lock_resource *res, *next;
- mlog_entry_void();
-
assert_spin_locked(&dlm->spinlock);
list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
@@ -2607,8 +2602,6 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
int nodenum;
int status;
- mlog_entry("%u\n", dead_node);
-
mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
spin_lock(&dlm->spinlock);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 817287c6a6db..850aa7e87537 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -317,7 +317,7 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
struct kvec vec[2];
size_t veclen = 1;
- mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+ mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
if (owner == dlm->node_num) {
/* ended up trying to contact ourself. this means
@@ -588,8 +588,6 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
struct dlm_lock *lock = NULL;
int call_ast, is_master;
- mlog_entry_void();
-
if (!lksb) {
dlm_error(DLM_BADARGS);
return DLM_BADARGS;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e8d94d722ecb..7642d7ca73e5 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -64,7 +64,7 @@ struct ocfs2_mask_waiter {
unsigned long mw_mask;
unsigned long mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
- unsigned long long mw_lock_start;
+ ktime_t mw_lock_start;
#endif
};
@@ -397,8 +397,6 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
{
int len;
- mlog_entry_void();
-
BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
@@ -408,8 +406,6 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
mlog(0, "built lock resource with name: %s\n", name);
-
- mlog_exit_void();
}
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
@@ -435,44 +431,41 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
- res->l_lock_num_prmode = 0;
- res->l_lock_num_prmode_failed = 0;
- res->l_lock_total_prmode = 0;
- res->l_lock_max_prmode = 0;
- res->l_lock_num_exmode = 0;
- res->l_lock_num_exmode_failed = 0;
- res->l_lock_total_exmode = 0;
- res->l_lock_max_exmode = 0;
res->l_lock_refresh = 0;
+ memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
+ memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}
static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
struct ocfs2_mask_waiter *mw, int ret)
{
- unsigned long long *num, *sum;
- unsigned int *max, *failed;
- struct timespec ts = current_kernel_time();
- unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;
-
- if (level == LKM_PRMODE) {
- num = &res->l_lock_num_prmode;
- sum = &res->l_lock_total_prmode;
- max = &res->l_lock_max_prmode;
- failed = &res->l_lock_num_prmode_failed;
- } else if (level == LKM_EXMODE) {
- num = &res->l_lock_num_exmode;
- sum = &res->l_lock_total_exmode;
- max = &res->l_lock_max_exmode;
- failed = &res->l_lock_num_exmode_failed;
- } else
+ u32 usec;
+ ktime_t kt;
+ struct ocfs2_lock_stats *stats;
+
+ if (level == LKM_PRMODE)
+ stats = &res->l_lock_prmode;
+ else if (level == LKM_EXMODE)
+ stats = &res->l_lock_exmode;
+ else
return;
- (*num)++;
- (*sum) += time;
- if (time > *max)
- *max = time;
+ kt = ktime_sub(ktime_get(), mw->mw_lock_start);
+ usec = ktime_to_us(kt);
+
+ stats->ls_gets++;
+ stats->ls_total += ktime_to_ns(kt);
+ /* overflow */
+ if (unlikely(stats->ls_gets == 0)) {
+ stats->ls_gets++;
+ stats->ls_total = ktime_to_ns(kt);
+ }
+
+ if (stats->ls_max < usec)
+ stats->ls_max = usec;
+
if (ret)
- (*failed)++;
+ stats->ls_fail++;
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
@@ -482,8 +475,7 @@ static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
- struct timespec ts = current_kernel_time();
- mw->mw_lock_start = timespec_to_ns(&ts);
+ mw->mw_lock_start = ktime_get();
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
@@ -729,8 +721,6 @@ void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
- mlog_entry_void();
-
if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
return;
@@ -756,14 +746,11 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
memset(&res->l_lksb, 0, sizeof(res->l_lksb));
res->l_flags = 0UL;
- mlog_exit_void();
}
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
int level)
{
- mlog_entry_void();
-
BUG_ON(!lockres);
switch(level) {
@@ -776,15 +763,11 @@ static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
default:
BUG();
}
-
- mlog_exit_void();
}
static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
int level)
{
- mlog_entry_void();
-
BUG_ON(!lockres);
switch(level) {
@@ -799,7 +782,6 @@ static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
default:
BUG();
}
- mlog_exit_void();
}
/* WARNING: This function lives in a world where the only three lock
@@ -846,8 +828,6 @@ static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
- mlog_entry_void();
-
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
@@ -860,14 +840,10 @@ static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
}
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
-
- mlog_exit_void();
}
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
- mlog_entry_void();
-
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
@@ -889,14 +865,10 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
-
- mlog_exit_void();
}
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
- mlog_entry_void();
-
BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
@@ -908,15 +880,12 @@ static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *loc
lockres->l_level = lockres->l_requested;
lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
-
- mlog_exit_void();
}
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
int level)
{
int needs_downconvert = 0;
- mlog_entry_void();
assert_spin_locked(&lockres->l_lock);
@@ -938,8 +907,7 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
if (needs_downconvert)
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
- mlog_exit(needs_downconvert);
+ mlog(0, "needs_downconvert = %d\n", needs_downconvert);
return needs_downconvert;
}
@@ -1151,8 +1119,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
unsigned long flags;
- mlog_entry_void();
-
mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
lockres->l_name, lockres->l_unlock_action);
@@ -1162,7 +1128,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
"unlock_action %d\n", error, lockres->l_name,
lockres->l_unlock_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
- mlog_exit_void();
return;
}
@@ -1186,8 +1151,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
wake_up(&lockres->l_event);
spin_unlock_irqrestore(&lockres->l_lock, flags);
-
- mlog_exit_void();
}
/*
@@ -1233,7 +1196,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
{
unsigned long flags;
- mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
@@ -1244,7 +1206,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
- mlog_exit_void();
}
/* Note: If we detect another process working on the lock (i.e.,
@@ -1260,8 +1221,6 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
unsigned long flags;
unsigned int gen;
- mlog_entry_void();
-
mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
dlm_flags);
@@ -1293,7 +1252,6 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
bail:
- mlog_exit(ret);
return ret;
}
@@ -1416,8 +1374,6 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
unsigned int gen;
int noqueue_attempted = 0;
- mlog_entry_void();
-
ocfs2_init_mask_waiter(&mw);
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
@@ -1583,7 +1539,6 @@ out:
caller_ip);
}
#endif
- mlog_exit(ret);
return ret;
}
@@ -1605,7 +1560,6 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
{
unsigned long flags;
- mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
ocfs2_dec_holders(lockres, level);
ocfs2_downconvert_on_unlock(osb, lockres);
@@ -1614,7 +1568,6 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
if (lockres->l_lockdep_map.key != NULL)
rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
#endif
- mlog_exit_void();
}
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
@@ -1648,8 +1601,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
BUG_ON(!inode);
BUG_ON(!ocfs2_inode_is_new(inode));
- mlog_entry_void();
-
mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
/* NOTE: That we don't increment any of the holder counts, nor
@@ -1683,7 +1634,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
}
bail:
- mlog_exit(ret);
return ret;
}
@@ -1695,16 +1645,12 @@ int ocfs2_rw_lock(struct inode *inode, int write)
BUG_ON(!inode);
- mlog_entry_void();
-
mlog(0, "inode %llu take %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
- if (ocfs2_mount_local(osb)) {
- mlog_exit(0);
+ if (ocfs2_mount_local(osb))
return 0;
- }
lockres = &OCFS2_I(inode)->ip_rw_lockres;
@@ -1715,7 +1661,6 @@ int ocfs2_rw_lock(struct inode *inode, int write)
if (status < 0)
mlog_errno(status);
- mlog_exit(status);
return status;
}
@@ -1725,16 +1670,12 @@ void ocfs2_rw_unlock(struct inode *inode, int write)
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry_void();
-
mlog(0, "inode %llu drop %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
-
- mlog_exit_void();
}
/*
@@ -1748,8 +1689,6 @@ int ocfs2_open_lock(struct inode *inode)
BUG_ON(!inode);
- mlog_entry_void();
-
mlog(0, "inode %llu take PRMODE open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -1764,7 +1703,6 @@ int ocfs2_open_lock(struct inode *inode)
mlog_errno(status);
out:
- mlog_exit(status);
return status;
}
@@ -1776,8 +1714,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
BUG_ON(!inode);
- mlog_entry_void();
-
mlog(0, "inode %llu try to take %s open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
@@ -1799,7 +1735,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
level, DLM_LKF_NOQUEUE, 0);
out:
- mlog_exit(status);
return status;
}
@@ -1811,8 +1746,6 @@ void ocfs2_open_unlock(struct inode *inode)
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry_void();
-
mlog(0, "inode %llu drop open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -1827,7 +1760,7 @@ void ocfs2_open_unlock(struct inode *inode)
DLM_LOCK_EX);
out:
- mlog_exit_void();
+ return;
}
static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
@@ -2043,8 +1976,6 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
{
int kick = 0;
- mlog_entry_void();
-
/* If we know that another node is waiting on our lock, kick
* the downconvert thread * pre-emptively when we reach a release
* condition. */
@@ -2065,8 +1996,6 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
if (kick)
ocfs2_wake_downconvert_thread(osb);
-
- mlog_exit_void();
}
#define OCFS2_SEC_BITS 34
@@ -2095,8 +2024,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- mlog_entry_void();
-
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
/*
@@ -2128,8 +2055,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
out:
mlog_meta_lvb(0, lockres);
-
- mlog_exit_void();
}
static void ocfs2_unpack_timespec(struct timespec *spec,
@@ -2145,8 +2070,6 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- mlog_entry_void();
-
mlog_meta_lvb(0, lockres);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
@@ -2177,8 +2100,6 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
ocfs2_unpack_timespec(&inode->i_ctime,
be64_to_cpu(lvb->lvb_ictime_packed));
spin_unlock(&oi->ip_lock);
-
- mlog_exit_void();
}
static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
@@ -2205,8 +2126,6 @@ static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
unsigned long flags;
int status = 0;
- mlog_entry_void();
-
refresh_check:
spin_lock_irqsave(&lockres->l_lock, flags);
if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
@@ -2227,7 +2146,7 @@ refresh_check:
status = 1;
bail:
- mlog_exit(status);
+ mlog(0, "status %d\n", status);
return status;
}
@@ -2237,7 +2156,6 @@ static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockre
int status)
{
unsigned long flags;
- mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
@@ -2246,8 +2164,6 @@ static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockre
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
-
- mlog_exit_void();
}
/* may or may not return a bh if it went to disk. */
@@ -2260,8 +2176,6 @@ static int ocfs2_inode_lock_update(struct inode *inode,
struct ocfs2_dinode *fe;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry_void();
-
if (ocfs2_mount_local(osb))
goto bail;
@@ -2330,7 +2244,6 @@ static int ocfs2_inode_lock_update(struct inode *inode,
bail_refresh:
ocfs2_complete_lock_res_refresh(lockres, status);
bail:
- mlog_exit(status);
return status;
}
@@ -2374,8 +2287,6 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
BUG_ON(!inode);
- mlog_entry_void();
-
mlog(0, "inode %llu, take %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
@@ -2467,7 +2378,6 @@ bail:
if (local_bh)
brelse(local_bh);
- mlog_exit(status);
return status;
}
@@ -2517,7 +2427,6 @@ int ocfs2_inode_lock_atime(struct inode *inode,
{
int ret;
- mlog_entry_void();
ret = ocfs2_inode_lock(inode, NULL, 0);
if (ret < 0) {
mlog_errno(ret);
@@ -2545,7 +2454,6 @@ int ocfs2_inode_lock_atime(struct inode *inode,
} else
*level = 0;
- mlog_exit(ret);
return ret;
}
@@ -2556,8 +2464,6 @@ void ocfs2_inode_unlock(struct inode *inode,
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry_void();
-
mlog(0, "inode %llu drop %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
@@ -2565,8 +2471,6 @@ void ocfs2_inode_unlock(struct inode *inode,
if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
-
- mlog_exit_void();
}
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
@@ -2617,8 +2521,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
- mlog_entry_void();
-
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
@@ -2650,7 +2552,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
ocfs2_track_lock_refresh(lockres);
}
bail:
- mlog_exit(status);
return status;
}
@@ -2869,8 +2770,15 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
return iter;
}
-/* So that debugfs.ocfs2 can determine which format is being used */
-#define OCFS2_DLM_DEBUG_STR_VERSION 2
+/*
+ * Version is used by debugfs.ocfs2 to determine the format being used
+ *
+ * New in version 2
+ * - Lock stats printed
+ * New in version 3
+ * - Max time in lock stats is in usecs (instead of nsecs)
+ */
+#define OCFS2_DLM_DEBUG_STR_VERSION 3
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
int i;
@@ -2912,18 +2820,18 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
seq_printf(m, "0x%x\t", lvb[i]);
#ifdef CONFIG_OCFS2_FS_STATS
-# define lock_num_prmode(_l) (_l)->l_lock_num_prmode
-# define lock_num_exmode(_l) (_l)->l_lock_num_exmode
-# define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed
-# define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed
-# define lock_total_prmode(_l) (_l)->l_lock_total_prmode
-# define lock_total_exmode(_l) (_l)->l_lock_total_exmode
-# define lock_max_prmode(_l) (_l)->l_lock_max_prmode
-# define lock_max_exmode(_l) (_l)->l_lock_max_exmode
-# define lock_refresh(_l) (_l)->l_lock_refresh
+# define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
+# define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
+# define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
+# define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
+# define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
+# define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
+# define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
+# define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
+# define lock_refresh(_l) ((_l)->l_lock_refresh)
#else
-# define lock_num_prmode(_l) (0ULL)
-# define lock_num_exmode(_l) (0ULL)
+# define lock_num_prmode(_l) (0)
+# define lock_num_exmode(_l) (0)
# define lock_num_prmode_failed(_l) (0)
# define lock_num_exmode_failed(_l) (0)
# define lock_total_prmode(_l) (0ULL)
@@ -2933,8 +2841,8 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
# define lock_refresh(_l) (0)
#endif
/* The following seq_print was added in version 2 of this output */
- seq_printf(m, "%llu\t"
- "%llu\t"
+ seq_printf(m, "%u\t"
+ "%u\t"
"%u\t"
"%u\t"
"%llu\t"
@@ -3054,8 +2962,6 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
int status = 0;
struct ocfs2_cluster_connection *conn = NULL;
- mlog_entry_void();
-
if (ocfs2_mount_local(osb)) {
osb->node_num = 0;
goto local;
@@ -3112,15 +3018,12 @@ bail:
kthread_stop(osb->dc_task);
}
- mlog_exit(status);
return status;
}
void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
int hangup_pending)
{
- mlog_entry_void();
-
ocfs2_drop_osb_locks(osb);
/*
@@ -3143,8 +3046,6 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
osb->cconn = NULL;
ocfs2_dlm_shutdown_debug(osb);
-
- mlog_exit_void();
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,
@@ -3226,7 +3127,6 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb,
ocfs2_wait_on_busy_lock(lockres);
out:
- mlog_exit(0);
return 0;
}
@@ -3284,8 +3184,6 @@ int ocfs2_drop_inode_locks(struct inode *inode)
{
int status, err;
- mlog_entry_void();
-
/* No need to call ocfs2_mark_lockres_freeing here -
* ocfs2_clear_inode has done it for us. */
@@ -3310,7 +3208,6 @@ int ocfs2_drop_inode_locks(struct inode *inode)
if (err < 0 && !status)
status = err;
- mlog_exit(status);
return status;
}
@@ -3352,8 +3249,6 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
int ret;
u32 dlm_flags = DLM_LKF_CONVERT;
- mlog_entry_void();
-
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
@@ -3375,7 +3270,6 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
ret = 0;
bail:
- mlog_exit(ret);
return ret;
}
@@ -3385,8 +3279,6 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
{
assert_spin_locked(&lockres->l_lock);
- mlog_entry_void();
-
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
/* If we're already trying to cancel a lock conversion
* then just drop the spinlock and allow the caller to
@@ -3416,8 +3308,6 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
{
int ret;
- mlog_entry_void();
-
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
DLM_LKF_CANCEL);
if (ret) {
@@ -3427,7 +3317,6 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
- mlog_exit(ret);
return ret;
}
@@ -3443,8 +3332,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
int set_lvb = 0;
unsigned int gen;
- mlog_entry_void();
-
spin_lock_irqsave(&lockres->l_lock, flags);
recheck:
@@ -3619,14 +3506,14 @@ downconvert:
gen);
leave:
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
leave_requeue:
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->requeue = 1;
- mlog_exit(0);
return 0;
}
@@ -3859,8 +3746,6 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
oinfo->dqi_gi.dqi_type);
- mlog_entry_void();
-
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
@@ -3869,8 +3754,6 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
-
- mlog_exit_void();
}
void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
@@ -3879,10 +3762,8 @@ void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
- mlog_entry_void();
if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
- mlog_exit_void();
}
static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
@@ -3937,8 +3818,6 @@ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
int status = 0;
- mlog_entry_void();
-
/* On RO devices, locking really isn't needed... */
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
@@ -3961,7 +3840,6 @@ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
ocfs2_qinfo_unlock(oinfo, ex);
ocfs2_complete_lock_res_refresh(lockres, status);
bail:
- mlog_exit(status);
return status;
}
@@ -4007,8 +3885,6 @@ static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
* considered valid until we remove the OCFS2_LOCK_QUEUED
* flag. */
- mlog_entry_void();
-
BUG_ON(!lockres);
BUG_ON(!lockres->l_ops);
@@ -4042,15 +3918,11 @@ unqueue:
if (ctl.unblock_action != UNBLOCK_CONTINUE
&& lockres->l_ops->post_unlock)
lockres->l_ops->post_unlock(osb, lockres);
-
- mlog_exit_void();
}
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
- mlog_entry_void();
-
assert_spin_locked(&lockres->l_lock);
if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -4071,8 +3943,6 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
osb->blocked_lock_count++;
}
spin_unlock(&osb->dc_task_lock);
-
- mlog_exit_void();
}
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
@@ -4080,8 +3950,6 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
unsigned long processed;
struct ocfs2_lock_res *lockres;
- mlog_entry_void();
-
spin_lock(&osb->dc_task_lock);
/* grab this early so we know to try again if a state change and
* wake happens part-way through our work */
@@ -4105,8 +3973,6 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
spin_lock(&osb->dc_task_lock);
}
spin_unlock(&osb->dc_task_lock);
-
- mlog_exit_void();
}
static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
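
The dlmglue.c stats hunks above replace the old current_kernel_time()/timespec_to_ns() arithmetic with ktime_t: mw_lock_start is taken from ktime_get(), the wait is ktime_sub(ktime_get(), start), ls_total accumulates nanoseconds while ls_max is stored in microseconds (hence the debugfs format bump to version 3), and a wrap of the 32-bit ls_gets counter restarts the accumulators. A condensed sketch, assuming the ocfs2_lock_stats fields implied by the lock_* macros earlier in this diff (ls_fail omitted):

#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/types.h>

struct lock_stats {
	u64 ls_total;	/* cumulative wait, nanoseconds */
	u32 ls_gets;	/* number of acquires */
	u32 ls_max;	/* worst single wait, microseconds */
};

static void update_stats(struct lock_stats *s, ktime_t start)
{
	ktime_t kt = ktime_sub(ktime_get(), start);
	u32 usec = ktime_to_us(kt);

	s->ls_gets++;
	s->ls_total += ktime_to_ns(kt);
	/* ls_gets wrapped to zero: restart the averages from here */
	if (unlikely(s->ls_gets == 0)) {
		s->ls_gets++;
		s->ls_total = ktime_to_ns(kt);
	}
	if (s->ls_max < usec)
		s->ls_max = usec;
}
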
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 254652a9b542..745db42528d5 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/types.h>
-#define MLOG_MASK_PREFIX ML_EXPORT
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -40,6 +39,7 @@
#include "buffer_head_io.h"
#include "suballoc.h"
+#include "ocfs2_trace.h"
struct ocfs2_inode_handle
{
@@ -56,10 +56,9 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
int status, set;
struct dentry *result;
- mlog_entry("(0x%p, 0x%p)\n", sb, handle);
+ trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno);
if (blkno == 0) {
- mlog(0, "nfs wants inode with blkno: 0\n");
result = ERR_PTR(-ESTALE);
goto bail;
}
@@ -83,6 +82,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
}
status = ocfs2_test_inode_bit(osb, blkno, &set);
+ trace_ocfs2_get_dentry_test_bit(status, set);
if (status < 0) {
if (status == -EINVAL) {
/*
@@ -90,18 +90,14 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
* as an inode, we return -ESTALE to be
* nice
*/
- mlog(0, "test inode bit failed %d\n", status);
status = -ESTALE;
- } else {
+ } else
mlog(ML_ERROR, "test inode bit failed %d\n", status);
- }
goto unlock_nfs_sync;
}
/* If the inode allocator bit is clear, this inode must be stale */
if (!set) {
- mlog(0, "inode %llu suballoc bit is clear\n",
- (unsigned long long)blkno);
status = -ESTALE;
goto unlock_nfs_sync;
}
@@ -114,8 +110,8 @@ unlock_nfs_sync:
check_err:
if (status < 0) {
if (status == -ESTALE) {
- mlog(0, "stale inode ino: %llu generation: %u\n",
- (unsigned long long)blkno, handle->ih_generation);
+ trace_ocfs2_get_dentry_stale((unsigned long long)blkno,
+ handle->ih_generation);
}
result = ERR_PTR(status);
goto bail;
@@ -130,8 +126,9 @@ check_err:
check_gen:
if (handle->ih_generation != inode->i_generation) {
- iput(inode);
- mlog(0, "stale inode ino: %llu generation: %u\n",
- (unsigned long long)blkno, handle->ih_generation);
+ trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
+ handle->ih_generation,
+ inode->i_generation);
+ iput(inode);
result = ERR_PTR(-ESTALE);
goto bail;
}
@@ -141,7 +138,7 @@ check_gen:
mlog_errno(PTR_ERR(result));
bail:
- mlog_exit_ptr(result);
+ trace_ocfs2_get_dentry_end(result);
return result;
}
@@ -152,11 +149,8 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
struct dentry *parent;
struct inode *dir = child->d_inode;
- mlog_entry("(0x%p, '%.*s')\n", child,
- child->d_name.len, child->d_name.name);
-
- mlog(0, "find parent of directory %llu\n",
- (unsigned long long)OCFS2_I(dir)->ip_blkno);
+ trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno);
status = ocfs2_inode_lock(dir, NULL, 0);
if (status < 0) {
@@ -178,7 +172,7 @@ bail_unlock:
ocfs2_inode_unlock(dir, 0);
bail:
- mlog_exit_ptr(parent);
+ trace_ocfs2_get_parent_end(parent);
return parent;
}
@@ -193,9 +187,9 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
u32 generation;
__le32 *fh = (__force __le32 *) fh_in;
- mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry,
- dentry->d_name.len, dentry->d_name.name,
- fh, len, connectable);
+ trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
+ dentry->d_name.name,
+ fh, len, connectable);
if (connectable && (len < 6)) {
*max_len = 6;
@@ -210,8 +204,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
blkno = OCFS2_I(inode)->ip_blkno;
generation = inode->i_generation;
- mlog(0, "Encoding fh: blkno: %llu, generation: %u\n",
- (unsigned long long)blkno, generation);
+ trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation);
len = 3;
fh[0] = cpu_to_le32((u32)(blkno >> 32));
@@ -236,14 +229,14 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
len = 6;
type = 2;
- mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
- (unsigned long long)blkno, generation);
+ trace_ocfs2_encode_fh_parent((unsigned long long)blkno,
+ generation);
}
*max_len = len;
bail:
- mlog_exit(type);
+ trace_ocfs2_encode_fh_type(type);
return type;
}
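
From export.c onward the conversion target changes: instead of plain mlog(0, ...) calls, the debug output becomes proper tracepoints. The trace_ocfs2_* functions are generated from TRACE_EVENT() definitions in fs/ocfs2/ocfs2_trace.h, which this excerpt does not include; an event of the usual shape would look like the sketch below (illustrative only — the real header likely shares DECLARE_EVENT_CLASS templates across similar events):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_get_dentry_stale,
	TP_PROTO(unsigned long long blkno, unsigned int generation),
	TP_ARGS(blkno, generation),
	TP_STRUCT__entry(
		__field(unsigned long long, blkno)
		__field(unsigned int, generation)
	),
	TP_fast_assign(
		__entry->blkno = blkno;
		__entry->generation = generation;
	),
	TP_printk("blkno %llu generation %u",
		  __entry->blkno, __entry->generation)
);

Unlike mlog(0, ...), a disabled tracepoint compiles down to a predicted-not-taken branch and can be enabled per event at runtime through debugfs.
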
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 09e3fdfa6d33..23457b491e8c 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -28,7 +28,6 @@
#include <linux/types.h>
#include <linux/fiemap.h>
-#define MLOG_MASK_PREFIX ML_EXTENT_MAP
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -39,6 +38,7 @@
#include "inode.h"
#include "super.h"
#include "symlink.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -841,10 +841,9 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
u64 p_block, p_count;
int i, count, done = 0;
- mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, "
- "flags = %x, validate = %p)\n",
- inode, (unsigned long long)v_block, nr, bhs, flags,
- validate);
+ trace_ocfs2_read_virt_blocks(
+ inode, (unsigned long long)v_block, nr, bhs, flags,
+ validate);
if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
i_size_read(inode)) {
@@ -897,7 +896,6 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
}
out:
- mlog_exit(rc);
return rc;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index a6651956482e..41565ae52856 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -38,7 +38,6 @@
#include <linux/quotaops.h>
#include <linux/blkdev.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -61,6 +60,7 @@
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -99,8 +99,10 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
int mode = file->f_flags;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
- file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
+ trace_ocfs2_file_open(inode, file, file->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ file->f_path.dentry->d_name.len,
+ file->f_path.dentry->d_name.name, mode);
if (file->f_mode & FMODE_WRITE)
dquot_initialize(inode);
@@ -135,7 +137,6 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
}
leave:
- mlog_exit(status);
return status;
}
@@ -143,19 +144,19 @@ static int ocfs2_file_release(struct inode *inode, struct file *file)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
- file->f_path.dentry->d_name.len,
- file->f_path.dentry->d_name.name);
-
spin_lock(&oi->ip_lock);
if (!--oi->ip_open_count)
oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
+
+ trace_ocfs2_file_release(inode, file, file->f_path.dentry,
+ oi->ip_blkno,
+ file->f_path.dentry->d_name.len,
+ file->f_path.dentry->d_name.name,
+ oi->ip_open_count);
spin_unlock(&oi->ip_lock);
ocfs2_free_file_private(inode, file);
- mlog_exit(0);
-
return 0;
}
@@ -177,9 +178,11 @@ static int ocfs2_sync_file(struct file *file, int datasync)
struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry("(0x%p, %d, 0x%p, '%.*s')\n", file, datasync,
- file->f_path.dentry, file->f_path.dentry->d_name.len,
- file->f_path.dentry->d_name.name);
+ trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
+ OCFS2_I(inode)->ip_blkno,
+ file->f_path.dentry->d_name.len,
+ file->f_path.dentry->d_name.name,
+ (unsigned long long)datasync);
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
/*
@@ -195,7 +198,8 @@ static int ocfs2_sync_file(struct file *file, int datasync)
err = jbd2_journal_force_commit(journal);
bail:
- mlog_exit(err);
+ if (err)
+ mlog_errno(err);
return (err < 0) ? -EIO : 0;
}
@@ -251,8 +255,6 @@ int ocfs2_update_inode_atime(struct inode *inode,
handle_t *handle;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
- mlog_entry_void();
-
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
@@ -280,7 +282,6 @@ int ocfs2_update_inode_atime(struct inode *inode,
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
- mlog_exit(ret);
return ret;
}
@@ -291,7 +292,6 @@ static int ocfs2_set_inode_size(handle_t *handle,
{
int status;
- mlog_entry_void();
i_size_write(inode, new_i_size);
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -303,7 +303,6 @@ static int ocfs2_set_inode_size(handle_t *handle,
}
bail:
- mlog_exit(status);
return status;
}
@@ -375,8 +374,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
struct ocfs2_dinode *di;
u64 cluster_bytes;
- mlog_entry_void();
-
/*
* We need to CoW the cluster contains the offset if it is reflinked
* since we will call ocfs2_zero_range_for_truncate later which will
@@ -429,8 +426,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
out_commit:
ocfs2_commit_trans(osb, handle);
out:
-
- mlog_exit(status);
return status;
}
@@ -442,14 +437,14 @@ static int ocfs2_truncate_file(struct inode *inode,
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- mlog_entry("(inode = %llu, new_i_size = %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)new_i_size);
-
/* We trust di_bh because it comes from ocfs2_inode_lock(), which
* already validated it */
fe = (struct ocfs2_dinode *) di_bh->b_data;
+ trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)le64_to_cpu(fe->i_size),
+ (unsigned long long)new_i_size);
+
mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
"Inode %llu, inode i_size = %lld != di "
"i_size = %llu, i_flags = 0x%x\n",
@@ -459,19 +454,14 @@ static int ocfs2_truncate_file(struct inode *inode,
le32_to_cpu(fe->i_flags));
if (new_i_size > le64_to_cpu(fe->i_size)) {
- mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
- (unsigned long long)le64_to_cpu(fe->i_size),
- (unsigned long long)new_i_size);
+ trace_ocfs2_truncate_file_error(
+ (unsigned long long)le64_to_cpu(fe->i_size),
+ (unsigned long long)new_i_size);
status = -EINVAL;
mlog_errno(status);
goto bail;
}
- mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
- (unsigned long long)le64_to_cpu(fe->i_blkno),
- (unsigned long long)le64_to_cpu(fe->i_size),
- (unsigned long long)new_i_size);
-
/* lets handle the simple truncate cases before doing any more
* cluster locking. */
if (new_i_size == le64_to_cpu(fe->i_size))
@@ -525,7 +515,6 @@ bail:
if (!status && OCFS2_I(inode)->ip_clusters == 0)
status = ocfs2_try_remove_refcount_tree(inode, di_bh);
- mlog_exit(status);
return status;
}
@@ -578,8 +567,6 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
struct ocfs2_extent_tree et;
int did_quota = 0;
- mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
-
/*
* This function only exists for file systems which don't
* support holes.
@@ -596,11 +583,6 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
restart_all:
BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
- mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
- "clusters_to_add = %u\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
- clusters_to_add);
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
&data_ac, &meta_ac);
@@ -620,6 +602,12 @@ restart_all:
}
restarted_transaction:
+ trace_ocfs2_extend_allocation(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)i_size_read(inode),
+ le32_to_cpu(fe->i_clusters), clusters_to_add,
+ why, restart_func);
+
status = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
if (status)
@@ -666,13 +654,11 @@ restarted_transaction:
if (why != RESTART_NONE && clusters_to_add) {
if (why == RESTART_META) {
- mlog(0, "restarting function.\n");
restart_func = 1;
status = 0;
} else {
BUG_ON(why != RESTART_TRANS);
- mlog(0, "restarting transaction.\n");
/* TODO: This can be more intelligent. */
credits = ocfs2_calc_extend_credits(osb->sb,
&fe->id2.i_list,
@@ -689,11 +675,11 @@ restarted_transaction:
}
}
- mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
+ trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
le32_to_cpu(fe->i_clusters),
- (unsigned long long)le64_to_cpu(fe->i_size));
- mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
- OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));
+ (unsigned long long)le64_to_cpu(fe->i_size),
+ OCFS2_I(inode)->ip_clusters,
+ (unsigned long long)i_size_read(inode));
leave:
if (status < 0 && did_quota)
@@ -718,7 +704,6 @@ leave:
brelse(bh);
bh = NULL;
- mlog_exit(status);
return status;
}
@@ -785,10 +770,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
if (!zero_to)
zero_to = PAGE_CACHE_SIZE;
- mlog(0,
- "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n",
- (unsigned long long)abs_from, (unsigned long long)abs_to,
- index, zero_from, zero_to);
+ trace_ocfs2_write_zero_page(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)abs_from,
+ (unsigned long long)abs_to,
+ index, zero_from, zero_to);
/* We know that zero_from is block aligned */
for (block_start = zero_from; block_start < zero_to;
@@ -928,9 +914,10 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
u64 next_pos;
u64 zero_pos = range_start;
- mlog(0, "range_start = %llu, range_end = %llu\n",
- (unsigned long long)range_start,
- (unsigned long long)range_end);
+ trace_ocfs2_zero_extend_range(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)range_start,
+ (unsigned long long)range_end);
BUG_ON(range_start >= range_end);
while (zero_pos < range_end) {
@@ -962,9 +949,9 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
struct super_block *sb = inode->i_sb;
zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
- mlog(0, "zero_start %llu for i_size %llu\n",
- (unsigned long long)zero_start,
- (unsigned long long)i_size_read(inode));
+ trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)zero_start,
+ (unsigned long long)i_size_read(inode));
while (zero_start < zero_to_size) {
ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
zero_to_size,
@@ -1113,30 +1100,20 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
struct dquot *transfer_to[MAXQUOTAS] = { };
int qtype;
- mlog_entry("(0x%p, '%.*s')\n", dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_setattr(inode, dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ dentry->d_name.len, dentry->d_name.name,
+ attr->ia_valid, attr->ia_mode,
+ attr->ia_uid, attr->ia_gid);
/* ensuring we don't even attempt to truncate a symlink */
if (S_ISLNK(inode->i_mode))
attr->ia_valid &= ~ATTR_SIZE;
- if (attr->ia_valid & ATTR_MODE)
- mlog(0, "mode change: %d\n", attr->ia_mode);
- if (attr->ia_valid & ATTR_UID)
- mlog(0, "uid change: %d\n", attr->ia_uid);
- if (attr->ia_valid & ATTR_GID)
- mlog(0, "gid change: %d\n", attr->ia_gid);
- if (attr->ia_valid & ATTR_SIZE)
- mlog(0, "size change...\n");
- if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
- mlog(0, "time change...\n");
-
#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
| ATTR_GID | ATTR_UID | ATTR_MODE)
- if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
- mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
+ if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
return 0;
- }
status = inode_change_ok(inode, attr);
if (status)
@@ -1274,7 +1251,6 @@ bail:
mlog_errno(status);
}
- mlog_exit(status);
return status;
}
@@ -1287,8 +1263,6 @@ int ocfs2_getattr(struct vfsmount *mnt,
struct ocfs2_super *osb = sb->s_fs_info;
int err;
- mlog_entry_void();
-
err = ocfs2_inode_revalidate(dentry);
if (err) {
if (err != -ENOENT)
@@ -1302,8 +1276,6 @@ int ocfs2_getattr(struct vfsmount *mnt,
stat->blksize = osb->s_clustersize;
bail:
- mlog_exit(err);
-
return err;
}
@@ -1314,8 +1286,6 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
if (flags & IPERM_FLAG_RCU)
return -ECHILD;
- mlog_entry_void();
-
ret = ocfs2_inode_lock(inode, NULL, 0);
if (ret) {
if (ret != -ENOENT)
@@ -1327,7 +1297,6 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
ocfs2_inode_unlock(inode, 0);
out:
- mlog_exit(ret);
return ret;
}
@@ -1339,8 +1308,9 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di;
- mlog_entry("(Inode %llu, mode 0%o)\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);
+ trace_ocfs2_write_remove_suid(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ inode->i_mode);
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
@@ -1368,7 +1338,6 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
out_trans:
ocfs2_commit_trans(osb, handle);
out:
- mlog_exit(ret);
return ret;
}
@@ -1547,8 +1516,9 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
* partial clusters here. There's no need to worry about
* physical allocation - the zeroing code knows to skip holes.
*/
- mlog(0, "byte start: %llu, end: %llu\n",
- (unsigned long long)start, (unsigned long long)end);
+ trace_ocfs2_zero_partial_clusters(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)start, (unsigned long long)end);
/*
* If both edges are on a cluster boundary then there's no
@@ -1572,8 +1542,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
if (tmpend > end)
tmpend = end;
- mlog(0, "1st range: start: %llu, tmpend: %llu\n",
- (unsigned long long)start, (unsigned long long)tmpend);
+ trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
+ (unsigned long long)tmpend);
ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
if (ret)
@@ -1587,8 +1557,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
*/
start = end & ~(osb->s_clustersize - 1);
- mlog(0, "2nd range: start: %llu, end: %llu\n",
- (unsigned long long)start, (unsigned long long)end);
+ trace_ocfs2_zero_partial_clusters_range2(
+ (unsigned long long)start, (unsigned long long)end);
ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
if (ret)
@@ -1688,6 +1658,11 @@ static int ocfs2_remove_inode_range(struct inode *inode,
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
+ trace_ocfs2_remove_inode_range(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)byte_start,
+ (unsigned long long)byte_len);
+
if (byte_len == 0)
return 0;
@@ -1734,11 +1709,6 @@ static int ocfs2_remove_inode_range(struct inode *inode,
trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
cluster_in_el = trunc_end;
- mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)byte_start,
- (unsigned long long)byte_len, trunc_start, trunc_end);
-
ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
if (ret) {
mlog_errno(ret);
@@ -2093,7 +2063,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
int ret = 0, meta_level = 0;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
- loff_t saved_pos, end;
+ loff_t saved_pos = 0, end;
/*
* We start with a read level meta lock and only jump to an ex
@@ -2132,12 +2102,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
/* work on a copy of ppos until we're sure that we won't have
* to recalculate it due to relocking. */
- if (appending) {
+ if (appending)
saved_pos = i_size_read(inode);
- mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
- } else {
+ else
saved_pos = *ppos;
- }
end = saved_pos + count;
@@ -2208,6 +2176,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
*ppos = saved_pos;
out_unlock:
+ trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
+ saved_pos, appending, count,
+ direct_io, has_refcount);
+
if (meta_level >= 0)
ocfs2_inode_unlock(inode, meta_level);
@@ -2233,10 +2205,11 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
- mlog_entry("(0x%p, %u, '%.*s')\n", file,
- (unsigned int)nr_segs,
- file->f_path.dentry->d_name.len,
- file->f_path.dentry->d_name.name);
+ trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ file->f_path.dentry->d_name.len,
+ file->f_path.dentry->d_name.name,
+ (unsigned int)nr_segs);
if (iocb->ki_left == 0)
return 0;
@@ -2402,7 +2375,6 @@ out_sems:
if (written)
ret = written;
- mlog_exit(ret);
return ret;
}
@@ -2438,10 +2410,11 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
.u.file = out,
};
- mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
- (unsigned int)len,
- out->f_path.dentry->d_name.len,
- out->f_path.dentry->d_name.name);
+
+ trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ out->f_path.dentry->d_name.len,
+ out->f_path.dentry->d_name.name, len);
if (pipe->inode)
mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
@@ -2485,7 +2458,6 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
}
- mlog_exit(ret);
return ret;
}
@@ -2498,10 +2470,10 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
int ret = 0, lock_level = 0;
struct inode *inode = in->f_path.dentry->d_inode;
- mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
- (unsigned int)len,
- in->f_path.dentry->d_name.len,
- in->f_path.dentry->d_name.name);
+ trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ in->f_path.dentry->d_name.len,
+ in->f_path.dentry->d_name.name, len);
/*
* See the comment in ocfs2_file_aio_read()
@@ -2516,7 +2488,6 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
ret = generic_file_splice_read(in, ppos, pipe, len, flags);
bail:
- mlog_exit(ret);
return ret;
}
@@ -2529,10 +2500,11 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
struct file *filp = iocb->ki_filp;
struct inode *inode = filp->f_path.dentry->d_inode;
- mlog_entry("(0x%p, %u, '%.*s')\n", filp,
- (unsigned int)nr_segs,
- filp->f_path.dentry->d_name.len,
- filp->f_path.dentry->d_name.name);
+ trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ filp->f_path.dentry->d_name.len,
+ filp->f_path.dentry->d_name.name, nr_segs);
+
if (!inode) {
ret = -EINVAL;
@@ -2578,8 +2550,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
ocfs2_inode_unlock(inode, lock_level);
ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
- if (ret == -EINVAL)
- mlog(0, "generic_file_aio_read returned -EINVAL\n");
+ trace_generic_file_aio_read_ret(ret);
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
@@ -2597,7 +2568,6 @@ bail:
}
if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
- mlog_exit(ret);
return ret;
}
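
The "loff_t saved_pos = 0" change in ocfs2_prepare_inode_for_write() above pairs with the new exit tracepoint at out_unlock: error paths can "goto out_unlock" before saved_pos is assigned, and the trace call now reads saved_pos on every exit, so the initializer keeps it from being read uninitialized. Schematically (hypothetical names):

static long prepare(int fail_early)
{
	long saved_pos = 0;	/* without this, the exit trace could read garbage */

	if (fail_early)
		goto out_unlock;
	saved_pos = 42;		/* normal assignment path */
out_unlock:
	return saved_pos;	/* stands in for the tracepoint argument */
}
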
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 1aa863dd901f..d8208b20dc53 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -28,7 +28,6 @@
#include <linux/types.h>
#include <linux/highmem.h>
-#define MLOG_MASK_PREFIX ML_SUPER
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -37,6 +36,7 @@
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -66,7 +66,7 @@ void ocfs2_do_node_down(int node_num, void *data)
BUG_ON(osb->node_num == node_num);
- mlog(0, "ocfs2: node down event for %d\n", node_num);
+ trace_ocfs2_do_node_down(node_num);
if (!osb->cconn) {
/*
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 4068c6c4c6f6..b4c8bb6b8d28 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -31,7 +31,6 @@
#include <asm/byteorder.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -53,6 +52,7 @@
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -131,7 +131,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
struct super_block *sb = osb->sb;
struct ocfs2_find_inode_args args;
- mlog_entry("(blkno = %llu)\n", (unsigned long long)blkno);
+ trace_ocfs2_iget_begin((unsigned long long)blkno, flags,
+ sysfile_type);
/* Ok. By now we've either got the offsets passed to us by the
* caller, or we just pulled them off the bh. Lets do some
@@ -152,16 +153,16 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
/* inode was *not* in the inode cache. 2.6.x requires
* us to do our own read_inode call and unlock it
* afterwards. */
- if (inode && inode->i_state & I_NEW) {
- mlog(0, "Inode was not in inode cache, reading it.\n");
- ocfs2_read_locked_inode(inode, &args);
- unlock_new_inode(inode);
- }
if (inode == NULL) {
inode = ERR_PTR(-ENOMEM);
mlog_errno(PTR_ERR(inode));
goto bail;
}
+ trace_ocfs2_iget5_locked(inode->i_state);
+ if (inode->i_state & I_NEW) {
+ ocfs2_read_locked_inode(inode, &args);
+ unlock_new_inode(inode);
+ }
if (is_bad_inode(inode)) {
iput(inode);
inode = ERR_PTR(-ESTALE);
@@ -170,9 +171,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
bail:
if (!IS_ERR(inode)) {
- mlog(0, "returning inode with number %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
- mlog_exit_ptr(inode);
+ trace_ocfs2_iget_end(inode,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
}
return inode;
@@ -192,18 +192,17 @@ static int ocfs2_find_actor(struct inode *inode, void *opaque)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
int ret = 0;
- mlog_entry("(0x%p, %lu, 0x%p)\n", inode, inode->i_ino, opaque);
-
args = opaque;
mlog_bug_on_msg(!inode, "No inode in find actor!\n");
+ trace_ocfs2_find_actor(inode, inode->i_ino, opaque, args->fi_blkno);
+
if (oi->ip_blkno != args->fi_blkno)
goto bail;
ret = 1;
bail:
- mlog_exit(ret);
return ret;
}
@@ -218,8 +217,6 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
static struct lock_class_key ocfs2_quota_ip_alloc_sem_key,
ocfs2_file_ip_alloc_sem_key;
- mlog_entry("inode = %p, opaque = %p\n", inode, opaque);
-
inode->i_ino = args->fi_ino;
OCFS2_I(inode)->ip_blkno = args->fi_blkno;
if (args->fi_sysfile_type != 0)
@@ -235,7 +232,6 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
&ocfs2_file_ip_alloc_sem_key);
- mlog_exit(0);
return 0;
}
@@ -246,9 +242,6 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
struct ocfs2_super *osb;
int use_plocks = 1;
- mlog_entry("(0x%p, size:%llu)\n", inode,
- (unsigned long long)le64_to_cpu(fe->i_size));
-
sb = inode->i_sb;
osb = OCFS2_SB(sb);
@@ -300,20 +293,20 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
inode->i_nlink = ocfs2_read_links_count(fe);
+ trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno,
+ le32_to_cpu(fe->i_flags));
if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
inode->i_flags |= S_NOQUOTA;
}
-
+
if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
- mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino);
} else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
} else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) {
inode->i_flags |= S_NOQUOTA;
} else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
- mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino);
/* we can't actually hit this as read_inode can't
* handle superblocks today ;-) */
BUG();
@@ -381,7 +374,6 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
if (S_ISDIR(inode->i_mode))
ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
OCFS2_RESV_FLAG_DIR);
- mlog_exit_void();
}
static int ocfs2_read_locked_inode(struct inode *inode,
@@ -394,8 +386,6 @@ static int ocfs2_read_locked_inode(struct inode *inode,
int status, can_lock;
u32 generation = 0;
- mlog_entry("(0x%p, 0x%p)\n", inode, args);
-
status = -EINVAL;
if (inode == NULL || inode->i_sb == NULL) {
mlog(ML_ERROR, "bad inode\n");
@@ -443,6 +433,9 @@ static int ocfs2_read_locked_inode(struct inode *inode,
&& !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY)
&& !ocfs2_mount_local(osb);
+ trace_ocfs2_read_locked_inode(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno, can_lock);
+
/*
* To maintain backwards compatibility with older versions of
* ocfs2-tools, we still store the generation value for system
@@ -534,7 +527,6 @@ bail:
if (args && bh)
brelse(bh);
- mlog_exit(status);
return status;
}
@@ -551,8 +543,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
struct ocfs2_dinode *fe;
handle_t *handle = NULL;
- mlog_entry_void();
-
fe = (struct ocfs2_dinode *) fe_bh->b_data;
/*
@@ -600,7 +590,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
out:
if (handle)
ocfs2_commit_trans(osb, handle);
- mlog_exit(status);
return status;
}
@@ -696,8 +685,6 @@ static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb,
spin_lock(&osb->osb_lock);
if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) {
- mlog(0, "Recovery is happening on orphan dir %d, will skip "
- "this inode\n", slot);
ret = -EDEADLK;
goto out;
}
@@ -706,6 +693,7 @@ static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb,
osb->osb_orphan_wipes[slot]++;
out:
spin_unlock(&osb->osb_lock);
+ trace_ocfs2_check_orphan_recovery_state(slot, ret);
return ret;
}
@@ -816,6 +804,10 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ trace_ocfs2_inode_is_valid_to_delete(current, osb->dc_task,
+ (unsigned long long)oi->ip_blkno,
+ oi->ip_flags);
+
/* We shouldn't be getting here for the root directory
* inode.. */
if (inode == osb->root_inode) {
@@ -828,11 +820,8 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
* have to skip deleting this guy. That's OK though because
* the node who's doing the actual deleting should handle it
* anyway. */
- if (current == osb->dc_task) {
- mlog(0, "Skipping delete of %lu because we're currently "
- "in downconvert\n", inode->i_ino);
+ if (current == osb->dc_task)
goto bail;
- }
spin_lock(&oi->ip_lock);
/* OCFS2 *never* deletes system files. This should technically
@@ -846,12 +835,9 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
/* If we have allowed wipe of this inode for another node, it
* will be marked here so we can safely skip it. Recovery will
- * cleanup any inodes we might inadvertantly skip here. */
- if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) {
- mlog(0, "Skipping delete of %lu because another node "
- "has done this for us.\n", inode->i_ino);
+ * cleanup any inodes we might inadvertently skip here. */
+ if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE)
goto bail_unlock;
- }
ret = 1;
bail_unlock:
@@ -868,28 +854,27 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
struct buffer_head *di_bh,
int *wipe)
{
- int status = 0;
+ int status = 0, reason = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di;
*wipe = 0;
+ trace_ocfs2_query_inode_wipe_begin((unsigned long long)oi->ip_blkno,
+ inode->i_nlink);
+
/* While we were waiting for the cluster lock in
* ocfs2_delete_inode, another node might have asked to delete
* the inode. Recheck our flags to catch this. */
if (!ocfs2_inode_is_valid_to_delete(inode)) {
- mlog(0, "Skipping delete of %llu because flags changed\n",
- (unsigned long long)oi->ip_blkno);
+ reason = 1;
goto bail;
}
/* Now that we have an up to date inode, we can double check
* the link count. */
- if (inode->i_nlink) {
- mlog(0, "Skipping delete of %llu because nlink = %u\n",
- (unsigned long long)oi->ip_blkno, inode->i_nlink);
+ if (inode->i_nlink)
goto bail;
- }
/* Do some basic inode verification... */
di = (struct ocfs2_dinode *) di_bh->b_data;
@@ -904,9 +889,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
* ORPHANED_FL not.
*/
if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) {
- mlog(0, "Reflinked inode %llu is no longer orphaned. "
- "it shouldn't be deleted\n",
- (unsigned long long)oi->ip_blkno);
+ reason = 2;
goto bail;
}
@@ -934,7 +917,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
* the inode open lock in ocfs2_read_locked_inode(). When we
* get to ->delete_inode(), each node tries to convert its
* lock to an exclusive. Trylocks are serialized by the inode
- * meta data lock. If the upconvert suceeds, we know the inode
+ * meta data lock. If the upconvert succeeds, we know the inode
* is no longer live and can be deleted.
*
* Though we call this with the meta data lock held, the
@@ -943,8 +926,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
status = ocfs2_try_open_lock(inode, 1);
if (status == -EAGAIN) {
status = 0;
- mlog(0, "Skipping delete of %llu because it is in use on "
- "other nodes\n", (unsigned long long)oi->ip_blkno);
+ reason = 3;
goto bail;
}
if (status < 0) {
@@ -953,11 +935,10 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
}
*wipe = 1;
- mlog(0, "Inode %llu is ok to wipe from orphan dir %u\n",
- (unsigned long long)oi->ip_blkno,
- le16_to_cpu(di->i_orphaned_slot));
+ trace_ocfs2_query_inode_wipe_succ(le16_to_cpu(di->i_orphaned_slot));
bail:
+ trace_ocfs2_query_inode_wipe_end(status, reason);
return status;
}
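The reason codes introduced in this hunk are positional values consumed by trace_ocfs2_query_inode_wipe_end(), not flags. A hedged decoder, with the numbering taken straight from the assignments above (0 simply means no skip reason was recorded); example_wipe_reason() is hypothetical and exists only to document the mapping:

	static const char *example_wipe_reason(int reason)
	{
		switch (reason) {
		case 1: return "inode flags changed under the cluster lock";
		case 2: return "reflinked inode is no longer orphaned";
		case 3: return "inode still in use on another node";
		default: return "none recorded";
		}
	}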
@@ -967,8 +948,8 @@ bail:
static void ocfs2_cleanup_delete_inode(struct inode *inode,
int sync_data)
{
- mlog(0, "Cleanup inode %llu, sync = %d\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
+ trace_ocfs2_cleanup_delete_inode(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
if (sync_data)
write_inode_now(inode, 1);
truncate_inode_pages(&inode->i_data, 0);
@@ -980,15 +961,15 @@ static void ocfs2_delete_inode(struct inode *inode)
sigset_t oldset;
struct buffer_head *di_bh = NULL;
- mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
+ trace_ocfs2_delete_inode(inode->i_ino,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ is_bad_inode(inode));
/* When we fail in read_inode() we mark inode as bad. The second test
* catches the case when inode allocation fails before allocating
* a block for inode. */
- if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) {
- mlog(0, "Skipping delete of bad inode\n");
+ if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno)
goto bail;
- }
dquot_initialize(inode);
@@ -1080,7 +1061,7 @@ bail_unlock_nfs_sync:
bail_unblock:
ocfs2_unblock_signals(&oldset);
bail:
- mlog_exit_void();
+ return;
}
static void ocfs2_clear_inode(struct inode *inode)
@@ -1088,11 +1069,9 @@ static void ocfs2_clear_inode(struct inode *inode)
int status;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- mlog_entry_void();
-
end_writeback(inode);
- mlog(0, "Clearing inode: %llu, nlink = %u\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink);
+ trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
+ inode->i_nlink);
mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
"Inode=%lu\n", inode->i_ino);
@@ -1181,8 +1160,6 @@ static void ocfs2_clear_inode(struct inode *inode)
*/
jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
&oi->ip_jinode);
-
- mlog_exit_void();
}
void ocfs2_evict_inode(struct inode *inode)
@@ -1204,17 +1181,14 @@ int ocfs2_drop_inode(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
int res;
- mlog_entry_void();
-
- mlog(0, "Drop inode %llu, nlink = %u, ip_flags = 0x%x\n",
- (unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags);
+ trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno,
+ inode->i_nlink, oi->ip_flags);
if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)
res = 1;
else
res = generic_drop_inode(inode);
- mlog_exit_void();
return res;
}
@@ -1226,11 +1200,11 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
struct inode *inode = dentry->d_inode;
int status = 0;
- mlog_entry("(inode = 0x%p, ino = %llu)\n", inode,
- inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL);
+ trace_ocfs2_inode_revalidate(inode,
+ inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL,
+ inode ? (unsigned long long)OCFS2_I(inode)->ip_flags : 0);
if (!inode) {
- mlog(0, "eep, no inode!\n");
status = -ENOENT;
goto bail;
}
@@ -1238,7 +1212,6 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
spin_lock(&OCFS2_I(inode)->ip_lock);
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
spin_unlock(&OCFS2_I(inode)->ip_lock);
- mlog(0, "inode deleted!\n");
status = -ENOENT;
goto bail;
}
@@ -1254,8 +1227,6 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
}
ocfs2_inode_unlock(inode, 0);
bail:
- mlog_exit(status);
-
return status;
}
@@ -1271,8 +1242,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
int status;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
- mlog_entry("(inode %llu)\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_mark_inode_dirty((unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1302,7 +1272,6 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
ocfs2_journal_dirty(handle, bh);
leave:
- mlog_exit(status);
return status;
}
@@ -1345,8 +1314,7 @@ int ocfs2_validate_inode_block(struct super_block *sb,
int rc;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
- mlog(0, "Validating dinode %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_inode_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 7a4868196152..8f13c5989eae 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -9,7 +9,6 @@
#include <linux/mount.h>
#include <linux/compat.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -46,6 +45,22 @@ static inline void __o2info_set_request_error(struct ocfs2_info_request *kreq,
#define o2info_set_request_error(a, b) \
__o2info_set_request_error((struct ocfs2_info_request *)&(a), b)
+static inline void __o2info_set_request_filled(struct ocfs2_info_request *req)
+{
+ req->ir_flags |= OCFS2_INFO_FL_FILLED;
+}
+
+#define o2info_set_request_filled(a) \
+ __o2info_set_request_filled((struct ocfs2_info_request *)&(a))
+
+static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req)
+{
+ req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
+}
+
+#define o2info_clear_request_filled(a) \
+ __o2info_clear_request_filled((struct ocfs2_info_request *)&(a))
+
static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
{
int status;
@@ -59,7 +74,6 @@ static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
*flags = OCFS2_I(inode)->ip_attr;
ocfs2_inode_unlock(inode, 0);
- mlog_exit(status);
return status;
}
@@ -82,7 +96,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
}
status = -EACCES;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
goto bail_unlock;
if (!S_ISDIR(inode->i_mode))
@@ -125,7 +139,6 @@ bail:
brelse(bh);
- mlog_exit(status);
return status;
}
@@ -139,7 +152,8 @@ int ocfs2_info_handle_blocksize(struct inode *inode,
goto bail;
oib.ib_blocksize = inode->i_sb->s_blocksize;
- oib.ib_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oib);
if (o2info_to_user(oib, req))
goto bail;
@@ -163,7 +177,8 @@ int ocfs2_info_handle_clustersize(struct inode *inode,
goto bail;
oic.ic_clustersize = osb->s_clustersize;
- oic.ic_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oic);
if (o2info_to_user(oic, req))
goto bail;
@@ -187,7 +202,8 @@ int ocfs2_info_handle_maxslots(struct inode *inode,
goto bail;
oim.im_max_slots = osb->max_slots;
- oim.im_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oim);
if (o2info_to_user(oim, req))
goto bail;
@@ -211,7 +227,8 @@ int ocfs2_info_handle_label(struct inode *inode,
goto bail;
memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
- oil.il_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oil);
if (o2info_to_user(oil, req))
goto bail;
@@ -235,7 +252,8 @@ int ocfs2_info_handle_uuid(struct inode *inode,
goto bail;
memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
- oiu.iu_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oiu);
if (o2info_to_user(oiu, req))
goto bail;
@@ -261,7 +279,8 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
oif.if_compat_features = osb->s_feature_compat;
oif.if_incompat_features = osb->s_feature_incompat;
oif.if_ro_compat_features = osb->s_feature_ro_compat;
- oif.if_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+
+ o2info_set_request_filled(oif);
if (o2info_to_user(oif, req))
goto bail;
@@ -286,7 +305,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
oij.ij_journal_size = osb->journal->j_inode->i_size;
- oij.ij_req.ir_flags |= OCFS2_INFO_FL_FILLED;
+ o2info_set_request_filled(oij);
if (o2info_to_user(oij, req))
goto bail;
@@ -308,7 +327,7 @@ int ocfs2_info_handle_unknown(struct inode *inode,
if (o2info_from_user(oir, req))
goto bail;
- oir.ir_flags &= ~OCFS2_INFO_FL_FILLED;
+ o2info_clear_request_filled(oir);
if (o2info_to_user(oir, req))
goto bail;
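All of the handlers above mark completion the same way: the new o2info_set_request_filled()/o2info_clear_request_filled() macros rely on each concrete request struct embedding a struct ocfs2_info_request as its first member, so taking the struct's address and casting yields the generic header. A minimal sketch of a caller, assuming the field layout implied by the blocksize handler (example_fill() itself is hypothetical):

	static void example_fill(struct ocfs2_info_blocksize *oib, __u32 blocksize)
	{
		oib->ib_blocksize = blocksize;
		/* expands to: ((struct ocfs2_info_request *)&(*oib))->ir_flags
		 *		|= OCFS2_INFO_FL_FILLED; */
		o2info_set_request_filled(*oib);
	}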
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index faa2303dbf0a..b141a44605ca 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -31,7 +31,6 @@
#include <linux/time.h>
#include <linux/random.h>
-#define MLOG_MASK_PREFIX ML_JOURNAL
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -52,6 +51,7 @@
#include "quota.h"
#include "buffer_head_io.h"
+#include "ocfs2_trace.h"
DEFINE_SPINLOCK(trans_inc_lock);
@@ -303,16 +303,15 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
unsigned int flushed;
struct ocfs2_journal *journal = NULL;
- mlog_entry_void();
-
journal = osb->journal;
/* Flush all pending commits and checkpoint the journal. */
down_write(&journal->j_trans_barrier);
- if (atomic_read(&journal->j_num_trans) == 0) {
+ flushed = atomic_read(&journal->j_num_trans);
+ trace_ocfs2_commit_cache_begin(flushed);
+ if (flushed == 0) {
up_write(&journal->j_trans_barrier);
- mlog(0, "No transactions for me to flush!\n");
goto finally;
}
@@ -331,13 +330,11 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
atomic_set(&journal->j_num_trans, 0);
up_write(&journal->j_trans_barrier);
- mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
- journal->j_trans_id, flushed);
+ trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);
ocfs2_wake_downconvert_thread(osb);
wake_up(&journal->j_checkpointed);
finally:
- mlog_exit(status);
return status;
}
@@ -425,9 +422,8 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
return 0;
old_nblocks = handle->h_buffer_credits;
- mlog_entry_void();
- mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
+ trace_ocfs2_extend_trans(old_nblocks, nblocks);
#ifdef CONFIG_OCFS2_DEBUG_FS
status = 1;
@@ -440,9 +436,7 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
#endif
if (status > 0) {
- mlog(0,
- "jbd2_journal_extend failed, trying "
- "jbd2_journal_restart\n");
+ trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);
status = jbd2_journal_restart(handle,
old_nblocks + nblocks);
if (status < 0) {
@@ -453,8 +447,6 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
status = 0;
bail:
-
- mlog_exit(status);
return status;
}
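ocfs2_extend_trans() keeps the extend-or-restart idiom while dropping the entry/exit logging. A minimal sketch of that idiom, assuming the jbd2 return convention (0 on success, positive when the handle cannot grow in place, negative on error); example_grow_credits() is hypothetical:

	static int example_grow_credits(handle_t *handle, int nblocks)
	{
		int old_nblocks = handle->h_buffer_credits;
		int status = jbd2_journal_extend(handle, nblocks);

		if (status > 0)	/* cannot extend in place: restart with the sum */
			status = jbd2_journal_restart(handle, old_nblocks + nblocks);
		return status;
	}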
@@ -622,12 +614,9 @@ static int __ocfs2_journal_access(handle_t *handle,
BUG_ON(!handle);
BUG_ON(!bh);
- mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
- (unsigned long long)bh->b_blocknr, type,
- (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
- "OCFS2_JOURNAL_ACCESS_CREATE" :
- "OCFS2_JOURNAL_ACCESS_WRITE",
- bh->b_size);
+ trace_ocfs2_journal_access(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)bh->b_blocknr, type, bh->b_size);
/* we can safely remove this assertion after testing. */
if (!buffer_uptodate(bh)) {
@@ -668,7 +657,6 @@ static int __ocfs2_journal_access(handle_t *handle,
mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
status, type);
- mlog_exit(status);
return status;
}
@@ -737,13 +725,10 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
{
int status;
- mlog_entry("(bh->b_blocknr=%llu)\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);
status = jbd2_journal_dirty_metadata(handle, bh);
BUG_ON(status);
-
- mlog_exit_void();
}
#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
@@ -775,8 +760,6 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
struct ocfs2_super *osb;
int inode_lock = 0;
- mlog_entry_void();
-
BUG_ON(!journal);
osb = journal->j_osb;
@@ -820,10 +803,9 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
goto done;
}
- mlog(0, "inode->i_size = %lld\n", inode->i_size);
- mlog(0, "inode->i_blocks = %llu\n",
- (unsigned long long)inode->i_blocks);
- mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
+ trace_ocfs2_journal_init(inode->i_size,
+ (unsigned long long)inode->i_blocks,
+ OCFS2_I(inode)->ip_clusters);
/* call the kernel's journal init function now */
j_journal = jbd2_journal_init_inode(inode);
@@ -833,8 +815,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
goto done;
}
- mlog(0, "Returned from jbd2_journal_init_inode\n");
- mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);
+ trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen);
*dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
OCFS2_JOURNAL_DIRTY_FL);
@@ -859,7 +840,6 @@ done:
}
}
- mlog_exit(status);
return status;
}
@@ -882,8 +862,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
struct buffer_head *bh = journal->j_bh;
struct ocfs2_dinode *fe;
- mlog_entry_void();
-
fe = (struct ocfs2_dinode *)bh->b_data;
/* The journal bh on the osb always comes from ocfs2_journal_init()
@@ -906,7 +884,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
if (status < 0)
mlog_errno(status);
- mlog_exit(status);
return status;
}
@@ -921,8 +898,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
struct inode *inode = NULL;
int num_running_trans = 0;
- mlog_entry_void();
-
BUG_ON(!osb);
journal = osb->journal;
@@ -939,10 +914,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
BUG();
num_running_trans = atomic_read(&(osb->journal->j_num_trans));
- if (num_running_trans > 0)
- mlog(0, "Shutting down journal: must wait on %d "
- "running transactions!\n",
- num_running_trans);
+ trace_ocfs2_journal_shutdown(num_running_trans);
/* Do a commit_cache here. It will flush our journal, *and*
* release any locks that are still held.
@@ -955,7 +927,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
* completely destroy the journal. */
if (osb->commit_task) {
/* Wait for the commit thread */
- mlog(0, "Waiting for ocfs2commit to exit....\n");
+ trace_ocfs2_journal_shutdown_wait(osb->commit_task);
kthread_stop(osb->commit_task);
osb->commit_task = NULL;
}
@@ -998,7 +970,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
done:
if (inode)
iput(inode);
- mlog_exit_void();
}
static void ocfs2_clear_journal_error(struct super_block *sb,
@@ -1024,8 +995,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
int status = 0;
struct ocfs2_super *osb;
- mlog_entry_void();
-
BUG_ON(!journal);
osb = journal->j_osb;
@@ -1059,7 +1028,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
osb->commit_task = NULL;
done:
- mlog_exit(status);
return status;
}
@@ -1070,8 +1038,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
int status;
- mlog_entry_void();
-
BUG_ON(!journal);
status = jbd2_journal_wipe(journal->j_journal, full);
@@ -1085,7 +1051,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
mlog_errno(status);
bail:
- mlog_exit(status);
return status;
}
@@ -1124,8 +1089,6 @@ static int ocfs2_force_read_journal(struct inode *inode)
#define CONCURRENT_JOURNAL_FILL 32ULL
struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
- mlog_entry_void();
-
memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
@@ -1161,7 +1124,6 @@ static int ocfs2_force_read_journal(struct inode *inode)
bail:
for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
brelse(bhs[i]);
- mlog_exit(status);
return status;
}
@@ -1185,7 +1147,7 @@ struct ocfs2_la_recovery_item {
*/
void ocfs2_complete_recovery(struct work_struct *work)
{
- int ret;
+ int ret = 0;
struct ocfs2_journal *journal =
container_of(work, struct ocfs2_journal, j_recovery_work);
struct ocfs2_super *osb = journal->j_osb;
@@ -1194,9 +1156,8 @@ void ocfs2_complete_recovery(struct work_struct *work)
struct ocfs2_quota_recovery *qrec;
LIST_HEAD(tmp_la_list);
- mlog_entry_void();
-
- mlog(0, "completing recovery from keventd\n");
+ trace_ocfs2_complete_recovery(
+ (unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno);
spin_lock(&journal->j_lock);
list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
@@ -1205,15 +1166,18 @@ void ocfs2_complete_recovery(struct work_struct *work)
list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
list_del_init(&item->lri_list);
- mlog(0, "Complete recovery for slot %d\n", item->lri_slot);
-
ocfs2_wait_on_quotas(osb);
la_dinode = item->lri_la_dinode;
- if (la_dinode) {
- mlog(0, "Clean up local alloc %llu\n",
- (unsigned long long)le64_to_cpu(la_dinode->i_blkno));
+ tl_dinode = item->lri_tl_dinode;
+ qrec = item->lri_qrec;
+
+ trace_ocfs2_complete_recovery_slot(item->lri_slot,
+ la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
+ tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0,
+ qrec);
+ if (la_dinode) {
ret = ocfs2_complete_local_alloc_recovery(osb,
la_dinode);
if (ret < 0)
@@ -1222,11 +1186,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
kfree(la_dinode);
}
- tl_dinode = item->lri_tl_dinode;
if (tl_dinode) {
- mlog(0, "Clean up truncate log %llu\n",
- (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));
-
ret = ocfs2_complete_truncate_log_recovery(osb,
tl_dinode);
if (ret < 0)
@@ -1239,9 +1199,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
if (ret < 0)
mlog_errno(ret);
- qrec = item->lri_qrec;
if (qrec) {
- mlog(0, "Recovering quota files");
ret = ocfs2_finish_quota_recovery(osb, qrec,
item->lri_slot);
if (ret < 0)
@@ -1252,8 +1210,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
kfree(item);
}
- mlog(0, "Recovery completion\n");
- mlog_exit_void();
+ trace_ocfs2_complete_recovery_end(ret);
}
/* NOTE: This function always eats your references to la_dinode and
@@ -1339,8 +1296,6 @@ static int __ocfs2_recovery_thread(void *arg)
int rm_quota_used = 0, i;
struct ocfs2_quota_recovery *qrec;
- mlog_entry_void();
-
status = ocfs2_wait_on_mount(osb);
if (status < 0) {
goto bail;
@@ -1372,15 +1327,12 @@ restart:
* clear it until ocfs2_recover_node() has succeeded. */
node_num = rm->rm_entries[0];
spin_unlock(&osb->osb_lock);
- mlog(0, "checking node %d\n", node_num);
slot_num = ocfs2_node_num_to_slot(osb, node_num);
+ trace_ocfs2_recovery_thread_node(node_num, slot_num);
if (slot_num == -ENOENT) {
status = 0;
- mlog(0, "no slot for this node, so no recovery"
- "required.\n");
goto skip_recovery;
}
- mlog(0, "node %d was using slot %d\n", node_num, slot_num);
/* It is a bit subtle with quota recovery. We cannot do it
* immediately because we have to obtain cluster locks from
@@ -1407,7 +1359,7 @@ skip_recovery:
spin_lock(&osb->osb_lock);
}
spin_unlock(&osb->osb_lock);
- mlog(0, "All nodes recovered\n");
+ trace_ocfs2_recovery_thread_end(status);
/* Refresh all journal recovery generations from disk */
status = ocfs2_check_journals_nolocks(osb);
@@ -1416,7 +1368,7 @@ skip_recovery:
mlog_errno(status);
/* Now it is right time to recover quotas... We have to do this under
- * superblock lock so that noone can start using the slot (and crash)
+ * superblock lock so that no one can start using the slot (and crash)
* before we recover it */
for (i = 0; i < rm_quota_used; i++) {
qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
@@ -1451,7 +1403,6 @@ bail:
if (rm_quota)
kfree(rm_quota);
- mlog_exit(status);
/* no one is calling kthread_stop() for us so the kthread() api
* requires that we call do_exit(). And it isn't exported, but
* complete_and_exit() seems to be a minimal wrapper around it. */
@@ -1461,19 +1412,15 @@ bail:
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
- mlog_entry("(node_num=%d, osb->node_num = %d)\n",
- node_num, osb->node_num);
-
mutex_lock(&osb->recovery_lock);
- if (osb->disable_recovery)
- goto out;
- /* People waiting on recovery will wait on
- * the recovery map to empty. */
- if (ocfs2_recovery_map_set(osb, node_num))
- mlog(0, "node %d already in recovery map.\n", node_num);
+ trace_ocfs2_recovery_thread(node_num, osb->node_num,
+ osb->disable_recovery, osb->recovery_thread_task,
+ osb->disable_recovery ?
+ -1 : ocfs2_recovery_map_set(osb, node_num));
- mlog(0, "starting recovery thread...\n");
+ if (osb->disable_recovery)
+ goto out;
if (osb->recovery_thread_task)
goto out;
@@ -1488,8 +1435,6 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
out:
mutex_unlock(&osb->recovery_lock);
wake_up(&osb->recovery_event);
-
- mlog_exit_void();
}
static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
@@ -1563,7 +1508,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
* If not, it needs recovery.
*/
if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
- mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
+ trace_ocfs2_replay_journal_recovered(slot_num,
osb->slot_recovery_generations[slot_num], slot_reco_gen);
osb->slot_recovery_generations[slot_num] = slot_reco_gen;
status = -EBUSY;
@@ -1574,7 +1519,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
if (status < 0) {
- mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
+ trace_ocfs2_replay_journal_lock_err(status);
if (status != -ERESTARTSYS)
mlog(ML_ERROR, "Could not lock journal!\n");
goto done;
@@ -1587,7 +1532,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
slot_reco_gen = ocfs2_get_recovery_generation(fe);
if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
- mlog(0, "No recovery required for node %d\n", node_num);
+ trace_ocfs2_replay_journal_skip(node_num);
/* Refresh recovery generation for the slot */
osb->slot_recovery_generations[slot_num] = slot_reco_gen;
goto done;
@@ -1608,7 +1553,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
goto done;
}
- mlog(0, "calling journal_init_inode\n");
journal = jbd2_journal_init_inode(inode);
if (journal == NULL) {
mlog(ML_ERROR, "Linux journal layer error\n");
@@ -1628,7 +1572,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
ocfs2_clear_journal_error(osb->sb, journal, slot_num);
/* wipe the journal */
- mlog(0, "flushing the journal.\n");
jbd2_journal_lock_updates(journal);
status = jbd2_journal_flush(journal);
jbd2_journal_unlock_updates(journal);
@@ -1665,7 +1608,6 @@ done:
brelse(bh);
- mlog_exit(status);
return status;
}
@@ -1688,8 +1630,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
struct ocfs2_dinode *la_copy = NULL;
struct ocfs2_dinode *tl_copy = NULL;
- mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
- node_num, slot_num, osb->node_num);
+ trace_ocfs2_recover_node(node_num, slot_num, osb->node_num);
/* Should not ever be called to recover ourselves -- in that
* case we should've called ocfs2_journal_load instead. */
@@ -1698,9 +1639,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
status = ocfs2_replay_journal(osb, node_num, slot_num);
if (status < 0) {
if (status == -EBUSY) {
- mlog(0, "Skipping recovery for slot %u (node %u) "
- "as another node has recovered it\n", slot_num,
- node_num);
+ trace_ocfs2_recover_node_skip(slot_num, node_num);
status = 0;
goto done;
}
@@ -1735,7 +1674,6 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
status = 0;
done:
- mlog_exit(status);
return status;
}
@@ -1808,8 +1746,8 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
spin_lock(&osb->osb_lock);
osb->slot_recovery_generations[i] = gen;
- mlog(0, "Slot %u recovery generation is %u\n", i,
- osb->slot_recovery_generations[i]);
+ trace_ocfs2_mark_dead_nodes(i,
+ osb->slot_recovery_generations[i]);
if (i == osb->slot_num) {
spin_unlock(&osb->osb_lock);
@@ -1845,7 +1783,6 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
status = 0;
bail:
- mlog_exit(status);
return status;
}
@@ -1884,11 +1821,12 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
os = &osb->osb_orphan_scan;
- mlog(0, "Begin orphan scan\n");
-
if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
goto out;
+ trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno,
+ atomic_read(&os->os_state));
+
status = ocfs2_orphan_scan_lock(osb, &seqno);
if (status < 0) {
if (status != -EAGAIN)
@@ -1918,7 +1856,8 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
unlock:
ocfs2_orphan_scan_unlock(osb, seqno);
out:
- mlog(0, "Orphan scan completed\n");
+ trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno,
+ atomic_read(&os->os_state));
return;
}
@@ -2002,8 +1941,7 @@ static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
if (IS_ERR(iter))
return 0;
- mlog(0, "queue orphan %llu\n",
- (unsigned long long)OCFS2_I(iter)->ip_blkno);
+ trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
/* No locking is required for the next_orphan queue as there
* is only ever a single process doing orphan recovery. */
OCFS2_I(iter)->ip_next_orphan = p->head;
@@ -2119,7 +2057,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
struct inode *iter;
struct ocfs2_inode_info *oi;
- mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);
+ trace_ocfs2_recover_orphans(slot);
ocfs2_mark_recovering_orphan_dir(osb, slot);
ret = ocfs2_queue_orphans(osb, slot, &inode);
@@ -2132,7 +2070,8 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
while (inode) {
oi = OCFS2_I(inode);
- mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno);
+ trace_ocfs2_recover_orphans_iput(
+ (unsigned long long)oi->ip_blkno);
iter = oi->ip_next_orphan;
@@ -2170,6 +2109,7 @@ static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
* MOUNTED flag, but this is set right before
* dismount_volume() so we can trust it. */
if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
+ trace_ocfs2_wait_on_mount(VOLUME_DISABLED);
mlog(0, "mount error, exiting!\n");
return -EBUSY;
}
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 6180da1e37e6..68cf2f6d3c6a 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -215,7 +215,7 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
/* WARNING: This only kicks off a single
* checkpoint. If someone races you and adds more
* metadata to the journal, you won't know, and will
- * wind up waiting *alot* longer than necessary. Right
+ * wind up waiting *a lot* longer than necessary. Right
* now we only use this in clear_inode so that's
* OK. */
ocfs2_start_checkpoint(osb);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ec6adbf8f551..210c35237548 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -29,7 +29,6 @@
#include <linux/highmem.h>
#include <linux/bitops.h>
-#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -43,6 +42,7 @@
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -201,8 +201,7 @@ void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
la_max_mb = ocfs2_clusters_to_megabytes(sb,
ocfs2_local_alloc_size(sb) * 8);
- mlog(0, "requested: %dM, max: %uM, default: %uM\n",
- requested_mb, la_max_mb, la_default_mb);
+ trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);
if (requested_mb == -1) {
/* No user request - use defaults */
@@ -276,8 +275,8 @@ int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
ret = 1;
bail:
- mlog(0, "state=%d, bits=%llu, la_bits=%d, ret=%d\n",
- osb->local_alloc_state, (unsigned long long)bits, la_bits, ret);
+ trace_ocfs2_alloc_should_use_local(
+ (unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
spin_unlock(&osb->osb_lock);
return ret;
}
@@ -291,8 +290,6 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
struct inode *inode = NULL;
struct ocfs2_local_alloc *la;
- mlog_entry_void();
-
if (osb->local_alloc_bits == 0)
goto bail;
@@ -364,9 +361,10 @@ bail:
if (inode)
iput(inode);
- mlog(0, "Local alloc window bits = %d\n", osb->local_alloc_bits);
+ trace_ocfs2_load_local_alloc(osb->local_alloc_bits);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -388,8 +386,6 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
struct ocfs2_dinode *alloc_copy = NULL;
struct ocfs2_dinode *alloc = NULL;
- mlog_entry_void();
-
cancel_delayed_work(&osb->la_enable_wq);
flush_workqueue(ocfs2_wq);
@@ -482,8 +478,6 @@ out:
if (alloc_copy)
kfree(alloc_copy);
-
- mlog_exit_void();
}
/*
@@ -502,7 +496,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
struct inode *inode = NULL;
struct ocfs2_dinode *alloc;
- mlog_entry("(slot_num = %d)\n", slot_num);
+ trace_ocfs2_begin_local_alloc_recovery(slot_num);
*alloc_copy = NULL;
@@ -552,7 +546,8 @@ bail:
iput(inode);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -570,8 +565,6 @@ int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
struct buffer_head *main_bm_bh = NULL;
struct inode *main_bm_inode;
- mlog_entry_void();
-
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
@@ -620,7 +613,8 @@ out_mutex:
out:
if (!status)
ocfs2_init_steal_slots(osb);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -640,8 +634,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
struct inode *local_alloc_inode;
unsigned int free_bits;
- mlog_entry_void();
-
BUG_ON(!ac);
local_alloc_inode =
@@ -712,10 +704,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
goto bail;
}
- if (ac->ac_max_block)
- mlog(0, "Calling in_range for max block %llu\n",
- (unsigned long long)ac->ac_max_block);
-
ac->ac_inode = local_alloc_inode;
/* We should never use localalloc from another slot */
ac->ac_alloc_slot = osb->slot_num;
@@ -729,10 +717,12 @@ bail:
iput(local_alloc_inode);
}
- mlog(0, "bits=%d, slot=%d, ret=%d\n", bits_wanted, osb->slot_num,
- status);
+ trace_ocfs2_reserve_local_alloc_bits(
+ (unsigned long long)ac->ac_max_block,
+ bits_wanted, osb->slot_num, status);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -749,7 +739,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc;
struct ocfs2_local_alloc *la;
- mlog_entry_void();
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
local_alloc_inode = ac->ac_inode;
@@ -788,7 +777,8 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
ocfs2_journal_dirty(handle, osb->local_alloc_bh);
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -799,13 +789,11 @@ static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
u32 count = 0;
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
- mlog_entry_void();
-
buffer = la->la_bitmap;
for (i = 0; i < le16_to_cpu(la->la_size); i++)
count += hweight8(buffer[i]);
- mlog_exit(count);
+ trace_ocfs2_local_alloc_count_bits(count);
return count;
}
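The count that ocfs2_local_alloc_count_bits() now hands to trace_ocfs2_local_alloc_count_bits() is a plain per-byte popcount over the local alloc bitmap. A self-contained sketch of the same walk (example_count_bits() is hypothetical; hweight8() is the kernel's per-byte popcount helper):

	static u32 example_count_bits(const u8 *bitmap, unsigned int size)
	{
		u32 count = 0;
		unsigned int i;

		for (i = 0; i < size; i++)
			count += hweight8(bitmap[i]);
		return count;
	}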
@@ -820,10 +808,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
void *bitmap = NULL;
struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;
- mlog_entry("(numbits wanted = %u)\n", *numbits);
-
if (!alloc->id1.bitmap1.i_total) {
- mlog(0, "No bits in my window!\n");
bitoff = -1;
goto bail;
}
@@ -883,8 +868,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
}
}
- mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff,
- numfound);
+ trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);
if (numfound == *numbits)
bitoff = startoff - numfound;
@@ -895,7 +879,10 @@ bail:
if (local_resv)
ocfs2_resv_discard(resmap, resv);
- mlog_exit(bitoff);
+ trace_ocfs2_local_alloc_find_clear_bits(*numbits,
+ le32_to_cpu(alloc->id1.bitmap1.i_total),
+ bitoff, numfound);
+
return bitoff;
}
@@ -903,15 +890,12 @@ static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
{
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
int i;
- mlog_entry_void();
alloc->id1.bitmap1.i_total = 0;
alloc->id1.bitmap1.i_used = 0;
la->la_bm_off = 0;
for(i = 0; i < le16_to_cpu(la->la_size); i++)
la->la_bitmap[i] = 0;
-
- mlog_exit_void();
}
#if 0
@@ -952,18 +936,16 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
void *bitmap;
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
- mlog_entry("total = %u, used = %u\n",
- le32_to_cpu(alloc->id1.bitmap1.i_total),
- le32_to_cpu(alloc->id1.bitmap1.i_used));
+ trace_ocfs2_sync_local_to_main(
+ le32_to_cpu(alloc->id1.bitmap1.i_total),
+ le32_to_cpu(alloc->id1.bitmap1.i_used));
if (!alloc->id1.bitmap1.i_total) {
- mlog(0, "nothing to sync!\n");
goto bail;
}
if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
le32_to_cpu(alloc->id1.bitmap1.i_total)) {
- mlog(0, "all bits were taken!\n");
goto bail;
}
@@ -985,8 +967,7 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
ocfs2_clusters_to_blocks(osb->sb,
start - count);
- mlog(0, "freeing %u bits starting at local alloc bit "
- "%u (la_start_blk = %llu, blkno = %llu)\n",
+ trace_ocfs2_sync_local_to_main_free(
count, start - count,
(unsigned long long)la_start_blk,
(unsigned long long)blkno);
@@ -1007,7 +988,8 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
}
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1132,7 +1114,8 @@ bail:
*ac = NULL;
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1148,17 +1131,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc = NULL;
struct ocfs2_local_alloc *la;
- mlog_entry_void();
-
alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
la = OCFS2_LOCAL_ALLOC(alloc);
- if (alloc->id1.bitmap1.i_total)
- mlog(0, "asking me to alloc a new window over a non-empty "
- "one\n");
-
- mlog(0, "Allocating %u clusters for a new window.\n",
- osb->local_alloc_bits);
+ trace_ocfs2_local_alloc_new_window(
+ le32_to_cpu(alloc->id1.bitmap1.i_total),
+ osb->local_alloc_bits);
/* Instruct the allocation code to try the most recently used
* cluster group. We'll re-record the group used this pass
@@ -1220,13 +1198,13 @@ retry_enospc:
ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
- mlog(0, "New window allocated:\n");
- mlog(0, "window la_bm_off = %u\n",
- OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
- mlog(0, "window bits = %u\n", le32_to_cpu(alloc->id1.bitmap1.i_total));
+ trace_ocfs2_local_alloc_new_window_result(
+ OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
+ le32_to_cpu(alloc->id1.bitmap1.i_total));
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1243,8 +1221,6 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
struct ocfs2_dinode *alloc_copy = NULL;
struct ocfs2_alloc_context *ac = NULL;
- mlog_entry_void();
-
ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);
/* This will lock the main bitmap for us. */
@@ -1324,7 +1300,8 @@ bail:
if (ac)
ocfs2_free_alloc_context(ac);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c
index b5cb3ede9408..e57c804069ea 100644
--- a/fs/ocfs2/locks.c
+++ b/fs/ocfs2/locks.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/fcntl.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 7e32db9c2c99..3e9393ca39eb 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -31,7 +31,6 @@
#include <linux/signal.h>
#include <linux/rbtree.h>
-#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -42,6 +41,7 @@
#include "inode.h"
#include "mmap.h"
#include "super.h"
+#include "ocfs2_trace.h"
static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
@@ -49,13 +49,12 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
sigset_t oldset;
int ret;
- mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);
-
ocfs2_block_signals(&oldset);
ret = filemap_fault(area, vmf);
ocfs2_unblock_signals(&oldset);
- mlog_exit_ptr(vmf->page);
+ trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno,
+ area, vmf->page, vmf->pgoff);
return ret;
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d6c25d76b537..e5d738cd9cc0 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -42,7 +42,6 @@
#include <linux/highmem.h>
#include <linux/quotaops.h>
-#define MLOG_MASK_PREFIX ML_NAMEI
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -63,6 +62,7 @@
#include "uptodate.h"
#include "xattr.h"
#include "acl.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -106,17 +106,15 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *ret;
struct ocfs2_inode_info *oi;
- mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_lookup(dir, dentry, dentry->d_name.len,
+ dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, 0);
if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) {
ret = ERR_PTR(-ENAMETOOLONG);
goto bail;
}
- mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len,
- dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno);
-
status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
@@ -182,7 +180,7 @@ bail_unlock:
bail:
- mlog_exit_ptr(ret);
+ trace_ocfs2_lookup_ret(ret);
return ret;
}
@@ -235,9 +233,9 @@ static int ocfs2_mknod(struct inode *dir,
sigset_t oldset;
int did_block_signals = 0;
- mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode,
- (unsigned long)dev, dentry->d_name.len,
- dentry->d_name.name);
+ trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long)dev, mode);
dquot_initialize(dir);
@@ -354,10 +352,6 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
did_quota_inode = 1;
- mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry,
- inode->i_mode, (unsigned long)dev, dentry->d_name.len,
- dentry->d_name.name);
-
/* do the real work now. */
status = ocfs2_mknod_locked(osb, dir, inode, dev,
&new_fe_bh, parent_fe_bh, handle,
@@ -436,9 +430,6 @@ leave:
if (did_block_signals)
ocfs2_unblock_signals(&oldset);
- if (status == -ENOSPC)
- mlog(0, "Disk is full\n");
-
brelse(new_fe_bh);
brelse(parent_fe_bh);
kfree(si.name);
@@ -466,7 +457,8 @@ leave:
iput(inode);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -577,7 +569,8 @@ leave:
}
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -615,10 +608,11 @@ static int ocfs2_mkdir(struct inode *dir,
{
int ret;
- mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_mkdir(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+ OCFS2_I(dir)->ip_blkno, mode);
ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0);
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -630,10 +624,11 @@ static int ocfs2_create(struct inode *dir,
{
int ret;
- mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_create(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno, mode);
ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0);
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -652,9 +647,9 @@ static int ocfs2_link(struct dentry *old_dentry,
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
- mlog_entry("(inode=%lu, old='%.*s' new='%.*s')\n", inode->i_ino,
- old_dentry->d_name.len, old_dentry->d_name.name,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ old_dentry->d_name.len, old_dentry->d_name.name,
+ dentry->d_name.len, dentry->d_name.name);
if (S_ISDIR(inode->i_mode))
return -EPERM;
@@ -757,7 +752,8 @@ out:
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(err);
+ if (err)
+ mlog_errno(err);
return err;
}
@@ -809,19 +805,17 @@ static int ocfs2_unlink(struct inode *dir,
struct ocfs2_dir_lookup_result lookup = { NULL, };
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
- mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_unlink(dir, dentry, dentry->d_name.len,
+ dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
dquot_initialize(dir);
BUG_ON(dentry->d_parent->d_inode != dir);
- mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
-
- if (inode == osb->root_inode) {
- mlog(0, "Cannot delete the root directory\n");
+ if (inode == osb->root_inode)
return -EPERM;
- }
status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1,
OI_LS_PARENT);
@@ -843,9 +837,10 @@ static int ocfs2_unlink(struct inode *dir,
if (OCFS2_I(inode)->ip_blkno != blkno) {
status = -ENOENT;
- mlog(0, "ip_blkno %llu != dirent blkno %llu ip_flags = %x\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)blkno, OCFS2_I(inode)->ip_flags);
+ trace_ocfs2_unlink_noent(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)blkno,
+ OCFS2_I(inode)->ip_flags);
goto leave;
}
@@ -954,7 +949,8 @@ leave:
ocfs2_free_dir_lookup_result(&orphan_insert);
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -975,9 +971,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
struct buffer_head **tmpbh;
struct inode *tmpinode;
- mlog_entry("(inode1 = %llu, inode2 = %llu)\n",
- (unsigned long long)oi1->ip_blkno,
- (unsigned long long)oi2->ip_blkno);
+ trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
+ (unsigned long long)oi2->ip_blkno);
if (*bh1)
*bh1 = NULL;
@@ -988,7 +983,6 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
if (oi1->ip_blkno != oi2->ip_blkno) {
if (oi1->ip_blkno < oi2->ip_blkno) {
/* switch id1 and id2 around */
- mlog(0, "switching them around...\n");
tmpbh = bh2;
bh2 = bh1;
bh1 = tmpbh;
@@ -1024,8 +1018,13 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
mlog_errno(status);
}
+ trace_ocfs2_double_lock_end(
+ (unsigned long long)OCFS2_I(inode1)->ip_blkno,
+ (unsigned long long)OCFS2_I(inode2)->ip_blkno);
+
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1067,10 +1066,9 @@ static int ocfs2_rename(struct inode *old_dir,
/* At some point it might be nice to break this function up a
* bit. */
- mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p, from='%.*s' to='%.*s')\n",
- old_dir, old_dentry, new_dir, new_dentry,
- old_dentry->d_name.len, old_dentry->d_name.name,
- new_dentry->d_name.len, new_dentry->d_name.name);
+ trace_ocfs2_rename(old_dir, old_dentry, new_dir, new_dentry,
+ old_dentry->d_name.len, old_dentry->d_name.name,
+ new_dentry->d_name.len, new_dentry->d_name.name);
dquot_initialize(old_dir);
dquot_initialize(new_dir);
@@ -1227,16 +1225,15 @@ static int ocfs2_rename(struct inode *old_dir,
if (!new_inode) {
status = -EACCES;
- mlog(0, "We found an inode for name %.*s but VFS "
- "didn't give us one.\n", new_dentry->d_name.len,
- new_dentry->d_name.name);
+ trace_ocfs2_rename_target_exists(new_dentry->d_name.len,
+ new_dentry->d_name.name);
goto bail;
}
if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) {
status = -EACCES;
- mlog(0, "Inode %llu and dir %llu disagree. flags = %x\n",
+ trace_ocfs2_rename_disagree(
(unsigned long long)OCFS2_I(new_inode)->ip_blkno,
(unsigned long long)newfe_blkno,
OCFS2_I(new_inode)->ip_flags);
@@ -1259,8 +1256,7 @@ static int ocfs2_rename(struct inode *old_dir,
newfe = (struct ocfs2_dinode *) newfe_bh->b_data;
- mlog(0, "aha rename over existing... new_blkno=%llu "
- "newfebh=%p bhblocknr=%llu\n",
+ trace_ocfs2_rename_over_existing(
(unsigned long long)newfe_blkno, newfe_bh, newfe_bh ?
(unsigned long long)newfe_bh->b_blocknr : 0ULL);
@@ -1476,7 +1472,8 @@ bail:
brelse(old_dir_bh);
brelse(new_dir_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1501,9 +1498,8 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
* write i_size + 1 bytes. */
blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
- mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n",
- (unsigned long long)inode->i_blocks,
- i_size_read(inode), blocks);
+ trace_ocfs2_create_symlink_data((unsigned long long)inode->i_blocks,
+ i_size_read(inode), blocks);
/* Sanity check -- make sure we're going to fit. */
if (bytes_left >
@@ -1579,7 +1575,8 @@ bail:
kfree(bhs);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1610,8 +1607,8 @@ static int ocfs2_symlink(struct inode *dir,
sigset_t oldset;
int did_block_signals = 0;
- mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir,
- dentry, symname, dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_symlink_begin(dir, dentry, symname,
+ dentry->d_name.len, dentry->d_name.name);
dquot_initialize(dir);
@@ -1713,9 +1710,10 @@ static int ocfs2_symlink(struct inode *dir,
goto bail;
did_quota_inode = 1;
- mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry,
- inode->i_mode, dentry->d_name.len,
- dentry->d_name.name);
+ trace_ocfs2_symlink_create(dir, dentry, dentry->d_name.len,
+ dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ inode->i_mode);
status = ocfs2_mknod_locked(osb, dir, inode,
0, &new_fe_bh, parent_fe_bh, handle,
@@ -1835,7 +1833,8 @@ bail:
iput(inode);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1844,8 +1843,6 @@ static int ocfs2_blkno_stringify(u64 blkno, char *name)
{
int status, namelen;
- mlog_entry_void();
-
namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx",
(long long)blkno);
if (namelen <= 0) {
@@ -1862,12 +1859,12 @@ static int ocfs2_blkno_stringify(u64 blkno, char *name)
goto bail;
}
- mlog(0, "built filename '%s' for orphan dir (len=%d)\n", name,
- namelen);
+ trace_ocfs2_blkno_stringify(blkno, name, namelen);
status = 0;
bail:
- mlog_exit(status);
+ if (status < 0)
+ mlog_errno(status);
return status;
}
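ocfs2_blkno_stringify() derives the orphan directory entry name purely from the inode block number: 16 zero-padded hex digits, which is presumably what OCFS2_ORPHAN_NAMELEN covers. A hedged sketch (example_orphan_name() is hypothetical; the 17-byte buffer assumes a name length of 16 plus NUL):

	static void example_orphan_name(u64 blkno, char name[17])
	{
		/* e.g. blkno 12345 -> "0000000000003039" */
		snprintf(name, 17, "%016llx", (unsigned long long)blkno);
	}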
@@ -1980,7 +1977,8 @@ out:
iput(orphan_dir_inode);
}
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -1997,7 +1995,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
struct ocfs2_dinode *orphan_fe;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
- mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
+ trace_ocfs2_orphan_add_begin(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh);
if (status < 0) {
@@ -2056,13 +2055,14 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
ocfs2_journal_dirty(handle, fe_bh);
- mlog(0, "Inode %llu orphaned in slot %d\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num);
+ trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
+ osb->slot_num);
leave:
brelse(orphan_dir_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2078,17 +2078,15 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
int status = 0;
struct ocfs2_dir_lookup_result lookup = { NULL, };
- mlog_entry_void();
-
status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
if (status < 0) {
mlog_errno(status);
goto leave;
}
- mlog(0, "removing '%s' from orphan dir %llu (namelen=%d)\n",
- name, (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
- OCFS2_ORPHAN_NAMELEN);
+ trace_ocfs2_orphan_del(
+ (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
+ name, OCFS2_ORPHAN_NAMELEN);
/* find its spot in the orphan directory */
status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode,
@@ -2124,12 +2122,13 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
leave:
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
/**
- * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to recieve a newly
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
* allocated file. This is different from the typical 'add to orphan dir'
* operation in that the inode does not yet exist. This is a problem because
* the orphan dir stringifies the inode block number to come up with its
@@ -2321,9 +2320,6 @@ leave:
iput(orphan_dir);
}
- if (status == -ENOSPC)
- mlog(0, "Disk is full\n");
-
if ((status < 0) && inode) {
clear_nlink(inode);
iput(inode);
@@ -2358,8 +2354,10 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
struct buffer_head *di_bh = NULL;
struct ocfs2_dir_lookup_result lookup = { NULL, };
- mlog_entry("(0x%p, 0x%p, %.*s')\n", dir, dentry,
- dentry->d_name.len, dentry->d_name.name);
+ trace_ocfs2_mv_orphaned_inode_to_new(dir, dentry,
+ dentry->d_name.len, dentry->d_name.name,
+ (unsigned long long)OCFS2_I(dir)->ip_blkno,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
if (status < 0) {
@@ -2476,7 +2474,8 @@ leave:
ocfs2_free_dir_lookup_result(&lookup);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 51cd6898e7f1..409285854f64 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -147,6 +147,17 @@ struct ocfs2_lock_res_ops;
typedef void (*ocfs2_lock_callback)(int status, unsigned long data);
+#ifdef CONFIG_OCFS2_FS_STATS
+struct ocfs2_lock_stats {
+ u64 ls_total; /* Total wait in NSEC */
+ u32 ls_gets; /* Num acquires */
+ u32 ls_fail; /* Num failed acquires */
+
+ /* Storing max wait in usecs saves 24 bytes per inode */
+ u32 ls_max; /* Max wait in USEC */
+};
+#endif
+
struct ocfs2_lock_res {
void *l_priv;
struct ocfs2_lock_res_ops *l_ops;
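The new struct ocfs2_lock_stats condenses the per-mode counters that the following hunk removes. A hedged sketch of how a measured wait might be folded in, matching the field comments (totals kept in nanoseconds, max in microseconds); example_update_stats() is an assumption for illustration, not the patch's actual helper:

	static void example_update_stats(struct ocfs2_lock_stats *stats,
					 u64 wait_ns, int failed)
	{
		u32 wait_us = (u32)div_u64(wait_ns, NSEC_PER_USEC);

		stats->ls_gets++;
		stats->ls_total += wait_ns;
		if (wait_us > stats->ls_max)
			stats->ls_max = wait_us; /* max kept in usec to save space */
		if (failed)
			stats->ls_fail++;
	}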
@@ -182,15 +193,9 @@ struct ocfs2_lock_res {
struct list_head l_debug_list;
#ifdef CONFIG_OCFS2_FS_STATS
- unsigned long long l_lock_num_prmode; /* PR acquires */
- unsigned long long l_lock_num_exmode; /* EX acquires */
- unsigned int l_lock_num_prmode_failed; /* Failed PR gets */
- unsigned int l_lock_num_exmode_failed; /* Failed EX gets */
- unsigned long long l_lock_total_prmode; /* Tot wait for PR */
- unsigned long long l_lock_total_exmode; /* Tot wait for EX */
- unsigned int l_lock_max_prmode; /* Max wait for PR */
- unsigned int l_lock_max_exmode; /* Max wait for EX */
- unsigned int l_lock_refresh; /* Disk refreshes */
+ struct ocfs2_lock_stats l_lock_prmode; /* PR mode stats */
+ u32 l_lock_refresh; /* Disk refreshes */
+ struct ocfs2_lock_stats l_lock_exmode; /* EX mode stats */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map l_lockdep_map;
@@ -831,18 +836,18 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
{
- ext2_set_bit(bit, bitmap);
+ __test_and_set_bit_le(bit, bitmap);
}
#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
{
- ext2_clear_bit(bit, bitmap);
+ __test_and_clear_bit_le(bit, bitmap);
}
#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
-#define ocfs2_test_bit ext2_test_bit
-#define ocfs2_find_next_zero_bit ext2_find_next_zero_bit
-#define ocfs2_find_next_bit ext2_find_next_bit
+#define ocfs2_test_bit test_bit_le
+#define ocfs2_find_next_zero_bit find_next_zero_bit_le
+#define ocfs2_find_next_bit find_next_bit_le
#endif /* OCFS2_H */
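This hunk moves the ocfs2 bitmap helpers off the ext2_* wrappers and onto the generic little-endian bit operations. An illustrative, self-contained use of the resulting API (the buffer and sizes here are made up for the example):

	static int example_bitmap_ops(void)
	{
		unsigned long bitmap[2] = { 0, };

		ocfs2_set_bit(3, bitmap);	/* __test_and_set_bit_le() */
		ocfs2_clear_bit(3, bitmap);	/* __test_and_clear_bit_le() */
		/* first clear bit in a 128-bit map, starting at offset 0 */
		return ocfs2_find_next_zero_bit(bitmap, 128, 0);
	}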
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index bf2e7764920e..b68f87a83924 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -441,7 +441,7 @@ static unsigned char ocfs2_type_by_mode[S_IFMT >> S_SHIFT] = {
struct ocfs2_block_check {
/*00*/ __le32 bc_crc32e; /* 802.3 Ethernet II CRC32 */
__le16 bc_ecc; /* Single-error-correction parity vector.
- This is a simple Hamming code dependant
+ This is a simple Hamming code dependent
on the blocksize. OCFS2's maximum
blocksize, 4K, requires 16 parity bits,
so we fit in __le16. */
@@ -750,7 +750,7 @@ struct ocfs2_dinode {
after an unclean
shutdown */
} journal1;
- } id1; /* Inode type dependant 1 */
+ } id1; /* Inode type dependent 1 */
/*C0*/ union {
struct ocfs2_super_block i_super;
struct ocfs2_local_alloc i_lab;
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
new file mode 100644
index 000000000000..a1dae5bb54ac
--- /dev/null
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -0,0 +1,2739 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ocfs2
+
+#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OCFS2_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(ocfs2__int,
+ TP_PROTO(int num),
+ TP_ARGS(num),
+ TP_STRUCT__entry(
+ __field(int, num)
+ ),
+ TP_fast_assign(
+ __entry->num = num;
+ ),
+ TP_printk("%d", __entry->num)
+);
+
+#define DEFINE_OCFS2_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__int, name, \
+ TP_PROTO(int num), \
+ TP_ARGS(num))
+
+DECLARE_EVENT_CLASS(ocfs2__uint,
+ TP_PROTO(unsigned int num),
+ TP_ARGS(num),
+ TP_STRUCT__entry(
+ __field( unsigned int, num )
+ ),
+ TP_fast_assign(
+ __entry->num = num;
+ ),
+ TP_printk("%u", __entry->num)
+);
+
+#define DEFINE_OCFS2_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__uint, name, \
+ TP_PROTO(unsigned int num), \
+ TP_ARGS(num))
+
+DECLARE_EVENT_CLASS(ocfs2__ull,
+ TP_PROTO(unsigned long long blkno),
+ TP_ARGS(blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ ),
+ TP_printk("%llu", __entry->blkno)
+);
+
+#define DEFINE_OCFS2_ULL_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull, name, \
+ TP_PROTO(unsigned long long num), \
+ TP_ARGS(num))
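For readers new to the tracepoint machinery: DECLARE_EVENT_CLASS() describes the record layout and format string once, and each DEFINE_EVENT() (wrapped here by the DEFINE_OCFS2_*_EVENT helpers) stamps out a named event that call sites invoke as a trace_<name>() function. As an illustration, the ULL class above presumably backs the inode.c conversion seen earlier in this diff:

	DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_inode_block);
	/* ...which is what the converted call site uses:
	 *	trace_ocfs2_validate_inode_block(
	 *		(unsigned long long)bh->b_blocknr);
	 */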
+
+DECLARE_EVENT_CLASS(ocfs2__pointer,
+ TP_PROTO(void *pointer),
+ TP_ARGS(pointer),
+ TP_STRUCT__entry(
+ __field(void *, pointer)
+ ),
+ TP_fast_assign(
+ __entry->pointer = pointer;
+ ),
+ TP_printk("%p", __entry->pointer)
+);
+
+#define DEFINE_OCFS2_POINTER_EVENT(name) \
+DEFINE_EVENT(ocfs2__pointer, name, \
+ TP_PROTO(void *pointer), \
+ TP_ARGS(pointer))
+
+DECLARE_EVENT_CLASS(ocfs2__string,
+ TP_PROTO(const char *name),
+ TP_ARGS(name),
+ TP_STRUCT__entry(
+		__string(name, name)
+ ),
+ TP_fast_assign(
+ __assign_str(name, name);
+ ),
+ TP_printk("%s", __get_str(name))
+);
+
+#define DEFINE_OCFS2_STRING_EVENT(name) \
+DEFINE_EVENT(ocfs2__string, name, \
+ TP_PROTO(const char *name), \
+ TP_ARGS(name))
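+
+/*
+ * Each DEFINE_OCFS2_*_EVENT(name) wrapper expands to DEFINE_EVENT(),
+ * which generates a trace_<name>() function taking the class's
+ * TP_PROTO arguments -- e.g. trace_ocfs2_iget5_locked(blkno) for the
+ * DEFINE_OCFS2_ULL_EVENT(ocfs2_iget5_locked) declared further down.
+ */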
+
+DECLARE_EVENT_CLASS(ocfs2__int_int,
+ TP_PROTO(int value1, int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(int, value1)
+ __field(int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%d %d", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_INT_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__int_int, name, \
+ TP_PROTO(int val1, int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__uint_int,
+ TP_PROTO(unsigned int value1, int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned int, value1)
+ __field(int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%u %d", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_UINT_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__uint_int, name, \
+ TP_PROTO(unsigned int val1, int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__uint_uint,
+ TP_PROTO(unsigned int value1, unsigned int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned int, value1)
+ __field(unsigned int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%u %u", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__uint_uint, name, \
+ TP_PROTO(unsigned int val1, unsigned int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_uint,
+ TP_PROTO(unsigned long long value1, unsigned int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%llu %u", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_ULL_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_uint, name, \
+ TP_PROTO(unsigned long long val1, unsigned int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_int,
+ TP_PROTO(unsigned long long value1, int value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(int, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%llu %d", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_ULL_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_int, name, \
+ TP_PROTO(unsigned long long val1, int val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_ull,
+ TP_PROTO(unsigned long long value1, unsigned long long value2),
+ TP_ARGS(value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned long long, value2)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%llu %llu", __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_ULL_ULL_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_ull, name, \
+ TP_PROTO(unsigned long long val1, unsigned long long val2), \
+ TP_ARGS(val1, val2))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint,
+ TP_PROTO(unsigned long long value1,
+ unsigned long long value2, unsigned int value3),
+ TP_ARGS(value1, value2, value3),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned long long, value2)
+ __field(unsigned int, value3)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %llu %u",
+ __entry->value1, __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_ULL_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_ull_uint, name, \
+ TP_PROTO(unsigned long long val1, \
+ unsigned long long val2, unsigned int val3), \
+ TP_ARGS(val1, val2, val3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint,
+ TP_PROTO(unsigned long long value1,
+ unsigned int value2, unsigned int value3),
+ TP_ARGS(value1, value2, value3),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned int, value2)
+ __field(unsigned int, value3)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %u %u", __entry->value1,
+ __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_uint_uint, name, \
+ TP_PROTO(unsigned long long val1, \
+ unsigned int val2, unsigned int val3), \
+ TP_ARGS(val1, val2, val3))
+
+DECLARE_EVENT_CLASS(ocfs2__uint_uint_uint,
+ TP_PROTO(unsigned int value1, unsigned int value2,
+ unsigned int value3),
+ TP_ARGS(value1, value2, value3),
+ TP_STRUCT__entry(
+		__field(unsigned int, value1)
+		__field(unsigned int, value2)
+		__field(unsigned int, value3)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%u %u %u", __entry->value1, __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_UINT_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__uint_uint_uint, name, \
+ TP_PROTO(unsigned int value1, unsigned int value2, \
+ unsigned int value3), \
+ TP_ARGS(value1, value2, value3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_ull_ull,
+ TP_PROTO(unsigned long long value1,
+ unsigned long long value2, unsigned long long value3),
+ TP_ARGS(value1, value2, value3),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned long long, value2)
+ __field(unsigned long long, value3)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %llu %llu",
+ __entry->value1, __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_ULL_ULL_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_ull_ull, name, \
+ TP_PROTO(unsigned long long value1, unsigned long long value2, \
+ unsigned long long value3), \
+ TP_ARGS(value1, value2, value3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_int_int_int,
+ TP_PROTO(unsigned long long ull, int value1, int value2, int value3),
+ TP_ARGS(ull, value1, value2, value3),
+ TP_STRUCT__entry(
+		__field(unsigned long long, ull)
+		__field(int, value1)
+		__field(int, value2)
+		__field(int, value3)
+ ),
+ TP_fast_assign(
+ __entry->ull = ull;
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %d %d %d",
+ __entry->ull, __entry->value1,
+ __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_int_int_int, name, \
+ TP_PROTO(unsigned long long ull, int value1, \
+ int value2, int value3), \
+ TP_ARGS(ull, value1, value2, value3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint_uint,
+ TP_PROTO(unsigned long long ull, unsigned int value1,
+ unsigned int value2, unsigned int value3),
+ TP_ARGS(ull, value1, value2, value3),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ull)
+ __field(unsigned int, value1)
+ __field(unsigned int, value2)
+ __field(unsigned int, value3)
+ ),
+ TP_fast_assign(
+ __entry->ull = ull;
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ ),
+ TP_printk("%llu %u %u %u",
+ __entry->ull, __entry->value1,
+ __entry->value2, __entry->value3)
+);
+
+#define DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_uint_uint_uint, name, \
+ TP_PROTO(unsigned long long ull, unsigned int value1, \
+ unsigned int value2, unsigned int value3), \
+ TP_ARGS(ull, value1, value2, value3))
+
+DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint_uint,
+ TP_PROTO(unsigned long long value1, unsigned long long value2,
+ unsigned int value3, unsigned int value4),
+ TP_ARGS(value1, value2, value3, value4),
+ TP_STRUCT__entry(
+ __field(unsigned long long, value1)
+ __field(unsigned long long, value2)
+ __field(unsigned int, value3)
+ __field(unsigned int, value4)
+ ),
+ TP_fast_assign(
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ __entry->value3 = value3;
+ __entry->value4 = value4;
+ ),
+ TP_printk("%llu %llu %u %u",
+ __entry->value1, __entry->value2,
+ __entry->value3, __entry->value4)
+);
+
+#define DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(name) \
+DEFINE_EVENT(ocfs2__ull_ull_uint_uint, name, \
+ TP_PROTO(unsigned long long ull, unsigned long long ull1, \
+ unsigned int value2, unsigned int value3), \
+ TP_ARGS(ull, ull1, value2, value3))
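+
+/*
+ * The classes above cover the common "N plain integers" shapes; the
+ * wrapper names encode the argument types (INT, UINT, ULL, ...), so
+ * e.g. DEFINE_OCFS2_ULL_UINT_EVENT defines an event taking an
+ * unsigned long long followed by an unsigned int.
+ */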
+
+/* Trace events for fs/ocfs2/alloc.c. */
+DECLARE_EVENT_CLASS(ocfs2__btree_ops,
+	TP_PROTO(unsigned long long owner,
+ unsigned int value1, unsigned int value2),
+ TP_ARGS(owner, value1, value2),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(unsigned int, value1)
+ __field(unsigned int, value2)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%llu %u %u",
+ __entry->owner, __entry->value1, __entry->value2)
+);
+
+#define DEFINE_OCFS2_BTREE_EVENT(name) \
+DEFINE_EVENT(ocfs2__btree_ops, name, \
+ TP_PROTO(unsigned long long owner, \
+ unsigned int value1, unsigned int value2), \
+ TP_ARGS(owner, value1, value2))
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_adjust_rightmost_branch);
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_rotate_tree_right);
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_append_rec_to_path);
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_insert_extent_start);
+
+DEFINE_OCFS2_BTREE_EVENT(ocfs2_add_clusters_in_btree);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_num_free_extents);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_complete_edge_insert);
+
+TRACE_EVENT(ocfs2_grow_tree,
+ TP_PROTO(unsigned long long owner, int depth),
+ TP_ARGS(owner, depth),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(int, depth)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->depth = depth;
+ ),
+ TP_printk("%llu %d", __entry->owner, __entry->depth)
+);
+
+TRACE_EVENT(ocfs2_rotate_subtree,
+ TP_PROTO(int subtree_root, unsigned long long blkno,
+ int depth),
+ TP_ARGS(subtree_root, blkno, depth),
+ TP_STRUCT__entry(
+ __field(int, subtree_root)
+ __field(unsigned long long, blkno)
+ __field(int, depth)
+ ),
+ TP_fast_assign(
+ __entry->subtree_root = subtree_root;
+ __entry->blkno = blkno;
+ __entry->depth = depth;
+ ),
+ TP_printk("%d %llu %d", __entry->subtree_root,
+ __entry->blkno, __entry->depth)
+);
+
+TRACE_EVENT(ocfs2_insert_extent,
+ TP_PROTO(unsigned int ins_appending, unsigned int ins_contig,
+ int ins_contig_index, int free_records, int ins_tree_depth),
+ TP_ARGS(ins_appending, ins_contig, ins_contig_index, free_records,
+ ins_tree_depth),
+ TP_STRUCT__entry(
+ __field(unsigned int, ins_appending)
+ __field(unsigned int, ins_contig)
+ __field(int, ins_contig_index)
+ __field(int, free_records)
+ __field(int, ins_tree_depth)
+ ),
+ TP_fast_assign(
+ __entry->ins_appending = ins_appending;
+ __entry->ins_contig = ins_contig;
+ __entry->ins_contig_index = ins_contig_index;
+ __entry->free_records = free_records;
+ __entry->ins_tree_depth = ins_tree_depth;
+ ),
+ TP_printk("%u %u %d %d %d",
+ __entry->ins_appending, __entry->ins_contig,
+ __entry->ins_contig_index, __entry->free_records,
+ __entry->ins_tree_depth)
+);
+
+TRACE_EVENT(ocfs2_split_extent,
+ TP_PROTO(int split_index, unsigned int c_contig_type,
+ unsigned int c_has_empty_extent,
+ unsigned int c_split_covers_rec),
+ TP_ARGS(split_index, c_contig_type,
+ c_has_empty_extent, c_split_covers_rec),
+ TP_STRUCT__entry(
+ __field(int, split_index)
+ __field(unsigned int, c_contig_type)
+ __field(unsigned int, c_has_empty_extent)
+ __field(unsigned int, c_split_covers_rec)
+ ),
+ TP_fast_assign(
+ __entry->split_index = split_index;
+ __entry->c_contig_type = c_contig_type;
+ __entry->c_has_empty_extent = c_has_empty_extent;
+ __entry->c_split_covers_rec = c_split_covers_rec;
+ ),
+ TP_printk("%d %u %u %u", __entry->split_index, __entry->c_contig_type,
+ __entry->c_has_empty_extent, __entry->c_split_covers_rec)
+);
+
+TRACE_EVENT(ocfs2_remove_extent,
+ TP_PROTO(unsigned long long owner, unsigned int cpos,
+ unsigned int len, int index,
+ unsigned int e_cpos, unsigned int clusters),
+ TP_ARGS(owner, cpos, len, index, e_cpos, clusters),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(unsigned int, cpos)
+ __field(unsigned int, len)
+ __field(int, index)
+ __field(unsigned int, e_cpos)
+ __field(unsigned int, clusters)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->cpos = cpos;
+ __entry->len = len;
+ __entry->index = index;
+ __entry->e_cpos = e_cpos;
+ __entry->clusters = clusters;
+ ),
+ TP_printk("%llu %u %u %d %u %u",
+ __entry->owner, __entry->cpos, __entry->len, __entry->index,
+ __entry->e_cpos, __entry->clusters)
+);
+
+TRACE_EVENT(ocfs2_commit_truncate,
+ TP_PROTO(unsigned long long ino, unsigned int new_cpos,
+ unsigned int clusters, unsigned int depth),
+ TP_ARGS(ino, new_cpos, clusters, depth),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, new_cpos)
+ __field(unsigned int, clusters)
+ __field(unsigned int, depth)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->new_cpos = new_cpos;
+ __entry->clusters = clusters;
+ __entry->depth = depth;
+ ),
+ TP_printk("%llu %u %u %u",
+ __entry->ino, __entry->new_cpos,
+ __entry->clusters, __entry->depth)
+);
+
+TRACE_EVENT(ocfs2_validate_extent_block,
+ TP_PROTO(unsigned long long blkno),
+ TP_ARGS(blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ ),
+	TP_printk("%llu", __entry->blkno)
+);
+
+TRACE_EVENT(ocfs2_rotate_leaf,
+ TP_PROTO(unsigned int insert_cpos, int insert_index,
+ int has_empty, int next_free,
+ unsigned int l_count),
+ TP_ARGS(insert_cpos, insert_index, has_empty,
+ next_free, l_count),
+ TP_STRUCT__entry(
+ __field(unsigned int, insert_cpos)
+ __field(int, insert_index)
+ __field(int, has_empty)
+ __field(int, next_free)
+ __field(unsigned int, l_count)
+ ),
+ TP_fast_assign(
+ __entry->insert_cpos = insert_cpos;
+ __entry->insert_index = insert_index;
+ __entry->has_empty = has_empty;
+ __entry->next_free = next_free;
+ __entry->l_count = l_count;
+ ),
+ TP_printk("%u %d %d %d %u", __entry->insert_cpos,
+ __entry->insert_index, __entry->has_empty,
+ __entry->next_free, __entry->l_count)
+);
+
+TRACE_EVENT(ocfs2_add_clusters_in_btree_ret,
+ TP_PROTO(int status, int reason, int err),
+ TP_ARGS(status, reason, err),
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(int, reason)
+ __field(int, err)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->reason = reason;
+ __entry->err = err;
+ ),
+ TP_printk("%d %d %d", __entry->status,
+ __entry->reason, __entry->err)
+);
+
+TRACE_EVENT(ocfs2_mark_extent_written,
+ TP_PROTO(unsigned long long owner, unsigned int cpos,
+ unsigned int len, unsigned int phys),
+ TP_ARGS(owner, cpos, len, phys),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(unsigned int, cpos)
+ __field(unsigned int, len)
+ __field(unsigned int, phys)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->cpos = cpos;
+ __entry->len = len;
+ __entry->phys = phys;
+ ),
+ TP_printk("%llu %u %u %u",
+ __entry->owner, __entry->cpos,
+ __entry->len, __entry->phys)
+);
+
+DECLARE_EVENT_CLASS(ocfs2__truncate_log_ops,
+ TP_PROTO(unsigned long long blkno, int index,
+ unsigned int start, unsigned int num),
+ TP_ARGS(blkno, index, start, num),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ __field(int, index)
+ __field(unsigned int, start)
+ __field(unsigned int, num)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ __entry->index = index;
+ __entry->start = start;
+ __entry->num = num;
+ ),
+ TP_printk("%llu %d %u %u",
+ __entry->blkno, __entry->index,
+ __entry->start, __entry->num)
+);
+
+#define DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(name) \
+DEFINE_EVENT(ocfs2__truncate_log_ops, name, \
+ TP_PROTO(unsigned long long blkno, int index, \
+ unsigned int start, unsigned int num), \
+ TP_ARGS(blkno, index, start, num))
+
+DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_truncate_log_append);
+
+DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_replay_truncate_records);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_flush_truncate_log);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_begin_truncate_log_recovery);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_truncate_log_recovery_num);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_complete_truncate_log_recovery);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_free_cached_blocks);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_cache_cluster_dealloc);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_run_deallocs);
+
+TRACE_EVENT(ocfs2_cache_block_dealloc,
+ TP_PROTO(int type, int slot, unsigned long long suballoc,
+ unsigned long long blkno, unsigned int bit),
+ TP_ARGS(type, slot, suballoc, blkno, bit),
+ TP_STRUCT__entry(
+ __field(int, type)
+ __field(int, slot)
+ __field(unsigned long long, suballoc)
+ __field(unsigned long long, blkno)
+ __field(unsigned int, bit)
+ ),
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->slot = slot;
+ __entry->suballoc = suballoc;
+ __entry->blkno = blkno;
+ __entry->bit = bit;
+ ),
+ TP_printk("%d %d %llu %llu %u",
+ __entry->type, __entry->slot, __entry->suballoc,
+ __entry->blkno, __entry->bit)
+);
+
+/* End of trace events for fs/ocfs2/alloc.c. */
+
+/* Trace events for fs/ocfs2/localalloc.c. */
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_la_set_sizes);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_alloc_should_use_local);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_load_local_alloc);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_begin_local_alloc_recovery);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_reserve_local_alloc_bits);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_local_alloc_count_bits);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits_search_bitmap);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_sync_local_to_main);
+
+TRACE_EVENT(ocfs2_sync_local_to_main_free,
+ TP_PROTO(int count, int bit, unsigned long long start_blk,
+ unsigned long long blkno),
+ TP_ARGS(count, bit, start_blk, blkno),
+ TP_STRUCT__entry(
+ __field(int, count)
+ __field(int, bit)
+ __field(unsigned long long, start_blk)
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->count = count;
+ __entry->bit = bit;
+ __entry->start_blk = start_blk;
+ __entry->blkno = blkno;
+ ),
+ TP_printk("%d %d %llu %llu",
+ __entry->count, __entry->bit, __entry->start_blk,
+ __entry->blkno)
+);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_new_window);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_local_alloc_new_window_result);
+
+/* End of trace events for fs/ocfs2/localalloc.c. */
+
+/* Trace events for fs/ocfs2/resize.c. */
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_update_last_group_and_inode);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_group_extend);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_group_add);
+
+/* End of trace events for fs/ocfs2/resize.c. */
+
+/* Trace events for fs/ocfs2/suballoc.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_group_descriptor);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_contig);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_discontig);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_block_group_alloc);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_nospc);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_no_new_group);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_reserve_new_inode_new_group);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_set_bits);
+
+TRACE_EVENT(ocfs2_relink_block_group,
+ TP_PROTO(unsigned long long i_blkno, unsigned int chain,
+ unsigned long long bg_blkno,
+ unsigned long long prev_blkno),
+ TP_ARGS(i_blkno, chain, bg_blkno, prev_blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, i_blkno)
+ __field(unsigned int, chain)
+ __field(unsigned long long, bg_blkno)
+ __field(unsigned long long, prev_blkno)
+ ),
+ TP_fast_assign(
+ __entry->i_blkno = i_blkno;
+ __entry->chain = chain;
+ __entry->bg_blkno = bg_blkno;
+ __entry->prev_blkno = prev_blkno;
+ ),
+ TP_printk("%llu %u %llu %llu",
+ __entry->i_blkno, __entry->chain, __entry->bg_blkno,
+ __entry->prev_blkno)
+);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_cluster_group_search_wrong_max_bits);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cluster_group_search_max_block);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_block_group_search_max_block);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_search_chain_begin);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_succ);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_end);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_claim_suballoc_bits);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_claim_new_inode_at_loc);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_clear_bits);
+
+TRACE_EVENT(ocfs2_free_suballoc_bits,
+ TP_PROTO(unsigned long long inode, unsigned long long group,
+ unsigned int start_bit, unsigned int count),
+ TP_ARGS(inode, group, start_bit, count),
+ TP_STRUCT__entry(
+ __field(unsigned long long, inode)
+ __field(unsigned long long, group)
+ __field(unsigned int, start_bit)
+ __field(unsigned int, count)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->group = group;
+ __entry->start_bit = start_bit;
+ __entry->count = count;
+ ),
+ TP_printk("%llu %llu %u %u", __entry->inode, __entry->group,
+ __entry->start_bit, __entry->count)
+);
+
+TRACE_EVENT(ocfs2_free_clusters,
+ TP_PROTO(unsigned long long bg_blkno, unsigned long long start_blk,
+ unsigned int start_bit, unsigned int count),
+ TP_ARGS(bg_blkno, start_blk, start_bit, count),
+ TP_STRUCT__entry(
+ __field(unsigned long long, bg_blkno)
+ __field(unsigned long long, start_blk)
+ __field(unsigned int, start_bit)
+ __field(unsigned int, count)
+ ),
+ TP_fast_assign(
+ __entry->bg_blkno = bg_blkno;
+ __entry->start_blk = start_blk;
+ __entry->start_bit = start_bit;
+ __entry->count = count;
+ ),
+ TP_printk("%llu %llu %u %u", __entry->bg_blkno, __entry->start_blk,
+ __entry->start_bit, __entry->count)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_get_suballoc_slot_bit);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_test_suballoc_bit);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_test_inode_bit);
+
+/* End of trace events for fs/ocfs2/suballoc.c. */
+
+/* Trace events for fs/ocfs2/refcounttree.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_refcount_block);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_refcount_trees);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree_blkno);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_change_refcount_rec);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_expand_inline_ref_root);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_divide_leaf_refcount_block);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_new_leaf_refcount_block);
+
+DECLARE_EVENT_CLASS(ocfs2__refcount_tree_ops,
+ TP_PROTO(unsigned long long blkno, int index,
+ unsigned long long cpos,
+ unsigned int clusters, unsigned int refcount),
+ TP_ARGS(blkno, index, cpos, clusters, refcount),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ __field(int, index)
+ __field(unsigned long long, cpos)
+ __field(unsigned int, clusters)
+ __field(unsigned int, refcount)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ __entry->index = index;
+ __entry->cpos = cpos;
+ __entry->clusters = clusters;
+ __entry->refcount = refcount;
+ ),
+ TP_printk("%llu %d %llu %u %u", __entry->blkno, __entry->index,
+ __entry->cpos, __entry->clusters, __entry->refcount)
+);
+
+#define DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(name) \
+DEFINE_EVENT(ocfs2__refcount_tree_ops, name, \
+ TP_PROTO(unsigned long long blkno, int index, \
+ unsigned long long cpos, \
+ unsigned int count, unsigned int refcount), \
+ TP_ARGS(blkno, index, cpos, count, refcount))
+
+DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_insert_refcount_rec);
+
+TRACE_EVENT(ocfs2_split_refcount_rec,
+ TP_PROTO(unsigned long long cpos,
+ unsigned int clusters, unsigned int refcount,
+ unsigned long long split_cpos,
+ unsigned int split_clusters, unsigned int split_refcount),
+ TP_ARGS(cpos, clusters, refcount,
+ split_cpos, split_clusters, split_refcount),
+ TP_STRUCT__entry(
+ __field(unsigned long long, cpos)
+ __field(unsigned int, clusters)
+ __field(unsigned int, refcount)
+ __field(unsigned long long, split_cpos)
+ __field(unsigned int, split_clusters)
+ __field(unsigned int, split_refcount)
+ ),
+ TP_fast_assign(
+ __entry->cpos = cpos;
+ __entry->clusters = clusters;
+ __entry->refcount = refcount;
+ __entry->split_cpos = split_cpos;
+ __entry->split_clusters = split_clusters;
+ __entry->split_refcount = split_refcount;
+ ),
+ TP_printk("%llu %u %u %llu %u %u",
+ __entry->cpos, __entry->clusters, __entry->refcount,
+ __entry->split_cpos, __entry->split_clusters,
+ __entry->split_refcount)
+);
+
+DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_split_refcount_rec_insert);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_increase_refcount_begin);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_change);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_increase_refcount_insert);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_split);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_remove_refcount_extent);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_restore_refcount_block);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_decrease_refcount_rec);
+
+TRACE_EVENT(ocfs2_decrease_refcount,
+ TP_PROTO(unsigned long long owner,
+ unsigned long long cpos,
+ unsigned int len, int delete),
+ TP_ARGS(owner, cpos, len, delete),
+ TP_STRUCT__entry(
+ __field(unsigned long long, owner)
+ __field(unsigned long long, cpos)
+ __field(unsigned int, len)
+ __field(int, delete)
+ ),
+ TP_fast_assign(
+ __entry->owner = owner;
+ __entry->cpos = cpos;
+ __entry->len = len;
+ __entry->delete = delete;
+ ),
+ TP_printk("%llu %llu %u %d",
+ __entry->owner, __entry->cpos, __entry->len, __entry->delete)
+);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_mark_extent_refcounted);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_calc_refcount_meta_credits);
+
+TRACE_EVENT(ocfs2_calc_refcount_meta_credits_iterate,
+ TP_PROTO(int recs_add, unsigned long long cpos,
+ unsigned int clusters, unsigned long long r_cpos,
+ unsigned int r_clusters, unsigned int refcount, int index),
+ TP_ARGS(recs_add, cpos, clusters, r_cpos, r_clusters, refcount, index),
+ TP_STRUCT__entry(
+ __field(int, recs_add)
+ __field(unsigned long long, cpos)
+ __field(unsigned int, clusters)
+ __field(unsigned long long, r_cpos)
+ __field(unsigned int, r_clusters)
+ __field(unsigned int, refcount)
+ __field(int, index)
+ ),
+ TP_fast_assign(
+ __entry->recs_add = recs_add;
+ __entry->cpos = cpos;
+ __entry->clusters = clusters;
+ __entry->r_cpos = r_cpos;
+ __entry->r_clusters = r_clusters;
+ __entry->refcount = refcount;
+ __entry->index = index;
+ ),
+ TP_printk("%d %llu %u %llu %u %u %d",
+ __entry->recs_add, __entry->cpos, __entry->clusters,
+ __entry->r_cpos, __entry->r_clusters,
+ __entry->refcount, __entry->index)
+);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_add_refcount_flag);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_prepare_refcount_change_for_del);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_lock_refcount_allocators);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_page);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_jbd);
+
+TRACE_EVENT(ocfs2_clear_ext_refcount,
+ TP_PROTO(unsigned long long ino, unsigned int cpos,
+ unsigned int len, unsigned int p_cluster,
+ unsigned int ext_flags),
+ TP_ARGS(ino, cpos, len, p_cluster, ext_flags),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, cpos)
+ __field(unsigned int, len)
+ __field(unsigned int, p_cluster)
+ __field(unsigned int, ext_flags)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->cpos = cpos;
+ __entry->len = len;
+ __entry->p_cluster = p_cluster;
+ __entry->ext_flags = ext_flags;
+ ),
+ TP_printk("%llu %u %u %u %u",
+ __entry->ino, __entry->cpos, __entry->len,
+ __entry->p_cluster, __entry->ext_flags)
+);
+
+TRACE_EVENT(ocfs2_replace_clusters,
+ TP_PROTO(unsigned long long ino, unsigned int cpos,
+ unsigned int old, unsigned int new, unsigned int len,
+ unsigned int ext_flags),
+ TP_ARGS(ino, cpos, old, new, len, ext_flags),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, cpos)
+ __field(unsigned int, old)
+ __field(unsigned int, new)
+ __field(unsigned int, len)
+ __field(unsigned int, ext_flags)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->cpos = cpos;
+ __entry->old = old;
+ __entry->new = new;
+ __entry->len = len;
+ __entry->ext_flags = ext_flags;
+ ),
+ TP_printk("%llu %u %u %u %u %u",
+ __entry->ino, __entry->cpos, __entry->old, __entry->new,
+ __entry->len, __entry->ext_flags)
+);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_make_clusters_writable);
+
+TRACE_EVENT(ocfs2_refcount_cow_hunk,
+ TP_PROTO(unsigned long long ino, unsigned int cpos,
+ unsigned int write_len, unsigned int max_cpos,
+ unsigned int cow_start, unsigned int cow_len),
+ TP_ARGS(ino, cpos, write_len, max_cpos, cow_start, cow_len),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, cpos)
+ __field(unsigned int, write_len)
+ __field(unsigned int, max_cpos)
+ __field(unsigned int, cow_start)
+ __field(unsigned int, cow_len)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->cpos = cpos;
+ __entry->write_len = write_len;
+ __entry->max_cpos = max_cpos;
+ __entry->cow_start = cow_start;
+ __entry->cow_len = cow_len;
+ ),
+ TP_printk("%llu %u %u %u %u %u",
+ __entry->ino, __entry->cpos, __entry->write_len,
+ __entry->max_cpos, __entry->cow_start, __entry->cow_len)
+);
+
+/* End of trace events for fs/ocfs2/refcounttree.c. */
+
+/* Trace events for fs/ocfs2/aops.c. */
+
+DECLARE_EVENT_CLASS(ocfs2__get_block,
+ TP_PROTO(unsigned long long ino, unsigned long long iblock,
+ void *bh_result, int create),
+ TP_ARGS(ino, iblock, bh_result, create),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned long long, iblock)
+ __field(void *, bh_result)
+ __field(int, create)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->iblock = iblock;
+ __entry->bh_result = bh_result;
+ __entry->create = create;
+ ),
+ TP_printk("%llu %llu %p %d",
+ __entry->ino, __entry->iblock,
+ __entry->bh_result, __entry->create)
+);
+
+#define DEFINE_OCFS2_GET_BLOCK_EVENT(name) \
+DEFINE_EVENT(ocfs2__get_block, name, \
+ TP_PROTO(unsigned long long ino, unsigned long long iblock, \
+ void *bh_result, int create), \
+ TP_ARGS(ino, iblock, bh_result, create))
+
+DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_symlink_get_block);
+
+DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_get_block);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_get_block_end);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_readpage);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_writepage);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_bmap);
+
+TRACE_EVENT(ocfs2_try_to_write_inline_data,
+ TP_PROTO(unsigned long long ino, unsigned int len,
+ unsigned long long pos, unsigned int flags),
+ TP_ARGS(ino, len, pos, flags),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, len)
+ __field(unsigned long long, pos)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->len = len;
+ __entry->pos = pos;
+ __entry->flags = flags;
+ ),
+ TP_printk("%llu %u %llu 0x%x",
+ __entry->ino, __entry->len, __entry->pos, __entry->flags)
+);
+
+TRACE_EVENT(ocfs2_write_begin_nolock,
+ TP_PROTO(unsigned long long ino,
+ long long i_size, unsigned int i_clusters,
+ unsigned long long pos, unsigned int len,
+ unsigned int flags, void *page,
+ unsigned int clusters, unsigned int extents_to_split),
+ TP_ARGS(ino, i_size, i_clusters, pos, len, flags,
+ page, clusters, extents_to_split),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(long long, i_size)
+ __field(unsigned int, i_clusters)
+ __field(unsigned long long, pos)
+ __field(unsigned int, len)
+ __field(unsigned int, flags)
+ __field(void *, page)
+ __field(unsigned int, clusters)
+ __field(unsigned int, extents_to_split)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->i_size = i_size;
+ __entry->i_clusters = i_clusters;
+ __entry->pos = pos;
+ __entry->len = len;
+ __entry->flags = flags;
+ __entry->page = page;
+ __entry->clusters = clusters;
+ __entry->extents_to_split = extents_to_split;
+ ),
+ TP_printk("%llu %lld %u %llu %u %u %p %u %u",
+ __entry->ino, __entry->i_size, __entry->i_clusters,
+ __entry->pos, __entry->len,
+ __entry->flags, __entry->page, __entry->clusters,
+ __entry->extents_to_split)
+);
+
+TRACE_EVENT(ocfs2_write_end_inline,
+ TP_PROTO(unsigned long long ino,
+ unsigned long long pos, unsigned int copied,
+ unsigned int id_count, unsigned int features),
+ TP_ARGS(ino, pos, copied, id_count, features),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned long long, pos)
+ __field(unsigned int, copied)
+ __field(unsigned int, id_count)
+ __field(unsigned int, features)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->pos = pos;
+ __entry->copied = copied;
+ __entry->id_count = id_count;
+ __entry->features = features;
+ ),
+ TP_printk("%llu %llu %u %u %u",
+ __entry->ino, __entry->pos, __entry->copied,
+ __entry->id_count, __entry->features)
+);
+
+/* End of trace events for fs/ocfs2/aops.c. */
+
+/* Trace events for fs/ocfs2/mmap.c. */
+
+TRACE_EVENT(ocfs2_fault,
+ TP_PROTO(unsigned long long ino,
+ void *area, void *page, unsigned long pgoff),
+ TP_ARGS(ino, area, page, pgoff),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(void *, area)
+ __field(void *, page)
+ __field(unsigned long, pgoff)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->area = area;
+ __entry->page = page;
+ __entry->pgoff = pgoff;
+ ),
+ TP_printk("%llu %p %p %lu",
+ __entry->ino, __entry->area, __entry->page, __entry->pgoff)
+);
+
+/* End of trace events for fs/ocfs2/mmap.c. */
+
+/* Trace events for fs/ocfs2/file.c. */
+
+DECLARE_EVENT_CLASS(ocfs2__file_ops,
+ TP_PROTO(void *inode, void *file, void *dentry,
+ unsigned long long ino,
+ unsigned int d_len, const unsigned char *d_name,
+ unsigned long long para),
+ TP_ARGS(inode, file, dentry, ino, d_len, d_name, para),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(void *, file)
+ __field(void *, dentry)
+ __field(unsigned long long, ino)
+ __field(unsigned int, d_len)
+ __string(d_name, d_name)
+ __field(unsigned long long, para)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->file = file;
+ __entry->dentry = dentry;
+ __entry->ino = ino;
+ __entry->d_len = d_len;
+ __assign_str(d_name, d_name);
+ __entry->para = para;
+ ),
+ TP_printk("%p %p %p %llu %llu %.*s", __entry->inode, __entry->file,
+ __entry->dentry, __entry->ino, __entry->para,
+ __entry->d_len, __get_str(d_name))
+);
+
+#define DEFINE_OCFS2_FILE_OPS(name) \
+DEFINE_EVENT(ocfs2__file_ops, name, \
+TP_PROTO(void *inode, void *file, void *dentry, \
+ unsigned long long ino, \
+ unsigned int d_len, const unsigned char *d_name, \
+ unsigned long long mode), \
+ TP_ARGS(inode, file, dentry, ino, d_len, d_name, mode))
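+
+/*
+ * Note on argument order: the class stores the per-event value in
+ * 'para' (named 'mode' in the wrapper) and TP_printk() emits it
+ * between the inode number and the dentry name, so the output reads
+ * "inode file dentry ino para name".
+ */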
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_open);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);
+
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_truncate_file_error);
+
+TRACE_EVENT(ocfs2_extend_allocation,
+ TP_PROTO(unsigned long long ip_blkno, unsigned long long size,
+ unsigned int clusters, unsigned int clusters_to_add,
+ int why, int restart_func),
+ TP_ARGS(ip_blkno, size, clusters, clusters_to_add, why, restart_func),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ip_blkno)
+ __field(unsigned long long, size)
+ __field(unsigned int, clusters)
+ __field(unsigned int, clusters_to_add)
+ __field(int, why)
+ __field(int, restart_func)
+ ),
+ TP_fast_assign(
+ __entry->ip_blkno = ip_blkno;
+ __entry->size = size;
+ __entry->clusters = clusters;
+ __entry->clusters_to_add = clusters_to_add;
+ __entry->why = why;
+ __entry->restart_func = restart_func;
+ ),
+ TP_printk("%llu %llu %u %u %d %d",
+ __entry->ip_blkno, __entry->size, __entry->clusters,
+ __entry->clusters_to_add, __entry->why, __entry->restart_func)
+);
+
+TRACE_EVENT(ocfs2_extend_allocation_end,
+ TP_PROTO(unsigned long long ino,
+ unsigned int di_clusters, unsigned long long di_size,
+ unsigned int ip_clusters, unsigned long long i_size),
+ TP_ARGS(ino, di_clusters, di_size, ip_clusters, i_size),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, di_clusters)
+ __field(unsigned long long, di_size)
+ __field(unsigned int, ip_clusters)
+ __field(unsigned long long, i_size)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->di_clusters = di_clusters;
+ __entry->di_size = di_size;
+ __entry->ip_clusters = ip_clusters;
+ __entry->i_size = i_size;
+ ),
+ TP_printk("%llu %u %llu %u %llu", __entry->ino, __entry->di_clusters,
+ __entry->di_size, __entry->ip_clusters, __entry->i_size)
+);
+
+TRACE_EVENT(ocfs2_write_zero_page,
+ TP_PROTO(unsigned long long ino,
+ unsigned long long abs_from, unsigned long long abs_to,
+ unsigned long index, unsigned int zero_from,
+ unsigned int zero_to),
+ TP_ARGS(ino, abs_from, abs_to, index, zero_from, zero_to),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned long long, abs_from)
+ __field(unsigned long long, abs_to)
+ __field(unsigned long, index)
+ __field(unsigned int, zero_from)
+ __field(unsigned int, zero_to)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->abs_from = abs_from;
+ __entry->abs_to = abs_to;
+ __entry->index = index;
+ __entry->zero_from = zero_from;
+ __entry->zero_to = zero_to;
+ ),
+ TP_printk("%llu %llu %llu %lu %u %u", __entry->ino,
+ __entry->abs_from, __entry->abs_to,
+ __entry->index, __entry->zero_from, __entry->zero_to)
+);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend_range);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend);
+
+TRACE_EVENT(ocfs2_setattr,
+ TP_PROTO(void *inode, void *dentry,
+ unsigned long long ino,
+ unsigned int d_len, const unsigned char *d_name,
+ unsigned int ia_valid, unsigned int ia_mode,
+ unsigned int ia_uid, unsigned int ia_gid),
+ TP_ARGS(inode, dentry, ino, d_len, d_name,
+ ia_valid, ia_mode, ia_uid, ia_gid),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(void *, dentry)
+ __field(unsigned long long, ino)
+ __field(unsigned int, d_len)
+ __string(d_name, d_name)
+ __field(unsigned int, ia_valid)
+ __field(unsigned int, ia_mode)
+ __field(unsigned int, ia_uid)
+ __field(unsigned int, ia_gid)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->dentry = dentry;
+ __entry->ino = ino;
+ __entry->d_len = d_len;
+ __assign_str(d_name, d_name);
+ __entry->ia_valid = ia_valid;
+ __entry->ia_mode = ia_mode;
+ __entry->ia_uid = ia_uid;
+ __entry->ia_gid = ia_gid;
+ ),
+ TP_printk("%p %p %llu %.*s %u %u %u %u", __entry->inode,
+ __entry->dentry, __entry->ino, __entry->d_len,
+ __get_str(d_name), __entry->ia_valid, __entry->ia_mode,
+ __entry->ia_uid, __entry->ia_gid)
+);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_write_remove_suid);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_partial_clusters);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range1);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range2);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_remove_inode_range);
+
+TRACE_EVENT(ocfs2_prepare_inode_for_write,
+ TP_PROTO(unsigned long long ino, unsigned long long saved_pos,
+ int appending, unsigned long count,
+ int *direct_io, int *has_refcount),
+ TP_ARGS(ino, saved_pos, appending, count, direct_io, has_refcount),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned long long, saved_pos)
+ __field(int, appending)
+ __field(unsigned long, count)
+ __field(int, direct_io)
+ __field(int, has_refcount)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->saved_pos = saved_pos;
+ __entry->appending = appending;
+ __entry->count = count;
+ __entry->direct_io = direct_io ? *direct_io : -1;
+ __entry->has_refcount = has_refcount ? *has_refcount : -1;
+ ),
+ TP_printk("%llu %llu %d %lu %d %d", __entry->ino,
+ __entry->saved_pos, __entry->appending, __entry->count,
+ __entry->direct_io, __entry->has_refcount)
+);
+
+DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
+
+/* End of trace events for fs/ocfs2/file.c. */
+
+/* Trace events for fs/ocfs2/inode.c. */
+
+TRACE_EVENT(ocfs2_iget_begin,
+ TP_PROTO(unsigned long long ino, unsigned int flags, int sysfile_type),
+ TP_ARGS(ino, flags, sysfile_type),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(unsigned int, flags)
+ __field(int, sysfile_type)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->flags = flags;
+ __entry->sysfile_type = sysfile_type;
+ ),
+ TP_printk("%llu %u %d", __entry->ino,
+ __entry->flags, __entry->sysfile_type)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_iget5_locked);
+
+TRACE_EVENT(ocfs2_iget_end,
+ TP_PROTO(void *inode, unsigned long long ino),
+ TP_ARGS(inode, ino),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(unsigned long long, ino)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->ino = ino;
+ ),
+ TP_printk("%p %llu", __entry->inode, __entry->ino)
+);
+
+TRACE_EVENT(ocfs2_find_actor,
+ TP_PROTO(void *inode, unsigned long long ino,
+ void *args, unsigned long long fi_blkno),
+ TP_ARGS(inode, ino, args, fi_blkno),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(unsigned long long, ino)
+ __field(void *, args)
+ __field(unsigned long long, fi_blkno)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->ino = ino;
+ __entry->args = args;
+ __entry->fi_blkno = fi_blkno;
+ ),
+ TP_printk("%p %llu %p %llu", __entry->inode, __entry->ino,
+ __entry->args, __entry->fi_blkno)
+);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_populate_inode);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_read_locked_inode);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_check_orphan_recovery_state);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_inode_block);
+
+TRACE_EVENT(ocfs2_inode_is_valid_to_delete,
+ TP_PROTO(void *task, void *dc_task, unsigned long long ino,
+ unsigned int flags),
+ TP_ARGS(task, dc_task, ino, flags),
+ TP_STRUCT__entry(
+ __field(void *, task)
+ __field(void *, dc_task)
+ __field(unsigned long long, ino)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->task = task;
+ __entry->dc_task = dc_task;
+ __entry->ino = ino;
+ __entry->flags = flags;
+ ),
+ TP_printk("%p %p %llu %u", __entry->task, __entry->dc_task,
+ __entry->ino, __entry->flags)
+);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_query_inode_wipe_begin);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_query_inode_wipe_succ);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_query_inode_wipe_end);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_cleanup_delete_inode);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_delete_inode);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_clear_inode);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_drop_inode);
+
+TRACE_EVENT(ocfs2_inode_revalidate,
+ TP_PROTO(void *inode, unsigned long long ino,
+ unsigned int flags),
+ TP_ARGS(inode, ino, flags),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(unsigned long long, ino)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->ino = ino;
+ __entry->flags = flags;
+ ),
+ TP_printk("%p %llu %u", __entry->inode, __entry->ino, __entry->flags)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_mark_inode_dirty);
+
+/* End of trace events for fs/ocfs2/inode.c. */
+
+/* Trace events for fs/ocfs2/extent_map.c. */
+
+TRACE_EVENT(ocfs2_read_virt_blocks,
+ TP_PROTO(void *inode, unsigned long long vblock, int nr,
+ void *bhs, unsigned int flags, void *validate),
+ TP_ARGS(inode, vblock, nr, bhs, flags, validate),
+ TP_STRUCT__entry(
+ __field(void *, inode)
+ __field(unsigned long long, vblock)
+ __field(int, nr)
+ __field(void *, bhs)
+ __field(unsigned int, flags)
+ __field(void *, validate)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->vblock = vblock;
+ __entry->nr = nr;
+ __entry->bhs = bhs;
+ __entry->flags = flags;
+ __entry->validate = validate;
+ ),
+ TP_printk("%p %llu %d %p %x %p", __entry->inode, __entry->vblock,
+ __entry->nr, __entry->bhs, __entry->flags, __entry->validate)
+);
+
+/* End of trace events for fs/ocfs2/extent_map.c. */
+
+/* Trace events for fs/ocfs2/slot_map.c. */
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_refresh_slot_info);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers_block);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_find_slot);
+
+/* End of trace events for fs/ocfs2/slot_map.c. */
+
+/* Trace events for fs/ocfs2/heartbeat.c. */
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_do_node_down);
+
+/* End of trace events for fs/ocfs2/heartbeat.c. */
+
+/* Trace events for fs/ocfs2/super.c. */
+
+TRACE_EVENT(ocfs2_remount,
+ TP_PROTO(unsigned long s_flags, unsigned long osb_flags, int flags),
+ TP_ARGS(s_flags, osb_flags, flags),
+ TP_STRUCT__entry(
+ __field(unsigned long, s_flags)
+ __field(unsigned long, osb_flags)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->s_flags = s_flags;
+ __entry->osb_flags = osb_flags;
+ __entry->flags = flags;
+ ),
+ TP_printk("%lu %lu %d", __entry->s_flags,
+ __entry->osb_flags, __entry->flags)
+);
+
+TRACE_EVENT(ocfs2_fill_super,
+ TP_PROTO(void *sb, void *data, int silent),
+ TP_ARGS(sb, data, silent),
+ TP_STRUCT__entry(
+ __field(void *, sb)
+ __field(void *, data)
+ __field(int, silent)
+ ),
+ TP_fast_assign(
+ __entry->sb = sb;
+ __entry->data = data;
+ __entry->silent = silent;
+ ),
+ TP_printk("%p %p %d", __entry->sb,
+ __entry->data, __entry->silent)
+);
+
+TRACE_EVENT(ocfs2_parse_options,
+ TP_PROTO(int is_remount, char *options),
+ TP_ARGS(is_remount, options),
+ TP_STRUCT__entry(
+ __field(int, is_remount)
+ __string(options, options)
+ ),
+ TP_fast_assign(
+ __entry->is_remount = is_remount;
+ __assign_str(options, options);
+ ),
+ TP_printk("%d %s", __entry->is_remount, __get_str(options))
+);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super);
+
+TRACE_EVENT(ocfs2_statfs,
+ TP_PROTO(void *sb, void *buf),
+ TP_ARGS(sb, buf),
+ TP_STRUCT__entry(
+ __field(void *, sb)
+ __field(void *, buf)
+ ),
+ TP_fast_assign(
+ __entry->sb = sb;
+ __entry->buf = buf;
+ ),
+ TP_printk("%p %p", __entry->sb, __entry->buf)
+);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_dismount_volume);
+
+TRACE_EVENT(ocfs2_initialize_super,
+ TP_PROTO(char *label, char *uuid_str, unsigned long long root_dir,
+ unsigned long long system_dir, int cluster_bits),
+ TP_ARGS(label, uuid_str, root_dir, system_dir, cluster_bits),
+ TP_STRUCT__entry(
+ __string(label, label)
+ __string(uuid_str, uuid_str)
+ __field(unsigned long long, root_dir)
+ __field(unsigned long long, system_dir)
+ __field(int, cluster_bits)
+ ),
+ TP_fast_assign(
+ __assign_str(label, label);
+ __assign_str(uuid_str, uuid_str);
+ __entry->root_dir = root_dir;
+ __entry->system_dir = system_dir;
+ __entry->cluster_bits = cluster_bits;
+ ),
+ TP_printk("%s %s %llu %llu %d", __get_str(label), __get_str(uuid_str),
+ __entry->root_dir, __entry->system_dir, __entry->cluster_bits)
+);
+
+/* End of trace events for fs/ocfs2/super.c. */
+
+/* Trace events for fs/ocfs2/xattr.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_xattr_block);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_xattr_extend_allocation);
+
+TRACE_EVENT(ocfs2_init_xattr_set_ctxt,
+ TP_PROTO(const char *name, int meta, int clusters, int credits),
+ TP_ARGS(name, meta, clusters, credits),
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(int, meta)
+ __field(int, clusters)
+ __field(int, credits)
+ ),
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->meta = meta;
+ __entry->clusters = clusters;
+ __entry->credits = credits;
+ ),
+ TP_printk("%s %d %d %d", __get_str(name), __entry->meta,
+ __entry->clusters, __entry->credits)
+);
+
+DECLARE_EVENT_CLASS(ocfs2__xattr_find,
+ TP_PROTO(unsigned long long ino, const char *name, int name_index,
+ unsigned int hash, unsigned long long location,
+ int xe_index),
+ TP_ARGS(ino, name, name_index, hash, location, xe_index),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __string(name, name)
+ __field(int, name_index)
+ __field(unsigned int, hash)
+ __field(unsigned long long, location)
+ __field(int, xe_index)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __assign_str(name, name);
+ __entry->name_index = name_index;
+ __entry->hash = hash;
+ __entry->location = location;
+ __entry->xe_index = xe_index;
+ ),
+ TP_printk("%llu %s %d %u %llu %d", __entry->ino, __get_str(name),
+ __entry->name_index, __entry->hash, __entry->location,
+ __entry->xe_index)
+);
+
+#define DEFINE_OCFS2_XATTR_FIND_EVENT(name) \
+DEFINE_EVENT(ocfs2__xattr_find, name, \
+TP_PROTO(unsigned long long ino, const char *name, int name_index, \
+ unsigned int hash, unsigned long long bucket, \
+ int xe_index), \
+ TP_ARGS(ino, name, name_index, hash, bucket, xe_index))
+
+DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_bucket_find);
+
+DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find);
+
+DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find_rec);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_iterate_xattr_buckets);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_iterate_xattr_bucket);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cp_xattr_block_to_bucket_begin);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cp_xattr_block_to_bucket_end);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block_begin);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_defrag_xattr_bucket);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_bucket_cross_cluster);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_divide_xattr_bucket_begin);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_divide_xattr_bucket_move);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_cp_xattr_bucket);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_buckets);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_adjust_xattr_cross_cluster);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_begin);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_add_new_xattr_cluster);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_insert);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_extend_xattr_bucket);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_add_new_xattr_bucket);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_xattr_bucket_value_truncate);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_rm_xattr_cluster);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_header);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_create_empty_xattr_block);
+
+DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_bucket);
+
+DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_index_block);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_xattr_bucket_value_refcount);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_reflink_xattr_buckets);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_rec);
+
+/* End of trace events for fs/ocfs2/xattr.c. */
+
+/* Trace events for fs/ocfs2/reservations.c. */
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_insert);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_begin);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_end);
+
+TRACE_EVENT(ocfs2_resv_find_window_begin,
+ TP_PROTO(unsigned int r_start, unsigned int r_end, unsigned int goal,
+ unsigned int wanted, int empty_root),
+ TP_ARGS(r_start, r_end, goal, wanted, empty_root),
+ TP_STRUCT__entry(
+ __field(unsigned int, r_start)
+ __field(unsigned int, r_end)
+ __field(unsigned int, goal)
+ __field(unsigned int, wanted)
+ __field(int, empty_root)
+ ),
+ TP_fast_assign(
+ __entry->r_start = r_start;
+ __entry->r_end = r_end;
+ __entry->goal = goal;
+ __entry->wanted = wanted;
+ __entry->empty_root = empty_root;
+ ),
+ TP_printk("%u %u %u %u %d", __entry->r_start, __entry->r_end,
+ __entry->goal, __entry->wanted, __entry->empty_root)
+);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_find_window_prev);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_resv_find_window_next);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cannibalize_resv_begin);
+
+TRACE_EVENT(ocfs2_cannibalize_resv_end,
+ TP_PROTO(unsigned int start, unsigned int end, unsigned int len,
+ unsigned int last_start, unsigned int last_len),
+ TP_ARGS(start, end, len, last_start, last_len),
+ TP_STRUCT__entry(
+ __field(unsigned int, start)
+ __field(unsigned int, end)
+ __field(unsigned int, len)
+ __field(unsigned int, last_start)
+ __field(unsigned int, last_len)
+ ),
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->len = len;
+ __entry->last_start = last_start;
+ __entry->last_len = last_len;
+ ),
+ TP_printk("%u %u %u %u %u", __entry->start, __entry->end,
+ __entry->len, __entry->last_start, __entry->last_len)
+);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_resv_bits);
+
+TRACE_EVENT(ocfs2_resmap_claimed_bits_begin,
+ TP_PROTO(unsigned int cstart, unsigned int cend, unsigned int clen,
+ unsigned int r_start, unsigned int r_end, unsigned int r_len,
+ unsigned int last_start, unsigned int last_len),
+ TP_ARGS(cstart, cend, clen, r_start, r_end,
+ r_len, last_start, last_len),
+ TP_STRUCT__entry(
+ __field(unsigned int, cstart)
+ __field(unsigned int, cend)
+ __field(unsigned int, clen)
+ __field(unsigned int, r_start)
+ __field(unsigned int, r_end)
+ __field(unsigned int, r_len)
+ __field(unsigned int, last_start)
+ __field(unsigned int, last_len)
+ ),
+ TP_fast_assign(
+ __entry->cstart = cstart;
+ __entry->cend = cend;
+ __entry->clen = clen;
+ __entry->r_start = r_start;
+ __entry->r_end = r_end;
+ __entry->r_len = r_len;
+ __entry->last_start = last_start;
+ __entry->last_len = last_len;
+ ),
+ TP_printk("%u %u %u %u %u %u %u %u",
+ __entry->cstart, __entry->cend, __entry->clen,
+ __entry->r_start, __entry->r_end, __entry->r_len,
+ __entry->last_start, __entry->last_len)
+);
+
+TRACE_EVENT(ocfs2_resmap_claimed_bits_end,
+ TP_PROTO(unsigned int start, unsigned int end, unsigned int len,
+ unsigned int last_start, unsigned int last_len),
+ TP_ARGS(start, end, len, last_start, last_len),
+ TP_STRUCT__entry(
+ __field(unsigned int, start)
+ __field(unsigned int, end)
+ __field(unsigned int, len)
+ __field(unsigned int, last_start)
+ __field(unsigned int, last_len)
+ ),
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->len = len;
+ __entry->last_start = last_start;
+ __entry->last_len = last_len;
+ ),
+ TP_printk("%u %u %u %u %u", __entry->start, __entry->end,
+ __entry->len, __entry->last_start, __entry->last_len)
+);
+
+/* End of trace events for fs/ocfs2/reservations.c. */
+
+/* Trace events for fs/ocfs2/quota_local.c. */
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_recover_local_quota_file);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_finish_quota_recovery);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(olq_set_dquot);
+
+/* End of trace events for fs/ocfs2/quota_local.c. */
+
+/* Trace events for fs/ocfs2/quota_global.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_quota_block);
+
+TRACE_EVENT(ocfs2_sync_dquot,
+ TP_PROTO(unsigned int dq_id, long long dqb_curspace,
+ long long spacechange, long long curinodes,
+ long long inodechange),
+ TP_ARGS(dq_id, dqb_curspace, spacechange, curinodes, inodechange),
+ TP_STRUCT__entry(
+ __field(unsigned int, dq_id)
+ __field(long long, dqb_curspace)
+ __field(long long, spacechange)
+ __field(long long, curinodes)
+ __field(long long, inodechange)
+ ),
+ TP_fast_assign(
+ __entry->dq_id = dq_id;
+ __entry->dqb_curspace = dqb_curspace;
+ __entry->spacechange = spacechange;
+ __entry->curinodes = curinodes;
+ __entry->inodechange = inodechange;
+ ),
+ TP_printk("%u %lld %lld %lld %lld", __entry->dq_id,
+ __entry->dqb_curspace, __entry->spacechange,
+ __entry->curinodes, __entry->inodechange)
+);
+
+TRACE_EVENT(ocfs2_sync_dquot_helper,
+ TP_PROTO(unsigned int dq_id, unsigned int dq_type, unsigned long type,
+ const char *s_id),
+ TP_ARGS(dq_id, dq_type, type, s_id),
+ TP_STRUCT__entry(
+ __field(unsigned int, dq_id)
+ __field(unsigned int, dq_type)
+ __field(unsigned long, type)
+ __string(s_id, s_id)
+ ),
+ TP_fast_assign(
+ __entry->dq_id = dq_id;
+ __entry->dq_type = dq_type;
+ __entry->type = type;
+ __assign_str(s_id, s_id);
+ ),
+ TP_printk("%u %u %lu %s", __entry->dq_id, __entry->dq_type,
+ __entry->type, __get_str(s_id))
+);
+
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_write_dquot);
+
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_release_dquot);
+
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_acquire_dquot);
+
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_mark_dquot_dirty);
+
+/* End of trace events for fs/ocfs2/quota_global.c. */
+
+/* Trace events for fs/ocfs2/dir.c. */
+DEFINE_OCFS2_INT_EVENT(ocfs2_search_dirblock);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_dir_block);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_find_entry_el);
+
+TRACE_EVENT(ocfs2_dx_dir_search,
+ TP_PROTO(unsigned long long ino, int namelen, const char *name,
+ unsigned int major_hash, unsigned int minor_hash,
+ unsigned long long blkno),
+ TP_ARGS(ino, namelen, name, major_hash, minor_hash, blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(int, namelen)
+ __string(name, name)
+ __field(unsigned int, major_hash)
+ __field(unsigned int, minor_hash)
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->namelen = namelen;
+ __assign_str(name, name);
+ __entry->major_hash = major_hash;
+ __entry->minor_hash = minor_hash;
+ __entry->blkno = blkno;
+ ),
+ TP_printk("%llu %.*s %u %u %llu", __entry->ino,
+ __entry->namelen, __get_str(name),
+ __entry->major_hash, __entry->minor_hash, __entry->blkno)
+);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_dx_dir_search_leaf_info);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_delete_entry_dx);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_readdir);
+
+TRACE_EVENT(ocfs2_find_files_on_disk,
+ TP_PROTO(int namelen, const char *name, void *blkno,
+ unsigned long long dir),
+ TP_ARGS(namelen, name, blkno, dir),
+ TP_STRUCT__entry(
+ __field(int, namelen)
+ __string(name, name)
+ __field(void *, blkno)
+ __field(unsigned long long, dir)
+ ),
+ TP_fast_assign(
+ __entry->namelen = namelen;
+ __assign_str(name, name);
+ __entry->blkno = blkno;
+ __entry->dir = dir;
+ ),
+ TP_printk("%.*s %p %llu", __entry->namelen, __get_str(name),
+ __entry->blkno, __entry->dir)
+);
+
+TRACE_EVENT(ocfs2_check_dir_for_entry,
+ TP_PROTO(unsigned long long dir, int namelen, const char *name),
+ TP_ARGS(dir, namelen, name),
+ TP_STRUCT__entry(
+ __field(unsigned long long, dir)
+ __field(int, namelen)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->namelen = namelen;
+ __assign_str(name, name);
+ ),
+ TP_printk("%llu %.*s", __entry->dir,
+ __entry->namelen, __get_str(name))
+);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_dx_dir_attach_index);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_format_cluster);
+
+TRACE_EVENT(ocfs2_dx_dir_index_root_block,
+ TP_PROTO(unsigned long long dir,
+ unsigned int major_hash, unsigned int minor_hash,
+ int namelen, const char *name, unsigned int num_used),
+ TP_ARGS(dir, major_hash, minor_hash, namelen, name, num_used),
+ TP_STRUCT__entry(
+ __field(unsigned long long, dir)
+ __field(unsigned int, major_hash)
+ __field(unsigned int, minor_hash)
+ __field(int, namelen)
+ __string(name, name)
+ __field(unsigned int, num_used)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->major_hash = major_hash;
+ __entry->minor_hash = minor_hash;
+ __entry->namelen = namelen;
+ __assign_str(name, name);
+ __entry->num_used = num_used;
+ ),
+ TP_printk("%llu %x %x %.*s %u", __entry->dir,
+ __entry->major_hash, __entry->minor_hash,
+ __entry->namelen, __get_str(name), __entry->num_used)
+);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_extend_dir);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_rebalance);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_dx_dir_rebalance_split);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_prepare_dir_for_insert);
+
+/* End of trace events for fs/ocfs2/dir.c. */
+
+/* Trace events for fs/ocfs2/namei.c. */
+
+DECLARE_EVENT_CLASS(ocfs2__dentry_ops,
+ TP_PROTO(void *dir, void *dentry, int name_len, const char *name,
+ unsigned long long dir_blkno, unsigned long long extra),
+ TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra),
+ TP_STRUCT__entry(
+ __field(void *, dir)
+ __field(void *, dentry)
+ __field(int, name_len)
+ __string(name, name)
+ __field(unsigned long long, dir_blkno)
+ __field(unsigned long long, extra)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->dentry = dentry;
+ __entry->name_len = name_len;
+ __assign_str(name, name);
+ __entry->dir_blkno = dir_blkno;
+ __entry->extra = extra;
+ ),
+ TP_printk("%p %p %.*s %llu %llu", __entry->dir, __entry->dentry,
+ __entry->name_len, __get_str(name),
+ __entry->dir_blkno, __entry->extra)
+);
+
+#define DEFINE_OCFS2_DENTRY_OPS(name) \
+DEFINE_EVENT(ocfs2__dentry_ops, name, \
+TP_PROTO(void *dir, void *dentry, int name_len, const char *name, \
+ unsigned long long dir_blkno, unsigned long long extra), \
+ TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra))
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_lookup);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_mkdir);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_create);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_unlink);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_symlink_create);
+
+DEFINE_OCFS2_DENTRY_OPS(ocfs2_mv_orphaned_inode_to_new);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_lookup_ret);
+
+TRACE_EVENT(ocfs2_mknod,
+ TP_PROTO(void *dir, void *dentry, int name_len, const char *name,
+ unsigned long long dir_blkno, unsigned long dev, int mode),
+ TP_ARGS(dir, dentry, name_len, name, dir_blkno, dev, mode),
+ TP_STRUCT__entry(
+ __field(void *, dir)
+ __field(void *, dentry)
+ __field(int, name_len)
+ __string(name, name)
+ __field(unsigned long long, dir_blkno)
+ __field(unsigned long, dev)
+ __field(int, mode)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->dentry = dentry;
+ __entry->name_len = name_len;
+ __assign_str(name, name);
+ __entry->dir_blkno = dir_blkno;
+ __entry->dev = dev;
+ __entry->mode = mode;
+ ),
+ TP_printk("%p %p %.*s %llu %lu %d", __entry->dir, __entry->dentry,
+ __entry->name_len, __get_str(name),
+ __entry->dir_blkno, __entry->dev, __entry->mode)
+);
+
+TRACE_EVENT(ocfs2_link,
+ TP_PROTO(unsigned long long ino, int old_len, const char *old_name,
+ int name_len, const char *name),
+ TP_ARGS(ino, old_len, old_name, name_len, name),
+ TP_STRUCT__entry(
+ __field(unsigned long long, ino)
+ __field(int, old_len)
+ __string(old_name, old_name)
+ __field(int, name_len)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->old_len = old_len;
+ __assign_str(old_name, old_name);
+ __entry->name_len = name_len;
+ __assign_str(name, name);
+ ),
+ TP_printk("%llu %.*s %.*s", __entry->ino,
+ __entry->old_len, __get_str(old_name),
+ __entry->name_len, __get_str(name))
+);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_unlink_noent);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock_end);
+
+TRACE_EVENT(ocfs2_rename,
+ TP_PROTO(void *old_dir, void *old_dentry,
+ void *new_dir, void *new_dentry,
+ int old_len, const char *old_name,
+ int new_len, const char *new_name),
+ TP_ARGS(old_dir, old_dentry, new_dir, new_dentry,
+ old_len, old_name, new_len, new_name),
+ TP_STRUCT__entry(
+ __field(void *, old_dir)
+ __field(void *, old_dentry)
+ __field(void *, new_dir)
+ __field(void *, new_dentry)
+ __field(int, old_len)
+ __string(old_name, old_name)
+ __field(int, new_len)
+ __string(new_name, new_name)
+ ),
+ TP_fast_assign(
+ __entry->old_dir = old_dir;
+ __entry->old_dentry = old_dentry;
+ __entry->new_dir = new_dir;
+ __entry->new_dentry = new_dentry;
+ __entry->old_len = old_len;
+ __assign_str(old_name, old_name);
+ __entry->new_len = new_len;
+ __assign_str(new_name, new_name);
+ ),
+ TP_printk("%p %p %p %p %.*s %.*s",
+ __entry->old_dir, __entry->old_dentry,
+ __entry->new_dir, __entry->new_dentry,
+ __entry->old_len, __get_str(old_name),
+ __entry->new_len, __get_str(new_name))
+);
+
+TRACE_EVENT(ocfs2_rename_target_exists,
+ TP_PROTO(int new_len, const char *new_name),
+ TP_ARGS(new_len, new_name),
+ TP_STRUCT__entry(
+ __field(int, new_len)
+ __string(new_name, new_name)
+ ),
+ TP_fast_assign(
+ __entry->new_len = new_len;
+ __assign_str(new_name, new_name);
+ ),
+ TP_printk("%.*s", __entry->new_len, __get_str(new_name))
+);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_rename_disagree);
+
+TRACE_EVENT(ocfs2_rename_over_existing,
+ TP_PROTO(unsigned long long new_blkno, void *new_bh,
+ unsigned long long newdi_blkno),
+ TP_ARGS(new_blkno, new_bh, newdi_blkno),
+ TP_STRUCT__entry(
+ __field(unsigned long long, new_blkno)
+ __field(void *, new_bh)
+ __field(unsigned long long, newdi_blkno)
+ ),
+ TP_fast_assign(
+ __entry->new_blkno = new_blkno;
+ __entry->new_bh = new_bh;
+ __entry->newdi_blkno = newdi_blkno;
+ ),
+ TP_printk("%llu %p %llu", __entry->new_blkno, __entry->new_bh,
+ __entry->newdi_blkno)
+);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_create_symlink_data);
+
+TRACE_EVENT(ocfs2_symlink_begin,
+ TP_PROTO(void *dir, void *dentry, const char *symname,
+ int len, const char *name),
+ TP_ARGS(dir, dentry, symname, len, name),
+ TP_STRUCT__entry(
+ __field(void *, dir)
+ __field(void *, dentry)
+ __field(const char *, symname)
+ __field(int, len)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __entry->dentry = dentry;
+ __entry->symname = symname;
+ __entry->len = len;
+ __assign_str(name, name);
+ ),
+ TP_printk("%p %p %s %.*s", __entry->dir, __entry->dentry,
+ __entry->symname, __entry->len, __get_str(name))
+);
+
+TRACE_EVENT(ocfs2_blkno_stringify,
+ TP_PROTO(unsigned long long blkno, const char *name, int namelen),
+ TP_ARGS(blkno, name, namelen),
+ TP_STRUCT__entry(
+ __field(unsigned long long, blkno)
+ __string(name, name)
+ __field(int, namelen)
+ ),
+ TP_fast_assign(
+ __entry->blkno = blkno;
+ __assign_str(name, name);
+ __entry->namelen = namelen;
+ ),
+ TP_printk("%llu %s %d", __entry->blkno, __get_str(name),
+ __entry->namelen)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_add_begin);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_orphan_add_end);
+
+TRACE_EVENT(ocfs2_orphan_del,
+ TP_PROTO(unsigned long long dir, const char *name, int namelen),
+ TP_ARGS(dir, name, namelen),
+ TP_STRUCT__entry(
+ __field(unsigned long long, dir)
+ __string(name, name)
+ __field(int, namelen)
+ ),
+ TP_fast_assign(
+ __entry->dir = dir;
+ __assign_str(name, name);
+ __entry->namelen = namelen;
+ ),
+ TP_printk("%llu %s %d", __entry->dir, __get_str(name),
+ __entry->namelen)
+);
+
+/* End of trace events for fs/ocfs2/namei.c. */
+
+/* Trace events for fs/ocfs2/dcache.c. */
+
+TRACE_EVENT(ocfs2_dentry_revalidate,
+ TP_PROTO(void *dentry, int len, const char *name),
+ TP_ARGS(dentry, len, name),
+ TP_STRUCT__entry(
+ __field(void *, dentry)
+ __field(int, len)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->dentry = dentry;
+ __entry->len = len;
+ __assign_str(name, name);
+ ),
+ TP_printk("%p %.*s", __entry->dentry, __entry->len, __get_str(name))
+);
+
+TRACE_EVENT(ocfs2_dentry_revalidate_negative,
+ TP_PROTO(int len, const char *name, unsigned long pgen,
+ unsigned long gen),
+ TP_ARGS(len, name, pgen, gen),
+ TP_STRUCT__entry(
+ __field(int, len)
+ __string(name, name)
+ __field(unsigned long, pgen)
+ __field(unsigned long, gen)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ __assign_str(name, name);
+ __entry->pgen = pgen;
+ __entry->gen = gen;
+ ),
+ TP_printk("%.*s %lu %lu", __entry->len, __get_str(name),
+ __entry->pgen, __entry->gen)
+);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_delete);
+
+DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_dentry_revalidate_orphaned);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_nofsdata);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_dentry_revalidate_ret);
+
+TRACE_EVENT(ocfs2_find_local_alias,
+ TP_PROTO(int len, const char *name),
+ TP_ARGS(len, name),
+ TP_STRUCT__entry(
+ __field(int, len)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ __assign_str(name, name);
+ ),
+ TP_printk("%.*s", __entry->len, __get_str(name))
+);
+
+TRACE_EVENT(ocfs2_dentry_attach_lock,
+ TP_PROTO(int len, const char *name,
+ unsigned long long parent, void *fsdata),
+ TP_ARGS(len, name, parent, fsdata),
+ TP_STRUCT__entry(
+ __field(int, len)
+ __string(name, name)
+ __field(unsigned long long, parent)
+ __field(void *, fsdata)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ __assign_str(name, name);
+ __entry->parent = parent;
+ __entry->fsdata = fsdata;
+ ),
+ TP_printk("%.*s %llu %p", __entry->len, __get_str(name),
+ __entry->parent, __entry->fsdata)
+);
+
+TRACE_EVENT(ocfs2_dentry_attach_lock_found,
+ TP_PROTO(const char *name, unsigned long long parent,
+ unsigned long long ino),
+ TP_ARGS(name, parent, ino),
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned long long, parent)
+ __field(unsigned long long, ino)
+ ),
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->parent = parent;
+ __entry->ino = ino;
+ ),
+ TP_printk("%s %llu %llu", __get_str(name), __entry->parent, __entry->ino)
+);
+/* End of trace events for fs/ocfs2/dcache.c. */
+
+/* Trace events for fs/ocfs2/export.c. */
+
+TRACE_EVENT(ocfs2_get_dentry_begin,
+ TP_PROTO(void *sb, void *handle, unsigned long long blkno),
+ TP_ARGS(sb, handle, blkno),
+ TP_STRUCT__entry(
+ __field(void *, sb)
+ __field(void *, handle)
+ __field(unsigned long long, blkno)
+ ),
+ TP_fast_assign(
+ __entry->sb = sb;
+ __entry->handle = handle;
+ __entry->blkno = blkno;
+ ),
+ TP_printk("%p %p %llu", __entry->sb, __entry->handle, __entry->blkno)
+);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_get_dentry_test_bit);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_get_dentry_stale);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_get_dentry_generation);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_dentry_end);
+
+TRACE_EVENT(ocfs2_get_parent,
+ TP_PROTO(void *child, int len, const char *name,
+ unsigned long long ino),
+ TP_ARGS(child, len, name, ino),
+ TP_STRUCT__entry(
+ __field(void *, child)
+ __field(int, len)
+ __string(name, name)
+ __field(unsigned long long, ino)
+ ),
+ TP_fast_assign(
+ __entry->child = child;
+ __entry->len = len;
+ __assign_str(name, name);
+ __entry->ino = ino;
+ ),
+ TP_printk("%p %.*s %llu", __entry->child, __entry->len,
+ __get_str(name), __entry->ino)
+);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_parent_end);
+
+TRACE_EVENT(ocfs2_encode_fh_begin,
+ TP_PROTO(void *dentry, int name_len, const char *name,
+ void *fh, int len, int connectable),
+ TP_ARGS(dentry, name_len, name, fh, len, connectable),
+ TP_STRUCT__entry(
+ __field(void *, dentry)
+ __field(int, name_len)
+ __string(name, name)
+ __field(void *, fh)
+ __field(int, len)
+ __field(int, connectable)
+ ),
+ TP_fast_assign(
+ __entry->dentry = dentry;
+ __entry->name_len = name_len;
+ __assign_str(name, name);
+ __entry->fh = fh;
+ __entry->len = len;
+ __entry->connectable = connectable;
+ ),
+ TP_printk("%p %.*s %p %d %d", __entry->dentry, __entry->name_len,
+ __get_str(name), __entry->fh, __entry->len,
+ __entry->connectable)
+);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_self);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_parent);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_encode_fh_type);
+
+/* End of trace events for fs/ocfs2/export.c. */
+
+/* Trace events for fs/ocfs2/journal.c. */
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_commit_cache_begin);
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_journal_init);
+
+DEFINE_OCFS2_UINT_EVENT(ocfs2_journal_init_maxlen);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_journal_shutdown);
+
+DEFINE_OCFS2_POINTER_EVENT(ocfs2_journal_shutdown_wait);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_complete_recovery);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_complete_recovery_end);
+
+TRACE_EVENT(ocfs2_complete_recovery_slot,
+ TP_PROTO(int slot, unsigned long long la_ino,
+ unsigned long long tl_ino, void *qrec),
+ TP_ARGS(slot, la_ino, tl_ino, qrec),
+ TP_STRUCT__entry(
+ __field(int, slot)
+ __field(unsigned long long, la_ino)
+ __field(unsigned long long, tl_ino)
+ __field(void *, qrec)
+ ),
+ TP_fast_assign(
+ __entry->slot = slot;
+ __entry->la_ino = la_ino;
+ __entry->tl_ino = tl_ino;
+ __entry->qrec = qrec;
+ ),
+ TP_printk("%d %llu %llu %p", __entry->slot, __entry->la_ino,
+ __entry->tl_ino, __entry->qrec)
+);
+
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_recovery_thread_node);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_recovery_thread_end);
+
+TRACE_EVENT(ocfs2_recovery_thread,
+ TP_PROTO(int node_num, int osb_node_num, int disable,
+ void *recovery_thread, int map_set),
+ TP_ARGS(node_num, osb_node_num, disable, recovery_thread, map_set),
+ TP_STRUCT__entry(
+ __field(int, node_num)
+ __field(int, osb_node_num)
+ __field(int, disable)
+ __field(void *, recovery_thread)
+ __field(int, map_set)
+ ),
+ TP_fast_assign(
+ __entry->node_num = node_num;
+ __entry->osb_node_num = osb_node_num;
+ __entry->disable = disable;
+ __entry->recovery_thread = recovery_thread;
+ __entry->map_set = map_set;
+ ),
+ TP_printk("%d %d %d %p %d", __entry->node_num,
+ __entry->osb_node_num, __entry->disable,
+ __entry->recovery_thread, __entry->map_set)
+);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_replay_journal_recovered);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_lock_err);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_skip);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_recover_node);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_recover_node_skip);
+
+DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_mark_dead_nodes);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_begin);
+
+DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_end);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_filldir);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_recover_orphans);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_recover_orphans_iput);
+
+DEFINE_OCFS2_INT_EVENT(ocfs2_wait_on_mount);
+
+/* End of trace events for fs/ocfs2/journal.c. */
+
+/* Trace events for fs/ocfs2/buffer_head_io.c. */
+
+DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_read_blocks_sync);
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_read_blocks_sync_jbd);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_read_blocks_from_disk);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_bh);
+
+DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_end);
+
+TRACE_EVENT(ocfs2_write_block,
+ TP_PROTO(unsigned long long block, void *ci),
+ TP_ARGS(block, ci),
+ TP_STRUCT__entry(
+ __field(unsigned long long, block)
+ __field(void *, ci)
+ ),
+ TP_fast_assign(
+ __entry->block = block;
+ __entry->ci = ci;
+ ),
+ TP_printk("%llu %p", __entry->block, __entry->ci)
+);
+
+TRACE_EVENT(ocfs2_read_blocks_begin,
+ TP_PROTO(void *ci, unsigned long long block,
+ unsigned int nr, int flags),
+ TP_ARGS(ci, block, nr, flags),
+ TP_STRUCT__entry(
+ __field(void *, ci)
+ __field(unsigned long long, block)
+ __field(unsigned int, nr)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->ci = ci;
+ __entry->block = block;
+ __entry->nr = nr;
+ __entry->flags = flags;
+ ),
+ TP_printk("%p %llu %u %d", __entry->ci, __entry->block,
+ __entry->nr, __entry->flags)
+);
+
+/* End of trace events for fs/ocfs2/buffer_head_io.c. */
+
+/* Trace events for fs/ocfs2/uptodate.c. */
+
+DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_copied_metadata_tree);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_metadata_cache_purge);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_buffer_cached_begin);
+
+TRACE_EVENT(ocfs2_buffer_cached_end,
+ TP_PROTO(int index, void *item),
+ TP_ARGS(index, item),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(void *, item)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->item = item;
+ ),
+ TP_printk("%d %p", __entry->index, __entry->item)
+);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_append_cache_array);
+
+DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_insert_cache_tree);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_expand_cache);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_set_buffer_uptodate);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_set_buffer_uptodate_begin);
+
+DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_remove_metadata_array);
+
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_remove_metadata_tree);
+
+DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_remove_block_from_cache);
+
+/* End of trace events for fs/ocfs2/uptodate.c. */
+#endif /* _TRACE_OCFS2_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE ocfs2_trace
+#include <trace/define_trace.h>
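The DEFINE_OCFS2_*_EVENT helpers used throughout this header are declared earlier in ocfs2_trace.h, outside the hunks shown here. Judging from the DEFINE_OCFS2_DENTRY_OPS pattern above, each presumably pairs a DECLARE_EVENT_CLASS with a one-line DEFINE_EVENT wrapper; a sketch of what the two-argument ULL_UINT variant would look like (class and parameter names here are illustrative, not taken from the patch):

/* Sketch only: the real class lives earlier in ocfs2_trace.h. */
DECLARE_EVENT_CLASS(ocfs2__ull_uint,
	TP_PROTO(unsigned long long value1, unsigned int value2),
	TP_ARGS(value1, value2),
	TP_STRUCT__entry(
		__field(unsigned long long, value1)
		__field(unsigned int, value2)
	),
	TP_fast_assign(
		__entry->value1 = value1;
		__entry->value2 = value2;
	),
	TP_printk("%llu %u", __entry->value1, __entry->value2)
);

#define DEFINE_OCFS2_ULL_UINT_EVENT(name)				\
DEFINE_EVENT(ocfs2__ull_uint, name,					\
	TP_PROTO(unsigned long long val1, unsigned int val2),		\
	TP_ARGS(val1, val2))

Once built in, these events should appear under /sys/kernel/debug/tracing/events/ocfs2/ (assuming the header sets TRACE_SYSTEM to ocfs2), where each one can be toggled through its per-event "enable" file.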
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index a73f64166481..92fcd575775a 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -11,7 +11,6 @@
#include <linux/writeback.h>
#include <linux/workqueue.h>
-#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>
#include "ocfs2_fs.h"
@@ -27,6 +26,7 @@
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
+#include "ocfs2_trace.h"
/*
* Locking of quotas with OCFS2 is rather complex. Here are rules that
@@ -130,8 +130,7 @@ int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
struct ocfs2_disk_dqtrailer *dqt =
ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);
- mlog(0, "Validating quota block %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -341,8 +340,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
u64 pcount;
int status;
- mlog_entry_void();
-
/* Read global header */
gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
OCFS2_INVALID_SLOT);
@@ -402,7 +399,8 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
out_unlock:
ocfs2_unlock_global_qf(oinfo, 0);
@@ -508,9 +506,10 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
olditime = dquot->dq_dqb.dqb_itime;
oldbtime = dquot->dq_dqb.dqb_btime;
ocfs2_global_disk2memdqb(dquot, &dqblk);
- mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
- dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
- dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
+ trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace,
+ (long long)spacechange,
+ dquot->dq_dqb.dqb_curinodes,
+ (long long)inodechange);
if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
dquot->dq_dqb.dqb_curspace += spacechange;
if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
@@ -557,7 +556,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
spin_unlock(&dq_data_lock);
err = ocfs2_qinfo_lock(info, freeing);
if (err < 0) {
- mlog(ML_ERROR, "Failed to lock quota info, loosing quota write"
+ mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
" (type=%d, id=%u)\n", dquot->dq_type,
(unsigned)dquot->dq_id);
goto out;
@@ -594,8 +593,8 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
struct ocfs2_super *osb = OCFS2_SB(sb);
int status = 0;
- mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
- dquot->dq_type, type, sb->s_id);
+ trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type,
+ type, sb->s_id);
if (type != dquot->dq_type)
goto out;
status = ocfs2_lock_global_qf(oinfo, 1);
@@ -621,7 +620,6 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
- mlog_exit(status);
return status;
}
@@ -647,7 +645,7 @@ static int ocfs2_write_dquot(struct dquot *dquot)
struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
int status = 0;
- mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
+ trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type);
handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
if (IS_ERR(handle)) {
@@ -660,7 +658,6 @@ static int ocfs2_write_dquot(struct dquot *dquot)
mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
ocfs2_commit_trans(osb, handle);
out:
- mlog_exit(status);
return status;
}
@@ -686,7 +683,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
int status = 0;
- mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
+ trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type);
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
@@ -722,7 +719,8 @@ out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
mutex_unlock(&dquot->dq_lock);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -743,7 +741,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
int need_alloc = ocfs2_global_qinit_alloc(sb, type);
handle_t *handle;
- mlog_entry("id=%u, type=%d", dquot->dq_id, type);
+ trace_ocfs2_acquire_dquot(dquot->dq_id, type);
mutex_lock(&dquot->dq_lock);
/*
* We need an exclusive lock, because we're going to update use count
@@ -809,7 +807,8 @@ out_dq:
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
mutex_unlock(&dquot->dq_lock);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -829,7 +828,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(sb);
- mlog_entry("id=%u, type=%d", dquot->dq_id, type);
+ trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type);
/* In case user set some limits, sync dquot immediately to global
* quota file so that information propagates quicker */
@@ -866,7 +865,8 @@ out_dlock:
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -877,8 +877,6 @@ static int ocfs2_write_info(struct super_block *sb, int type)
int status = 0;
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
- mlog_entry_void();
-
status = ocfs2_lock_global_qf(oinfo, 1);
if (status < 0)
goto out;
@@ -893,7 +891,8 @@ static int ocfs2_write_info(struct super_block *sb, int type)
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
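A pattern repeated in every converted exit path above: mlog_exit(status) logged the return value on every exit, success included, whereas the replacement reports only failures. The shape of the conversion, condensed (not new code from the patch):

/* Before: unconditional exit logging on every return path. */
out:
	mlog_exit(status);
	return status;

/* After: quiet on success; mlog_errno() records the errno with
 * file and line only when something actually failed. */
out:
	if (status)
		mlog_errno(status);
	return status;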
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc78764ccc4c..dc8007fc9247 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -8,7 +8,6 @@
#include <linux/quotaops.h>
#include <linux/module.h>
-#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>
#include "ocfs2_fs.h"
@@ -23,6 +22,7 @@
#include "quota.h"
#include "uptodate.h"
#include "super.h"
+#include "ocfs2_trace.h"
/* Number of local quota structures per block */
static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
@@ -475,7 +475,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
struct ocfs2_recovery_chunk *rchunk, *next;
qsize_t spacechange, inodechange;
- mlog_entry("ino=%lu type=%u", (unsigned long)lqinode->i_ino, type);
+ trace_ocfs2_recover_local_quota_file((unsigned long)lqinode->i_ino, type);
list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) {
chunk = rchunk->rc_chunk;
@@ -575,7 +575,8 @@ out_put_bh:
}
if (status < 0)
free_recovery_list(&(rec->r_list[type]));
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -600,7 +601,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
for (type = 0; type < MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
continue;
- mlog(0, "Recovering quota in slot %d\n", slot_num);
+ trace_ocfs2_finish_quota_recovery(slot_num);
lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num);
if (!lqinode) {
status = -ENOENT;
@@ -882,9 +883,10 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes -
od->dq_originodes);
spin_unlock(&dq_data_lock);
- mlog(0, "Writing local dquot %u space %lld inodes %lld\n",
- od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod),
- (long long)le64_to_cpu(dqblk->dqb_inodemod));
+ trace_olq_set_dquot(
+ (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod),
+ (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod),
+ od->dq_dquot.dq_id);
}
/* Write dquot to local quota file */
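One subtlety in the olq_set_dquot conversion above: the old mlog printed the dquot id first, but the shared ULL_ULL_UINT event class takes its two 64-bit values before the unsigned int, so the call site moves od->dq_dquot.dq_id to the last argument. Assuming the class prints its fields in declaration order, the emitted record would read spacemod, then inodemod, then id:

/* Old mlog text:    "Writing local dquot 1000 space 4096 inodes 2"
 * New trace record: "olq_set_dquot: 4096 2 1000"
 * (assumed "%llu %llu %u" ordering from the shared event class)   */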
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index c384d634872a..5d32749c896d 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -16,7 +16,6 @@
*/
#include <linux/sort.h>
-#define MLOG_MASK_PREFIX ML_REFCOUNT
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
@@ -34,6 +33,7 @@
#include "aops.h"
#include "xattr.h"
#include "namei.h"
+#include "ocfs2_trace.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -84,8 +84,7 @@ static int ocfs2_validate_refcount_block(struct super_block *sb,
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)bh->b_data;
- mlog(0, "Validating refcount block %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -545,8 +544,8 @@ void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
while ((node = rb_last(root)) != NULL) {
tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
- mlog(0, "Purge tree %llu\n",
- (unsigned long long) tree->rf_blkno);
+ trace_ocfs2_purge_refcount_trees(
+ (unsigned long long) tree->rf_blkno);
rb_erase(&tree->rf_node, root);
ocfs2_free_refcount_tree(tree);
@@ -575,7 +574,8 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
- mlog(0, "create tree for inode %lu\n", inode->i_ino);
+ trace_ocfs2_create_refcount_tree(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret) {
@@ -646,8 +646,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
di->i_refcount_loc = cpu_to_le64(first_blkno);
spin_unlock(&oi->ip_lock);
- mlog(0, "created tree for inode %lu, refblock %llu\n",
- inode->i_ino, (unsigned long long)first_blkno);
+ trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
ocfs2_journal_dirty(handle, di_bh);
@@ -1256,8 +1255,9 @@ static int ocfs2_change_refcount_rec(handle_t *handle,
goto out;
}
- mlog(0, "change index %d, old count %u, change %d\n", index,
- le32_to_cpu(rec->r_refcount), change);
+ trace_ocfs2_change_refcount_rec(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ index, le32_to_cpu(rec->r_refcount), change);
le32_add_cpu(&rec->r_refcount, change);
if (!rec->r_refcount) {
@@ -1353,8 +1353,8 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
ocfs2_journal_dirty(handle, ref_root_bh);
- mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
- le16_to_cpu(new_rb->rf_records.rl_used));
+ trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
+ le16_to_cpu(new_rb->rf_records.rl_used));
*ref_leaf_bh = new_bh;
new_bh = NULL;
@@ -1466,9 +1466,9 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
(struct ocfs2_refcount_block *)new_bh->b_data;
struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
- mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
- (unsigned long long)ref_leaf_bh->b_blocknr,
- le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
+ trace_ocfs2_divide_leaf_refcount_block(
+ (unsigned long long)ref_leaf_bh->b_blocknr,
+ le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
/*
* XXX: Improvement later.
@@ -1601,8 +1601,8 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
- mlog(0, "insert new leaf block %llu at %u\n",
- (unsigned long long)new_bh->b_blocknr, new_cpos);
+ trace_ocfs2_new_leaf_refcount_block(
+ (unsigned long long)new_bh->b_blocknr, new_cpos);
/* Insert the new leaf block with the specific offset cpos. */
ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
@@ -1794,11 +1794,10 @@ static int ocfs2_insert_refcount_rec(handle_t *handle,
(le16_to_cpu(rf_list->rl_used) - index) *
sizeof(struct ocfs2_refcount_rec));
- mlog(0, "insert refcount record start %llu, len %u, count %u "
- "to leaf block %llu at index %d\n",
- (unsigned long long)le64_to_cpu(rec->r_cpos),
- le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
- (unsigned long long)ref_leaf_bh->b_blocknr, index);
+ trace_ocfs2_insert_refcount_rec(
+ (unsigned long long)ref_leaf_bh->b_blocknr, index,
+ (unsigned long long)le64_to_cpu(rec->r_cpos),
+ le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));
rf_list->rl_recs[index] = *rec;
@@ -1850,10 +1849,12 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
- mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
- le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
- le64_to_cpu(split_rec->r_cpos),
- le32_to_cpu(split_rec->r_clusters));
+ trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
+ le32_to_cpu(orig_rec->r_clusters),
+ le32_to_cpu(orig_rec->r_refcount),
+ le64_to_cpu(split_rec->r_cpos),
+ le32_to_cpu(split_rec->r_clusters),
+ le32_to_cpu(split_rec->r_refcount));
/*
* If we just need to split the header or tail clusters,
@@ -1967,12 +1968,11 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
if (split_rec->r_refcount) {
rf_list->rl_recs[index] = *split_rec;
- mlog(0, "insert refcount record start %llu, len %u, count %u "
- "to leaf block %llu at index %d\n",
- (unsigned long long)le64_to_cpu(split_rec->r_cpos),
- le32_to_cpu(split_rec->r_clusters),
- le32_to_cpu(split_rec->r_refcount),
- (unsigned long long)ref_leaf_bh->b_blocknr, index);
+ trace_ocfs2_split_refcount_rec_insert(
+ (unsigned long long)ref_leaf_bh->b_blocknr, index,
+ (unsigned long long)le64_to_cpu(split_rec->r_cpos),
+ le32_to_cpu(split_rec->r_clusters),
+ le32_to_cpu(split_rec->r_refcount));
if (merge)
ocfs2_refcount_rec_merge(rb, index);
@@ -1997,7 +1997,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
struct ocfs2_refcount_rec rec;
unsigned int set_len = 0;
- mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
+ trace_ocfs2_increase_refcount_begin(
(unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)cpos, len);
@@ -2024,9 +2024,9 @@ static int __ocfs2_increase_refcount(handle_t *handle,
*/
if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
set_len <= len) {
- mlog(0, "increase refcount rec, start %llu, len %u, "
- "count %u\n", (unsigned long long)cpos, set_len,
- le32_to_cpu(rec.r_refcount));
+ trace_ocfs2_increase_refcount_change(
+ (unsigned long long)cpos, set_len,
+ le32_to_cpu(rec.r_refcount));
ret = ocfs2_change_refcount_rec(handle, ci,
ref_leaf_bh, index,
merge, 1);
@@ -2037,7 +2037,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
} else if (!rec.r_refcount) {
rec.r_refcount = cpu_to_le32(1);
- mlog(0, "insert refcount rec, start %llu, len %u\n",
+ trace_ocfs2_increase_refcount_insert(
(unsigned long long)le64_to_cpu(rec.r_cpos),
set_len);
ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
@@ -2055,8 +2055,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
rec.r_clusters = cpu_to_le32(set_len);
le32_add_cpu(&rec.r_refcount, 1);
- mlog(0, "split refcount rec, start %llu, "
- "len %u, count %u\n",
+ trace_ocfs2_increase_refcount_split(
(unsigned long long)le64_to_cpu(rec.r_cpos),
set_len, le32_to_cpu(rec.r_refcount));
ret = ocfs2_split_refcount_rec(handle, ci,
@@ -2095,6 +2094,11 @@ static int ocfs2_remove_refcount_extent(handle_t *handle,
BUG_ON(rb->rf_records.rl_used);
+ trace_ocfs2_remove_refcount_extent(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)ref_leaf_bh->b_blocknr,
+ le32_to_cpu(rb->rf_cpos));
+
ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
1, meta_ac, dealloc);
@@ -2137,7 +2141,7 @@ static int ocfs2_remove_refcount_extent(handle_t *handle,
if (!rb->rf_list.l_next_free_rec) {
BUG_ON(rb->rf_clusters);
- mlog(0, "reset refcount tree root %llu to be a record block.\n",
+ trace_ocfs2_restore_refcount_block(
(unsigned long long)ref_root_bh->b_blocknr);
rb->rf_flags = 0;
@@ -2184,6 +2188,10 @@ static int ocfs2_decrease_refcount_rec(handle_t *handle,
BUG_ON(cpos + len >
le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
+ trace_ocfs2_decrease_refcount_rec(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)cpos, len);
+
if (cpos == le64_to_cpu(rec->r_cpos) &&
len == le32_to_cpu(rec->r_clusters))
ret = ocfs2_change_refcount_rec(handle, ci,
@@ -2195,12 +2203,6 @@ static int ocfs2_decrease_refcount_rec(handle_t *handle,
le32_add_cpu(&split.r_refcount, -1);
- mlog(0, "split refcount rec, start %llu, "
- "len %u, count %u, original start %llu, len %u\n",
- (unsigned long long)le64_to_cpu(split.r_cpos),
- len, le32_to_cpu(split.r_refcount),
- (unsigned long long)le64_to_cpu(rec->r_cpos),
- le32_to_cpu(rec->r_clusters));
ret = ocfs2_split_refcount_rec(handle, ci,
ref_root_bh, ref_leaf_bh,
&split, index, 1,
@@ -2239,10 +2241,9 @@ static int __ocfs2_decrease_refcount(handle_t *handle,
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
struct buffer_head *ref_leaf_bh = NULL;
- mlog(0, "Tree owner %llu, decrease refcount start %llu, "
- "len %u, delete %u\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long)cpos, len, delete);
+ trace_ocfs2_decrease_refcount(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)cpos, len, delete);
while (len) {
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
@@ -2352,8 +2353,8 @@ static int ocfs2_mark_extent_refcounted(struct inode *inode,
{
int ret;
- mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
- inode->i_ino, cpos, len, phys);
+ trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
+ cpos, len, phys);
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
@@ -2392,8 +2393,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
u32 len;
- mlog(0, "start_cpos %llu, clusters %u\n",
- (unsigned long long)start_cpos, clusters);
while (clusters) {
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
cpos, clusters, &rec,
@@ -2427,12 +2426,11 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
- mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
- "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
- recs_add, (unsigned long long)cpos, clusters,
- (unsigned long long)le64_to_cpu(rec.r_cpos),
- le32_to_cpu(rec.r_clusters),
- le32_to_cpu(rec.r_refcount), index);
+ trace_ocfs2_calc_refcount_meta_credits_iterate(
+ recs_add, (unsigned long long)cpos, clusters,
+ (unsigned long long)le64_to_cpu(rec.r_cpos),
+ le32_to_cpu(rec.r_clusters),
+ le32_to_cpu(rec.r_refcount), index);
len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
le32_to_cpu(rec.r_clusters)) - cpos;
@@ -2488,7 +2486,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
if (!ref_blocks)
goto out;
- mlog(0, "we need ref_blocks %d\n", ref_blocks);
*meta_add += ref_blocks;
*credits += ref_blocks;
@@ -2514,6 +2511,10 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
}
out:
+
+ trace_ocfs2_calc_refcount_meta_credits(
+ (unsigned long long)start_cpos, clusters,
+ *meta_add, *credits);
brelse(ref_leaf_bh);
brelse(prev_bh);
return ret;
@@ -2578,8 +2579,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
goto out;
}
- mlog(0, "reserve new metadata %d blocks, credits = %d\n",
- *ref_blocks, *credits);
+ trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
out:
brelse(ref_root_bh);
@@ -2886,8 +2886,7 @@ static int ocfs2_lock_refcount_allocators(struct super_block *sb,
goto out;
}
- mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
- meta_add, num_clusters, *credits);
+ trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
meta_ac);
if (ret) {
@@ -2937,8 +2936,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
loff_t offset, end, map_end;
struct address_space *mapping = context->inode->i_mapping;
- mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
- new_cluster, new_len, cpos);
+ trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
+ new_cluster, new_len);
readahead_pages =
(ocfs2_cow_contig_clusters(sb) <<
@@ -3031,8 +3030,8 @@ static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
struct buffer_head *old_bh = NULL;
struct buffer_head *new_bh = NULL;
- mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster,
- new_cluster, new_len);
+ trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
+ new_cluster, new_len);
for (i = 0; i < blocks; i++, old_block++, new_block++) {
new_bh = sb_getblk(osb->sb, new_block);
@@ -3085,8 +3084,8 @@ static int ocfs2_clear_ext_refcount(handle_t *handle,
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
- mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
- (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
+ trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
+ cpos, len, p_cluster, ext_flags);
memset(&replace_rec, 0, sizeof(replace_rec));
replace_rec.e_cpos = cpu_to_le32(cpos);
@@ -3141,8 +3140,8 @@ static int ocfs2_replace_clusters(handle_t *handle,
struct ocfs2_caching_info *ci = context->data_et.et_ci;
u64 ino = ocfs2_metadata_cache_owner(ci);
- mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
- (unsigned long long)ino, cpos, old, new, len, ext_flags);
+ trace_ocfs2_replace_clusters((unsigned long long)ino,
+ cpos, old, new, len, ext_flags);
/*If the old clusters is unwritten, no need to duplicate. */
if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
@@ -3236,8 +3235,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
struct ocfs2_refcount_rec rec;
- mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
- cpos, p_cluster, num_clusters, e_flags);
+ trace_ocfs2_make_clusters_writable(cpos, p_cluster,
+ num_clusters, e_flags);
ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
&context->data_et,
@@ -3475,9 +3474,9 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
goto out;
}
- mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
- "cow_len %u\n", inode->i_ino,
- cpos, write_len, cow_start, cow_len);
+ trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
+ cpos, write_len, max_cpos,
+ cow_start, cow_len);
BUG_ON(cow_len == 0);
@@ -3756,8 +3755,7 @@ int ocfs2_add_refcount_flag(struct inode *inode,
goto out;
}
- mlog(0, "reserve new metadata %d, credits = %d\n",
- ref_blocks, credits);
+ trace_ocfs2_add_refcount_flag(ref_blocks, credits);
if (ref_blocks) {
ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
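The ocfs2_split_refcount_rec event fired above takes six arguments; its definition sits earlier in ocfs2_trace.h, outside this section. Reconstructed from the call site and the TP_* patterns shown earlier, it would presumably look like the sketch below (the field names are assumptions):

TRACE_EVENT(ocfs2_split_refcount_rec,
	TP_PROTO(unsigned long long cpos, unsigned int clusters,
		 unsigned int refcount, unsigned long long split_cpos,
		 unsigned int split_clusters, unsigned int split_refcount),
	TP_ARGS(cpos, clusters, refcount,
		split_cpos, split_clusters, split_refcount),
	TP_STRUCT__entry(
		__field(unsigned long long, cpos)
		__field(unsigned int, clusters)
		__field(unsigned int, refcount)
		__field(unsigned long long, split_cpos)
		__field(unsigned int, split_clusters)
		__field(unsigned int, split_refcount)
	),
	TP_fast_assign(
		__entry->cpos = cpos;
		__entry->clusters = clusters;
		__entry->refcount = refcount;
		__entry->split_cpos = split_cpos;
		__entry->split_clusters = split_clusters;
		__entry->split_refcount = split_refcount;
	),
	TP_printk("%llu %u %u %llu %u %u",
		  __entry->cpos, __entry->clusters, __entry->refcount,
		  __entry->split_cpos, __entry->split_clusters,
		  __entry->split_refcount)
);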
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index 3e78db361bc7..41ffd36c689c 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -30,10 +30,10 @@
#include <linux/bitops.h>
#include <linux/list.h>
-#define MLOG_MASK_PREFIX ML_RESERVATIONS
#include <cluster/masklog.h>
#include "ocfs2.h"
+#include "ocfs2_trace.h"
#ifdef CONFIG_OCFS2_DEBUG_FS
#define OCFS2_CHECK_RESERVATIONS
@@ -321,8 +321,7 @@ static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap,
assert_spin_locked(&resv_lock);
- mlog(0, "Insert reservation start: %u len: %u\n", new->r_start,
- new->r_len);
+ trace_ocfs2_resv_insert(new->r_start, new->r_len);
while (*p) {
parent = *p;
@@ -423,8 +422,8 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
unsigned int best_start, best_len = 0;
int offset, start, found;
- mlog(0, "Find %u bits within range (%u, len %u) resmap len: %u\n",
- wanted, search_start, search_len, resmap->m_bitmap_len);
+ trace_ocfs2_resmap_find_free_bits_begin(search_start, search_len,
+ wanted, resmap->m_bitmap_len);
found = best_start = best_len = 0;
@@ -463,7 +462,7 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
*rlen = best_len;
*rstart = best_start;
- mlog(0, "Found start: %u len: %u\n", best_start, best_len);
+ trace_ocfs2_resmap_find_free_bits_end(best_start, best_len);
return *rlen;
}
@@ -487,9 +486,8 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
* - our window should be last in all reservations
* - need to make sure we don't go past end of bitmap
*/
-
- mlog(0, "resv start: %u resv end: %u goal: %u wanted: %u\n",
- resv->r_start, ocfs2_resv_end(resv), goal, wanted);
+ trace_ocfs2_resv_find_window_begin(resv->r_start, ocfs2_resv_end(resv),
+ goal, wanted, RB_EMPTY_ROOT(root));
assert_spin_locked(&resv_lock);
@@ -498,9 +496,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
* Easiest case - empty tree. We can just take
* whatever window of free bits we want.
*/
-
- mlog(0, "Empty root\n");
-
clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
resmap->m_bitmap_len - goal,
&cstart, &clen);
@@ -524,8 +519,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
prev_resv = ocfs2_find_resv_lhs(resmap, goal);
if (prev_resv == NULL) {
- mlog(0, "Goal on LHS of leftmost window\n");
-
/*
* A NULL here means that the search code couldn't
* find a window that starts before goal.
@@ -570,13 +563,15 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
next_resv = NULL;
}
+ trace_ocfs2_resv_find_window_prev(prev_resv->r_start,
+ ocfs2_resv_end(prev_resv));
+
prev = &prev_resv->r_node;
/* Now we do a linear search for a window, starting at 'prev_rsv' */
while (1) {
next = rb_next(prev);
if (next) {
- mlog(0, "One more resv found in linear search\n");
next_resv = rb_entry(next,
struct ocfs2_alloc_reservation,
r_node);
@@ -585,7 +580,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
gap_end = next_resv->r_start - 1;
gap_len = gap_end - gap_start + 1;
} else {
- mlog(0, "No next node\n");
/*
* We're at the rightmost edge of the
* tree. See if a reservation between this
@@ -596,6 +590,8 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
gap_end = resmap->m_bitmap_len - 1;
}
+ trace_ocfs2_resv_find_window_next(next ? next_resv->r_start : -1,
+ next ? ocfs2_resv_end(next_resv) : -1);
/*
* No need to check this gap if we have already found
* a larger region of free bits.
@@ -654,8 +650,9 @@ static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
lru_resv = list_first_entry(&resmap->m_lru,
struct ocfs2_alloc_reservation, r_lru);
- mlog(0, "lru resv: start: %u len: %u end: %u\n", lru_resv->r_start,
- lru_resv->r_len, ocfs2_resv_end(lru_resv));
+ trace_ocfs2_cannibalize_resv_begin(lru_resv->r_start,
+ lru_resv->r_len,
+ ocfs2_resv_end(lru_resv));
/*
* Cannibalize (some or all) of the target reservation and
@@ -684,10 +681,9 @@ static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
resv->r_len = shrink;
}
- mlog(0, "Reservation now looks like: r_start: %u r_end: %u "
- "r_len: %u r_last_start: %u r_last_len: %u\n",
- resv->r_start, ocfs2_resv_end(resv), resv->r_len,
- resv->r_last_start, resv->r_last_len);
+ trace_ocfs2_cannibalize_resv_end(resv->r_start, ocfs2_resv_end(resv),
+ resv->r_len, resv->r_last_start,
+ resv->r_last_len);
ocfs2_resv_insert(resmap, resv);
}
@@ -748,7 +744,6 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
wanted = *clen;
- mlog(0, "empty reservation, find new window\n");
/*
* Try to get a window here. If it works, we must fall
* through and test the bitmap . This avoids some
@@ -757,6 +752,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
* that inode.
*/
ocfs2_resv_find_window(resmap, resv, wanted);
+ trace_ocfs2_resmap_resv_bits(resv->r_start, resv->r_len);
}
BUG_ON(ocfs2_resv_empty(resv));
@@ -813,10 +809,10 @@ void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
spin_lock(&resv_lock);
- mlog(0, "claim bits: cstart: %u cend: %u clen: %u r_start: %u "
- "r_end: %u r_len: %u, r_last_start: %u r_last_len: %u\n",
- cstart, cend, clen, resv->r_start, ocfs2_resv_end(resv),
- resv->r_len, resv->r_last_start, resv->r_last_len);
+ trace_ocfs2_resmap_claimed_bits_begin(cstart, cend, clen, resv->r_start,
+ ocfs2_resv_end(resv), resv->r_len,
+ resv->r_last_start,
+ resv->r_last_len);
BUG_ON(cstart < resv->r_start);
BUG_ON(cstart > ocfs2_resv_end(resv));
@@ -833,10 +829,9 @@ void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
if (!ocfs2_resv_empty(resv))
ocfs2_resv_mark_lru(resmap, resv);
- mlog(0, "Reservation now looks like: r_start: %u r_end: %u "
- "r_len: %u r_last_start: %u r_last_len: %u\n",
- resv->r_start, ocfs2_resv_end(resv), resv->r_len,
- resv->r_last_start, resv->r_last_len);
+ trace_ocfs2_resmap_claimed_bits_end(resv->r_start, ocfs2_resv_end(resv),
+ resv->r_len, resv->r_last_start,
+ resv->r_last_len);
ocfs2_check_resmap(resmap);
diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h
index 1e49cc29d06c..42c2b804f3fd 100644
--- a/fs/ocfs2/reservations.h
+++ b/fs/ocfs2/reservations.h
@@ -29,7 +29,7 @@
struct ocfs2_alloc_reservation {
struct rb_node r_node;
- unsigned int r_start; /* Begining of current window */
+ unsigned int r_start; /* Beginning of current window */
unsigned int r_len; /* Length of the window */
unsigned int r_last_len; /* Length of most recent alloc */
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index dacd553d8617..ec55add7604a 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -27,7 +27,6 @@
#include <linux/fs.h>
#include <linux/types.h>
-#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -39,6 +38,7 @@
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
#include "suballoc.h"
@@ -82,7 +82,6 @@ static u16 ocfs2_calc_new_backup_super(struct inode *inode,
backups++;
}
- mlog_exit_void();
return backups;
}
@@ -103,8 +102,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
- mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n",
- new_clusters, first_new_cluster);
+ trace_ocfs2_update_last_group_and_inode(new_clusters,
+ first_new_cluster);
ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
@@ -176,7 +175,8 @@ out_rollback:
le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
}
out:
- mlog_exit(ret);
+ if (ret)
+ mlog_errno(ret);
return ret;
}
@@ -281,8 +281,6 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
u32 first_new_cluster;
u64 lgd_blkno;
- mlog_entry_void();
-
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
@@ -342,7 +340,8 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
goto out_unlock;
}
- mlog(0, "extend the last group at %llu, new clusters = %d\n",
+
+ trace_ocfs2_group_extend(
(unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);
handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
@@ -377,7 +376,6 @@ out_mutex:
iput(main_bm_inode);
out:
- mlog_exit_void();
return ret;
}
@@ -472,8 +470,6 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
struct ocfs2_chain_rec *cr;
u16 cl_bpc;
- mlog_entry_void();
-
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
return -EROFS;
@@ -520,8 +516,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
goto out_unlock;
}
- mlog(0, "Add a new group %llu in chain = %u, length = %u\n",
- (unsigned long long)input->group, input->chain, input->clusters);
+ trace_ocfs2_group_add((unsigned long long)input->group,
+ input->chain, input->clusters, input->frees);
handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
if (IS_ERR(handle)) {
@@ -589,6 +585,5 @@ out_mutex:
iput(main_bm_inode);
out:
- mlog_exit_void();
return ret;
}
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index ab4e0172cc1d..26fc0014d509 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/highmem.h>
-#define MLOG_MASK_PREFIX ML_SUPER
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -39,6 +38,7 @@
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -142,8 +142,7 @@ int ocfs2_refresh_slot_info(struct ocfs2_super *osb)
BUG_ON(si->si_blocks == 0);
BUG_ON(si->si_bh == NULL);
- mlog(0, "Refreshing slot map, reading %u block(s)\n",
- si->si_blocks);
+ trace_ocfs2_refresh_slot_info(si->si_blocks);
/*
* We pass -1 as blocknr because we expect all of si->si_bh to
@@ -381,8 +380,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
/* The size checks above should ensure this */
BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks);
- mlog(0, "Slot map needs %u buffers for %llu bytes\n",
- si->si_blocks, bytes);
+ trace_ocfs2_map_slot_buffers(bytes, si->si_blocks);
si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks,
GFP_KERNEL);
@@ -400,8 +398,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
goto bail;
}
- mlog(0, "Reading slot map block %u at %llu\n", i,
- (unsigned long long)blkno);
+ trace_ocfs2_map_slot_buffers_block((unsigned long long)blkno, i);
bh = NULL; /* Acquire a fresh bh */
status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno,
@@ -475,8 +472,6 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
int slot;
struct ocfs2_slot_info *si;
- mlog_entry_void();
-
si = osb->slot_info;
spin_lock(&osb->osb_lock);
@@ -505,14 +500,13 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
osb->slot_num = slot;
spin_unlock(&osb->osb_lock);
- mlog(0, "taking node slot %d\n", osb->slot_num);
+ trace_ocfs2_find_slot(osb->slot_num);
status = ocfs2_update_disk_slot(osb, si, osb->slot_num);
if (status < 0)
mlog_errno(status);
bail:
- mlog_exit(status);
return status;
}
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 8ce7398ae1d2..1ec56fdb8d0d 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -126,7 +126,7 @@ struct ocfs2_stack_operations {
*
* ->connect() must not return until it is guaranteed that
*
- * - Node down notifications for the filesystem will be recieved
+ * - Node down notifications for the filesystem will be received
* and passed to conn->cc_recovery_handler().
* - Locking requests for the filesystem will be processed.
*/
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 71998d4d61d5..ba5d97e4a73e 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/highmem.h>
-#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -44,6 +43,7 @@
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
+#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -308,8 +308,8 @@ static int ocfs2_validate_group_descriptor(struct super_block *sb,
int rc;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
- mlog(0, "Validating group descriptor %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_group_descriptor(
+ (unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -389,8 +389,6 @@ static int ocfs2_block_group_fill(handle_t *handle,
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct super_block * sb = alloc_inode->i_sb;
- mlog_entry_void();
-
if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
"b_blocknr (%llu)",
@@ -436,7 +434,8 @@ static int ocfs2_block_group_fill(handle_t *handle,
* allocation time. */
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -477,8 +476,8 @@ ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle,
/* setup the group */
bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "new descriptor, record %u, at block %llu\n",
- alloc_rec, (unsigned long long)bg_blkno);
+ trace_ocfs2_block_group_alloc_contig(
+ (unsigned long long)bg_blkno, alloc_rec);
bg_bh = sb_getblk(osb->sb, bg_blkno);
if (!bg_bh) {
@@ -657,8 +656,8 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
/* setup the group */
bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "new descriptor, record %u, at block %llu\n",
- alloc_rec, (unsigned long long)bg_blkno);
+ trace_ocfs2_block_group_alloc_discontig(
+ (unsigned long long)bg_blkno, alloc_rec);
bg_bh = sb_getblk(osb->sb, bg_blkno);
if (!bg_bh) {
@@ -707,8 +706,6 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
- mlog_entry_void();
-
cl = &fe->id2.i_chain;
status = ocfs2_reserve_clusters_with_limit(osb,
le16_to_cpu(cl->cl_cpg),
@@ -730,8 +727,8 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
}
if (last_alloc_group && *last_alloc_group != 0) {
- mlog(0, "use old allocation group %llu for block group alloc\n",
- (unsigned long long)*last_alloc_group);
+ trace_ocfs2_block_group_alloc(
+ (unsigned long long)*last_alloc_group);
ac->ac_last_group = *last_alloc_group;
}
@@ -796,7 +793,8 @@ bail:
brelse(bg_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -814,8 +812,6 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
struct ocfs2_dinode *fe;
u32 free_bits;
- mlog_entry_void();
-
alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
if (!alloc_inode) {
mlog_errno(-EINVAL);
@@ -855,16 +851,15 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
if (bits_wanted > free_bits) {
/* cluster bitmap never grows */
if (ocfs2_is_cluster_bitmap(alloc_inode)) {
- mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
- bits_wanted, free_bits);
+ trace_ocfs2_reserve_suballoc_bits_nospc(bits_wanted,
+ free_bits);
status = -ENOSPC;
goto bail;
}
if (!(flags & ALLOC_NEW_GROUP)) {
- mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, "
- "and we don't alloc a new group for it.\n",
- slot, bits_wanted, free_bits);
+ trace_ocfs2_reserve_suballoc_bits_no_new_group(
+ slot, bits_wanted, free_bits);
status = -ENOSPC;
goto bail;
}
@@ -890,7 +885,8 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
bail:
brelse(bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1052,7 +1048,8 @@ bail:
*ac = NULL;
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1119,8 +1116,8 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
spin_lock(&osb->osb_lock);
osb->osb_inode_alloc_group = alloc_group;
spin_unlock(&osb->osb_lock);
- mlog(0, "after reservation, new allocation group is "
- "%llu\n", (unsigned long long)alloc_group);
+ trace_ocfs2_reserve_new_inode_new_group(
+ (unsigned long long)alloc_group);
/*
* Some inodes must be freed by us, so try to allocate
@@ -1152,7 +1149,8 @@ bail:
*ac = NULL;
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1189,8 +1187,6 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
{
int status;
- mlog_entry_void();
-
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
@@ -1229,7 +1225,8 @@ bail:
*ac = NULL;
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1357,15 +1354,12 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
void *bitmap = bg->bg_bitmap;
int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
- mlog_entry_void();
-
/* All callers get the descriptor via
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
- mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
- num_bits);
+ trace_ocfs2_block_group_set_bits(bit_off, num_bits);
if (ocfs2_is_cluster_bitmap(alloc_inode))
journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
@@ -1394,7 +1388,8 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
ocfs2_journal_dirty(handle, group_bh);
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1437,10 +1432,10 @@ static int ocfs2_relink_block_group(handle_t *handle,
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg));
- mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n",
- (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
- (unsigned long long)le64_to_cpu(bg->bg_blkno),
- (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
+ trace_ocfs2_relink_block_group(
+ (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
+ (unsigned long long)le64_to_cpu(bg->bg_blkno),
+ (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
bg_ptr = le64_to_cpu(bg->bg_next_group);
@@ -1484,7 +1479,8 @@ out_rollback:
prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1515,7 +1511,7 @@ static int ocfs2_cluster_group_search(struct inode *inode,
max_bits = le16_to_cpu(gd->bg_bits);
/* Tail groups in cluster bitmaps which aren't cpg
- * aligned are prone to partial extention by a failed
+ * aligned are prone to partial extension by a failed
* fs resize. If the file system resize never got to
* update the dinode cluster count, then we don't want
* to trust any clusters past it, regardless of what
@@ -1525,10 +1521,10 @@ static int ocfs2_cluster_group_search(struct inode *inode,
if ((gd_cluster_off + max_bits) >
OCFS2_I(inode)->ip_clusters) {
max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
- mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n",
- (unsigned long long)le64_to_cpu(gd->bg_blkno),
- le16_to_cpu(gd->bg_bits),
- OCFS2_I(inode)->ip_clusters, max_bits);
+ trace_ocfs2_cluster_group_search_wrong_max_bits(
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ le16_to_cpu(gd->bg_bits),
+ OCFS2_I(inode)->ip_clusters, max_bits);
}
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
@@ -1542,9 +1538,9 @@ static int ocfs2_cluster_group_search(struct inode *inode,
gd_cluster_off +
res->sr_bit_offset +
res->sr_bits);
- mlog(0, "Checking %llu against %llu\n",
- (unsigned long long)blkoff,
- (unsigned long long)max_block);
+ trace_ocfs2_cluster_group_search_max_block(
+ (unsigned long long)blkoff,
+ (unsigned long long)max_block);
if (blkoff > max_block)
return -ENOSPC;
}
@@ -1588,9 +1584,9 @@ static int ocfs2_block_group_search(struct inode *inode,
if (!ret && max_block) {
blkoff = le64_to_cpu(bg->bg_blkno) +
res->sr_bit_offset + res->sr_bits;
- mlog(0, "Checking %llu against %llu\n",
- (unsigned long long)blkoff,
- (unsigned long long)max_block);
+ trace_ocfs2_block_group_search_max_block(
+ (unsigned long long)blkoff,
+ (unsigned long long)max_block);
if (blkoff > max_block)
ret = -ENOSPC;
}
@@ -1756,9 +1752,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
struct ocfs2_group_desc *bg;
chain = ac->ac_chain;
- mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
- bits_wanted, chain,
- (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);
+ trace_ocfs2_search_chain_begin(
+ (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
+ bits_wanted, chain);
status = ocfs2_read_group_descriptor(alloc_inode, fe,
le64_to_cpu(cl->cl_recs[chain].c_blkno),
@@ -1799,8 +1795,8 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
goto bail;
}
- mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
- res->sr_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));
+ trace_ocfs2_search_chain_succ(
+ (unsigned long long)le64_to_cpu(bg->bg_blkno), res->sr_bits);
res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno);
@@ -1861,8 +1857,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
goto bail;
}
- mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
- (unsigned long long)le64_to_cpu(fe->i_blkno));
+ trace_ocfs2_search_chain_end(
+ (unsigned long long)le64_to_cpu(fe->i_blkno),
+ res->sr_bits);
out_loc_only:
*bits_left = le16_to_cpu(bg->bg_free_bits_count);
@@ -1870,7 +1867,8 @@ bail:
brelse(group_bh);
brelse(prev_group_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1888,8 +1886,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
struct ocfs2_chain_list *cl;
struct ocfs2_dinode *fe;
- mlog_entry_void();
-
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
BUG_ON(!ac->ac_bh);
@@ -1945,8 +1941,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
goto bail;
}
- mlog(0, "Search of victim chain %u came up with nothing, "
- "trying all chains now.\n", victim);
+ trace_ocfs2_claim_suballoc_bits(victim);
/* If we didn't pick a good victim, then just default to
* searching each chain in order. Don't allow chain relinking
@@ -1984,7 +1979,8 @@ set_hint:
}
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2021,7 +2017,8 @@ int ocfs2_claim_metadata(handle_t *handle,
*num_bits = res.sr_bits;
status = 0;
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2172,8 +2169,8 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
goto out;
}
- mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
- (unsigned long long)di_blkno);
+ trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
+ res->sr_bits);
atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
@@ -2201,8 +2198,6 @@ int ocfs2_claim_new_inode(handle_t *handle,
int status;
struct ocfs2_suballoc_result res;
- mlog_entry_void();
-
BUG_ON(!ac);
BUG_ON(ac->ac_bits_given != 0);
BUG_ON(ac->ac_bits_wanted != 1);
@@ -2230,7 +2225,8 @@ int ocfs2_claim_new_inode(handle_t *handle,
ocfs2_save_inode_ac_group(dir, ac);
status = 0;
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2307,8 +2303,6 @@ int __ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb);
- mlog_entry_void();
-
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
@@ -2363,7 +2357,8 @@ int __ocfs2_claim_clusters(handle_t *handle,
ac->ac_bits_given += *num_clusters;
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2392,13 +2387,11 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
unsigned int tmp;
struct ocfs2_group_desc *undo_bg = NULL;
- mlog_entry_void();
-
/* The caller got this descriptor from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
- mlog(0, "off = %u, num = %u\n", bit_off, num_bits);
+ trace_ocfs2_block_group_clear_bits(bit_off, num_bits);
BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode));
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
@@ -2463,19 +2456,18 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *group;
- mlog_entry_void();
-
/* The alloc_bh comes from ocfs2_free_dinode() or
* ocfs2_free_clusters(). The callers have all locked the
* allocator and gotten alloc_bh from the lock call. This
- * validates the dinode buffer. Any corruption that has happended
+ * validates the dinode buffer. Any corruption that has happened
* is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
- mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n",
- (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count,
- (unsigned long long)bg_blkno, start_bit);
+ trace_ocfs2_free_suballoc_bits(
+ (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
+ (unsigned long long)bg_blkno,
+ start_bit, count);
status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno,
&group_bh);
@@ -2511,7 +2503,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
bail:
brelse(group_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2556,11 +2549,8 @@ static int _ocfs2_free_clusters(handle_t *handle,
/* You can't ever have a contiguous set of clusters
* bigger than a block group bitmap so we never have to worry
- * about looping on them. */
-
- mlog_entry_void();
-
- /* This is expensive. We can safely remove once this stuff has
+ * about looping on them.
+ * This is expensive. We can safely remove once this stuff has
* gotten tested really well. */
BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
@@ -2569,10 +2559,9 @@ static int _ocfs2_free_clusters(handle_t *handle,
ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
&bg_start_bit);
- mlog(0, "want to free %u clusters starting at block %llu\n",
- num_clusters, (unsigned long long)start_blk);
- mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n",
- (unsigned long long)bg_blkno, bg_start_bit);
+ trace_ocfs2_free_clusters((unsigned long long)bg_blkno,
+ (unsigned long long)start_blk,
+ bg_start_bit, num_clusters);
status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
bg_start_bit, bg_blkno,
@@ -2586,7 +2575,8 @@ static int _ocfs2_free_clusters(handle_t *handle,
num_clusters);
out:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2756,7 +2746,7 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
struct buffer_head *inode_bh = NULL;
struct ocfs2_dinode *inode_fe;
- mlog_entry("blkno: %llu\n", (unsigned long long)blkno);
+ trace_ocfs2_get_suballoc_slot_bit((unsigned long long)blkno);
/* dirty read disk */
status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh);
@@ -2793,7 +2783,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
bail:
brelse(inode_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2816,8 +2807,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
u64 bg_blkno;
int status;
- mlog_entry("blkno: %llu bit: %u\n", (unsigned long long)blkno,
- (unsigned int)bit);
+ trace_ocfs2_test_suballoc_bit((unsigned long long)blkno,
+ (unsigned int)bit);
alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data;
if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) {
@@ -2844,7 +2835,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
bail:
brelse(group_bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2869,7 +2861,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
struct inode *inode_alloc_inode;
struct buffer_head *alloc_bh = NULL;
- mlog_entry("blkno: %llu", (unsigned long long)blkno);
+ trace_ocfs2_test_inode_bit((unsigned long long)blkno);
status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
&group_blkno, &suballoc_bit);
@@ -2910,6 +2902,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
iput(inode_alloc_inode);
brelse(alloc_bh);
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
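The suballoc.c hunks above replace each mlog(0, ...) call with a named tracepoint. For orientation, here is a minimal sketch of what the matching TRACE_EVENT definition for trace_ocfs2_free_suballoc_bits() could look like; the real entries live in fs/ocfs2/ocfs2_trace.h (added elsewhere in this series), so the exact field layout and format strings may differ:

	/* sketch of a trace header entry; assumes the standard
	 * TRACE_EVENT boilerplate, not the actual ocfs2_trace.h text */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM ocfs2

	#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_OCFS2_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(ocfs2_free_suballoc_bits,
		TP_PROTO(unsigned long long owner, unsigned long long bg_blkno,
			 unsigned int start_bit, unsigned int count),
		TP_ARGS(owner, bg_blkno, start_bit, count),
		TP_STRUCT__entry(
			__field(unsigned long long, owner)
			__field(unsigned long long, bg_blkno)
			__field(unsigned int, start_bit)
			__field(unsigned int, count)
		),
		TP_fast_assign(
			__entry->owner = owner;
			__entry->bg_blkno = bg_blkno;
			__entry->start_bit = start_bit;
			__entry->count = count;
		),
		TP_printk("%llu %llu %u %u",
			  __entry->owner, __entry->bg_blkno,
			  __entry->start_bit, __entry->count)
	);

	#endif /* _TRACE_OCFS2_H */

	/* local trace header: tell define_trace.h where this file lives */
	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH .
	#define TRACE_INCLUDE_FILE ocfs2_trace
	#include <trace/define_trace.h>

Each trace_<name>() call site in the diff corresponds to one such definition.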
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 236ed1bdca2c..5a521c748859 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -42,7 +42,9 @@
#include <linux/seq_file.h>
#include <linux/quotaops.h>
-#define MLOG_MASK_PREFIX ML_SUPER
+#define CREATE_TRACE_POINTS
+#include "ocfs2_trace.h"
+
#include <cluster/masklog.h>
#include "ocfs2.h"
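A note on the CREATE_TRACE_POINTS hunk above: the kernel tracepoint machinery requires that exactly one translation unit define CREATE_TRACE_POINTS before including the trace header; that inclusion emits the tracepoint definitions, while every other file includes the header plainly and only sees declarations. Schematically:

	/* fs/ocfs2/super.c: emit the tracepoint definitions exactly once */
	#define CREATE_TRACE_POINTS
	#include "ocfs2_trace.h"

	/* every other fs/ocfs2 file: declarations only */
	#include "ocfs2_trace.h"

Defining CREATE_TRACE_POINTS in more than one file would produce duplicate-symbol link errors, which is why only super.c carries the define.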
@@ -76,7 +78,7 @@ static struct kmem_cache *ocfs2_inode_cachep = NULL;
struct kmem_cache *ocfs2_dquot_cachep;
struct kmem_cache *ocfs2_qf_chunk_cachep;
-/* OCFS2 needs to schedule several differnt types of work which
+/* OCFS2 needs to schedule several different types of work which
* require cluster locking, disk I/O, recovery waits, etc. Since these
* types of work tend to be heavy we avoid using the kernel events
* workqueue and schedule on our own. */
@@ -441,8 +443,6 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
int status = 0;
int i;
- mlog_entry_void();
-
new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0);
if (IS_ERR(new)) {
status = PTR_ERR(new);
@@ -478,7 +478,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
}
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -488,8 +489,6 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
int status = 0;
int i;
- mlog_entry_void();
-
for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1;
i < NUM_SYSTEM_INODES;
i++) {
@@ -508,7 +507,8 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
}
bail:
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -517,8 +517,6 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
int i;
struct inode *inode;
- mlog_entry_void();
-
for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) {
inode = osb->global_system_inodes[i];
if (inode) {
@@ -540,7 +538,7 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
}
if (!osb->local_system_inodes)
- goto out;
+ return;
for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) {
if (osb->local_system_inodes[i]) {
@@ -551,9 +549,6 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
kfree(osb->local_system_inodes);
osb->local_system_inodes = NULL;
-
-out:
- mlog_exit(0);
}
/* We're allocating fs objects, use GFP_NOFS */
@@ -684,12 +679,9 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
}
if (*flags & MS_RDONLY) {
- mlog(0, "Going to ro mode.\n");
sb->s_flags |= MS_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
} else {
- mlog(0, "Making ro filesystem writeable.\n");
-
if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
mlog(ML_ERROR, "Cannot remount RDWR "
"filesystem due to previous errors.\n");
@@ -707,6 +699,7 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags &= ~MS_RDONLY;
osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
}
+ trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
unlock_osb:
spin_unlock(&osb->osb_lock);
/* Enable quota accounting after remounting RW */
@@ -1032,7 +1025,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
char nodestr[8];
struct ocfs2_blockcheck_stats stats;
- mlog_entry("%p, %p, %i", sb, data, silent);
+ trace_ocfs2_fill_super(sb, data, silent);
if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
status = -EINVAL;
@@ -1208,7 +1201,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
mlog_errno(status);
atomic_set(&osb->vol_state, VOLUME_DISABLED);
wake_up(&osb->osb_mount_event);
- mlog_exit(status);
return status;
}
}
@@ -1222,7 +1214,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
/* Start this when the mount is almost sure of being successful */
ocfs2_orphan_scan_start(osb);
- mlog_exit(status);
return status;
read_super_error:
@@ -1237,7 +1228,8 @@ read_super_error:
ocfs2_dismount_volume(sb, 1);
}
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1320,8 +1312,7 @@ static int ocfs2_parse_options(struct super_block *sb,
char *p;
u32 tmp;
- mlog_entry("remount: %d, options: \"%s\"\n", is_remount,
- options ? options : "(none)");
+ trace_ocfs2_parse_options(is_remount, options ? options : "(none)");
mopt->commit_interval = 0;
mopt->mount_opt = OCFS2_MOUNT_NOINTR;
@@ -1538,7 +1529,6 @@ static int ocfs2_parse_options(struct super_block *sb,
status = 1;
bail:
- mlog_exit(status);
return status;
}
@@ -1629,8 +1619,6 @@ static int __init ocfs2_init(void)
{
int status;
- mlog_entry_void();
-
ocfs2_print_version();
status = init_ocfs2_uptodate_cache();
@@ -1664,10 +1652,9 @@ leave:
if (status < 0) {
ocfs2_free_mem_caches();
exit_ocfs2_uptodate_cache();
+ mlog_errno(status);
}
- mlog_exit(status);
-
if (status >= 0) {
return register_filesystem(&ocfs2_fs_type);
} else
@@ -1676,8 +1663,6 @@ leave:
static void __exit ocfs2_exit(void)
{
- mlog_entry_void();
-
if (ocfs2_wq) {
flush_workqueue(ocfs2_wq);
destroy_workqueue(ocfs2_wq);
@@ -1692,18 +1677,14 @@ static void __exit ocfs2_exit(void)
unregister_filesystem(&ocfs2_fs_type);
exit_ocfs2_uptodate_cache();
-
- mlog_exit_void();
}
static void ocfs2_put_super(struct super_block *sb)
{
- mlog_entry("(0x%p)\n", sb);
+ trace_ocfs2_put_super(sb);
ocfs2_sync_blockdev(sb);
ocfs2_dismount_volume(sb, 0);
-
- mlog_exit_void();
}
static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -1715,7 +1696,7 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
struct buffer_head *bh = NULL;
struct inode *inode = NULL;
- mlog_entry("(%p, %p)\n", dentry->d_sb, buf);
+ trace_ocfs2_statfs(dentry->d_sb, buf);
osb = OCFS2_SB(dentry->d_sb);
@@ -1762,7 +1743,8 @@ bail:
if (inode)
iput(inode);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -1882,8 +1864,6 @@ static int ocfs2_mount_volume(struct super_block *sb)
int unlock_super = 0;
struct ocfs2_super *osb = OCFS2_SB(sb);
- mlog_entry_void();
-
if (ocfs2_is_hard_readonly(osb))
goto leave;
@@ -1928,7 +1908,6 @@ leave:
if (unlock_super)
ocfs2_super_unlock(osb, 1);
- mlog_exit(status);
return status;
}
@@ -1938,7 +1917,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
struct ocfs2_super *osb = NULL;
char nodestr[8];
- mlog_entry("(0x%p)\n", sb);
+ trace_ocfs2_dismount_volume(sb);
BUG_ON(!sb);
osb = OCFS2_SB(sb);
@@ -2090,8 +2069,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
struct ocfs2_super *osb;
u64 total_blocks;
- mlog_entry_void();
-
osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL);
if (!osb) {
status = -ENOMEM;
@@ -2155,7 +2132,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
status = -EINVAL;
goto bail;
}
- mlog(0, "max_slots for this device: %u\n", osb->max_slots);
ocfs2_orphan_scan_init(osb);
@@ -2294,7 +2270,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
osb->s_clustersize_bits =
le32_to_cpu(di->id2.i_super.s_clustersize_bits);
osb->s_clustersize = 1 << osb->s_clustersize_bits;
- mlog(0, "clusterbits=%d\n", osb->s_clustersize_bits);
if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE ||
osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) {
@@ -2333,11 +2308,10 @@ static int ocfs2_initialize_super(struct super_block *sb,
le64_to_cpu(di->id2.i_super.s_first_cluster_group);
osb->fs_generation = le32_to_cpu(di->i_fs_generation);
osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash);
- mlog(0, "vol_label: %s\n", osb->vol_label);
- mlog(0, "uuid: %s\n", osb->uuid_str);
- mlog(0, "root_blkno=%llu, system_dir_blkno=%llu\n",
- (unsigned long long)osb->root_blkno,
- (unsigned long long)osb->system_dir_blkno);
+ trace_ocfs2_initialize_super(osb->vol_label, osb->uuid_str,
+ (unsigned long long)osb->root_blkno,
+ (unsigned long long)osb->system_dir_blkno,
+ osb->s_clustersize_bits);
osb->osb_dlm_debug = ocfs2_new_dlm_debug();
if (!osb->osb_dlm_debug) {
@@ -2380,7 +2354,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
bail:
- mlog_exit(status);
return status;
}
@@ -2396,8 +2369,6 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
{
int status = -EAGAIN;
- mlog_entry_void();
-
if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE,
strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) {
/* We have to do a raw check of the feature here */
@@ -2452,7 +2423,8 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
}
out:
- mlog_exit(status);
+ if (status && status != -EAGAIN)
+ mlog_errno(status);
return status;
}
@@ -2465,8 +2437,6 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
* recover
* ourselves. */
- mlog_entry_void();
-
/* Init our journal object. */
status = ocfs2_journal_init(osb->journal, &dirty);
if (status < 0) {
@@ -2516,8 +2486,6 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
* ourselves as mounted. */
}
- mlog(0, "Journal loaded.\n");
-
status = ocfs2_load_local_alloc(osb);
if (status < 0) {
mlog_errno(status);
@@ -2549,7 +2517,8 @@ finally:
if (local_alloc)
kfree(local_alloc);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return status;
}
@@ -2561,8 +2530,6 @@ finally:
*/
static void ocfs2_delete_osb(struct ocfs2_super *osb)
{
- mlog_entry_void();
-
/* This function assumes that the caller has the main osb resource */
ocfs2_free_slot_info(osb);
@@ -2580,8 +2547,6 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb)
kfree(osb->uuid_str);
ocfs2_put_dlm_debug(osb->osb_dlm_debug);
memset(osb, 0, sizeof(struct ocfs2_super));
-
- mlog_exit_void();
}
/* Put OCFS2 into a readonly state, or (if the user specifies it),
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 9975457c981f..5d22872e2bb3 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -40,7 +40,6 @@
#include <linux/pagemap.h>
#include <linux/namei.h>
-#define MLOG_MASK_PREFIX ML_NAMEI
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -62,8 +61,6 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode,
char *link = NULL;
struct ocfs2_dinode *fe;
- mlog_entry_void();
-
status = ocfs2_read_inode_block(inode, bh);
if (status < 0) {
mlog_errno(status);
@@ -74,7 +71,6 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode,
fe = (struct ocfs2_dinode *) (*bh)->b_data;
link = (char *) fe->id2.i_symlink;
bail:
- mlog_exit(status);
return link;
}
@@ -88,8 +84,6 @@ static int ocfs2_readlink(struct dentry *dentry,
struct buffer_head *bh = NULL;
struct inode *inode = dentry->d_inode;
- mlog_entry_void();
-
link = ocfs2_fast_symlink_getlink(inode, &bh);
if (IS_ERR(link)) {
ret = PTR_ERR(link);
@@ -104,7 +98,8 @@ static int ocfs2_readlink(struct dentry *dentry,
brelse(bh);
out:
- mlog_exit(ret);
+ if (ret < 0)
+ mlog_errno(ret);
return ret;
}
@@ -117,8 +112,6 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
struct inode *inode = dentry->d_inode;
struct buffer_head *bh = NULL;
- mlog_entry_void();
-
BUG_ON(!ocfs2_inode_is_fast_symlink(inode));
target = ocfs2_fast_symlink_getlink(inode, &bh);
if (IS_ERR(target)) {
@@ -142,7 +135,8 @@ bail:
nd_set_link(nd, status ? ERR_PTR(status) : link);
brelse(bh);
- mlog_exit(status);
+ if (status)
+ mlog_errno(status);
return NULL;
}
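The symlink.c hunks strip the entry/exit logging from the old-style ->follow_link() path without changing its contract. That contract, as an illustrative sketch rather than ocfs2 source (compute_target() is a hypothetical helper), is roughly:

	#include <linux/fs.h>
	#include <linux/namei.h>

	static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
	{
		/* hypothetical helper resolving the symlink target string */
		char *link = compute_target(dentry->d_inode);

		/* hand the target (or an ERR_PTR on failure) to the VFS */
		nd_set_link(nd, link);

		/* the return value is an opaque cookie later passed to ->put_link() */
		return NULL;
	}

ocfs2_fast_follow_link() above follows this shape, stashing the error in nd_set_link() and always returning NULL.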
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index 902efb23b6a6..3d635f4bbb20 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/highmem.h>
-#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
#include "ocfs2.h"
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index a0a120e82b97..52eaf33d346f 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -54,14 +54,13 @@
#include <linux/buffer_head.h>
#include <linux/rbtree.h>
-#define MLOG_MASK_PREFIX ML_UPTODATE
-
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "uptodate.h"
+#include "ocfs2_trace.h"
struct ocfs2_meta_cache_item {
struct rb_node c_node;
@@ -152,8 +151,8 @@ static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
while ((node = rb_last(root)) != NULL) {
item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);
- mlog(0, "Purge item %llu\n",
- (unsigned long long) item->c_block);
+ trace_ocfs2_purge_copied_metadata_tree(
+ (unsigned long long) item->c_block);
rb_erase(&item->c_node, root);
kmem_cache_free(ocfs2_uptodate_cachep, item);
@@ -180,9 +179,9 @@ void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
to_purge = ci->ci_num_cached;
- mlog(0, "Purge %u %s items from Owner %llu\n", to_purge,
- tree ? "array" : "tree",
- (unsigned long long)ocfs2_metadata_cache_owner(ci));
+ trace_ocfs2_metadata_cache_purge(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ to_purge, tree);
/* If we're a tree, save off the root so that we can safely
* initialize the cache. We do the work to free tree members
@@ -249,10 +248,10 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
ocfs2_metadata_cache_lock(ci);
- mlog(0, "Owner %llu, query block %llu (inline = %u)\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long) bh->b_blocknr,
- !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
+ trace_ocfs2_buffer_cached_begin(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long) bh->b_blocknr,
+ !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
index = ocfs2_search_cache_array(ci, bh->b_blocknr);
@@ -261,7 +260,7 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
ocfs2_metadata_cache_unlock(ci);
- mlog(0, "index = %d, item = %p\n", index, item);
+ trace_ocfs2_buffer_cached_end(index, item);
return (index != -1) || (item != NULL);
}
@@ -306,8 +305,9 @@ static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
{
BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);
- mlog(0, "block %llu takes position %u\n", (unsigned long long) block,
- ci->ci_num_cached);
+ trace_ocfs2_append_cache_array(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)block, ci->ci_num_cached);
ci->ci_cache.ci_array[ci->ci_num_cached] = block;
ci->ci_num_cached++;
@@ -324,8 +324,9 @@ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
struct ocfs2_meta_cache_item *tmp;
- mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block,
- ci->ci_num_cached);
+ trace_ocfs2_insert_cache_tree(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)block, ci->ci_num_cached);
while(*p) {
parent = *p;
@@ -389,9 +390,9 @@ static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
tree[i] = NULL;
}
- mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- ci->ci_flags, ci->ci_num_cached);
+ trace_ocfs2_expand_cache(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ ci->ci_flags, ci->ci_num_cached);
}
/* Slow path function - memory allocation is necessary. See the
@@ -405,9 +406,9 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
{ NULL, };
- mlog(0, "Owner %llu, block %llu, expand = %d\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long)block, expand_tree);
+ trace_ocfs2_set_buffer_uptodate(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)block, expand_tree);
new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
if (!new) {
@@ -433,7 +434,6 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
ocfs2_metadata_cache_lock(ci);
if (ocfs2_insert_can_use_array(ci)) {
- mlog(0, "Someone cleared the tree underneath us\n");
/* Ok, items were removed from the cache in between
* locks. Detect this and revert back to the fast path */
ocfs2_append_cache_array(ci, block);
@@ -490,9 +490,9 @@ void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
if (ocfs2_buffer_cached(ci, bh))
return;
- mlog(0, "Owner %llu, inserting block %llu\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_set_buffer_uptodate_begin(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)bh->b_blocknr);
/* No need to recheck under spinlock - insertion is guarded by
* co_io_lock() */
@@ -542,8 +542,9 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
BUG_ON(index >= ci->ci_num_cached);
BUG_ON(!ci->ci_num_cached);
- mlog(0, "remove index %d (num_cached = %u\n", index,
- ci->ci_num_cached);
+ trace_ocfs2_remove_metadata_array(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ index, ci->ci_num_cached);
ci->ci_num_cached--;
@@ -559,8 +560,9 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item *item)
{
- mlog(0, "remove block %llu from tree\n",
- (unsigned long long) item->c_block);
+ trace_ocfs2_remove_metadata_tree(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)item->c_block);
rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
ci->ci_num_cached--;
@@ -573,10 +575,10 @@ static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item *item = NULL;
ocfs2_metadata_cache_lock(ci);
- mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n",
- (unsigned long long)ocfs2_metadata_cache_owner(ci),
- (unsigned long long) block, ci->ci_num_cached,
- ci->ci_flags & OCFS2_CACHE_FL_INLINE);
+ trace_ocfs2_remove_block_from_cache(
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long) block, ci->ci_num_cached,
+ ci->ci_flags);
if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
index = ocfs2_search_cache_array(ci, block);
@@ -626,9 +628,6 @@ int __init init_ocfs2_uptodate_cache(void)
if (!ocfs2_uptodate_cachep)
return -ENOMEM;
- mlog(0, "%u inlined cache items per inode.\n",
- OCFS2_CACHE_INFO_MAX_ARRAY);
-
return 0;
}
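The uptodate.c cache keeps a small inline array per inode and promotes it to an rb-tree once the array fills (ocfs2_expand_cache() above). A self-contained sketch of the insert pattern that __ocfs2_insert_cache_tree() follows, assuming no duplicate keys, which the callers guarantee by checking ocfs2_buffer_cached() first:

	#include <linux/rbtree.h>

	struct cache_item {			/* mirrors ocfs2_meta_cache_item */
		struct rb_node c_node;
		sector_t c_block;
	};

	static void cache_tree_insert(struct rb_root *root, struct cache_item *new)
	{
		struct rb_node **p = &root->rb_node;
		struct rb_node *parent = NULL;
		struct cache_item *tmp;

		/* walk down to the leaf position, ordered by block number */
		while (*p) {
			parent = *p;
			tmp = rb_entry(parent, struct cache_item, c_node);
			if (new->c_block < tmp->c_block)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}

		/* link the new node at the leaf and rebalance */
		rb_link_node(&new->c_node, parent, p);
		rb_insert_color(&new->c_node, root);
	}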
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 6bb602486c6b..81ecf9c0bf0a 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -37,7 +37,6 @@
#include <linux/string.h>
#include <linux/security.h>
-#define MLOG_MASK_PREFIX ML_XATTR
#include <cluster/masklog.h>
#include "ocfs2.h"
@@ -57,6 +56,7 @@
#include "xattr.h"
#include "refcounttree.h"
#include "acl.h"
+#include "ocfs2_trace.h"
struct ocfs2_xattr_def_value_root {
struct ocfs2_xattr_value_root xv;
@@ -474,8 +474,7 @@ static int ocfs2_validate_xattr_block(struct super_block *sb,
struct ocfs2_xattr_block *xb =
(struct ocfs2_xattr_block *)bh->b_data;
- mlog(0, "Validating xattr block %llu\n",
- (unsigned long long)bh->b_blocknr);
+ trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
@@ -715,11 +714,11 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
struct ocfs2_extent_tree et;
- mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add);
-
ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
while (clusters_to_add) {
+ trace_ocfs2_xattr_extend_allocation(clusters_to_add);
+
status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
@@ -754,8 +753,6 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
*/
BUG_ON(why == RESTART_META);
- mlog(0, "restarting xattr value extension for %u"
- " clusters,.\n", clusters_to_add);
credits = ocfs2_calc_extend_credits(inode->i_sb,
&vb->vb_xv->xr_list,
clusters_to_add);
@@ -3246,8 +3243,8 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
}
meta_add += extra_meta;
- mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
- "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits);
+ trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add,
+ clusters_add, *credits);
if (meta_add) {
ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
@@ -3557,7 +3554,7 @@ int ocfs2_xattr_set(struct inode *inode,
down_write(&OCFS2_I(inode)->ip_xattr_sem);
/*
* Scan inode and external block to find the same name
- * extended attribute and collect search infomation.
+ * extended attribute and collect search information.
*/
ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
if (ret)
@@ -3581,7 +3578,7 @@ int ocfs2_xattr_set(struct inode *inode,
goto cleanup;
}
- /* Check whether the value is refcounted and do some prepartion. */
+ /* Check whether the value is refcounted and do some preparation. */
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
(!xis.not_found || !xbs.not_found)) {
ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
@@ -3887,8 +3884,10 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
if (found) {
xs->here = &xs->header->xh_entries[index];
- mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name,
- (unsigned long long)bucket_blkno(xs->bucket), index);
+ trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno,
+ name, name_index, name_hash,
+ (unsigned long long)bucket_blkno(xs->bucket),
+ index);
} else
ret = -ENODATA;
@@ -3915,8 +3914,10 @@ static int ocfs2_xattr_index_block_find(struct inode *inode,
if (le16_to_cpu(el->l_next_free_rec) == 0)
return -ENODATA;
- mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n",
- name, name_hash, name_index);
+ trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno,
+ name, name_index, name_hash,
+ (unsigned long long)root_bh->b_blocknr,
+ -1);
ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
&num_clusters, el);
@@ -3927,9 +3928,10 @@ static int ocfs2_xattr_index_block_find(struct inode *inode,
BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);
- mlog(0, "find xattr extent rec %u clusters from %llu, the first hash "
- "in the rec is %u\n", num_clusters, (unsigned long long)p_blkno,
- first_hash);
+ trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno,
+ name, name_index, first_hash,
+ (unsigned long long)p_blkno,
+ num_clusters);
ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
p_blkno, first_hash, num_clusters, xs);
@@ -3955,8 +3957,9 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode,
return -ENOMEM;
}
- mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n",
- clusters, (unsigned long long)blkno);
+ trace_ocfs2_iterate_xattr_buckets(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)blkno, clusters);
for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
ret = ocfs2_read_xattr_bucket(bucket, blkno);
@@ -3972,8 +3975,7 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode,
if (i == 0)
num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);
- mlog(0, "iterating xattr bucket %llu, first hash %u\n",
- (unsigned long long)blkno,
+ trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno,
le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
if (func) {
ret = func(inode, bucket, para);
@@ -4173,9 +4175,9 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
char *src = xb_bh->b_data;
char *target = bucket_block(bucket, blks - 1);
- mlog(0, "cp xattr from block %llu to bucket %llu\n",
- (unsigned long long)xb_bh->b_blocknr,
- (unsigned long long)bucket_blkno(bucket));
+ trace_ocfs2_cp_xattr_block_to_bucket_begin(
+ (unsigned long long)xb_bh->b_blocknr,
+ (unsigned long long)bucket_blkno(bucket));
for (i = 0; i < blks; i++)
memset(bucket_block(bucket, i), 0, blocksize);
@@ -4211,8 +4213,7 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
for (i = 0; i < count; i++)
le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);
- mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n",
- offset, size, off_change);
+ trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);
sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
cmp_xe, swap_xe);
@@ -4261,8 +4262,8 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
struct ocfs2_xattr_tree_root *xr;
u16 xb_flags = le16_to_cpu(xb->xb_flags);
- mlog(0, "create xattr index block for %llu\n",
- (unsigned long long)xb_bh->b_blocknr);
+ trace_ocfs2_xattr_create_index_block_begin(
+ (unsigned long long)xb_bh->b_blocknr);
BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
BUG_ON(!xs->bucket);
@@ -4295,8 +4296,7 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
*/
blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
- mlog(0, "allocate 1 cluster from %llu to xattr block\n",
- (unsigned long long)blkno);
+ trace_ocfs2_xattr_create_index_block((unsigned long long)blkno);
ret = ocfs2_init_xattr_bucket(xs->bucket, blkno);
if (ret) {
@@ -4400,8 +4400,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
entries = (char *)xh->xh_entries;
xh_free_start = le16_to_cpu(xh->xh_free_start);
- mlog(0, "adjust xattr bucket in %llu, count = %u, "
- "xh_free_start = %u, xh_name_value_len = %u.\n",
+ trace_ocfs2_defrag_xattr_bucket(
(unsigned long long)blkno, le16_to_cpu(xh->xh_count),
xh_free_start, le16_to_cpu(xh->xh_name_value_len));
@@ -4503,8 +4502,9 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);
- mlog(0, "move half of xattrs in cluster %llu to %llu\n",
- (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno);
+ trace_ocfs2_mv_xattr_bucket_cross_cluster(
+ (unsigned long long)last_cluster_blkno,
+ (unsigned long long)new_blkno);
ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
last_cluster_blkno, new_blkno,
@@ -4614,8 +4614,8 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
struct ocfs2_xattr_entry *xe;
int blocksize = inode->i_sb->s_blocksize;
- mlog(0, "move some of xattrs from bucket %llu to %llu\n",
- (unsigned long long)blk, (unsigned long long)new_blk);
+ trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk,
+ (unsigned long long)new_blk);
s_bucket = ocfs2_xattr_bucket_new(inode);
t_bucket = ocfs2_xattr_bucket_new(inode);
@@ -4714,9 +4714,9 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
*/
xe = &xh->xh_entries[start];
len = sizeof(struct ocfs2_xattr_entry) * (count - start);
- mlog(0, "mv xattr entry len %d from %d to %d\n", len,
- (int)((char *)xe - (char *)xh),
- (int)((char *)xh->xh_entries - (char *)xh));
+ trace_ocfs2_divide_xattr_bucket_move(len,
+ (int)((char *)xe - (char *)xh),
+ (int)((char *)xh->xh_entries - (char *)xh));
memmove((char *)xh->xh_entries, (char *)xe, len);
xe = &xh->xh_entries[count - start];
len = sizeof(struct ocfs2_xattr_entry) * start;
@@ -4788,9 +4788,9 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode,
BUG_ON(s_blkno == t_blkno);
- mlog(0, "cp bucket %llu to %llu, target is %d\n",
- (unsigned long long)s_blkno, (unsigned long long)t_blkno,
- t_is_new);
+ trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno,
+ (unsigned long long)t_blkno,
+ t_is_new);
s_bucket = ocfs2_xattr_bucket_new(inode);
t_bucket = ocfs2_xattr_bucket_new(inode);
@@ -4862,8 +4862,8 @@ static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
struct ocfs2_xattr_bucket *old_first, *new_first;
- mlog(0, "mv xattrs from cluster %llu to %llu\n",
- (unsigned long long)last_blk, (unsigned long long)to_blk);
+ trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk,
+ (unsigned long long)to_blk);
BUG_ON(start_bucket >= num_buckets);
if (start_bucket) {
@@ -5013,9 +5013,9 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
{
int ret;
- mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n",
- (unsigned long long)bucket_blkno(first), prev_clusters,
- (unsigned long long)new_blk);
+ trace_ocfs2_adjust_xattr_cross_cluster(
+ (unsigned long long)bucket_blkno(first),
+ (unsigned long long)new_blk, prev_clusters);
if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
@@ -5088,10 +5088,10 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_extent_tree et;
- mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, "
- "previous xattr blkno = %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- prev_cpos, (unsigned long long)bucket_blkno(first));
+ trace_ocfs2_add_new_xattr_cluster_begin(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)bucket_blkno(first),
+ prev_cpos, prev_clusters);
ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
@@ -5113,8 +5113,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
BUG_ON(num_bits > clusters_to_add);
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n",
- num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits);
if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
(prev_clusters + num_bits) << osb->s_clustersize_bits <=
@@ -5130,8 +5129,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
*/
v_start = prev_cpos + prev_clusters;
*num_clusters = prev_clusters + num_bits;
- mlog(0, "Add contiguous %u clusters to previous extent rec.\n",
- num_bits);
} else {
ret = ocfs2_adjust_xattr_cross_cluster(inode,
handle,
@@ -5147,8 +5144,8 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
}
}
- mlog(0, "Insert %u clusters at block %llu for xattr at %u\n",
- num_bits, (unsigned long long)block, v_start);
+ trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block,
+ v_start, num_bits);
ret = ocfs2_insert_extent(handle, &et, v_start, block,
num_bits, 0, ctxt->meta_ac);
if (ret < 0) {
@@ -5183,9 +5180,9 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode,
u64 end_blk;
u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);
- mlog(0, "extend xattr bucket in %llu, xattr extend rec starting "
- "from %llu, len = %u\n", (unsigned long long)target_blk,
- (unsigned long long)bucket_blkno(first), num_clusters);
+ trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk,
+ (unsigned long long)bucket_blkno(first),
+ num_clusters, new_bucket);
/* The extent must have room for an additional bucket */
BUG_ON(new_bucket >=
@@ -5265,8 +5262,8 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode,
/* The bucket at the front of the extent */
struct ocfs2_xattr_bucket *first;
- mlog(0, "Add new xattr bucket starting from %llu\n",
- (unsigned long long)bucket_blkno(target));
+ trace_ocfs2_add_new_xattr_bucket(
+ (unsigned long long)bucket_blkno(target));
/* The first bucket of the original extent */
first = ocfs2_xattr_bucket_new(inode);
@@ -5382,8 +5379,8 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
* modified something. We have to assume they did, and dirty
* the whole bucket. This leaves us in a consistent state.
*/
- mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n",
- xe_off, (unsigned long long)bucket_blkno(bucket), len);
+ trace_ocfs2_xattr_bucket_value_truncate(
+ (unsigned long long)bucket_blkno(bucket), xe_off, len);
ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
if (ret) {
mlog_errno(ret);
@@ -5433,8 +5430,9 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode,
ocfs2_init_dealloc_ctxt(&dealloc);
- mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n",
- cpos, len, (unsigned long long)blkno);
+ trace_ocfs2_rm_xattr_cluster(
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)blkno, cpos, len);
ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
len);
@@ -5538,7 +5536,7 @@ static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
int ret;
struct ocfs2_xa_loc loc;
- mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name);
+ trace_ocfs2_xattr_set_entry_bucket(xi->xi_name);
ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
xs->not_found ? NULL : xs->here);
@@ -5570,7 +5568,6 @@ static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
out:
- mlog_exit(ret);
return ret;
}
@@ -5581,7 +5578,7 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
{
int ret;
- mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name);
+ trace_ocfs2_xattr_set_entry_index_block(xi->xi_name);
ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
if (!ret)
@@ -5637,7 +5634,6 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
mlog_errno(ret);
out:
- mlog_exit(ret);
return ret;
}
@@ -6041,9 +6037,9 @@ static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
p = &refcount;
- mlog(0, "refcount bucket %llu, count = %u\n",
- (unsigned long long)bucket_blkno(bucket),
- le16_to_cpu(xh->xh_count));
+ trace_ocfs2_xattr_bucket_value_refcount(
+ (unsigned long long)bucket_blkno(bucket),
+ le16_to_cpu(xh->xh_count));
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
@@ -6339,8 +6335,8 @@ static int ocfs2_reflink_xattr_header(handle_t *handle,
u32 clusters, cpos, p_cluster, num_clusters;
unsigned int ext_flags = 0;
- mlog(0, "reflink xattr in container %llu, count = %u\n",
- (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count));
+ trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr,
+ le16_to_cpu(xh->xh_count));
last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
@@ -6540,8 +6536,8 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
goto out;
}
- mlog(0, "create new xattr block for inode %llu, index = %d\n",
- (unsigned long long)fe_bh->b_blocknr, indexed);
+ trace_ocfs2_create_empty_xattr_block(
+ (unsigned long long)fe_bh->b_blocknr, indexed);
ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
ret_bh);
if (ret)
@@ -6952,8 +6948,8 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
if (ret)
mlog_errno(ret);
- mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
- (unsigned long long)new_blkno, num_clusters, reflink_cpos);
+ trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno,
+ num_clusters, reflink_cpos);
len -= num_clusters;
blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
@@ -6982,8 +6978,7 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_extent_tree et;
- mlog(0, "reflink xattr buckets %llu len %u\n",
- (unsigned long long)blkno, len);
+ trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len);
ocfs2_init_xattr_tree_extent_tree(&et,
INODE_CACHE(args->reflink->new_inode),
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 8a6d34fa668a..d738a7e493dd 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -372,7 +372,6 @@ const struct address_space_operations omfs_aops = {
.readpages = omfs_readpages,
.writepage = omfs_writepage,
.writepages = omfs_writepages,
- .sync_page = block_sync_page,
.write_begin = omfs_write_begin,
.write_end = generic_write_end,
.bmap = omfs_bmap,
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 9c21119512b9..d545e97d99c3 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -290,7 +290,8 @@ ssize_t part_inflight_show(struct device *dev,
{
struct hd_struct *p = dev_to_part(dev);
- return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
+ return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
+ atomic_read(&p->in_flight[1]));
}
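part_inflight_show() now reads the counters with atomic_read(), which implies a companion block-layer change (not shown in this diff) making hd_struct's in_flight an atomic_t pair so sysfs can sample it locklessly. The updater side would then look roughly like the following hypothetical helpers:

	/* hypothetical shape of the genhd helpers after the companion change */
	static inline void part_inc_in_flight(struct hd_struct *part, int rw)
	{
		atomic_inc(&part->in_flight[rw]);	/* rw: 0 = read, 1 = write */
	}

	static inline void part_dec_in_flight(struct hd_struct *part, int rw)
	{
		atomic_dec(&part->in_flight[rw]);
	}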
#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -499,7 +500,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);
- /* suppress uevent if the disk supresses it */
+ /* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
@@ -584,7 +585,7 @@ rescan:
/*
* If any partition code tried to read beyond EOD, try
* unlocking native capacity even if partition table is
- * sucessfully read as we could be missing some partitions.
+ * successfully read as we could be missing some partitions.
*/
if (state->access_beyond_eod) {
printk(KERN_WARNING
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7c99c1cf7e5c..5e4f776b0917 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -489,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
vsize,
mm ? get_mm_rss(mm) : 0,
rsslim,
- mm ? mm->start_code : 0,
- mm ? mm->end_code : 0,
+ mm ? (permitted ? mm->start_code : 1) : 0,
+ mm ? (permitted ? mm->end_code : 1) : 0,
(permitted && mm) ? mm->start_stack : 0,
esp,
eip,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d49c4b5d2c3e..dd6628d3ba42 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -191,17 +191,20 @@ static int proc_root_link(struct inode *inode, struct path *path)
return result;
}
-/*
- * Return zero if current may access user memory in @task, -error if not.
- */
-static int check_mem_permission(struct task_struct *task)
+static struct mm_struct *__check_mem_permission(struct task_struct *task)
{
+ struct mm_struct *mm;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ return ERR_PTR(-EINVAL);
+
/*
* A task can always look at itself, in case it chooses
* to use system calls instead of load instructions.
*/
if (task == current)
- return 0;
+ return mm;
/*
* If current is actively ptrace'ing, and would also be
@@ -213,27 +216,53 @@ static int check_mem_permission(struct task_struct *task)
match = (tracehook_tracer_task(task) == current);
rcu_read_unlock();
if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
- return 0;
+ return mm;
}
/*
- * Noone else is allowed.
+ * No one else is allowed.
*/
- return -EPERM;
+ mmput(mm);
+ return ERR_PTR(-EPERM);
+}
+
+/*
+ * If current may access user memory in @task return a reference to the
+ * corresponding mm, otherwise ERR_PTR.
+ */
+static struct mm_struct *check_mem_permission(struct task_struct *task)
+{
+ struct mm_struct *mm;
+ int err;
+
+ /*
+ * Avoid racing if task exec's as we might get a new mm but validate
+ * against old credentials.
+ */
+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ mm = __check_mem_permission(task);
+ mutex_unlock(&task->signal->cred_guard_mutex);
+
+ return mm;
}
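check_mem_permission() now follows the kernel's ERR_PTR convention: the mm_struct pointer and the error code share one return value, so callers no longer need a separate permission check racing against exec. The callers below (mem_read()/mem_write()) unpack it as in this sketch:

	static int example_read_mem(struct task_struct *task)
	{
		struct mm_struct *mm;

		mm = check_mem_permission(task);
		if (IS_ERR(mm))
			return PTR_ERR(mm);	/* -EINVAL, -EPERM or -EINTR */

		/* ... access_remote_vm(mm, addr, buf, len, 0) ... */

		mmput(mm);	/* drop the reference taken by get_task_mm() */
		return 0;
	}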
struct mm_struct *mm_for_maps(struct task_struct *task)
{
struct mm_struct *mm;
+ int err;
- if (mutex_lock_killable(&task->signal->cred_guard_mutex))
- return NULL;
+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ if (err)
+ return ERR_PTR(err);
mm = get_task_mm(task);
if (mm && mm != current->mm &&
!ptrace_may_access(task, PTRACE_MODE_READ)) {
mmput(mm);
- mm = NULL;
+ mm = ERR_PTR(-EACCES);
}
mutex_unlock(&task->signal->cred_guard_mutex);
@@ -279,9 +308,9 @@ out:
static int proc_pid_auxv(struct task_struct *task, char *buffer)
{
- int res = 0;
- struct mm_struct *mm = get_task_mm(task);
- if (mm) {
+ struct mm_struct *mm = mm_for_maps(task);
+ int res = PTR_ERR(mm);
+ if (mm && !IS_ERR(mm)) {
unsigned int nwords = 0;
do {
nwords += 2;
@@ -318,6 +347,23 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
}
#endif /* CONFIG_KALLSYMS */
+static int lock_trace(struct task_struct *task)
+{
+ int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ if (err)
+ return err;
+ if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
+ mutex_unlock(&task->signal->cred_guard_mutex);
+ return -EPERM;
+ }
+ return 0;
+}
+
+static void unlock_trace(struct task_struct *task)
+{
+ mutex_unlock(&task->signal->cred_guard_mutex);
+}
+
#ifdef CONFIG_STACKTRACE
#define MAX_STACK_TRACE_DEPTH 64
@@ -327,6 +373,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
{
struct stack_trace trace;
unsigned long *entries;
+ int err;
int i;
entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
@@ -337,15 +384,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
trace.max_entries = MAX_STACK_TRACE_DEPTH;
trace.entries = entries;
trace.skip = 0;
- save_stack_trace_tsk(task, &trace);
- for (i = 0; i < trace.nr_entries; i++) {
- seq_printf(m, "[<%p>] %pS\n",
- (void *)entries[i], (void *)entries[i]);
+ err = lock_trace(task);
+ if (!err) {
+ save_stack_trace_tsk(task, &trace);
+
+ for (i = 0; i < trace.nr_entries; i++) {
+ seq_printf(m, "[<%pK>] %pS\n",
+ (void *)entries[i], (void *)entries[i]);
+ }
+ unlock_trace(task);
}
kfree(entries);
- return 0;
+ return err;
}
#endif
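Besides gating the dump behind lock_trace(), proc_pid_stack() switches the frame addresses from %p to %pK, which honours /proc/sys/kernel/kptr_restrict: with the restriction enabled, unprivileged readers of /proc/<pid>/stack see zeroed addresses instead of raw kernel pointers. A sketch of the behavior:

	pr_info("frame at %pK\n", addr);
	/* kptr_restrict=0: prints the real address
	 * kptr_restrict=1: prints zeroes unless the reader has CAP_SYSLOG
	 * kptr_restrict=2: always prints zeroes */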
@@ -508,18 +560,22 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
{
long nr;
unsigned long args[6], sp, pc;
+ int res = lock_trace(task);
+ if (res)
+ return res;
if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
- return sprintf(buffer, "running\n");
-
- if (nr < 0)
- return sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
-
- return sprintf(buffer,
+ res = sprintf(buffer, "running\n");
+ else if (nr < 0)
+ res = sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
+ else
+ res = sprintf(buffer,
"%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
nr,
args[0], args[1], args[2], args[3], args[4], args[5],
sp, pc);
+ unlock_trace(task);
+ return res;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
@@ -775,18 +831,14 @@ static ssize_t mem_read(struct file * file, char __user * buf,
if (!task)
goto out_no_task;
- if (check_mem_permission(task))
- goto out;
-
ret = -ENOMEM;
page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
goto out;
- ret = 0;
-
- mm = get_task_mm(task);
- if (!mm)
+ mm = check_mem_permission(task);
+ ret = PTR_ERR(mm);
+ if (IS_ERR(mm))
goto out_free;
ret = -EIO;
@@ -800,8 +852,8 @@ static ssize_t mem_read(struct file * file, char __user * buf,
int this_len, retval;
this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
- retval = access_process_vm(task, src, page, this_len, 0);
- if (!retval || check_mem_permission(task)) {
+ retval = access_remote_vm(mm, src, page, this_len, 0);
+ if (!retval) {
if (!ret)
ret = -EIO;
break;
@@ -829,10 +881,6 @@ out_no_task:
return ret;
}
-#define mem_write NULL
-
-#ifndef mem_write
-/* This is a security hazard */
static ssize_t mem_write(struct file * file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -840,18 +888,25 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
char *page;
struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
unsigned long dst = *ppos;
+ struct mm_struct *mm;
copied = -ESRCH;
if (!task)
goto out_no_task;
- if (check_mem_permission(task))
- goto out;
+ mm = check_mem_permission(task);
+ copied = PTR_ERR(mm);
+ if (IS_ERR(mm))
+ goto out_task;
+
+ copied = -EIO;
+ if (file->private_data != (void *)((long)current->self_exec_id))
+ goto out_mm;
copied = -ENOMEM;
page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
- goto out;
+ goto out_mm;
copied = 0;
while (count > 0) {
@@ -862,7 +917,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
copied = -EFAULT;
break;
}
- retval = access_process_vm(task, dst, page, this_len, 1);
+ retval = access_remote_vm(mm, dst, page, this_len, 1);
if (!retval) {
if (!copied)
copied = -EIO;
@@ -875,12 +930,13 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
}
*ppos = dst;
free_page((unsigned long) page);
-out:
+out_mm:
+ mmput(mm);
+out_task:
put_task_struct(task);
out_no_task:
return copied;
}
-#endif
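[Editor's sketch, not part of the patch series: the reworked mem_read() and
mem_write() depend on the kernel's error-pointer convention, since
check_mem_permission() now returns either a valid mm pointer or an errno
folded into the pointer value. A minimal userspace re-implementation of
ERR_PTR()/IS_ERR()/PTR_ERR(), with a hypothetical get_mm() in place of
check_mem_permission():]

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

/* Errno values occupy the top 4095 addresses, which no valid
 * allocation can ever return, so one pointer carries both cases. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *get_mm(int allowed)
{
	if (!allowed)
		return ERR_PTR(-EPERM);
	return malloc(16);	/* a real resource on success */
}

int main(void)
{
	void *mm = get_mm(0);

	if (IS_ERR(mm)) {
		printf("error: %ld\n", PTR_ERR(mm));	/* -EPERM, i.e. -1 */
		return 1;
	}
	free(mm);
	return 0;
}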
loff_t mem_lseek(struct file *file, loff_t offset, int orig)
{
@@ -917,20 +973,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
if (!task)
goto out_no_task;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out;
-
ret = -ENOMEM;
page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
goto out;
- ret = 0;
- mm = get_task_mm(task);
- if (!mm)
+ mm = mm_for_maps(task);
+ ret = PTR_ERR(mm);
+ if (!mm || IS_ERR(mm))
goto out_free;
+ ret = 0;
while (count > 0) {
int this_len, retval, max_len;
@@ -2748,8 +2802,12 @@ static int proc_tgid_io_accounting(struct task_struct *task, char *buffer)
static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
- seq_printf(m, "%08x\n", task->personality);
- return 0;
+ int err = lock_trace(task);
+ if (!err) {
+ seq_printf(m, "%08x\n", task->personality);
+ unlock_trace(task);
+ }
+ return err;
}
/*
@@ -2768,7 +2826,7 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("environ", S_IRUSR, proc_environ_operations),
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
- ONE("personality", S_IRUSR, proc_pid_personality),
+ ONE("personality", S_IRUGO, proc_pid_personality),
INF("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
@@ -2778,7 +2836,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#endif
REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
- INF("syscall", S_IRUSR, proc_pid_syscall),
+ INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
INF("cmdline", S_IRUGO, proc_pid_cmdline),
ONE("stat", S_IRUGO, proc_tgid_stat),
@@ -2797,7 +2855,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
REG("smaps", S_IRUGO, proc_smaps_operations),
- REG("pagemap", S_IRUSR, proc_pagemap_operations),
+ REG("pagemap", S_IRUGO, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
@@ -2806,7 +2864,7 @@ static const struct pid_entry tgid_base_stuff[] = {
INF("wchan", S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
- ONE("stack", S_IRUSR, proc_pid_stack),
+ ONE("stack", S_IRUGO, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
INF("schedstat", S_IRUGO, proc_pid_schedstat),
@@ -3108,14 +3166,14 @@ static const struct pid_entry tid_base_stuff[] = {
REG("environ", S_IRUSR, proc_environ_operations),
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
- ONE("personality", S_IRUSR, proc_pid_personality),
+ ONE("personality", S_IRUGO, proc_pid_personality),
INF("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
- INF("syscall", S_IRUSR, proc_pid_syscall),
+ INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
INF("cmdline", S_IRUGO, proc_pid_cmdline),
ONE("stat", S_IRUGO, proc_tid_stat),
@@ -3133,7 +3191,7 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
REG("smaps", S_IRUGO, proc_smaps_operations),
- REG("pagemap", S_IRUSR, proc_pagemap_operations),
+ REG("pagemap", S_IRUGO, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
@@ -3142,7 +3200,7 @@ static const struct pid_entry tid_base_stuff[] = {
INF("wchan", S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
- ONE("stack", S_IRUSR, proc_pid_stack),
+ ONE("stack", S_IRUGO, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
INF("schedstat", S_IRUGO, proc_pid_schedstat),
@@ -3161,7 +3219,7 @@ static const struct pid_entry tid_base_stuff[] = {
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
- REG("sessionid", S_IRUSR, proc_sessionid_operations),
+ REG("sessionid", S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 01e07f2a188f..f1281339b6fa 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -28,7 +28,7 @@
DEFINE_SPINLOCK(proc_subdir_lock);
-static int proc_match(int len, const char *name, struct proc_dir_entry *de)
+static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
{
if (de->namelen != len)
return 0;
@@ -303,7 +303,7 @@ static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
{
const char *cp = name, *next;
struct proc_dir_entry *de;
- int len;
+ unsigned int len;
de = *ret;
if (!de)
@@ -602,7 +602,7 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
- int len;
+ unsigned int len;
/* make sure name is valid */
if (!name || !strlen(name)) goto out;
@@ -786,7 +786,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
struct proc_dir_entry **p;
struct proc_dir_entry *de = NULL;
const char *fn = name;
- int len;
+ unsigned int len;
spin_lock(&proc_subdir_lock);
if (__xlate_proc_name(name, &parent, &fn) != 0) {
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d6a7ca1fdac5..d15aa1b1cc8f 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -46,8 +46,6 @@ static void proc_evict_inode(struct inode *inode)
}
}
-struct vfsmount *proc_mnt;
-
static struct kmem_cache * proc_inode_cachep;
static struct inode *proc_alloc_inode(struct super_block *sb)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 9ad561ded409..c03e8d3a3a5b 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -107,7 +107,6 @@ static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
}
void pde_put(struct proc_dir_entry *pde);
-extern struct vfsmount *proc_mnt;
int proc_fill_super(struct super_block *);
struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index ef9fa8e24ad6..a9000e9cfee5 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -43,17 +43,6 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
struct pid_namespace *ns;
struct proc_inode *ei;
- if (proc_mnt) {
- /* Seed the root directory with a pid so it doesn't need
- * to be special in base.c. I would do this earlier but
- * the only task alive when /proc is mounted the first time
- * is the init_task and it doesn't have any pids.
- */
- ei = PROC_I(proc_mnt->mnt_sb->s_root->d_inode);
- if (!ei->pid)
- ei->pid = find_get_pid(1);
- }
-
if (flags & MS_KERNMOUNT)
ns = (struct pid_namespace *)data;
else
@@ -71,16 +60,16 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
return ERR_PTR(err);
}
- ei = PROC_I(sb->s_root->d_inode);
- if (!ei->pid) {
- rcu_read_lock();
- ei->pid = get_pid(find_pid_ns(1, ns));
- rcu_read_unlock();
- }
-
sb->s_flags |= MS_ACTIVE;
}
+ ei = PROC_I(sb->s_root->d_inode);
+ if (!ei->pid) {
+ rcu_read_lock();
+ ei->pid = get_pid(find_pid_ns(1, ns));
+ rcu_read_unlock();
+ }
+
return dget(sb->s_root);
}
@@ -101,19 +90,20 @@ static struct file_system_type proc_fs_type = {
void __init proc_root_init(void)
{
+ struct vfsmount *mnt;
int err;
proc_init_inodecache();
err = register_filesystem(&proc_fs_type);
if (err)
return;
- proc_mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
- if (IS_ERR(proc_mnt)) {
+ mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
+ if (IS_ERR(mnt)) {
unregister_filesystem(&proc_fs_type);
return;
}
- init_pid_ns.proc_mnt = proc_mnt;
+ init_pid_ns.proc_mnt = mnt;
proc_symlink("mounts", NULL, "self/mounts");
proc_net_init();
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 60b914860f81..2e7addfd9803 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
@@ -7,6 +8,7 @@
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
+#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
@@ -119,14 +121,14 @@ static void *m_start(struct seq_file *m, loff_t *pos)
priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
if (!priv->task)
- return NULL;
+ return ERR_PTR(-ESRCH);
mm = mm_for_maps(priv->task);
- if (!mm)
- return NULL;
+ if (!mm || IS_ERR(mm))
+ return mm;
down_read(&mm->mmap_sem);
- tail_vma = get_gate_vma(priv->task);
+ tail_vma = get_gate_vma(priv->task->mm);
priv->tail_vma = tail_vma;
/* Start with last addr hint */
@@ -180,7 +182,8 @@ static void m_stop(struct seq_file *m, void *v)
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
- vma_stop(priv, vma);
+ if (!IS_ERR(vma))
+ vma_stop(priv, vma);
if (priv->task)
put_task_struct(priv->task);
}
@@ -249,8 +252,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
const char *name = arch_vma_name(vma);
if (!name) {
if (mm) {
- if (vma->vm_start <= mm->start_brk &&
- vma->vm_end >= mm->brk) {
+ if (vma->vm_start <= mm->brk &&
+ vma->vm_end >= mm->start_brk) {
name = "[heap]";
} else if (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack) {
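[Editor's note: the corrected [heap] test above is an interval-overlap check.
The old comparison only matched VMAs that fully contained the brk range; the
new one matches any VMA that intersects it. An illustrative sketch of the
predicate, mirroring the kernel's comparisons:]

#include <stdio.h>

/* Ranges [a0, a1] and [b0, b1] overlap iff a0 <= b1 && a1 >= b0, which is
 * exactly vm_start <= brk && vm_end >= start_brk in the hunk above. */
static int ranges_overlap(unsigned long a0, unsigned long a1,
			  unsigned long b0, unsigned long b1)
{
	return a0 <= b1 && a1 >= b0;
}

int main(void)
{
	unsigned long start_brk = 0x1000, brk = 0x3000;

	/* A VMA covering only part of the brk range still counts as heap. */
	printf("%d\n", ranges_overlap(0x2000, 0x2800, start_brk, brk)); /* 1 */
	printf("%d\n", ranges_overlap(0x4000, 0x5000, start_brk, brk)); /* 0 */
	return 0;
}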
@@ -277,7 +280,8 @@ static int show_map(struct seq_file *m, void *v)
show_map_vma(m, vma);
if (m->count < m->size) /* vma is copied successfully */
- m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
+ m->version = (vma != get_gate_vma(task->mm))
+ ? vma->vm_start : 0;
return 0;
}
@@ -329,58 +333,86 @@ struct mem_size_stats {
unsigned long private_dirty;
unsigned long referenced;
unsigned long anonymous;
+ unsigned long anonymous_thp;
unsigned long swap;
u64 pss;
};
-static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- struct mm_walk *walk)
+
+static void smaps_pte_entry(pte_t ptent, unsigned long addr,
+ unsigned long ptent_size, struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
- pte_t *pte, ptent;
- spinlock_t *ptl;
struct page *page;
int mapcount;
- pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- for (; addr != end; pte++, addr += PAGE_SIZE) {
- ptent = *pte;
-
- if (is_swap_pte(ptent)) {
- mss->swap += PAGE_SIZE;
- continue;
- }
+ if (is_swap_pte(ptent)) {
+ mss->swap += ptent_size;
+ return;
+ }
- if (!pte_present(ptent))
- continue;
+ if (!pte_present(ptent))
+ return;
+
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page)
+ return;
+
+ if (PageAnon(page))
+ mss->anonymous += ptent_size;
+
+ mss->resident += ptent_size;
+ /* Accumulate the size in pages that have been accessed. */
+ if (pte_young(ptent) || PageReferenced(page))
+ mss->referenced += ptent_size;
+ mapcount = page_mapcount(page);
+ if (mapcount >= 2) {
+ if (pte_dirty(ptent) || PageDirty(page))
+ mss->shared_dirty += ptent_size;
+ else
+ mss->shared_clean += ptent_size;
+ mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
+ } else {
+ if (pte_dirty(ptent) || PageDirty(page))
+ mss->private_dirty += ptent_size;
+ else
+ mss->private_clean += ptent_size;
+ mss->pss += (ptent_size << PSS_SHIFT);
+ }
+}
- page = vm_normal_page(vma, addr, ptent);
- if (!page)
- continue;
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct mem_size_stats *mss = walk->private;
+ struct vm_area_struct *vma = mss->vma;
+ pte_t *pte;
+ spinlock_t *ptl;
- if (PageAnon(page))
- mss->anonymous += PAGE_SIZE;
-
- mss->resident += PAGE_SIZE;
- /* Accumulate the size in pages that have been accessed. */
- if (pte_young(ptent) || PageReferenced(page))
- mss->referenced += PAGE_SIZE;
- mapcount = page_mapcount(page);
- if (mapcount >= 2) {
- if (pte_dirty(ptent) || PageDirty(page))
- mss->shared_dirty += PAGE_SIZE;
- else
- mss->shared_clean += PAGE_SIZE;
- mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+ spin_lock(&walk->mm->page_table_lock);
+ if (pmd_trans_huge(*pmd)) {
+ if (pmd_trans_splitting(*pmd)) {
+ spin_unlock(&walk->mm->page_table_lock);
+ wait_split_huge_page(vma->anon_vma, pmd);
} else {
- if (pte_dirty(ptent) || PageDirty(page))
- mss->private_dirty += PAGE_SIZE;
- else
- mss->private_clean += PAGE_SIZE;
- mss->pss += (PAGE_SIZE << PSS_SHIFT);
+ smaps_pte_entry(*(pte_t *)pmd, addr,
+ HPAGE_PMD_SIZE, walk);
+ spin_unlock(&walk->mm->page_table_lock);
+ mss->anonymous_thp += HPAGE_PMD_SIZE;
+ return 0;
}
+ } else {
+ spin_unlock(&walk->mm->page_table_lock);
}
+ /*
+ * The mmap_sem held all the way back in m_start() is what
+ * keeps khugepaged out of here and prevents it from
+ * collapsing things under us.
+ */
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE)
+ smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
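[Editor's sketch, not part of the patch series: smaps_pte_entry() now takes
the entry size as a parameter, so a transparent huge page is accounted in one
call with HPAGE_PMD_SIZE instead of 512 per-page calls. The PSS arithmetic it
performs is a fixed-point "charge size/mapcount per mapping" sum; a standalone
illustration with made-up page data:]

#include <stdio.h>

#define PSS_SHIFT	12	/* same fixed-point shift smaps uses */

int main(void)
{
	struct { unsigned long size; int mapcount; } pages[] = {
		{ 4096, 1 },		/* private page: charged in full    */
		{ 4096, 4 },		/* shared by 4: charged one quarter */
		{ 2 * 1024 * 1024, 1 },	/* a THP accounted as one entry     */
	};
	unsigned long long pss = 0;

	for (int i = 0; i < 3; i++)
		pss += ((unsigned long long)pages[i].size << PSS_SHIFT)
		       / pages[i].mapcount;
	/* Shifting out PSS_SHIFT only at the end keeps the per-page
	 * divisions from rounding to zero. Prints "Pss: 2053 kB". */
	printf("Pss: %llu kB\n", pss >> (10 + PSS_SHIFT));
	return 0;
}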
@@ -416,6 +448,7 @@ static int show_smap(struct seq_file *m, void *v)
"Private_Dirty: %8lu kB\n"
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
+ "AnonHugePages: %8lu kB\n"
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n"
@@ -429,6 +462,7 @@ static int show_smap(struct seq_file *m, void *v)
mss.private_dirty >> 10,
mss.referenced >> 10,
mss.anonymous >> 10,
+ mss.anonymous_thp >> 10,
mss.swap >> 10,
vma_kernel_pagesize(vma) >> 10,
vma_mmu_pagesize(vma) >> 10,
@@ -436,7 +470,8 @@ static int show_smap(struct seq_file *m, void *v)
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
if (m->count < m->size) /* vma is copied successfully */
- m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+ m->version = (vma != get_gate_vma(task->mm))
+ ? vma->vm_start : 0;
return 0;
}
@@ -467,6 +502,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
spinlock_t *ptl;
struct page *page;
+ split_huge_page_pmd(walk->mm, pmd);
+
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
@@ -623,6 +660,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte_t *pte;
int err = 0;
+ split_huge_page_pmd(walk->mm, pmd);
+
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
for (; addr != end; addr += PAGE_SIZE) {
@@ -728,8 +767,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!task)
goto out;
- ret = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ mm = mm_for_maps(task);
+ ret = PTR_ERR(mm);
+ if (!mm || IS_ERR(mm))
goto out_task;
ret = -EINVAL;
@@ -742,10 +782,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!count)
goto out_task;
- mm = get_task_mm(task);
- if (!mm)
- goto out_task;
-
pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
ret = -ENOMEM;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index b535d3e5d5f1..980de547c070 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -199,13 +199,13 @@ static void *m_start(struct seq_file *m, loff_t *pos)
/* pin the task and mm whilst we play with them */
priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
if (!priv->task)
- return NULL;
+ return ERR_PTR(-ESRCH);
mm = mm_for_maps(priv->task);
- if (!mm) {
+ if (!mm || IS_ERR(mm)) {
put_task_struct(priv->task);
priv->task = NULL;
- return NULL;
+ return mm;
}
down_read(&mm->mmap_sem);
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 867d0ac026ce..8007ae7c0d8c 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -1,5 +1,5 @@
config PSTORE
- bool "Persistant store support"
+ bool "Persistent store support"
default n
help
This option enables generic access to platform level
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index ce9ad84d5dd9..f835a25625ff 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -48,6 +48,10 @@ void pstore_set_kmsg_bytes(int bytes)
/* Tag each group of saved records with a sequence number */
static int oopscount;
+static char *reason_str[] = {
+ "Oops", "Panic", "Kexec", "Restart", "Halt", "Poweroff", "Emergency"
+};
+
/*
* callback from kmsg_dump. (s2,l2) has the most recently
* written bytes, older bytes are in (s1,l1). Save as much
@@ -61,15 +65,20 @@ static void pstore_dump(struct kmsg_dumper *dumper,
unsigned long s1_start, s2_start;
unsigned long l1_cpy, l2_cpy;
unsigned long size, total = 0;
- char *dst;
+ char *dst, *why;
u64 id;
int hsize, part = 1;
+ if (reason < ARRAY_SIZE(reason_str))
+ why = reason_str[reason];
+ else
+ why = "Unknown";
+
mutex_lock(&psinfo->buf_mutex);
oopscount++;
while (total < kmsg_bytes) {
dst = psinfo->buf;
- hsize = sprintf(dst, "Oops#%d Part%d\n", oopscount, part++);
+ hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part++);
size = psinfo->bufsize - hsize;
dst += hsize;
@@ -86,7 +95,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
id = psinfo->write(PSTORE_TYPE_DMESG, hsize + l1_cpy + l2_cpy);
- if (pstore_is_mounted())
+ if (reason == KMSG_DUMP_OOPS && pstore_is_mounted())
pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id,
psinfo->buf, hsize + l1_cpy + l2_cpy,
CURRENT_TIME, psinfo->erase);
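[Editor's sketch, not part of the patch series: the reason_str table plus the
range check above is a bounds-checked enum-to-string lookup with a fallback,
so an unexpected kmsg_dump reason degrades to "Unknown" rather than indexing
past the array. The same shape in standalone form:]

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char * const reason_str[] = {
	"Oops", "Panic", "Kexec", "Restart", "Halt", "Poweroff", "Emergency"
};

static const char *reason_name(unsigned int reason)
{
	return reason < ARRAY_SIZE(reason_str) ? reason_str[reason]
					       : "Unknown";
}

int main(void)
{
	printf("%s#%d Part%d\n", reason_name(1), 1, 1);	 /* Panic#1 Part1   */
	printf("%s#%d Part%d\n", reason_name(42), 1, 1); /* Unknown#1 Part1 */
	return 0;
}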
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index e63b4171d583..2b0646613f5a 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -335,7 +335,6 @@ static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations qnx4_aops = {
.readpage = qnx4_readpage,
.writepage = qnx4_writepage,
- .sync_page = block_sync_page,
.write_begin = qnx4_write_begin,
.write_end = generic_write_end,
.bmap = qnx4_bmap
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index b59ee61f4b9a..d3c032f5fa0a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -76,7 +76,7 @@
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
-#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
+#include "../internal.h" /* ugh */
#include <asm/uaccess.h>
@@ -895,33 +895,38 @@ static void add_dquot_ref(struct super_block *sb, int type)
int reserved = 0;
#endif
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+ spin_lock(&inode->i_lock);
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ !atomic_read(&inode->i_writecount) ||
+ !dqinit_needed(inode, type)) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
- if (!atomic_read(&inode->i_writecount))
- continue;
- if (!dqinit_needed(inode, type))
- continue;
-
__iget(inode);
- spin_unlock(&inode_lock);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(old_inode);
__dquot_initialize(inode, type);
- /* We hold a reference to 'inode' so it couldn't have been
- * removed from s_inodes list while we dropped the inode_lock.
- * We cannot iput the inode now as we can be holding the last
- * reference and we cannot iput it under inode_lock. So we
- * keep the reference and iput it later. */
+
+ /*
+ * We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the
+ * inode_sb_list_lock. We cannot iput the inode now as we can be
+ * holding the last reference and we cannot iput it under
+ * inode_sb_list_lock. So we keep the reference and iput it
+ * later.
+ */
old_inode = inode;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
iput(old_inode);
#ifdef CONFIG_QUOTA_DEBUG
@@ -946,7 +951,7 @@ static inline int dqput_blocks(struct dquot *dquot)
/*
* Remove references to dquots from inode and add dquot to list for freeing
- * if we have the last referece to dquot
+ * if we have the last reference to dquot
* We can't race with anybody because we hold dqptr_sem for writing...
*/
static int remove_inode_dquot_ref(struct inode *inode, int type,
@@ -1002,7 +1007,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
struct inode *inode;
int reserved = 0;
- spin_lock(&inode_lock);
+ spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
/*
* We have to scan also I_NEW inodes because they can already
@@ -1016,7 +1021,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
remove_inode_dquot_ref(inode, type, tofree_head);
}
}
- spin_unlock(&inode_lock);
+ spin_unlock(&inode_sb_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
if (reserved) {
printk(KERN_WARNING "VFS (%s): Writes happened after quota"
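[Editor's sketch, not part of the patch series: add_dquot_ref() above uses the
classic "drop the list lock but keep a reference" walk -- __iget() pins the
inode, the lock is released for the blocking work, and the previous inode is
only iput() once the lock is no longer held. A hypothetical userspace skeleton
of the same pattern, with invented node/refcount names:]

#include <pthread.h>
#include <stdio.h>

struct node {
	int refcount;		/* i_count analogue */
	int data;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void get_node(struct node *n) { n->refcount++; }	/* under list_lock */
static void put_node(struct node *n)			/* may block       */
{
	if (n)
		n->refcount--;
}

static void walk(struct node *head)
{
	struct node *old = NULL;

	pthread_mutex_lock(&list_lock);
	for (struct node *n = head; n; n = n->next) {
		get_node(n);			/* pin n before unlocking */
		pthread_mutex_unlock(&list_lock);
		put_node(old);			/* safe: lock not held    */
		printf("working on %d\n", n->data);	/* blocking work  */
		old = n;			/* keep n pinned until    */
		pthread_mutex_lock(&list_lock);	/* the next lap drops it  */
	}
	pthread_mutex_unlock(&list_lock);
	put_node(old);
}

int main(void)
{
	struct node b = { 1, 2, NULL };
	struct node a = { 1, 1, &b };

	walk(&a);
	return 0;
}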
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1bba24bad820..4fd5bb33dbb5 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3217,7 +3217,6 @@ const struct address_space_operations reiserfs_address_space_operations = {
.readpages = reiserfs_readpages,
.releasepage = reiserfs_releasepage,
.invalidatepage = reiserfs_invalidatepage,
- .sync_page = block_sync_page,
.write_begin = reiserfs_write_begin,
.write_end = reiserfs_write_end,
.bmap = reiserfs_aop_bmap,
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 79265fdc317a..4e153051bc75 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -59,7 +59,7 @@ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (err)
break;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EPERM;
goto setflags_out;
}
@@ -103,7 +103,7 @@ setflags_out:
err = put_user(inode->i_generation, (int __user *)arg);
break;
case REISERFS_IOC_SETVERSION:
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
err = -EPERM;
break;
}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index c77514bd5776..c5e82ece7c6c 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1,7 +1,7 @@
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
-** The background commits make this code very interelated, and
+** The background commits make this code very interrelated, and
** overly complex. I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
@@ -2725,7 +2725,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
REISERFS_DISK_OFFSET_IN_BYTES /
sb->s_blocksize + 2);
- /* Sanity check to see is the standard journal fitting withing first bitmap
+ /* Sanity check to see if the standard journal fits within the first bitmap
(actual for small blocksizes) */
if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
(SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
index b87aa2c1afc1..7df1ce48203a 100644
--- a/fs/reiserfs/lock.c
+++ b/fs/reiserfs/lock.c
@@ -15,7 +15,7 @@
* for this mutex, no need for a system wide mutex facility.
*
* Also this lock is often released before a call that could block because
- * reiserfs performances were partialy based on the release while schedule()
+ * reiserfs performances were partially based on the release while schedule()
* property of the Bkl.
*/
void reiserfs_write_lock(struct super_block *s)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 0aab04f46827..b216ff6be1c9 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -393,7 +393,7 @@ void add_save_link(struct reiserfs_transaction_handle *th,
/* body of "save" link */
link = INODE_PKEY(inode)->k_dir_id;
- /* put "save" link inot tree, don't charge quota to anyone */
+ /* put "save" link into tree, don't charge quota to anyone */
retval =
reiserfs_insert_item(th, &path, &key, &ih, NULL, (char *)&link);
if (retval) {
@@ -2104,7 +2104,7 @@ out:
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and noone else should touch the files)
+ * itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 5c11ca82b782..47d2a4498b03 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -396,7 +396,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
struct address_space *mapping = dir->i_mapping;
struct page *page;
/* We can deadlock if we try to free dentries,
- and an unlink/rmdir has just occured - GFP_NOFS avoids this */
+ and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
mapping_set_gfp_mask(mapping, GFP_NOFS);
page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
if (!IS_ERR(page)) {
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 90d2fcb67a31..3dc38f1206fc 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -26,7 +26,7 @@ posix_acl_set(struct dentry *dentry, const char *name, const void *value,
size_t jcreate_blocks;
if (!reiserfs_posixacl(inode->i_sb))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
if (value) {
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index aa68a8a31518..efc309fa3035 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -5,12 +5,12 @@ config SQUASHFS
help
Saying Y here includes support for SquashFS 4.0 (a Compressed
Read-Only File System). Squashfs is a highly compressed read-only
- filesystem for Linux. It uses zlib/lzo compression to compress both
- files, inodes and directories. Inodes in the system are very small
- and all blocks are packed to minimise data overhead. Block sizes
- greater than 4K are supported up to a maximum of 1 Mbytes (default
- block size 128K). SquashFS 4.0 supports 64 bit filesystems and files
- (larger than 4GB), full uid/gid information, hard links and
+ filesystem for Linux. It uses zlib, lzo or xz compression to
+ compress files, inodes and directories. Inodes in the system
+ are very small and all blocks are packed to minimise data overhead.
+ Block sizes greater than 4K are supported up to a maximum of 1 Mbyte
+ (default block size 128K). SquashFS 4.0 supports 64 bit filesystems
+ and files (larger than 4GB), full uid/gid information, hard links and
timestamps.
Squashfs is intended for general read-only filesystem use, for
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 26b15ae34d6f..c37b520132ff 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -104,7 +104,7 @@ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
entry = &cache->entry[i];
/*
- * Initialise choosen cache entry, and fill it in from
+ * Initialise chosen cache entry, and fill it in from
* disk.
*/
cache->unused--;
@@ -286,7 +286,7 @@ cleanup:
/*
- * Copy upto length bytes from cache entry to buffer starting at offset bytes
+ * Copy up to length bytes from cache entry to buffer starting at offset bytes
* into the cache entry. If there's not length bytes then copy the number of
* bytes available. In all cases return the number of bytes copied.
*/
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index a5940e54c4dd..e921bd213738 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "squashfs_fs.h"
@@ -74,3 +75,36 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
return decompressor[i];
}
+
+
+void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ void *strm, *buffer = NULL;
+ int length = 0;
+
+ /*
+ * Read decompressor specific options from file system if present
+ */
+ if (SQUASHFS_COMP_OPTS(flags)) {
+ buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ length = squashfs_read_data(sb, &buffer,
+ sizeof(struct squashfs_super_block), 0, NULL,
+ PAGE_CACHE_SIZE, 1);
+
+ if (length < 0) {
+ strm = ERR_PTR(length);
+ goto finished;
+ }
+ }
+
+ strm = msblk->decompressor->init(msblk, buffer, length);
+
+finished:
+ kfree(buffer);
+
+ return strm;
+}
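[Editor's sketch, not part of the patch series: squashfs_decompressor_init()
above funnels every path through one kfree(buffer), so the optional options
buffer is released whether the read succeeded, failed, or was never needed.
A hedged standalone rendering of that single-exit cleanup idiom, with invented
names and malloc/free in place of kmalloc/kfree:]

#include <errno.h>
#include <stdlib.h>

static void *init_with_options(int have_opts, int read_ok)
{
	void *result;
	void *buffer = NULL;

	if (have_opts) {
		buffer = malloc(4096);
		if (!buffer)
			return (void *)(long)-ENOMEM;	/* ERR_PTR analogue */
		if (!read_ok) {
			result = (void *)(long)-EIO;
			goto finished;
		}
	}
	result = malloc(64);		/* the real init step (sketch only) */
finished:
	free(buffer);			/* free(NULL) is a no-op */
	return result;
}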
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 3b305a70f7aa..099745ad5691 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -24,7 +24,7 @@
*/
struct squashfs_decompressor {
- void *(*init)(struct squashfs_sb_info *);
+ void *(*init)(struct squashfs_sb_info *, void *, int);
void (*free)(void *);
int (*decompress)(struct squashfs_sb_info *, void **,
struct buffer_head **, int, int, int, int, int);
@@ -33,11 +33,6 @@ struct squashfs_decompressor {
int supported;
};
-static inline void *squashfs_decompressor_init(struct squashfs_sb_info *msblk)
-{
- return msblk->decompressor->init(msblk);
-}
-
static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk,
void *s)
{
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index 0dc340aa2be9..3f79cd1d0c19 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -172,6 +172,11 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
length += sizeof(dirh);
dir_count = le32_to_cpu(dirh.count) + 1;
+
+ /* dir_count should never be larger than 256 */
+ if (dir_count > 256)
+ goto failed_read;
+
while (dir_count--) {
/*
* Read directory entry.
@@ -183,6 +188,10 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
size = le16_to_cpu(dire->size) + 1;
+ /* size should never be larger than SQUASHFS_NAME_LEN */
+ if (size > SQUASHFS_NAME_LEN)
+ goto failed_read;
+
err = squashfs_read_metadata(inode->i_sb, dire->name,
&block, &offset, size);
if (err < 0)
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 7da759e34c52..00f4dfc5f088 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -37,7 +37,7 @@ struct squashfs_lzo {
void *output;
};
-static void *lzo_init(struct squashfs_sb_info *msblk)
+static void *lzo_init(struct squashfs_sb_info *msblk, void *buff, int len)
{
int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
@@ -58,7 +58,7 @@ failed2:
failed:
ERROR("Failed to allocate lzo workspace\n");
kfree(stream);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 7a9464d08cf6..5d922a6701ab 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -176,6 +176,11 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
length += sizeof(dirh);
dir_count = le32_to_cpu(dirh.count) + 1;
+
+ /* dir_count should never be larger than 256 */
+ if (dir_count > 256)
+ goto data_error;
+
while (dir_count--) {
/*
* Read directory entry.
@@ -187,6 +192,10 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
size = le16_to_cpu(dire->size) + 1;
+ /* size should never be larger than SQUASHFS_NAME_LEN */
+ if (size > SQUASHFS_NAME_LEN)
+ goto data_error;
+
err = squashfs_read_metadata(dir->i_sb, dire->name,
&block, &offset, size);
if (err < 0)
@@ -228,6 +237,9 @@ exit_lookup:
d_add(dentry, inode);
return ERR_PTR(0);
+data_error:
+ err = -EIO;
+
read_failure:
ERROR("Unable to read directory block [%llx:%x]\n",
squashfs_i(dir)->start + msblk->directory_table,
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index ba729d808876..1f2e608b8785 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -48,6 +48,7 @@ extern int squashfs_read_table(struct super_block *, void *, u64, int);
/* decompressor.c */
extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
+extern void *squashfs_decompressor_init(struct super_block *, unsigned short);
/* export.c */
extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64,
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 39533feffd6d..4582c568ef4d 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -57,6 +57,7 @@
#define SQUASHFS_ALWAYS_FRAG 5
#define SQUASHFS_DUPLICATE 6
#define SQUASHFS_EXPORT 7
+#define SQUASHFS_COMP_OPT 10
#define SQUASHFS_BIT(flag, bit) ((flag >> bit) & 1)
@@ -81,6 +82,9 @@
#define SQUASHFS_EXPORTABLE(flags) SQUASHFS_BIT(flags, \
SQUASHFS_EXPORT)
+#define SQUASHFS_COMP_OPTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_COMP_OPT)
+
/* Max number of types and file types */
#define SQUASHFS_DIR_TYPE 1
#define SQUASHFS_REG_TYPE 2
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 20700b9f2b4c..5c8184c061a4 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -199,10 +199,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
err = -ENOMEM;
- msblk->stream = squashfs_decompressor_init(msblk);
- if (msblk->stream == NULL)
- goto failed_mount;
-
msblk->block_cache = squashfs_cache_init("metadata",
SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
if (msblk->block_cache == NULL)
@@ -215,6 +211,13 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
+ msblk->stream = squashfs_decompressor_init(sb, flags);
+ if (IS_ERR(msblk->stream)) {
+ err = PTR_ERR(msblk->stream);
+ msblk->stream = NULL;
+ goto failed_mount;
+ }
+
/* Allocate and read id index table */
msblk->id_table = squashfs_read_id_index_table(sb,
le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids));
@@ -370,8 +373,8 @@ static void squashfs_put_super(struct super_block *sb)
}
-static struct dentry *squashfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static struct dentry *squashfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, squashfs_fill_super);
}
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index c4eb40018256..aa47a286d1f8 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -26,10 +26,10 @@
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/xz.h>
+#include <linux/bitops.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "decompressor.h"
@@ -38,24 +38,57 @@ struct squashfs_xz {
struct xz_buf buf;
};
-static void *squashfs_xz_init(struct squashfs_sb_info *msblk)
+struct comp_opts {
+ __le32 dictionary_size;
+ __le32 flags;
+};
+
+static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
+ int len)
{
- int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+ struct comp_opts *comp_opts = buff;
+ struct squashfs_xz *stream;
+ int dict_size = msblk->block_size;
+ int err, n;
+
+ if (comp_opts) {
+ /* check compressor options are the expected length */
+ if (len < sizeof(*comp_opts)) {
+ err = -EIO;
+ goto failed;
+ }
- struct squashfs_xz *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
- if (stream == NULL)
+ dict_size = le32_to_cpu(comp_opts->dictionary_size);
+
+ /* the dictionary size should be 2^n or 2^n+2^(n+1) */
+ n = ffs(dict_size) - 1;
+ if (dict_size != (1 << n) && dict_size != (1 << n) +
+ (1 << (n + 1))) {
+ err = -EIO;
+ goto failed;
+ }
+ }
+
+ dict_size = max_t(int, dict_size, SQUASHFS_METADATA_SIZE);
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL) {
+ err = -ENOMEM;
goto failed;
+ }
- stream->state = xz_dec_init(XZ_PREALLOC, block_size);
- if (stream->state == NULL)
+ stream->state = xz_dec_init(XZ_PREALLOC, dict_size);
+ if (stream->state == NULL) {
+ kfree(stream);
+ err = -ENOMEM;
goto failed;
+ }
return stream;
failed:
- ERROR("Failed to allocate xz workspace\n");
- kfree(stream);
- return NULL;
+ ERROR("Failed to initialise xz decompressor\n");
+ return ERR_PTR(err);
}
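[Editor's note: the dictionary-size validation above accepts only 2^n or
2^n + 2^(n+1) (binary 10...0 or 110...0), the shapes xz itself allows. ffs()
locates the lowest set bit; the value is then compared against the two
admissible patterns built from it. A standalone sketch of the check:]

#include <stdio.h>
#include <strings.h>	/* ffs() */

static int dict_size_valid(unsigned int dict_size)
{
	int n;

	if (dict_size == 0)
		return 0;
	n = ffs((int)dict_size) - 1;
	return dict_size == (1u << n) ||
	       dict_size == (1u << n) + (2u << n);	/* 2^n + 2^(n+1) */
}

int main(void)
{
	printf("%d %d %d\n",
	       dict_size_valid(1u << 20),		 /* 1 MiB: valid    */
	       dict_size_valid((1u << 20) + (1u << 21)), /* 3 MiB: valid    */
	       dict_size_valid(5u << 20));		 /* 5 MiB: rejected */
	return 0;	/* prints "1 1 0" */
}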
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 4661ae2b1cec..517688b32ffa 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -26,19 +26,19 @@
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/zlib.h>
+#include <linux/vmalloc.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
-static void *zlib_init(struct squashfs_sb_info *dummy)
+static void *zlib_init(struct squashfs_sb_info *dummy, void *buff, int len)
{
z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
if (stream == NULL)
goto failed;
- stream->workspace = kmalloc(zlib_inflate_workspacesize(),
- GFP_KERNEL);
+ stream->workspace = vmalloc(zlib_inflate_workspacesize());
if (stream->workspace == NULL)
goto failed;
@@ -47,7 +47,7 @@ static void *zlib_init(struct squashfs_sb_info *dummy)
failed:
ERROR("Failed to allocate zlib workspace\n");
kfree(stream);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
@@ -56,7 +56,7 @@ static void zlib_free(void *strm)
z_stream *stream = strm;
if (stream)
- kfree(stream->workspace);
+ vfree(stream->workspace);
kfree(stream);
}
diff --git a/fs/super.c b/fs/super.c
index e84864908264..8a06881b1920 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -71,6 +71,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
#else
INIT_LIST_HEAD(&s->s_files);
#endif
+ s->s_bdi = &default_backing_dev_info;
INIT_LIST_HEAD(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
INIT_LIST_HEAD(&s->s_inodes);
@@ -936,6 +937,7 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
sb = root->d_sb;
BUG_ON(!sb);
WARN_ON(!sb->s_bdi);
+ WARN_ON(sb->s_bdi == &default_backing_dev_info);
sb->s_flags |= MS_BORN;
error = security_sb_kern_mount(sb, flags, secdata);
diff --git a/fs/sync.c b/fs/sync.c
index 92ca208777d5..c38ec163da6c 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -34,7 +34,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
* This should be safe, as we require bdi backing to actually
* write out data in the first place
*/
- if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
+ if (sb->s_bdi == &noop_backing_dev_info)
return 0;
if (sb->s_qcop && sb->s_qcop->quota_sync)
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(sync_filesystem);
static void sync_one_sb(struct super_block *sb, void *arg)
{
- if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi)
+ if (!(sb->s_flags & MS_RDONLY))
__sync_filesystem(sb, *(int *)arg);
}
/*
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 9ca66276315e..fa8d43c92bb8 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -488,7 +488,6 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations sysv_aops = {
.readpage = sysv_readpage,
.writepage = sysv_writepage,
- .sync_page = block_sync_page,
.write_begin = sysv_write_begin,
.write_end = generic_write_end,
.bmap = sysv_bmap
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 1d1859dc3de5..f8b0160da2da 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -47,7 +47,7 @@ config UBIFS_FS_DEBUG
bool "Enable debugging support"
depends on UBIFS_FS
select DEBUG_FS
- select KALLSYMS_ALL
+ select KALLSYMS
help
This option enables UBIFS debugging support. It makes sure various
assertions, self-checks, debugging messages and test modes are compiled
@@ -58,12 +58,3 @@ config UBIFS_FS_DEBUG
down UBIFS. You can then further enable / disable individual debugging
features using UBIFS module parameters and the corresponding sysfs
interfaces.
-
-config UBIFS_FS_DEBUG_CHKS
- bool "Enable extra checks"
- depends on UBIFS_FS_DEBUG
- help
- If extra checks are enabled UBIFS will check the consistency of its
- internal data structures during operation. However, UBIFS performance
- is dramatically slower when this option is selected especially if the
- file system is large.
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index c8ff0d1ae5d3..8b3a7da531eb 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -147,7 +147,7 @@ static int make_free_space(struct ubifs_info *c)
if (liab2 < liab1)
return -EAGAIN;
- dbg_budg("new liability %lld (not shrinked)", liab2);
+ dbg_budg("new liability %lld (not shrunk)", liab2);
/* Liability did not shrink again, try GC */
dbg_budg("Run GC");
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index b148fbc80f8d..1bd01ded7123 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -577,7 +577,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
size_t sz;
if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX))
- goto out;
+ return 0;
INIT_LIST_HEAD(&list);
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 01c2b028e525..004d3745dc45 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -818,7 +818,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum)
printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
current->pid, lnum);
- buf = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory for dumping LEB %d", lnum);
return;
@@ -972,11 +972,39 @@ void dbg_dump_index(struct ubifs_info *c)
void dbg_save_space_info(struct ubifs_info *c)
{
struct ubifs_debug_info *d = c->dbg;
-
- ubifs_get_lp_stats(c, &d->saved_lst);
+ int freeable_cnt;
spin_lock(&c->space_lock);
+ memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
+
+ /*
+ * We use a dirty hack here and zero out @c->freeable_cnt, because it
+ * affects the free space calculations, and UBIFS might not know about
+ * all freeable eraseblocks. Indeed, we know about freeable eraseblocks
+ * only when we read their lprops, and we do this only lazily, on
+ * demand. So at any given point in time @c->freeable_cnt might not
+ * be exactly accurate.
+ *
+ * Just one example of the issue we hit when we did not zero
+ * @c->freeable_cnt.
+ * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the
+ * amount of free space in @d->saved_free
+ * 2. We re-mount R/W, which makes UBIFS to read the "lsave"
+ * information from flash, where we cache LEBs from various
+ * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()'
+ * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()'
+ * -> 'ubifs_get_pnode()' -> 'update_cats()'
+ * -> 'ubifs_add_to_cat()').
+ * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt
+ * becomes %1.
+ * 4. We calculate the amount of free space when the re-mount is
+ * finished in 'dbg_check_space_info()' and it does not match
+ * @d->saved_free.
+ */
+ freeable_cnt = c->freeable_cnt;
+ c->freeable_cnt = 0;
d->saved_free = ubifs_get_free_space_nolock(c);
+ c->freeable_cnt = freeable_cnt;
spin_unlock(&c->space_lock);
}
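[Editor's sketch, not part of the patch series: the "dirty hack" above
temporarily zeroes a field that skews a computation, runs the computation, and
restores the field -- all inside the same lock so no other thread can observe
the zeroed value. The shape in isolation, with invented names and a stand-in
formula:]

#include <pthread.h>

struct fs_info {
	pthread_mutex_t space_lock;
	long freeable_cnt;
	long free_space;
};

static long compute_free_space(struct fs_info *c)
{
	return c->free_space - c->freeable_cnt * 4096;	/* stand-in math */
}

static long free_space_ignoring_freeable(struct fs_info *c)
{
	long saved, result;

	pthread_mutex_lock(&c->space_lock);
	saved = c->freeable_cnt;
	c->freeable_cnt = 0;		/* make the estimate reproducible   */
	result = compute_free_space(c);
	c->freeable_cnt = saved;	/* restore before dropping the lock */
	pthread_mutex_unlock(&c->space_lock);
	return result;
}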
@@ -993,12 +1021,15 @@ int dbg_check_space_info(struct ubifs_info *c)
{
struct ubifs_debug_info *d = c->dbg;
struct ubifs_lp_stats lst;
- long long avail, free;
+ long long free;
+ int freeable_cnt;
spin_lock(&c->space_lock);
- avail = ubifs_calc_available(c, c->min_idx_lebs);
+ freeable_cnt = c->freeable_cnt;
+ c->freeable_cnt = 0;
+ free = ubifs_get_free_space_nolock(c);
+ c->freeable_cnt = freeable_cnt;
spin_unlock(&c->space_lock);
- free = ubifs_get_free_space(c);
if (free != d->saved_free) {
ubifs_err("free space changed from %lld to %lld",
@@ -2806,40 +2837,38 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
struct ubifs_debug_info *d = c->dbg;
sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
- d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
- if (IS_ERR(d->dfs_dir)) {
- err = PTR_ERR(d->dfs_dir);
- ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
- d->dfs_dir_name, err);
+ fname = d->dfs_dir_name;
+ dent = debugfs_create_dir(fname, dfs_rootdir);
+ if (IS_ERR_OR_NULL(dent))
goto out;
- }
+ d->dfs_dir = dent;
fname = "dump_lprops";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
- if (IS_ERR(dent))
+ if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_dump_lprops = dent;
fname = "dump_budg";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
- if (IS_ERR(dent))
+ if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_dump_budg = dent;
fname = "dump_tnc";
dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
- if (IS_ERR(dent))
+ if (IS_ERR_OR_NULL(dent))
goto out_remove;
d->dfs_dump_tnc = dent;
return 0;
out_remove:
- err = PTR_ERR(dent);
- ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
- fname, err);
debugfs_remove_recursive(d->dfs_dir);
out:
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
+ fname, err);
return err;
}
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index d77db7e36484..28be1e6a65e8 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -448,10 +448,12 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
/*
* We change whole page so no need to load it. But we
- * have to set the @PG_checked flag to make the further
- * code know that the page is new. This might be not
- * true, but it is better to budget more than to read
- * the page from the media.
+ * do not know whether this page exists on the media or
+ * not, so we assume the latter because it requires a
+ * larger budget. The assumption is that it is better
+ * to budget a bit more than to read the page from the
+ * media. Thus, we set the @PG_checked flag here.
*/
SetPageChecked(page);
skipped_read = 1;
@@ -559,6 +561,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
dbg_gen("copied %d instead of %d, read page and repeat",
copied, len);
cancel_budget(c, page, ui, appending);
+ ClearPageChecked(page);
/*
* Return 0 to force VFS to repeat the whole operation, or the
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 8aacd64957a2..548acf494afd 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -160,7 +160,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (IS_RDONLY(inode))
return -EROFS;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index c7b25e2f7764..0ee0847f2421 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -1094,7 +1094,7 @@ static int scan_check_cb(struct ubifs_info *c,
}
}
- buf = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory to scan LEB %d", lnum);
goto out;
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 72775d35b99e..ef5155e109a2 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1270,10 +1270,9 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
lnum = branch->lnum;
offs = branch->offs;
pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
- if (!pnode) {
- err = -ENOMEM;
- goto out;
- }
+ if (!pnode)
+ return -ENOMEM;
+
if (lnum == 0) {
/*
* This pnode was not written which just means that the LEB
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 0a3c2c3f5c4a..0c9c69bd983a 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -1633,7 +1633,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
return 0;
- buf = p = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory for ltab checking");
return 0;
@@ -1885,7 +1885,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
current->pid, lnum);
- buf = p = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory to dump LPT");
return;
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 2cdbd31641d7..09df318e368f 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -898,7 +898,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci)
if (c->no_orphs)
return 0;
- buf = __vmalloc(c->leb_size, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL);
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
ubifs_err("cannot allocate memory to check orphans");
return 0;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index e5dc1e120e8d..c75f6133206c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1568,6 +1568,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
mutex_lock(&c->umount_mutex);
dbg_save_space_info(c);
c->remounting_rw = 1;
+ c->ro_mount = 0;
err = check_free_space(c);
if (err)
@@ -1676,13 +1677,13 @@ static int ubifs_remount_rw(struct ubifs_info *c)
}
dbg_gen("re-mounted read-write");
- c->ro_mount = 0;
c->remounting_rw = 0;
err = dbg_check_space_info(c);
mutex_unlock(&c->umount_mutex);
return err;
out:
+ c->ro_mount = 1;
vfree(c->orph_buf);
c->orph_buf = NULL;
if (c->bgt) {
@@ -2011,7 +2012,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
*/
c->bdi.name = "ubifs",
c->bdi.capabilities = BDI_CAP_MAP_COPY;
- c->bdi.unplug_io_fn = default_unplug_io_fn;
err = bdi_init(&c->bdi);
if (err)
goto out_close;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index c74400f88fe0..3299f469e712 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -56,6 +56,7 @@
*/
#include "ubifs.h"
+#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
@@ -80,7 +81,6 @@ enum {
};
static const struct inode_operations none_inode_operations;
-static const struct address_space_operations none_address_operations;
static const struct file_operations none_file_operations;
/**
@@ -130,7 +130,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
}
/* Re-define all operations to be "nothing" */
- inode->i_mapping->a_ops = &none_address_operations;
+ inode->i_mapping->a_ops = &empty_aops;
inode->i_op = &none_inode_operations;
inode->i_fop = &none_file_operations;
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 8994dd041660..95518a9f589e 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -27,11 +27,10 @@
#include "udf_i.h"
#include "udf_sb.h"
-#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
-#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
-#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
-#define udf_find_next_one_bit(addr, size, offset) \
- ext2_find_next_bit((unsigned long *)(addr), size, offset)
+#define udf_clear_bit __test_and_clear_bit_le
+#define udf_set_bit __test_and_set_bit_le
+#define udf_test_bit test_bit_le
+#define udf_find_next_one_bit find_next_bit_le
static int read_block_bitmap(struct super_block *sb,
struct udf_bitmap *bitmap, unsigned int block,
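[Editor's note: the *_le bitops the udf macros now map to use little-endian
bit numbering -- bit n lives in byte n/8 at position n%8, independent of host
endianness, which is what an on-disk bitmap format needs. A byte-wise model of
those semantics (the kernel versions operate on unsigned long words, so this
is an illustration, not the real implementation):]

#include <stdio.h>

static int test_bit_le(int nr, const void *addr)
{
	const unsigned char *p = addr;

	return (p[nr >> 3] >> (nr & 7)) & 1;
}

static int find_next_bit_le(const void *addr, int size, int offset)
{
	for (int nr = offset; nr < size; nr++)
		if (test_bit_le(nr, addr))
			return nr;
	return size;	/* "not found", as in the kernel convention */
}

int main(void)
{
	unsigned char bitmap[2] = { 0x02, 0x01 };	/* bits 1 and 8 set */

	printf("%d %d\n", find_next_bit_le(bitmap, 16, 0),
			  find_next_bit_le(bitmap, 16, 2));	/* "1 8" */
	return 0;
}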
diff --git a/fs/udf/file.c b/fs/udf/file.c
index f391a2adc699..2a346bb1d9f5 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -98,7 +98,6 @@ static int udf_adinicb_write_end(struct file *file,
const struct address_space_operations udf_adinicb_aops = {
.readpage = udf_adinicb_readpage,
.writepage = udf_adinicb_writepage,
- .sync_page = block_sync_page,
.write_begin = simple_write_begin,
.write_end = udf_adinicb_write_end,
};
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index ccc814321414..1d1358ed80c1 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -140,7 +140,6 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations udf_aops = {
.readpage = udf_readpage,
.writepage = udf_writepage,
- .sync_page = block_sync_page,
.write_begin = udf_write_begin,
.write_end = generic_write_end,
.bmap = udf_bmap,
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 03c255f12df5..e765743cf9f3 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -78,7 +78,7 @@ static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t off
/*
* Returns the location of the fragment from
- * the begining of the filesystem.
+ * the beginning of the filesystem.
*/
static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock)
@@ -552,7 +552,6 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations ufs_aops = {
.readpage = ufs_readpage,
.writepage = ufs_writepage,
- .sync_page = block_sync_page,
.write_begin = ufs_write_begin,
.write_end = generic_write_end,
.bmap = ufs_bmap
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 7693d6293404..3915ade6f9a8 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -483,9 +483,9 @@ static int ufs_parse_options (char * options, unsigned * mount_options)
}
/*
- * Diffrent types of UFS hold fs_cstotal in different
- * places, and use diffrent data structure for it.
- * To make things simplier we just copy fs_cstotal to ufs_sb_private_info
+ * Different types of UFS hold fs_cstotal in different
+ * places, and use different data structure for it.
+ * To make things simpler we just copy fs_cstotal to ufs_sb_private_info
*/
static void ufs_setup_cstotal(struct super_block *sb)
{
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index e56a4f567212..5f821dbc0579 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -479,7 +479,6 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
break;
if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
ufs_sync_inode (inode);
- blk_run_address_space(inode->i_mapping);
yield();
}
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 9f8775ce381c..954175928240 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -408,7 +408,7 @@ static inline unsigned _ubh_find_next_zero_bit_(
for (;;) {
count = min_t(unsigned int, size + offset, uspi->s_bpf);
size -= count - offset;
- pos = ext2_find_next_zero_bit (ubh->bh[base]->b_data, count, offset);
+ pos = find_next_zero_bit_le(ubh->bh[base]->b_data, count, offset);
if (pos < count || !size)
break;
base++;
diff --git a/fs/utimes.c b/fs/utimes.c
index 179b58690657..ba653f3dc1bc 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -95,7 +95,7 @@ static int utimes_common(struct path *path, struct timespec *times)
if (IS_IMMUTABLE(inode))
goto mnt_drop_write_and_out;
- if (!is_owner_or_cap(inode)) {
+ if (!inode_owner_or_capable(inode)) {
error = inode_permission(inode, MAY_WRITE);
if (error)
goto mnt_drop_write_and_out;
diff --git a/fs/xattr.c b/fs/xattr.c
index 01bb8135e14a..a19acdb81cd1 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -59,7 +59,7 @@ xattr_permission(struct inode *inode, const char *name, int mask)
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return -EPERM;
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
- (mask & MAY_WRITE) && !is_owner_or_cap(inode))
+ (mask & MAY_WRITE) && !inode_owner_or_capable(inode))
return -EPERM;
}
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 8c5c87277456..79ce38be15a1 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -413,8 +413,7 @@ xfs_submit_ioend_bio(
if (xfs_ioend_new_eof(ioend))
xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
- submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE, bio);
+ submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}
STATIC struct bio *
@@ -1296,7 +1295,7 @@ xfs_get_blocks_direct(
* If the private argument is non-NULL __xfs_get_blocks signals us that we
* need to issue a transaction to convert the range from unwritten to written
* extents. In case this is regular synchronous I/O we just call xfs_end_io
- * to do this and we are done. But in case this was a successfull AIO
+ * to do this and we are done. But in case this was a successful AIO
* request this handler is called from interrupt context, from which we
* can't start transactions. In that case offload the I/O completion to
* the workqueues we also use for buffered I/O completion.
@@ -1495,7 +1494,6 @@ const struct address_space_operations xfs_address_space_operations = {
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
- .sync_page = block_sync_page,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
.write_begin = xfs_vm_write_begin,
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 5cb230f2cb4f..5ea402023ebd 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -94,75 +94,6 @@ xfs_buf_vmap_len(
}
/*
- * Page Region interfaces.
- *
- * For pages in filesystems where the blocksize is smaller than the
- * pagesize, we use the page->private field (long) to hold a bitmap
- * of uptodate regions within the page.
- *
- * Each such region is "bytes per page / bits per long" bytes long.
- *
- * NBPPR == number-of-bytes-per-page-region
- * BTOPR == bytes-to-page-region (rounded up)
- * BTOPRT == bytes-to-page-region-truncated (rounded down)
- */
-#if (BITS_PER_LONG == 32)
-#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
-#elif (BITS_PER_LONG == 64)
-#define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-#define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
-#define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
-#define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
-
-STATIC unsigned long
-page_region_mask(
- size_t offset,
- size_t length)
-{
- unsigned long mask;
- int first, final;
-
- first = BTOPR(offset);
- final = BTOPRT(offset + length - 1);
- first = min(first, final);
-
- mask = ~0UL;
- mask <<= BITS_PER_LONG - (final - first);
- mask >>= BITS_PER_LONG - (final);
-
- ASSERT(offset + length <= PAGE_CACHE_SIZE);
- ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
-
- return mask;
-}
-
-STATIC void
-set_page_region(
- struct page *page,
- size_t offset,
- size_t length)
-{
- set_page_private(page,
- page_private(page) | page_region_mask(offset, length));
- if (page_private(page) == ~0UL)
- SetPageUptodate(page);
-}
-
-STATIC int
-test_page_region(
- struct page *page,
- size_t offset,
- size_t length)
-{
- unsigned long mask = page_region_mask(offset, length);
-
- return (mask && (page_private(page) & mask) == mask);
-}
-
-/*
* xfs_buf_lru_add - add a buffer to the LRU.
*
* The LRU takes a new reference to the buffer so that it will only be freed
@@ -189,7 +120,7 @@ xfs_buf_lru_add(
* The unlocked check is safe here because it only occurs when there are not
* b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
* to optimise the shrinker removing the buffer from the LRU and calling
- * xfs_buf_free(). i.e. it removes an unneccessary round trip on the
+ * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
* bt_lru_lock.
*/
STATIC void
@@ -332,7 +263,7 @@ xfs_buf_free(
ASSERT(list_empty(&bp->b_lru));
- if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
+ if (bp->b_flags & _XBF_PAGES) {
uint i;
if (xfs_buf_is_vmapped(bp))
@@ -342,25 +273,22 @@ xfs_buf_free(
for (i = 0; i < bp->b_page_count; i++) {
struct page *page = bp->b_pages[i];
- if (bp->b_flags & _XBF_PAGE_CACHE)
- ASSERT(!PagePrivate(page));
- page_cache_release(page);
+ __free_page(page);
}
- }
+ } else if (bp->b_flags & _XBF_KMEM)
+ kmem_free(bp->b_addr);
_xfs_buf_free_pages(bp);
xfs_buf_deallocate(bp);
}
/*
- * Finds all pages for buffer in question and builds it's page list.
+ * Allocates all the pages for the buffer in question and builds its page list.
*/
STATIC int
-_xfs_buf_lookup_pages(
+xfs_buf_allocate_memory(
xfs_buf_t *bp,
uint flags)
{
- struct address_space *mapping = bp->b_target->bt_mapping;
- size_t blocksize = bp->b_target->bt_bsize;
size_t size = bp->b_count_desired;
size_t nbytes, offset;
gfp_t gfp_mask = xb_to_gfp(flags);
@@ -369,29 +297,55 @@ _xfs_buf_lookup_pages(
xfs_off_t end;
int error;
+ /*
+ * for buffers that are contained within a single page, just allocate
+ * the memory from the heap - there's no need for the complexity of
+ * page arrays to keep allocation down to order 0.
+ */
+ if (bp->b_buffer_length < PAGE_SIZE) {
+ bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
+ if (!bp->b_addr) {
+ /* low memory - use alloc_page loop instead */
+ goto use_alloc_page;
+ }
+
+ if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
+ PAGE_MASK) !=
+ ((unsigned long)bp->b_addr & PAGE_MASK)) {
+ /* b_addr spans two pages - use alloc_page instead */
+ kmem_free(bp->b_addr);
+ bp->b_addr = NULL;
+ goto use_alloc_page;
+ }
+ bp->b_offset = offset_in_page(bp->b_addr);
+ bp->b_pages = bp->b_page_array;
+ bp->b_pages[0] = virt_to_page(bp->b_addr);
+ bp->b_page_count = 1;
+ bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
+ return 0;
+ }
+
+use_alloc_page:
end = bp->b_file_offset + bp->b_buffer_length;
page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
-
error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error))
return error;
- bp->b_flags |= _XBF_PAGE_CACHE;
offset = bp->b_offset;
- first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
+ first = bp->b_file_offset >> PAGE_SHIFT;
+ bp->b_flags |= _XBF_PAGES;
for (i = 0; i < bp->b_page_count; i++) {
struct page *page;
uint retries = 0;
-
- retry:
- page = find_or_create_page(mapping, first + i, gfp_mask);
+retry:
+ page = alloc_page(gfp_mask);
if (unlikely(page == NULL)) {
if (flags & XBF_READ_AHEAD) {
bp->b_page_count = i;
- for (i = 0; i < bp->b_page_count; i++)
- unlock_page(bp->b_pages[i]);
- return -ENOMEM;
+ error = ENOMEM;
+ goto out_free_pages;
}
/*
@@ -412,52 +366,44 @@ _xfs_buf_lookup_pages(
XFS_STATS_INC(xb_page_found);
- nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
+ nbytes = min_t(size_t, size, PAGE_SIZE - offset);
size -= nbytes;
-
- ASSERT(!PagePrivate(page));
- if (!PageUptodate(page)) {
- page_count--;
- if (blocksize >= PAGE_CACHE_SIZE) {
- if (flags & XBF_READ)
- bp->b_flags |= _XBF_PAGE_LOCKED;
- } else if (!PagePrivate(page)) {
- if (test_page_region(page, offset, nbytes))
- page_count++;
- }
- }
-
bp->b_pages[i] = page;
offset = 0;
}
+ return 0;
- if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
- for (i = 0; i < bp->b_page_count; i++)
- unlock_page(bp->b_pages[i]);
- }
-
- if (page_count == bp->b_page_count)
- bp->b_flags |= XBF_DONE;
-
+out_free_pages:
+ for (i = 0; i < bp->b_page_count; i++)
+ __free_page(bp->b_pages[i]);
return error;
}
/*
- * Map buffer into kernel address-space if nessecary.
+ * Map buffer into kernel address-space if necessary.
*/
STATIC int
_xfs_buf_map_pages(
xfs_buf_t *bp,
uint flags)
{
- /* A single page buffer is always mappable */
+ ASSERT(bp->b_flags & _XBF_PAGES);
if (bp->b_page_count == 1) {
+ /* A single page buffer is always mappable */
bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
bp->b_flags |= XBF_MAPPED;
} else if (flags & XBF_MAPPED) {
- bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
- -1, PAGE_KERNEL);
- if (unlikely(bp->b_addr == NULL))
+ int retried = 0;
+
+ do {
+ bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+ -1, PAGE_KERNEL);
+ if (bp->b_addr)
+ break;
+ vm_unmap_aliases();
+ } while (retried++ <= 1);
+
+ if (!bp->b_addr)
return -ENOMEM;
bp->b_addr += bp->b_offset;
bp->b_flags |= XBF_MAPPED;
@@ -568,9 +514,14 @@ found:
}
}
+ /*
+ * if the buffer is stale, clear all the external state associated with
+ * it. We need to keep flags such as how we allocated the buffer memory
+ * intact here.
+ */
if (bp->b_flags & XBF_STALE) {
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
- bp->b_flags &= XBF_MAPPED;
+ bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
}
trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -591,7 +542,7 @@ xfs_buf_get(
xfs_buf_flags_t flags)
{
xfs_buf_t *bp, *new_bp;
- int error = 0, i;
+ int error = 0;
new_bp = xfs_buf_allocate(flags);
if (unlikely(!new_bp))
@@ -599,7 +550,7 @@ xfs_buf_get(
bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
if (bp == new_bp) {
- error = _xfs_buf_lookup_pages(bp, flags);
+ error = xfs_buf_allocate_memory(bp, flags);
if (error)
goto no_buffer;
} else {
@@ -608,9 +559,6 @@ xfs_buf_get(
return NULL;
}
- for (i = 0; i < bp->b_page_count; i++)
- mark_page_accessed(bp->b_pages[i]);
-
if (!(bp->b_flags & XBF_MAPPED)) {
error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) {
@@ -711,8 +659,7 @@ xfs_buf_readahead(
{
struct backing_dev_info *bdi;
- bdi = target->bt_mapping->backing_dev_info;
- if (bdi_read_congested(bdi))
+ if (bdi_read_congested(target->bt_bdi))
return;
xfs_buf_read(target, ioff, isize,
@@ -790,10 +737,10 @@ xfs_buf_associate_memory(
size_t buflen;
int page_count;
- pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
+ pageaddr = (unsigned long)mem & PAGE_MASK;
offset = (unsigned long)mem - pageaddr;
- buflen = PAGE_CACHE_ALIGN(len + offset);
- page_count = buflen >> PAGE_CACHE_SHIFT;
+ buflen = PAGE_ALIGN(len + offset);
+ page_count = buflen >> PAGE_SHIFT;
/* Free any previous set of page pointers */
if (bp->b_pages)
@@ -810,13 +757,12 @@ xfs_buf_associate_memory(
for (i = 0; i < bp->b_page_count; i++) {
bp->b_pages[i] = mem_to_page((void *)pageaddr);
- pageaddr += PAGE_CACHE_SIZE;
+ pageaddr += PAGE_SIZE;
}
bp->b_count_desired = len;
bp->b_buffer_length = buflen;
bp->b_flags |= XBF_MAPPED;
- bp->b_flags &= ~_XBF_PAGE_LOCKED;
return 0;
}
@@ -923,20 +869,7 @@ xfs_buf_rele(
/*
- * Mutual exclusion on buffers. Locking model:
- *
- * Buffers associated with inodes for which buffer locking
- * is not enabled are not protected by semaphores, and are
- * assumed to be exclusively owned by the caller. There is a
- * spinlock in the buffer, used by the caller when concurrent
- * access is possible.
- */
-
-/*
- * Locks a buffer object, if it is not already locked. Note that this in
- * no way locks the underlying pages, so it is only useful for
- * synchronizing concurrent use of buffer objects, not for synchronizing
- * independent access to the underlying pages.
+ * Lock a buffer object, if it is not already locked.
*
* If we come across a stale, pinned, locked buffer, we know that we are
* being asked to lock a buffer that has been reallocated. Because it is
@@ -970,10 +903,7 @@ xfs_buf_lock_value(
}
/*
- * Locks a buffer object.
- * Note that this in no way locks the underlying pages, so it is only
- * useful for synchronizing concurrent use of buffer objects, not for
- * synchronizing independent access to the underlying pages.
+ * Lock a buffer object.
*
* If we come across a stale, pinned, locked buffer, we know that we
* are being asked to lock a buffer that has been reallocated. Because
@@ -990,7 +920,7 @@ xfs_buf_lock(
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
+ blk_flush_plug(current);
down(&bp->b_sema);
XB_SET_OWNER(bp);
@@ -1034,9 +964,7 @@ xfs_buf_wait_unpin(
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&bp->b_pin_count) == 0)
break;
- if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
- schedule();
+ io_schedule();
}
remove_wait_queue(&bp->b_waiters, &wait);
set_current_state(TASK_RUNNING);
@@ -1248,10 +1176,8 @@ _xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
{
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
- bp->b_flags &= ~_XBF_PAGE_LOCKED;
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
xfs_buf_ioend(bp, schedule);
- }
}
STATIC void
@@ -1260,35 +1186,12 @@ xfs_buf_bio_end_io(
int error)
{
xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
- unsigned int blocksize = bp->b_target->bt_bsize;
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
xfs_buf_ioerror(bp, -error);
if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
- do {
- struct page *page = bvec->bv_page;
-
- ASSERT(!PagePrivate(page));
- if (unlikely(bp->b_error)) {
- if (bp->b_flags & XBF_READ)
- ClearPageUptodate(page);
- } else if (blocksize >= PAGE_CACHE_SIZE) {
- SetPageUptodate(page);
- } else if (!PagePrivate(page) &&
- (bp->b_flags & _XBF_PAGE_CACHE)) {
- set_page_region(page, bvec->bv_offset, bvec->bv_len);
- }
-
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
-
- if (bp->b_flags & _XBF_PAGE_LOCKED)
- unlock_page(page);
- } while (bvec >= bio->bi_io_vec);
-
_xfs_buf_ioend(bp, 1);
bio_put(bio);
}
@@ -1302,7 +1205,6 @@ _xfs_buf_ioapply(
int offset = bp->b_offset;
int size = bp->b_count_desired;
sector_t sector = bp->b_bn;
- unsigned int blocksize = bp->b_target->bt_bsize;
total_nr_pages = bp->b_page_count;
map_i = 0;
@@ -1323,29 +1225,6 @@ _xfs_buf_ioapply(
(bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
}
- /* Special code path for reading a sub page size buffer in --
- * we populate up the whole page, and hence the other metadata
- * in the same page. This optimization is only valid when the
- * filesystem block size is not smaller than the page size.
- */
- if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
- ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
- (XBF_READ|_XBF_PAGE_LOCKED)) &&
- (blocksize >= PAGE_CACHE_SIZE)) {
- bio = bio_alloc(GFP_NOIO, 1);
-
- bio->bi_bdev = bp->b_target->bt_bdev;
- bio->bi_sector = sector - (offset >> BBSHIFT);
- bio->bi_end_io = xfs_buf_bio_end_io;
- bio->bi_private = bp;
-
- bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
- size = 0;
-
- atomic_inc(&bp->b_io_remaining);
-
- goto submit_io;
- }
next_chunk:
atomic_inc(&bp->b_io_remaining);
@@ -1359,8 +1238,9 @@ next_chunk:
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
+
for (; size && nr_pages; nr_pages--, map_i++) {
- int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
+ int rbytes, nbytes = PAGE_SIZE - offset;
if (nbytes > size)
nbytes = size;
@@ -1375,7 +1255,6 @@ next_chunk:
total_nr_pages--;
}
-submit_io:
if (likely(bio->bi_size)) {
if (xfs_buf_is_vmapped(bp)) {
flush_kernel_vmap_range(bp->b_addr,
@@ -1385,18 +1264,7 @@ submit_io:
if (size)
goto next_chunk;
} else {
- /*
- * if we get here, no pages were added to the bio. However,
- * we can't just error out here - if the pages are locked then
- * we have to unlock them otherwise we can hang on a later
- * access to the page.
- */
xfs_buf_ioerror(bp, EIO);
- if (bp->b_flags & _XBF_PAGE_LOCKED) {
- int i;
- for (i = 0; i < bp->b_page_count; i++)
- unlock_page(bp->b_pages[i]);
- }
bio_put(bio);
}
}
@@ -1442,7 +1310,7 @@ xfs_buf_iowait(
trace_xfs_buf_iowait(bp, _RET_IP_);
if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
+ blk_flush_plug(current);
wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1460,8 +1328,8 @@ xfs_buf_offset(
return XFS_BUF_PTR(bp) + offset;
offset += bp->b_offset;
- page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
- return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
+ page = bp->b_pages[offset >> PAGE_SHIFT];
+ return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}
/*
@@ -1483,9 +1351,9 @@ xfs_buf_iomove(
page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
cpoff = xfs_buf_poff(boff + bp->b_offset);
csize = min_t(size_t,
- PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
+ PAGE_SIZE-cpoff, bp->b_count_desired-boff);
- ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
+ ASSERT(((csize + cpoff) <= PAGE_SIZE));
switch (mode) {
case XBRW_ZERO:
@@ -1598,7 +1466,6 @@ xfs_free_buftarg(
xfs_flush_buftarg(btp, 1);
if (mp->m_flags & XFS_MOUNT_BARRIER)
xfs_blkdev_issue_flush(btp);
- iput(btp->bt_mapping->host);
kthread_stop(btp->bt_task);
kmem_free(btp);
@@ -1622,15 +1489,6 @@ xfs_setsize_buftarg_flags(
return EINVAL;
}
- if (verbose &&
- (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
- printk(KERN_WARNING
- "XFS: %u byte sectors in use on device %s. "
- "This is suboptimal; %u or greater is ideal.\n",
- sectorsize, XFS_BUFTARG_NAME(btp),
- (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
- }
-
return 0;
}
@@ -1645,7 +1503,7 @@ xfs_setsize_buftarg_early(
struct block_device *bdev)
{
return xfs_setsize_buftarg_flags(btp,
- PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
+ PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}
int
@@ -1658,41 +1516,6 @@ xfs_setsize_buftarg(
}
STATIC int
-xfs_mapping_buftarg(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
-{
- struct backing_dev_info *bdi;
- struct inode *inode;
- struct address_space *mapping;
- static const struct address_space_operations mapping_aops = {
- .sync_page = block_sync_page,
- .migratepage = fail_migrate_page,
- };
-
- inode = new_inode(bdev->bd_inode->i_sb);
- if (!inode) {
- printk(KERN_WARNING
- "XFS: Cannot allocate mapping inode for device %s\n",
- XFS_BUFTARG_NAME(btp));
- return ENOMEM;
- }
- inode->i_ino = get_next_ino();
- inode->i_mode = S_IFBLK;
- inode->i_bdev = bdev;
- inode->i_rdev = bdev->bd_dev;
- bdi = blk_get_backing_dev_info(bdev);
- if (!bdi)
- bdi = &default_backing_dev_info;
- mapping = &inode->i_data;
- mapping->a_ops = &mapping_aops;
- mapping->backing_dev_info = bdi;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
- btp->bt_mapping = mapping;
- return 0;
-}
-
-STATIC int
xfs_alloc_delwrite_queue(
xfs_buftarg_t *btp,
const char *fsname)
@@ -1720,12 +1543,14 @@ xfs_alloc_buftarg(
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
btp->bt_bdev = bdev;
+ btp->bt_bdi = blk_get_backing_dev_info(bdev);
+ if (!btp->bt_bdi)
+ goto error;
+
INIT_LIST_HEAD(&btp->bt_lru);
spin_lock_init(&btp->bt_lru_lock);
if (xfs_setsize_buftarg_early(btp, bdev))
goto error;
- if (xfs_mapping_buftarg(btp, bdev))
- goto error;
if (xfs_alloc_delwrite_queue(btp, fsname))
goto error;
btp->bt_shrinker.shrink = xfs_buftarg_shrink;
@@ -1947,7 +1772,7 @@ xfsbufd(
count++;
}
if (count)
- blk_run_address_space(target->bt_mapping);
+ blk_flush_plug(current);
} while (!kthread_should_stop());
@@ -1995,7 +1820,7 @@ xfs_flush_buftarg(
if (wait) {
/* Expedite and wait for IO to complete. */
- blk_run_address_space(target->bt_mapping);
+ blk_flush_plug(current);
while (!list_empty(&wait_list)) {
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
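
A central piece of this xfs_buf.c rework is xfs_buf_allocate_memory(): sub-page buffers now come from the heap rather than the page cache, but only if the allocation does not straddle a page boundary, because the buffer must still be describable as one page pointer plus an offset. A small userspace sketch of that page-span test (DEMO_PAGE_SIZE is an assumed 4096-byte page; the kernel uses PAGE_SIZE/PAGE_MASK):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE 4096UL                  /* assumed page size */
    #define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

    /* Same test as the new single-page fast path: a heap allocation is
     * only usable there if its first and last bytes land in one page. */
    static int spans_two_pages(uintptr_t addr, size_t len)
    {
        return (addr & DEMO_PAGE_MASK) !=
               ((addr + len - 1) & DEMO_PAGE_MASK);
    }

    int main(void)
    {
        uintptr_t base = 0x10000;                          /* page-aligned */
        printf("%d\n", spans_two_pages(base + 4000, 512)); /* 1: crosses */
        printf("%d\n", spans_two_pages(base + 128, 512));  /* 0: fits    */
        return 0;
    }
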
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index cbe65950e524..a9a1c4512645 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -61,30 +61,11 @@ typedef enum {
#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
/* flags used only internally */
-#define _XBF_PAGE_CACHE (1 << 17)/* backed by pagecache */
#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */
#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
+#define _XBF_KMEM (1 << 20)/* backed by heap memory */
#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */
-/*
- * Special flag for supporting metadata blocks smaller than a FSB.
- *
- * In this case we can have multiple xfs_buf_t on a single page and
- * need to lock out concurrent xfs_buf_t readers as they only
- * serialise access to the buffer.
- *
- * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
- * between reads of the page. Hence we can have one thread read the
- * page and modify it, but then race with another thread that thinks
- * the page is not up-to-date and hence reads it again.
- *
- * The result is that the first modifcation to the page is lost.
- * This sort of AGF/AGI reading race can happen when unlinking inodes
- * that require truncation and results in the AGI unlinked list
- * modifications being lost.
- */
-#define _XBF_PAGE_LOCKED (1 << 22)
-
typedef unsigned int xfs_buf_flags_t;
#define XFS_BUF_FLAGS \
@@ -100,12 +81,10 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_LOCK, "LOCK" }, /* should never be set */\
{ XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
{ XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
- { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
- { _XBF_DELWRI_Q, "DELWRI_Q" }, \
- { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }
-
+ { _XBF_KMEM, "KMEM" }, \
+ { _XBF_DELWRI_Q, "DELWRI_Q" }
typedef enum {
XBT_FORCE_SLEEP = 0,
@@ -120,7 +99,7 @@ typedef struct xfs_bufhash {
typedef struct xfs_buftarg {
dev_t bt_dev;
struct block_device *bt_bdev;
- struct address_space *bt_mapping;
+ struct backing_dev_info *bt_bdi;
struct xfs_mount *bt_mount;
unsigned int bt_bsize;
unsigned int bt_sshift;
@@ -139,17 +118,6 @@ typedef struct xfs_buftarg {
unsigned int bt_lru_nr;
} xfs_buftarg_t;
-/*
- * xfs_buf_t: Buffer structure for pagecache-based buffers
- *
- * This buffer structure is used by the pagecache buffer management routines
- * to refer to an assembly of pages forming a logical buffer.
- *
- * The buffer structure is used on a temporary basis only, and discarded when
- * released. The real data storage is recorded in the pagecache. Buffers are
- * hashed to the block device on which the file system resides.
- */
-
struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index a55c1b46b219..f4213ba1ff85 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -381,7 +381,7 @@ xfs_aio_write_isize_update(
/*
* If this was a direct or synchronous I/O that failed (such as ENOSPC) then
- * part of the I/O may have been written to disk before the error occured. In
+ * part of the I/O may have been written to disk before the error occurred. In
* this case the on-disk file size may have been adjusted beyond the in-memory
* file size and now needs to be truncated back.
*/
@@ -896,6 +896,7 @@ xfs_file_fallocate(
xfs_flock64_t bf;
xfs_inode_t *ip = XFS_I(inode);
int cmd = XFS_IOC_RESVSP;
+ int attr_flags = XFS_ATTR_NOLOCK;
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
@@ -918,7 +919,10 @@ xfs_file_fallocate(
goto out_unlock;
}
- error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
+ if (file->f_flags & O_DSYNC)
+ attr_flags |= XFS_ATTR_SYNC;
+
+ error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
if (error)
goto out_unlock;
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 0ca0e3c024d7..acca2c5ca3fa 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -624,6 +624,10 @@ xfs_ioc_space(
if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
attr_flags |= XFS_ATTR_NONBLOCK;
+
+ if (filp->f_flags & O_DSYNC)
+ attr_flags |= XFS_ATTR_SYNC;
+
if (ioflags & IO_INVIS)
attr_flags |= XFS_ATTR_DMI;
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 9ff7fc603d2f..dd21784525a8 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -70,7 +70,7 @@ xfs_synchronize_times(
/*
* If the linux inode is valid, mark it dirty.
- * Used when commiting a dirty inode into a transaction so that
+ * Used when committing a dirty inode into a transaction so that
* the inode will get written back by the linux code
*/
void
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 818c4cf2de86..1ba5c451da36 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1078,7 +1078,7 @@ xfs_fs_write_inode(
error = 0;
goto out_unlock;
}
- error = xfs_iflush(ip, 0);
+ error = xfs_iflush(ip, SYNC_TRYLOCK);
}
out_unlock:
@@ -1539,10 +1539,14 @@ xfs_fs_fill_super(
if (error)
goto out_free_sb;
- error = xfs_mountfs(mp);
- if (error)
- goto out_filestream_unmount;
-
+ /*
+ * we must configure the block size in the superblock before we run the
+ * full mount process, as the mount process can look up and cache inodes.
+ * For the same reason we must also initialise the syncd and register
+ * the inode cache shrinker so that inodes can be reclaimed during
+ * operations like a quotacheck that iterate all inodes in the
+ * filesystem.
+ */
sb->s_magic = XFS_SB_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
@@ -1550,6 +1554,16 @@ xfs_fs_fill_super(
sb->s_time_gran = 1;
set_posix_acl_flag(sb);
+ error = xfs_syncd_init(mp);
+ if (error)
+ goto out_filestream_unmount;
+
+ xfs_inode_shrinker_register(mp);
+
+ error = xfs_mountfs(mp);
+ if (error)
+ goto out_syncd_stop;
+
root = igrab(VFS_I(mp->m_rootip));
if (!root) {
error = ENOENT;
@@ -1565,14 +1579,11 @@ xfs_fs_fill_super(
goto fail_vnrele;
}
- error = xfs_syncd_init(mp);
- if (error)
- goto fail_vnrele;
-
- xfs_inode_shrinker_register(mp);
-
return 0;
+ out_syncd_stop:
+ xfs_inode_shrinker_unregister(mp);
+ xfs_syncd_stop(mp);
out_filestream_unmount:
xfs_filestream_unmount(mp);
out_free_sb:
@@ -1596,6 +1607,9 @@ xfs_fs_fill_super(
}
fail_unmount:
+ xfs_inode_shrinker_unregister(mp);
+ xfs_syncd_stop(mp);
+
/*
* Blow away any referenced inode in the filestreams cache.
* This can and will cause log traffic as inodes go inactive
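
The xfs_fs_fill_super() reordering moves xfs_syncd_init() and the inode shrinker registration ahead of xfs_mountfs(), per the new comment, so inodes cached during mount (e.g. by quotacheck) can be reclaimed; the error labels then unwind in the opposite order. A generic sketch of this kernel-idiomatic setup/unwind shape, with the stubbed functions as illustrative stand-ins for the real steps:

    #include <stdio.h>

    /* Illustrative stand-ins for the real steps (syncd init, shrinker
     * registration, xfs_mountfs); stubbed to succeed for the demo. */
    static int  setup_syncd(void)          { return 0; }
    static void stop_syncd(void)           { }
    static void register_shrinker_(void)   { }
    static void unregister_shrinker_(void) { }
    static int  do_mount(void)             { return 0; }

    /* Each fallible step jumps to a label that unwinds everything
     * initialised so far, in reverse order. Moving a step earlier (as
     * the hunk does with xfs_syncd_init) also means moving its teardown
     * into the later error paths. */
    static int demo_fill_super(void)
    {
        int error;

        error = setup_syncd();          /* fallible */
        if (error)
            goto out;

        register_shrinker_();           /* void, like the real call */

        error = do_mount();             /* may now cache inodes */
        if (error)
            goto out_syncd_stop;
        return 0;

    out_syncd_stop:
        unregister_shrinker_();
        stop_syncd();
    out:
        return error;
    }

    int main(void)
    {
        printf("fill_super: %d\n", demo_fill_super());
        return 0;
    }
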
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 6c10f1d2e3d3..9cf35a688f53 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -401,7 +401,7 @@ xfs_quiesce_fs(
/*
* Second stage of a quiesce. The data is already synced, now we have to take
* care of the metadata. New transactions are already blocked, so we need to
- * wait for any remaining transactions to drain out before proceding.
+ * wait for any remaining transactions to drain out before proceeding.
*/
void
xfs_quiesce_attr(
@@ -761,8 +761,10 @@ xfs_reclaim_inode(
struct xfs_perag *pag,
int sync_mode)
{
- int error = 0;
+ int error;
+restart:
+ error = 0;
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!xfs_iflock_nowait(ip)) {
if (!(sync_mode & SYNC_WAIT))
@@ -788,9 +790,31 @@ xfs_reclaim_inode(
if (xfs_inode_clean(ip))
goto reclaim;
- /* Now we have an inode that needs flushing */
- error = xfs_iflush(ip, sync_mode);
+ /*
+ * Now we have an inode that needs flushing.
+ *
+ * We do a nonblocking flush here even if we are doing a SYNC_WAIT
+ * reclaim as we can deadlock with inode cluster removal.
+ * xfs_ifree_cluster() can lock the inode buffer before it locks the
+ * ip->i_lock, and we are doing the exact opposite here. As a result,
+ * doing a blocking xfs_itobp() to get the cluster buffer will result
+ * in an ABBA deadlock with xfs_ifree_cluster().
+ *
+ * As xfs_ifree_cluster() must gather all inodes that are active in the
+ * cache to mark them stale; if we hit this case we don't actually want
+ * to do IO here - we want the inode marked stale so we can simply
+ * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush,
+ * just unlock the inode, back off and try again. Hopefully the next
+ * pass through will see the stale flag set on the inode.
+ */
+ error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
if (sync_mode & SYNC_WAIT) {
+ if (error == EAGAIN) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ /* backoff longer than in xfs_ifree_cluster */
+ delay(2);
+ goto restart;
+ }
xfs_iflock(ip);
goto reclaim;
}
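
The new comment documents a classic ABBA deadlock: xfs_ifree_cluster() locks the inode cluster buffer and then the inode, while reclaim holds the inode lock and wants the buffer, so reclaim flushes with SYNC_TRYLOCK and, on EAGAIN, unlocks, delays, and restarts. A small pthread sketch of the same trylock/back-off discipline (illustrative only, not the XFS code):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* "inode"  */
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* "buffer" */

    /* Acquire A then B without risking an ABBA deadlock against a thread
     * that takes B then A: never block on B while holding A. On failure,
     * drop A, back off, and retry from the top - the same shape as the
     * SYNC_TRYLOCK/EAGAIN loop in xfs_reclaim_inode(). */
    static void lock_both(void)
    {
        for (;;) {
            pthread_mutex_lock(&lock_a);
            if (pthread_mutex_trylock(&lock_b) == 0)
                return;                 /* got both */
            pthread_mutex_unlock(&lock_a);
            usleep(2000);               /* back off before retrying */
        }
    }

    int main(void)
    {
        lock_both();
        puts("acquired A then B deadlock-free");
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
        return 0;
    }
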
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 7e2416478503..6fa214603819 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -600,7 +600,7 @@ xfs_qm_dqread(
/*
* Reservation counters are defined as reservation plus current usage
- * to avoid having to add everytime.
+ * to avoid having to add every time.
*/
dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index 774d7ec6df8e..a0a829addca9 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -134,7 +134,7 @@ xfs_qm_newmount(
*/
if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
/*
- * If an error occured, qm_mount_quotas code
+ * If an error occurred, qm_mount_quotas code
* has already disabled quotas. So, just finish
* mounting, and get on with the boring life
* without disk quotas.
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index c82f06778a27..0d62a07b7fd8 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -172,7 +172,7 @@ xfs_qm_scall_quotaoff(
/*
* Next we make the changes in the quota flag in the mount struct.
* This isn't protected by a particular lock directly, because we
- * don't want to take a mrlock everytime we depend on quotas being on.
+ * don't want to take a mrlock every time we depend on quotas being on.
*/
mp->m_qflags &= ~(flags);
@@ -354,7 +354,7 @@ xfs_qm_scall_quotaon(
return XFS_ERROR(EINVAL);
}
/*
- * If everything's upto-date incore, then don't waste time.
+ * If everything's up to date incore, then don't waste time.
*/
if ((mp->m_qflags & flags) == flags)
return XFS_ERROR(EEXIST);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index e5413d96f1af..7b7e005e3dcc 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -992,7 +992,7 @@ xfs_buf_iodone_callbacks(
lasttarg = XFS_BUF_TARGET(bp);
/*
- * If the write was asynchronous then noone will be looking for the
+ * If the write was asynchronous then no one will be looking for the
* error. Clear the error state and write the buffer out again.
*
* During sync or umount we'll write all pending buffers again
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index da871f532236..a37480a6e023 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2789,7 +2789,7 @@ xfs_iflush(
/*
* We can't flush the inode until it is unpinned, so wait for it if we
- * are allowed to block. We know noone new can pin it, because we are
+ * are allowed to block. We know no one new can pin it, because we are
* holding the inode lock shared and you need to hold it exclusively to
* pin the inode.
*
@@ -2835,7 +2835,7 @@ xfs_iflush(
* Get the buffer containing the on-disk inode.
*/
error = xfs_itobp(mp, NULL, ip, &dip, &bp,
- (flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK);
+ (flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);
if (error || !bp) {
xfs_ifunlock(ip);
return error;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index f753200cef8d..ff4e2a30227d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -111,7 +111,7 @@ struct xfs_imap {
* Generally, we do not want to hold the i_rlock while holding the
* i_ilock. Hierarchy is i_iolock followed by i_rlock.
*
- * xfs_iptr_t contains all the inode fields upto and including the
+ * xfs_iptr_t contains all the inode fields up to and including the
* i_mnext and i_mprev fields, it is used as a marker in the inode
* chain off the mount structure by xfs_sync calls.
*/
@@ -336,7 +336,7 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
/*
* Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was choosen
+ * and using two 16bit values to hold new 32bit projid was chosen
* to retain compatibility with "old" filesystems).
*/
static inline prid_t
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index fd4f398bd6f1..46cc40131d4a 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -760,11 +760,11 @@ xfs_inode_item_push(
* Push the inode to it's backing buffer. This will not remove the
* inode from the AIL - a further push will be required to trigger a
* buffer push. However, this allows all the dirty inodes to be pushed
- * to the buffer before it is pushed to disk. THe buffer IO completion
- * will pull th einode from the AIL, mark it clean and unlock the flush
+ * to the buffer before it is pushed to disk. The buffer IO completion
+ * will pull the inode from the AIL, mark it clean and unlock the flush
* lock.
*/
- (void) xfs_iflush(ip, 0);
+ (void) xfs_iflush(ip, SYNC_TRYLOCK);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 15dbf1f9c2be..ffae692c9832 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -570,7 +570,7 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
* When we crack an atomic LSN, we sample it first so that the value will not
* change while we are cracking it into the component values. This means we
* will always get consistent component values to work from. This should always
- * be used to smaple and crack LSNs taht are stored and updated in atomic
+ * be used to sample and crack LSNs that are stored and updated in atomic
* variables.
*/
static inline void
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 0c4a5618e7af..5cc464a17c93 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -101,7 +101,7 @@ xlog_get_bp(
/*
* We do log I/O in units of log sectors (a power-of-2
* multiple of the basic block size), so we round up the
- * requested size to acommodate the basic blocks required
+ * requested size to accommodate the basic blocks required
* for complete log sectors.
*
* In addition, the buffer may be used for a non-sector-
@@ -112,7 +112,7 @@ xlog_get_bp(
* an issue. Nor will this be a problem if the log I/O is
* done in basic blocks (sector size 1). But otherwise we
* extend the buffer by one extra log sector to ensure
- * there's space to accomodate this possiblility.
+ * there's space to accommodate this possibility.
*/
if (nbblks > 1 && log->l_sectBBsize > 1)
nbblks += log->l_sectBBsize;
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 3bea66132334..03b3b7f85a3b 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -383,7 +383,8 @@ xfs_trans_read_buf(
bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
if (bp == NULL) {
*bpp = NULL;
- return 0;
+ return (flags & XBF_TRYLOCK) ?
+ 0 : XFS_ERROR(ENOMEM);
}
if (XFS_BUF_GETERROR(bp) != 0) {
XFS_BUF_SUPER_STALE(bp);
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 16084d8ea231..048b0c689d3e 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -81,7 +81,7 @@ xfs_trans_ijoin(
*
*
* Grabs a reference to the inode which will be dropped when the transaction
- * is commited. The inode will also be unlocked at that point. The inode
+ * is committed. The inode will also be unlocked at that point. The inode
* must be locked, and it cannot be associated with any transaction.
*/
void
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 37d8146ee15b..b7a5fe7c52c8 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -953,7 +953,7 @@ xfs_release(
* If we previously truncated this file and removed old data
* in the process, we want to initiate "early" writeout on
* the last close. This is an attempt to combat the notorious
- * NULL files problem which is particularly noticable from a
+ * NULL files problem which is particularly noticeable from a
* truncate down, buffered (re-)write (delalloc), followed by
* a crash. What we are effectively doing here is
* significantly reducing the time window where we'd otherwise
@@ -982,7 +982,7 @@ xfs_release(
*
* Further, check if the inode is being opened, written and
* closed frequently and we have delayed allocation blocks
- * oustanding (e.g. streaming writes from the NFS server),
+ * outstanding (e.g. streaming writes from the NFS server),
* truncating the blocks past EOF will cause fragmentation to
* occur.
*
@@ -2831,7 +2831,8 @@ xfs_change_file_space(
ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- xfs_trans_set_sync(tp);
+ if (attr_flags & XFS_ATTR_SYNC)
+ xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index f6702927eee4..3bcd23353d6c 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -18,6 +18,7 @@ int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
#define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */
#define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */
#define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */
+#define XFS_ATTR_SYNC 0x10 /* synchronous operation required */
int xfs_readlink(struct xfs_inode *ip, char *link);
int xfs_release(struct xfs_inode *ip);