author     David S. Miller <davem@davemloft.net>  2011-08-07 23:20:26 -0700
committer  David S. Miller <davem@davemloft.net>  2011-08-07 23:20:26 -0700
commit     19fd61785a580c60cba900c5171bfadb57dd5056 (patch)
tree       1e491fb014be0dc03f4b6755bb94e73afd38c455 /fs
parent     57569d0e12eaf31717e295960cd2a26f626c8e5b (diff)
parent     8028837d71ba9904b17281b40f94b93e947fbe38 (diff)
download   linux-19fd61785a580c60cba900c5171bfadb57dd5056.tar.bz2
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/acl.c | 6
-rw-r--r--  fs/9p/acl.h | 4
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 6
-rw-r--r--  fs/Kconfig | 15
-rw-r--r--  fs/block_dev.c | 5
-rw-r--r--  fs/btrfs/Makefile | 4
-rw-r--r--  fs/btrfs/acl.c | 27
-rw-r--r--  fs/btrfs/compression.c | 14
-rw-r--r--  fs/btrfs/ctree.h | 30
-rw-r--r--  fs/btrfs/dir-item.c | 30
-rw-r--r--  fs/btrfs/extent-tree.c | 45
-rw-r--r--  fs/btrfs/extent_io.c | 139
-rw-r--r--  fs/btrfs/extent_io.h | 20
-rw-r--r--  fs/btrfs/extent_map.c | 155
-rw-r--r--  fs/btrfs/file-item.c | 7
-rw-r--r--  fs/btrfs/file.c | 21
-rw-r--r--  fs/btrfs/inode.c | 145
-rw-r--r--  fs/btrfs/ioctl.c | 3
-rw-r--r--  fs/btrfs/ref-cache.c | 68
-rw-r--r--  fs/btrfs/ref-cache.h | 52
-rw-r--r--  fs/btrfs/root-tree.c | 5
-rw-r--r--  fs/btrfs/transaction.c | 65
-rw-r--r--  fs/btrfs/tree-log.c | 12
-rw-r--r--  fs/btrfs/volumes.c | 12
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 5
-rw-r--r--  fs/cifs/cifsencrypt.c | 16
-rw-r--r--  fs/cifs/cifsfs.c | 22
-rw-r--r--  fs/cifs/cifsfs.h | 4
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifssmb.c | 6
-rw-r--r--  fs/cifs/connect.c | 659
-rw-r--r--  fs/cifs/dns_resolve.c | 4
-rw-r--r--  fs/cifs/file.c | 27
-rw-r--r--  fs/cifs/inode.c | 14
-rw-r--r--  fs/cifs/misc.c | 11
-rw-r--r--  fs/cifs/transport.c | 2
-rw-r--r--  fs/compat_ioctl.c | 1
-rw-r--r--  fs/dcache.c | 72
-rw-r--r--  fs/exofs/Kbuild | 5
-rw-r--r--  fs/exofs/Kconfig | 4
-rw-r--r--  fs/exofs/exofs.h | 159
-rw-r--r--  fs/exofs/inode.c | 152
-rw-r--r--  fs/exofs/ore.c (renamed from fs/exofs/ios.c) | 370
-rw-r--r--  fs/exofs/pnfs.h | 45
-rw-r--r--  fs/exofs/super.c | 251
-rw-r--r--  fs/ext2/acl.c | 8
-rw-r--r--  fs/ext3/acl.c | 9
-rw-r--r--  fs/ext4/Makefile | 2
-rw-r--r--  fs/ext4/acl.c | 9
-rw-r--r--  fs/ext4/balloc.c | 48
-rw-r--r--  fs/ext4/block_validity.c | 21
-rw-r--r--  fs/ext4/ext4.h | 55
-rw-r--r--  fs/ext4/extents.c | 129
-rw-r--r--  fs/ext4/fsync.c | 26
-rw-r--r--  fs/ext4/ialloc.c | 2
-rw-r--r--  fs/ext4/indirect.c | 1482
-rw-r--r--  fs/ext4/inode.c | 1596
-rw-r--r--  fs/ext4/ioctl.c | 12
-rw-r--r--  fs/ext4/mballoc.c | 230
-rw-r--r--  fs/ext4/mballoc.h | 1
-rw-r--r--  fs/ext4/namei.c | 21
-rw-r--r--  fs/ext4/page-io.c | 6
-rw-r--r--  fs/ext4/resize.c | 199
-rw-r--r--  fs/ext4/super.c | 88
-rw-r--r--  fs/ext4/truncate.h | 43
-rw-r--r--  fs/generic_acl.c | 13
-rw-r--r--  fs/gfs2/acl.c | 6
-rw-r--r--  fs/hppfs/hppfs.c | 1
-rw-r--r--  fs/inode.c | 14
-rw-r--r--  fs/jbd2/checkpoint.c | 5
-rw-r--r--  fs/jbd2/journal.c | 67
-rw-r--r--  fs/jffs2/acl.c | 4
-rw-r--r--  fs/jffs2/acl.h | 2
-rw-r--r--  fs/jffs2/fs.c | 2
-rw-r--r--  fs/jffs2/os-linux.h | 2
-rw-r--r--  fs/jfs/acl.c | 4
-rw-r--r--  fs/jfs/xattr.c | 4
-rw-r--r--  fs/namei.c | 117
-rw-r--r--  fs/nfs/nfs3acl.c | 2
-rw-r--r--  fs/nfs/nfs3proc.c | 6
-rw-r--r--  fs/ocfs2/acl.c | 4
-rw-r--r--  fs/posix_acl.c | 16
-rw-r--r--  fs/proc/base.c | 12
-rw-r--r--  fs/pstore/inode.c | 12
-rw-r--r--  fs/pstore/internal.h | 2
-rw-r--r--  fs/pstore/platform.c | 30
-rw-r--r--  fs/reiserfs/xattr_acl.c | 10
-rw-r--r--  fs/stack.c | 5
-rw-r--r--  fs/stat.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_acl.c | 6
90 files changed, 3509 insertions, 3549 deletions
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index e9cb57f07546..9a1d42630751 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -182,11 +182,11 @@ int v9fs_set_create_acl(struct dentry *dentry,
return 0;
}
-int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl, struct posix_acl **pacl)
{
int retval = 0;
- mode_t mode = *modep;
+ umode_t mode = *modep;
struct posix_acl *acl = NULL;
if (!S_ISLNK(mode)) {
@@ -319,7 +319,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
case ACL_TYPE_ACCESS:
name = POSIX_ACL_XATTR_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
retval = posix_acl_equiv_mode(acl, &mode);
if (retval < 0)
goto err_out;
diff --git a/fs/9p/acl.h b/fs/9p/acl.h
index ddb7ae19d971..559556411965 100644
--- a/fs/9p/acl.h
+++ b/fs/9p/acl.h
@@ -20,7 +20,7 @@ extern struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type);
extern int v9fs_acl_chmod(struct dentry *);
extern int v9fs_set_create_acl(struct dentry *,
struct posix_acl **, struct posix_acl **);
-extern int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+extern int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl, struct posix_acl **pacl);
#else
#define v9fs_iop_get_acl NULL
@@ -38,7 +38,7 @@ static inline int v9fs_set_create_acl(struct dentry *dentry,
{
return 0;
}
-static inline int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+static inline int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl,
struct posix_acl **pacl)
{
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 9a26dce5a99f..b6c8ed205192 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -206,7 +206,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
int err = 0;
gid_t gid;
int flags;
- mode_t mode;
+ umode_t mode;
char *name = NULL;
struct file *filp;
struct p9_qid qid;
@@ -348,7 +348,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
struct p9_fid *fid = NULL, *dfid = NULL;
gid_t gid;
char *name;
- mode_t mode;
+ umode_t mode;
struct inode *inode;
struct p9_qid qid;
struct dentry *dir_dentry;
@@ -751,7 +751,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
int err;
gid_t gid;
char *name;
- mode_t mode;
+ umode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
diff --git a/fs/Kconfig b/fs/Kconfig
index 19891aab9c6e..9fe0b349f4cd 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -127,14 +127,21 @@ config TMPFS_POSIX_ACL
select TMPFS_XATTR
select GENERIC_ACL
help
- POSIX Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
+ POSIX Access Control Lists (ACLs) support additional access rights
+ for users and groups beyond the standard owner/group/world scheme,
+ and this option selects support for ACLs specifically for tmpfs
+ filesystems.
+
+ If you've selected TMPFS, it's possible that you'll also need
+ this option as there are a number of Linux distros that require
+ POSIX ACL support under /dev for certain features to work properly.
+ For example, some distros need this feature for ALSA-related /dev
+ files for sound to work properly. In short, if you're not sure,
+ say Y.
To learn more about Access Control Lists, visit the POSIX ACLs for
Linux website <http://acl.bestbits.at/>.
- If you don't know what Access Control Lists are, say N.
-
config TMPFS_XATTR
bool "Tmpfs extended attributes"
depends on TMPFS
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f55aad4d1611..ff77262e887c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -387,6 +387,10 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
struct inode *bd_inode = filp->f_mapping->host;
struct block_device *bdev = I_BDEV(bd_inode);
int error;
+
+ error = filemap_write_and_wait_range(filp->f_mapping, start, end);
+ if (error)
+ return error;
/*
* There is no need to serialise calls to blkdev_issue_flush with
@@ -552,6 +556,7 @@ struct block_device *bdget(dev_t dev)
if (inode->i_state & I_NEW) {
bdev->bd_contains = NULL;
+ bdev->bd_super = NULL;
bdev->bd_inode = inode;
bdev->bd_block_size = (1 << inode->i_blkbits);
bdev->bd_part_count = 0;
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 9b72dcf1cd25..40e6ac08c21f 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -6,5 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
- export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
+ export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
+
+btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 65a735d8f6e4..eb159aaa5a11 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -28,8 +28,6 @@
#include "btrfs_inode.h"
#include "xattr.h"
-#ifdef CONFIG_BTRFS_FS_POSIX_ACL
-
struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
{
int size;
@@ -111,7 +109,6 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
int ret, size = 0;
const char *name;
char *value = NULL;
- mode_t mode;
if (acl) {
ret = posix_acl_valid(acl);
@@ -122,13 +119,11 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
switch (type) {
case ACL_TYPE_ACCESS:
- mode = inode->i_mode;
name = POSIX_ACL_XATTR_ACCESS;
if (acl) {
- ret = posix_acl_equiv_mode(acl, &mode);
+ ret = posix_acl_equiv_mode(acl, &inode->i_mode);
if (ret < 0)
return ret;
- inode->i_mode = mode;
}
ret = 0;
break;
@@ -222,19 +217,16 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
}
if (IS_POSIXACL(dir) && acl) {
- mode_t mode = inode->i_mode;
-
if (S_ISDIR(inode->i_mode)) {
ret = btrfs_set_acl(trans, inode, acl,
ACL_TYPE_DEFAULT);
if (ret)
goto failed;
}
- ret = posix_acl_create(&acl, GFP_NOFS, &mode);
+ ret = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
if (ret < 0)
return ret;
- inode->i_mode = mode;
if (ret > 0) {
/* we need an acl */
ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
@@ -282,18 +274,3 @@ const struct xattr_handler btrfs_xattr_acl_access_handler = {
.get = btrfs_xattr_acl_get,
.set = btrfs_xattr_acl_set,
};
-
-#else /* CONFIG_BTRFS_FS_POSIX_ACL */
-
-int btrfs_acl_chmod(struct inode *inode)
-{
- return 0;
-}
-
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir)
-{
- return 0;
-}
-
-#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index bfe42b03eaf9..8ec5d86f1734 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -338,6 +338,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
u64 first_byte = disk_start;
struct block_device *bdev;
int ret;
+ int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
@@ -392,8 +393,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
- ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
- BUG_ON(ret);
+ if (!skip_sum) {
+ ret = btrfs_csum_one_bio(root, inode, bio,
+ start, 1);
+ BUG_ON(ret);
+ }
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
BUG_ON(ret);
@@ -418,8 +422,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
- ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
- BUG_ON(ret);
+ if (!skip_sum) {
+ ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+ BUG_ON(ret);
+ }
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
BUG_ON(ret);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 365c4e1dde04..0469263e327e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2406,8 +2406,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
-int btrfs_set_root_node(struct btrfs_root_item *item,
- struct extent_buffer *node);
+void btrfs_set_root_node(struct btrfs_root_item *item,
+ struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
/* dir-item.c */
@@ -2523,6 +2523,14 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
#define PageChecked PageFsMisc
#endif
+/* This forces readahead on a given range of bytes in an inode */
+static inline void btrfs_force_ra(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *file,
+ pgoff_t offset, unsigned long req_size)
+{
+ page_cache_sync_readahead(mapping, ra, file, offset, req_size);
+}
+
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
@@ -2551,9 +2559,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio, unsigned long bio_flags);
-unsigned long btrfs_force_ra(struct address_space *mapping,
- struct file_ra_state *ra, struct file *file,
- pgoff_t offset, pgoff_t last_index);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
@@ -2648,12 +2653,21 @@ do { \
/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
-#else
-#define btrfs_get_acl NULL
-#endif
int btrfs_init_acl(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir);
int btrfs_acl_chmod(struct inode *inode);
+#else
+#define btrfs_get_acl NULL
+static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
+{
+ return 0;
+}
+static inline int btrfs_acl_chmod(struct inode *inode)
+{
+ return 0;
+}
+#endif
/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index c360a848d97f..31d84e78129b 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -198,8 +198,6 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
key.objectid = dir;
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
@@ -209,18 +207,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
return ERR_PTR(ret);
- if (ret > 0) {
- if (path->slots[0] == 0)
- return NULL;
- path->slots[0]--;
- }
-
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
- if (found_key.objectid != dir ||
- btrfs_key_type(&found_key) != BTRFS_DIR_ITEM_KEY ||
- found_key.offset != key.offset)
+ if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
@@ -315,8 +302,6 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
key.objectid = dir;
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
@@ -324,18 +309,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
return ERR_PTR(ret);
- if (ret > 0) {
- if (path->slots[0] == 0)
- return NULL;
- path->slots[0]--;
- }
-
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
- if (found_key.objectid != dir ||
- btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY ||
- found_key.offset != key.offset)
+ if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4d08ed79405d..66bac226944e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -663,7 +663,9 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
struct btrfs_path *path;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
key.objectid = start;
key.offset = len;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
@@ -3272,6 +3274,9 @@ again:
}
ret = btrfs_alloc_chunk(trans, extent_root, flags);
+ if (ret < 0 && ret != -ENOSPC)
+ goto out;
+
spin_lock(&space_info->lock);
if (ret)
space_info->full = 1;
@@ -3281,6 +3286,7 @@ again:
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
+out:
mutex_unlock(&extent_root->fs_info->chunk_mutex);
return ret;
}
@@ -4456,7 +4462,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
printk(KERN_ERR "umm, got %d back from search"
", was looking for %llu\n", ret,
(unsigned long long)bytenr);
- btrfs_print_leaf(extent_root, path->nodes[0]);
+ if (ret > 0)
+ btrfs_print_leaf(extent_root,
+ path->nodes[0]);
}
BUG_ON(ret);
extent_slot = path->slots[0];
@@ -5073,7 +5081,9 @@ have_block_group:
* group is does point to and try again
*/
if (!last_ptr_loop && last_ptr->block_group &&
- last_ptr->block_group != block_group) {
+ last_ptr->block_group != block_group &&
+ index <=
+ get_block_group_index(last_ptr->block_group)) {
btrfs_put_block_group(block_group);
block_group = last_ptr->block_group;
@@ -5501,7 +5511,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -6272,10 +6283,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
wc = kzalloc(sizeof(*wc), GFP_NOFS);
- BUG_ON(!wc);
+ if (!wc) {
+ btrfs_free_path(path);
+ return -ENOMEM;
+ }
trans = btrfs_start_transaction(tree_root, 0);
BUG_ON(IS_ERR(trans));
@@ -6538,8 +6553,6 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
u64 min_allocable_bytes;
int ret = -ENOSPC;
- if (cache->ro)
- return 0;
/*
* We need some metadata space and system metadata space for
@@ -6555,6 +6568,12 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
+
+ if (cache->ro) {
+ ret = 0;
+ goto out;
+ }
+
num_bytes = cache->key.offset - cache->reserved - cache->pinned -
cache->bytes_super - btrfs_block_group_used(&cache->item);
@@ -6568,7 +6587,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
cache->ro = 1;
ret = 0;
}
-
+out:
spin_unlock(&cache->lock);
spin_unlock(&sinfo->lock);
return ret;
@@ -7183,11 +7202,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cluster->refill_lock);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
inode = lookup_free_space_inode(root, block_group, path);
if (!IS_ERR(inode)) {
- btrfs_orphan_add(trans, inode);
+ ret = btrfs_orphan_add(trans, inode);
+ BUG_ON(ret);
clear_nlink(inode);
/* One for the block groups ref */
spin_lock(&block_group->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 067b1747421b..d418164a35f1 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -254,14 +254,14 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
*
* This should be called with the tree lock held.
*/
-static int merge_state(struct extent_io_tree *tree,
- struct extent_state *state)
+static void merge_state(struct extent_io_tree *tree,
+ struct extent_state *state)
{
struct extent_state *other;
struct rb_node *other_node;
if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
- return 0;
+ return;
other_node = rb_prev(&state->rb_node);
if (other_node) {
@@ -287,19 +287,13 @@ static int merge_state(struct extent_io_tree *tree,
free_extent_state(other);
}
}
-
- return 0;
}
-static int set_state_cb(struct extent_io_tree *tree,
+static void set_state_cb(struct extent_io_tree *tree,
struct extent_state *state, int *bits)
{
- if (tree->ops && tree->ops->set_bit_hook) {
- return tree->ops->set_bit_hook(tree->mapping->host,
- state, bits);
- }
-
- return 0;
+ if (tree->ops && tree->ops->set_bit_hook)
+ tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}
static void clear_state_cb(struct extent_io_tree *tree,
@@ -309,6 +303,9 @@ static void clear_state_cb(struct extent_io_tree *tree,
tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}
+static void set_state_bits(struct extent_io_tree *tree,
+ struct extent_state *state, int *bits);
+
/*
* insert an extent_state struct into the tree. 'bits' are set on the
* struct before it is inserted.
@@ -324,8 +321,6 @@ static int insert_state(struct extent_io_tree *tree,
int *bits)
{
struct rb_node *node;
- int bits_to_set = *bits & ~EXTENT_CTLBITS;
- int ret;
if (end < start) {
printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -335,13 +330,9 @@ static int insert_state(struct extent_io_tree *tree,
}
state->start = start;
state->end = end;
- ret = set_state_cb(tree, state, bits);
- if (ret)
- return ret;
- if (bits_to_set & EXTENT_DIRTY)
- tree->dirty_bytes += end - start + 1;
- state->state |= bits_to_set;
+ set_state_bits(tree, state, bits);
+
node = tree_insert(&tree->state, end, &state->rb_node);
if (node) {
struct extent_state *found;
@@ -357,13 +348,11 @@ static int insert_state(struct extent_io_tree *tree,
return 0;
}
-static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
+static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
u64 split)
{
if (tree->ops && tree->ops->split_extent_hook)
- return tree->ops->split_extent_hook(tree->mapping->host,
- orig, split);
- return 0;
+ tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}
/*
@@ -659,34 +648,25 @@ again:
if (start > end)
break;
- if (need_resched()) {
- spin_unlock(&tree->lock);
- cond_resched();
- spin_lock(&tree->lock);
- }
+ cond_resched_lock(&tree->lock);
}
out:
spin_unlock(&tree->lock);
return 0;
}
-static int set_state_bits(struct extent_io_tree *tree,
+static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state,
int *bits)
{
- int ret;
int bits_to_set = *bits & ~EXTENT_CTLBITS;
- ret = set_state_cb(tree, state, bits);
- if (ret)
- return ret;
+ set_state_cb(tree, state, bits);
if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
u64 range = state->end - state->start + 1;
tree->dirty_bytes += range;
}
state->state |= bits_to_set;
-
- return 0;
}
static void cache_state(struct extent_state *state,
@@ -779,9 +759,7 @@ hit_next:
goto out;
}
- err = set_state_bits(tree, state, &bits);
- if (err)
- goto out;
+ set_state_bits(tree, state, &bits);
cache_state(state, cached_state);
merge_state(tree, state);
@@ -830,9 +808,7 @@ hit_next:
if (err)
goto out;
if (state->end <= end) {
- err = set_state_bits(tree, state, &bits);
- if (err)
- goto out;
+ set_state_bits(tree, state, &bits);
cache_state(state, cached_state);
merge_state(tree, state);
if (last_end == (u64)-1)
@@ -893,11 +869,7 @@ hit_next:
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
- err = set_state_bits(tree, prealloc, &bits);
- if (err) {
- prealloc = NULL;
- goto out;
- }
+ set_state_bits(tree, prealloc, &bits);
cache_state(prealloc, cached_state);
merge_state(tree, prealloc);
prealloc = NULL;
@@ -1059,46 +1031,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
return 0;
}
-/*
- * find the first offset in the io tree with 'bits' set. zero is
- * returned if we find something, and *start_ret and *end_ret are
- * set to reflect the state struct that was found.
- *
- * If nothing was found, 1 is returned, < 0 on error
- */
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, int bits)
-{
- struct rb_node *node;
- struct extent_state *state;
- int ret = 1;
-
- spin_lock(&tree->lock);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(tree, start);
- if (!node)
- goto out;
-
- while (1) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->end >= start && (state->state & bits)) {
- *start_ret = state->start;
- *end_ret = state->end;
- ret = 0;
- break;
- }
- node = rb_next(node);
- if (!node)
- break;
- }
-out:
- spin_unlock(&tree->lock);
- return ret;
-}
-
/* find the first state struct with 'bits' set after 'start', and
* return it. tree->lock must be held. NULL will returned if
* nothing was found after 'start'
@@ -1131,6 +1063,30 @@ out:
}
/*
+ * find the first offset in the io tree with 'bits' set. zero is
+ * returned if we find something, and *start_ret and *end_ret are
+ * set to reflect the state struct that was found.
+ *
+ * If nothing was found, 1 is returned, < 0 on error
+ */
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, int bits)
+{
+ struct extent_state *state;
+ int ret = 1;
+
+ spin_lock(&tree->lock);
+ state = find_first_extent_bit_state(tree, start, bits);
+ if (state) {
+ *start_ret = state->start;
+ *end_ret = state->end;
+ ret = 0;
+ }
+ spin_unlock(&tree->lock);
+ return ret;
+}
+
+/*
* find a contiguous range of bytes in the file marked as delalloc, not
* more than 'max_bytes'. start and end are used to return the range,
*
@@ -2546,7 +2502,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
struct writeback_control *wbc)
{
int ret;
- struct address_space *mapping = page->mapping;
struct extent_page_data epd = {
.bio = NULL,
.tree = tree,
@@ -2554,17 +2509,9 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
- struct writeback_control wbc_writepages = {
- .sync_mode = wbc->sync_mode,
- .nr_to_write = 64,
- .range_start = page_offset(page) + PAGE_CACHE_SIZE,
- .range_end = (loff_t)-1,
- };
ret = __extent_writepage(page, wbc, &epd);
- extent_write_cache_pages(tree, mapping, &wbc_writepages,
- __extent_writepage, &epd, flush_write_bio);
flush_epd_write_bio(&epd);
return ret;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 21a7ca9e7282..7b2f0c3e7929 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -76,15 +76,15 @@ struct extent_io_ops {
struct extent_state *state);
int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate);
- int (*set_bit_hook)(struct inode *inode, struct extent_state *state,
- int *bits);
- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
- int *bits);
- int (*merge_extent_hook)(struct inode *inode,
- struct extent_state *new,
- struct extent_state *other);
- int (*split_extent_hook)(struct inode *inode,
- struct extent_state *orig, u64 split);
+ void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
+ int *bits);
+ void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
+ int *bits);
+ void (*merge_extent_hook)(struct inode *inode,
+ struct extent_state *new,
+ struct extent_state *other);
+ void (*split_extent_hook)(struct inode *inode,
+ struct extent_state *orig, u64 split);
int (*write_cache_pages_lock_hook)(struct page *page);
};
@@ -108,8 +108,6 @@ struct extent_state {
wait_queue_head_t wq;
atomic_t refs;
unsigned long state;
- u64 split_start;
- u64 split_end;
/* for use by the FS */
u64 private;
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2d0410344ea3..7c97b3301459 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -183,22 +183,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
return 0;
}
-int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
- int ret = 0;
struct extent_map *merge = NULL;
struct rb_node *rb;
- struct extent_map *em;
-
- write_lock(&tree->lock);
- em = lookup_extent_mapping(tree, start, len);
-
- WARN_ON(!em || em->start != start);
-
- if (!em)
- goto out;
-
- clear_bit(EXTENT_FLAG_PINNED, &em->flags);
if (em->start != 0) {
rb = rb_prev(&em->rb_node);
@@ -225,6 +213,24 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
merge->in_tree = 0;
free_extent_map(merge);
}
+}
+
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+{
+ int ret = 0;
+ struct extent_map *em;
+
+ write_lock(&tree->lock);
+ em = lookup_extent_mapping(tree, start, len);
+
+ WARN_ON(!em || em->start != start);
+
+ if (!em)
+ goto out;
+
+ clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+ try_merge_map(tree, em);
free_extent_map(em);
out:
@@ -247,7 +253,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em)
{
int ret = 0;
- struct extent_map *merge = NULL;
struct rb_node *rb;
struct extent_map *exist;
@@ -263,30 +268,8 @@ int add_extent_mapping(struct extent_map_tree *tree,
goto out;
}
atomic_inc(&em->refs);
- if (em->start != 0) {
- rb = rb_prev(&em->rb_node);
- if (rb)
- merge = rb_entry(rb, struct extent_map, rb_node);
- if (rb && mergable_maps(merge, em)) {
- em->start = merge->start;
- em->len += merge->len;
- em->block_len += merge->block_len;
- em->block_start = merge->block_start;
- merge->in_tree = 0;
- rb_erase(&merge->rb_node, &tree->map);
- free_extent_map(merge);
- }
- }
- rb = rb_next(&em->rb_node);
- if (rb)
- merge = rb_entry(rb, struct extent_map, rb_node);
- if (rb && mergable_maps(em, merge)) {
- em->len += merge->len;
- em->block_len += merge->len;
- rb_erase(&merge->rb_node, &tree->map);
- merge->in_tree = 0;
- free_extent_map(merge);
- }
+
+ try_merge_map(tree, em);
out:
return ret;
}
@@ -299,19 +282,8 @@ static u64 range_end(u64 start, u64 len)
return start + len;
}
-/**
- * lookup_extent_mapping - lookup extent_map
- * @tree: tree to lookup in
- * @start: byte offset to start the search
- * @len: length of the lookup range
- *
- * Find and return the first extent_map struct in @tree that intersects the
- * [start, len] range. There may be additional objects in the tree that
- * intersect, so check the object returned carefully to make sure that no
- * additional lookups are needed.
- */
-struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len)
+struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len, int strict)
{
struct extent_map *em;
struct rb_node *rb_node;
@@ -320,38 +292,42 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 end = range_end(start, len);
rb_node = __tree_search(&tree->map, start, &prev, &next);
- if (!rb_node && prev) {
- em = rb_entry(prev, struct extent_map, rb_node);
- if (end > em->start && start < extent_map_end(em))
- goto found;
- }
- if (!rb_node && next) {
- em = rb_entry(next, struct extent_map, rb_node);
- if (end > em->start && start < extent_map_end(em))
- goto found;
- }
if (!rb_node) {
- em = NULL;
- goto out;
- }
- if (IS_ERR(rb_node)) {
- em = ERR_CAST(rb_node);
- goto out;
+ if (prev)
+ rb_node = prev;
+ else if (next)
+ rb_node = next;
+ else
+ return NULL;
}
+
em = rb_entry(rb_node, struct extent_map, rb_node);
- if (end > em->start && start < extent_map_end(em))
- goto found;
- em = NULL;
- goto out;
+ if (strict && !(end > em->start && start < extent_map_end(em)))
+ return NULL;
-found:
atomic_inc(&em->refs);
-out:
return em;
}
/**
+ * lookup_extent_mapping - lookup extent_map
+ * @tree: tree to lookup in
+ * @start: byte offset to start the search
+ * @len: length of the lookup range
+ *
+ * Find and return the first extent_map struct in @tree that intersects the
+ * [start, len] range. There may be additional objects in the tree that
+ * intersect, so check the object returned carefully to make sure that no
+ * additional lookups are needed.
+ */
+struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len)
+{
+ return __lookup_extent_mapping(tree, start, len, 1);
+}
+
+/**
* search_extent_mapping - find a nearby extent map
* @tree: tree to lookup in
* @start: byte offset to start the search
@@ -365,38 +341,7 @@ out:
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len)
{
- struct extent_map *em;
- struct rb_node *rb_node;
- struct rb_node *prev = NULL;
- struct rb_node *next = NULL;
-
- rb_node = __tree_search(&tree->map, start, &prev, &next);
- if (!rb_node && prev) {
- em = rb_entry(prev, struct extent_map, rb_node);
- goto found;
- }
- if (!rb_node && next) {
- em = rb_entry(next, struct extent_map, rb_node);
- goto found;
- }
- if (!rb_node) {
- em = NULL;
- goto out;
- }
- if (IS_ERR(rb_node)) {
- em = ERR_CAST(rb_node);
- goto out;
- }
- em = rb_entry(rb_node, struct extent_map, rb_node);
- goto found;
-
- em = NULL;
- goto out;
-
-found:
- atomic_inc(&em->refs);
-out:
- return em;
+ return __lookup_extent_mapping(tree, start, len, 0);
}
/**
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 08bcfa92a222..b910694f61ed 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -291,7 +291,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
if (search_commit) {
path->skip_locking = 1;
@@ -677,7 +678,9 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
btrfs_super_csum_size(&root->fs_info->super_copy);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
sector_sum = sums->sums;
again:
next_offset = (u64)-1;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a35e51c9f235..658d66959abe 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -74,7 +74,7 @@ struct inode_defrag {
* If an existing record is found the defrag item you
* pass in is freed
*/
-static int __btrfs_add_inode_defrag(struct inode *inode,
+static void __btrfs_add_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -106,11 +106,11 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
BTRFS_I(inode)->in_defrag = 1;
rb_link_node(&defrag->rb_node, parent, p);
rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
- return 0;
+ return;
exists:
kfree(defrag);
- return 0;
+ return;
}
@@ -123,7 +123,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct inode_defrag *defrag;
- int ret = 0;
u64 transid;
if (!btrfs_test_opt(root, AUTO_DEFRAG))
@@ -150,9 +149,9 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->defrag_inodes_lock);
if (!BTRFS_I(inode)->in_defrag)
- ret = __btrfs_add_inode_defrag(inode, defrag);
+ __btrfs_add_inode_defrag(inode, defrag);
spin_unlock(&root->fs_info->defrag_inodes_lock);
- return ret;
+ return 0;
}
/*
@@ -855,7 +854,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
btrfs_drop_extent_cache(inode, start, end - 1, 0);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
again:
recow = 0;
split = start;
@@ -1059,7 +1059,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos)
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
struct page **pages, size_t num_pages,
loff_t pos, unsigned long first_index,
- unsigned long last_index, size_t write_bytes)
+ size_t write_bytes)
{
struct extent_state *cached_state = NULL;
int i;
@@ -1159,7 +1159,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
unsigned long first_index;
- unsigned long last_index;
size_t num_written = 0;
int nrptrs;
int ret = 0;
@@ -1172,7 +1171,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
return -ENOMEM;
first_index = pos >> PAGE_CACHE_SHIFT;
- last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;
while (iov_iter_count(i) > 0) {
size_t offset = pos & (PAGE_CACHE_SIZE - 1);
@@ -1206,8 +1204,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
* contents of pages from loop to loop
*/
ret = prepare_pages(root, file, pages, num_pages,
- pos, first_index, last_index,
- write_bytes);
+ pos, first_index, write_bytes);
if (ret) {
btrfs_delalloc_release_space(inode,
num_pages << PAGE_CACHE_SHIFT);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 13e6255182e3..15fceefbca0a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1061,7 +1061,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
nolock = btrfs_is_free_space_inode(root, inode);
@@ -1282,17 +1283,16 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
return ret;
}
-static int btrfs_split_extent_hook(struct inode *inode,
- struct extent_state *orig, u64 split)
+static void btrfs_split_extent_hook(struct inode *inode,
+ struct extent_state *orig, u64 split)
{
/* not delalloc, ignore it */
if (!(orig->state & EXTENT_DELALLOC))
- return 0;
+ return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
- return 0;
}
/*
@@ -1301,18 +1301,17 @@ static int btrfs_split_extent_hook(struct inode *inode,
* extents, such as when we are doing sequential writes, so we can properly
* account for the metadata space we'll need.
*/
-static int btrfs_merge_extent_hook(struct inode *inode,
- struct extent_state *new,
- struct extent_state *other)
+static void btrfs_merge_extent_hook(struct inode *inode,
+ struct extent_state *new,
+ struct extent_state *other)
{
/* not delalloc, ignore it */
if (!(other->state & EXTENT_DELALLOC))
- return 0;
+ return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
- return 0;
}
/*
@@ -1320,8 +1319,8 @@ static int btrfs_merge_extent_hook(struct inode *inode,
* bytes in this file, and to maintain the list of inodes that
* have pending delalloc work to be done.
*/
-static int btrfs_set_bit_hook(struct inode *inode,
- struct extent_state *state, int *bits)
+static void btrfs_set_bit_hook(struct inode *inode,
+ struct extent_state *state, int *bits)
{
/*
@@ -1351,14 +1350,13 @@ static int btrfs_set_bit_hook(struct inode *inode,
}
spin_unlock(&root->fs_info->delalloc_lock);
}
- return 0;
}
/*
* extent_io.c clear_bit_hook, see set_bit_hook for why
*/
-static int btrfs_clear_bit_hook(struct inode *inode,
- struct extent_state *state, int *bits)
+static void btrfs_clear_bit_hook(struct inode *inode,
+ struct extent_state *state, int *bits)
{
/*
* set_bit and clear bit hooks normally require _irqsave/restore
@@ -1395,7 +1393,6 @@ static int btrfs_clear_bit_hook(struct inode *inode,
}
spin_unlock(&root->fs_info->delalloc_lock);
}
- return 0;
}
/*
@@ -1645,7 +1642,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
int ret;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
path->leave_spinning = 1;
@@ -2215,7 +2213,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
if (!root->orphan_block_rsv) {
block_rsv = btrfs_alloc_block_rsv(root);
- BUG_ON(!block_rsv);
+ if (!block_rsv)
+ return -ENOMEM;
}
spin_lock(&root->orphan_lock);
@@ -2517,7 +2516,9 @@ static void btrfs_read_locked_inode(struct inode *inode)
filled = true;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ goto make_bad;
+
path->leave_spinning = 1;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
@@ -2998,13 +2999,16 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len);
- BUG_ON(ret);
+ if (ret)
+ goto out;
if (inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, inode);
- BUG_ON(ret);
+ if (ret)
+ goto out;
}
+out:
nr = trans->blocks_used;
__unlink_end_trans(trans, root);
btrfs_btree_balance_dirty(root, nr);
@@ -3147,6 +3151,11 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->reada = -1;
+
if (root->ref_cows || root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
@@ -3159,10 +3168,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
if (min_type == 0 && root == BTRFS_I(inode)->root)
btrfs_kill_delayed_inode_items(inode);
- path = btrfs_alloc_path();
- BUG_ON(!path);
- path->reada = -1;
-
key.objectid = ino;
key.offset = (u64)-1;
key.type = (u8)-1;
@@ -3690,7 +3695,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
int ret = 0;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
namelen, 0);
@@ -3946,6 +3952,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *new)
{
struct inode *inode;
+ int bad_inode = 0;
inode = btrfs_iget_locked(s, location->objectid, root);
if (!inode)
@@ -3955,10 +3962,19 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
btrfs_read_locked_inode(inode);
- inode_tree_add(inode);
- unlock_new_inode(inode);
- if (new)
- *new = 1;
+ if (!is_bad_inode(inode)) {
+ inode_tree_add(inode);
+ unlock_new_inode(inode);
+ if (new)
+ *new = 1;
+ } else {
+ bad_inode = 1;
+ }
+ }
+
+ if (bad_inode) {
+ iput(inode);
+ inode = ERR_PTR(-ESTALE);
}
return inode;
@@ -3993,12 +4009,19 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
struct btrfs_root *sub_root = root;
struct btrfs_key location;
int index;
- int ret;
+ int ret = 0;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- ret = btrfs_inode_by_name(dir, dentry, &location);
+ if (unlikely(d_need_lookup(dentry))) {
+ memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
+ kfree(dentry->d_fsdata);
+ dentry->d_fsdata = NULL;
+ d_clear_need_lookup(dentry);
+ } else {
+ ret = btrfs_inode_by_name(dir, dentry, &location);
+ }
if (ret < 0)
return ERR_PTR(ret);
@@ -4053,6 +4076,12 @@ static int btrfs_dentry_delete(const struct dentry *dentry)
return 0;
}
+static void btrfs_dentry_release(struct dentry *dentry)
+{
+ if (dentry->d_fsdata)
+ kfree(dentry->d_fsdata);
+}
+
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
@@ -4075,6 +4104,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
struct btrfs_path *path;
struct list_head ins_list;
struct list_head del_list;
+ struct qstr q;
int ret;
struct extent_buffer *leaf;
int slot;
@@ -4164,6 +4194,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
while (di_cur < di_total) {
struct btrfs_key location;
+ struct dentry *tmp;
if (verify_dir_item(root, leaf, di))
break;
@@ -4184,6 +4215,33 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
btrfs_dir_item_key_to_cpu(leaf, di, &location);
+ q.name = name_ptr;
+ q.len = name_len;
+ q.hash = full_name_hash(q.name, q.len);
+ tmp = d_lookup(filp->f_dentry, &q);
+ if (!tmp) {
+ struct btrfs_key *newkey;
+
+ newkey = kzalloc(sizeof(struct btrfs_key),
+ GFP_NOFS);
+ if (!newkey)
+ goto no_dentry;
+ tmp = d_alloc(filp->f_dentry, &q);
+ if (!tmp) {
+ kfree(newkey);
+ dput(tmp);
+ goto no_dentry;
+ }
+ memcpy(newkey, &location,
+ sizeof(struct btrfs_key));
+ tmp->d_fsdata = newkey;
+ tmp->d_flags |= DCACHE_NEED_LOOKUP;
+ d_rehash(tmp);
+ dput(tmp);
+ } else {
+ dput(tmp);
+ }
+no_dentry:
/* is this a reference to our own snapshot? If so
* skip it
*/
@@ -4409,7 +4467,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
int owner;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return ERR_PTR(-ENOMEM);
inode = new_inode(root->fs_info->sb);
if (!inode) {
@@ -6669,19 +6728,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
return 0;
}
-/* helper function for file defrag and space balancing. This
- * forces readahead on a given range of bytes in an inode
- */
-unsigned long btrfs_force_ra(struct address_space *mapping,
- struct file_ra_state *ra, struct file *file,
- pgoff_t offset, pgoff_t last_index)
-{
- pgoff_t req_size = last_index - offset + 1;
-
- page_cache_sync_readahead(mapping, ra, file, offset, req_size);
- return offset + req_size;
-}
-
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
struct btrfs_inode *ei;
@@ -7164,7 +7210,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ err = -ENOMEM;
+ drop_inode = 1;
+ goto out_unlock;
+ }
key.objectid = btrfs_ino(inode);
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
@@ -7430,4 +7480,5 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
const struct dentry_operations btrfs_dentry_operations = {
.d_delete = btrfs_dentry_delete,
+ .d_release = btrfs_dentry_release,
};
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0b980afc5edd..7cf013349941 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1749,11 +1749,10 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
-
}
if (ptr < name)
goto out;
- memcpy(name, ptr, total_len);
+ memmove(name, ptr, total_len);
name[total_len]='\0';
ret = 0;
out:
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
deleted file mode 100644
index 82d569cb6267..000000000000
--- a/fs/btrfs/ref-cache.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2008 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/sort.h>
-#include "ctree.h"
-#include "ref-cache.h"
-#include "transaction.h"
-
-static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
- struct rb_node *node)
-{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct btrfs_leaf_ref *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
-
- if (bytenr < entry->bytenr)
- p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
- p = &(*p)->rb_right;
- else
- return parent;
- }
-
- entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
- return NULL;
-}
-
-static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
-{
- struct rb_node *n = root->rb_node;
- struct btrfs_leaf_ref *entry;
-
- while (n) {
- entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
- WARN_ON(!entry->in_tree);
-
- if (bytenr < entry->bytenr)
- n = n->rb_left;
- else if (bytenr > entry->bytenr)
- n = n->rb_right;
- else
- return n;
- }
- return NULL;
-}
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
deleted file mode 100644
index 24f7001f6387..000000000000
--- a/fs/btrfs/ref-cache.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2008 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-#ifndef __REFCACHE__
-#define __REFCACHE__
-
-struct btrfs_extent_info {
- /* bytenr and num_bytes find the extent in the extent allocation tree */
- u64 bytenr;
- u64 num_bytes;
-
- /* objectid and offset find the back reference for the file */
- u64 objectid;
- u64 offset;
-};
-
-struct btrfs_leaf_ref {
- struct rb_node rb_node;
- struct btrfs_leaf_ref_tree *tree;
- int in_tree;
- atomic_t usage;
-
- u64 root_gen;
- u64 bytenr;
- u64 owner;
- u64 generation;
- int nritems;
-
- struct list_head list;
- struct btrfs_extent_info extents[];
-};
-
-static inline size_t btrfs_leaf_ref_size(int nr_extents)
-{
- return sizeof(struct btrfs_leaf_ref) +
- sizeof(struct btrfs_extent_info) * nr_extents;
-}
-#endif
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index ebe45443de06..f4099904565a 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -71,13 +71,12 @@ out:
return ret;
}
-int btrfs_set_root_node(struct btrfs_root_item *item,
- struct extent_buffer *node)
+void btrfs_set_root_node(struct btrfs_root_item *item,
+ struct extent_buffer *node)
{
btrfs_set_root_bytenr(item, node->start);
btrfs_set_root_level(item, btrfs_header_level(node));
btrfs_set_root_generation(item, btrfs_header_generation(node));
- return 0;
}
/*
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index eb55863bb4ae..7dc36fab4afc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -216,17 +216,11 @@ static void wait_current_trans(struct btrfs_root *root)
spin_lock(&root->fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) {
- DEFINE_WAIT(wait);
atomic_inc(&cur_trans->use_count);
spin_unlock(&root->fs_info->trans_lock);
- while (1) {
- prepare_to_wait(&root->fs_info->transaction_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (!cur_trans->blocked)
- break;
- schedule();
- }
- finish_wait(&root->fs_info->transaction_wait, &wait);
+
+ wait_event(root->fs_info->transaction_wait,
+ !cur_trans->blocked);
put_transaction(cur_trans);
} else {
spin_unlock(&root->fs_info->trans_lock);
@@ -357,19 +351,10 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
}
/* wait for a transaction commit to be fully complete */
-static noinline int wait_for_commit(struct btrfs_root *root,
+static noinline void wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit)
{
- DEFINE_WAIT(wait);
- while (!commit->commit_done) {
- prepare_to_wait(&commit->commit_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (commit->commit_done)
- break;
- schedule();
- }
- finish_wait(&commit->commit_wait, &wait);
- return 0;
+ wait_event(commit->commit_wait, commit->commit_done);
}
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
@@ -1085,22 +1070,7 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
static void wait_current_trans_commit_start(struct btrfs_root *root,
struct btrfs_transaction *trans)
{
- DEFINE_WAIT(wait);
-
- if (trans->in_commit)
- return;
-
- while (1) {
- prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (trans->in_commit) {
- finish_wait(&root->fs_info->transaction_blocked_wait,
- &wait);
- break;
- }
- schedule();
- finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
- }
+ wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}
/*
@@ -1110,24 +1080,8 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_transaction *trans)
{
- DEFINE_WAIT(wait);
-
- if (trans->commit_done || (trans->in_commit && !trans->blocked))
- return;
-
- while (1) {
- prepare_to_wait(&root->fs_info->transaction_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (trans->commit_done ||
- (trans->in_commit && !trans->blocked)) {
- finish_wait(&root->fs_info->transaction_wait,
- &wait);
- break;
- }
- schedule();
- finish_wait(&root->fs_info->transaction_wait,
- &wait);
- }
+ wait_event(root->fs_info->transaction_wait,
+ trans->commit_done || (trans->in_commit && !trans->blocked));
}
/*
@@ -1234,8 +1188,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
atomic_inc(&cur_trans->use_count);
btrfs_end_transaction(trans, root);
- ret = wait_for_commit(root, cur_trans);
- BUG_ON(ret);
+ wait_for_commit(root, cur_trans);
put_transaction(cur_trans);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ac278dd83175..babee65f8eda 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1617,7 +1617,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
return 0;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
nritems = btrfs_header_nritems(eb);
for (i = 0; i < nritems; i++) {
@@ -1723,7 +1724,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
return -ENOMEM;
if (*level == 1) {
- wc->process_func(root, next, wc, ptr_gen);
+ ret = wc->process_func(root, next, wc, ptr_gen);
+ if (ret)
+ return ret;
path->slots[*level]++;
if (wc->free) {
@@ -1788,8 +1791,11 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
parent = path->nodes[*level + 1];
root_owner = btrfs_header_owner(parent);
- wc->process_func(root, path->nodes[*level], wc,
+ ret = wc->process_func(root, path->nodes[*level], wc,
btrfs_header_generation(path->nodes[*level]));
+ if (ret)
+ return ret;
+
if (wc->free) {
struct extent_buffer *next;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b89e372c7544..53875ae73ad4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1037,7 +1037,8 @@ static noinline int find_next_chunk(struct btrfs_root *root,
struct btrfs_key found_key;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
key.objectid = objectid;
key.offset = (u64)-1;
@@ -2061,8 +2062,10 @@ int btrfs_balance(struct btrfs_root *dev_root)
/* step two, relocate all the chunks */
path = btrfs_alloc_path();
- BUG_ON(!path);
-
+ if (!path) {
+ ret = -ENOMEM;
+ goto error;
+ }
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
@@ -2661,7 +2664,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
ret = find_next_chunk(fs_info->chunk_root,
BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
- BUG_ON(ret);
+ if (ret)
+ return ret;
alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
(fs_info->metadata_alloc_profile &
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 8d8f28c94c0f..6873bb634a97 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -141,10 +141,11 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
if (rc < 0) {
- cERROR(1, "%s: Failed to resolve server part of %s to IP: %d",
- __func__, *devname, rc);
+ cFYI(1, "%s: Failed to resolve server part of %s to IP: %d",
+ __func__, *devname, rc);
goto compose_mount_options_err;
}
+
/* md_len = strlen(...) + 12 for 'sep+prefixpath='
* assuming that we have 'unc=' and 'ip=' in
* the original sb_mountdata
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 259991bd2112..e76bfeb68267 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -87,9 +87,15 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
- if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
+ if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
+ server->tcpStatus == CifsNeedNegotiate)
return rc;
+ if (!server->session_estab) {
+ strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
+ return rc;
+ }
+
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
@@ -178,9 +184,15 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
- if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
+ if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
+ server->tcpStatus == CifsNeedNegotiate)
return rc;
+ if (!server->session_estab) {
+ strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
+ return rc;
+ }
+
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
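Both cifs_sign_smb() and cifs_sign_smb2() above gain the same guard: skip signing while the connection is still negotiating, and stamp the 8-byte placeholder "BSRSPYL" instead of a real MAC until the session is established. A rough standalone illustration of that guard; the struct, constant and function names are made up, only the control flow mirrors the hunks.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum tcp_status { NEED_NEGOTIATE, GOOD };

#define FLG2_SECURITY_SIGNATURE 0x0004	/* stand-in for the SMB flag */

struct pdu_sketch {
	unsigned flags2;
	char signature[8];
};

/* Mirrors the new checks: nothing to sign during negotiation, and a
 * fixed placeholder signature before the session key exists. */
static int sign_sketch(struct pdu_sketch *pdu, enum tcp_status status,
		       bool session_estab)
{
	if (!(pdu->flags2 & FLG2_SECURITY_SIGNATURE) ||
	    status == NEED_NEGOTIATE)
		return 0;

	if (!session_estab) {
		strncpy(pdu->signature, "BSRSPYL", 8);
		return 0;
	}

	/* ... compute the real HMAC over the PDU here ... */
	return 0;
}

int main(void)
{
	struct pdu_sketch pdu = { .flags2 = FLG2_SECURITY_SIGNATURE };

	sign_sketch(&pdu, GOOD, false);
	printf("%.8s\n", pdu.signature);	/* BSRSPYL */
	return 0;
}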
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 865517470967..f93eb948d071 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -86,24 +86,6 @@ extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
-void
-cifs_sb_active(struct super_block *sb)
-{
- struct cifs_sb_info *server = CIFS_SB(sb);
-
- if (atomic_inc_return(&server->active) == 1)
- atomic_inc(&sb->s_active);
-}
-
-void
-cifs_sb_deactive(struct super_block *sb)
-{
- struct cifs_sb_info *server = CIFS_SB(sb);
-
- if (atomic_dec_and_test(&server->active))
- deactivate_super(sb);
-}
-
static int
cifs_read_super(struct super_block *sb)
{
@@ -581,6 +563,10 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
mutex_unlock(&dir->i_mutex);
dput(dentry);
dentry = child;
+ if (!dentry->d_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOENT);
+ }
} while (!IS_ERR(dentry));
_FreeXid(xid);
kfree(full_path);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index fbd050c8d52a..cb71dc1f94d1 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -41,10 +41,6 @@ extern struct file_system_type cifs_fs_type;
extern const struct address_space_operations cifs_addr_ops;
extern const struct address_space_operations cifs_addr_ops_smallbuf;
-/* Functions related to super block operations */
-extern void cifs_sb_active(struct super_block *sb);
-extern void cifs_sb_deactive(struct super_block *sb);
-
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
extern struct inode *cifs_root_iget(struct super_block *);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1fcf4e5b3112..38ce6d44b145 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -942,8 +942,6 @@ GLOBAL_EXTERN spinlock_t siduidlock;
GLOBAL_EXTERN spinlock_t sidgidlock;
void cifs_oplock_break(struct work_struct *work);
-void cifs_oplock_break_get(struct cifsFileInfo *cfile);
-void cifs_oplock_break_put(struct cifsFileInfo *cfile);
extern const struct slow_work_ops cifs_oplock_break_ops;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 1a9fe7f816d1..aac37d99a487 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -107,7 +107,7 @@ static void mark_open_files_invalid(struct cifs_tcon *pTcon)
static int
cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
{
- int rc = 0;
+ int rc;
struct cifs_ses *ses;
struct TCP_Server_Info *server;
struct nls_table *nls_codepage;
@@ -5720,6 +5720,7 @@ CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
char *temp_ptr;
char *end_of_smb;
__u16 params, byte_count, data_offset;
+ unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
cFYI(1, "In Query All EAs path %s", searchName);
QAllEAsRetry:
@@ -5837,7 +5838,8 @@ QAllEAsRetry:
}
if (ea_name) {
- if (strncmp(ea_name, temp_ptr, name_len) == 0) {
+ if (ea_name_len == name_len &&
+ strncmp(ea_name, temp_ptr, name_len) == 0) {
temp_ptr += name_len + 1;
rc = value_len;
if (buf_size == 0)
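The CIFSSMBQAllEAs change above fixes a prefix-match bug: comparing only the first name_len bytes let a longer requested name match a stored EA name that happened to be its prefix. Requiring equal lengths first makes the comparison exact. A tiny standalone check of the same idea; ea_name_matches() is a hypothetical helper, not the CIFS code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Exact match: lengths must agree before the bytes are compared. */
static bool ea_name_matches(const char *wanted, const char *stored,
			    size_t stored_len)
{
	return strlen(wanted) == stored_len &&
	       strncmp(wanted, stored, stored_len) == 0;
}

int main(void)
{
	/* the old check would report a match here, since the first 8 bytes agree */
	printf("%d\n", ea_name_matches("user.foobar", "user.foo", 8)); /* 0 */
	printf("%d\n", ea_name_matches("user.foo", "user.foo", 8));    /* 1 */
	return 0;
}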
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e66297bad412..80c2e3add3a2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -319,25 +319,328 @@ requeue_echo:
queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
}
+static bool
+allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
+ bool is_large_buf)
+{
+ char *bbuf = *bigbuf, *sbuf = *smallbuf;
+
+ if (bbuf == NULL) {
+ bbuf = (char *)cifs_buf_get();
+ if (!bbuf) {
+ cERROR(1, "No memory for large SMB response");
+ msleep(3000);
+ /* retry will check if exiting */
+ return false;
+ }
+ } else if (is_large_buf) {
+ /* we are reusing a dirty large buf, clear its start */
+ memset(bbuf, 0, size);
+ }
+
+ if (sbuf == NULL) {
+ sbuf = (char *)cifs_small_buf_get();
+ if (!sbuf) {
+ cERROR(1, "No memory for SMB response");
+ msleep(1000);
+ /* retry will check if exiting */
+ return false;
+ }
+ /* beginning of smb buffer is cleared in our buf_get */
+ } else {
+ /* if existing small buf clear beginning */
+ memset(sbuf, 0, size);
+ }
+
+ *bigbuf = bbuf;
+ *smallbuf = sbuf;
+
+ return true;
+}
+
+static int
+read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg,
+ struct kvec *iov, unsigned int to_read,
+ unsigned int *ptotal_read, bool is_header_read)
+{
+ int length, rc = 0;
+ unsigned int total_read;
+ char *buf = iov->iov_base;
+
+ for (total_read = 0; total_read < to_read; total_read += length) {
+ length = kernel_recvmsg(server->ssocket, smb_msg, iov, 1,
+ to_read - total_read, 0);
+ if (server->tcpStatus == CifsExiting) {
+ /* then will exit */
+ rc = 2;
+ break;
+ } else if (server->tcpStatus == CifsNeedReconnect) {
+ cifs_reconnect(server);
+ /* Reconnect wakes up rspns q */
+ /* Now we will reread sock */
+ rc = 1;
+ break;
+ } else if (length == -ERESTARTSYS ||
+ length == -EAGAIN ||
+ length == -EINTR) {
+ /*
+ * Minimum sleep to prevent looping, allowing socket
+ * to clear and app threads to set tcpStatus
+ * CifsNeedReconnect if server hung.
+ */
+ usleep_range(1000, 2000);
+ length = 0;
+ if (!is_header_read)
+ continue;
+ /* Special handling for header read */
+ if (total_read) {
+ iov->iov_base = (to_read - total_read) +
+ buf;
+ iov->iov_len = to_read - total_read;
+ smb_msg->msg_control = NULL;
+ smb_msg->msg_controllen = 0;
+ rc = 3;
+ } else
+ rc = 1;
+ break;
+ } else if (length <= 0) {
+ cERROR(1, "Received no data, expecting %d",
+ to_read - total_read);
+ cifs_reconnect(server);
+ rc = 1;
+ break;
+ }
+ }
+
+ *ptotal_read = total_read;
+ return rc;
+}
+
+static bool
+check_rfc1002_header(struct TCP_Server_Info *server, char *buf)
+{
+ char temp = *buf;
+ unsigned int pdu_length = be32_to_cpu(
+ ((struct smb_hdr *)buf)->smb_buf_length);
+
+ /*
+	 * The first byte of the big-endian length field is actually not
+	 * part of the length but the frame type, with the most common
+	 * value, zero, meaning regular data.
+ */
+ if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
+ return false;
+ } else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
+ cFYI(1, "Good RFC 1002 session rsp");
+ return false;
+ } else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
+ /*
+ * We get this from Windows 98 instead of an error on
+ * SMB negprot response.
+ */
+ cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
+ pdu_length);
+ /* give server a second to clean up */
+ msleep(1000);
+ /*
+ * Always try 445 first on reconnect since we get NACK
+ * on some if we ever connected to port 139 (the NACK
+ * is since we do not begin with RFC1001 session
+ * initialize frame).
+ */
+ cifs_set_port((struct sockaddr *)
+ &server->dstaddr, CIFS_PORT);
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return false;
+ } else if (temp != (char) 0) {
+ cERROR(1, "Unknown RFC 1002 frame");
+ cifs_dump_mem(" Received Data: ", buf, 4);
+ cifs_reconnect(server);
+ return false;
+ }
+
+ /* else we have an SMB response */
+ if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
+ (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
+ cERROR(1, "Invalid size SMB length %d pdu_length %d",
+ 4, pdu_length+4);
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return false;
+ }
+
+ return true;
+}
+
+static struct mid_q_entry *
+find_cifs_mid(struct TCP_Server_Info *server, struct smb_hdr *buf,
+ int *length, bool is_large_buf, bool *is_multi_rsp, char **bigbuf)
+{
+ struct mid_q_entry *mid = NULL, *tmp_mid, *ret = NULL;
+
+ spin_lock(&GlobalMid_Lock);
+ list_for_each_entry_safe(mid, tmp_mid, &server->pending_mid_q, qhead) {
+ if (mid->mid != buf->Mid ||
+ mid->midState != MID_REQUEST_SUBMITTED ||
+ mid->command != buf->Command)
+ continue;
+
+ if (*length == 0 && check2ndT2(buf, server->maxBuf) > 0) {
+ /* We have a multipart transact2 resp */
+ *is_multi_rsp = true;
+ if (mid->resp_buf) {
+ /* merge response - fix up 1st*/
+ *length = coalesce_t2(buf, mid->resp_buf);
+ if (*length > 0) {
+ *length = 0;
+ mid->multiRsp = true;
+ break;
+ }
+ /* All parts received or packet is malformed. */
+ mid->multiEnd = true;
+ goto multi_t2_fnd;
+ }
+ if (!is_large_buf) {
+ /*FIXME: switch to already allocated largebuf?*/
+ cERROR(1, "1st trans2 resp needs bigbuf");
+ } else {
+ /* Have first buffer */
+ mid->resp_buf = buf;
+ mid->largeBuf = true;
+ *bigbuf = NULL;
+ }
+ break;
+ }
+ mid->resp_buf = buf;
+ mid->largeBuf = is_large_buf;
+multi_t2_fnd:
+ if (*length == 0)
+ mid->midState = MID_RESPONSE_RECEIVED;
+ else
+ mid->midState = MID_RESPONSE_MALFORMED;
+#ifdef CONFIG_CIFS_STATS2
+ mid->when_received = jiffies;
+#endif
+ list_del_init(&mid->qhead);
+ ret = mid;
+ break;
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ return ret;
+}
+
+static void clean_demultiplex_info(struct TCP_Server_Info *server)
+{
+ int length;
+
+ /* take it off the list, if it's not already */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_del_init(&server->tcp_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ spin_lock(&GlobalMid_Lock);
+ server->tcpStatus = CifsExiting;
+ spin_unlock(&GlobalMid_Lock);
+ wake_up_all(&server->response_q);
+
+ /*
+ * Check if we have blocked requests that need to free. Note that
+ * cifs_max_pending is normally 50, but can be set at module install
+ * time to as little as two.
+ */
+ spin_lock(&GlobalMid_Lock);
+ if (atomic_read(&server->inFlight) >= cifs_max_pending)
+ atomic_set(&server->inFlight, cifs_max_pending - 1);
+ /*
+ * We do not want to set the max_pending too low or we could end up
+ * with the counter going negative.
+ */
+ spin_unlock(&GlobalMid_Lock);
+ /*
+ * Although there should not be any requests blocked on this queue it
+ * can not hurt to be paranoid and try to wake up requests that may
+	 * have been blocked when more than 50 at a time were on the wire to the
+ * same server - they now will see the session is in exit state and get
+ * out of SendReceive.
+ */
+ wake_up_all(&server->request_q);
+ /* give those requests time to exit */
+ msleep(125);
+
+ if (server->ssocket) {
+ sock_release(server->ssocket);
+ server->ssocket = NULL;
+ }
+
+ if (!list_empty(&server->pending_mid_q)) {
+ struct list_head dispose_list;
+ struct mid_q_entry *mid_entry;
+ struct list_head *tmp, *tmp2;
+
+ INIT_LIST_HEAD(&dispose_list);
+ spin_lock(&GlobalMid_Lock);
+ list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
+ mid_entry->midState = MID_SHUTDOWN;
+ list_move(&mid_entry->qhead, &dispose_list);
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ /* now walk dispose list and issue callbacks */
+ list_for_each_safe(tmp, tmp2, &dispose_list) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ cFYI(1, "Callback mid 0x%x", mid_entry->mid);
+ list_del_init(&mid_entry->qhead);
+ mid_entry->callback(mid_entry);
+ }
+ /* 1/8th of sec is more than enough time for them to exit */
+ msleep(125);
+ }
+
+ if (!list_empty(&server->pending_mid_q)) {
+ /*
+	 * mpx threads have not exited yet; give them at least the smb
+ * send timeout time for long ops.
+ *
+ * Due to delays on oplock break requests, we need to wait at
+ * least 45 seconds before giving up on a request getting a
+ * response and going ahead and killing cifsd.
+ */
+ cFYI(1, "Wait for exit from demultiplex thread");
+ msleep(46000);
+ /*
+	 * If threads still have not exited, they are probably never
+	 * coming home; not much else we can do but free the memory.
+ */
+ }
+
+ kfree(server->hostname);
+ kfree(server);
+
+ length = atomic_dec_return(&tcpSesAllocCount);
+ if (length > 0)
+ mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
+ GFP_KERNEL);
+}
+
static int
cifs_demultiplex_thread(void *p)
{
int length;
struct TCP_Server_Info *server = p;
unsigned int pdu_length, total_read;
+ char *buf = NULL, *bigbuf = NULL, *smallbuf = NULL;
struct smb_hdr *smb_buffer = NULL;
- struct smb_hdr *bigbuf = NULL;
- struct smb_hdr *smallbuf = NULL;
struct msghdr smb_msg;
struct kvec iov;
- struct socket *csocket = server->ssocket;
- struct list_head *tmp, *tmp2;
struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
- char temp;
bool isLargeBuf = false;
- bool isMultiRsp;
- int reconnect;
+ bool isMultiRsp = false;
+ int rc;
current->flags |= PF_MEMALLOC;
cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
@@ -351,35 +654,16 @@ cifs_demultiplex_thread(void *p)
while (server->tcpStatus != CifsExiting) {
if (try_to_freeze())
continue;
- if (bigbuf == NULL) {
- bigbuf = cifs_buf_get();
- if (!bigbuf) {
- cERROR(1, "No memory for large SMB response");
- msleep(3000);
- /* retry will check if exiting */
- continue;
- }
- } else if (isLargeBuf) {
- /* we are reusing a dirty large buf, clear its start */
- memset(bigbuf, 0, sizeof(struct smb_hdr));
- }
- if (smallbuf == NULL) {
- smallbuf = cifs_small_buf_get();
- if (!smallbuf) {
- cERROR(1, "No memory for SMB response");
- msleep(1000);
- /* retry will check if exiting */
- continue;
- }
- /* beginning of smb buffer is cleared in our buf_get */
- } else /* if existing small buf clear beginning */
- memset(smallbuf, 0, sizeof(struct smb_hdr));
+ if (!allocate_buffers(&bigbuf, &smallbuf,
+ sizeof(struct smb_hdr), isLargeBuf))
+ continue;
isLargeBuf = false;
isMultiRsp = false;
- smb_buffer = smallbuf;
- iov.iov_base = smb_buffer;
+ smb_buffer = (struct smb_hdr *)smallbuf;
+ buf = smallbuf;
+ iov.iov_base = buf;
iov.iov_len = 4;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
@@ -393,158 +677,50 @@ incomplete_rcv:
"Reconnecting...", server->hostname,
(echo_retries * SMB_ECHO_INTERVAL / HZ));
cifs_reconnect(server);
- csocket = server->ssocket;
wake_up(&server->response_q);
continue;
}
- length =
- kernel_recvmsg(csocket, &smb_msg,
- &iov, 1, pdu_length, 0 /* BB other flags? */);
-
- if (server->tcpStatus == CifsExiting) {
+ rc = read_from_socket(server, &smb_msg, &iov, pdu_length,
+ &total_read, true /* header read */);
+ if (rc == 3)
+ goto incomplete_rcv;
+ else if (rc == 2)
break;
- } else if (server->tcpStatus == CifsNeedReconnect) {
- cFYI(1, "Reconnect after server stopped responding");
- cifs_reconnect(server);
- cFYI(1, "call to reconnect done");
- csocket = server->ssocket;
- continue;
- } else if (length == -ERESTARTSYS ||
- length == -EAGAIN ||
- length == -EINTR) {
- msleep(1); /* minimum sleep to prevent looping
- allowing socket to clear and app threads to set
- tcpStatus CifsNeedReconnect if server hung */
- if (pdu_length < 4) {
- iov.iov_base = (4 - pdu_length) +
- (char *)smb_buffer;
- iov.iov_len = pdu_length;
- smb_msg.msg_control = NULL;
- smb_msg.msg_controllen = 0;
- goto incomplete_rcv;
- } else
- continue;
- } else if (length <= 0) {
- cFYI(1, "Reconnect after unexpected peek error %d",
- length);
- cifs_reconnect(server);
- csocket = server->ssocket;
- wake_up(&server->response_q);
+ else if (rc == 1)
continue;
- } else if (length < pdu_length) {
- cFYI(1, "requested %d bytes but only got %d bytes",
- pdu_length, length);
- pdu_length -= length;
- msleep(1);
- goto incomplete_rcv;
- }
-
- /* The right amount was read from socket - 4 bytes */
- /* so we can now interpret the length field */
- /* the first byte big endian of the length field,
- is actually not part of the length but the type
- with the most common, zero, as regular data */
- temp = *((char *) smb_buffer);
+ /*
+ * The right amount was read from socket - 4 bytes,
+ * so we can now interpret the length field.
+ */
- /* Note that FC 1001 length is big endian on the wire,
- but we convert it here so it is always manipulated
- as host byte order */
+ /*
+ * Note that RFC 1001 length is big endian on the wire,
+ * but we convert it here so it is always manipulated
+ * as host byte order.
+ */
pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
cFYI(1, "rfc1002 length 0x%x", pdu_length+4);
-
- if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
- continue;
- } else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
- cFYI(1, "Good RFC 1002 session rsp");
- continue;
- } else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
- /* we get this from Windows 98 instead of
- an error on SMB negprot response */
- cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
- pdu_length);
- /* give server a second to clean up */
- msleep(1000);
- /* always try 445 first on reconnect since we get NACK
- * on some if we ever connected to port 139 (the NACK
- * is since we do not begin with RFC1001 session
- * initialize frame)
- */
- cifs_set_port((struct sockaddr *)
- &server->dstaddr, CIFS_PORT);
- cifs_reconnect(server);
- csocket = server->ssocket;
- wake_up(&server->response_q);
- continue;
- } else if (temp != (char) 0) {
- cERROR(1, "Unknown RFC 1002 frame");
- cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
- length);
- cifs_reconnect(server);
- csocket = server->ssocket;
+ if (!check_rfc1002_header(server, buf))
continue;
- }
-
- /* else we have an SMB response */
- if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
- (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
- cERROR(1, "Invalid size SMB length %d pdu_length %d",
- length, pdu_length+4);
- cifs_reconnect(server);
- csocket = server->ssocket;
- wake_up(&server->response_q);
- continue;
- }
/* else length ok */
- reconnect = 0;
-
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
isLargeBuf = true;
memcpy(bigbuf, smallbuf, 4);
- smb_buffer = bigbuf;
+ smb_buffer = (struct smb_hdr *)bigbuf;
+ buf = bigbuf;
}
- length = 0;
- iov.iov_base = 4 + (char *)smb_buffer;
+
+ iov.iov_base = 4 + buf;
iov.iov_len = pdu_length;
- for (total_read = 0; total_read < pdu_length;
- total_read += length) {
- length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
- pdu_length - total_read, 0);
- if (server->tcpStatus == CifsExiting) {
- /* then will exit */
- reconnect = 2;
- break;
- } else if (server->tcpStatus == CifsNeedReconnect) {
- cifs_reconnect(server);
- csocket = server->ssocket;
- /* Reconnect wakes up rspns q */
- /* Now we will reread sock */
- reconnect = 1;
- break;
- } else if (length == -ERESTARTSYS ||
- length == -EAGAIN ||
- length == -EINTR) {
- msleep(1); /* minimum sleep to prevent looping,
- allowing socket to clear and app
- threads to set tcpStatus
- CifsNeedReconnect if server hung*/
- length = 0;
- continue;
- } else if (length <= 0) {
- cERROR(1, "Received no data, expecting %d",
- pdu_length - total_read);
- cifs_reconnect(server);
- csocket = server->ssocket;
- reconnect = 1;
- break;
- }
- }
- if (reconnect == 2)
+ rc = read_from_socket(server, &smb_msg, &iov, pdu_length,
+ &total_read, false);
+ if (rc == 2)
break;
- else if (reconnect == 1)
+ else if (rc == 1)
continue;
total_read += 4; /* account for rfc1002 hdr */
@@ -562,75 +738,13 @@ incomplete_rcv:
*/
length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
if (length != 0)
- cifs_dump_mem("Bad SMB: ", smb_buffer,
- min_t(unsigned int, total_read, 48));
+ cifs_dump_mem("Bad SMB: ", buf,
+ min_t(unsigned int, total_read, 48));
- mid_entry = NULL;
server->lstrp = jiffies;
- spin_lock(&GlobalMid_Lock);
- list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-
- if (mid_entry->mid != smb_buffer->Mid ||
- mid_entry->midState != MID_REQUEST_SUBMITTED ||
- mid_entry->command != smb_buffer->Command) {
- mid_entry = NULL;
- continue;
- }
-
- if (length == 0 &&
- check2ndT2(smb_buffer, server->maxBuf) > 0) {
- /* We have a multipart transact2 resp */
- isMultiRsp = true;
- if (mid_entry->resp_buf) {
- /* merge response - fix up 1st*/
- length = coalesce_t2(smb_buffer,
- mid_entry->resp_buf);
- if (length > 0) {
- length = 0;
- mid_entry->multiRsp = true;
- break;
- } else {
- /* all parts received or
- * packet is malformed
- */
- mid_entry->multiEnd = true;
- goto multi_t2_fnd;
- }
- } else {
- if (!isLargeBuf) {
- /*
- * FIXME: switch to already
- * allocated largebuf?
- */
- cERROR(1, "1st trans2 resp "
- "needs bigbuf");
- } else {
- /* Have first buffer */
- mid_entry->resp_buf =
- smb_buffer;
- mid_entry->largeBuf = true;
- bigbuf = NULL;
- }
- }
- break;
- }
- mid_entry->resp_buf = smb_buffer;
- mid_entry->largeBuf = isLargeBuf;
-multi_t2_fnd:
- if (length == 0)
- mid_entry->midState = MID_RESPONSE_RECEIVED;
- else
- mid_entry->midState = MID_RESPONSE_MALFORMED;
-#ifdef CONFIG_CIFS_STATS2
- mid_entry->when_received = jiffies;
-#endif
- list_del_init(&mid_entry->qhead);
- break;
- }
- spin_unlock(&GlobalMid_Lock);
-
+ mid_entry = find_cifs_mid(server, smb_buffer, &length,
+ isLargeBuf, &isMultiRsp, &bigbuf);
if (mid_entry != NULL) {
mid_entry->callback(mid_entry);
/* Was previous buf put in mpx struct for multi-rsp? */
@@ -648,7 +762,7 @@ multi_t2_fnd:
!isMultiRsp) {
cERROR(1, "No task to wake, unknown frame received! "
"NumMids %d", atomic_read(&midCount));
- cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
+ cifs_dump_mem("Received Data is: ", buf,
sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
cifs_dump_detail(smb_buffer);
@@ -658,88 +772,13 @@ multi_t2_fnd:
}
} /* end while !EXITING */
- /* take it off the list, if it's not already */
- spin_lock(&cifs_tcp_ses_lock);
- list_del_init(&server->tcp_ses_list);
- spin_unlock(&cifs_tcp_ses_lock);
-
- spin_lock(&GlobalMid_Lock);
- server->tcpStatus = CifsExiting;
- spin_unlock(&GlobalMid_Lock);
- wake_up_all(&server->response_q);
-
- /* check if we have blocked requests that need to free */
- /* Note that cifs_max_pending is normally 50, but
- can be set at module install time to as little as two */
- spin_lock(&GlobalMid_Lock);
- if (atomic_read(&server->inFlight) >= cifs_max_pending)
- atomic_set(&server->inFlight, cifs_max_pending - 1);
- /* We do not want to set the max_pending too low or we
- could end up with the counter going negative */
- spin_unlock(&GlobalMid_Lock);
- /* Although there should not be any requests blocked on
- this queue it can not hurt to be paranoid and try to wake up requests
- that may haven been blocked when more than 50 at time were on the wire
- to the same server - they now will see the session is in exit state
- and get out of SendReceive. */
- wake_up_all(&server->request_q);
- /* give those requests time to exit */
- msleep(125);
-
- if (server->ssocket) {
- sock_release(csocket);
- server->ssocket = NULL;
- }
/* buffer usually freed in free_mid - need to free it here on exit */
cifs_buf_release(bigbuf);
if (smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(smallbuf);
- if (!list_empty(&server->pending_mid_q)) {
- struct list_head dispose_list;
-
- INIT_LIST_HEAD(&dispose_list);
- spin_lock(&GlobalMid_Lock);
- list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
- mid_entry->midState = MID_SHUTDOWN;
- list_move(&mid_entry->qhead, &dispose_list);
- }
- spin_unlock(&GlobalMid_Lock);
-
- /* now walk dispose list and issue callbacks */
- list_for_each_safe(tmp, tmp2, &dispose_list) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cFYI(1, "Callback mid 0x%x", mid_entry->mid);
- list_del_init(&mid_entry->qhead);
- mid_entry->callback(mid_entry);
- }
- /* 1/8th of sec is more than enough time for them to exit */
- msleep(125);
- }
-
- if (!list_empty(&server->pending_mid_q)) {
- /* mpx threads have not exited yet give them
- at least the smb send timeout time for long ops */
- /* due to delays on oplock break requests, we need
- to wait at least 45 seconds before giving up
- on a request getting a response and going ahead
- and killing cifsd */
- cFYI(1, "Wait for exit from demultiplex thread");
- msleep(46000);
- /* if threads still have not exited they are probably never
- coming home not much else we can do but free the memory */
- }
-
- kfree(server->hostname);
task_to_wake = xchg(&server->tsk, NULL);
- kfree(server);
-
- length = atomic_dec_return(&tcpSesAllocCount);
- if (length > 0)
- mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
- GFP_KERNEL);
+ clean_demultiplex_info(server);
/* if server->tsk was NULL then wait for a signal before exiting */
if (!task_to_wake) {
@@ -3193,15 +3232,9 @@ mount_fail_check:
else
cifs_put_tcp_session(srvTcp);
bdi_destroy(&cifs_sb->bdi);
- goto out;
}
- /* volume_info->password is freed above when existing session found
- (in which case it is not needed anymore) but when new sesion is created
- the password ptr is put in the new session structure (in which case the
- password will be freed at unmount time) */
out:
- /* zero out password before freeing */
FreeXid(xid);
return rc;
}
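The connect.c rework splits cifs_demultiplex_thread() into helpers, and read_from_socket() tells the main loop what to do next through a small integer code: 0 means the requested bytes arrived, 1 restarts the receive loop (typically after a reconnect), 2 means the thread is exiting, and 3 means an interrupted header read must be resumed. A compact standalone sketch of how such a code is consumed; the enum and function names are illustrative.

#include <stdio.h>

enum read_result {
	READ_OK = 0,	/* got the data, go parse the PDU */
	READ_RESTART,	/* restart the receive loop */
	READ_EXIT,	/* server is exiting, leave the thread */
	READ_RESUME,	/* partial header read, resume where we stopped */
};

static const char *next_action(enum read_result rc)
{
	switch (rc) {
	case READ_OK:      return "parse the PDU";
	case READ_RESTART: return "continue the receive loop";
	case READ_EXIT:    return "break out of the loop";
	case READ_RESUME:  return "re-enter the partial header read";
	}
	return "unreachable";
}

int main(void)
{
	for (int rc = READ_OK; rc <= READ_RESUME; rc++)
		printf("rc=%d -> %s\n", rc, next_action(rc));
	return 0;
}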
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 548f06230a6d..1d2d91d9bf65 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -79,8 +79,8 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
/* Perform the upcall */
rc = dns_query(NULL, hostname, len, NULL, ip_addr, NULL);
if (rc < 0)
- cERROR(1, "%s: unable to resolve: %*.*s",
- __func__, len, len, hostname);
+ cFYI(1, "%s: unable to resolve: %*.*s",
+ __func__, len, len, hostname);
else
cFYI(1, "%s: resolved: %*.*s to %s",
__func__, len, len, hostname, *ip_addr);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 378acdafa356..9f41a10523a1 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -314,6 +314,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
}
spin_unlock(&cifs_file_list_lock);
+ cancel_work_sync(&cifs_file->oplock_break);
+
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
int xid, rc;
@@ -2418,31 +2420,6 @@ void cifs_oplock_break(struct work_struct *work)
cinode->clientCanCacheRead ? 1 : 0);
cFYI(1, "Oplock release rc = %d", rc);
}
-
- /*
- * We might have kicked in before is_valid_oplock_break()
- * finished grabbing reference for us. Make sure it's done by
- * waiting for cifs_file_list_lock.
- */
- spin_lock(&cifs_file_list_lock);
- spin_unlock(&cifs_file_list_lock);
-
- cifs_oplock_break_put(cfile);
-}
-
-/* must be called while holding cifs_file_list_lock */
-void cifs_oplock_break_get(struct cifsFileInfo *cfile)
-{
- cifs_sb_active(cfile->dentry->d_sb);
- cifsFileInfo_get(cfile);
-}
-
-void cifs_oplock_break_put(struct cifsFileInfo *cfile)
-{
- struct super_block *sb = cfile->dentry->d_sb;
-
- cifsFileInfo_put(cfile);
- cifs_sb_deactive(sb);
}
const struct address_space_operations cifs_addr_ops = {
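With the cifs_sb_active()/cifs_oplock_break_get() reference dance removed, cifsFileInfo_put() instead calls cancel_work_sync() on the oplock-break work item, so the worker is guaranteed to be idle before the file info can go away. A kernel-style sketch of that pattern; the struct name is hypothetical and the snippet is not buildable outside the kernel.

/* Requires <linux/workqueue.h> and <linux/slab.h> in a real module. */
struct file_info_sketch {
	struct work_struct oplock_break;
	/* ... handle, dentry, reference count ... */
};

static void put_file_info_sketch(struct file_info_sketch *cfile)
{
	/* cancels a pending oplock-break work item and waits for a
	 * running one to finish before the memory is released */
	cancel_work_sync(&cfile->oplock_break);
	kfree(cfile);
}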
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9b018c8334fa..a7b2dcd4a53e 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -764,20 +764,10 @@ char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
if (full_path == NULL)
return full_path;
- if (dfsplen) {
+ if (dfsplen)
strncpy(full_path, tcon->treeName, dfsplen);
- /* switch slash direction in prepath depending on whether
- * windows or posix style path names
- */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
- int i;
- for (i = 0; i < dfsplen; i++) {
- if (full_path[i] == '\\')
- full_path[i] = '/';
- }
- }
- }
strncpy(full_path + dfsplen, vol->prepath, pplen);
+ convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
full_path[dfsplen + pplen] = 0; /* add trailing null */
return full_path;
}
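cifs_build_path_to_root() now leaves slash flipping to convert_delimiter() with the separator chosen by CIFS_DIR_SEP(cifs_sb), instead of open-coding a loop over the DFS prefix only. A standalone sketch of what such a conversion does; the helper name and signature here are simplified stand-ins.

#include <stdio.h>

/* Replace every occurrence of the "other" separator with the wanted one. */
static void convert_delimiter_sketch(char *path, char dir_sep)
{
	char from = (dir_sep == '/') ? '\\' : '/';

	for (; *path; path++)
		if (*path == from)
			*path = dir_sep;
}

int main(void)
{
	char path[] = "\\srv\\share\\dir\\file";

	convert_delimiter_sketch(path, '/');
	puts(path);	/* /srv/share/dir/file */
	return 0;
}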
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 03a1f491d39b..7c1693392598 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -585,15 +585,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
cifs_set_oplock_level(pCifsInode,
pSMB->OplockLevel ? OPLOCK_READ : 0);
- /*
- * cifs_oplock_break_put() can't be called
- * from here. Get reference after queueing
- * succeeded. cifs_oplock_break() will
- * synchronize using cifs_file_list_lock.
- */
- if (queue_work(system_nrt_wq,
- &netfile->oplock_break))
- cifs_oplock_break_get(netfile);
+ queue_work(system_nrt_wq,
+ &netfile->oplock_break);
netfile->oplock_break_cancelled = false;
spin_unlock(&cifs_file_list_lock);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 147aa22c3c3a..c1b9c4b10739 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -362,6 +362,8 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
mid = AllocMidQEntry(hdr, server);
if (mid == NULL) {
mutex_unlock(&server->srv_mutex);
+ atomic_dec(&server->inFlight);
+ wake_up(&server->request_q);
return -ENOMEM;
}
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 8be086e9abe4..51352de88ef1 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1003,6 +1003,7 @@ COMPATIBLE_IOCTL(PPPIOCCONNECT)
COMPATIBLE_IOCTL(PPPIOCDISCONN)
COMPATIBLE_IOCTL(PPPIOCATTCHAN)
COMPATIBLE_IOCTL(PPPIOCGCHAN)
+COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
/* PPPOX */
COMPATIBLE_IOCTL(PPPOEIOCSFWD)
COMPATIBLE_IOCTL(PPPOEIOCDFWD)
diff --git a/fs/dcache.c b/fs/dcache.c
index b05aac3a8cfc..a88948b8bd17 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -301,6 +301,27 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
return parent;
}
+/*
+ * Unhash a dentry without inserting an RCU walk barrier or checking that
+ * dentry->d_lock is locked. The caller must take care of that, if
+ * appropriate.
+ */
+static void __d_shrink(struct dentry *dentry)
+{
+ if (!d_unhashed(dentry)) {
+ struct hlist_bl_head *b;
+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+ b = &dentry->d_sb->s_anon;
+ else
+ b = d_hash(dentry->d_parent, dentry->d_name.hash);
+
+ hlist_bl_lock(b);
+ __hlist_bl_del(&dentry->d_hash);
+ dentry->d_hash.pprev = NULL;
+ hlist_bl_unlock(b);
+ }
+}
+
/**
* d_drop - drop a dentry
* @dentry: dentry to drop
@@ -319,17 +340,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
void __d_drop(struct dentry *dentry)
{
if (!d_unhashed(dentry)) {
- struct hlist_bl_head *b;
- if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
- b = &dentry->d_sb->s_anon;
- else
- b = d_hash(dentry->d_parent, dentry->d_name.hash);
-
- hlist_bl_lock(b);
- __hlist_bl_del(&dentry->d_hash);
- dentry->d_hash.pprev = NULL;
- hlist_bl_unlock(b);
-
+ __d_shrink(dentry);
dentry_rcuwalk_barrier(dentry);
}
}
@@ -784,6 +795,7 @@ relock:
/**
* prune_dcache_sb - shrink the dcache
+ * @sb: superblock
* @nr_to_scan: number of entries to try to free
*
* Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
@@ -828,44 +840,24 @@ EXPORT_SYMBOL(shrink_dcache_sb);
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
struct dentry *parent;
- unsigned detached = 0;
BUG_ON(!IS_ROOT(dentry));
- /* detach this root from the system */
- spin_lock(&dentry->d_lock);
- dentry_lru_del(dentry);
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
-
for (;;) {
/* descend to the first leaf in the current subtree */
- while (!list_empty(&dentry->d_subdirs)) {
- struct dentry *loop;
-
- /* this is a branch with children - detach all of them
- * from the system in one go */
- spin_lock(&dentry->d_lock);
- list_for_each_entry(loop, &dentry->d_subdirs,
- d_u.d_child) {
- spin_lock_nested(&loop->d_lock,
- DENTRY_D_LOCK_NESTED);
- dentry_lru_del(loop);
- __d_drop(loop);
- spin_unlock(&loop->d_lock);
- }
- spin_unlock(&dentry->d_lock);
-
- /* move to the first child */
+ while (!list_empty(&dentry->d_subdirs))
dentry = list_entry(dentry->d_subdirs.next,
struct dentry, d_u.d_child);
- }
/* consume the dentries from this leaf up through its parents
* until we find one with children or run out altogether */
do {
struct inode *inode;
+ /* detach from the system */
+ dentry_lru_del(dentry);
+ __d_shrink(dentry);
+
if (dentry->d_count != 0) {
printk(KERN_ERR
"BUG: Dentry %p{i=%lx,n=%s}"
@@ -886,14 +878,10 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
list_del(&dentry->d_u.d_child);
} else {
parent = dentry->d_parent;
- spin_lock(&parent->d_lock);
parent->d_count--;
list_del(&dentry->d_u.d_child);
- spin_unlock(&parent->d_lock);
}
- detached++;
-
inode = dentry->d_inode;
if (inode) {
dentry->d_inode = NULL;
@@ -938,9 +926,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
dentry = sb->s_root;
sb->s_root = NULL;
- spin_lock(&dentry->d_lock);
dentry->d_count--;
- spin_unlock(&dentry->d_lock);
shrink_dcache_for_umount_subtree(dentry);
while (!hlist_bl_empty(&sb->s_anon)) {
@@ -1743,7 +1729,7 @@ seqretry:
*/
if (read_seqcount_retry(&dentry->d_seq, *seq))
goto seqretry;
- if (parent->d_flags & DCACHE_OP_COMPARE) {
+ if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
if (parent->d_op->d_compare(parent, *inode,
dentry, i,
tlen, tname, name))
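The dcache change factors the unhash step out of __d_drop() into __d_shrink(), which skips the RCU-walk barrier; shrink_dcache_for_umount_subtree() may use the cheaper helper because nothing else can reach those dentries during umount. The resulting split, restated as a kernel-style sketch of the new __d_drop() body shown above (not standalone code):

static void __d_drop_sketch(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		__d_shrink(dentry);		/* remove from the hash chain only */
		dentry_rcuwalk_barrier(dentry);	/* invalidate lockless RCU-walk users */
	}
}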
diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild
index 2d0f757fda3e..c5a5855a6c44 100644
--- a/fs/exofs/Kbuild
+++ b/fs/exofs/Kbuild
@@ -12,5 +12,8 @@
# Kbuild - Gets included from the Kernels Makefile and build system
#
-exofs-y := ios.o inode.o file.o symlink.o namei.o dir.o super.o
+# ore module library
+obj-$(CONFIG_ORE) += ore.o
+
+exofs-y := inode.o file.o symlink.o namei.o dir.o super.o
obj-$(CONFIG_EXOFS_FS) += exofs.o
diff --git a/fs/exofs/Kconfig b/fs/exofs/Kconfig
index 86194b2f799d..70bae4149291 100644
--- a/fs/exofs/Kconfig
+++ b/fs/exofs/Kconfig
@@ -1,6 +1,10 @@
+config ORE
+ tristate
+
config EXOFS_FS
tristate "exofs: OSD based file system support"
depends on SCSI_OSD_ULD
+ select ORE
help
EXOFS is a file system that uses an OSD storage device,
as its backing storage.
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index c965806c2821..f4e442ec7445 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -36,12 +36,9 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
-#include "common.h"
+#include <scsi/osd_ore.h>
-/* FIXME: Remove once pnfs hits mainline
- * #include <linux/exportfs/pnfs_osd_xdr.h>
- */
-#include "pnfs.h"
+#include "common.h"
#define EXOFS_ERR(fmt, a...) printk(KERN_ERR "exofs: " fmt, ##a)
@@ -56,27 +53,11 @@
/* u64 has problems with printk this will cast it to unsigned long long */
#define _LLU(x) (unsigned long long)(x)
-struct exofs_layout {
- osd_id s_pid; /* partition ID of file system*/
-
- /* Our way of looking at the data_map */
- unsigned stripe_unit;
- unsigned mirrors_p1;
-
- unsigned group_width;
- u64 group_depth;
- unsigned group_count;
-
- enum exofs_inode_layout_gen_functions lay_func;
-
- unsigned s_numdevs; /* Num of devices in array */
- struct osd_dev *s_ods[0]; /* Variable length */
-};
-
/*
* our extension to the in-memory superblock
*/
struct exofs_sb_info {
+ struct backing_dev_info bdi; /* register our bdi with VFS */
struct exofs_sb_stats s_ess; /* Written often, pre-allocate*/
int s_timeout; /* timeout for OSD operations */
uint64_t s_nextid; /* highest object ID used */
@@ -84,16 +65,13 @@ struct exofs_sb_info {
spinlock_t s_next_gen_lock; /* spinlock for gen # update */
u32 s_next_generation; /* next gen # to use */
atomic_t s_curr_pending; /* number of pending commands */
- uint8_t s_cred[OSD_CAP_LEN]; /* credential for the fscb */
- struct backing_dev_info bdi; /* register our bdi with VFS */
struct pnfs_osd_data_map data_map; /* Default raid to use
* FIXME: Needed ?
*/
-/* struct exofs_layout dir_layout;*/ /* Default dir layout */
- struct exofs_layout layout; /* Default files layout,
- * contains the variable osd_dev
- * array. Keep last */
+ struct ore_layout layout; /* Default files layout */
+ struct ore_comp one_comp; /* id & cred of partition id=0*/
+ struct ore_components comps; /* comps for the partition */
struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */
};
@@ -107,7 +85,8 @@ struct exofs_i_info {
uint32_t i_data[EXOFS_IDATA];/*short symlink names and device #s*/
uint32_t i_dir_start_lookup; /* which page to start lookup */
uint64_t i_commit_size; /* the object's written length */
- uint8_t i_cred[OSD_CAP_LEN];/* all-powerful credential */
+ struct ore_comp one_comp; /* same component for all devices */
+ struct ore_components comps; /* inode view of the device table */
};
static inline osd_id exofs_oi_objno(struct exofs_i_info *oi)
@@ -115,52 +94,6 @@ static inline osd_id exofs_oi_objno(struct exofs_i_info *oi)
return oi->vfs_inode.i_ino + EXOFS_OBJ_OFF;
}
-struct exofs_io_state;
-typedef void (*exofs_io_done_fn)(struct exofs_io_state *or, void *private);
-
-struct exofs_io_state {
- struct kref kref;
-
- void *private;
- exofs_io_done_fn done;
-
- struct exofs_layout *layout;
- struct osd_obj_id obj;
- u8 *cred;
-
- /* Global read/write IO*/
- loff_t offset;
- unsigned long length;
- void *kern_buff;
-
- struct page **pages;
- unsigned nr_pages;
- unsigned pgbase;
- unsigned pages_consumed;
-
- /* Attributes */
- unsigned in_attr_len;
- struct osd_attr *in_attr;
- unsigned out_attr_len;
- struct osd_attr *out_attr;
-
- /* Variable array of size numdevs */
- unsigned numdevs;
- struct exofs_per_dev_state {
- struct osd_request *or;
- struct bio *bio;
- loff_t offset;
- unsigned length;
- unsigned dev;
- } per_dev[];
-};
-
-static inline unsigned exofs_io_state_size(unsigned numdevs)
-{
- return sizeof(struct exofs_io_state) +
- sizeof(struct exofs_per_dev_state) * numdevs;
-}
-
/*
* our inode flags
*/
@@ -205,12 +138,6 @@ static inline struct exofs_i_info *exofs_i(struct inode *inode)
}
/*
- * Given a layout, object_number and stripe_index return the associated global
- * dev_index
- */
-unsigned exofs_layout_od_id(struct exofs_layout *layout,
- osd_id obj_no, unsigned layout_index);
-/*
* Maximum count of links to a file
*/
#define EXOFS_LINK_MAX 32000
@@ -219,44 +146,8 @@ unsigned exofs_layout_od_id(struct exofs_layout *layout,
* function declarations *
*************************/
-/* ios.c */
-void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
- const struct osd_obj_id *obj);
-int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
- u64 offset, void *p, unsigned length);
-
-int exofs_get_io_state(struct exofs_layout *layout,
- struct exofs_io_state **ios);
-void exofs_put_io_state(struct exofs_io_state *ios);
-
-int exofs_check_io(struct exofs_io_state *ios, u64 *resid);
-
-int exofs_sbi_create(struct exofs_io_state *ios);
-int exofs_sbi_remove(struct exofs_io_state *ios);
-int exofs_sbi_write(struct exofs_io_state *ios);
-int exofs_sbi_read(struct exofs_io_state *ios);
-
-int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr);
-
-int exofs_oi_truncate(struct exofs_i_info *oi, u64 new_len);
-static inline int exofs_oi_write(struct exofs_i_info *oi,
- struct exofs_io_state *ios)
-{
- ios->obj.id = exofs_oi_objno(oi);
- ios->cred = oi->i_cred;
- return exofs_sbi_write(ios);
-}
-
-static inline int exofs_oi_read(struct exofs_i_info *oi,
- struct exofs_io_state *ios)
-{
- ios->obj.id = exofs_oi_objno(oi);
- ios->cred = oi->i_cred;
- return exofs_sbi_read(ios);
-}
-
/* inode.c */
-unsigned exofs_max_io_pages(struct exofs_layout *layout,
+unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages);
int exofs_setattr(struct dentry *, struct iattr *);
int exofs_write_begin(struct file *file, struct address_space *mapping,
@@ -281,6 +172,8 @@ int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *,
struct inode *);
/* super.c */
+void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
+ const struct osd_obj_id *obj);
int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
/*********************
@@ -295,7 +188,6 @@ extern const struct file_operations exofs_file_operations;
/* inode.c */
extern const struct address_space_operations exofs_aops;
-extern const struct osd_attr g_attr_logical_length;
/* namei.c */
extern const struct inode_operations exofs_dir_inode_operations;
@@ -305,4 +197,33 @@ extern const struct inode_operations exofs_special_inode_operations;
extern const struct inode_operations exofs_symlink_inode_operations;
extern const struct inode_operations exofs_fast_symlink_inode_operations;
+/* exofs_init_comps will initialize an ore_components device array
+ * pointing to a single ore_comp struct, and a round-robin view
+ * of the device table.
+ * The first device of each inode is the [inode->ino % num_devices]
+ * and the rest of the devices sequentially following where the
+ * and the rest of the devices follow sequentially, wrapping around
+ * to the start of the table after the last device.
+ * bigger and that the device table repeats twice.
+ * See: exofs_read_lookup_dev_table()
+ */
+static inline void exofs_init_comps(struct ore_components *comps,
+ struct ore_comp *one_comp,
+ struct exofs_sb_info *sbi, osd_id oid)
+{
+ unsigned dev_mod = (unsigned)oid, first_dev;
+
+ one_comp->obj.partition = sbi->one_comp.obj.partition;
+ one_comp->obj.id = oid;
+ exofs_make_credential(one_comp->cred, &one_comp->obj);
+
+ comps->numdevs = sbi->comps.numdevs;
+ comps->single_comp = EC_SINGLE_COMP;
+ comps->comps = one_comp;
+
+ /* Round robin device view of the table */
+ first_dev = (dev_mod * sbi->layout.mirrors_p1) % sbi->comps.numdevs;
+ comps->ods = sbi->comps.ods + first_dev;
+}
+
#endif
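The exofs_init_comps() comment above describes a round-robin device view: an inode's first device index is derived from its object id, and the global device array is assumed to hold the table twice so that comps->ods can simply point into it at an offset. A small standalone model of that index computation; the device counts are made-up example values.

#include <stdio.h>

int main(void)
{
	unsigned numdevs = 6, mirrors_p1 = 2;	/* example values only */

	for (unsigned long long oid = 0; oid < 8; oid++) {
		unsigned dev_mod = (unsigned)oid;
		unsigned first_dev = (dev_mod * mirrors_p1) % numdevs;

		/* the window ods + first_dev stays numdevs entries long
		 * because the table is laid out twice in the global array */
		printf("oid=%llu -> first_dev=%u\n", oid, first_dev);
	}
	return 0;
}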
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 8472c098445d..f39a38fc2349 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -43,7 +43,7 @@ enum { BIO_MAX_PAGES_KMALLOC =
PAGE_SIZE / sizeof(struct page *),
};
-unsigned exofs_max_io_pages(struct exofs_layout *layout,
+unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages)
{
unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
@@ -58,7 +58,7 @@ struct page_collect {
struct exofs_sb_info *sbi;
struct inode *inode;
unsigned expected_pages;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct page **pages;
unsigned alloc_pages;
@@ -110,13 +110,6 @@ static int pcol_try_alloc(struct page_collect *pcol)
{
unsigned pages;
- if (!pcol->ios) { /* First time allocate io_state */
- int ret = exofs_get_io_state(&pcol->sbi->layout, &pcol->ios);
-
- if (ret)
- return ret;
- }
-
/* TODO: easily support bio chaining */
pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
@@ -140,7 +133,7 @@ static void pcol_free(struct page_collect *pcol)
pcol->pages = NULL;
if (pcol->ios) {
- exofs_put_io_state(pcol->ios);
+ ore_put_io_state(pcol->ios);
pcol->ios = NULL;
}
}
@@ -200,7 +193,7 @@ static int __readpages_done(struct page_collect *pcol)
u64 resid;
u64 good_bytes;
u64 length = 0;
- int ret = exofs_check_io(pcol->ios, &resid);
+ int ret = ore_check_io(pcol->ios, &resid);
if (likely(!ret))
good_bytes = pcol->length;
@@ -241,7 +234,7 @@ static int __readpages_done(struct page_collect *pcol)
}
/* callback of async reads */
-static void readpages_done(struct exofs_io_state *ios, void *p)
+static void readpages_done(struct ore_io_state *ios, void *p)
{
struct page_collect *pcol = p;
@@ -269,20 +262,28 @@ static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
static int read_exec(struct page_collect *pcol)
{
struct exofs_i_info *oi = exofs_i(pcol->inode);
- struct exofs_io_state *ios = pcol->ios;
+ struct ore_io_state *ios;
struct page_collect *pcol_copy = NULL;
int ret;
if (!pcol->pages)
return 0;
+ if (!pcol->ios) {
+ int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps, true,
+ pcol->pg_first << PAGE_CACHE_SHIFT,
+ pcol->length, &pcol->ios);
+
+ if (ret)
+ return ret;
+ }
+
+ ios = pcol->ios;
ios->pages = pcol->pages;
ios->nr_pages = pcol->nr_pages;
- ios->length = pcol->length;
- ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;
if (pcol->read_4_write) {
- exofs_oi_read(oi, pcol->ios);
+ ore_read(pcol->ios);
return __readpages_done(pcol);
}
@@ -295,14 +296,14 @@ static int read_exec(struct page_collect *pcol)
*pcol_copy = *pcol;
ios->done = readpages_done;
ios->private = pcol_copy;
- ret = exofs_oi_read(oi, ios);
+ ret = ore_read(ios);
if (unlikely(ret))
goto err;
atomic_inc(&pcol->sbi->s_curr_pending);
EXOFS_DBGMSG2("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
- ios->obj.id, _LLU(ios->offset), pcol->length);
+ oi->one_comp.obj.id, _LLU(ios->offset), pcol->length);
/* pages ownership was passed to pcol_copy */
_pcol_reset(pcol);
@@ -457,14 +458,14 @@ static int exofs_readpage(struct file *file, struct page *page)
}
/* Callback for osd_write. All writes are asynchronous */
-static void writepages_done(struct exofs_io_state *ios, void *p)
+static void writepages_done(struct ore_io_state *ios, void *p)
{
struct page_collect *pcol = p;
int i;
u64 resid;
u64 good_bytes;
u64 length = 0;
- int ret = exofs_check_io(ios, &resid);
+ int ret = ore_check_io(ios, &resid);
atomic_dec(&pcol->sbi->s_curr_pending);
@@ -507,13 +508,21 @@ static void writepages_done(struct exofs_io_state *ios, void *p)
static int write_exec(struct page_collect *pcol)
{
struct exofs_i_info *oi = exofs_i(pcol->inode);
- struct exofs_io_state *ios = pcol->ios;
+ struct ore_io_state *ios;
struct page_collect *pcol_copy = NULL;
int ret;
if (!pcol->pages)
return 0;
+ BUG_ON(pcol->ios);
+ ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps, false,
+ pcol->pg_first << PAGE_CACHE_SHIFT,
+ pcol->length, &pcol->ios);
+
+ if (unlikely(ret))
+ goto err;
+
pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
if (!pcol_copy) {
EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
@@ -523,16 +532,15 @@ static int write_exec(struct page_collect *pcol)
*pcol_copy = *pcol;
+ ios = pcol->ios;
ios->pages = pcol_copy->pages;
ios->nr_pages = pcol_copy->nr_pages;
- ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
- ios->length = pcol_copy->length;
ios->done = writepages_done;
ios->private = pcol_copy;
- ret = exofs_oi_write(oi, ios);
+ ret = ore_write(ios);
if (unlikely(ret)) {
- EXOFS_ERR("write_exec: exofs_oi_write() Failed\n");
+ EXOFS_ERR("write_exec: ore_write() Failed\n");
goto err;
}
@@ -844,17 +852,15 @@ static inline int exofs_inode_is_fast_symlink(struct inode *inode)
return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}
-const struct osd_attr g_attr_logical_length = ATTR_DEF(
- OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
-
static int _do_truncate(struct inode *inode, loff_t newsize)
{
struct exofs_i_info *oi = exofs_i(inode);
+ struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
int ret;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- ret = exofs_oi_truncate(oi, (u64)newsize);
+ ret = ore_truncate(&sbi->layout, &oi->comps, (u64)newsize);
if (likely(!ret))
truncate_setsize(inode, newsize);
@@ -917,30 +923,26 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
[1] = g_attr_inode_file_layout,
[2] = g_attr_inode_dir_layout,
};
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct exofs_on_disk_inode_layout *layout;
int ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
return ret;
}
- ios->obj.id = exofs_oi_objno(oi);
- exofs_make_credential(oi->i_cred, &ios->obj);
- ios->cred = oi->i_cred;
-
- attrs[1].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs);
- attrs[2].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs);
+ attrs[1].len = exofs_on_disk_inode_layout_size(sbi->comps.numdevs);
+ attrs[2].len = exofs_on_disk_inode_layout_size(sbi->comps.numdevs);
ios->in_attr = attrs;
ios->in_attr_len = ARRAY_SIZE(attrs);
- ret = exofs_sbi_read(ios);
+ ret = ore_read(ios);
if (unlikely(ret)) {
EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
- _LLU(ios->obj.id), ret);
+ _LLU(oi->one_comp.obj.id), ret);
memset(inode, 0, sizeof(*inode));
inode->i_mode = 0040000 | (0777 & ~022);
/* If object is lost on target we might as well enable it's
@@ -990,7 +992,7 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
}
out:
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ret;
}
@@ -1016,6 +1018,8 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
return inode;
oi = exofs_i(inode);
__oi_init(oi);
+ exofs_init_comps(&oi->comps, &oi->one_comp, sb->s_fs_info,
+ exofs_oi_objno(oi));
/* read the inode from the osd */
ret = exofs_get_inode(sb, oi, &fcb);
@@ -1107,21 +1111,22 @@ int __exofs_wait_obj_created(struct exofs_i_info *oi)
* set the obj_created flag so that other methods know that the object exists on
* the OSD.
*/
-static void create_done(struct exofs_io_state *ios, void *p)
+static void create_done(struct ore_io_state *ios, void *p)
{
struct inode *inode = p;
struct exofs_i_info *oi = exofs_i(inode);
struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
int ret;
- ret = exofs_check_io(ios, NULL);
- exofs_put_io_state(ios);
+ ret = ore_check_io(ios, NULL);
+ ore_put_io_state(ios);
atomic_dec(&sbi->s_curr_pending);
if (unlikely(ret)) {
EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
- _LLU(exofs_oi_objno(oi)), _LLU(sbi->layout.s_pid));
+ _LLU(exofs_oi_objno(oi)),
+ _LLU(oi->one_comp.obj.partition));
/*TODO: When FS is corrupted creation can fail, object already
* exist. Get rid of this asynchronous creation, if exist
* increment the obj counter and try the next object. Until we
@@ -1140,14 +1145,13 @@ static void create_done(struct exofs_io_state *ios, void *p)
*/
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
- struct super_block *sb;
+ struct super_block *sb = dir->i_sb;
+ struct exofs_sb_info *sbi = sb->s_fs_info;
struct inode *inode;
struct exofs_i_info *oi;
- struct exofs_sb_info *sbi;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
int ret;
- sb = dir->i_sb;
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -1157,8 +1161,6 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
set_obj_2bcreated(oi);
- sbi = sb->s_fs_info;
-
inode->i_mapping->backing_dev_info = sb->s_bdi;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
@@ -1170,25 +1172,24 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
spin_unlock(&sbi->s_next_gen_lock);
insert_inode_hash(inode);
+ exofs_init_comps(&oi->comps, &oi->one_comp, sb->s_fs_info,
+ exofs_oi_objno(oi));
exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */
mark_inode_dirty(inode);
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
+ EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
return ERR_PTR(ret);
}
- ios->obj.id = exofs_oi_objno(oi);
- exofs_make_credential(oi->i_cred, &ios->obj);
-
ios->done = create_done;
ios->private = inode;
- ios->cred = oi->i_cred;
- ret = exofs_sbi_create(ios);
+
+ ret = ore_create(ios);
if (ret) {
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ERR_PTR(ret);
}
atomic_inc(&sbi->s_curr_pending);
@@ -1207,11 +1208,11 @@ struct updatei_args {
/*
* Callback function from exofs_update_inode().
*/
-static void updatei_done(struct exofs_io_state *ios, void *p)
+static void updatei_done(struct ore_io_state *ios, void *p)
{
struct updatei_args *args = p;
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
atomic_dec(&args->sbi->s_curr_pending);
@@ -1227,7 +1228,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
struct exofs_i_info *oi = exofs_i(inode);
struct super_block *sb = inode->i_sb;
struct exofs_sb_info *sbi = sb->s_fs_info;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct osd_attr attr;
struct exofs_fcb *fcb;
struct updatei_args *args;
@@ -1266,9 +1267,9 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
} else
memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
goto free_args;
}
@@ -1285,13 +1286,13 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
ios->private = args;
}
- ret = exofs_oi_write(oi, ios);
+ ret = ore_write(ios);
if (!do_sync && !ret) {
atomic_inc(&sbi->s_curr_pending);
goto out; /* deallocation in updatei_done */
}
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
free_args:
kfree(args);
out:
@@ -1310,11 +1311,11 @@ int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
* Callback function from exofs_delete_inode() - don't have much cleaning up to
* do.
*/
-static void delete_done(struct exofs_io_state *ios, void *p)
+static void delete_done(struct ore_io_state *ios, void *p)
{
struct exofs_sb_info *sbi = p;
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
atomic_dec(&sbi->s_curr_pending);
}
@@ -1329,7 +1330,7 @@ void exofs_evict_inode(struct inode *inode)
struct exofs_i_info *oi = exofs_i(inode);
struct super_block *sb = inode->i_sb;
struct exofs_sb_info *sbi = sb->s_fs_info;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
int ret;
truncate_inode_pages(&inode->i_data, 0);
@@ -1349,20 +1350,19 @@ void exofs_evict_inode(struct inode *inode)
/* ignore the error, attempt a remove anyway */
/* Now Remove the OSD objects */
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
return;
}
- ios->obj.id = exofs_oi_objno(oi);
ios->done = delete_done;
ios->private = sbi;
- ios->cred = oi->i_cred;
- ret = exofs_sbi_remove(ios);
+
+ ret = ore_remove(ios);
if (ret) {
- EXOFS_ERR("%s: exofs_sbi_remove failed\n", __func__);
- exofs_put_io_state(ios);
+ EXOFS_ERR("%s: ore_remove failed\n", __func__);
+ ore_put_io_state(ios);
return;
}
atomic_inc(&sbi->s_curr_pending);
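Across inode.c the per-call object id and credential setup disappears: ore_get_rw_state()/ore_get_io_state() take the ore_components prepared by exofs_init_comps(), and offset and length are handed over when the io_state is created. A hedged usage sketch assembled from the read_exec() hunks above, with error handling and the pcol_copy bookkeeping trimmed (kernel-style, not standalone):

static int read_exec_sketch(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	int ret;

	/* components, offset and length are fixed at creation time now */
	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps, true /* read */,
			       pcol->pg_first << PAGE_CACHE_SHIFT,
			       pcol->length, &ios);
	if (unlikely(ret))
		return ret;

	ios->pages = pcol->pages;
	ios->nr_pages = pcol->nr_pages;
	ios->done = readpages_done;	/* async completion callback */
	ios->private = pcol;
	return ore_read(ios);
}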
diff --git a/fs/exofs/ios.c b/fs/exofs/ore.c
index f74a2ec027a6..25305af88198 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ore.c
@@ -23,81 +23,87 @@
*/
#include <linux/slab.h>
-#include <scsi/scsi_device.h>
#include <asm/div64.h>
-#include "exofs.h"
+#include <scsi/osd_ore.h>
-#define EXOFS_DBGMSG2(M...) do {} while (0)
-/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
+#define ORE_ERR(fmt, a...) printk(KERN_ERR "ore: " fmt, ##a)
-void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
-{
- osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
-}
+#ifdef CONFIG_EXOFS_DEBUG
+#define ORE_DBGMSG(fmt, a...) \
+ printk(KERN_NOTICE "ore @%s:%d: " fmt, __func__, __LINE__, ##a)
+#else
+#define ORE_DBGMSG(fmt, a...) \
+ do { if (0) printk(fmt, ##a); } while (0)
+#endif
-int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
- u64 offset, void *p, unsigned length)
-{
- struct osd_request *or = osd_start_request(od, GFP_KERNEL);
-/* struct osd_sense_info osi = {.key = 0};*/
- int ret;
+/* u64 has problems with printk this will cast it to unsigned long long */
+#define _LLU(x) (unsigned long long)(x)
- if (unlikely(!or)) {
- EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
- return -ENOMEM;
- }
- ret = osd_req_read_kern(or, obj, offset, p, length);
- if (unlikely(ret)) {
- EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
- goto out;
- }
+#define ORE_DBGMSG2(M...) do {} while (0)
+/* #define ORE_DBGMSG2 ORE_DBGMSG */
- ret = osd_finalize_request(or, 0, cred, NULL);
- if (unlikely(ret)) {
- EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
- goto out;
- }
+MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
+MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
+MODULE_LICENSE("GPL");
- ret = osd_execute_request(or);
- if (unlikely(ret))
- EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
- /* osd_req_decode_sense(or, ret); */
+static u8 *_ios_cred(struct ore_io_state *ios, unsigned index)
+{
+ return ios->comps->comps[index & ios->comps->single_comp].cred;
+}
-out:
- osd_end_request(or);
- return ret;
+static struct osd_obj_id *_ios_obj(struct ore_io_state *ios, unsigned index)
+{
+ return &ios->comps->comps[index & ios->comps->single_comp].obj;
}
-int exofs_get_io_state(struct exofs_layout *layout,
- struct exofs_io_state **pios)
+static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
{
- struct exofs_io_state *ios;
+ return ios->comps->ods[index];
+}
+
+int ore_get_rw_state(struct ore_layout *layout, struct ore_components *comps,
+ bool is_reading, u64 offset, u64 length,
+ struct ore_io_state **pios)
+{
+ struct ore_io_state *ios;
/*TODO: Maybe use kmem_cach per sbi of size
* exofs_io_state_size(layout->s_numdevs)
*/
- ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
+ ios = kzalloc(ore_io_state_size(comps->numdevs), GFP_KERNEL);
if (unlikely(!ios)) {
- EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
- exofs_io_state_size(layout->s_numdevs));
+ ORE_DBGMSG("Failed kzalloc bytes=%d\n",
+ ore_io_state_size(comps->numdevs));
*pios = NULL;
return -ENOMEM;
}
ios->layout = layout;
- ios->obj.partition = layout->s_pid;
+ ios->comps = comps;
+ ios->offset = offset;
+ ios->length = length;
+ ios->reading = is_reading;
+
*pios = ios;
return 0;
}
+EXPORT_SYMBOL(ore_get_rw_state);
+
+int ore_get_io_state(struct ore_layout *layout, struct ore_components *comps,
+ struct ore_io_state **ios)
+{
+ return ore_get_rw_state(layout, comps, true, 0, 0, ios);
+}
+EXPORT_SYMBOL(ore_get_io_state);
-void exofs_put_io_state(struct exofs_io_state *ios)
+void ore_put_io_state(struct ore_io_state *ios)
{
if (ios) {
unsigned i;
for (i = 0; i < ios->numdevs; i++) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[i];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[i];
if (per_dev->or)
osd_end_request(per_dev->or);
@@ -108,31 +114,9 @@ void exofs_put_io_state(struct exofs_io_state *ios)
kfree(ios);
}
}
+EXPORT_SYMBOL(ore_put_io_state);
-unsigned exofs_layout_od_id(struct exofs_layout *layout,
- osd_id obj_no, unsigned layout_index)
-{
-/* switch (layout->lay_func) {
- case LAYOUT_MOVING_WINDOW:
- {*/
- unsigned dev_mod = obj_no;
-
- return (layout_index + dev_mod * layout->mirrors_p1) %
- layout->s_numdevs;
-/* }
- case LAYOUT_FUNC_IMPLICT:
- return layout->devs[layout_index];
- }*/
-}
-
-static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios,
- unsigned layout_index)
-{
- return ios->layout->s_ods[
- exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)];
-}
-
-static void _sync_done(struct exofs_io_state *ios, void *p)
+static void _sync_done(struct ore_io_state *ios, void *p)
{
struct completion *waiting = p;
@@ -141,20 +125,20 @@ static void _sync_done(struct exofs_io_state *ios, void *p)
static void _last_io(struct kref *kref)
{
- struct exofs_io_state *ios = container_of(
- kref, struct exofs_io_state, kref);
+ struct ore_io_state *ios = container_of(
+ kref, struct ore_io_state, kref);
ios->done(ios, ios->private);
}
static void _done_io(struct osd_request *or, void *p)
{
- struct exofs_io_state *ios = p;
+ struct ore_io_state *ios = p;
kref_put(&ios->kref, _last_io);
}
-static int exofs_io_execute(struct exofs_io_state *ios)
+static int ore_io_execute(struct ore_io_state *ios)
{
DECLARE_COMPLETION_ONSTACK(wait);
bool sync = (ios->done == NULL);
@@ -170,9 +154,9 @@ static int exofs_io_execute(struct exofs_io_state *ios)
if (unlikely(!or))
continue;
- ret = osd_finalize_request(or, 0, ios->cred, NULL);
+ ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
if (unlikely(ret)) {
- EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n",
+ ORE_DBGMSG("Failed to osd_finalize_request() => %d\n",
ret);
return ret;
}
@@ -194,7 +178,7 @@ static int exofs_io_execute(struct exofs_io_state *ios)
if (sync) {
wait_for_completion(&wait);
- ret = exofs_check_io(ios, NULL);
+ ret = ore_check_io(ios, NULL);
}
return ret;
}
@@ -214,7 +198,7 @@ static void _clear_bio(struct bio *bio)
}
}
-int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
+int ore_check_io(struct ore_io_state *ios, u64 *resid)
{
enum osd_err_priority acumulated_osd_err = 0;
int acumulated_lin_err = 0;
@@ -235,7 +219,7 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
/* start read offset passed endof file */
_clear_bio(ios->per_dev[i].bio);
- EXOFS_DBGMSG("start read offset passed end of file "
+ ORE_DBGMSG("start read offset passed end of file "
"offset=0x%llx, length=0x%llx\n",
_LLU(ios->per_dev[i].offset),
_LLU(ios->per_dev[i].length));
@@ -259,6 +243,7 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
return acumulated_lin_err;
}
+EXPORT_SYMBOL(ore_check_io);
/*
* L - logical offset into the file
@@ -305,20 +290,21 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
struct _striping_info {
u64 obj_offset;
u64 group_length;
+ u64 M; /* for truncate */
unsigned dev;
unsigned unit_off;
};
-static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
+static void _calc_stripe_info(struct ore_layout *layout, u64 file_offset,
struct _striping_info *si)
{
- u32 stripe_unit = ios->layout->stripe_unit;
- u32 group_width = ios->layout->group_width;
- u64 group_depth = ios->layout->group_depth;
+ u32 stripe_unit = layout->stripe_unit;
+ u32 group_width = layout->group_width;
+ u64 group_depth = layout->group_depth;
u32 U = stripe_unit * group_width;
u64 T = U * group_depth;
- u64 S = T * ios->layout->group_count;
+ u64 S = T * layout->group_count;
u64 M = div64_u64(file_offset, S);
/*
@@ -333,7 +319,7 @@ static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
/* "H - (N * U)" is just "H % U" so it's bound to u32 */
si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
- si->dev *= ios->layout->mirrors_p1;
+ si->dev *= layout->mirrors_p1;
div_u64_rem(file_offset, stripe_unit, &si->unit_off);
@@ -341,15 +327,16 @@ static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
(M * group_depth * stripe_unit);
si->group_length = T - H;
+ si->M = M;
}
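/*
 * Illustrative numbers for the quantities computed above (assumed, not
 * part of the patch): with stripe_unit = 64K, group_width = 4,
 * group_depth = 16 and group_count = 2, U = 256K, T = 4M and S = 8M.
 * A file_offset of 10M + 4096 then gives M = 1 and unit_off = 4096.
 */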
-static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
- unsigned pgbase, struct exofs_per_dev_state *per_dev,
+static int _add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
+ unsigned pgbase, struct ore_per_dev_state *per_dev,
int cur_len)
{
unsigned pg = *cur_pg;
struct request_queue *q =
- osd_request_queue(exofs_ios_od(ios, per_dev->dev));
+ osd_request_queue(_ios_od(ios, per_dev->dev));
per_dev->length += cur_len;
@@ -361,7 +348,7 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
if (unlikely(!per_dev->bio)) {
- EXOFS_DBGMSG("Failed to allocate BIO size=%u\n",
+ ORE_DBGMSG("Failed to allocate BIO size=%u\n",
bio_size);
return -ENOMEM;
}
@@ -387,7 +374,7 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
return 0;
}
-static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
+static int _prepare_one_group(struct ore_io_state *ios, u64 length,
struct _striping_info *si)
{
unsigned stripe_unit = ios->layout->stripe_unit;
@@ -400,7 +387,7 @@ static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
int ret = 0;
while (length) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[dev];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[dev];
unsigned cur_len, page_off = 0;
if (!per_dev->length) {
@@ -443,7 +430,7 @@ out:
return ret;
}
-static int _prepare_for_striping(struct exofs_io_state *ios)
+static int _prepare_for_striping(struct ore_io_state *ios)
{
u64 length = ios->length;
u64 offset = ios->offset;
@@ -452,9 +439,9 @@ static int _prepare_for_striping(struct exofs_io_state *ios)
if (!ios->pages) {
if (ios->kern_buff) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[0];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[0];
- _calc_stripe_info(ios, ios->offset, &si);
+ _calc_stripe_info(ios->layout, ios->offset, &si);
per_dev->offset = si.obj_offset;
per_dev->dev = si.dev;
@@ -468,7 +455,7 @@ static int _prepare_for_striping(struct exofs_io_state *ios)
}
while (length) {
- _calc_stripe_info(ios, offset, &si);
+ _calc_stripe_info(ios->layout, offset, &si);
if (length < si.group_length)
si.group_length = length;
@@ -485,57 +472,59 @@ out:
return ret;
}
-int exofs_sbi_create(struct exofs_io_state *ios)
+int ore_create(struct ore_io_state *ios)
{
int i, ret;
- for (i = 0; i < ios->layout->s_numdevs; i++) {
+ for (i = 0; i < ios->comps->numdevs; i++) {
struct osd_request *or;
- or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
goto out;
}
ios->per_dev[i].or = or;
ios->numdevs++;
- osd_req_create_object(or, &ios->obj);
+ osd_req_create_object(or, _ios_obj(ios, i));
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
out:
return ret;
}
+EXPORT_SYMBOL(ore_create);
-int exofs_sbi_remove(struct exofs_io_state *ios)
+int ore_remove(struct ore_io_state *ios)
{
int i, ret;
- for (i = 0; i < ios->layout->s_numdevs; i++) {
+ for (i = 0; i < ios->comps->numdevs; i++) {
struct osd_request *or;
- or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
goto out;
}
ios->per_dev[i].or = or;
ios->numdevs++;
- osd_req_remove_object(or, &ios->obj);
+ osd_req_remove_object(or, _ios_obj(ios, i));
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
out:
return ret;
}
+EXPORT_SYMBOL(ore_remove);
-static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
+static int _write_mirror(struct ore_io_state *ios, int cur_comp)
{
- struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp];
+ struct ore_per_dev_state *master_dev = &ios->per_dev[cur_comp];
unsigned dev = ios->per_dev[cur_comp].dev;
unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
int ret = 0;
@@ -544,12 +533,12 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
return 0; /* Just an empty slot */
for (; cur_comp < last_comp; ++cur_comp, ++dev) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
struct osd_request *or;
- or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, dev), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
goto out;
}
@@ -563,7 +552,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
bio = bio_kmalloc(GFP_KERNEL,
master_dev->bio->bi_max_vecs);
if (unlikely(!bio)) {
- EXOFS_DBGMSG(
+ ORE_DBGMSG(
"Failed to allocate BIO size=%u\n",
master_dev->bio->bi_max_vecs);
ret = -ENOMEM;
@@ -582,25 +571,29 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
bio->bi_rw |= REQ_WRITE;
}
- osd_req_write(or, &ios->obj, per_dev->offset, bio,
- per_dev->length);
- EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
+ osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
+ bio, per_dev->length);
+ ORE_DBGMSG("write(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(_ios_obj(ios, dev)->id),
+ _LLU(per_dev->offset),
_LLU(per_dev->length), dev);
} else if (ios->kern_buff) {
- ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
- ios->kern_buff, ios->length);
+ ret = osd_req_write_kern(or, _ios_obj(ios, dev),
+ per_dev->offset,
+ ios->kern_buff, ios->length);
if (unlikely(ret))
goto out;
- EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
+ ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(_ios_obj(ios, dev)->id),
+ _LLU(per_dev->offset),
_LLU(ios->length), dev);
} else {
- osd_req_set_attributes(or, &ios->obj);
- EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
- _LLU(ios->obj.id), ios->out_attr_len, dev);
+ osd_req_set_attributes(or, _ios_obj(ios, dev));
+ ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
+ _LLU(_ios_obj(ios, dev)->id),
+ ios->out_attr_len, dev);
}
if (ios->out_attr)
@@ -616,7 +609,7 @@ out:
return ret;
}
-int exofs_sbi_write(struct exofs_io_state *ios)
+int ore_write(struct ore_io_state *ios)
{
int i;
int ret;
@@ -626,52 +619,55 @@ int exofs_sbi_write(struct exofs_io_state *ios)
return ret;
for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
- ret = _sbi_write_mirror(ios, i);
+ ret = _write_mirror(ios, i);
if (unlikely(ret))
return ret;
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
return ret;
}
+EXPORT_SYMBOL(ore_write);
-static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
+static int _read_mirror(struct ore_io_state *ios, unsigned cur_comp)
{
struct osd_request *or;
- struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
- unsigned first_dev = (unsigned)ios->obj.id;
+ struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+ struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
+ unsigned first_dev = (unsigned)obj->id;
if (ios->pages && !per_dev->length)
return 0; /* Just an empty slot */
first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
- or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, first_dev), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
return -ENOMEM;
}
per_dev->or = or;
if (ios->pages) {
- osd_req_read(or, &ios->obj, per_dev->offset,
+ osd_req_read(or, obj, per_dev->offset,
per_dev->bio, per_dev->length);
- EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
- " dev=%d\n", _LLU(ios->obj.id),
+ ORE_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
+ " dev=%d\n", _LLU(obj->id),
_LLU(per_dev->offset), _LLU(per_dev->length),
first_dev);
} else if (ios->kern_buff) {
- int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
+ int ret = osd_req_read_kern(or, obj, per_dev->offset,
ios->kern_buff, ios->length);
- EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
+ ORE_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d ret=>%d\n",
- _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(obj->id), _LLU(per_dev->offset),
_LLU(ios->length), first_dev, ret);
if (unlikely(ret))
return ret;
} else {
- osd_req_get_attributes(or, &ios->obj);
- EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
- _LLU(ios->obj.id), ios->in_attr_len, first_dev);
+ osd_req_get_attributes(or, obj);
+ ORE_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
+ _LLU(obj->id),
+ ios->in_attr_len, first_dev);
}
if (ios->out_attr)
osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);
@@ -682,7 +678,7 @@ static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
return 0;
}
-int exofs_sbi_read(struct exofs_io_state *ios)
+int ore_read(struct ore_io_state *ios)
{
int i;
int ret;
@@ -692,16 +688,17 @@ int exofs_sbi_read(struct exofs_io_state *ios)
return ret;
for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
- ret = _sbi_read_mirror(ios, i);
+ ret = _read_mirror(ios, i);
if (unlikely(ret))
return ret;
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
return ret;
}
+EXPORT_SYMBOL(ore_read);
-int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
+int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr)
{
struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
void *iter = NULL;
@@ -721,83 +718,118 @@ int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
return -EIO;
}
+EXPORT_SYMBOL(extract_attr_from_ios);
-static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
+static int _truncate_mirrors(struct ore_io_state *ios, unsigned cur_comp,
struct osd_attr *attr)
{
int last_comp = cur_comp + ios->layout->mirrors_p1;
for (; cur_comp < last_comp; ++cur_comp) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
struct osd_request *or;
- or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, cur_comp), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
return -ENOMEM;
}
per_dev->or = or;
- osd_req_set_attributes(or, &ios->obj);
+ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
osd_req_add_set_attr_list(or, attr, 1);
}
return 0;
}
-int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
+struct _trunc_info {
+ struct _striping_info si;
+ u64 prev_group_obj_off;
+ u64 next_group_obj_off;
+
+ unsigned first_group_dev;
+ unsigned nex_group_dev;
+ unsigned max_devs;
+};
+
+void _calc_trunk_info(struct ore_layout *layout, u64 file_offset,
+ struct _trunc_info *ti)
+{
+ unsigned stripe_unit = layout->stripe_unit;
+
+ _calc_stripe_info(layout, file_offset, &ti->si);
+
+ ti->prev_group_obj_off = ti->si.M * stripe_unit;
+ ti->next_group_obj_off = ti->si.M ? (ti->si.M - 1) * stripe_unit : 0;
+
+ ti->first_group_dev = ti->si.dev - (ti->si.dev % layout->group_width);
+ ti->nex_group_dev = ti->first_group_dev + layout->group_width;
+ ti->max_devs = layout->group_width * layout->group_count;
+}
+
+int ore_truncate(struct ore_layout *layout, struct ore_components *comps,
+ u64 size)
{
- struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct exofs_trunc_attr {
struct osd_attr attr;
__be64 newsize;
} *size_attrs;
- struct _striping_info si;
+ struct _trunc_info ti;
int i, ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(layout, comps, &ios);
if (unlikely(ret))
return ret;
- size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs),
+ _calc_trunk_info(ios->layout, size, &ti);
+
+ size_attrs = kcalloc(ti.max_devs, sizeof(*size_attrs),
GFP_KERNEL);
if (unlikely(!size_attrs)) {
ret = -ENOMEM;
goto out;
}
- ios->obj.id = exofs_oi_objno(oi);
- ios->cred = oi->i_cred;
+ ios->numdevs = ios->comps->numdevs;
- ios->numdevs = ios->layout->s_numdevs;
- _calc_stripe_info(ios, size, &si);
-
- for (i = 0; i < ios->layout->group_width; ++i) {
+ for (i = 0; i < ti.max_devs; ++i) {
struct exofs_trunc_attr *size_attr = &size_attrs[i];
u64 obj_size;
- if (i < si.dev)
- obj_size = si.obj_offset +
- ios->layout->stripe_unit - si.unit_off;
- else if (i == si.dev)
- obj_size = si.obj_offset;
- else /* i > si.dev */
- obj_size = si.obj_offset - si.unit_off;
+ if (i < ti.first_group_dev)
+ obj_size = ti.prev_group_obj_off;
+ else if (i >= ti.nex_group_dev)
+ obj_size = ti.next_group_obj_off;
+ else if (i < ti.si.dev) /* dev within this group */
+ obj_size = ti.si.obj_offset +
+ ios->layout->stripe_unit - ti.si.unit_off;
+ else if (i == ti.si.dev)
+ obj_size = ti.si.obj_offset;
+ else /* i > ti.dev */
+ obj_size = ti.si.obj_offset - ti.si.unit_off;
size_attr->newsize = cpu_to_be64(obj_size);
size_attr->attr = g_attr_logical_length;
size_attr->attr.val_ptr = &size_attr->newsize;
+ ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
+ _LLU(comps->comps->obj.id), _LLU(obj_size), i);
ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
&size_attr->attr);
if (unlikely(ret))
goto out;
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
out:
kfree(size_attrs);
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ret;
}
+EXPORT_SYMBOL(ore_truncate);
+
+const struct osd_attr g_attr_logical_length = ATTR_DEF(
+ OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
+EXPORT_SYMBOL(g_attr_logical_length);
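A minimal sketch of how a caller might drive the renamed ORE entry points exported above; the function example_ore_read() and its error handling are illustrative only and omit the bio/kern_buff wiring:

static int example_ore_read(struct ore_layout *layout,
			    struct ore_components *comps,
			    u64 offset, u64 length)
{
	struct ore_io_state *ios;
	int ret;

	/* Allocates an io_state sized for comps->numdevs devices. */
	ret = ore_get_rw_state(layout, comps, true /* reading */,
			       offset, length, &ios);
	if (unlikely(ret))
		return ret;

	/* pages/bio or kern_buff setup would go here.  With ios->done
	 * left NULL, ore_read() submits the per-device requests and
	 * waits for completion before returning. */
	ret = ore_read(ios);

	ore_put_io_state(ios);
	return ret;
}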
diff --git a/fs/exofs/pnfs.h b/fs/exofs/pnfs.h
deleted file mode 100644
index c52e9888b8ab..000000000000
--- a/fs/exofs/pnfs.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <bharrosh@panasas.com>
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License version 2 as published by the Free
- * Software Foundation.
- *
- */
-
-/* FIXME: Remove this file once pnfs hits mainline */
-
-#ifndef __EXOFS_PNFS_H__
-#define __EXOFS_PNFS_H__
-
-#if ! defined(__PNFS_OSD_XDR_H__)
-
-enum pnfs_iomode {
- IOMODE_READ = 1,
- IOMODE_RW = 2,
- IOMODE_ANY = 3,
-};
-
-/* Layout Structure */
-enum pnfs_osd_raid_algorithm4 {
- PNFS_OSD_RAID_0 = 1,
- PNFS_OSD_RAID_4 = 2,
- PNFS_OSD_RAID_5 = 3,
- PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
-};
-
-struct pnfs_osd_data_map {
- u32 odm_num_comps;
- u64 odm_stripe_unit;
- u32 odm_group_width;
- u32 odm_group_depth;
- u32 odm_mirror_cnt;
- u32 odm_raid_algorithm;
-};
-
-#endif /* ! defined(__PNFS_OSD_XDR_H__) */
-
-#endif /* __EXOFS_PNFS_H__ */
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index c57beddcc217..274894053b02 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -40,6 +40,8 @@
#include "exofs.h"
+#define EXOFS_DBGMSG2(M...) do {} while (0)
+
/******************************************************************************
* MOUNT OPTIONS
*****************************************************************************/
@@ -208,10 +210,48 @@ static void destroy_inodecache(void)
}
/******************************************************************************
- * SUPERBLOCK FUNCTIONS
+ * Some osd helpers
*****************************************************************************/
-static const struct super_operations exofs_sops;
-static const struct export_operations exofs_export_ops;
+void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
+{
+ osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
+}
+
+static int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
+ u64 offset, void *p, unsigned length)
+{
+ struct osd_request *or = osd_start_request(od, GFP_KERNEL);
+/* struct osd_sense_info osi = {.key = 0};*/
+ int ret;
+
+ if (unlikely(!or)) {
+ EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
+ return -ENOMEM;
+ }
+ ret = osd_req_read_kern(or, obj, offset, p, length);
+ if (unlikely(ret)) {
+ EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
+ goto out;
+ }
+
+ ret = osd_finalize_request(or, 0, cred, NULL);
+ if (unlikely(ret)) {
+ EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
+ goto out;
+ }
+
+ ret = osd_execute_request(or);
+ if (unlikely(ret))
+ EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
+ /* osd_req_decode_sense(or, ret); */
+
+out:
+ osd_end_request(or);
+ EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%p ret=>%d\n",
+ _LLU(obj->id), _LLU(offset), _LLU(length), od, ret);
+ return ret;
+}
static const struct osd_attr g_attr_sb_stats = ATTR_DEF(
EXOFS_APAGE_SB_DATA,
@@ -223,21 +263,19 @@ static int __sbi_read_stats(struct exofs_sb_info *sbi)
struct osd_attr attrs[] = {
[0] = g_attr_sb_stats,
};
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
int ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &sbi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
return ret;
}
- ios->cred = sbi->s_cred;
-
ios->in_attr = attrs;
ios->in_attr_len = ARRAY_SIZE(attrs);
- ret = exofs_sbi_read(ios);
+ ret = ore_read(ios);
if (unlikely(ret)) {
EXOFS_ERR("Error reading super_block stats => %d\n", ret);
goto out;
@@ -264,13 +302,13 @@ static int __sbi_read_stats(struct exofs_sb_info *sbi)
}
out:
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ret;
}
-static void stats_done(struct exofs_io_state *ios, void *p)
+static void stats_done(struct ore_io_state *ios, void *p)
{
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
/* Good thanks nothing to do anymore */
}
@@ -280,12 +318,12 @@ int exofs_sbi_write_stats(struct exofs_sb_info *sbi)
struct osd_attr attrs[] = {
[0] = g_attr_sb_stats,
};
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
int ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &sbi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
return ret;
}
@@ -293,21 +331,27 @@ int exofs_sbi_write_stats(struct exofs_sb_info *sbi)
sbi->s_ess.s_numfiles = cpu_to_le64(sbi->s_numfiles);
attrs[0].val_ptr = &sbi->s_ess;
- ios->cred = sbi->s_cred;
+
ios->done = stats_done;
ios->private = sbi;
ios->out_attr = attrs;
ios->out_attr_len = ARRAY_SIZE(attrs);
- ret = exofs_sbi_write(ios);
+ ret = ore_write(ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
- exofs_put_io_state(ios);
+ EXOFS_ERR("%s: ore_write failed.\n", __func__);
+ ore_put_io_state(ios);
}
return ret;
}
+/******************************************************************************
+ * SUPERBLOCK FUNCTIONS
+ *****************************************************************************/
+static const struct super_operations exofs_sops;
+static const struct export_operations exofs_export_ops;
+
/*
* Write the superblock to the OSD
*/
@@ -315,7 +359,9 @@ int exofs_sync_fs(struct super_block *sb, int wait)
{
struct exofs_sb_info *sbi;
struct exofs_fscb *fscb;
- struct exofs_io_state *ios;
+ struct ore_comp one_comp;
+ struct ore_components comps;
+ struct ore_io_state *ios;
int ret = -ENOMEM;
fscb = kmalloc(sizeof(*fscb), GFP_KERNEL);
@@ -331,7 +377,10 @@ int exofs_sync_fs(struct super_block *sb, int wait)
* version). Otherwise the exofs_fscb is read-only from mkfs time. All
* the writeable info is set in exofs_sbi_write_stats() above.
*/
- ret = exofs_get_io_state(&sbi->layout, &ios);
+
+ exofs_init_comps(&comps, &one_comp, sbi, EXOFS_SUPER_ID);
+
+ ret = ore_get_io_state(&sbi->layout, &comps, &ios);
if (unlikely(ret))
goto out;
@@ -345,14 +394,12 @@ int exofs_sync_fs(struct super_block *sb, int wait)
fscb->s_newfs = 0;
fscb->s_version = EXOFS_FSCB_VER;
- ios->obj.id = EXOFS_SUPER_ID;
ios->offset = 0;
ios->kern_buff = fscb;
- ios->cred = sbi->s_cred;
- ret = exofs_sbi_write(ios);
+ ret = ore_write(ios);
if (unlikely(ret))
- EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
+ EXOFS_ERR("%s: ore_write failed.\n", __func__);
else
sb->s_dirt = 0;
@@ -360,7 +407,7 @@ int exofs_sync_fs(struct super_block *sb, int wait)
unlock_super(sb);
out:
EXOFS_DBGMSG("s_nextid=0x%llx ret=%d\n", _LLU(sbi->s_nextid), ret);
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
kfree(fscb);
return ret;
}
@@ -384,15 +431,17 @@ static void _exofs_print_device(const char *msg, const char *dev_path,
void exofs_free_sbi(struct exofs_sb_info *sbi)
{
- while (sbi->layout.s_numdevs) {
- int i = --sbi->layout.s_numdevs;
- struct osd_dev *od = sbi->layout.s_ods[i];
+ while (sbi->comps.numdevs) {
+ int i = --sbi->comps.numdevs;
+ struct osd_dev *od = sbi->comps.ods[i];
if (od) {
- sbi->layout.s_ods[i] = NULL;
+ sbi->comps.ods[i] = NULL;
osduld_put_device(od);
}
}
+ if (sbi->comps.ods != sbi->_min_one_dev)
+ kfree(sbi->comps.ods);
kfree(sbi);
}
@@ -419,8 +468,8 @@ static void exofs_put_super(struct super_block *sb)
msecs_to_jiffies(100));
}
- _exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0],
- sbi->layout.s_pid);
+ _exofs_print_device("Unmounting", NULL, sbi->comps.ods[0],
+ sbi->one_comp.obj.partition);
bdi_destroy(&sbi->bdi);
exofs_free_sbi(sbi);
@@ -501,10 +550,19 @@ static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
return -EINVAL;
}
+ EXOFS_DBGMSG("exofs: layout: "
+ "num_comps=%u stripe_unit=0x%x group_width=%u "
+ "group_depth=0x%llx mirrors_p1=%u raid_algorithm=%u\n",
+ numdevs,
+ sbi->layout.stripe_unit,
+ sbi->layout.group_width,
+ _LLU(sbi->layout.group_depth),
+ sbi->layout.mirrors_p1,
+ sbi->data_map.odm_raid_algorithm);
return 0;
}
-static unsigned __ra_pages(struct exofs_layout *layout)
+static unsigned __ra_pages(struct ore_layout *layout)
{
const unsigned _MIN_RA = 32; /* min 128K read-ahead */
unsigned ra_pages = layout->group_width * layout->stripe_unit /
@@ -547,13 +605,11 @@ static int exofs_devs_2_odi(struct exofs_dt_device_info *dt_dev,
return !(odi->systemid_len || odi->osdname_len);
}
-static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
+static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
+ struct osd_dev *fscb_od,
unsigned table_count)
{
- struct exofs_sb_info *sbi = *psbi;
- struct osd_dev *fscb_od;
- struct osd_obj_id obj = {.partition = sbi->layout.s_pid,
- .id = EXOFS_DEVTABLE_ID};
+ struct ore_comp comp;
struct exofs_device_table *dt;
unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) +
sizeof(*dt);
@@ -567,10 +623,14 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
return -ENOMEM;
}
- fscb_od = sbi->layout.s_ods[0];
- sbi->layout.s_ods[0] = NULL;
- sbi->layout.s_numdevs = 0;
- ret = exofs_read_kern(fscb_od, sbi->s_cred, &obj, 0, dt, table_bytes);
+ sbi->comps.numdevs = 0;
+
+ comp.obj.partition = sbi->one_comp.obj.partition;
+ comp.obj.id = EXOFS_DEVTABLE_ID;
+ exofs_make_credential(comp.cred, &comp.obj);
+
+ ret = exofs_read_kern(fscb_od, comp.cred, &comp.obj, 0, dt,
+ table_bytes);
if (unlikely(ret)) {
EXOFS_ERR("ERROR: reading device table\n");
goto out;
@@ -588,16 +648,18 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
goto out;
if (likely(numdevs > 1)) {
- unsigned size = numdevs * sizeof(sbi->layout.s_ods[0]);
+ unsigned size = numdevs * sizeof(sbi->comps.ods[0]);
- sbi = krealloc(sbi, sizeof(*sbi) + size, GFP_KERNEL);
- if (unlikely(!sbi)) {
+ /* Twice bigger table: See exofs_init_comps() and below
+ * comment
+ */
+ sbi->comps.ods = kzalloc(size + size - 1, GFP_KERNEL);
+ if (unlikely(!sbi->comps.ods)) {
+ EXOFS_ERR("ERROR: faild allocating Device array[%d]\n",
+ numdevs);
ret = -ENOMEM;
goto out;
}
- memset(&sbi->layout.s_ods[1], 0,
- size - sizeof(sbi->layout.s_ods[0]));
- *psbi = sbi;
}
for (i = 0; i < numdevs; i++) {
@@ -619,8 +681,8 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
* line. We always keep them in device-table order.
*/
if (fscb_od && osduld_device_same(fscb_od, &odi)) {
- sbi->layout.s_ods[i] = fscb_od;
- ++sbi->layout.s_numdevs;
+ sbi->comps.ods[i] = fscb_od;
+ ++sbi->comps.numdevs;
fscb_od = NULL;
continue;
}
@@ -633,13 +695,13 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
goto out;
}
- sbi->layout.s_ods[i] = od;
- ++sbi->layout.s_numdevs;
+ sbi->comps.ods[i] = od;
+ ++sbi->comps.numdevs;
/* Read the fscb of the other devices to make sure the FS
* partition is there.
*/
- ret = exofs_read_kern(od, sbi->s_cred, &obj, 0, &fscb,
+ ret = exofs_read_kern(od, comp.cred, &comp.obj, 0, &fscb,
sizeof(fscb));
if (unlikely(ret)) {
EXOFS_ERR("ERROR: Malformed participating device "
@@ -656,13 +718,22 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
out:
kfree(dt);
- if (unlikely(!ret && fscb_od)) {
- EXOFS_ERR(
- "ERROR: Bad device-table container device not present\n");
- osduld_put_device(fscb_od);
- ret = -EINVAL;
- }
+ if (likely(!ret)) {
+ unsigned numdevs = sbi->comps.numdevs;
+ if (unlikely(fscb_od)) {
+ EXOFS_ERR("ERROR: Bad device-table container device not present\n");
+ osduld_put_device(fscb_od);
+ return -EINVAL;
+ }
+ /* exofs round-robins the device table view according to inode
+ * number. We hold a: twice bigger table hence inodes can point
+ * to any device and have a sequential view of the table
+ * starting at this device. See exofs_init_comps()
+ */
+ for (i = 0; i < numdevs - 1; ++i)
+ sbi->comps.ods[i + numdevs] = sbi->comps.ods[i];
+ }
return ret;
}
@@ -676,7 +747,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
struct exofs_sb_info *sbi; /*extended info */
struct osd_dev *od; /* Master device */
struct exofs_fscb fscb; /*on-disk superblock info */
- struct osd_obj_id obj;
+ struct ore_comp comp;
unsigned table_count;
int ret;
@@ -684,10 +755,6 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi)
return -ENOMEM;
- ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
- if (ret)
- goto free_bdi;
-
/* use mount options to fill superblock */
if (opts->is_osdname) {
struct osd_dev_info odi = {.systemid_len = 0};
@@ -695,6 +762,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
odi.osdname_len = strlen(opts->dev_name);
odi.osdname = (u8 *)opts->dev_name;
od = osduld_info_lookup(&odi);
+ kfree(opts->dev_name);
+ opts->dev_name = NULL;
} else {
od = osduld_path_lookup(opts->dev_name);
}
@@ -709,11 +778,16 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
sbi->layout.group_width = 1;
sbi->layout.group_depth = -1;
sbi->layout.group_count = 1;
- sbi->layout.s_ods[0] = od;
- sbi->layout.s_numdevs = 1;
- sbi->layout.s_pid = opts->pid;
sbi->s_timeout = opts->timeout;
+ sbi->one_comp.obj.partition = opts->pid;
+ sbi->one_comp.obj.id = 0;
+ exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
+ sbi->comps.numdevs = 1;
+ sbi->comps.single_comp = EC_SINGLE_COMP;
+ sbi->comps.comps = &sbi->one_comp;
+ sbi->comps.ods = sbi->_min_one_dev;
+
/* fill in some other data by hand */
memset(sb->s_id, 0, sizeof(sb->s_id));
strcpy(sb->s_id, "exofs");
@@ -724,11 +798,11 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_bdev = NULL;
sb->s_dev = 0;
- obj.partition = sbi->layout.s_pid;
- obj.id = EXOFS_SUPER_ID;
- exofs_make_credential(sbi->s_cred, &obj);
+ comp.obj.partition = sbi->one_comp.obj.partition;
+ comp.obj.id = EXOFS_SUPER_ID;
+ exofs_make_credential(comp.cred, &comp.obj);
- ret = exofs_read_kern(od, sbi->s_cred, &obj, 0, &fscb, sizeof(fscb));
+ ret = exofs_read_kern(od, comp.cred, &comp.obj, 0, &fscb, sizeof(fscb));
if (unlikely(ret))
goto free_sbi;
@@ -757,9 +831,11 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
table_count = le64_to_cpu(fscb.s_dev_table_count);
if (table_count) {
- ret = exofs_read_lookup_dev_table(&sbi, table_count);
+ ret = exofs_read_lookup_dev_table(sbi, od, table_count);
if (unlikely(ret))
goto free_sbi;
+ } else {
+ sbi->comps.ods[0] = od;
}
__sbi_read_stats(sbi);
@@ -793,20 +869,20 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
- _exofs_print_device("Mounting", opts->dev_name, sbi->layout.s_ods[0],
- sbi->layout.s_pid);
- if (opts->is_osdname)
- kfree(opts->dev_name);
+ ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
+ if (ret) {
+ EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
+ goto free_sbi;
+ }
+
+ _exofs_print_device("Mounting", opts->dev_name, sbi->comps.ods[0],
+ sbi->one_comp.obj.partition);
return 0;
free_sbi:
- bdi_destroy(&sbi->bdi);
-free_bdi:
EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
- opts->dev_name, sbi->layout.s_pid, ret);
+ opts->dev_name, sbi->one_comp.obj.partition, ret);
exofs_free_sbi(sbi);
- if (opts->is_osdname)
- kfree(opts->dev_name);
return ret;
}
@@ -837,7 +913,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct exofs_sb_info *sbi = sb->s_fs_info;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct osd_attr attrs[] = {
ATTR_DEF(OSD_APAGE_PARTITION_QUOTAS,
OSD_ATTR_PQ_CAPACITY_QUOTA, sizeof(__be64)),
@@ -846,21 +922,18 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
};
uint64_t capacity = ULLONG_MAX;
uint64_t used = ULLONG_MAX;
- uint8_t cred_a[OSD_CAP_LEN];
int ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &sbi->comps, &ios);
if (ret) {
- EXOFS_DBGMSG("exofs_get_io_state failed.\n");
+ EXOFS_DBGMSG("ore_get_io_state failed.\n");
return ret;
}
- exofs_make_credential(cred_a, &ios->obj);
- ios->cred = sbi->s_cred;
ios->in_attr = attrs;
ios->in_attr_len = ARRAY_SIZE(attrs);
- ret = exofs_sbi_read(ios);
+ ret = ore_read(ios);
if (unlikely(ret))
goto out;
@@ -889,7 +962,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = EXOFS_NAME_LEN;
out:
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ret;
}
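exofs_sbi_write_stats() above also shows the asynchronous side of the same API: setting ios->done makes ore_write() return right after submission and hands ownership of the io_state to the callback. A hedged sketch of that pattern (example_done and example_submit are illustrative names; attribute setup is elided):

static void example_done(struct ore_io_state *ios, void *p)
{
	/* Runs from the OSD completion path once all devices finish;
	 * the submitter must not free the io_state itself. */
	ore_put_io_state(ios);
}

static int example_submit(struct exofs_sb_info *sbi)
{
	struct ore_io_state *ios;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &sbi->comps, &ios);
	if (unlikely(ret))
		return ret;

	/* out_attr / kern_buff setup elided */
	ios->done = example_done;	/* makes ore_write() non-blocking */
	ios->private = sbi;

	ret = ore_write(ios);
	if (unlikely(ret))
		ore_put_io_state(ios);	/* submission failed, callback won't run */
	return ret;
}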
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 52c053763942..35d6a3cfd9ff 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -194,12 +194,10 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
case ACL_TYPE_ACCESS:
name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
- inode->i_mode = mode;
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
if (error == 0)
@@ -253,16 +251,14 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
inode->i_mode &= ~current_umask();
}
if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
- mode_t mode = inode->i_mode;
if (S_ISDIR(inode->i_mode)) {
error = ext2_set_acl(inode, ACL_TYPE_DEFAULT, acl);
if (error)
goto cleanup;
}
- error = posix_acl_create(&acl, GFP_KERNEL, &mode);
+ error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
if (error < 0)
return error;
- inode->i_mode = mode;
if (error > 0) {
/* This is an extended ACL */
error = ext2_set_acl(inode, ACL_TYPE_ACCESS, acl);
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 6c29bf0df04a..3091f62e55b6 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -199,12 +199,10 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
case ACL_TYPE_ACCESS:
name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
- inode->i_mode = mode;
inode->i_ctime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, inode);
if (error == 0)
@@ -261,19 +259,16 @@ ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
inode->i_mode &= ~current_umask();
}
if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
- mode_t mode = inode->i_mode;
-
if (S_ISDIR(inode->i_mode)) {
error = ext3_set_acl(handle, inode,
ACL_TYPE_DEFAULT, acl);
if (error)
goto cleanup;
}
- error = posix_acl_create(&acl, GFP_NOFS, &mode);
+ error = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
if (error < 0)
return error;
- inode->i_mode = mode;
if (error > 0) {
/* This is an extended ACL */
error = ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 04109460ba9e..56fd8f865930 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
- mmp.o
+ mmp.o indirect.o
ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index dca2d1ded931..a5c29bb3b835 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -198,12 +198,10 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
case ACL_TYPE_ACCESS:
name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
- inode->i_mode = mode;
inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
if (error == 0)
@@ -259,19 +257,16 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
inode->i_mode &= ~current_umask();
}
if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
- mode_t mode = inode->i_mode;
-
if (S_ISDIR(inode->i_mode)) {
error = ext4_set_acl(handle, inode,
ACL_TYPE_DEFAULT, acl);
if (error)
goto cleanup;
}
- error = posix_acl_create(&acl, GFP_NOFS, &mode);
+ error = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
if (error < 0)
return error;
- inode->i_mode = mode;
if (error > 0) {
/* This is an extended ACL */
error = ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
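The ext2, ext3 and ext4 hunks above all converge on the same pattern: posix_acl_equiv_mode() and posix_acl_create() now update inode->i_mode through the pointer instead of going via a local mode_t copy. A condensed sketch of the resulting create-time flow, with fs_set_acl() standing in for the per-filesystem setter:

	error = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
	if (error < 0)
		return error;		/* i_mode is already updated on success */
	if (error > 0) {
		/* an extended ACL remains beyond what i_mode can express */
		error = fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
	}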
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 264f6949511e..f8224adf496e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -620,3 +620,51 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
}
+/**
+ * ext4_inode_to_goal_block - return a hint for block allocation
+ * @inode: inode for block allocation
+ *
+ * Return the ideal location to start allocating blocks for a
+ * newly created inode.
+ */
+ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ ext4_group_t block_group;
+ ext4_grpblk_t colour;
+ int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
+ ext4_fsblk_t bg_start;
+ ext4_fsblk_t last_block;
+
+ block_group = ei->i_block_group;
+ if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
+ /*
+ * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
+ * block groups per flexgroup, reserve the first block
+ * group for directories and special files. Regular
+ * files will start at the second block group. This
+ * tends to speed up directory access and improves
+ * fsck times.
+ */
+ block_group &= ~(flex_size-1);
+ if (S_ISREG(inode->i_mode))
+ block_group++;
+ }
+ bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
+ last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
+
+ /*
+ * If we are doing delayed allocation, we don't need take
+ * colour into account.
+ */
+ if (test_opt(inode->i_sb, DELALLOC))
+ return bg_start;
+
+ if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
+ colour = (current->pid % 16) *
+ (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+ else
+ colour = (current->pid % 16) * ((last_block - bg_start) / 16);
+ return bg_start + colour;
+}
+
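As a worked example of the colour term (numbers illustrative): with the default 32768 blocks per group on a non-delalloc mount, a task with current->pid % 16 == 5 gets colour = 5 * (32768 / 16) = 10240, so its goal sits 10240 blocks past bg_start, which keeps concurrent allocators from piling onto the same region of the group.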
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index fac90f3fba80..8efb2f0a3447 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -246,3 +246,24 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
return 1;
}
+int ext4_check_blockref(const char *function, unsigned int line,
+ struct inode *inode, __le32 *p, unsigned int max)
+{
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ __le32 *bref = p;
+ unsigned int blk;
+
+ while (bref < p+max) {
+ blk = le32_to_cpu(*bref++);
+ if (blk &&
+ unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+ blk, 1))) {
+ es->s_last_error_block = cpu_to_le64(blk);
+ ext4_error_inode(inode, function, line, blk,
+ "invalid block");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
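The ext4.h hunk further down wraps this helper in two convenience macros; a hedged usage sketch (the surrounding error handling is illustrative):

	/* check the EXT4_NDIR_BLOCKS direct pointers stored in the inode */
	ret = ext4_ind_check_inode(inode);
	if (ret)
		return ret;

	/* check every pointer of an indirect block just read into bh */
	ret = ext4_check_indirect_blockref(inode, bh);
	if (ret)
		return ret;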
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index fa44df879711..e717dfd2f2b4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -526,6 +526,7 @@ struct ext4_new_group_data {
#define EXT4_FREE_BLOCKS_METADATA 0x0001
#define EXT4_FREE_BLOCKS_FORGET 0x0002
#define EXT4_FREE_BLOCKS_VALIDATED 0x0004
+#define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
/*
* ioctl commands
@@ -939,6 +940,8 @@ struct ext4_inode_info {
#define ext4_find_next_zero_bit find_next_zero_bit_le
#define ext4_find_next_bit find_next_bit_le
+extern void ext4_set_bits(void *bm, int cur, int len);
+
/*
* Maximal mount counts between two filesystem checks
*/
@@ -1126,7 +1129,8 @@ struct ext4_sb_info {
struct journal_s *s_journal;
struct list_head s_orphan;
struct mutex s_orphan_lock;
- struct mutex s_resize_lock;
+ unsigned long s_resize_flags; /* Flags indicating if there
+ is a resizer */
unsigned long s_commit_interval;
u32 s_max_batch_time;
u32 s_min_batch_time;
@@ -1214,6 +1218,9 @@ struct ext4_sb_info {
/* Kernel thread for multiple mount protection */
struct task_struct *s_mmp_tsk;
+
+ /* record the last minlen when FITRIM is called. */
+ atomic_t s_last_trim_minblks;
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1743,6 +1750,7 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb,
struct ext4_group_desc *desc);
#define ext4_free_blocks_after_init(sb, group, desc) \
ext4_init_block_bitmap(sb, NULL, group, desc)
+ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
/* dir.c */
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
@@ -1793,7 +1801,7 @@ extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
unsigned long count, int flags);
extern int ext4_mb_add_groupinfo(struct super_block *sb,
ext4_group_t i, struct ext4_group_desc *desc);
-extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count);
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
@@ -1834,6 +1842,17 @@ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
+
+/* indirect.c */
+extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, int flags);
+extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
+ const struct iovec *iov, loff_t offset,
+ unsigned long nr_segs);
+extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
+extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk);
+extern void ext4_ind_truncate(struct inode *inode);
+
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
@@ -1855,6 +1874,9 @@ extern int ext4_group_extend(struct super_block *sb,
ext4_fsblk_t n_blocks_count);
/* super.c */
+extern void *ext4_kvmalloc(size_t size, gfp_t flags);
+extern void *ext4_kvzalloc(size_t size, gfp_t flags);
+extern void ext4_kvfree(void *ptr);
extern void __ext4_error(struct super_block *, const char *, unsigned int,
const char *, ...)
__attribute__ ((format (printf, 4, 5)));
@@ -2067,11 +2089,19 @@ struct ext4_group_info {
* 5 free 8-block regions. */
};
-#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
+#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
+#define EXT4_GROUP_INFO_WAS_TRIMMED_BIT 1
#define EXT4_MB_GRP_NEED_INIT(grp) \
(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_WAS_TRIMMED(grp) \
+ (test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_SET_TRIMMED(grp) \
+ (set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \
+ (clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+
#define EXT4_MAX_CONTENTION 8
#define EXT4_CONTENTION_THRESHOLD 2
@@ -2123,6 +2153,19 @@ static inline void ext4_mark_super_dirty(struct super_block *sb)
}
/*
+ * Block validity checking
+ */
+#define ext4_check_indirect_blockref(inode, bh) \
+ ext4_check_blockref(__func__, __LINE__, inode, \
+ (__le32 *)(bh)->b_data, \
+ EXT4_ADDR_PER_BLOCK((inode)->i_sb))
+
+#define ext4_ind_check_inode(inode) \
+ ext4_check_blockref(__func__, __LINE__, inode, \
+ EXT4_I(inode)->i_data, \
+ EXT4_NDIR_BLOCKS)
+
+/*
* Inodes and files operations
*/
@@ -2151,6 +2194,8 @@ extern void ext4_exit_system_zone(void);
extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
ext4_fsblk_t start_blk,
unsigned int count);
+extern int ext4_check_blockref(const char *, unsigned int,
+ struct inode *, __le32 *, unsigned int);
/* extents.c */
extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
@@ -2230,6 +2275,10 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+#define EXT4_RESIZING 0
+extern int ext4_resize_begin(struct super_block *sb);
+extern void ext4_resize_end(struct super_block *sb);
+
#endif /* __KERNEL__ */
#endif /* _EXT4_H */
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index f815cc81e7a2..57cf568a98ab 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -114,12 +114,6 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t block)
{
- struct ext4_inode_info *ei = EXT4_I(inode);
- ext4_fsblk_t bg_start;
- ext4_fsblk_t last_block;
- ext4_grpblk_t colour;
- ext4_group_t block_group;
- int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
int depth;
if (path) {
@@ -161,36 +155,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
}
/* OK. use inode's group */
- block_group = ei->i_block_group;
- if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
- /*
- * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
- * block groups per flexgroup, reserve the first block
- * group for directories and special files. Regular
- * files will start at the second block group. This
- * tends to speed up directory access and improves
- * fsck times.
- */
- block_group &= ~(flex_size-1);
- if (S_ISREG(inode->i_mode))
- block_group++;
- }
- bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
- last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
-
- /*
- * If we are doing delayed allocation, we don't need take
- * colour into account.
- */
- if (test_opt(inode->i_sb, DELALLOC))
- return bg_start;
-
- if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
- colour = (current->pid % 16) *
- (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
- else
- colour = (current->pid % 16) * ((last_block - bg_start) / 16);
- return bg_start + colour + block;
+ return ext4_inode_to_goal_block(inode);
}
/*
@@ -776,6 +741,16 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
logical, le32_to_cpu(curp->p_idx->ei_block));
return -EIO;
}
+
+ if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
+ >= le16_to_cpu(curp->p_hdr->eh_max))) {
+ EXT4_ERROR_INODE(inode,
+ "eh_entries %d >= eh_max %d!",
+ le16_to_cpu(curp->p_hdr->eh_entries),
+ le16_to_cpu(curp->p_hdr->eh_max));
+ return -EIO;
+ }
+
len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
/* insert after */
@@ -805,13 +780,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
- if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
- > le16_to_cpu(curp->p_hdr->eh_max))) {
- EXT4_ERROR_INODE(inode,
- "logical %d == ei_block %d!",
- logical, le32_to_cpu(curp->p_idx->ei_block));
- return -EIO;
- }
if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
return -EIO;
@@ -1446,8 +1414,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
* ext4_ext_next_leaf_block:
* returns first allocated block from next leaf or EXT_MAX_BLOCKS
*/
-static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
- struct ext4_ext_path *path)
+static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
int depth;
@@ -1757,7 +1724,6 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
goto merge;
}
-repeat:
depth = ext_depth(inode);
eh = path[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
@@ -1765,9 +1731,10 @@ repeat:
/* probably next leaf has space for us? */
fex = EXT_LAST_EXTENT(eh);
- next = ext4_ext_next_leaf_block(inode, path);
- if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
- && next != EXT_MAX_BLOCKS) {
+ next = EXT_MAX_BLOCKS;
+ if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
+ next = ext4_ext_next_leaf_block(path);
+ if (next != EXT_MAX_BLOCKS) {
ext_debug("next leaf block - %d\n", next);
BUG_ON(npath != NULL);
npath = ext4_ext_find_extent(inode, next, NULL);
@@ -1779,7 +1746,7 @@ repeat:
ext_debug("next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
path = npath;
- goto repeat;
+ goto has_space;
}
ext_debug("next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
@@ -1839,7 +1806,7 @@ has_space:
ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
- nearex, len, nearex + 1, nearex + 2);
+ nearex, len, nearex, nearex + 1);
memmove(nearex + 1, nearex, len);
path[depth].p_ext = nearex;
}
@@ -2052,7 +2019,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
}
/*
- * ext4_ext_in_cache()
+ * ext4_ext_check_cache()
* Checks to see if the given block is in the cache.
* If it is, the cached extent is stored in the given
* cache extent pointer. If the cached extent is a hole,
@@ -2134,8 +2101,6 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
/*
* ext4_ext_rm_idx:
* removes index from the index block.
- * It's used in truncate case only, thus all requests are for
- * last index in the block only.
*/
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
@@ -2153,6 +2118,13 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
err = ext4_ext_get_access(handle, inode, path);
if (err)
return err;
+
+ if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
+ int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
+ len *= sizeof(struct ext4_extent_idx);
+ memmove(path->p_idx, path->p_idx + 1, len);
+ }
+
le16_add_cpu(&path->p_hdr->eh_entries, -1);
err = ext4_ext_dirty(handle, inode, path);
if (err)
@@ -2534,8 +2506,7 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
return 1;
}
-static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
- ext4_lblk_t end)
+static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
struct super_block *sb = inode->i_sb;
int depth = ext_depth(inode);
@@ -2575,7 +2546,7 @@ again:
if (i == depth) {
/* this is leaf block */
err = ext4_ext_rm_leaf(handle, inode, path,
- start, end);
+ start, EXT_MAX_BLOCKS - 1);
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
@@ -3107,12 +3078,10 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
struct ext4_ext_path *path)
{
struct ext4_extent *ex;
- struct ext4_extent_header *eh;
int depth;
int err = 0;
depth = ext_depth(inode);
- eh = path[depth].p_hdr;
ex = path[depth].p_ext;
ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
@@ -3357,8 +3326,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* check in cache */
- if (ext4_ext_in_cache(inode, map->m_lblk, &newex) &&
- ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) {
+ if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) &&
+ ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
if (!newex.ee_start_lo && !newex.ee_start_hi) {
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
@@ -3497,8 +3466,27 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ext4_ext_mark_uninitialized(ex);
- err = ext4_ext_remove_space(inode, map->m_lblk,
- map->m_lblk + punched_out);
+ ext4_ext_invalidate_cache(inode);
+
+ err = ext4_ext_rm_leaf(handle, inode, path,
+ map->m_lblk, map->m_lblk + punched_out);
+
+ if (!err && path->p_hdr->eh_entries == 0) {
+ /*
+ * Punch hole freed all of this sub tree,
+ * so we need to correct eh_depth
+ */
+ err = ext4_ext_get_access(handle, inode, path);
+ if (err == 0) {
+ ext_inode_hdr(inode)->eh_depth = 0;
+ ext_inode_hdr(inode)->eh_max =
+ cpu_to_le16(ext4_ext_space_root(
+ inode, 0));
+
+ err = ext4_ext_dirty(
+ handle, inode, path);
+ }
+ }
goto out2;
}
@@ -3596,17 +3584,18 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
}
err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
- if (err)
- goto out2;
-
- err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (!err)
+ err = ext4_ext_insert_extent(handle, inode, path,
+ &newex, flags);
if (err) {
+ int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
+ EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
/* free data blocks we just allocated */
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free() */
ext4_discard_preallocations(inode);
ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
- ext4_ext_get_actual_len(&newex), 0);
+ ext4_ext_get_actual_len(&newex), fb_flags);
goto out2;
}
@@ -3699,7 +3688,7 @@ void ext4_ext_truncate(struct inode *inode)
last_block = (inode->i_size + sb->s_blocksize - 1)
>> EXT4_BLOCK_SIZE_BITS(sb);
- err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+ err = ext4_ext_remove_space(inode, last_block);
/* In a multi-transaction truncate, we only make the final
* transaction synchronous.
@@ -3835,7 +3824,7 @@ retry:
blkbits) >> blkbits))
new_size = offset + len;
else
- new_size = (map.m_lblk + ret) << blkbits;
+ new_size = ((loff_t) map.m_lblk + ret) << blkbits;
ext4_falloc_update_inode(inode, mode, new_size,
(map.m_flags & EXT4_MAP_NEW));
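The widening cast added above matters for large fallocate requests: map.m_lblk is a 32-bit logical block number, so with 4K blocks (blkbits = 12) any m_lblk + ret at or beyond 2^20 would overflow the 32-bit shift and truncate new_size; promoting to loff_t first keeps sizes past 4 GiB correct.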
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index da3bed3e0c29..036f78f7a1ef 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -129,15 +129,30 @@ static int ext4_sync_parent(struct inode *inode)
{
struct writeback_control wbc;
struct dentry *dentry = NULL;
+ struct inode *next;
int ret = 0;
- while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
+ if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
+ return 0;
+ inode = igrab(inode);
+ while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
- dentry = list_entry(inode->i_dentry.next,
- struct dentry, d_alias);
- if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
+ dentry = NULL;
+ spin_lock(&inode->i_lock);
+ if (!list_empty(&inode->i_dentry)) {
+ dentry = list_first_entry(&inode->i_dentry,
+ struct dentry, d_alias);
+ dget(dentry);
+ }
+ spin_unlock(&inode->i_lock);
+ if (!dentry)
break;
- inode = dentry->d_parent->d_inode;
+ next = igrab(dentry->d_parent->d_inode);
+ dput(dentry);
+ if (!next)
+ break;
+ iput(inode);
+ inode = next;
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
break;
@@ -148,6 +163,7 @@ static int ext4_sync_parent(struct inode *inode)
if (ret)
break;
}
+ iput(inode);
return ret;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 21bb2f61e502..9c63f273b550 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1287,7 +1287,7 @@ extern int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
group, used_blks,
ext4_itable_unused_count(sb, gdp));
ret = 1;
- goto out;
+ goto err_out;
}
blk = ext4_inode_table(sb, gdp) + used_blks;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
new file mode 100644
index 000000000000..b8602cde5b5a
--- /dev/null
+++ b/fs/ext4/indirect.c
@@ -0,0 +1,1482 @@
+/*
+ * linux/fs/ext4/indirect.c
+ *
+ * from
+ *
+ * linux/fs/ext4/inode.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * from
+ *
+ * linux/fs/minix/inode.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Goal-directed block allocation by Stephen Tweedie
+ * (sct@redhat.com), 1993, 1998
+ */
+
+#include <linux/module.h>
+#include "ext4_jbd2.h"
+#include "truncate.h"
+
+#include <trace/events/ext4.h>
+
+typedef struct {
+ __le32 *p;
+ __le32 key;
+ struct buffer_head *bh;
+} Indirect;
+
+static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
+{
+ p->key = *(p->p = v);
+ p->bh = bh;
+}
+
+/**
+ * ext4_block_to_path - parse the block number into array of offsets
+ * @inode: inode in question (we are only interested in its superblock)
+ * @i_block: block number to be parsed
+ * @offsets: array to store the offsets in
+ * @boundary: set this non-zero if the referred-to block is likely to be
+ * followed (on disk) by an indirect block.
+ *
+ * To store the locations of file's data ext4 uses a data structure common
+ * for UNIX filesystems - tree of pointers anchored in the inode, with
+ * data blocks at leaves and indirect blocks in intermediate nodes.
+ * This function translates the block number into path in that tree -
+ * return value is the path length and @offsets[n] is the offset of
+ * pointer to (n+1)th node in the nth one. If @block is out of range
+ * (negative or too large) warning is printed and zero returned.
+ *
+ * Note: function doesn't find node addresses, so no IO is needed. All
+ * we need to know is the capacity of indirect blocks (taken from the
+ * inode->i_sb).
+ */
+
+/*
+ * Portability note: the last comparison (check that we fit into triple
+ * indirect block) is spelled differently, because otherwise on an
+ * architecture with 32-bit longs and 8Kb pages we might get into trouble
+ * if our filesystem had 8Kb blocks. We might use long long, but that would
+ * kill us on x86. Oh, well, at least the sign propagation does not matter -
+ * i_block would have to be negative in the very beginning, so we would not
+ * get there at all.
+ */
+
+static int ext4_block_to_path(struct inode *inode,
+ ext4_lblk_t i_block,
+ ext4_lblk_t offsets[4], int *boundary)
+{
+ int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
+ const long direct_blocks = EXT4_NDIR_BLOCKS,
+ indirect_blocks = ptrs,
+ double_blocks = (1 << (ptrs_bits * 2));
+ int n = 0;
+ int final = 0;
+
+ if (i_block < direct_blocks) {
+ offsets[n++] = i_block;
+ final = direct_blocks;
+ } else if ((i_block -= direct_blocks) < indirect_blocks) {
+ offsets[n++] = EXT4_IND_BLOCK;
+ offsets[n++] = i_block;
+ final = ptrs;
+ } else if ((i_block -= indirect_blocks) < double_blocks) {
+ offsets[n++] = EXT4_DIND_BLOCK;
+ offsets[n++] = i_block >> ptrs_bits;
+ offsets[n++] = i_block & (ptrs - 1);
+ final = ptrs;
+ } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
+ offsets[n++] = EXT4_TIND_BLOCK;
+ offsets[n++] = i_block >> (ptrs_bits * 2);
+ offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
+ offsets[n++] = i_block & (ptrs - 1);
+ final = ptrs;
+ } else {
+ ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
+ i_block + direct_blocks +
+ indirect_blocks + double_blocks, inode->i_ino);
+ }
+ if (boundary)
+ *boundary = final - 1 - (i_block & (ptrs - 1));
+ return n;
+}
+
+/**
+ * ext4_get_branch - read the chain of indirect blocks leading to data
+ * @inode: inode in question
+ * @depth: depth of the chain (1 - direct pointer, etc.)
+ * @offsets: offsets of pointers in inode/indirect blocks
+ * @chain: place to store the result
+ * @err: here we store the error value
+ *
+ * Function fills the array of triples <key, p, bh> and returns %NULL
+ * if everything went OK or the pointer to the last filled triple
+ * (incomplete one) otherwise. Upon the return chain[i].key contains
+ * the number of (i+1)-th block in the chain (as it is stored in memory,
+ * i.e. little-endian 32-bit), chain[i].p contains the address of that
+ * number (it points into struct inode for i==0 and into the bh->b_data
+ * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
+ * block for i>0 and NULL for i==0. In other words, it holds the block
+ * numbers of the chain, addresses they were taken from (and where we can
+ * verify that chain did not change) and buffer_heads hosting these
+ * numbers.
+ *
+ * Function stops when it stumbles upon zero pointer (absent block)
+ * (pointer to last triple returned, *@err == 0)
+ * or when it gets an IO error reading an indirect block
+ * (ditto, *@err == -EIO)
+ * or when it reads all @depth-1 indirect blocks successfully and finds
+ * the whole chain, all way to the data (returns %NULL, *err == 0).
+ *
+ * Need to be called with
+ * down_read(&EXT4_I(inode)->i_data_sem)
+ */
+static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ ext4_lblk_t *offsets,
+ Indirect chain[4], int *err)
+{
+ struct super_block *sb = inode->i_sb;
+ Indirect *p = chain;
+ struct buffer_head *bh;
+
+ *err = 0;
+ /* i_data is not going away, no lock needed */
+ add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
+ if (!p->key)
+ goto no_block;
+ while (--depth) {
+ bh = sb_getblk(sb, le32_to_cpu(p->key));
+ if (unlikely(!bh))
+ goto failure;
+
+ if (!bh_uptodate_or_lock(bh)) {
+ if (bh_submit_read(bh) < 0) {
+ put_bh(bh);
+ goto failure;
+ }
+ /* validate block references */
+ if (ext4_check_indirect_blockref(inode, bh)) {
+ put_bh(bh);
+ goto failure;
+ }
+ }
+
+ add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
+ /* Reader: end */
+ if (!p->key)
+ goto no_block;
+ }
+ return NULL;
+
+failure:
+ *err = -EIO;
+no_block:
+ return p;
+}
+
+/**
+ * ext4_find_near - find a place for allocation with sufficient locality
+ * @inode: owner
+ * @ind: descriptor of indirect block.
+ *
+ * This function returns the preferred place for block allocation.
+ * It is used when heuristic for sequential allocation fails.
+ * Rules are:
+ * + if there is a block to the left of our position - allocate near it.
+ * + if pointer will live in indirect block - allocate near that block.
+ * + if pointer will live in inode - allocate in the same
+ * cylinder group.
+ *
+ * In the latter case we colour the starting block by the caller's PID to
+ * prevent it from clashing with concurrent allocations for a different inode
+ * in the same block group. The PID is used here so that functionally related
+ * files will be close-by on-disk.
+ *
+ * Caller must make sure that @ind is valid and will stay that way.
+ */
+static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
+ __le32 *p;
+
+ /* Try to find previous block */
+ for (p = ind->p - 1; p >= start; p--) {
+ if (*p)
+ return le32_to_cpu(*p);
+ }
+
+ /* No such thing, so let's try location of indirect block */
+ if (ind->bh)
+ return ind->bh->b_blocknr;
+
+ /*
+ * It is going to be referred to from the inode itself? OK, just put it
+ * into the same cylinder group then.
+ */
+ return ext4_inode_to_goal_block(inode);
+}
+
+/**
+ * ext4_find_goal - find a preferred place for allocation.
+ * @inode: owner
+ * @block: block we want
+ * @partial: pointer to the last triple within a chain
+ *
+ * Normally this function finds the preferred place for block allocation,
+ * returns it.
+ * Because this is only used for non-extent files, we limit the block nr
+ * to 32 bits.
+ */
+static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
+ Indirect *partial)
+{
+ ext4_fsblk_t goal;
+
+ /*
+ * XXX need to get goal block from mballoc's data structures
+ */
+
+ goal = ext4_find_near(inode, partial);
+ goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
+ return goal;
+}
+
+/**
+ * ext4_blks_to_allocate - Look up the block map and count the number
+ * of direct blocks that need to be allocated for the given branch.
+ *
+ * @branch: chain of indirect blocks
+ * @k: number of blocks need for indirect blocks
+ * @blks: number of data blocks to be mapped.
+ * @blocks_to_boundary: the offset in the indirect block
+ *
+ * return the total number of blocks to be allocated, including the
+ * direct and indirect blocks.
+ */
+static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
+ int blocks_to_boundary)
+{
+ unsigned int count = 0;
+
+ /*
+	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
+	 * so clearly the blocks on that path have not been allocated either
+ */
+ if (k > 0) {
+ /* right now we don't handle cross boundary allocation */
+ if (blks < blocks_to_boundary + 1)
+ count += blks;
+ else
+ count += blocks_to_boundary + 1;
+ return count;
+ }
+
+ count++;
+ while (count < blks && count <= blocks_to_boundary &&
+ le32_to_cpu(*(branch[0].p + count)) == 0) {
+ count++;
+ }
+ return count;
+}
+
+/**
+ * ext4_alloc_blocks: multiple allocate blocks needed for a branch
+ * @handle: handle for this transaction
+ * @inode: inode which needs allocated blocks
+ * @iblock: the logical block to start allocating at
+ * @goal: preferred physical block of allocation
+ * @indirect_blks: the number of blocks that need to be allocated for indirect
+ * blocks
+ * @blks: number of desired blocks
+ * @new_blocks: on return it will store the new block numbers for
+ * the indirect blocks(if needed) and the first direct block,
+ * @err: on return it will store the error code
+ *
+ * This function will return the number of blocks allocated as
+ * requested by the passed-in parameters.
+ */
+static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
+ ext4_lblk_t iblock, ext4_fsblk_t goal,
+ int indirect_blks, int blks,
+ ext4_fsblk_t new_blocks[4], int *err)
+{
+ struct ext4_allocation_request ar;
+ int target, i;
+ unsigned long count = 0, blk_allocated = 0;
+ int index = 0;
+ ext4_fsblk_t current_block = 0;
+ int ret = 0;
+
+ /*
+ * Here we try to allocate the requested multiple blocks at once,
+ * on a best-effort basis.
+ * To build a branch, we should allocate blocks for
+ * the indirect blocks(if not allocated yet), and at least
+ * the first direct block of this branch. That's the
+	 * minimum number of blocks we need to allocate (required)
+ */
+ /* first we try to allocate the indirect blocks */
+ target = indirect_blks;
+ while (target > 0) {
+ count = target;
+ /* allocating blocks for indirect blocks and direct blocks */
+ current_block = ext4_new_meta_blocks(handle, inode, goal,
+ 0, &count, err);
+ if (*err)
+ goto failed_out;
+
+ if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
+ EXT4_ERROR_INODE(inode,
+ "current_block %llu + count %lu > %d!",
+ current_block, count,
+ EXT4_MAX_BLOCK_FILE_PHYS);
+ *err = -EIO;
+ goto failed_out;
+ }
+
+ target -= count;
+ /* allocate blocks for indirect blocks */
+ while (index < indirect_blks && count) {
+ new_blocks[index++] = current_block++;
+ count--;
+ }
+ if (count > 0) {
+ /*
+ * save the new block number
+ * for the first direct block
+ */
+ new_blocks[index] = current_block;
+ printk(KERN_INFO "%s returned more blocks than "
+ "requested\n", __func__);
+ WARN_ON(1);
+ break;
+ }
+ }
+
+ target = blks - count ;
+ blk_allocated = count;
+ if (!target)
+ goto allocated;
+ /* Now allocate data blocks */
+ memset(&ar, 0, sizeof(ar));
+ ar.inode = inode;
+ ar.goal = goal;
+ ar.len = target;
+ ar.logical = iblock;
+ if (S_ISREG(inode->i_mode))
+ /* enable in-core preallocation only for regular files */
+ ar.flags = EXT4_MB_HINT_DATA;
+
+ current_block = ext4_mb_new_blocks(handle, &ar, err);
+ if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
+ EXT4_ERROR_INODE(inode,
+ "current_block %llu + ar.len %d > %d!",
+ current_block, ar.len,
+ EXT4_MAX_BLOCK_FILE_PHYS);
+ *err = -EIO;
+ goto failed_out;
+ }
+
+ if (*err && (target == blks)) {
+ /*
+ * if the allocation failed and we didn't allocate
+ * any blocks before
+ */
+ goto failed_out;
+ }
+ if (!*err) {
+ if (target == blks) {
+ /*
+ * save the new block number
+ * for the first direct block
+ */
+ new_blocks[index] = current_block;
+ }
+ blk_allocated += ar.len;
+ }
+allocated:
+ /* total number of blocks allocated for direct blocks */
+ ret = blk_allocated;
+ *err = 0;
+ return ret;
+failed_out:
+ for (i = 0; i < index; i++)
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
+ return ret;
+}
+
+/**
+ * ext4_alloc_branch - allocate and set up a chain of blocks.
+ * @handle: handle for this transaction
+ * @inode: owner
+ * @indirect_blks: number of allocated indirect blocks
+ * @blks: number of allocated direct blocks
+ * @goal: preferred place for allocation
+ * @offsets: offsets (in the blocks) to store the pointers to next.
+ * @branch: place to store the chain in.
+ *
+ * This function allocates blocks, zeroes out all but the last one,
+ * links them into chain and (if we are synchronous) writes them to disk.
+ * In other words, it prepares a branch that can be spliced onto the
+ * inode. It stores the information about that chain in the branch[], in
+ * the same format as ext4_get_branch() would do. We are calling it after
+ * we had read the existing part of chain and partial points to the last
+ * triple of that (one with zero ->key). Upon the exit we have the same
+ * picture as after the successful ext4_get_block(), except that in one
+ * place chain is disconnected - *branch->p is still zero (we did not
+ * set the last link), but branch->key contains the number that should
+ * be placed into *branch->p to fill that gap.
+ *
+ * If allocation fails we free all blocks we've allocated (and forget
+ * their buffer_heads) and return the error value from the failed
+ * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
+ * as described above and return 0.
+ */
+static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ ext4_lblk_t iblock, int indirect_blks,
+ int *blks, ext4_fsblk_t goal,
+ ext4_lblk_t *offsets, Indirect *branch)
+{
+ int blocksize = inode->i_sb->s_blocksize;
+ int i, n = 0;
+ int err = 0;
+ struct buffer_head *bh;
+ int num;
+ ext4_fsblk_t new_blocks[4];
+ ext4_fsblk_t current_block;
+
+ num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
+ *blks, new_blocks, &err);
+ if (err)
+ return err;
+
+ branch[0].key = cpu_to_le32(new_blocks[0]);
+ /*
+ * metadata blocks and data blocks are allocated.
+ */
+ for (n = 1; n <= indirect_blks; n++) {
+ /*
+ * Get buffer_head for parent block, zero it out
+ * and set the pointer to new one, then send
+ * parent to disk.
+ */
+ bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ if (unlikely(!bh)) {
+ err = -EIO;
+ goto failed;
+ }
+
+ branch[n].bh = bh;
+ lock_buffer(bh);
+ BUFFER_TRACE(bh, "call get_create_access");
+ err = ext4_journal_get_create_access(handle, bh);
+ if (err) {
+ /* Don't brelse(bh) here; it's done in
+ * ext4_journal_forget() below */
+ unlock_buffer(bh);
+ goto failed;
+ }
+
+ memset(bh->b_data, 0, blocksize);
+ branch[n].p = (__le32 *) bh->b_data + offsets[n];
+ branch[n].key = cpu_to_le32(new_blocks[n]);
+ *branch[n].p = branch[n].key;
+ if (n == indirect_blks) {
+ current_block = new_blocks[n];
+ /*
+			 * End of chain, update the last new metablock of
+			 * the chain to point to the newly allocated
+			 * data block numbers
+ */
+ for (i = 1; i < num; i++)
+ *(branch[n].p + i) = cpu_to_le32(++current_block);
+ }
+ BUFFER_TRACE(bh, "marking uptodate");
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
+
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (err)
+ goto failed;
+ }
+ *blks = num;
+ return err;
+failed:
+ /* Allocation failed, free what we already allocated */
+ ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
+ for (i = 1; i <= n ; i++) {
+ /*
+ * branch[i].bh is newly allocated, so there is no
+ * need to revoke the block, which is why we don't
+ * need to set EXT4_FREE_BLOCKS_METADATA.
+ */
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
+ EXT4_FREE_BLOCKS_FORGET);
+ }
+ for (i = n+1; i < indirect_blks; i++)
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
+
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
+
+ return err;
+}
+
+/**
+ * ext4_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
+ * @inode: owner
+ * @block: (logical) number of block we are adding
+ * @chain: chain of indirect blocks (with a missing link - see
+ * ext4_alloc_branch)
+ * @where: location of missing link
+ * @num: number of indirect blocks we are adding
+ * @blks: number of direct blocks we are adding
+ *
+ * This function fills the missing link and does all housekeeping needed in
+ * inode (->i_blocks, etc.). In case of success we end up with the full
+ * chain to new block and return 0.
+ */
+static int ext4_splice_branch(handle_t *handle, struct inode *inode,
+ ext4_lblk_t block, Indirect *where, int num,
+ int blks)
+{
+ int i;
+ int err = 0;
+ ext4_fsblk_t current_block;
+
+ /*
+ * If we're splicing into a [td]indirect block (as opposed to the
+ * inode) then we need to get write access to the [td]indirect block
+ * before the splice.
+ */
+ if (where->bh) {
+ BUFFER_TRACE(where->bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, where->bh);
+ if (err)
+ goto err_out;
+ }
+ /* That's it */
+
+ *where->p = where->key;
+
+ /*
+	 * Update the host buffer_head or inode to point to more just allocated
+	 * direct blocks
+ */
+ if (num == 0 && blks > 1) {
+ current_block = le32_to_cpu(where->key) + 1;
+ for (i = 1; i < blks; i++)
+ *(where->p + i) = cpu_to_le32(current_block++);
+ }
+
+ /* We are done with atomic stuff, now do the rest of housekeeping */
+ /* had we spliced it onto indirect block? */
+ if (where->bh) {
+ /*
+ * If we spliced it onto an indirect block, we haven't
+ * altered the inode. Note however that if it is being spliced
+ * onto an indirect block at the very end of the file (the
+ * file is growing) then we *will* alter the inode to reflect
+ * the new i_size. But that is not done here - it is done in
+ * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
+ */
+ jbd_debug(5, "splicing indirect only\n");
+ BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, inode, where->bh);
+ if (err)
+ goto err_out;
+ } else {
+ /*
+ * OK, we spliced it into the inode itself on a direct block.
+ */
+ ext4_mark_inode_dirty(handle, inode);
+ jbd_debug(5, "splicing direct\n");
+ }
+ return err;
+
+err_out:
+ for (i = 1; i <= num; i++) {
+ /*
+ * branch[i].bh is newly allocated, so there is no
+ * need to revoke the block, which is why we don't
+ * need to set EXT4_FREE_BLOCKS_METADATA.
+ */
+ ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
+ EXT4_FREE_BLOCKS_FORGET);
+ }
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
+ blks, 0);
+
+ return err;
+}
+
+/*
+ * The ext4_ind_map_blocks() function handles non-extents inodes
+ * (i.e., using the traditional indirect/double-indirect i_blocks
+ * scheme) for ext4_map_blocks().
+ *
+ * Allocation strategy is simple: if we have to allocate something, we will
+ * have to go the whole way to leaf. So let's do it before attaching anything
+ * to tree, set linkage between the newborn blocks, write them if sync is
+ * required, recheck the path, free and repeat if check fails, otherwise
+ * set the last missing link (that will protect us from any truncate-generated
+ * removals - all blocks on the path are immune now) and possibly force the
+ * write on the parent block.
+ * That has a nice additional property: no special recovery from the failed
+ * allocations is needed - we simply release blocks and do not touch anything
+ * reachable from inode.
+ *
+ * `handle' can be NULL if create == 0.
+ *
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
+ *
+ * The ext4_ind_map_blocks() function should be called with
+ * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
+ * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
+ * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
+ * blocks.
+ */
+int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map,
+ int flags)
+{
+ int err = -EIO;
+ ext4_lblk_t offsets[4];
+ Indirect chain[4];
+ Indirect *partial;
+ ext4_fsblk_t goal;
+ int indirect_blks;
+ int blocks_to_boundary = 0;
+ int depth;
+ int count = 0;
+ ext4_fsblk_t first_block = 0;
+
+ trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
+ J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
+ J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
+ depth = ext4_block_to_path(inode, map->m_lblk, offsets,
+ &blocks_to_boundary);
+
+ if (depth == 0)
+ goto out;
+
+ partial = ext4_get_branch(inode, depth, offsets, chain, &err);
+
+ /* Simplest case - block found, no allocation needed */
+ if (!partial) {
+ first_block = le32_to_cpu(chain[depth - 1].key);
+ count++;
+ /*map more blocks*/
+ while (count < map->m_len && count <= blocks_to_boundary) {
+ ext4_fsblk_t blk;
+
+ blk = le32_to_cpu(*(chain[depth-1].p + count));
+
+ if (blk == first_block + count)
+ count++;
+ else
+ break;
+ }
+ goto got_it;
+ }
+
+ /* Next simple case - plain lookup or failed read of indirect block */
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
+ goto cleanup;
+
+ /*
+ * Okay, we need to do block allocation.
+ */
+ goal = ext4_find_goal(inode, map->m_lblk, partial);
+
+ /* the number of blocks need to allocate for [d,t]indirect blocks */
+ indirect_blks = (chain + depth) - partial - 1;
+
+ /*
+	 * Next look up the indirect map to count the total number of
+ * direct blocks to allocate for this branch.
+ */
+ count = ext4_blks_to_allocate(partial, indirect_blks,
+ map->m_len, blocks_to_boundary);
+ /*
+ * Block out ext4_truncate while we alter the tree
+ */
+ err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
+ &count, goal,
+ offsets + (partial - chain), partial);
+
+ /*
+ * The ext4_splice_branch call will free and forget any buffers
+ * on the new chain if there is a failure, but that risks using
+ * up transaction credits, especially for bitmaps where the
+ * credits cannot be returned. Can we handle this somehow? We
+ * may need to return -EAGAIN upwards in the worst case. --sct
+ */
+ if (!err)
+ err = ext4_splice_branch(handle, inode, map->m_lblk,
+ partial, indirect_blks, count);
+ if (err)
+ goto cleanup;
+
+ map->m_flags |= EXT4_MAP_NEW;
+
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+got_it:
+ map->m_flags |= EXT4_MAP_MAPPED;
+ map->m_pblk = le32_to_cpu(chain[depth-1].key);
+ map->m_len = count;
+ if (count > blocks_to_boundary)
+ map->m_flags |= EXT4_MAP_BOUNDARY;
+ err = count;
+ /* Clean up and exit */
+ partial = chain + depth - 1; /* the whole chain */
+cleanup:
+ while (partial > chain) {
+ BUFFER_TRACE(partial->bh, "call brelse");
+ brelse(partial->bh);
+ partial--;
+ }
+out:
+ trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
+ map->m_pblk, map->m_len, err);
+ return err;
+}
+
+/*
+ * O_DIRECT for ext3 (or indirect map) based files
+ *
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list. So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ *
+ * If the O_DIRECT write is instantiating holes inside i_size and the machine
+ * crashes then stale disk data _may_ be exposed inside the file. But current
+ * VFS code falls back into buffered path in that case so we are safe.
+ */
+ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
+ const struct iovec *iov, loff_t offset,
+ unsigned long nr_segs)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ handle_t *handle;
+ ssize_t ret;
+ int orphan = 0;
+ size_t count = iov_length(iov, nr_segs);
+ int retries = 0;
+
+ if (rw == WRITE) {
+ loff_t final_size = offset + count;
+
+ if (final_size > inode->i_size) {
+ /* Credits for sb + inode write */
+ handle = ext4_journal_start(inode, 2);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+ ret = ext4_orphan_add(handle, inode);
+ if (ret) {
+ ext4_journal_stop(handle);
+ goto out;
+ }
+ orphan = 1;
+ ei->i_disksize = inode->i_size;
+ ext4_journal_stop(handle);
+ }
+ }
+
+retry:
+ if (rw == READ && ext4_should_dioread_nolock(inode))
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev, iov,
+ offset, nr_segs,
+ ext4_get_block, NULL, NULL, 0);
+ else {
+ ret = blockdev_direct_IO(rw, iocb, inode, iov,
+ offset, nr_segs, ext4_get_block);
+
+ if (unlikely((rw & WRITE) && ret < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ ext4_truncate_failed_write(inode);
+ }
+ }
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+
+ if (orphan) {
+ int err;
+
+ /* Credits for sb + inode write */
+ handle = ext4_journal_start(inode, 2);
+ if (IS_ERR(handle)) {
+ /* This is really bad luck. We've written the data
+ * but cannot extend i_size. Bail out and pretend
+ * the write failed... */
+ ret = PTR_ERR(handle);
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+
+ goto out;
+ }
+ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+ if (ret > 0) {
+ loff_t end = offset + ret;
+ if (end > inode->i_size) {
+ ei->i_disksize = end;
+ i_size_write(inode, end);
+ /*
+ * We're going to return a positive `ret'
+ * here due to non-zero-length I/O, so there's
+ * no way of reporting error returns from
+ * ext4_mark_inode_dirty() to userspace. So
+ * ignore it.
+ */
+ ext4_mark_inode_dirty(handle, inode);
+ }
+ }
+ err = ext4_journal_stop(handle);
+ if (ret == 0)
+ ret = err;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Calculate the number of metadata blocks that need to be reserved
+ * to allocate a new block at @lblock for a non-extent-based file
+ */
+int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
+ int blk_bits;
+
+ if (lblock < EXT4_NDIR_BLOCKS)
+ return 0;
+
+ lblock -= EXT4_NDIR_BLOCKS;
+
+ if (ei->i_da_metadata_calc_len &&
+ (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
+ ei->i_da_metadata_calc_len++;
+ return 0;
+ }
+ ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
+ ei->i_da_metadata_calc_len = 1;
+ blk_bits = order_base_2(lblock);
+ return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
+}
+
+int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+{
+ int indirects;
+
+ /* if nrblocks are contiguous */
+ if (chunk) {
+ /*
+ * With N contiguous data blocks, we need at most
+ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+ * 2 dindirect blocks, and 1 tindirect block
+ */
+ return DIV_ROUND_UP(nrblocks,
+ EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
+ }
+ /*
+	 * if nrblocks are not contiguous, worst case: each block touches
+	 * an indirect block, and each indirect block touches a double indirect
+ * block, plus a triple indirect block
+ */
+ indirects = nrblocks * 2 + 1;
+ return indirects;
+}
+
+/*
+ * Truncate transactions can be complex and absolutely huge. So we need to
+ * be able to restart the transaction at a convenient checkpoint to make
+ * sure we don't overflow the journal.
+ *
+ * start_transaction gets us a new handle for a truncate transaction,
+ * and extend_transaction tries to extend the existing one a bit. If
+ * extend fails, we need to propagate the failure up and restart the
+ * transaction in the top-level truncate loop. --sct
+ */
+static handle_t *start_transaction(struct inode *inode)
+{
+ handle_t *result;
+
+ result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
+ if (!IS_ERR(result))
+ return result;
+
+ ext4_std_error(inode->i_sb, PTR_ERR(result));
+ return result;
+}
+
+/*
+ * Try to extend this transaction for the purposes of truncation.
+ *
+ * Returns 0 if we managed to create more room. If we can't create more
+ * room, and the transaction must be restarted we return 1.
+ */
+static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
+{
+ if (!ext4_handle_valid(handle))
+ return 0;
+ if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
+ return 0;
+ if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
+ return 0;
+ return 1;
+}
+
+/*
+ * Probably it should be a library function... search for first non-zero word
+ * or memcmp with zero_page, whatever is better for particular architecture.
+ * Linus?
+ */
+static inline int all_zeroes(__le32 *p, __le32 *q)
+{
+ while (p < q)
+ if (*p++)
+ return 0;
+ return 1;
+}
+
+/**
+ * ext4_find_shared - find the indirect blocks for partial truncation.
+ * @inode: inode in question
+ * @depth: depth of the affected branch
+ * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
+ * @chain: place to store the pointers to partial indirect blocks
+ * @top: place to the (detached) top of branch
+ *
+ * This is a helper function used by ext4_truncate().
+ *
+ * When we do truncate() we may have to clean the ends of several
+ * indirect blocks but leave the blocks themselves alive. Block is
+ * partially truncated if some data below the new i_size is referred
+ * from it (and it is on the path to the first completely truncated
+ * data block, indeed). We have to free the top of that path along
+ * with everything to the right of the path. Since no allocation
+ * past the truncation point is possible until ext4_truncate()
+ * finishes, we may safely do the latter, but top of branch may
+ * require special attention - pageout below the truncation point
+ * might try to populate it.
+ *
+ * We atomically detach the top of branch from the tree, store the
+ * block number of its root in *@top, pointers to buffer_heads of
+ * partially truncated blocks - in @chain[].bh and pointers to
+ * their last elements that should not be removed - in
+ * @chain[].p. Return value is the pointer to last filled element
+ * of @chain.
+ *
+ * The work left to caller to do the actual freeing of subtrees:
+ * a) free the subtree starting from *@top
+ * b) free the subtrees whose roots are stored in
+ * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
+ * c) free the subtrees growing from the inode past the @chain[0].
+ * (no partially truncated stuff there). */
+
+static Indirect *ext4_find_shared(struct inode *inode, int depth,
+ ext4_lblk_t offsets[4], Indirect chain[4],
+ __le32 *top)
+{
+ Indirect *partial, *p;
+ int k, err;
+
+ *top = 0;
+ /* Make k index the deepest non-null offset + 1 */
+ for (k = depth; k > 1 && !offsets[k-1]; k--)
+ ;
+ partial = ext4_get_branch(inode, k, offsets, chain, &err);
+ /* Writer: pointers */
+ if (!partial)
+ partial = chain + k-1;
+ /*
+ * If the branch acquired continuation since we've looked at it -
+ * fine, it should all survive and (new) top doesn't belong to us.
+ */
+ if (!partial->key && *partial->p)
+ /* Writer: end */
+ goto no_top;
+ for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
+ ;
+ /*
+ * OK, we've found the last block that must survive. The rest of our
+ * branch should be detached before unlocking. However, if that rest
+ * of branch is all ours and does not grow immediately from the inode
+ * it's easier to cheat and just decrement partial->p.
+ */
+ if (p == chain + k - 1 && p > chain) {
+ p->p--;
+ } else {
+ *top = *p->p;
+ /* Nope, don't do this in ext4. Must leave the tree intact */
+#if 0
+ *p->p = 0;
+#endif
+ }
+ /* Writer: end */
+
+ while (partial > p) {
+ brelse(partial->bh);
+ partial--;
+ }
+no_top:
+ return partial;
+}
+
+/*
+ * Zero a number of block pointers in either an inode or an indirect block.
+ * If we restart the transaction we must again get write access to the
+ * indirect block for further modification.
+ *
+ * We release `count' blocks on disk, but (last - first) may be greater
+ * than `count' because there can be holes in there.
+ *
+ * Return 0 on success, 1 on invalid block range
+ * and < 0 on fatal error.
+ */
+static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh,
+ ext4_fsblk_t block_to_free,
+ unsigned long count, __le32 *first,
+ __le32 *last)
+{
+ __le32 *p;
+ int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+ int err;
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ flags |= EXT4_FREE_BLOCKS_METADATA;
+
+ if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
+ count)) {
+ EXT4_ERROR_INODE(inode, "attempt to clear invalid "
+ "blocks %llu len %lu",
+ (unsigned long long) block_to_free, count);
+ return 1;
+ }
+
+ if (try_to_extend_transaction(handle, inode)) {
+ if (bh) {
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (unlikely(err))
+ goto out_err;
+ }
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err))
+ goto out_err;
+ err = ext4_truncate_restart_trans(handle, inode,
+ ext4_blocks_for_truncate(inode));
+ if (unlikely(err))
+ goto out_err;
+ if (bh) {
+ BUFFER_TRACE(bh, "retaking write access");
+ err = ext4_journal_get_write_access(handle, bh);
+ if (unlikely(err))
+ goto out_err;
+ }
+ }
+
+ for (p = first; p < last; p++)
+ *p = 0;
+
+ ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
+ return 0;
+out_err:
+ ext4_std_error(inode->i_sb, err);
+ return err;
+}
+
+/**
+ * ext4_free_data - free a list of data blocks
+ * @handle: handle for this transaction
+ * @inode: inode we are dealing with
+ * @this_bh: indirect buffer_head which contains *@first and *@last
+ * @first: array of block numbers
+ * @last: points immediately past the end of array
+ *
+ * We are freeing all blocks referred from that array (numbers are stored as
+ * little-endian 32-bit) and updating @inode->i_blocks appropriately.
+ *
+ * We accumulate contiguous runs of blocks to free. Conveniently, if these
+ * blocks are contiguous then releasing them at one time will only affect one
+ * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
+ * actually use a lot of journal space.
+ *
+ * @this_bh will be %NULL if @first and @last point into the inode's direct
+ * block pointers.
+ */
+static void ext4_free_data(handle_t *handle, struct inode *inode,
+ struct buffer_head *this_bh,
+ __le32 *first, __le32 *last)
+{
+ ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
+ unsigned long count = 0; /* Number of blocks in the run */
+ __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
+ corresponding to
+ block_to_free */
+ ext4_fsblk_t nr; /* Current block # */
+ __le32 *p; /* Pointer into inode/ind
+ for current block */
+ int err = 0;
+
+ if (this_bh) { /* For indirect block */
+ BUFFER_TRACE(this_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, this_bh);
+ /* Important: if we can't update the indirect pointers
+ * to the blocks, we can't free them. */
+ if (err)
+ return;
+ }
+
+ for (p = first; p < last; p++) {
+ nr = le32_to_cpu(*p);
+ if (nr) {
+ /* accumulate blocks to free if they're contiguous */
+ if (count == 0) {
+ block_to_free = nr;
+ block_to_free_p = p;
+ count = 1;
+ } else if (nr == block_to_free + count) {
+ count++;
+ } else {
+ err = ext4_clear_blocks(handle, inode, this_bh,
+ block_to_free, count,
+ block_to_free_p, p);
+ if (err)
+ break;
+ block_to_free = nr;
+ block_to_free_p = p;
+ count = 1;
+ }
+ }
+ }
+
+ if (!err && count > 0)
+ err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
+ count, block_to_free_p, p);
+ if (err < 0)
+ /* fatal error */
+ return;
+
+ if (this_bh) {
+ BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
+
+ /*
+ * The buffer head should have an attached journal head at this
+ * point. However, if the data is corrupted and an indirect
+ * block pointed to itself, it would have been detached when
+ * the block was cleared. Check for this instead of OOPSing.
+ */
+ if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
+ ext4_handle_dirty_metadata(handle, inode, this_bh);
+ else
+ EXT4_ERROR_INODE(inode,
+ "circular indirect block detected at "
+ "block %llu",
+ (unsigned long long) this_bh->b_blocknr);
+ }
+}
+
+/**
+ * ext4_free_branches - free an array of branches
+ * @handle: JBD handle for this transaction
+ * @inode: inode we are dealing with
+ * @parent_bh: the buffer_head which contains *@first and *@last
+ * @first: array of block numbers
+ * @last: pointer immediately past the end of array
+ * @depth: depth of the branches to free
+ *
+ * We are freeing all blocks referred from these branches (numbers are
+ * stored as little-endian 32-bit) and updating @inode->i_blocks
+ * appropriately.
+ */
+static void ext4_free_branches(handle_t *handle, struct inode *inode,
+ struct buffer_head *parent_bh,
+ __le32 *first, __le32 *last, int depth)
+{
+ ext4_fsblk_t nr;
+ __le32 *p;
+
+ if (ext4_handle_is_aborted(handle))
+ return;
+
+ if (depth--) {
+ struct buffer_head *bh;
+ int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ p = last;
+ while (--p >= first) {
+ nr = le32_to_cpu(*p);
+ if (!nr)
+ continue; /* A hole */
+
+ if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+ nr, 1)) {
+ EXT4_ERROR_INODE(inode,
+ "invalid indirect mapped "
+ "block %lu (level %d)",
+ (unsigned long) nr, depth);
+ break;
+ }
+
+ /* Go read the buffer for the next level down */
+ bh = sb_bread(inode->i_sb, nr);
+
+ /*
+ * A read failure? Report error and clear slot
+ * (should be rare).
+ */
+ if (!bh) {
+ EXT4_ERROR_INODE_BLOCK(inode, nr,
+ "Read failure");
+ continue;
+ }
+
+ /* This zaps the entire block. Bottom up. */
+ BUFFER_TRACE(bh, "free child branches");
+ ext4_free_branches(handle, inode, bh,
+ (__le32 *) bh->b_data,
+ (__le32 *) bh->b_data + addr_per_block,
+ depth);
+ brelse(bh);
+
+ /*
+			 * Everything below this pointer has been
+ * released. Now let this top-of-subtree go.
+ *
+ * We want the freeing of this indirect block to be
+ * atomic in the journal with the updating of the
+ * bitmap block which owns it. So make some room in
+ * the journal.
+ *
+ * We zero the parent pointer *after* freeing its
+ * pointee in the bitmaps, so if extend_transaction()
+ * for some reason fails to put the bitmap changes and
+ * the release into the same transaction, recovery
+ * will merely complain about releasing a free block,
+ * rather than leaking blocks.
+ */
+ if (ext4_handle_is_aborted(handle))
+ return;
+ if (try_to_extend_transaction(handle, inode)) {
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_truncate_restart_trans(handle, inode,
+ ext4_blocks_for_truncate(inode));
+ }
+
+ /*
+ * The forget flag here is critical because if
+ * we are journaling (and not doing data
+ * journaling), we have to make sure a revoke
+ * record is written to prevent the journal
+ * replay from overwriting the (former)
+ * indirect block if it gets reallocated as a
+ * data block. This must happen in the same
+ * transaction where the data blocks are
+ * actually freed.
+ */
+ ext4_free_blocks(handle, inode, NULL, nr, 1,
+ EXT4_FREE_BLOCKS_METADATA|
+ EXT4_FREE_BLOCKS_FORGET);
+
+ if (parent_bh) {
+ /*
+ * The block which we have just freed is
+ * pointed to by an indirect block: journal it
+ */
+ BUFFER_TRACE(parent_bh, "get_write_access");
+ if (!ext4_journal_get_write_access(handle,
+ parent_bh)){
+ *p = 0;
+ BUFFER_TRACE(parent_bh,
+ "call ext4_handle_dirty_metadata");
+ ext4_handle_dirty_metadata(handle,
+ inode,
+ parent_bh);
+ }
+ }
+ }
+ } else {
+ /* We have reached the bottom of the tree. */
+ BUFFER_TRACE(parent_bh, "free data blocks");
+ ext4_free_data(handle, inode, parent_bh, first, last);
+ }
+}
+
+void ext4_ind_truncate(struct inode *inode)
+{
+ handle_t *handle;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ __le32 *i_data = ei->i_data;
+ int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ struct address_space *mapping = inode->i_mapping;
+ ext4_lblk_t offsets[4];
+ Indirect chain[4];
+ Indirect *partial;
+ __le32 nr = 0;
+ int n = 0;
+ ext4_lblk_t last_block, max_block;
+ unsigned blocksize = inode->i_sb->s_blocksize;
+
+ handle = start_transaction(inode);
+ if (IS_ERR(handle))
+ return; /* AKPM: return what? */
+
+ last_block = (inode->i_size + blocksize-1)
+ >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+ max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+ >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+
+ if (inode->i_size & (blocksize - 1))
+ if (ext4_block_truncate_page(handle, mapping, inode->i_size))
+ goto out_stop;
+
+ if (last_block != max_block) {
+ n = ext4_block_to_path(inode, last_block, offsets, NULL);
+ if (n == 0)
+ goto out_stop; /* error */
+ }
+
+ /*
+ * OK. This truncate is going to happen. We add the inode to the
+ * orphan list, so that if this truncate spans multiple transactions,
+ * and we crash, we will resume the truncate when the filesystem
+ * recovers. It also marks the inode dirty, to catch the new size.
+ *
+ * Implication: the file must always be in a sane, consistent
+ * truncatable state while each transaction commits.
+ */
+ if (ext4_orphan_add(handle, inode))
+ goto out_stop;
+
+ /*
+ * From here we block out all ext4_get_block() callers who want to
+ * modify the block allocation tree.
+ */
+ down_write(&ei->i_data_sem);
+
+ ext4_discard_preallocations(inode);
+
+ /*
+ * The orphan list entry will now protect us from any crash which
+ * occurs before the truncate completes, so it is now safe to propagate
+ * the new, shorter inode size (held for now in i_size) into the
+ * on-disk inode. We do this via i_disksize, which is the value which
+ * ext4 *really* writes onto the disk inode.
+ */
+ ei->i_disksize = inode->i_size;
+
+ if (last_block == max_block) {
+ /*
+ * It is unnecessary to free any data blocks if last_block is
+ * equal to the indirect block limit.
+ */
+ goto out_unlock;
+ } else if (n == 1) { /* direct blocks */
+ ext4_free_data(handle, inode, NULL, i_data+offsets[0],
+ i_data + EXT4_NDIR_BLOCKS);
+ goto do_indirects;
+ }
+
+ partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+ /* Kill the top of shared branch (not detached) */
+ if (nr) {
+ if (partial == chain) {
+ /* Shared branch grows from the inode */
+ ext4_free_branches(handle, inode, NULL,
+ &nr, &nr+1, (chain+n-1) - partial);
+ *partial->p = 0;
+ /*
+ * We mark the inode dirty prior to restart,
+ * and prior to stop. No need for it here.
+ */
+ } else {
+ /* Shared branch grows from an indirect block */
+ BUFFER_TRACE(partial->bh, "get_write_access");
+ ext4_free_branches(handle, inode, partial->bh,
+ partial->p,
+ partial->p+1, (chain+n-1) - partial);
+ }
+ }
+ /* Clear the ends of indirect blocks on the shared branch */
+ while (partial > chain) {
+ ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
+ (__le32*)partial->bh->b_data+addr_per_block,
+ (chain+n-1) - partial);
+ BUFFER_TRACE(partial->bh, "call brelse");
+ brelse(partial->bh);
+ partial--;
+ }
+do_indirects:
+ /* Kill the remaining (whole) subtrees */
+ switch (offsets[0]) {
+ default:
+ nr = i_data[EXT4_IND_BLOCK];
+ if (nr) {
+ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+ i_data[EXT4_IND_BLOCK] = 0;
+ }
+ case EXT4_IND_BLOCK:
+ nr = i_data[EXT4_DIND_BLOCK];
+ if (nr) {
+ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+ i_data[EXT4_DIND_BLOCK] = 0;
+ }
+ case EXT4_DIND_BLOCK:
+ nr = i_data[EXT4_TIND_BLOCK];
+ if (nr) {
+ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+ i_data[EXT4_TIND_BLOCK] = 0;
+ }
+ case EXT4_TIND_BLOCK:
+ ;
+ }
+
+out_unlock:
+ up_write(&ei->i_data_sem);
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
+
+ /*
+ * In a multi-transaction truncate, we only make the final transaction
+ * synchronous
+ */
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+out_stop:
+ /*
+ * If this was a simple ftruncate(), and the file will remain alive
+ * then we need to clear up the orphan record which we created above.
+ * However, if this was a real unlink then we were called by
+ * ext4_delete_inode(), and we allow that function to clean up the
+ * orphan info for us.
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+
+ ext4_journal_stop(handle);
+ trace_ext4_truncate_exit(inode);
+}
+
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3e5191f9f398..d47264cafee0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -12,10 +12,6 @@
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
- * Goal-directed block allocation by Stephen Tweedie
- * (sct@redhat.com), 1993, 1998
- * Big-endian to little-endian byte-swapping/bitmaps by
- * David S. Miller (davem@caip.rutgers.edu), 1995
* 64-bit file support on 64-bit platforms by Jakub Jelinek
* (jj@sunsite.ms.mff.cuni.cz)
*
@@ -47,6 +43,7 @@
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"
+#include "truncate.h"
#include <trace/events/ext4.h>
@@ -89,72 +86,6 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
}
/*
- * Work out how many blocks we need to proceed with the next chunk of a
- * truncate transaction.
- */
-static unsigned long blocks_for_truncate(struct inode *inode)
-{
- ext4_lblk_t needed;
-
- needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
-
- /* Give ourselves just enough room to cope with inodes in which
- * i_blocks is corrupt: we've seen disk corruptions in the past
- * which resulted in random data in an inode which looked enough
- * like a regular file for ext4 to try to delete it. Things
- * will go a bit crazy if that happens, but at least we should
- * try not to panic the whole kernel. */
- if (needed < 2)
- needed = 2;
-
- /* But we need to bound the transaction so we don't overflow the
- * journal. */
- if (needed > EXT4_MAX_TRANS_DATA)
- needed = EXT4_MAX_TRANS_DATA;
-
- return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
-}
-
-/*
- * Truncate transactions can be complex and absolutely huge. So we need to
- * be able to restart the transaction at a conventient checkpoint to make
- * sure we don't overflow the journal.
- *
- * start_transaction gets us a new handle for a truncate transaction,
- * and extend_transaction tries to extend the existing one a bit. If
- * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct
- */
-static handle_t *start_transaction(struct inode *inode)
-{
- handle_t *result;
-
- result = ext4_journal_start(inode, blocks_for_truncate(inode));
- if (!IS_ERR(result))
- return result;
-
- ext4_std_error(inode->i_sb, PTR_ERR(result));
- return result;
-}
-
-/*
- * Try to extend this transaction for the purposes of truncation.
- *
- * Returns 0 if we managed to create more room. If we can't create more
- * room, and the transaction must be restarted we return 1.
- */
-static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
-{
- if (!ext4_handle_valid(handle))
- return 0;
- if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
- return 0;
- if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
- return 0;
- return 1;
-}
-
-/*
* Restart the transaction associated with *handle. This does a commit,
* so before we call here everything must be consistently dirtied against
* this transaction.
@@ -190,6 +121,33 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
if (inode->i_nlink) {
+ /*
+ * When journalling data dirty buffers are tracked only in the
+ * journal. So although mm thinks everything is clean and
+ * ready for reaping the inode might still have some pages to
+ * write in the running transaction or waiting to be
+ * checkpointed. Thus calling jbd2_journal_invalidatepage()
+ * (via truncate_inode_pages()) to discard these buffers can
+ * cause data loss. Also even if we did not discard these
+ * buffers, we would have no way to find them after the inode
+ * is reaped and thus user could see stale data if he tries to
+ * read them before the transaction is checkpointed. So be
+ * careful and force everything to disk here... We use
+ * ei->i_datasync_tid to store the newest transaction
+ * containing inode's data.
+ *
+ * Note that directories do not have this problem because they
+ * don't use page cache.
+ */
+ if (ext4_should_journal_data(inode) &&
+ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+
+ jbd2_log_start_commit(journal, commit_tid);
+ jbd2_log_wait_commit(journal, commit_tid);
+ filemap_write_and_wait(&inode->i_data);
+ }
truncate_inode_pages(&inode->i_data, 0);
goto no_delete;
}
@@ -204,7 +162,7 @@ void ext4_evict_inode(struct inode *inode)
if (is_bad_inode(inode))
goto no_delete;
- handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
+ handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
@@ -277,793 +235,6 @@ no_delete:
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
-typedef struct {
- __le32 *p;
- __le32 key;
- struct buffer_head *bh;
-} Indirect;
-
-static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
-{
- p->key = *(p->p = v);
- p->bh = bh;
-}
-
-/**
- * ext4_block_to_path - parse the block number into array of offsets
- * @inode: inode in question (we are only interested in its superblock)
- * @i_block: block number to be parsed
- * @offsets: array to store the offsets in
- * @boundary: set this non-zero if the referred-to block is likely to be
- * followed (on disk) by an indirect block.
- *
- * To store the locations of file's data ext4 uses a data structure common
- * for UNIX filesystems - tree of pointers anchored in the inode, with
- * data blocks at leaves and indirect blocks in intermediate nodes.
- * This function translates the block number into path in that tree -
- * return value is the path length and @offsets[n] is the offset of
- * pointer to (n+1)th node in the nth one. If @block is out of range
- * (negative or too large) warning is printed and zero returned.
- *
- * Note: function doesn't find node addresses, so no IO is needed. All
- * we need to know is the capacity of indirect blocks (taken from the
- * inode->i_sb).
- */
-
-/*
- * Portability note: the last comparison (check that we fit into triple
- * indirect block) is spelled differently, because otherwise on an
- * architecture with 32-bit longs and 8Kb pages we might get into trouble
- * if our filesystem had 8Kb blocks. We might use long long, but that would
- * kill us on x86. Oh, well, at least the sign propagation does not matter -
- * i_block would have to be negative in the very beginning, so we would not
- * get there at all.
- */
-
-static int ext4_block_to_path(struct inode *inode,
- ext4_lblk_t i_block,
- ext4_lblk_t offsets[4], int *boundary)
-{
- int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
- int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
- const long direct_blocks = EXT4_NDIR_BLOCKS,
- indirect_blocks = ptrs,
- double_blocks = (1 << (ptrs_bits * 2));
- int n = 0;
- int final = 0;
-
- if (i_block < direct_blocks) {
- offsets[n++] = i_block;
- final = direct_blocks;
- } else if ((i_block -= direct_blocks) < indirect_blocks) {
- offsets[n++] = EXT4_IND_BLOCK;
- offsets[n++] = i_block;
- final = ptrs;
- } else if ((i_block -= indirect_blocks) < double_blocks) {
- offsets[n++] = EXT4_DIND_BLOCK;
- offsets[n++] = i_block >> ptrs_bits;
- offsets[n++] = i_block & (ptrs - 1);
- final = ptrs;
- } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
- offsets[n++] = EXT4_TIND_BLOCK;
- offsets[n++] = i_block >> (ptrs_bits * 2);
- offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
- offsets[n++] = i_block & (ptrs - 1);
- final = ptrs;
- } else {
- ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
- i_block + direct_blocks +
- indirect_blocks + double_blocks, inode->i_ino);
- }
- if (boundary)
- *boundary = final - 1 - (i_block & (ptrs - 1));
- return n;
-}
-
-static int __ext4_check_blockref(const char *function, unsigned int line,
- struct inode *inode,
- __le32 *p, unsigned int max)
-{
- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
- __le32 *bref = p;
- unsigned int blk;
-
- while (bref < p+max) {
- blk = le32_to_cpu(*bref++);
- if (blk &&
- unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- blk, 1))) {
- es->s_last_error_block = cpu_to_le64(blk);
- ext4_error_inode(inode, function, line, blk,
- "invalid block");
- return -EIO;
- }
- }
- return 0;
-}
-
-
-#define ext4_check_indirect_blockref(inode, bh) \
- __ext4_check_blockref(__func__, __LINE__, inode, \
- (__le32 *)(bh)->b_data, \
- EXT4_ADDR_PER_BLOCK((inode)->i_sb))
-
-#define ext4_check_inode_blockref(inode) \
- __ext4_check_blockref(__func__, __LINE__, inode, \
- EXT4_I(inode)->i_data, \
- EXT4_NDIR_BLOCKS)
-
-/**
- * ext4_get_branch - read the chain of indirect blocks leading to data
- * @inode: inode in question
- * @depth: depth of the chain (1 - direct pointer, etc.)
- * @offsets: offsets of pointers in inode/indirect blocks
- * @chain: place to store the result
- * @err: here we store the error value
- *
- * Function fills the array of triples <key, p, bh> and returns %NULL
- * if everything went OK or the pointer to the last filled triple
- * (incomplete one) otherwise. Upon the return chain[i].key contains
- * the number of (i+1)-th block in the chain (as it is stored in memory,
- * i.e. little-endian 32-bit), chain[i].p contains the address of that
- * number (it points into struct inode for i==0 and into the bh->b_data
- * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
- * block for i>0 and NULL for i==0. In other words, it holds the block
- * numbers of the chain, addresses they were taken from (and where we can
- * verify that chain did not change) and buffer_heads hosting these
- * numbers.
- *
- * Function stops when it stumbles upon zero pointer (absent block)
- * (pointer to last triple returned, *@err == 0)
- * or when it gets an IO error reading an indirect block
- * (ditto, *@err == -EIO)
- * or when it reads all @depth-1 indirect blocks successfully and finds
- * the whole chain, all way to the data (returns %NULL, *err == 0).
- *
- * Need to be called with
- * down_read(&EXT4_I(inode)->i_data_sem)
- */
-static Indirect *ext4_get_branch(struct inode *inode, int depth,
- ext4_lblk_t *offsets,
- Indirect chain[4], int *err)
-{
- struct super_block *sb = inode->i_sb;
- Indirect *p = chain;
- struct buffer_head *bh;
-
- *err = 0;
- /* i_data is not going away, no lock needed */
- add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
- if (!p->key)
- goto no_block;
- while (--depth) {
- bh = sb_getblk(sb, le32_to_cpu(p->key));
- if (unlikely(!bh))
- goto failure;
-
- if (!bh_uptodate_or_lock(bh)) {
- if (bh_submit_read(bh) < 0) {
- put_bh(bh);
- goto failure;
- }
- /* validate block references */
- if (ext4_check_indirect_blockref(inode, bh)) {
- put_bh(bh);
- goto failure;
- }
- }
-
- add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
- /* Reader: end */
- if (!p->key)
- goto no_block;
- }
- return NULL;
-
-failure:
- *err = -EIO;
-no_block:
- return p;
-}
-
-/**
- * ext4_find_near - find a place for allocation with sufficient locality
- * @inode: owner
- * @ind: descriptor of indirect block.
- *
- * This function returns the preferred place for block allocation.
- * It is used when heuristic for sequential allocation fails.
- * Rules are:
- * + if there is a block to the left of our position - allocate near it.
- * + if pointer will live in indirect block - allocate near that block.
- * + if pointer will live in inode - allocate in the same
- * cylinder group.
- *
- * In the latter case we colour the starting block by the callers PID to
- * prevent it from clashing with concurrent allocations for a different inode
- * in the same block group. The PID is used here so that functionally related
- * files will be close-by on-disk.
- *
- * Caller must make sure that @ind is valid and will stay that way.
- */
-static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
- __le32 *p;
- ext4_fsblk_t bg_start;
- ext4_fsblk_t last_block;
- ext4_grpblk_t colour;
- ext4_group_t block_group;
- int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
-
- /* Try to find previous block */
- for (p = ind->p - 1; p >= start; p--) {
- if (*p)
- return le32_to_cpu(*p);
- }
-
- /* No such thing, so let's try location of indirect block */
- if (ind->bh)
- return ind->bh->b_blocknr;
-
- /*
- * It is going to be referred to from the inode itself? OK, just put it
- * into the same cylinder group then.
- */
- block_group = ei->i_block_group;
- if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
- block_group &= ~(flex_size-1);
- if (S_ISREG(inode->i_mode))
- block_group++;
- }
- bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
- last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
-
- /*
- * If we are doing delayed allocation, we don't need to take
- * colour into account.
- */
- if (test_opt(inode->i_sb, DELALLOC))
- return bg_start;
-
- if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
- colour = (current->pid % 16) *
- (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
- else
- colour = (current->pid % 16) * ((last_block - bg_start) / 16);
- return bg_start + colour;
-}
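
A small worked illustration of the colouring described above, with invented numbers: in a 32768-block group each of the 16 PID-derived colours starts 2048 blocks apart, so two unrelated writers get allocation goals in different parts of the group.

#include <stdio.h>

/* Illustrative only: mirrors the colour formula in the comment above. */
static unsigned long colour_goal(unsigned long bg_start,
				 unsigned long blocks_per_group,
				 int pid)
{
	unsigned long colour = (pid % 16) * (blocks_per_group / 16);

	return bg_start + colour;
}

int main(void)
{
	/* Two hypothetical PIDs allocating in the same 32768-block group. */
	printf("pid 1001 -> goal %lu\n", colour_goal(65536, 32768, 1001));
	printf("pid 1002 -> goal %lu\n", colour_goal(65536, 32768, 1002));
	return 0;
}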
-
-/**
- * ext4_find_goal - find a preferred place for allocation.
- * @inode: owner
- * @block: block we want
- * @partial: pointer to the last triple within a chain
- *
- * Normally this function finds the preferred place for block allocation
- * and returns it.
- * Because this is only used for non-extent files, we limit the block nr
- * to 32 bits.
- */
-static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
- Indirect *partial)
-{
- ext4_fsblk_t goal;
-
- /*
- * XXX need to get goal block from mballoc's data structures
- */
-
- goal = ext4_find_near(inode, partial);
- goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
- return goal;
-}
-
-/**
- * ext4_blks_to_allocate - Look up the block map and count the number
- * of direct blocks that need to be allocated for the given branch.
- *
- * @branch: chain of indirect blocks
- * @k: number of blocks needed for indirect blocks
- * @blks: number of data blocks to be mapped.
- * @blocks_to_boundary: the offset in the indirect block
- *
- * return the total number of blocks to be allocated, including the
- * direct and indirect blocks.
- */
-static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
- int blocks_to_boundary)
-{
- unsigned int count = 0;
-
- /*
- * Simple case: the [t,d]indirect block(s) have not been allocated yet,
- * so it's clear the blocks on that path have not been allocated either
- */
- if (k > 0) {
- /* right now we don't handle cross boundary allocation */
- if (blks < blocks_to_boundary + 1)
- count += blks;
- else
- count += blocks_to_boundary + 1;
- return count;
- }
-
- count++;
- while (count < blks && count <= blocks_to_boundary &&
- le32_to_cpu(*(branch[0].p + count)) == 0) {
- count++;
- }
- return count;
-}
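
The counting rule above is simple enough to reproduce standalone: starting from the slot after the goal, keep counting while the request is unsatisfied, the boundary has not been crossed, and the slots are still holes. The array below is a made-up slice of an indirect block, purely for illustration.

#include <stdio.h>

/* Count how many of the requested blocks can be allocated contiguously,
 * stopping at the first non-zero (already mapped) slot or at the boundary.
 * Mirrors the k == 0 case described above. */
static unsigned count_to_allocate(const unsigned *slots, unsigned blks,
				  unsigned blocks_to_boundary)
{
	unsigned count = 1;

	while (count < blks && count <= blocks_to_boundary && slots[count] == 0)
		count++;
	return count;
}

int main(void)
{
	unsigned slots[8] = { 0, 0, 0, 541, 0, 0, 0, 0 };

	/* Ask for 6 blocks, boundary 7 slots away: stops at the mapped slot 3. */
	printf("%u blocks to allocate\n", count_to_allocate(slots, 6, 7));
	return 0;
}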
-
-/**
- * ext4_alloc_blocks: multiple allocate blocks needed for a branch
- * @handle: handle for this transaction
- * @inode: inode which needs allocated blocks
- * @iblock: the logical block to start allocated at
- * @goal: preferred physical block of allocation
- * @indirect_blks: the number of blocks that need to be allocated for indirect
- * blocks
- * @blks: number of desired blocks
- * @new_blocks: on return it will store the new block numbers for
- * the indirect blocks(if needed) and the first direct block,
- * @err: on return it will store the error code
- *
- * This function will return the number of blocks allocated as
- * requested by the passed-in parameters.
- */
-static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
- ext4_lblk_t iblock, ext4_fsblk_t goal,
- int indirect_blks, int blks,
- ext4_fsblk_t new_blocks[4], int *err)
-{
- struct ext4_allocation_request ar;
- int target, i;
- unsigned long count = 0, blk_allocated = 0;
- int index = 0;
- ext4_fsblk_t current_block = 0;
- int ret = 0;
-
- /*
- * Here we try to allocate the requested multiple blocks at once,
- * on a best-effort basis.
- * To build a branch, we should allocate blocks for
- * the indirect blocks (if not allocated yet), and at least
- * the first direct block of this branch.  That's the
- * minimum (required) number of blocks to allocate.
- */
- /* first we try to allocate the indirect blocks */
- target = indirect_blks;
- while (target > 0) {
- count = target;
- /* allocating blocks for indirect blocks and direct blocks */
- current_block = ext4_new_meta_blocks(handle, inode, goal,
- 0, &count, err);
- if (*err)
- goto failed_out;
-
- if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
- EXT4_ERROR_INODE(inode,
- "current_block %llu + count %lu > %d!",
- current_block, count,
- EXT4_MAX_BLOCK_FILE_PHYS);
- *err = -EIO;
- goto failed_out;
- }
-
- target -= count;
- /* allocate blocks for indirect blocks */
- while (index < indirect_blks && count) {
- new_blocks[index++] = current_block++;
- count--;
- }
- if (count > 0) {
- /*
- * save the new block number
- * for the first direct block
- */
- new_blocks[index] = current_block;
- printk(KERN_INFO "%s returned more blocks than "
- "requested\n", __func__);
- WARN_ON(1);
- break;
- }
- }
-
- target = blks - count ;
- blk_allocated = count;
- if (!target)
- goto allocated;
- /* Now allocate data blocks */
- memset(&ar, 0, sizeof(ar));
- ar.inode = inode;
- ar.goal = goal;
- ar.len = target;
- ar.logical = iblock;
- if (S_ISREG(inode->i_mode))
- /* enable in-core preallocation only for regular files */
- ar.flags = EXT4_MB_HINT_DATA;
-
- current_block = ext4_mb_new_blocks(handle, &ar, err);
- if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
- EXT4_ERROR_INODE(inode,
- "current_block %llu + ar.len %d > %d!",
- current_block, ar.len,
- EXT4_MAX_BLOCK_FILE_PHYS);
- *err = -EIO;
- goto failed_out;
- }
-
- if (*err && (target == blks)) {
- /*
- * if the allocation failed and we didn't allocate
- * any blocks before
- */
- goto failed_out;
- }
- if (!*err) {
- if (target == blks) {
- /*
- * save the new block number
- * for the first direct block
- */
- new_blocks[index] = current_block;
- }
- blk_allocated += ar.len;
- }
-allocated:
- /* total number of blocks allocated for direct blocks */
- ret = blk_allocated;
- *err = 0;
- return ret;
-failed_out:
- for (i = 0; i < index; i++)
- ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
- return ret;
-}
-
-/**
- * ext4_alloc_branch - allocate and set up a chain of blocks.
- * @handle: handle for this transaction
- * @inode: owner
- * @indirect_blks: number of allocated indirect blocks
- * @blks: number of allocated direct blocks
- * @goal: preferred place for allocation
- * @offsets: offsets (in the blocks) to store the pointers to next.
- * @branch: place to store the chain in.
- *
- * This function allocates blocks, zeroes out all but the last one,
- * links them into chain and (if we are synchronous) writes them to disk.
- * In other words, it prepares a branch that can be spliced onto the
- * inode. It stores the information about that chain in the branch[], in
- * the same format as ext4_get_branch() would do. We are calling it after
- * we had read the existing part of chain and partial points to the last
- * triple of that (one with zero ->key). Upon the exit we have the same
- * picture as after the successful ext4_get_block(), except that in one
- * place chain is disconnected - *branch->p is still zero (we did not
- * set the last link), but branch->key contains the number that should
- * be placed into *branch->p to fill that gap.
- *
- * If allocation fails we free all blocks we've allocated (and forget
- * their buffer_heads) and return the error value from the failed
- * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
- * as described above and return 0.
- */
-static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
- ext4_lblk_t iblock, int indirect_blks,
- int *blks, ext4_fsblk_t goal,
- ext4_lblk_t *offsets, Indirect *branch)
-{
- int blocksize = inode->i_sb->s_blocksize;
- int i, n = 0;
- int err = 0;
- struct buffer_head *bh;
- int num;
- ext4_fsblk_t new_blocks[4];
- ext4_fsblk_t current_block;
-
- num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
- *blks, new_blocks, &err);
- if (err)
- return err;
-
- branch[0].key = cpu_to_le32(new_blocks[0]);
- /*
- * metadata blocks and data blocks are allocated.
- */
- for (n = 1; n <= indirect_blks; n++) {
- /*
- * Get buffer_head for parent block, zero it out
- * and set the pointer to new one, then send
- * parent to disk.
- */
- bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
- if (unlikely(!bh)) {
- err = -EIO;
- goto failed;
- }
-
- branch[n].bh = bh;
- lock_buffer(bh);
- BUFFER_TRACE(bh, "call get_create_access");
- err = ext4_journal_get_create_access(handle, bh);
- if (err) {
- /* Don't brelse(bh) here; it's done in
- * ext4_journal_forget() below */
- unlock_buffer(bh);
- goto failed;
- }
-
- memset(bh->b_data, 0, blocksize);
- branch[n].p = (__le32 *) bh->b_data + offsets[n];
- branch[n].key = cpu_to_le32(new_blocks[n]);
- *branch[n].p = branch[n].key;
- if (n == indirect_blks) {
- current_block = new_blocks[n];
- /*
- * End of chain, update the last new metablock of
- * the chain to point to the new allocated
- * data blocks numbers
- */
- for (i = 1; i < num; i++)
- *(branch[n].p + i) = cpu_to_le32(++current_block);
- }
- BUFFER_TRACE(bh, "marking uptodate");
- set_buffer_uptodate(bh);
- unlock_buffer(bh);
-
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, bh);
- if (err)
- goto failed;
- }
- *blks = num;
- return err;
-failed:
- /* Allocation failed, free what we already allocated */
- ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
- for (i = 1; i <= n ; i++) {
- /*
- * branch[i].bh is newly allocated, so there is no
- * need to revoke the block, which is why we don't
- * need to set EXT4_FREE_BLOCKS_METADATA.
- */
- ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
- EXT4_FREE_BLOCKS_FORGET);
- }
- for (i = n+1; i < indirect_blks; i++)
- ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
-
- ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
-
- return err;
-}
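
To make the "disconnected branch" picture concrete, here is a minimal sketch with invented block numbers and simplified types: the freshly built indirect block already points at the data block, while the parent slot is deliberately left at zero until the splice step fills it.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct link {
	uint32_t key;	/* block number that should go into the slot */
	uint32_t *p;	/* the slot itself                            */
};

int main(void)
{
	uint32_t inode_slot = 0;		/* parent slot, still a hole       */
	uint32_t ind_block[4];			/* freshly allocated indirect blk  */
	uint32_t new_blocks[2] = { 801, 802 };	/* invented: indirect, data block  */
	struct link branch[2];

	memset(ind_block, 0, sizeof(ind_block));

	/* Build the branch: the indirect block points at the data block ...  */
	branch[0].key = new_blocks[0];
	branch[0].p = &inode_slot;		/* ... but the parent slot stays 0 */
	branch[1].key = new_blocks[1];
	branch[1].p = &ind_block[0];
	*branch[1].p = branch[1].key;

	printf("parent slot before splice: %u\n", inode_slot);	/* 0   */
	*branch[0].p = branch[0].key;		/* the splice step             */
	printf("parent slot after splice:  %u\n", inode_slot);	/* 801 */
	return 0;
}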
-
-/**
- * ext4_splice_branch - splice the allocated branch onto inode.
- * @handle: handle for this transaction
- * @inode: owner
- * @block: (logical) number of block we are adding
- * @chain: chain of indirect blocks (with a missing link - see
- * ext4_alloc_branch)
- * @where: location of missing link
- * @num: number of indirect blocks we are adding
- * @blks: number of direct blocks we are adding
- *
- * This function fills the missing link and does all housekeeping needed in
- * inode (->i_blocks, etc.). In case of success we end up with the full
- * chain to new block and return 0.
- */
-static int ext4_splice_branch(handle_t *handle, struct inode *inode,
- ext4_lblk_t block, Indirect *where, int num,
- int blks)
-{
- int i;
- int err = 0;
- ext4_fsblk_t current_block;
-
- /*
- * If we're splicing into a [td]indirect block (as opposed to the
- * inode) then we need to get write access to the [td]indirect block
- * before the splice.
- */
- if (where->bh) {
- BUFFER_TRACE(where->bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, where->bh);
- if (err)
- goto err_out;
- }
- /* That's it */
-
- *where->p = where->key;
-
- /*
- * Update the host buffer_head or inode to point to the
- * just-allocated direct blocks
- */
- if (num == 0 && blks > 1) {
- current_block = le32_to_cpu(where->key) + 1;
- for (i = 1; i < blks; i++)
- *(where->p + i) = cpu_to_le32(current_block++);
- }
-
- /* We are done with atomic stuff, now do the rest of housekeeping */
- /* had we spliced it onto indirect block? */
- if (where->bh) {
- /*
- * If we spliced it onto an indirect block, we haven't
- * altered the inode. Note however that if it is being spliced
- * onto an indirect block at the very end of the file (the
- * file is growing) then we *will* alter the inode to reflect
- * the new i_size. But that is not done here - it is done in
- * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
- */
- jbd_debug(5, "splicing indirect only\n");
- BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, where->bh);
- if (err)
- goto err_out;
- } else {
- /*
- * OK, we spliced it into the inode itself on a direct block.
- */
- ext4_mark_inode_dirty(handle, inode);
- jbd_debug(5, "splicing direct\n");
- }
- return err;
-
-err_out:
- for (i = 1; i <= num; i++) {
- /*
- * branch[i].bh is newly allocated, so there is no
- * need to revoke the block, which is why we don't
- * need to set EXT4_FREE_BLOCKS_METADATA.
- */
- ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
- EXT4_FREE_BLOCKS_FORGET);
- }
- ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
- blks, 0);
-
- return err;
-}
-
-/*
- * The ext4_ind_map_blocks() function handles non-extents inodes
- * (i.e., using the traditional indirect/double-indirect i_blocks
- * scheme) for ext4_map_blocks().
- *
- * Allocation strategy is simple: if we have to allocate something, we will
- * have to go the whole way to leaf. So let's do it before attaching anything
- * to tree, set linkage between the newborn blocks, write them if sync is
- * required, recheck the path, free and repeat if check fails, otherwise
- * set the last missing link (that will protect us from any truncate-generated
- * removals - all blocks on the path are immune now) and possibly force the
- * write on the parent block.
- * That has a nice additional property: no special recovery from the failed
- * allocations is needed - we simply release blocks and do not touch anything
- * reachable from inode.
- *
- * `handle' can be NULL if create == 0.
- *
- * return > 0, # of blocks mapped or allocated.
- * return = 0, if plain lookup failed.
- * return < 0, error case.
- *
- * The ext4_ind_get_blocks() function should be called with
- * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
- * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
- * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
- * blocks.
- */
-static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
- struct ext4_map_blocks *map,
- int flags)
-{
- int err = -EIO;
- ext4_lblk_t offsets[4];
- Indirect chain[4];
- Indirect *partial;
- ext4_fsblk_t goal;
- int indirect_blks;
- int blocks_to_boundary = 0;
- int depth;
- int count = 0;
- ext4_fsblk_t first_block = 0;
-
- trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
- J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
- J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
- depth = ext4_block_to_path(inode, map->m_lblk, offsets,
- &blocks_to_boundary);
-
- if (depth == 0)
- goto out;
-
- partial = ext4_get_branch(inode, depth, offsets, chain, &err);
-
- /* Simplest case - block found, no allocation needed */
- if (!partial) {
- first_block = le32_to_cpu(chain[depth - 1].key);
- count++;
- /*map more blocks*/
- while (count < map->m_len && count <= blocks_to_boundary) {
- ext4_fsblk_t blk;
-
- blk = le32_to_cpu(*(chain[depth-1].p + count));
-
- if (blk == first_block + count)
- count++;
- else
- break;
- }
- goto got_it;
- }
-
- /* Next simple case - plain lookup or failed read of indirect block */
- if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
- goto cleanup;
-
- /*
- * Okay, we need to do block allocation.
- */
- goal = ext4_find_goal(inode, map->m_lblk, partial);
-
- /* the number of blocks need to allocate for [d,t]indirect blocks */
- indirect_blks = (chain + depth) - partial - 1;
-
- /*
- * Next look up the indirect map to count the total number of
- * direct blocks to allocate for this branch.
- */
- count = ext4_blks_to_allocate(partial, indirect_blks,
- map->m_len, blocks_to_boundary);
- /*
- * Block out ext4_truncate while we alter the tree
- */
- err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
- &count, goal,
- offsets + (partial - chain), partial);
-
- /*
- * The ext4_splice_branch call will free and forget any buffers
- * on the new chain if there is a failure, but that risks using
- * up transaction credits, especially for bitmaps where the
- * credits cannot be returned. Can we handle this somehow? We
- * may need to return -EAGAIN upwards in the worst case. --sct
- */
- if (!err)
- err = ext4_splice_branch(handle, inode, map->m_lblk,
- partial, indirect_blks, count);
- if (err)
- goto cleanup;
-
- map->m_flags |= EXT4_MAP_NEW;
-
- ext4_update_inode_fsync_trans(handle, inode, 1);
-got_it:
- map->m_flags |= EXT4_MAP_MAPPED;
- map->m_pblk = le32_to_cpu(chain[depth-1].key);
- map->m_len = count;
- if (count > blocks_to_boundary)
- map->m_flags |= EXT4_MAP_BOUNDARY;
- err = count;
- /* Clean up and exit */
- partial = chain + depth - 1; /* the whole chain */
-cleanup:
- while (partial > chain) {
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
- partial--;
- }
-out:
- trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
- map->m_pblk, map->m_len, err);
- return err;
-}
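
For orientation, the path arithmetic the lookup relies on can be re-derived for a 4 KiB block size, where an indirect block holds 1024 block numbers. This is a simplified restatement for illustration, not the kernel's ext4_block_to_path() itself.

#include <stdio.h>

#define NDIR	12		/* direct slots in the inode              */
#define PER	1024UL		/* block numbers per 4 KiB indirect block */

/* Return the chain depth (1..4) and fill offsets[] for a logical block. */
static int block_to_path(unsigned long blk, unsigned long offsets[4])
{
	if (blk < NDIR) {
		offsets[0] = blk;
		return 1;
	}
	blk -= NDIR;
	if (blk < PER) {
		offsets[0] = NDIR;		/* indirect slot        */
		offsets[1] = blk;
		return 2;
	}
	blk -= PER;
	if (blk < PER * PER) {
		offsets[0] = NDIR + 1;		/* double indirect slot */
		offsets[1] = blk / PER;
		offsets[2] = blk % PER;
		return 3;
	}
	blk -= PER * PER;
	offsets[0] = NDIR + 2;			/* triple indirect slot */
	offsets[1] = blk / (PER * PER);
	offsets[2] = (blk / PER) % PER;
	offsets[3] = blk % PER;
	return 4;
}

int main(void)
{
	unsigned long off[4];
	int depth = block_to_path(2000, off);

	printf("block 2000: depth %d, offsets %lu/%lu/%lu\n",
	       depth, off[0], off[1], off[2]);
	return 0;
}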
-
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
@@ -1073,33 +244,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
/*
 * Calculate the number of metadata blocks needed to reserve
- * to allocate a new block at @lblock for a non-extent-based file
- */
-static int ext4_indirect_calc_metadata_amount(struct inode *inode,
- sector_t lblock)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
- int blk_bits;
-
- if (lblock < EXT4_NDIR_BLOCKS)
- return 0;
-
- lblock -= EXT4_NDIR_BLOCKS;
-
- if (ei->i_da_metadata_calc_len &&
- (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
- ei->i_da_metadata_calc_len++;
- return 0;
- }
- ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
- ei->i_da_metadata_calc_len = 1;
- blk_bits = order_base_2(lblock);
- return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
-}
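
A hedged worked example of the reservation estimate being removed above, assuming 4 KiB blocks (so each indirect level covers 2^10 entries): block 100 needs at most one new indirect block, while block 5000 can also require a new double-indirect block.

#include <stdio.h>

#define NDIR		12
#define ADDR_BITS	10	/* log2(1024) pointers per 4 KiB indirect block */

static int order_base_2(unsigned long n)
{
	int bits = 0;

	while ((1UL << bits) < n)
		bits++;
	return bits;
}

/* Worst-case metadata blocks needed to map one data block at 'lblock';
 * a simplified restatement of the estimate above, for illustration. */
static int metadata_amount(unsigned long lblock)
{
	if (lblock < NDIR)
		return 0;			/* direct slot: no metadata */
	lblock -= NDIR;
	return order_base_2(lblock) / ADDR_BITS + 1;
}

int main(void)
{
	printf("lblock 100  -> %d metadata block(s)\n", metadata_amount(100));
	printf("lblock 5000 -> %d metadata block(s)\n", metadata_amount(5000));
	return 0;
}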
-
-/*
- * Calculate the number of metadata blocks needed to reserve
* to allocate a block located at @lblock
*/
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
@@ -1107,7 +251,7 @@ static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return ext4_ext_calc_metadata_amount(inode, lblock);
- return ext4_indirect_calc_metadata_amount(inode, lblock);
+ return ext4_ind_calc_metadata_amount(inode, lblock);
}
/*
@@ -1589,16 +733,6 @@ static int do_journal_get_write_access(handle_t *handle,
return ret;
}
-/*
- * Truncate blocks that were not used by write. We have to truncate the
- * pagecache as well so that corresponding buffers get properly unmapped.
- */
-static void ext4_truncate_failed_write(struct inode *inode)
-{
- truncate_inode_pages(inode->i_mapping, inode->i_size);
- ext4_truncate(inode);
-}
-
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
@@ -1863,6 +997,7 @@ static int ext4_journalled_write_end(struct file *file,
if (new_i_size > inode->i_size)
i_size_write(inode, pos+copied);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
if (new_i_size > EXT4_I(inode)->i_disksize) {
ext4_update_i_disksize(inode, new_i_size);
ret2 = ext4_mark_inode_dirty(handle, inode);
@@ -2571,6 +1706,7 @@ static int __ext4_journalled_writepage(struct page *page,
write_end_fn);
if (ret == 0)
ret = err;
+ EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
@@ -3450,112 +2586,6 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
}
/*
- * O_DIRECT for ext3 (or indirect map) based files
- *
- * If the O_DIRECT write will extend the file then add this inode to the
- * orphan list. So recovery will truncate it back to the original size
- * if the machine crashes during the write.
- *
- * If the O_DIRECT write is instantiating holes inside i_size and the machine
- * crashes then stale disk data _may_ be exposed inside the file. But current
- * VFS code falls back into buffered path in that case so we are safe.
- */
-static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct ext4_inode_info *ei = EXT4_I(inode);
- handle_t *handle;
- ssize_t ret;
- int orphan = 0;
- size_t count = iov_length(iov, nr_segs);
- int retries = 0;
-
- if (rw == WRITE) {
- loff_t final_size = offset + count;
-
- if (final_size > inode->i_size) {
- /* Credits for sb + inode write */
- handle = ext4_journal_start(inode, 2);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
- }
- ret = ext4_orphan_add(handle, inode);
- if (ret) {
- ext4_journal_stop(handle);
- goto out;
- }
- orphan = 1;
- ei->i_disksize = inode->i_size;
- ext4_journal_stop(handle);
- }
- }
-
-retry:
- if (rw == READ && ext4_should_dioread_nolock(inode))
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iov,
- offset, nr_segs,
- ext4_get_block, NULL, NULL, 0);
- else {
- ret = blockdev_direct_IO(rw, iocb, inode, iov,
- offset, nr_segs, ext4_get_block);
-
- if (unlikely((rw & WRITE) && ret < 0)) {
- loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
-
- if (end > isize)
- ext4_truncate_failed_write(inode);
- }
- }
- if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
-
- if (orphan) {
- int err;
-
- /* Credits for sb + inode write */
- handle = ext4_journal_start(inode, 2);
- if (IS_ERR(handle)) {
- /* This is really bad luck. We've written the data
- * but cannot extend i_size. Bail out and pretend
- * the write failed... */
- ret = PTR_ERR(handle);
- if (inode->i_nlink)
- ext4_orphan_del(NULL, inode);
-
- goto out;
- }
- if (inode->i_nlink)
- ext4_orphan_del(handle, inode);
- if (ret > 0) {
- loff_t end = offset + ret;
- if (end > inode->i_size) {
- ei->i_disksize = end;
- i_size_write(inode, end);
- /*
- * We're going to return a positive `ret'
- * here due to non-zero-length I/O, so there's
- * no way of reporting error returns from
- * ext4_mark_inode_dirty() to userspace. So
- * ignore it.
- */
- ext4_mark_inode_dirty(handle, inode);
- }
- }
- err = ext4_journal_stop(handle);
- if (ret == 0)
- ret = err;
- }
-out:
- return ret;
-}
-
-/*
* ext4_get_block used when preparing for a DIO write or buffer write.
 * We allocate an uninitialized extent if blocks haven't been allocated.
* The extent will be converted to initialized after the IO is complete.
@@ -4033,383 +3063,6 @@ unlock:
return err;
}
-/*
- * Probably it should be a library function... search for first non-zero word
- * or memcmp with zero_page, whatever is better for particular architecture.
- * Linus?
- */
-static inline int all_zeroes(__le32 *p, __le32 *q)
-{
- while (p < q)
- if (*p++)
- return 0;
- return 1;
-}
-
-/**
- * ext4_find_shared - find the indirect blocks for partial truncation.
- * @inode: inode in question
- * @depth: depth of the affected branch
- * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
- * @chain: place to store the pointers to partial indirect blocks
- * @top: place to the (detached) top of branch
- *
- * This is a helper function used by ext4_truncate().
- *
- * When we do truncate() we may have to clean the ends of several
- * indirect blocks but leave the blocks themselves alive. Block is
- * partially truncated if some data below the new i_size is referred
- * from it (and it is on the path to the first completely truncated
- * data block, indeed). We have to free the top of that path along
- * with everything to the right of the path. Since no allocation
- * past the truncation point is possible until ext4_truncate()
- * finishes, we may safely do the latter, but top of branch may
- * require special attention - pageout below the truncation point
- * might try to populate it.
- *
- * We atomically detach the top of branch from the tree, store the
- * block number of its root in *@top, pointers to buffer_heads of
- * partially truncated blocks - in @chain[].bh and pointers to
- * their last elements that should not be removed - in
- * @chain[].p. Return value is the pointer to last filled element
- * of @chain.
- *
- * The work left to caller to do the actual freeing of subtrees:
- * a) free the subtree starting from *@top
- * b) free the subtrees whose roots are stored in
- * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
- * c) free the subtrees growing from the inode past the @chain[0].
- * (no partially truncated stuff there). */
-
-static Indirect *ext4_find_shared(struct inode *inode, int depth,
- ext4_lblk_t offsets[4], Indirect chain[4],
- __le32 *top)
-{
- Indirect *partial, *p;
- int k, err;
-
- *top = 0;
- /* Make k index the deepest non-null offset + 1 */
- for (k = depth; k > 1 && !offsets[k-1]; k--)
- ;
- partial = ext4_get_branch(inode, k, offsets, chain, &err);
- /* Writer: pointers */
- if (!partial)
- partial = chain + k-1;
- /*
- * If the branch acquired continuation since we've looked at it -
- * fine, it should all survive and (new) top doesn't belong to us.
- */
- if (!partial->key && *partial->p)
- /* Writer: end */
- goto no_top;
- for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
- ;
- /*
- * OK, we've found the last block that must survive. The rest of our
- * branch should be detached before unlocking. However, if that rest
- * of branch is all ours and does not grow immediately from the inode
- * it's easier to cheat and just decrement partial->p.
- */
- if (p == chain + k - 1 && p > chain) {
- p->p--;
- } else {
- *top = *p->p;
- /* Nope, don't do this in ext4. Must leave the tree intact */
-#if 0
- *p->p = 0;
-#endif
- }
- /* Writer: end */
-
- while (partial > p) {
- brelse(partial->bh);
- partial--;
- }
-no_top:
- return partial;
-}
-
-/*
- * Zero a number of block pointers in either an inode or an indirect block.
- * If we restart the transaction we must again get write access to the
- * indirect block for further modification.
- *
- * We release `count' blocks on disk, but (last - first) may be greater
- * than `count' because there can be holes in there.
- *
- * Return 0 on success, 1 on invalid block range
- * and < 0 on fatal error.
- */
-static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
- struct buffer_head *bh,
- ext4_fsblk_t block_to_free,
- unsigned long count, __le32 *first,
- __le32 *last)
-{
- __le32 *p;
- int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
- int err;
-
- if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- flags |= EXT4_FREE_BLOCKS_METADATA;
-
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
- count)) {
- EXT4_ERROR_INODE(inode, "attempt to clear invalid "
- "blocks %llu len %lu",
- (unsigned long long) block_to_free, count);
- return 1;
- }
-
- if (try_to_extend_transaction(handle, inode)) {
- if (bh) {
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, bh);
- if (unlikely(err))
- goto out_err;
- }
- err = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(err))
- goto out_err;
- err = ext4_truncate_restart_trans(handle, inode,
- blocks_for_truncate(inode));
- if (unlikely(err))
- goto out_err;
- if (bh) {
- BUFFER_TRACE(bh, "retaking write access");
- err = ext4_journal_get_write_access(handle, bh);
- if (unlikely(err))
- goto out_err;
- }
- }
-
- for (p = first; p < last; p++)
- *p = 0;
-
- ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
- return 0;
-out_err:
- ext4_std_error(inode->i_sb, err);
- return err;
-}
-
-/**
- * ext4_free_data - free a list of data blocks
- * @handle: handle for this transaction
- * @inode: inode we are dealing with
- * @this_bh: indirect buffer_head which contains *@first and *@last
- * @first: array of block numbers
- * @last: points immediately past the end of array
- *
- * We are freeing all blocks referred from that array (numbers are stored as
- * little-endian 32-bit) and updating @inode->i_blocks appropriately.
- *
- * We accumulate contiguous runs of blocks to free. Conveniently, if these
- * blocks are contiguous then releasing them at one time will only affect one
- * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
- * actually use a lot of journal space.
- *
- * @this_bh will be %NULL if @first and @last point into the inode's direct
- * block pointers.
- */
-static void ext4_free_data(handle_t *handle, struct inode *inode,
- struct buffer_head *this_bh,
- __le32 *first, __le32 *last)
-{
- ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
- unsigned long count = 0; /* Number of blocks in the run */
- __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
- corresponding to
- block_to_free */
- ext4_fsblk_t nr; /* Current block # */
- __le32 *p; /* Pointer into inode/ind
- for current block */
- int err = 0;
-
- if (this_bh) { /* For indirect block */
- BUFFER_TRACE(this_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, this_bh);
- /* Important: if we can't update the indirect pointers
- * to the blocks, we can't free them. */
- if (err)
- return;
- }
-
- for (p = first; p < last; p++) {
- nr = le32_to_cpu(*p);
- if (nr) {
- /* accumulate blocks to free if they're contiguous */
- if (count == 0) {
- block_to_free = nr;
- block_to_free_p = p;
- count = 1;
- } else if (nr == block_to_free + count) {
- count++;
- } else {
- err = ext4_clear_blocks(handle, inode, this_bh,
- block_to_free, count,
- block_to_free_p, p);
- if (err)
- break;
- block_to_free = nr;
- block_to_free_p = p;
- count = 1;
- }
- }
- }
-
- if (!err && count > 0)
- err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
- count, block_to_free_p, p);
- if (err < 0)
- /* fatal error */
- return;
-
- if (this_bh) {
- BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
-
- /*
- * The buffer head should have an attached journal head at this
- * point. However, if the data is corrupted and an indirect
- * block pointed to itself, it would have been detached when
- * the block was cleared. Check for this instead of OOPSing.
- */
- if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
- ext4_handle_dirty_metadata(handle, inode, this_bh);
- else
- EXT4_ERROR_INODE(inode,
- "circular indirect block detected at "
- "block %llu",
- (unsigned long long) this_bh->b_blocknr);
- }
-}
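
The run-accumulation idea in the comment above is independent of ext4 and easy to show in isolation: extend the current run while the block numbers stay contiguous and flush it when contiguity breaks. Flushing just prints here, where the real code frees the blocks.

#include <stdio.h>

static void flush_run(unsigned long start, unsigned long count)
{
	printf("free %lu block(s) starting at %lu\n", count, start);
}

/* Accumulate contiguous block numbers from 'slots' into runs; zeros are holes. */
static void free_data(const unsigned long *slots, int n)
{
	unsigned long start = 0, count = 0;

	for (int i = 0; i < n; i++) {
		unsigned long nr = slots[i];

		if (!nr)
			continue;
		if (count && nr == start + count) {
			count++;
		} else {
			if (count)
				flush_run(start, count);
			start = nr;
			count = 1;
		}
	}
	if (count)
		flush_run(start, count);
}

int main(void)
{
	unsigned long slots[] = { 500, 501, 502, 0, 900, 901, 77 };

	free_data(slots, 7);	/* -> runs 500..502, 900..901, 77 */
	return 0;
}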
-
-/**
- * ext4_free_branches - free an array of branches
- * @handle: JBD handle for this transaction
- * @inode: inode we are dealing with
- * @parent_bh: the buffer_head which contains *@first and *@last
- * @first: array of block numbers
- * @last: pointer immediately past the end of array
- * @depth: depth of the branches to free
- *
- * We are freeing all blocks referred from these branches (numbers are
- * stored as little-endian 32-bit) and updating @inode->i_blocks
- * appropriately.
- */
-static void ext4_free_branches(handle_t *handle, struct inode *inode,
- struct buffer_head *parent_bh,
- __le32 *first, __le32 *last, int depth)
-{
- ext4_fsblk_t nr;
- __le32 *p;
-
- if (ext4_handle_is_aborted(handle))
- return;
-
- if (depth--) {
- struct buffer_head *bh;
- int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
- p = last;
- while (--p >= first) {
- nr = le32_to_cpu(*p);
- if (!nr)
- continue; /* A hole */
-
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- nr, 1)) {
- EXT4_ERROR_INODE(inode,
- "invalid indirect mapped "
- "block %lu (level %d)",
- (unsigned long) nr, depth);
- break;
- }
-
- /* Go read the buffer for the next level down */
- bh = sb_bread(inode->i_sb, nr);
-
- /*
- * A read failure? Report error and clear slot
- * (should be rare).
- */
- if (!bh) {
- EXT4_ERROR_INODE_BLOCK(inode, nr,
- "Read failure");
- continue;
- }
-
- /* This zaps the entire block. Bottom up. */
- BUFFER_TRACE(bh, "free child branches");
- ext4_free_branches(handle, inode, bh,
- (__le32 *) bh->b_data,
- (__le32 *) bh->b_data + addr_per_block,
- depth);
- brelse(bh);
-
- /*
- * Everything below this pointer has been
- * released. Now let this top-of-subtree go.
- *
- * We want the freeing of this indirect block to be
- * atomic in the journal with the updating of the
- * bitmap block which owns it. So make some room in
- * the journal.
- *
- * We zero the parent pointer *after* freeing its
- * pointee in the bitmaps, so if extend_transaction()
- * for some reason fails to put the bitmap changes and
- * the release into the same transaction, recovery
- * will merely complain about releasing a free block,
- * rather than leaking blocks.
- */
- if (ext4_handle_is_aborted(handle))
- return;
- if (try_to_extend_transaction(handle, inode)) {
- ext4_mark_inode_dirty(handle, inode);
- ext4_truncate_restart_trans(handle, inode,
- blocks_for_truncate(inode));
- }
-
- /*
- * The forget flag here is critical because if
- * we are journaling (and not doing data
- * journaling), we have to make sure a revoke
- * record is written to prevent the journal
- * replay from overwriting the (former)
- * indirect block if it gets reallocated as a
- * data block. This must happen in the same
- * transaction where the data blocks are
- * actually freed.
- */
- ext4_free_blocks(handle, inode, NULL, nr, 1,
- EXT4_FREE_BLOCKS_METADATA|
- EXT4_FREE_BLOCKS_FORGET);
-
- if (parent_bh) {
- /*
- * The block which we have just freed is
- * pointed to by an indirect block: journal it
- */
- BUFFER_TRACE(parent_bh, "get_write_access");
- if (!ext4_journal_get_write_access(handle,
- parent_bh)){
- *p = 0;
- BUFFER_TRACE(parent_bh,
- "call ext4_handle_dirty_metadata");
- ext4_handle_dirty_metadata(handle,
- inode,
- parent_bh);
- }
- }
- }
- } else {
- /* We have reached the bottom of the tree. */
- BUFFER_TRACE(parent_bh, "free data blocks");
- ext4_free_data(handle, inode, parent_bh, first, last);
- }
-}
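
As a loose sketch of the bottom-up order described above, using an in-memory tree instead of on-disk indirect blocks: every child subtree is released before the block that refers to it, which is why the parent pointer is only cleared after its pointee has been freed.

#include <stdio.h>

struct node {
	unsigned long blk;		/* pretend on-disk block number        */
	struct node *child[3];		/* pretend pointers held in that block */
};

/* Free the subtree rooted at 'n': children first, then the node itself. */
static void free_branches(struct node *n, int depth)
{
	if (!n)
		return;
	if (depth) {
		for (int i = 0; i < 3; i++)
			free_branches(n->child[i], depth - 1);
	}
	printf("release block %lu\n", n->blk);
}

int main(void)
{
	struct node leaf1 = { 11, { 0 } }, leaf2 = { 12, { 0 } };
	struct node ind   = { 2, { &leaf1, &leaf2, 0 } };

	free_branches(&ind, 1);		/* prints 11, 12, then 2 */
	return 0;
}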
-
int ext4_can_truncate(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
@@ -4476,19 +3129,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
*/
void ext4_truncate(struct inode *inode)
{
- handle_t *handle;
- struct ext4_inode_info *ei = EXT4_I(inode);
- __le32 *i_data = ei->i_data;
- int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
- struct address_space *mapping = inode->i_mapping;
- ext4_lblk_t offsets[4];
- Indirect chain[4];
- Indirect *partial;
- __le32 nr = 0;
- int n = 0;
- ext4_lblk_t last_block, max_block;
- unsigned blocksize = inode->i_sb->s_blocksize;
-
trace_ext4_truncate_enter(inode);
if (!ext4_can_truncate(inode))
@@ -4499,149 +3139,11 @@ void ext4_truncate(struct inode *inode)
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ext4_ext_truncate(inode);
- trace_ext4_truncate_exit(inode);
- return;
- }
-
- handle = start_transaction(inode);
- if (IS_ERR(handle))
- return; /* AKPM: return what? */
-
- last_block = (inode->i_size + blocksize-1)
- >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
- max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
- >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
-
- if (inode->i_size & (blocksize - 1))
- if (ext4_block_truncate_page(handle, mapping, inode->i_size))
- goto out_stop;
-
- if (last_block != max_block) {
- n = ext4_block_to_path(inode, last_block, offsets, NULL);
- if (n == 0)
- goto out_stop; /* error */
- }
-
- /*
- * OK. This truncate is going to happen. We add the inode to the
- * orphan list, so that if this truncate spans multiple transactions,
- * and we crash, we will resume the truncate when the filesystem
- * recovers. It also marks the inode dirty, to catch the new size.
- *
- * Implication: the file must always be in a sane, consistent
- * truncatable state while each transaction commits.
- */
- if (ext4_orphan_add(handle, inode))
- goto out_stop;
-
- /*
- * From here we block out all ext4_get_block() callers who want to
- * modify the block allocation tree.
- */
- down_write(&ei->i_data_sem);
-
- ext4_discard_preallocations(inode);
-
- /*
- * The orphan list entry will now protect us from any crash which
- * occurs before the truncate completes, so it is now safe to propagate
- * the new, shorter inode size (held for now in i_size) into the
- * on-disk inode. We do this via i_disksize, which is the value which
- * ext4 *really* writes onto the disk inode.
- */
- ei->i_disksize = inode->i_size;
-
- if (last_block == max_block) {
- /*
- * It is unnecessary to free any data blocks if last_block is
- * equal to the indirect block limit.
- */
- goto out_unlock;
- } else if (n == 1) { /* direct blocks */
- ext4_free_data(handle, inode, NULL, i_data+offsets[0],
- i_data + EXT4_NDIR_BLOCKS);
- goto do_indirects;
- }
-
- partial = ext4_find_shared(inode, n, offsets, chain, &nr);
- /* Kill the top of shared branch (not detached) */
- if (nr) {
- if (partial == chain) {
- /* Shared branch grows from the inode */
- ext4_free_branches(handle, inode, NULL,
- &nr, &nr+1, (chain+n-1) - partial);
- *partial->p = 0;
- /*
- * We mark the inode dirty prior to restart,
- * and prior to stop. No need for it here.
- */
- } else {
- /* Shared branch grows from an indirect block */
- BUFFER_TRACE(partial->bh, "get_write_access");
- ext4_free_branches(handle, inode, partial->bh,
- partial->p,
- partial->p+1, (chain+n-1) - partial);
- }
- }
- /* Clear the ends of indirect blocks on the shared branch */
- while (partial > chain) {
- ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
- (__le32*)partial->bh->b_data+addr_per_block,
- (chain+n-1) - partial);
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
- partial--;
- }
-do_indirects:
- /* Kill the remaining (whole) subtrees */
- switch (offsets[0]) {
- default:
- nr = i_data[EXT4_IND_BLOCK];
- if (nr) {
- ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
- i_data[EXT4_IND_BLOCK] = 0;
- }
- case EXT4_IND_BLOCK:
- nr = i_data[EXT4_DIND_BLOCK];
- if (nr) {
- ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
- i_data[EXT4_DIND_BLOCK] = 0;
- }
- case EXT4_DIND_BLOCK:
- nr = i_data[EXT4_TIND_BLOCK];
- if (nr) {
- ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
- i_data[EXT4_TIND_BLOCK] = 0;
- }
- case EXT4_TIND_BLOCK:
- ;
- }
-
-out_unlock:
- up_write(&ei->i_data_sem);
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
-
- /*
- * In a multi-transaction truncate, we only make the final transaction
- * synchronous
- */
- if (IS_SYNC(inode))
- ext4_handle_sync(handle);
-out_stop:
- /*
- * If this was a simple ftruncate(), and the file will remain alive
- * then we need to clear up the orphan record which we created above.
- * However, if this was a real unlink then we were called by
- * ext4_delete_inode(), and we allow that function to clean up the
- * orphan info for us.
- */
- if (inode->i_nlink)
- ext4_orphan_del(handle, inode);
+ else
+ ext4_ind_truncate(inode);
- ext4_journal_stop(handle);
trace_ext4_truncate_exit(inode);
}
@@ -5012,7 +3514,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode))) {
/* Validate block references which are part of inode */
- ret = ext4_check_inode_blockref(inode);
+ ret = ext4_ind_check_inode(inode);
}
if (ret)
goto bad_inode;
@@ -5459,34 +3961,10 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
-static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
- int chunk)
-{
- int indirects;
-
- /* if nrblocks are contiguous */
- if (chunk) {
- /*
- * With N contiguous data blocks, we need at most
- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
- * 2 dindirect blocks, and 1 tindirect block
- */
- return DIV_ROUND_UP(nrblocks,
- EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
- }
- /*
- * if nrblocks are not contiguous, worst case each block touches
- * an indirect block, and each indirect block touches a double indirect
- * block, plus a triple indirect block
- */
- indirects = nrblocks * 2 + 1;
- return indirects;
-}
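
A worked example of the credit estimate being replaced above, assuming 4 KiB blocks (1024 pointers per indirect block): 2048 contiguous blocks cost at most a handful of metadata credits, while 2048 scattered blocks are charged the pessimistic two-per-block rate.

#include <stdio.h>

#define ADDR_PER_BLOCK	1024	/* pointers per 4 KiB indirect block */

static int indirect_trans_blocks(int nrblocks, int contiguous)
{
	if (contiguous)
		/* up to N/1024 (+1) indirect blocks, 2 dindirect, 1 tindirect */
		return (nrblocks + ADDR_PER_BLOCK - 1) / ADDR_PER_BLOCK + 4;
	/* worst case: every block touches its own indirect + dindirect chain */
	return nrblocks * 2 + 1;
}

int main(void)
{
	printf("2048 contiguous blocks -> %d metadata credits\n",
	       indirect_trans_blocks(2048, 1));
	printf("2048 scattered blocks  -> %d metadata credits\n",
	       indirect_trans_blocks(2048, 0));
	return 0;
}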
-
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
+ return ext4_ind_trans_blocks(inode, nrblocks, chunk);
return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 808c554e773f..f18bfe37aff8 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -202,8 +202,9 @@ setversion_out:
struct super_block *sb = inode->i_sb;
int err, err2=0;
- if (!capable(CAP_SYS_RESOURCE))
- return -EPERM;
+ err = ext4_resize_begin(sb);
+ if (err)
+ return err;
if (get_user(n_blocks_count, (__u32 __user *)arg))
return -EFAULT;
@@ -221,6 +222,7 @@ setversion_out:
if (err == 0)
err = err2;
mnt_drop_write(filp->f_path.mnt);
+ ext4_resize_end(sb);
return err;
}
@@ -271,8 +273,9 @@ mext_out:
struct super_block *sb = inode->i_sb;
int err, err2=0;
- if (!capable(CAP_SYS_RESOURCE))
- return -EPERM;
+ err = ext4_resize_begin(sb);
+ if (err)
+ return err;
if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
sizeof(input)))
@@ -291,6 +294,7 @@ mext_out:
if (err == 0)
err = err2;
mnt_drop_write(filp->f_path.mnt);
+ ext4_resize_end(sb);
return err;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 6ed859d56850..17a5a57c415a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -75,8 +75,8 @@
*
* The inode preallocation space is used looking at the _logical_ start
* block. If only the logical file block falls within the range of prealloc
- * space we will consume the particular prealloc space. This make sure that
- * that the we have contiguous physical blocks representing the file blocks
+ * space we will consume the particular prealloc space. This makes sure that
+ * we have contiguous physical blocks representing the file blocks
*
* The important thing to be noted in case of inode prealloc space is that
* we don't modify the values associated to inode prealloc space except
@@ -84,7 +84,7 @@
*
* If we are not able to find blocks in the inode prealloc space and if we
* have the group allocation flag set then we look at the locality group
- * prealloc space. These are per CPU prealloc list repreasented as
+ * prealloc space. These are per CPU prealloc list represented as
*
* ext4_sb_info.s_locality_groups[smp_processor_id()]
*
@@ -128,12 +128,13 @@
* we are doing a group prealloc we try to normalize the request to
* sbi->s_mb_group_prealloc. Default value of s_mb_group_prealloc is
* 512 blocks. This can be tuned via
- * /sys/fs/ext4/<partition/mb_group_prealloc. The value is represented in
+ * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
* terms of number of blocks. If we have mounted the file system with -O
* stripe=<value> option the group prealloc request is normalized to the
- * stripe value (sbi->s_stripe)
+ * smallest multiple of the stripe value (sbi->s_stripe) which is
+ * greater than the default mb_group_prealloc.
*
- * The regular allocator(using the buddy cache) supports few tunables.
+ * The regular allocator (using the buddy cache) supports a few tunables.
*
* /sys/fs/ext4/<partition>/mb_min_to_scan
* /sys/fs/ext4/<partition>/mb_max_to_scan
@@ -152,7 +153,7 @@
* best extent in the found extents. Searching for the blocks starts with
* the group specified as the goal value in allocation context via
* ac_g_ex. Each group is first checked based on the criteria whether it
- * can used for allocation. ext4_mb_good_group explains how the groups are
+ * can be used for allocation. ext4_mb_good_group explains how the groups are
* checked.
*
* Both the prealloc space are getting populated as above. So for the first
@@ -492,10 +493,11 @@ static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
b2 = (unsigned char *) bitmap;
for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
if (b1[i] != b2[i]) {
- printk(KERN_ERR "corruption in group %u "
- "at byte %u(%u): %x in copy != %x "
- "on disk/prealloc\n",
- e4b->bd_group, i, i * 8, b1[i], b2[i]);
+ ext4_msg(e4b->bd_sb, KERN_ERR,
+ "corruption in group %u "
+ "at byte %u(%u): %x in copy != %x "
+ "on disk/prealloc",
+ e4b->bd_group, i, i * 8, b1[i], b2[i]);
BUG();
}
}
@@ -1125,7 +1127,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
grp = ext4_get_group_info(sb, group);
e4b->bd_blkbits = sb->s_blocksize_bits;
- e4b->bd_info = ext4_get_group_info(sb, group);
+ e4b->bd_info = grp;
e4b->bd_sb = sb;
e4b->bd_group = group;
e4b->bd_buddy_page = NULL;
@@ -1281,7 +1283,7 @@ static void mb_clear_bits(void *bm, int cur, int len)
}
}
-static void mb_set_bits(void *bm, int cur, int len)
+void ext4_set_bits(void *bm, int cur, int len)
{
__u32 *addr;
@@ -1510,7 +1512,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
}
mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
- mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
+ ext4_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
mb_check_buddy(e4b);
return ret;
@@ -2223,8 +2225,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
EXT4_DESC_PER_BLOCK_BITS(sb);
meta_group_info = kmalloc(metalen, GFP_KERNEL);
if (meta_group_info == NULL) {
- printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
- "buddy group\n");
+ ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate mem "
+ "for a buddy group");
goto exit_meta_group_info;
}
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
@@ -2237,7 +2239,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
if (meta_group_info[i] == NULL) {
- printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
+ ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate buddy mem");
goto exit_group_info;
}
memset(meta_group_info[i], 0, kmem_cache_size(cachep));
@@ -2279,8 +2281,10 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
exit_group_info:
/* If a meta_group_info table has been allocated, release it now */
- if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
+ if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
+ sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
+ }
exit_meta_group_info:
return -ENOMEM;
} /* ext4_mb_add_groupinfo */
@@ -2328,23 +2332,26 @@ static int ext4_mb_init_backend(struct super_block *sb)
/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
* kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
* So a two level scheme suffices for now. */
- sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
+ sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
if (sbi->s_group_info == NULL) {
- printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
+ ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
return -ENOMEM;
}
sbi->s_buddy_cache = new_inode(sb);
if (sbi->s_buddy_cache == NULL) {
- printk(KERN_ERR "EXT4-fs: can't get new inode\n");
+ ext4_msg(sb, KERN_ERR, "can't get new inode");
goto err_freesgi;
}
- sbi->s_buddy_cache->i_ino = get_next_ino();
+ /* To avoid potentially colliding with a valid on-disk inode number,
+ * use EXT4_BAD_INO for the buddy cache inode number. This inode is
+ * not in the inode hash, so it should never be found by iget(), but
+ * this will avoid confusion if it ever shows up during debugging. */
+ sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
for (i = 0; i < ngroups; i++) {
desc = ext4_get_group_desc(sb, i, NULL);
if (desc == NULL) {
- printk(KERN_ERR
- "EXT4-fs: can't read descriptor %u\n", i);
+ ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
goto err_freebuddy;
}
if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
@@ -2362,7 +2369,7 @@ err_freebuddy:
kfree(sbi->s_group_info[i]);
iput(sbi->s_buddy_cache);
err_freesgi:
- kfree(sbi->s_group_info);
+ ext4_kvfree(sbi->s_group_info);
return -ENOMEM;
}
@@ -2404,14 +2411,15 @@ static int ext4_groupinfo_create_slab(size_t size)
slab_size, 0, SLAB_RECLAIM_ACCOUNT,
NULL);
+ ext4_groupinfo_caches[cache_index] = cachep;
+
mutex_unlock(&ext4_grpinfo_slab_create_mutex);
if (!cachep) {
- printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
+ printk(KERN_EMERG
+ "EXT4-fs: no memory for groupinfo slab cache\n");
return -ENOMEM;
}
- ext4_groupinfo_caches[cache_index] = cachep;
-
return 0;
}
@@ -2457,12 +2465,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
i++;
} while (i <= sb->s_blocksize_bits + 1);
- /* init file for buddy data */
- ret = ext4_mb_init_backend(sb);
- if (ret != 0) {
- goto out;
- }
-
spin_lock_init(&sbi->s_md_lock);
spin_lock_init(&sbi->s_bal_lock);
@@ -2472,6 +2474,18 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
+ /*
+ * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
+ * to the lowest multiple of s_stripe which is bigger than
+ * the s_mb_group_prealloc as determined above. We want
+ * the preallocation size to be an exact multiple of the
+ * RAID stripe size so that preallocations don't fragment
+ * the stripes.
+ */
+ if (sbi->s_stripe > 1) {
+ sbi->s_mb_group_prealloc = roundup(
+ sbi->s_mb_group_prealloc, sbi->s_stripe);
+ }
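
A small worked example of the rounding introduced above, with invented numbers: the default group preallocation of 512 blocks and a stripe of 24 blocks round up to 528, the first multiple of 24 at or above 512.

#include <stdio.h>

/* roundup(x, y) as used above: the smallest multiple of y that is >= x. */
static unsigned roundup_to(unsigned x, unsigned y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	unsigned group_prealloc = 512;	/* MB_DEFAULT_GROUP_PREALLOC       */
	unsigned stripe = 24;		/* hypothetical -o stripe=24 mount */

	if (stripe > 1)
		group_prealloc = roundup_to(group_prealloc, stripe);
	printf("mb_group_prealloc = %u\n", group_prealloc);	/* 528 */
	return 0;
}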
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
if (sbi->s_locality_groups == NULL) {
@@ -2487,6 +2501,12 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
spin_lock_init(&lg->lg_prealloc_lock);
}
+ /* init file for buddy data */
+ ret = ext4_mb_init_backend(sb);
+ if (ret != 0) {
+ goto out;
+ }
+
if (sbi->s_proc)
proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
&ext4_mb_seq_groups_fops, sb);
@@ -2544,32 +2564,32 @@ int ext4_mb_release(struct super_block *sb)
EXT4_DESC_PER_BLOCK_BITS(sb);
for (i = 0; i < num_meta_group_infos; i++)
kfree(sbi->s_group_info[i]);
- kfree(sbi->s_group_info);
+ ext4_kvfree(sbi->s_group_info);
}
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
if (sbi->s_buddy_cache)
iput(sbi->s_buddy_cache);
if (sbi->s_mb_stats) {
- printk(KERN_INFO
- "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u blocks %u reqs (%u success)",
atomic_read(&sbi->s_bal_allocated),
atomic_read(&sbi->s_bal_reqs),
atomic_read(&sbi->s_bal_success));
- printk(KERN_INFO
- "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
- "%u 2^N hits, %u breaks, %u lost\n",
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u extents scanned, %u goal hits, "
+ "%u 2^N hits, %u breaks, %u lost",
atomic_read(&sbi->s_bal_ex_scanned),
atomic_read(&sbi->s_bal_goals),
atomic_read(&sbi->s_bal_2orders),
atomic_read(&sbi->s_bal_breaks),
atomic_read(&sbi->s_mb_lost_chunks));
- printk(KERN_INFO
- "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
- sbi->s_mb_buddies_generated++,
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %lu generated and it took %Lu",
+ sbi->s_mb_buddies_generated,
sbi->s_mb_generation_time);
- printk(KERN_INFO
- "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u preallocated, %u discarded",
atomic_read(&sbi->s_mb_preallocated),
atomic_read(&sbi->s_mb_discarded));
}
@@ -2628,6 +2648,15 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
rb_erase(&entry->node, &(db->bb_free_root));
mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
+ /*
+ * Clear the trimmed flag for the group so that the next
+ * ext4_trim_fs can trim it.
+ * If the volume is mounted with -o discard, online discard
+ * is supported and the free blocks will be trimmed online.
+ */
+ if (!test_opt(sb, DISCARD))
+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
+
if (!db->bb_free_root.rb_node) {
/* No more items in the per group rb tree
* balance refcounts from ext4_mb_free_metadata()
@@ -2771,8 +2800,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
* We leak some of the blocks here.
*/
ext4_lock_group(sb, ac->ac_b_ex.fe_group);
- mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
- ac->ac_b_ex.fe_len);
+ ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+ ac->ac_b_ex.fe_len);
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!err)
@@ -2790,7 +2819,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
}
}
#endif
- mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
+ ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+ ac->ac_b_ex.fe_len);
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_blks_set(sb, gdp,
@@ -2830,8 +2860,9 @@ out_err:
/*
* here we normalize request for locality group
- * Group request are normalized to s_strip size if we set the same via mount
- * option. If not we set it to s_mb_group_prealloc which can be configured via
+ * Group requests are normalized to s_mb_group_prealloc, which goes to
+ * s_stripe if we set the same via mount option.
+ * s_mb_group_prealloc can be configured via
* /sys/fs/ext4/<partition>/mb_group_prealloc
*
* XXX: should we try to preallocate more than the group has now?
@@ -2842,10 +2873,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
struct ext4_locality_group *lg = ac->ac_lg;
BUG_ON(lg == NULL);
- if (EXT4_SB(sb)->s_stripe)
- ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
- else
- ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
+ ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
mb_debug(1, "#%u: goal %u blocks for locality group\n",
current->pid, ac->ac_g_ex.fe_len);
}
@@ -3001,9 +3029,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
if (start + size <= ac->ac_o_ex.fe_logical &&
start > ac->ac_o_ex.fe_logical) {
- printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
- (unsigned long) start, (unsigned long) size,
- (unsigned long) ac->ac_o_ex.fe_logical);
+ ext4_msg(ac->ac_sb, KERN_ERR,
+ "start %lu, size %lu, fe_logical %lu",
+ (unsigned long) start, (unsigned long) size,
+ (unsigned long) ac->ac_o_ex.fe_logical);
}
BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
start > ac->ac_o_ex.fe_logical);
@@ -3262,7 +3291,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
while (n) {
entry = rb_entry(n, struct ext4_free_data, node);
- mb_set_bits(bitmap, entry->start_blk, entry->count);
+ ext4_set_bits(bitmap, entry->start_blk, entry->count);
n = rb_next(n);
}
return;
@@ -3304,7 +3333,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
if (unlikely(len == 0))
continue;
BUG_ON(groupnr != group);
- mb_set_bits(bitmap, start, len);
+ ext4_set_bits(bitmap, start, len);
preallocated += len;
count++;
}
@@ -3584,10 +3613,11 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
bit = next + 1;
}
if (free != pa->pa_free) {
- printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
- pa, (unsigned long) pa->pa_lstart,
- (unsigned long) pa->pa_pstart,
- (unsigned long) pa->pa_len);
+ ext4_msg(e4b->bd_sb, KERN_CRIT,
+ "pa %p: logic %lu, phys. %lu, len %lu",
+ pa, (unsigned long) pa->pa_lstart,
+ (unsigned long) pa->pa_pstart,
+ (unsigned long) pa->pa_len);
ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
free, pa->pa_free);
/*
@@ -3775,7 +3805,8 @@ repeat:
* use preallocation while we're discarding it */
spin_unlock(&pa->pa_lock);
spin_unlock(&ei->i_prealloc_lock);
- printk(KERN_ERR "uh-oh! used pa while discarding\n");
+ ext4_msg(sb, KERN_ERR,
+ "uh-oh! used pa while discarding");
WARN_ON(1);
schedule_timeout_uninterruptible(HZ);
goto repeat;
@@ -3852,12 +3883,13 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
return;
- printk(KERN_ERR "EXT4-fs: Can't allocate:"
- " Allocation context details:\n");
- printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: Can't allocate:"
+ " Allocation context details:");
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: status %d flags %d",
ac->ac_status, ac->ac_flags);
- printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
- "best %lu/%lu/%lu@%lu cr %d\n",
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: orig %lu/%lu/%lu@%lu, "
+ "goal %lu/%lu/%lu@%lu, "
+ "best %lu/%lu/%lu@%lu cr %d",
(unsigned long)ac->ac_o_ex.fe_group,
(unsigned long)ac->ac_o_ex.fe_start,
(unsigned long)ac->ac_o_ex.fe_len,
@@ -3871,9 +3903,9 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
(unsigned long)ac->ac_b_ex.fe_len,
(unsigned long)ac->ac_b_ex.fe_logical,
(int)ac->ac_criteria);
- printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
- ac->ac_found);
- printk(KERN_ERR "EXT4-fs: groups: \n");
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: %lu scanned, %d found",
+ ac->ac_ex_scanned, ac->ac_found);
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: groups: ");
ngroups = ext4_get_groups_count(sb);
for (i = 0; i < ngroups; i++) {
struct ext4_group_info *grp = ext4_get_group_info(sb, i);
@@ -4637,7 +4669,7 @@ do_more:
}
ext4_mark_super_dirty(sb);
error_return:
- if (freed)
+ if (freed && !(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
dquot_free_block(inode, freed);
brelse(bitmap_bh);
ext4_std_error(sb, err);
@@ -4645,7 +4677,7 @@ error_return:
}
/**
- * ext4_add_groupblocks() -- Add given blocks to an existing group
+ * ext4_group_add_blocks() -- Add given blocks to an existing group
* @handle: handle to this transaction
* @sb: super block
 * @block: start physical block to add to the block group
@@ -4653,7 +4685,7 @@ error_return:
*
* This marks the blocks as free in the bitmap and buddy.
*/
-void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count)
{
struct buffer_head *bitmap_bh = NULL;
@@ -4666,25 +4698,35 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
struct ext4_buddy e4b;
int err = 0, ret, blk_free_count;
ext4_grpblk_t blocks_freed;
- struct ext4_group_info *grp;
ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
+ if (count == 0)
+ return 0;
+
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
- grp = ext4_get_group_info(sb, block_group);
/*
* Check to see if we are freeing blocks across a group
* boundary.
*/
- if (bit + count > EXT4_BLOCKS_PER_GROUP(sb))
+ if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
+ ext4_warning(sb, "too many blocks added to group %u\n",
+ block_group);
+ err = -EINVAL;
goto error_return;
+ }
bitmap_bh = ext4_read_block_bitmap(sb, block_group);
- if (!bitmap_bh)
+ if (!bitmap_bh) {
+ err = -EIO;
goto error_return;
+ }
+
desc = ext4_get_group_desc(sb, block_group, &gd_bh);
- if (!desc)
+ if (!desc) {
+ err = -EIO;
goto error_return;
+ }
if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
in_range(ext4_inode_bitmap(sb, desc), block, count) ||
@@ -4694,6 +4736,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
ext4_error(sb, "Adding blocks in system zones - "
"Block = %llu, count = %lu",
block, count);
+ err = -EINVAL;
goto error_return;
}
@@ -4762,7 +4805,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
error_return:
brelse(bitmap_bh);
ext4_std_error(sb, err);
- return;
+ return err;
}
/**
@@ -4782,6 +4825,8 @@ static void ext4_trim_extent(struct super_block *sb, int start, int count,
{
struct ext4_free_extent ex;
+ trace_ext4_trim_extent(sb, group, start, count);
+
assert_spin_locked(ext4_group_lock_ptr(sb, group));
ex.fe_start = start;
@@ -4802,7 +4847,7 @@ static void ext4_trim_extent(struct super_block *sb, int start, int count,
/**
* ext4_trim_all_free -- function to trim all free space in alloc. group
* @sb: super block for file system
- * @e4b: ext4 buddy
+ * @group: group to be trimmed
* @start: first group block to examine
* @max: last group block to examine
* @minblocks: minimum extent block count
@@ -4823,10 +4868,12 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_grpblk_t minblocks)
{
void *bitmap;
- ext4_grpblk_t next, count = 0;
+ ext4_grpblk_t next, count = 0, free_count = 0;
struct ext4_buddy e4b;
int ret;
+ trace_ext4_trim_all_free(sb, group, start, max);
+
ret = ext4_mb_load_buddy(sb, group, &e4b);
if (ret) {
ext4_error(sb, "Error in loading buddy "
@@ -4836,6 +4883,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
bitmap = e4b.bd_bitmap;
ext4_lock_group(sb, group);
+ if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
+ minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
+ goto out;
+
start = (e4b.bd_info->bb_first_free > start) ?
e4b.bd_info->bb_first_free : start;
@@ -4850,6 +4901,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
next - start, group, &e4b);
count += next - start;
}
+ free_count += next - start;
start = next + 1;
if (fatal_signal_pending(current)) {
@@ -4863,9 +4915,13 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_lock_group(sb, group);
}
- if ((e4b.bd_info->bb_free - count) < minblocks)
+ if ((e4b.bd_info->bb_free - free_count) < minblocks)
break;
}
+
+ if (!ret)
+ EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
+out:
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
@@ -4904,6 +4960,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
return -EINVAL;
+ if (start + len <= first_data_blk)
+ goto out;
if (start < first_data_blk) {
len -= first_data_blk - start;
start = first_data_blk;
@@ -4952,5 +5010,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
range->len = trimmed * sb->s_blocksize;
+ if (!ret)
+ atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
+
+out:
return ret;
}
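
The trimming changes above make repeated FITRIM runs cheaper: once a group has been trimmed without error, EXT4_MB_GRP_SET_TRIMMED() marks it, and s_last_trim_minblks remembers the minimum extent length that was used, so a later trim with an equal or larger minlen can skip the group entirely. A stand-alone C sketch of that skip decision (user-space, with invented stand-in names rather than the kernel structures) follows:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for EXT4_MB_GRP_WAS_TRIMMED() and s_last_trim_minblks. */
struct group_info {
	bool was_trimmed;
};

static unsigned long last_trim_minblks;

/* A group is skipped when it was already trimmed with a minimum extent
 * length no larger than the one requested now. */
static bool should_skip_trim(const struct group_info *grp,
			     unsigned long minblocks)
{
	return grp->was_trimmed && minblocks >= last_trim_minblks;
}

int main(void)
{
	struct group_info grp = { .was_trimmed = true };

	last_trim_minblks = 4096;
	printf("minlen 8192 -> skip %d\n", should_skip_trim(&grp, 8192)); /* 1 */
	printf("minlen 1024 -> skip %d\n", should_skip_trim(&grp, 1024)); /* 0 */
	return 0;
}
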
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 20b5e7bfebd1..9d4a636b546c 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -187,7 +187,6 @@ struct ext4_allocation_context {
__u16 ac_flags; /* allocation hints */
__u8 ac_status;
__u8 ac_criteria;
- __u8 ac_repeats;
__u8 ac_2order; /* if request is to allocate 2^N blocks and
* N > 0, the field stores N, otherwise 0 */
__u8 ac_op; /* operation, for history only */
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 8c9babac43dc..565a154e22d4 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -289,7 +289,7 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_ent
while (len--) printk("%c", *name++);
ext4fs_dirhash(de->name, de->name_len, &h);
printk(":%x.%u ", h.hash,
- ((char *) de - base));
+ (unsigned) ((char *) de - base));
}
space += EXT4_DIR_REC_LEN(de->name_len);
names++;
@@ -1013,7 +1013,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
*err = -ENOENT;
errout:
- dxtrace(printk(KERN_DEBUG "%s not found\n", name));
+ dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
dx_release (frames);
return NULL;
}
@@ -1985,18 +1985,11 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
if (!list_empty(&EXT4_I(inode)->i_orphan))
goto out_unlock;
- /* Orphan handling is only valid for files with data blocks
- * being truncated, or files being unlinked. */
-
- /* @@@ FIXME: Observation from aviro:
- * I think I can trigger J_ASSERT in ext4_orphan_add(). We block
- * here (on s_orphan_lock), so race with ext4_link() which might bump
- * ->i_nlink. For, say it, character device. Not a regular file,
- * not a directory, not a symlink and ->i_nlink > 0.
- *
- * tytso, 4/25/2009: I'm not sure how that could happen;
- * shouldn't the fs core protect us from these sort of
- * unlink()/link() races?
+ /*
+ * Orphan handling is only valid for files with data blocks
+ * being truncated, or files being unlinked. Note that we either
+ * hold i_mutex, or the inode cannot be referenced from outside,
+ * so i_nlink should not be bumped due to a race.
*/
J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7bb8f76d470a..430c401d0895 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -285,11 +285,7 @@ static int io_submit_init(struct ext4_io_submit *io,
io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_end)
return -ENOMEM;
- do {
- bio = bio_alloc(GFP_NOIO, nvecs);
- nvecs >>= 1;
- } while (bio == NULL);
-
+ bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
bio->bi_private = io->io_end = io_end;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 80bbc9c60c24..707d3f16f7ce 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -16,6 +16,35 @@
#include "ext4_jbd2.h"
+int ext4_resize_begin(struct super_block *sb)
+{
+ int ret = 0;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ /*
+ * We are not allowed to do online-resizing on a filesystem mounted
+ * with errors, because it can destroy the filesystem easily.
+ */
+ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+ ext4_warning(sb, "There are errors in the filesystem, "
+ "so online resizing is not allowed\n");
+ return -EPERM;
+ }
+
+ if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags))
+ ret = -EBUSY;
+
+ return ret;
+}
+
+void ext4_resize_end(struct super_block *sb)
+{
+ clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
+ smp_mb__after_clear_bit();
+}
+
#define outside(b, first, last) ((b) < (first) || (b) >= (last))
#define inside(b, first, last) ((b) >= (first) && (b) < (last))
@@ -118,10 +147,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
brelse(bh);
bh = ERR_PTR(err);
} else {
- lock_buffer(bh);
memset(bh->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bh);
- unlock_buffer(bh);
}
return bh;
@@ -132,8 +159,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
* If that fails, restart the transaction & regain write access for the
* buffer head which is used for block_bitmap modifications.
*/
-static int extend_or_restart_transaction(handle_t *handle, int thresh,
- struct buffer_head *bh)
+static int extend_or_restart_transaction(handle_t *handle, int thresh)
{
int err;
@@ -144,9 +170,8 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh,
if (err < 0)
return err;
if (err) {
- if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
- return err;
- if ((err = ext4_journal_get_write_access(handle, bh)))
+ err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
+ if (err)
return err;
}
@@ -181,21 +206,7 @@ static int setup_new_group_blocks(struct super_block *sb,
if (IS_ERR(handle))
return PTR_ERR(handle);
- mutex_lock(&sbi->s_resize_lock);
- if (input->group != sbi->s_groups_count) {
- err = -EBUSY;
- goto exit_journal;
- }
-
- if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
- err = PTR_ERR(bh);
- goto exit_journal;
- }
-
- if (ext4_bg_has_super(sb, input->group)) {
- ext4_debug("mark backup superblock %#04llx (+0)\n", start);
- ext4_set_bit(0, bh->b_data);
- }
+ BUG_ON(input->group != sbi->s_groups_count);
/* Copy all of the GDT blocks into the backup in this group */
for (i = 0, bit = 1, block = start + 1;
@@ -203,29 +214,26 @@ static int setup_new_group_blocks(struct super_block *sb,
struct buffer_head *gdb;
ext4_debug("update backup group %#04llx (+%d)\n", block, bit);
-
- if ((err = extend_or_restart_transaction(handle, 1, bh)))
- goto exit_bh;
+ err = extend_or_restart_transaction(handle, 1);
+ if (err)
+ goto exit_journal;
gdb = sb_getblk(sb, block);
if (!gdb) {
err = -EIO;
- goto exit_bh;
+ goto exit_journal;
}
if ((err = ext4_journal_get_write_access(handle, gdb))) {
brelse(gdb);
- goto exit_bh;
+ goto exit_journal;
}
- lock_buffer(gdb);
memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
set_buffer_uptodate(gdb);
- unlock_buffer(gdb);
err = ext4_handle_dirty_metadata(handle, NULL, gdb);
if (unlikely(err)) {
brelse(gdb);
- goto exit_bh;
+ goto exit_journal;
}
- ext4_set_bit(bit, bh->b_data);
brelse(gdb);
}
@@ -235,9 +243,22 @@ static int setup_new_group_blocks(struct super_block *sb,
err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
GFP_NOFS);
if (err)
- goto exit_bh;
- for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++)
- ext4_set_bit(bit, bh->b_data);
+ goto exit_journal;
+
+ err = extend_or_restart_transaction(handle, 2);
+ if (err)
+ goto exit_journal;
+
+ bh = bclean(handle, sb, input->block_bitmap);
+ if (IS_ERR(bh)) {
+ err = PTR_ERR(bh);
+ goto exit_journal;
+ }
+
+ if (ext4_bg_has_super(sb, input->group)) {
+ ext4_debug("mark backup group tables %#04llx (+0)\n", start);
+ ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb + 1);
+ }
ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
input->block_bitmap - start);
@@ -253,12 +274,9 @@ static int setup_new_group_blocks(struct super_block *sb,
err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
if (err)
goto exit_bh;
- for (i = 0, bit = input->inode_table - start;
- i < sbi->s_itb_per_group; i++, bit++)
- ext4_set_bit(bit, bh->b_data);
+ ext4_set_bits(bh->b_data, input->inode_table - start,
+ sbi->s_itb_per_group);
- if ((err = extend_or_restart_transaction(handle, 2, bh)))
- goto exit_bh;
ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
bh->b_data);
@@ -285,7 +303,6 @@ exit_bh:
brelse(bh);
exit_journal:
- mutex_unlock(&sbi->s_resize_lock);
if ((err2 = ext4_journal_stop(handle)) && !err)
err = err2;
@@ -377,15 +394,15 @@ static int verify_reserved_gdb(struct super_block *sb,
* fail once we start modifying the data on disk, because JBD has no rollback.
*/
static int add_new_gdb(handle_t *handle, struct inode *inode,
- struct ext4_new_group_data *input,
- struct buffer_head **primary)
+ ext4_group_t group)
{
struct super_block *sb = inode->i_sb;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
- unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
+ unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
struct buffer_head **o_group_desc, **n_group_desc;
struct buffer_head *dind;
+ struct buffer_head *gdb_bh;
int gdbackups;
struct ext4_iloc iloc;
__le32 *data;
@@ -408,11 +425,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
return -EPERM;
}
- *primary = sb_bread(sb, gdblock);
- if (!*primary)
+ gdb_bh = sb_bread(sb, gdblock);
+ if (!gdb_bh)
return -EIO;
- if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
+ gdbackups = verify_reserved_gdb(sb, gdb_bh);
+ if (gdbackups < 0) {
err = gdbackups;
goto exit_bh;
}
@@ -427,7 +445,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
data = (__le32 *)dind->b_data;
if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
ext4_warning(sb, "new group %u GDT block %llu not reserved",
- input->group, gdblock);
+ group, gdblock);
err = -EINVAL;
goto exit_dind;
}
@@ -436,7 +454,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
if (unlikely(err))
goto exit_dind;
- err = ext4_journal_get_write_access(handle, *primary);
+ err = ext4_journal_get_write_access(handle, gdb_bh);
if (unlikely(err))
goto exit_sbh;
@@ -449,12 +467,13 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
if (unlikely(err))
goto exit_dindj;
- n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
- GFP_NOFS);
+ n_group_desc = ext4_kvmalloc((gdb_num + 1) *
+ sizeof(struct buffer_head *),
+ GFP_NOFS);
if (!n_group_desc) {
err = -ENOMEM;
- ext4_warning(sb,
- "not enough memory for %lu groups", gdb_num + 1);
+ ext4_warning(sb, "not enough memory for %lu groups",
+ gdb_num + 1);
goto exit_inode;
}
@@ -475,8 +494,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
}
inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
ext4_mark_iloc_dirty(handle, inode, &iloc);
- memset((*primary)->b_data, 0, sb->s_blocksize);
- err = ext4_handle_dirty_metadata(handle, NULL, *primary);
+ memset(gdb_bh->b_data, 0, sb->s_blocksize);
+ err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
goto exit_inode;
@@ -486,10 +505,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
o_group_desc = EXT4_SB(sb)->s_group_desc;
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
- n_group_desc[gdb_num] = *primary;
+ n_group_desc[gdb_num] = gdb_bh;
EXT4_SB(sb)->s_group_desc = n_group_desc;
EXT4_SB(sb)->s_gdb_count++;
- kfree(o_group_desc);
+ ext4_kvfree(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
@@ -499,6 +518,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
return err;
exit_inode:
+ ext4_kvfree(n_group_desc);
/* ext4_handle_release_buffer(handle, iloc.bh); */
brelse(iloc.bh);
exit_dindj:
@@ -508,7 +528,7 @@ exit_sbh:
exit_dind:
brelse(dind);
exit_bh:
- brelse(*primary);
+ brelse(gdb_bh);
ext4_debug("leaving with error %d\n", err);
return err;
@@ -528,7 +548,7 @@ exit_bh:
* backup GDT blocks are stored in their reserved primary GDT block.
*/
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
- struct ext4_new_group_data *input)
+ ext4_group_t group)
{
struct super_block *sb = inode->i_sb;
int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
@@ -599,7 +619,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
* Finally we can add each of the reserved backup GDT blocks from
* the new group to its reserved primary GDT block.
*/
- blk = input->group * EXT4_BLOCKS_PER_GROUP(sb);
+ blk = group * EXT4_BLOCKS_PER_GROUP(sb);
for (i = 0; i < reserved_gdb; i++) {
int err2;
data = (__le32 *)primary[i]->b_data;
@@ -799,13 +819,6 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
goto exit_put;
}
- mutex_lock(&sbi->s_resize_lock);
- if (input->group != sbi->s_groups_count) {
- ext4_warning(sb, "multiple resizers run on filesystem!");
- err = -EBUSY;
- goto exit_journal;
- }
-
if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
goto exit_journal;
@@ -820,16 +833,25 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
if ((err = ext4_journal_get_write_access(handle, primary)))
goto exit_journal;
- if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
- (err = reserve_backup_gdb(handle, inode, input)))
+ if (reserved_gdb && ext4_bg_num_gdb(sb, input->group)) {
+ err = reserve_backup_gdb(handle, inode, input->group);
+ if (err)
+ goto exit_journal;
+ }
+ } else {
+ /*
+ * Note that we can access the new group descriptor block safely
+ * only if add_new_gdb() succeeds.
+ */
+ err = add_new_gdb(handle, inode, input->group);
+ if (err)
goto exit_journal;
- } else if ((err = add_new_gdb(handle, inode, input, &primary)))
- goto exit_journal;
+ primary = sbi->s_group_desc[gdb_num];
+ }
/*
* OK, now we've set up the new group. Time to make it active.
*
- * We do not lock all allocations via s_resize_lock
 * so we have to be safe wrt. concurrent accesses to the group
* data. So we need to be careful to set all of the relevant
* group descriptor data etc. *before* we enable the group.
@@ -886,13 +908,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
*
* The precise rules we use are:
*
- * * Writers of s_groups_count *must* hold s_resize_lock
- * AND
* * Writers must perform a smp_wmb() after updating all dependent
* data and before modifying the groups count
*
- * * Readers must hold s_resize_lock over the access
- * OR
* * Readers must perform an smp_rmb() after reading the groups count
* and before reading any dependent data.
*
@@ -937,10 +955,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
ext4_handle_dirty_super(handle, sb);
exit_journal:
- mutex_unlock(&sbi->s_resize_lock);
if ((err2 = ext4_journal_stop(handle)) && !err)
err = err2;
- if (!err) {
+ if (!err && primary) {
update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
sizeof(struct ext4_super_block));
update_backups(sb, primary->b_blocknr, primary->b_data,
@@ -969,16 +986,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
ext4_grpblk_t add;
struct buffer_head *bh;
handle_t *handle;
- int err;
+ int err, err2;
ext4_group_t group;
- /* We don't need to worry about locking wrt other resizers just
- * yet: we're going to revalidate es->s_blocks_count after
- * taking the s_resize_lock below. */
o_blocks_count = ext4_blocks_count(es);
if (test_opt(sb, DEBUG))
- printk(KERN_DEBUG "EXT4-fs: extending last group from %llu uto %llu blocks\n",
+ printk(KERN_DEBUG "EXT4-fs: extending last group from %llu to %llu blocks\n",
o_blocks_count, n_blocks_count);
if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
@@ -995,7 +1009,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
if (n_blocks_count < o_blocks_count) {
ext4_warning(sb, "can't shrink FS - resize aborted");
- return -EBUSY;
+ return -EINVAL;
}
/* Handle the remaining blocks in the last group only. */
@@ -1038,32 +1052,25 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
goto exit_put;
}
- mutex_lock(&EXT4_SB(sb)->s_resize_lock);
- if (o_blocks_count != ext4_blocks_count(es)) {
- ext4_warning(sb, "multiple resizers run on filesystem!");
- mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
- ext4_journal_stop(handle);
- err = -EBUSY;
- goto exit_put;
- }
-
if ((err = ext4_journal_get_write_access(handle,
EXT4_SB(sb)->s_sbh))) {
ext4_warning(sb, "error %d on journal write access", err);
- mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
ext4_journal_stop(handle);
goto exit_put;
}
ext4_blocks_count_set(es, o_blocks_count + add);
- mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
o_blocks_count + add);
/* We add the blocks to the bitmap and set the group need init bit */
- ext4_add_groupblocks(handle, sb, o_blocks_count, add);
+ err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
ext4_handle_dirty_super(handle, sb);
ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
o_blocks_count + add);
- if ((err = ext4_journal_stop(handle)))
+ err2 = ext4_journal_stop(handle);
+ if (!err && err2)
+ err = err2;
+
+ if (err)
goto exit_put;
if (test_opt(sb, DEBUG))
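
The s_resize_lock mutex is replaced above by a single EXT4_RESIZING bit in s_resize_flags: ext4_resize_begin() fails with -EBUSY if the bit is already set, and ext4_resize_end() clears it with release semantics. The same one-resizer-at-a-time guard can be sketched in stand-alone C with an atomic flag (illustrative only, not the kernel bit primitives):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resizing = ATOMIC_FLAG_INIT;

/* test_and_set_bit_lock() analogue: only one resizer may proceed. */
static int resize_begin(void)
{
	if (atomic_flag_test_and_set_explicit(&resizing, memory_order_acquire))
		return -16;	/* -EBUSY */
	return 0;
}

/* clear_bit_unlock() analogue: release the flag when the resize is done. */
static void resize_end(void)
{
	atomic_flag_clear_explicit(&resizing, memory_order_release);
}

int main(void)
{
	printf("first  begin: %d\n", resize_begin());	/* 0   */
	printf("second begin: %d\n", resize_begin());	/* -16 */
	resize_end();
	printf("after  end:   %d\n", resize_begin());	/* 0   */
	return 0;
}
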
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9ea71aa864b3..4687fea0c00f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -110,6 +110,35 @@ static struct file_system_type ext3_fs_type = {
#define IS_EXT3_SB(sb) (0)
#endif
+void *ext4_kvmalloc(size_t size, gfp_t flags)
+{
+ void *ret;
+
+ ret = kmalloc(size, flags);
+ if (!ret)
+ ret = __vmalloc(size, flags, PAGE_KERNEL);
+ return ret;
+}
+
+void *ext4_kvzalloc(size_t size, gfp_t flags)
+{
+ void *ret;
+
+ ret = kzalloc(size, flags);
+ if (!ret)
+ ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
+ return ret;
+}
+
+void ext4_kvfree(void *ptr)
+{
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+
+}
+
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
@@ -269,6 +298,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
journal_t *journal;
handle_t *handle;
+ trace_ext4_journal_start(sb, nblocks, _RET_IP_);
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
@@ -789,11 +819,8 @@ static void ext4_put_super(struct super_block *sb)
for (i = 0; i < sbi->s_gdb_count; i++)
brelse(sbi->s_group_desc[i]);
- kfree(sbi->s_group_desc);
- if (is_vmalloc_addr(sbi->s_flex_groups))
- vfree(sbi->s_flex_groups);
- else
- kfree(sbi->s_flex_groups);
+ ext4_kvfree(sbi->s_group_desc);
+ ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -1976,15 +2003,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
size = flex_group_count * sizeof(struct flex_groups);
- sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
+ sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
if (sbi->s_flex_groups == NULL) {
- sbi->s_flex_groups = vzalloc(size);
- if (sbi->s_flex_groups == NULL) {
- ext4_msg(sb, KERN_ERR,
- "not enough memory for %u flex groups",
- flex_group_count);
- goto failed;
- }
+ ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
+ flex_group_count);
+ goto failed;
}
for (i = 0; i < sbi->s_groups_count; i++) {
@@ -2383,17 +2406,25 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
unsigned long stripe_width =
le32_to_cpu(sbi->s_es->s_raid_stripe_width);
+ int ret;
if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
- return sbi->s_stripe;
-
- if (stripe_width <= sbi->s_blocks_per_group)
- return stripe_width;
+ ret = sbi->s_stripe;
+ else if (stripe_width <= sbi->s_blocks_per_group)
+ ret = stripe_width;
+ else if (stride <= sbi->s_blocks_per_group)
+ ret = stride;
+ else
+ ret = 0;
- if (stride <= sbi->s_blocks_per_group)
- return stride;
+ /*
+ * If the resulting stripe size is 0 or 1, striping makes no sense,
+ * so we set it to 0 to turn off the stripe handling code.
+ */
+ if (ret <= 1)
+ ret = 0;
- return 0;
+ return ret;
}
 /* sysfs support */
@@ -3408,8 +3439,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
- sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
- GFP_KERNEL);
+ sbi->s_group_desc = ext4_kvmalloc(db_count *
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory");
goto failed_mount;
@@ -3491,7 +3523,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
- mutex_init(&sbi->s_resize_lock);
+ sbi->s_resize_flags = 0;
sb->s_root = NULL;
@@ -3741,12 +3773,8 @@ failed_mount_wq:
}
failed_mount3:
del_timer(&sbi->s_err_report);
- if (sbi->s_flex_groups) {
- if (is_vmalloc_addr(sbi->s_flex_groups))
- vfree(sbi->s_flex_groups);
- else
- kfree(sbi->s_flex_groups);
- }
+ if (sbi->s_flex_groups)
+ ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -3756,7 +3784,7 @@ failed_mount3:
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
- kfree(sbi->s_group_desc);
+ ext4_kvfree(sbi->s_group_desc);
failed_mount:
if (sbi->s_proc) {
remove_proc_entry(sb->s_id, ext4_proc_root);
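
ext4_get_stripe_size() above now funnels the three candidates (s_stripe, RAID stripe width, RAID stride) through a single exit and disables striping when the result is 0 or 1. The selection is plain arithmetic and can be exercised in a stand-alone C sketch (parameter names are illustrative, not the sbi fields):

#include <stdio.h>

/* Pick the first candidate that fits in a block group, then discard a
 * result of 0 or 1, which would only add overhead. */
static unsigned long get_stripe_size(unsigned long s_stripe,
				     unsigned long stripe_width,
				     unsigned long stride,
				     unsigned long blocks_per_group)
{
	unsigned long ret;

	if (s_stripe && s_stripe <= blocks_per_group)
		ret = s_stripe;
	else if (stripe_width <= blocks_per_group)
		ret = stripe_width;
	else if (stride <= blocks_per_group)
		ret = stride;
	else
		ret = 0;

	if (ret <= 1)
		ret = 0;
	return ret;
}

int main(void)
{
	printf("%lu\n", get_stripe_size(0, 128, 64, 32768));	/* 128 */
	printf("%lu\n", get_stripe_size(0, 1, 0, 32768));	/* 0: width 1 is disabled */
	return 0;
}
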
diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
new file mode 100644
index 000000000000..011ba6670d99
--- /dev/null
+++ b/fs/ext4/truncate.h
@@ -0,0 +1,43 @@
+/*
+ * linux/fs/ext4/truncate.h
+ *
+ * Common inline functions needed for truncate support
+ */
+
+/*
+ * Truncate blocks that were not used by write. We have to truncate the
+ * pagecache as well so that corresponding buffers get properly unmapped.
+ */
+static inline void ext4_truncate_failed_write(struct inode *inode)
+{
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
+ ext4_truncate(inode);
+}
+
+/*
+ * Work out how many blocks we need to proceed with the next chunk of a
+ * truncate transaction.
+ */
+static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
+{
+ ext4_lblk_t needed;
+
+ needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
+
+ /* Give ourselves just enough room to cope with inodes in which
+ * i_blocks is corrupt: we've seen disk corruptions in the past
+ * which resulted in random data in an inode which looked enough
+ * like a regular file for ext4 to try to delete it. Things
+ * will go a bit crazy if that happens, but at least we should
+ * try not to panic the whole kernel. */
+ if (needed < 2)
+ needed = 2;
+
+ /* But we need to bound the transaction so we don't overflow the
+ * journal. */
+ if (needed > EXT4_MAX_TRANS_DATA)
+ needed = EXT4_MAX_TRANS_DATA;
+
+ return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
+}
+
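
ext4_blocks_for_truncate() above sizes the journal credits for one chunk of a truncate: i_blocks (counted in 512-byte sectors) is converted to filesystem blocks, clamped to at least 2 to survive a corrupt i_blocks and to at most EXT4_MAX_TRANS_DATA to keep the transaction bounded, and the fixed per-transaction data cost is added. A user-space sketch with stand-in constants (the real values come from ext4_jbd2.h):

#include <stdio.h>

#define MAX_TRANS_DATA		64UL	/* stand-in for EXT4_MAX_TRANS_DATA */
#define DATA_TRANS_BLOCKS	8UL	/* stand-in for EXT4_DATA_TRANS_BLOCKS() */

static unsigned long blocks_for_truncate(unsigned long long i_blocks,
					  unsigned int blocksize_bits)
{
	/* i_blocks counts 512-byte sectors; convert to filesystem blocks. */
	unsigned long needed = i_blocks >> (blocksize_bits - 9);

	if (needed < 2)			/* guard against a corrupt i_blocks */
		needed = 2;
	if (needed > MAX_TRANS_DATA)	/* keep the transaction bounded */
		needed = MAX_TRANS_DATA;

	return DATA_TRANS_BLOCKS + needed;
}

int main(void)
{
	printf("%lu\n", blocks_for_truncate(0, 12));		/* 8 + 2  */
	printf("%lu\n", blocks_for_truncate(1ULL << 20, 12));	/* 8 + 64, clamped */
	return 0;
}
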
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index d5e33a077a67..d0dddaceac59 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -82,18 +82,14 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
return PTR_ERR(acl);
}
if (acl) {
- mode_t mode;
-
error = posix_acl_valid(acl);
if (error)
goto failed;
switch (type) {
case ACL_TYPE_ACCESS:
- mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
goto failed;
- inode->i_mode = mode;
inode->i_ctime = CURRENT_TIME;
if (error == 0) {
posix_acl_release(acl);
@@ -125,21 +121,20 @@ int
generic_acl_init(struct inode *inode, struct inode *dir)
{
struct posix_acl *acl = NULL;
- mode_t mode = inode->i_mode;
int error;
- inode->i_mode = mode & ~current_umask();
if (!S_ISLNK(inode->i_mode))
acl = get_cached_acl(dir, ACL_TYPE_DEFAULT);
if (acl) {
if (S_ISDIR(inode->i_mode))
set_cached_acl(inode, ACL_TYPE_DEFAULT, acl);
- error = posix_acl_create(&acl, GFP_KERNEL, &mode);
+ error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
if (error < 0)
return error;
- inode->i_mode = mode;
if (error > 0)
set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
+ } else {
+ inode->i_mode &= ~current_umask();
}
error = 0;
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 884c9af0542f..34501b64bc47 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -72,7 +72,7 @@ struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
return gfs2_acl_get(GFS2_I(inode), type);
}
-static int gfs2_set_mode(struct inode *inode, mode_t mode)
+static int gfs2_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
@@ -117,7 +117,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct posix_acl *acl;
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
int error = 0;
if (!sdp->sd_args.ar_posix_acl)
@@ -276,7 +276,7 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
goto out_release;
if (type == ACL_TYPE_ACCESS) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
if (error <= 0) {
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 8635be5ffd97..970ea987b3f6 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -16,6 +16,7 @@
#include <linux/statfs.h>
#include <linux/types.h>
#include <linux/pid_namespace.h>
+#include <linux/namei.h>
#include <asm/uaccess.h>
#include "os.h"
diff --git a/fs/inode.c b/fs/inode.c
index d0c72ff6b30e..73920d555c88 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -143,6 +143,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_op = &empty_iops;
inode->i_fop = &empty_fops;
inode->i_nlink = 1;
+ inode->i_opflags = 0;
inode->i_uid = 0;
inode->i_gid = 0;
atomic_set(&inode->i_writecount, 0);
@@ -399,12 +400,12 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
EXPORT_SYMBOL(__insert_inode_hash);
/**
- * remove_inode_hash - remove an inode from the hash
+ * __remove_inode_hash - remove an inode from the hash
* @inode: inode to unhash
*
* Remove an inode from the superblock.
*/
-void remove_inode_hash(struct inode *inode)
+void __remove_inode_hash(struct inode *inode)
{
spin_lock(&inode_hash_lock);
spin_lock(&inode->i_lock);
@@ -412,7 +413,7 @@ void remove_inode_hash(struct inode *inode)
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
}
-EXPORT_SYMBOL(remove_inode_hash);
+EXPORT_SYMBOL(__remove_inode_hash);
void end_writeback(struct inode *inode)
{
@@ -454,7 +455,9 @@ static void evict(struct inode *inode)
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(!list_empty(&inode->i_lru));
- inode_wb_list_del(inode);
+ if (!list_empty(&inode->i_wb_list))
+ inode_wb_list_del(inode);
+
inode_sb_list_del(inode);
if (op->evict_inode) {
@@ -1328,7 +1331,8 @@ static void iput_final(struct inode *inode)
}
inode->i_state |= I_FREEING;
- inode_lru_list_del(inode);
+ if (!list_empty(&inode->i_lru))
+ inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
evict(inode);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 2c62c5aae82f..16a698bd906d 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -257,9 +257,12 @@ static void
__flush_batch(journal_t *journal, int *batch_count)
{
int i;
+ struct blk_plug plug;
+ blk_start_plug(&plug);
for (i = 0; i < *batch_count; i++)
- write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
+ write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
+ blk_finish_plug(&plug);
for (i = 0; i < *batch_count; i++) {
struct buffer_head *bh = journal->j_chkpt_bhs[i];
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0dfa5b598e68..f24df13adc4e 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2390,73 +2390,6 @@ static void __exit journal_exit(void)
jbd2_journal_destroy_caches();
}
-/*
- * jbd2_dev_to_name is a utility function used by the jbd2 and ext4
- * tracing infrastructure to map a dev_t to a device name.
- *
- * The caller should use rcu_read_lock() in order to make sure the
- * device name stays valid until its done with it. We use
- * rcu_read_lock() as well to make sure we're safe in case the caller
- * gets sloppy, and because rcu_read_lock() is cheap and can be safely
- * nested.
- */
-struct devname_cache {
- struct rcu_head rcu;
- dev_t device;
- char devname[BDEVNAME_SIZE];
-};
-#define CACHE_SIZE_BITS 6
-static struct devname_cache *devcache[1 << CACHE_SIZE_BITS];
-static DEFINE_SPINLOCK(devname_cache_lock);
-
-static void free_devcache(struct rcu_head *rcu)
-{
- kfree(rcu);
-}
-
-const char *jbd2_dev_to_name(dev_t device)
-{
- int i = hash_32(device, CACHE_SIZE_BITS);
- char *ret;
- struct block_device *bd;
- static struct devname_cache *new_dev;
-
- rcu_read_lock();
- if (devcache[i] && devcache[i]->device == device) {
- ret = devcache[i]->devname;
- rcu_read_unlock();
- return ret;
- }
- rcu_read_unlock();
-
- new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
- if (!new_dev)
- return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
- bd = bdget(device);
- spin_lock(&devname_cache_lock);
- if (devcache[i]) {
- if (devcache[i]->device == device) {
- kfree(new_dev);
- bdput(bd);
- ret = devcache[i]->devname;
- spin_unlock(&devname_cache_lock);
- return ret;
- }
- call_rcu(&devcache[i]->rcu, free_devcache);
- }
- devcache[i] = new_dev;
- devcache[i]->device = device;
- if (bd) {
- bdevname(bd, devcache[i]->devname);
- bdput(bd);
- } else
- __bdevname(device, devcache[i]->devname);
- ret = devcache[i]->devname;
- spin_unlock(&devname_cache_lock);
- return ret;
-}
-EXPORT_SYMBOL(jbd2_dev_to_name);
-
MODULE_LICENSE("GPL");
module_init(journal_init);
module_exit(journal_exit);
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 27c511a1cf05..926d02068a14 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -227,7 +227,7 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
case ACL_TYPE_ACCESS:
xprefix = JFFS2_XPREFIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
rc = posix_acl_equiv_mode(acl, &mode);
if (rc < 0)
return rc;
@@ -259,7 +259,7 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return rc;
}
-int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, mode_t *i_mode)
+int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, umode_t *i_mode)
{
struct posix_acl *acl;
int rc;
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index b3421c78d9f8..9b477246f2a6 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -28,7 +28,7 @@ struct jffs2_acl_header {
struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
extern int jffs2_acl_chmod(struct inode *);
-extern int jffs2_init_acl_pre(struct inode *, struct inode *, mode_t *);
+extern int jffs2_init_acl_pre(struct inode *, struct inode *, umode_t *);
extern int jffs2_init_acl_post(struct inode *);
extern const struct xattr_handler jffs2_acl_access_xattr_handler;
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index b81b35ddf4e4..bbcb9755dd2b 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -406,7 +406,7 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
fill in the raw_inode while you're at it. */
-struct inode *jffs2_new_inode (struct inode *dir_i, mode_t mode, struct jffs2_raw_inode *ri)
+struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
struct inode *inode;
struct super_block *sb = dir_i->i_sb;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 526979c607b6..6c1755c59c0f 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -173,7 +173,7 @@ int jffs2_do_setattr (struct inode *, struct iattr *);
struct inode *jffs2_iget(struct super_block *, unsigned long);
void jffs2_evict_inode (struct inode *);
void jffs2_dirty_inode(struct inode *inode, int flags);
-struct inode *jffs2_new_inode (struct inode *dir_i, mode_t mode,
+struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode,
struct jffs2_raw_inode *ri);
int jffs2_statfs (struct dentry *, struct kstatfs *);
int jffs2_remount_fs (struct super_block *, int *, char *);
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index b3a32caf2b45..45559dc3ea2f 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -127,16 +127,14 @@ int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir)
return PTR_ERR(acl);
if (acl) {
- mode_t mode = inode->i_mode;
if (S_ISDIR(inode->i_mode)) {
rc = jfs_set_acl(tid, inode, ACL_TYPE_DEFAULT, acl);
if (rc)
goto cleanup;
}
- rc = posix_acl_create(&acl, GFP_KERNEL, &mode);
+ rc = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
if (rc < 0)
goto cleanup; /* posix_acl_release(NULL) is no-op */
- inode->i_mode = mode;
if (rc > 0)
rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, acl);
cleanup:
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 24838f1eeee5..e87fedef23db 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -693,8 +693,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
return rc;
}
if (acl) {
- mode_t mode = inode->i_mode;
- rc = posix_acl_equiv_mode(acl, &mode);
+ rc = posix_acl_equiv_mode(acl, &inode->i_mode);
posix_acl_release(acl);
if (rc < 0) {
printk(KERN_ERR
@@ -702,7 +701,6 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
rc);
return rc;
}
- inode->i_mode = mode;
mark_inode_dirty(inode);
}
/*
diff --git a/fs/namei.c b/fs/namei.c
index f8c69d373793..2826db35dc25 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -179,19 +179,14 @@ static int check_acl(struct inode *inode, int mask)
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *acl;
- /*
- * Under RCU walk, we cannot even do a "get_cached_acl()",
- * because that involves locking and getting a refcount on
- * a cached ACL.
- *
- * So the only case we handle during RCU walking is the
- * case of a cached "no ACL at all", which needs no locks
- * or refcounts.
- */
if (mask & MAY_NOT_BLOCK) {
- if (negative_cached_acl(inode, ACL_TYPE_ACCESS))
+ acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
+ if (!acl)
return -EAGAIN;
- return -ECHILD;
+ /* no ->get_acl() calls in RCU mode... */
+ if (acl == ACL_NOT_CACHED)
+ return -ECHILD;
+ return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
}
acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
@@ -313,6 +308,26 @@ int generic_permission(struct inode *inode, int mask)
return -EACCES;
}
+/*
+ * We _really_ want to just do "generic_permission()" without
+ * even looking at the inode->i_op values. So we keep a cache
+ * flag in inode->i_opflags that says "this has no special
+ * permission function, use the fast case".
+ */
+static inline int do_inode_permission(struct inode *inode, int mask)
+{
+ if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
+ if (likely(inode->i_op->permission))
+ return inode->i_op->permission(inode, mask);
+
+ /* This gets set once for the inode lifetime */
+ spin_lock(&inode->i_lock);
+ inode->i_opflags |= IOP_FASTPERM;
+ spin_unlock(&inode->i_lock);
+ }
+ return generic_permission(inode, mask);
+}
+
/**
* inode_permission - check for access rights to a given inode
* @inode: inode to check permission on
@@ -327,7 +342,7 @@ int inode_permission(struct inode *inode, int mask)
{
int retval;
- if (mask & MAY_WRITE) {
+ if (unlikely(mask & MAY_WRITE)) {
umode_t mode = inode->i_mode;
/*
@@ -344,11 +359,7 @@ int inode_permission(struct inode *inode, int mask)
return -EACCES;
}
- if (inode->i_op->permission)
- retval = inode->i_op->permission(inode, mask);
- else
- retval = generic_permission(inode, mask);
-
+ retval = do_inode_permission(inode, mask);
if (retval)
return retval;
@@ -716,19 +727,25 @@ static int follow_automount(struct path *path, unsigned flags,
if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
return -EISDIR; /* we actually want to stop here */
- /* We want to mount if someone is trying to open/create a file of any
- * type under the mountpoint, wants to traverse through the mountpoint
- * or wants to open the mounted directory.
- *
+ /*
* We don't want to mount if someone's just doing a stat and they've
* set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
* appended a '/' to the name.
*/
- if (!(flags & LOOKUP_FOLLOW) &&
- !(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
- LOOKUP_OPEN | LOOKUP_CREATE)))
- return -EISDIR;
-
+ if (!(flags & LOOKUP_FOLLOW)) {
+ /* We do, however, want to mount if someone wants to open or
+ * create a file of any type under the mountpoint, wants to
+ * traverse through the mountpoint or wants to open the mounted
+ * directory.
+ * Also, autofs may mark negative dentries as being automount
+ * points. These will need the attention of the daemon to
+ * instantiate them before they can be used.
+ */
+ if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
+ LOOKUP_OPEN | LOOKUP_CREATE)) &&
+ path->dentry->d_inode)
+ return -EISDIR;
+ }
current->total_link_count++;
if (current->total_link_count >= 40)
return -ELOOP;
@@ -1244,6 +1261,26 @@ static void terminate_walk(struct nameidata *nd)
}
}
+/*
+ * Do we need to follow links? We _really_ want to be able
+ * to do this check without having to look at inode->i_op,
+ * so we keep a cache of "no, this doesn't need follow_link"
+ * for the common case.
+ */
+static inline int should_follow_link(struct inode *inode, int follow)
+{
+ if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
+ if (likely(inode->i_op->follow_link))
+ return follow;
+
+ /* This gets set once for the inode lifetime */
+ spin_lock(&inode->i_lock);
+ inode->i_opflags |= IOP_NOFOLLOW;
+ spin_unlock(&inode->i_lock);
+ }
+ return 0;
+}
+
static inline int walk_component(struct nameidata *nd, struct path *path,
struct qstr *name, int type, int follow)
{
@@ -1266,7 +1303,7 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
terminate_walk(nd);
return -ENOENT;
}
- if (unlikely(inode->i_op->follow_link) && follow) {
+ if (should_follow_link(inode, follow)) {
if (nd->flags & LOOKUP_RCU) {
if (unlikely(unlazy_walk(nd, path->dentry))) {
terminate_walk(nd);
@@ -1319,6 +1356,26 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
}
/*
+ * We really don't want to look at inode->i_op->lookup
+ * when we don't have to. So we keep a cache bit in
+ * the inode ->i_opflags field that says "yes, we can
+ * do lookup on this inode".
+ */
+static inline int can_lookup(struct inode *inode)
+{
+ if (likely(inode->i_opflags & IOP_LOOKUP))
+ return 1;
+ if (likely(!inode->i_op->lookup))
+ return 0;
+
+ /* We do this once for the lifetime of the inode */
+ spin_lock(&inode->i_lock);
+ inode->i_opflags |= IOP_LOOKUP;
+ spin_unlock(&inode->i_lock);
+ return 1;
+}
+
+/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
@@ -1397,10 +1454,10 @@ static int link_path_walk(const char *name, struct nameidata *nd)
if (err)
return err;
}
+ if (can_lookup(nd->inode))
+ continue;
err = -ENOTDIR;
- if (!nd->inode->i_op->lookup)
- break;
- continue;
+ break;
/* here ends the main loop */
last_component:
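
The three helpers added above (do_inode_permission(), should_follow_link(), can_lookup()) share one caching pattern: on the first call the inode's i_op table is consulted, and if no custom hook exists, a bit is set in the new i_opflags field so every later call takes the fast path without touching i_op. A minimal stand-alone C sketch of the pattern (the struct, flag and return values are stand-ins, not the VFS types):

#include <stdio.h>

#define IOP_FASTPERM	0x0001	/* stand-in for the new i_opflags bit */

struct fake_inode {
	unsigned int i_opflags;
	int (*permission)(struct fake_inode *inode, int mask); /* optional hook */
};

static int generic_permission(struct fake_inode *inode, int mask)
{
	(void)inode;
	(void)mask;
	return 0;	/* pretend access is granted */
}

/* First call: consult the op table; if there is no hook, cache that fact so
 * later calls skip the indirect lookup.  The kernel sets the bit under
 * inode->i_lock; this single-threaded sketch omits the locking. */
static int do_inode_permission(struct fake_inode *inode, int mask)
{
	if (!(inode->i_opflags & IOP_FASTPERM)) {
		if (inode->permission)
			return inode->permission(inode, mask);
		inode->i_opflags |= IOP_FASTPERM;
	}
	return generic_permission(inode, mask);
}

int main(void)
{
	struct fake_inode inode = { 0, NULL };

	do_inode_permission(&inode, 4);
	printf("fast path cached: %u\n", inode.i_opflags & IOP_FASTPERM); /* 1 */
	return 0;
}
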
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index e49e73107e62..7ef23979896d 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -415,7 +415,7 @@ fail:
}
int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
- mode_t mode)
+ umode_t mode)
{
struct posix_acl *dfacl, *acl;
int error = 0;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 38053d823eb0..85f1690ca08c 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -316,7 +316,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nfs_open_context *ctx)
{
struct nfs3_createdata *data;
- mode_t mode = sattr->ia_mode;
+ umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
dprintk("NFS call create %s\n", dentry->d_name.name);
@@ -562,7 +562,7 @@ static int
nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct nfs3_createdata *data;
- int mode = sattr->ia_mode;
+ umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
dprintk("NFS call mkdir %s\n", dentry->d_name.name);
@@ -681,7 +681,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
dev_t rdev)
{
struct nfs3_createdata *data;
- mode_t mode = sattr->ia_mode;
+ umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
dprintk("NFS call mknod %s %u:%u\n", dentry->d_name.name,
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 783c58d9daf1..a7219075b4de 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -247,7 +247,7 @@ static int ocfs2_set_acl(handle_t *handle,
case ACL_TYPE_ACCESS:
name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
ret = posix_acl_equiv_mode(acl, &mode);
if (ret < 0)
return ret;
@@ -351,7 +351,7 @@ int ocfs2_init_acl(handle_t *handle,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
int ret = 0, ret2;
- mode_t mode;
+ umode_t mode;
if (!S_ISLNK(inode->i_mode)) {
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index d43729a760e2..10027b42b7e2 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -149,10 +149,10 @@ posix_acl_valid(const struct posix_acl *acl)
* file mode permission bits, or else 1. Returns -E... on error.
*/
int
-posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p)
+posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
{
const struct posix_acl_entry *pa, *pe;
- mode_t mode = 0;
+ umode_t mode = 0;
int not_equiv = 0;
FOREACH_ACL_ENTRY(pa, acl, pe) {
@@ -188,7 +188,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p)
* Create an ACL representing the file mode permission bits of an inode.
*/
struct posix_acl *
-posix_acl_from_mode(mode_t mode, gfp_t flags)
+posix_acl_from_mode(umode_t mode, gfp_t flags)
{
struct posix_acl *acl = posix_acl_alloc(3, flags);
if (!acl)
@@ -279,11 +279,11 @@ check_perm:
* system calls. All permissions that are not granted by the acl are removed.
* The permissions in the acl are changed to reflect the mode_p parameter.
*/
-static int posix_acl_create_masq(struct posix_acl *acl, mode_t *mode_p)
+static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
{
struct posix_acl_entry *pa, *pe;
struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
- mode_t mode = *mode_p;
+ umode_t mode = *mode_p;
int not_equiv = 0;
/* assert(atomic_read(acl->a_refcount) == 1); */
@@ -336,7 +336,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, mode_t *mode_p)
/*
* Modify the ACL for the chmod syscall.
*/
-static int posix_acl_chmod_masq(struct posix_acl *acl, mode_t mode)
+static int posix_acl_chmod_masq(struct posix_acl *acl, umode_t mode)
{
struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
struct posix_acl_entry *pa, *pe;
@@ -382,7 +382,7 @@ static int posix_acl_chmod_masq(struct posix_acl *acl, mode_t mode)
}
int
-posix_acl_create(struct posix_acl **acl, gfp_t gfp, mode_t *mode_p)
+posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
{
struct posix_acl *clone = posix_acl_clone(*acl, gfp);
int err = -ENOMEM;
@@ -400,7 +400,7 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, mode_t *mode_p)
EXPORT_SYMBOL(posix_acl_create);
int
-posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, mode_t mode)
+posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode)
{
struct posix_acl *clone = posix_acl_clone(*acl, gfp);
int err = -ENOMEM;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 08e3eccf9a12..5eb02069e1b8 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1118,7 +1118,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
* Warn that /proc/pid/oom_adj is deprecated, see
* Documentation/feature-removal-schedule.txt.
*/
- WARN_ONCE(1, "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+ printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
current->comm, task_pid_nr(current), task_pid_nr(task),
task_pid_nr(task));
task->signal->oom_adj = oom_adjust;
@@ -1919,6 +1919,14 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
spin_lock(&files->file_lock);
file = fcheck_files(files, fd);
if (file) {
+ unsigned int f_flags;
+ struct fdtable *fdt;
+
+ fdt = files_fdtable(files);
+ f_flags = file->f_flags & ~O_CLOEXEC;
+ if (FD_ISSET(fd, fdt->close_on_exec))
+ f_flags |= O_CLOEXEC;
+
if (path) {
*path = file->f_path;
path_get(&file->f_path);
@@ -1928,7 +1936,7 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
"pos:\t%lli\n"
"flags:\t0%o\n",
(long long) file->f_pos,
- file->f_flags);
+ f_flags);
spin_unlock(&files->file_lock);
put_files_struct(files);
return 0;
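
With the change above, /proc/<pid>/fdinfo reports O_CLOEXEC from the file-descriptor table's close_on_exec bitmap rather than from file->f_flags, because the flag is a per-descriptor property, not a per-file one. A small user-space sketch of how the reported value is composed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>

/* O_CLOEXEC lives in the fd table, not in file->f_flags, so the reported
 * value is rebuilt from both sources. */
static unsigned int fdinfo_flags(unsigned int f_flags, bool close_on_exec)
{
	unsigned int flags = f_flags & ~O_CLOEXEC;

	if (close_on_exec)
		flags |= O_CLOEXEC;
	return flags;
}

int main(void)
{
	printf("0%o\n", fdinfo_flags(O_RDONLY, true));
	printf("0%o\n", fdinfo_flags(O_RDWR | O_CLOEXEC, false));
	return 0;
}
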
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 977ed2723845..893b961dcfd8 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -39,8 +39,9 @@
#define PSTORE_NAMELEN 64
struct pstore_private {
+ struct pstore_info *psi;
+ enum pstore_type_id type;
u64 id;
- int (*erase)(u64);
ssize_t size;
char data[];
};
@@ -73,7 +74,7 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
{
struct pstore_private *p = dentry->d_inode->i_private;
- p->erase(p->id);
+ p->psi->erase(p->type, p->id, p->psi);
return simple_unlink(dir, dentry);
}
@@ -175,8 +176,8 @@ int pstore_is_mounted(void)
* Set the mtime & ctime to the date that this record was originally stored.
*/
int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id,
- char *data, size_t size,
- struct timespec time, int (*erase)(u64))
+ char *data, size_t size, struct timespec time,
+ struct pstore_info *psi)
{
struct dentry *root = pstore_sb->s_root;
struct dentry *dentry;
@@ -192,8 +193,9 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id,
private = kmalloc(sizeof *private + size, GFP_KERNEL);
if (!private)
goto fail_alloc;
+ private->type = type;
private->id = id;
- private->erase = erase;
+ private->psi = psi;
switch (type) {
case PSTORE_TYPE_DMESG:
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 8c9f23eb1645..611c1b3c46fa 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -2,5 +2,5 @@ extern void pstore_set_kmsg_bytes(int);
extern void pstore_get_records(void);
extern int pstore_mkfile(enum pstore_type_id, char *psname, u64 id,
char *data, size_t size,
- struct timespec time, int (*erase)(u64));
+ struct timespec time, struct pstore_info *psi);
extern int pstore_is_mounted(void);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index f2c3ff20ea68..c5300ec31696 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -37,6 +37,8 @@
static DEFINE_SPINLOCK(pstore_lock);
static struct pstore_info *psinfo;
+static char *backend;
+
/* How much of the console log to snapshot */
static unsigned long kmsg_bytes = 10240;
@@ -67,7 +69,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
unsigned long size, total = 0;
char *dst, *why;
u64 id;
- int hsize, part = 1;
+ int hsize;
+ unsigned int part = 1;
if (reason < ARRAY_SIZE(reason_str))
why = reason_str[reason];
@@ -78,7 +81,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
oopscount++;
while (total < kmsg_bytes) {
dst = psinfo->buf;
- hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part++);
+ hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
size = psinfo->bufsize - hsize;
dst += hsize;
@@ -94,14 +97,16 @@ static void pstore_dump(struct kmsg_dumper *dumper,
memcpy(dst, s1 + s1_start, l1_cpy);
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
- id = psinfo->write(PSTORE_TYPE_DMESG, hsize + l1_cpy + l2_cpy);
+ id = psinfo->write(PSTORE_TYPE_DMESG, part,
+ hsize + l1_cpy + l2_cpy, psinfo);
if (reason == KMSG_DUMP_OOPS && pstore_is_mounted())
pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id,
psinfo->buf, hsize + l1_cpy + l2_cpy,
- CURRENT_TIME, psinfo->erase);
+ CURRENT_TIME, psinfo);
l1 -= l1_cpy;
l2 -= l2_cpy;
total += l1_cpy + l2_cpy;
+ part++;
}
mutex_unlock(&psinfo->buf_mutex);
}
@@ -128,6 +133,12 @@ int pstore_register(struct pstore_info *psi)
spin_unlock(&pstore_lock);
return -EBUSY;
}
+
+ if (backend && strcmp(backend, psi->name)) {
+ spin_unlock(&pstore_lock);
+ return -EINVAL;
+ }
+
psinfo = psi;
spin_unlock(&pstore_lock);
@@ -166,9 +177,9 @@ void pstore_get_records(void)
if (rc)
goto out;
- while ((size = psi->read(&id, &type, &time)) > 0) {
+ while ((size = psi->read(&id, &type, &time, psi)) > 0) {
if (pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size,
- time, psi->erase))
+ time, psi))
failed++;
}
psi->close(psi);
@@ -196,12 +207,15 @@ int pstore_write(enum pstore_type_id type, char *buf, size_t size)
mutex_lock(&psinfo->buf_mutex);
memcpy(psinfo->buf, buf, size);
- id = psinfo->write(type, size);
+ id = psinfo->write(type, 0, size, psinfo);
if (pstore_is_mounted())
pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id, psinfo->buf,
- size, CURRENT_TIME, psinfo->erase);
+ size, CURRENT_TIME, psinfo);
mutex_unlock(&psinfo->buf_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(pstore_write);
+
+module_param(backend, charp, 0444);
+MODULE_PARM_DESC(backend, "Pstore backend to use");
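
pstore now accepts a "backend=" module parameter: pstore_register() refuses any backend whose name does not match it, in addition to the existing one-backend-at-a-time check. The filter reduces to two comparisons, sketched here in stand-alone C (error codes written as plain integers for brevity):

#include <stdio.h>
#include <string.h>

static const char *backend;	/* "backend=" module parameter analogue */
static const char *registered;	/* currently registered backend, if any */

/* Mirror of the two checks: one backend at a time, and if a backend was
 * named on the command line, only that one may register. */
static int pstore_register(const char *name)
{
	if (registered)
		return -16;	/* -EBUSY */
	if (backend && strcmp(backend, name))
		return -22;	/* -EINVAL */
	registered = name;
	return 0;
}

int main(void)
{
	backend = "efi";
	printf("%d\n", pstore_register("erst"));	/* -22 */
	printf("%d\n", pstore_register("efi"));		/* 0   */
	printf("%d\n", pstore_register("efi"));		/* -16 */
	return 0;
}
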
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 7362cf4c946a..6da0396e5052 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -272,12 +272,10 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
case ACL_TYPE_ACCESS:
name = POSIX_ACL_XATTR_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
- inode->i_mode = mode;
if (error == 0)
acl = NULL;
}
@@ -354,8 +352,6 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
return PTR_ERR(acl);
if (acl) {
- mode_t mode = inode->i_mode;
-
/* Copy the default ACL to the default ACL of a new directory */
if (S_ISDIR(inode->i_mode)) {
err = reiserfs_set_acl(th, inode, ACL_TYPE_DEFAULT,
@@ -366,12 +362,10 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
/* Now we reconcile the new ACL and the mode,
potentially modifying both */
- err = posix_acl_create(&acl, GFP_NOFS, &mode);
+ err = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
if (err < 0)
return err;
- inode->i_mode = mode;
-
/* If we need an ACL.. */
if (err > 0)
err = reiserfs_set_acl(th, inode, ACL_TYPE_ACCESS, acl);
diff --git a/fs/stack.c b/fs/stack.c
index 4a6f7f440658..b4f2ab48a61f 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -29,10 +29,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
*
* We don't actually know what locking is used at the lower level;
* but if it's a filesystem that supports quotas, it will be using
- * i_lock as in inode_add_bytes(). tmpfs uses other locking, and
- * its 32-bit is (just) able to exceed 2TB i_size with the aid of
- * holes; but its i_blocks cannot carry into the upper long without
- * almost 2TB swap - let's ignore that case.
+ * i_lock as in inode_add_bytes().
*/
if (sizeof(i_blocks) > sizeof(long))
spin_lock(&src->i_lock);
diff --git a/fs/stat.c b/fs/stat.c
index 961039121cb8..ba5316ffac61 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -27,12 +27,12 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
stat->rdev = inode->i_rdev;
+ stat->size = i_size_read(inode);
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
- stat->size = i_size_read(inode);
- stat->blocks = inode->i_blocks;
stat->blksize = (1 << inode->i_blkbits);
+ stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 44ce51656804..b6c4b3795c4a 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -221,7 +221,7 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
}
static int
-xfs_set_mode(struct inode *inode, mode_t mode)
+xfs_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
@@ -267,7 +267,7 @@ posix_acl_default_exists(struct inode *inode)
int
xfs_inherit_acl(struct inode *inode, struct posix_acl *acl)
{
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
int error = 0, inherit = 0;
if (S_ISDIR(inode->i_mode)) {
@@ -381,7 +381,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
goto out_release;
if (type == ACL_TYPE_ACCESS) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
if (error <= 0) {