Diffstat (limited to 'fs/nilfs2/the_nilfs.c')
-rw-r--r--   fs/nilfs2/the_nilfs.c   339
1 file changed, 106 insertions, 233 deletions
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index d27715103376..0254be2d73c6 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -35,9 +35,6 @@
 #include "segbuf.h"
 
-static LIST_HEAD(nilfs_objects);
-static DEFINE_SPINLOCK(nilfs_lock);
-
 static int nilfs_valid_sb(struct nilfs_super_block *sbp);
 
 void nilfs_set_last_segment(struct the_nilfs *nilfs,
@@ -61,16 +58,13 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs,
 }
 
 /**
- * alloc_nilfs - allocate the_nilfs structure
+ * alloc_nilfs - allocate a nilfs object
  * @bdev: block device to which the_nilfs is related
  *
- * alloc_nilfs() allocates memory for the_nilfs and
- * initializes its reference count and locks.
- *
  * Return Value: On success, pointer to the_nilfs is returned.
  * On error, NULL is returned.
  */
-static struct the_nilfs *alloc_nilfs(struct block_device *bdev)
+struct the_nilfs *alloc_nilfs(struct block_device *bdev)
 {
 	struct the_nilfs *nilfs;
 
@@ -79,103 +73,38 @@ static struct the_nilfs *alloc_nilfs(struct block_device *bdev)
 		return NULL;
 
 	nilfs->ns_bdev = bdev;
-	atomic_set(&nilfs->ns_count, 1);
 	atomic_set(&nilfs->ns_ndirtyblks, 0);
 	init_rwsem(&nilfs->ns_sem);
-	init_rwsem(&nilfs->ns_super_sem);
-	mutex_init(&nilfs->ns_mount_mutex);
-	init_rwsem(&nilfs->ns_writer_sem);
-	INIT_LIST_HEAD(&nilfs->ns_list);
-	INIT_LIST_HEAD(&nilfs->ns_supers);
+	INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
 	spin_lock_init(&nilfs->ns_last_segment_lock);
-	nilfs->ns_gc_inodes_h = NULL;
+	nilfs->ns_cptree = RB_ROOT;
+	spin_lock_init(&nilfs->ns_cptree_lock);
 	init_rwsem(&nilfs->ns_segctor_sem);
 
 	return nilfs;
 }
 
 /**
- * find_or_create_nilfs - find or create nilfs object
- * @bdev: block device to which the_nilfs is related
- *
- * find_nilfs() looks up an existent nilfs object created on the
- * device and gets the reference count of the object. If no nilfs object
- * is found on the device, a new nilfs object is allocated.
- *
- * Return Value: On success, pointer to the nilfs object is returned.
- * On error, NULL is returned.
- */
-struct the_nilfs *find_or_create_nilfs(struct block_device *bdev)
-{
-	struct the_nilfs *nilfs, *new = NULL;
-
- retry:
-	spin_lock(&nilfs_lock);
-	list_for_each_entry(nilfs, &nilfs_objects, ns_list) {
-		if (nilfs->ns_bdev == bdev) {
-			get_nilfs(nilfs);
-			spin_unlock(&nilfs_lock);
-			if (new)
-				put_nilfs(new);
-			return nilfs; /* existing object */
-		}
-	}
-	if (new) {
-		list_add_tail(&new->ns_list, &nilfs_objects);
-		spin_unlock(&nilfs_lock);
-		return new; /* new object */
-	}
-	spin_unlock(&nilfs_lock);
-
-	new = alloc_nilfs(bdev);
-	if (new)
-		goto retry;
-	return NULL; /* insufficient memory */
-}
-
-/**
- * put_nilfs - release a reference to the_nilfs
- * @nilfs: the_nilfs structure to be released
- *
- * put_nilfs() decrements a reference counter of the_nilfs.
- * If the reference count reaches zero, the_nilfs is freed.
+ * destroy_nilfs - destroy nilfs object
+ * @nilfs: nilfs object to be released
  */
-void put_nilfs(struct the_nilfs *nilfs)
+void destroy_nilfs(struct the_nilfs *nilfs)
 {
-	spin_lock(&nilfs_lock);
-	if (!atomic_dec_and_test(&nilfs->ns_count)) {
-		spin_unlock(&nilfs_lock);
-		return;
-	}
-	list_del_init(&nilfs->ns_list);
-	spin_unlock(&nilfs_lock);
-
-	/*
-	 * Increment of ns_count never occurs below because the caller
-	 * of get_nilfs() holds at least one reference to the_nilfs.
-	 * Thus its exclusion control is not required here.
-	 */
-	might_sleep();
-	if (nilfs_loaded(nilfs)) {
-		nilfs_mdt_destroy(nilfs->ns_sufile);
-		nilfs_mdt_destroy(nilfs->ns_cpfile);
-		nilfs_mdt_destroy(nilfs->ns_dat);
-		nilfs_mdt_destroy(nilfs->ns_gc_dat);
-	}
 	if (nilfs_init(nilfs)) {
-		nilfs_destroy_gccache(nilfs);
 		brelse(nilfs->ns_sbh[0]);
 		brelse(nilfs->ns_sbh[1]);
 	}
 	kfree(nilfs);
 }
 
-static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block)
+static int nilfs_load_super_root(struct the_nilfs *nilfs,
+				 struct super_block *sb, sector_t sr_block)
 {
 	struct buffer_head *bh_sr;
 	struct nilfs_super_root *raw_sr;
 	struct nilfs_super_block **sbp = nilfs->ns_sbp;
+	struct nilfs_inode *rawi;
 	unsigned dat_entry_size, segment_usage_size, checkpoint_size;
 	unsigned inode_size;
 	int err;
@@ -192,40 +121,22 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block)
 
 	inode_size = nilfs->ns_inode_size;
 
-	err = -ENOMEM;
-	nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size);
-	if (unlikely(!nilfs->ns_dat))
+	rawi = (void *)bh_sr->b_data + NILFS_SR_DAT_OFFSET(inode_size);
+	err = nilfs_dat_read(sb, dat_entry_size, rawi, &nilfs->ns_dat);
+	if (err)
 		goto failed;
 
-	nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size);
-	if (unlikely(!nilfs->ns_gc_dat))
+	rawi = (void *)bh_sr->b_data + NILFS_SR_CPFILE_OFFSET(inode_size);
+	err = nilfs_cpfile_read(sb, checkpoint_size, rawi, &nilfs->ns_cpfile);
+	if (err)
 		goto failed_dat;
 
-	nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size);
-	if (unlikely(!nilfs->ns_cpfile))
-		goto failed_gc_dat;
-
-	nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size);
-	if (unlikely(!nilfs->ns_sufile))
+	rawi = (void *)bh_sr->b_data + NILFS_SR_SUFILE_OFFSET(inode_size);
+	err = nilfs_sufile_read(sb, segment_usage_size, rawi,
+				&nilfs->ns_sufile);
+	if (err)
 		goto failed_cpfile;
 
-	nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
-
-	err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data +
-			     NILFS_SR_DAT_OFFSET(inode_size));
-	if (unlikely(err))
-		goto failed_sufile;
-
-	err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data +
-				NILFS_SR_CPFILE_OFFSET(inode_size));
-	if (unlikely(err))
-		goto failed_sufile;
-
-	err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data +
-				NILFS_SR_SUFILE_OFFSET(inode_size));
-	if (unlikely(err))
-		goto failed_sufile;
-
 	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
 	nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);
 
@@ -233,17 +144,11 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block)
 	brelse(bh_sr);
 	return err;
 
- failed_sufile:
-	nilfs_mdt_destroy(nilfs->ns_sufile);
-
  failed_cpfile:
-	nilfs_mdt_destroy(nilfs->ns_cpfile);
-
- failed_gc_dat:
-	nilfs_mdt_destroy(nilfs->ns_gc_dat);
+	iput(nilfs->ns_cpfile);
 
  failed_dat:
-	nilfs_mdt_destroy(nilfs->ns_dat);
+	iput(nilfs->ns_dat);
 	goto failed;
 }
 
@@ -306,15 +211,6 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
 	int valid_fs = nilfs_valid_fs(nilfs);
 	int err;
 
-	if (nilfs_loaded(nilfs)) {
-		if (valid_fs ||
-		    ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY)))
-			return 0;
-		printk(KERN_ERR "NILFS: the filesystem is in an incomplete "
-		       "recovery state.\n");
-		return -EINVAL;
-	}
-
 	if (!valid_fs) {
 		printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
 		if (s_flags & MS_RDONLY) {
@@ -375,7 +271,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
 		goto scan_error;
 	}
 
-	err = nilfs_load_super_root(nilfs, ri.ri_super_root);
+	err = nilfs_load_super_root(nilfs, sbi->s_super, ri.ri_super_root);
 	if (unlikely(err)) {
 		printk(KERN_ERR "NILFS: error loading super root.\n");
 		goto failed;
@@ -443,10 +339,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
 	goto failed;
 
  failed_unload:
-	nilfs_mdt_destroy(nilfs->ns_cpfile);
-	nilfs_mdt_destroy(nilfs->ns_sufile);
-	nilfs_mdt_destroy(nilfs->ns_dat);
-	nilfs_mdt_destroy(nilfs->ns_gc_dat);
+	iput(nilfs->ns_cpfile);
+	iput(nilfs->ns_sufile);
+	iput(nilfs->ns_dat);
 
  failed:
 	nilfs_clear_recovery_info(&ri);
@@ -468,8 +363,8 @@ static unsigned long long nilfs_max_size(unsigned int blkbits)
 static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 				   struct nilfs_super_block *sbp)
 {
-	if (le32_to_cpu(sbp->s_rev_level) != NILFS_CURRENT_REV) {
-		printk(KERN_ERR "NILFS: revision mismatch "
+	if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
+		printk(KERN_ERR "NILFS: unsupported revision "
 		       "(superblock rev.=%d.%d, current rev.=%d.%d). "
 		       "Please check the version of mkfs.nilfs.\n",
 		       le32_to_cpu(sbp->s_rev_level),
@@ -631,12 +526,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
  *
  * init_nilfs() performs common initialization per block device (e.g.
  * reading the super block, getting disk layout information, initializing
- * shared fields in the_nilfs). It takes on some portion of the jobs
- * typically done by a fill_super() routine. This division arises from
- * the nature that multiple NILFS instances may be simultaneously
- * mounted on a device.
- * For multiple mounts on the same device, only the first mount
- * invokes these tasks.
+ * shared fields in the_nilfs).
  *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
@@ -645,32 +535,10 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
 {
 	struct super_block *sb = sbi->s_super;
 	struct nilfs_super_block *sbp;
-	struct backing_dev_info *bdi;
 	int blocksize;
 	int err;
 
 	down_write(&nilfs->ns_sem);
-	if (nilfs_init(nilfs)) {
-		/* Load values from existing the_nilfs */
-		sbp = nilfs->ns_sbp[0];
-		err = nilfs_store_magic_and_option(sb, sbp, data);
-		if (err)
-			goto out;
-
-		err = nilfs_check_feature_compatibility(sb, sbp);
-		if (err)
-			goto out;
-
-		blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
-		if (sb->s_blocksize != blocksize &&
-		    !sb_set_blocksize(sb, blocksize)) {
-			printk(KERN_ERR "NILFS: blocksize %d unfit to device\n",
-			       blocksize);
-			err = -EINVAL;
-		}
-		sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);
-		goto out;
-	}
 
 	blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
 	if (!blocksize) {
@@ -729,18 +597,10 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
 
 	nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);
 
-	bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
-	nilfs->ns_bdi = bdi ? : &default_backing_dev_info;
-
 	err = nilfs_store_log_cursor(nilfs, sbp);
 	if (err)
 		goto failed_sbh;
 
-	/* Initialize gcinode cache */
-	err = nilfs_init_gccache(nilfs);
-	if (err)
-		goto failed_sbh;
-
 	set_nilfs_init(nilfs);
 	err = 0;
  out:
@@ -812,79 +672,92 @@ int nilfs_near_disk_full(struct the_nilfs *nilfs)
 	return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
 }
 
-/**
- * nilfs_find_sbinfo - find existing nilfs_sb_info structure
- * @nilfs: nilfs object
- * @rw_mount: mount type (non-zero value for read/write mount)
- * @cno: checkpoint number (zero for read-only mount)
- *
- * nilfs_find_sbinfo() returns the nilfs_sb_info structure which
- * @rw_mount and @cno (in case of snapshots) matched. If no instance
- * was found, NULL is returned. Although the super block instance can
- * be unmounted after this function returns, the nilfs_sb_info struct
- * is kept on memory until nilfs_put_sbinfo() is called.
- */
-struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *nilfs,
-					int rw_mount, __u64 cno)
+struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno)
 {
-	struct nilfs_sb_info *sbi;
-
-	down_read(&nilfs->ns_super_sem);
-	/*
-	 * The SNAPSHOT flag and sb->s_flags are supposed to be
-	 * protected with nilfs->ns_super_sem.
-	 */
-	sbi = nilfs->ns_current;
-	if (rw_mount) {
-		if (sbi && !(sbi->s_super->s_flags & MS_RDONLY))
-			goto found; /* read/write mount */
-		else
-			goto out;
-	} else if (cno == 0) {
-		if (sbi && (sbi->s_super->s_flags & MS_RDONLY))
-			goto found; /* read-only mount */
-		else
-			goto out;
+	struct rb_node *n;
+	struct nilfs_root *root;
+
+	spin_lock(&nilfs->ns_cptree_lock);
+	n = nilfs->ns_cptree.rb_node;
+	while (n) {
+		root = rb_entry(n, struct nilfs_root, rb_node);
+
+		if (cno < root->cno) {
+			n = n->rb_left;
+		} else if (cno > root->cno) {
+			n = n->rb_right;
+		} else {
+			atomic_inc(&root->count);
+			spin_unlock(&nilfs->ns_cptree_lock);
+			return root;
+		}
 	}
+	spin_unlock(&nilfs->ns_cptree_lock);
 
-	list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
-		if (nilfs_test_opt(sbi, SNAPSHOT) &&
-		    sbi->s_snapshot_cno == cno)
-			goto found; /* snapshot mount */
-	}
- out:
-	up_read(&nilfs->ns_super_sem);
 	return NULL;
-
- found:
-	atomic_inc(&sbi->s_count);
-	up_read(&nilfs->ns_super_sem);
-	return sbi;
 }
 
-int nilfs_checkpoint_is_mounted(struct the_nilfs *nilfs, __u64 cno,
-				int snapshot_mount)
+struct nilfs_root *
+nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
 {
-	struct nilfs_sb_info *sbi;
-	int ret = 0;
+	struct rb_node **p, *parent;
+	struct nilfs_root *root, *new;
 
-	down_read(&nilfs->ns_super_sem);
-	if (cno == 0 || cno > nilfs->ns_cno)
-		goto out_unlock;
+	root = nilfs_lookup_root(nilfs, cno);
+	if (root)
+		return root;
 
-	list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
-		if (sbi->s_snapshot_cno == cno &&
-		    (!snapshot_mount || nilfs_test_opt(sbi, SNAPSHOT))) {
-			/* exclude read-only mounts */
-			ret++;
-			break;
+	new = kmalloc(sizeof(*root), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	spin_lock(&nilfs->ns_cptree_lock);
+
+	p = &nilfs->ns_cptree.rb_node;
+	parent = NULL;
+
+	while (*p) {
+		parent = *p;
+		root = rb_entry(parent, struct nilfs_root, rb_node);
+
+		if (cno < root->cno) {
+			p = &(*p)->rb_left;
+		} else if (cno > root->cno) {
+			p = &(*p)->rb_right;
+		} else {
+			atomic_inc(&root->count);
+			spin_unlock(&nilfs->ns_cptree_lock);
+			kfree(new);
+			return root;
 		}
 	}
 
-	/* for protecting recent checkpoints */
-	if (cno >= nilfs_last_cno(nilfs))
-		ret++;
- out_unlock:
-	up_read(&nilfs->ns_super_sem);
-	return ret;
+	new->cno = cno;
+	new->ifile = NULL;
+	new->nilfs = nilfs;
+	atomic_set(&new->count, 1);
+	atomic_set(&new->inodes_count, 0);
+	atomic_set(&new->blocks_count, 0);
+
+	rb_link_node(&new->rb_node, parent, p);
+	rb_insert_color(&new->rb_node, &nilfs->ns_cptree);
+
+	spin_unlock(&nilfs->ns_cptree_lock);
+
+	return new;
+}
+
+void nilfs_put_root(struct nilfs_root *root)
+{
+	if (atomic_dec_and_test(&root->count)) {
+		struct the_nilfs *nilfs = root->nilfs;
+
+		spin_lock(&nilfs->ns_cptree_lock);
+		rb_erase(&root->rb_node, &nilfs->ns_cptree);
+		spin_unlock(&nilfs->ns_cptree_lock);
+		if (root->ifile)
+			iput(root->ifile);
+
+		kfree(root);
+	}
+}
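Note: the checkpoint tree introduced above replaces the old per-device nilfs_sb_info bookkeeping with refcounted nilfs_root objects keyed by checkpoint number. The snippet below is a caller-side sketch, not part of the patch; the helper name nilfs_attach_root_sketch() is hypothetical and only illustrates how nilfs_find_or_create_root() and nilfs_put_root() from the last hunk are meant to pair up.

/* Hypothetical caller, for illustration only (not from this commit). */
static int nilfs_attach_root_sketch(struct the_nilfs *nilfs, __u64 cno)
{
	struct nilfs_root *root;

	/*
	 * Walks ns_cptree under ns_cptree_lock; if no root exists for
	 * this checkpoint number, a new node is inserted with count == 1.
	 */
	root = nilfs_find_or_create_root(nilfs, cno);
	if (!root)
		return -ENOMEM;	/* kmalloc() of the new root failed */

	/* ... set up root->ifile, account inodes/blocks, etc. ... */

	/*
	 * Drop the reference.  When count reaches zero the root is
	 * erased from ns_cptree, its ifile (if any) is iput(), and the
	 * structure is freed.
	 */
	nilfs_put_root(root);
	return 0;
}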