| author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-07 15:19:35 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-07 15:19:35 -0700 |
| commit | ae8ac6b7dbfd67f883050421fd195c153d02f5f3 (patch) | |
| tree | 162db451c57aa9be511aeb704f7a4301e3ea2225 /fs | |
| parent | 460352c2f18e18accc993a9127a03e903ed8e0ed (diff) | |
| parent | 6c83fd5142c68294acb0e857b7bac2ce8a5077f7 (diff) | |
Merge branch 'quota_scaling' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs
Pull quota scaling updates from Jan Kara:
"This contains changes to make the quota subsystem more scalable.
Reportedly it improves number of files created per second on ext4
filesystem on fast storage by about a factor of 2x"
* 'quota_scaling' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs: (28 commits)
quota: Add lock annotations to struct members
quota: Reduce contention on dq_data_lock
fs: Provide __inode_get_bytes()
quota: Inline dquot_[re]claim_reserved_space() into callsite
quota: Inline inode_{incr,decr}_space() into callsites
quota: Inline functions into their callsites
ext4: Disable dirty list tracking of dquots when journalling quotas
quota: Allow disabling tracking of dirty dquots in a list
quota: Remove dq_wait_unused from dquot
quota: Move locking into clear_dquot_dirty()
quota: Do not dirty bad dquots
quota: Fix possible corruption of dqi_flags
quota: Propagate ->quota_read errors from v2_read_file_info()
quota: Fix error codes in v2_read_file_info()
quota: Push dqio_sem down to ->read_file_info()
quota: Push dqio_sem down to ->write_file_info()
quota: Push dqio_sem down to ->get_next_id()
quota: Push dqio_sem down to ->release_dqblk()
quota: Remove locking for writing to the old quota format
quota: Do not acquire dqio_sem for dquot overwrites in v2 format
...
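The bulk of the scaling comes from splitting the global dq_data_lock: the usage counters in struct dquot are now guarded by a per-dquot dq_dqb_lock (and i_blocks/i_bytes consistency by inode->i_lock), so tasks charging space against different quota IDs no longer serialize on a single spinlock. Below is a minimal user-space sketch of that per-object-lock pattern, using pthread locks and illustrative names rather than the kernel API:

```c
/*
 * Illustrative user-space analogue only: one lock per counter object
 * (the role dquot->dq_dqb_lock takes in this series) instead of one
 * global lock shared by every object.
 */
#include <pthread.h>
#include <stdint.h>

struct quota_counters {			/* stands in for struct dquot's dqb fields */
	pthread_mutex_t lock;		/* analogue of dquot->dq_dqb_lock */
	uint64_t curspace;
	uint64_t curinodes;
};

void quota_counters_init(struct quota_counters *q)
{
	pthread_mutex_init(&q->lock, NULL);
	q->curspace = 0;
	q->curinodes = 0;
}

/* Charging space takes only this object's lock, so updates against
 * different quota IDs proceed in parallel. */
void charge_space(struct quota_counters *q, uint64_t bytes)
{
	pthread_mutex_lock(&q->lock);
	q->curspace += bytes;
	pthread_mutex_unlock(&q->lock);
}
```

A visible consequence in the diff below is that limit checks can no longer be made atomically across all of an inode's dquots under one lock: dquot_add_space()/dquot_add_inodes() charge each dquot individually, and the callers back out the charges already made when a later dquot returns -EDQUOT.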
Diffstat (limited to 'fs')
-rw-r--r--  fs/ext4/super.c          |  26
-rw-r--r--  fs/ocfs2/quota_global.c  |  36
-rw-r--r--  fs/ocfs2/quota_local.c   |  17
-rw-r--r--  fs/quota/dquot.c         | 504
-rw-r--r--  fs/quota/quota_tree.c    |  10
-rw-r--r--  fs/quota/quota_v1.c      |  11
-rw-r--r--  fs/quota/quota_v2.c      |  95
-rw-r--r--  fs/stat.c                |   2
-rw-r--r--  fs/super.c               |   2
9 files changed, 392 insertions, 311 deletions
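Throughout the patch the per-superblock dqio_mutex becomes the rw-semaphore dqio_sem, pushed down into the quota-format callbacks. Reads and dquot overwrites take it shared; v2_write_dquot() takes it exclusively only while the entry still needs space allocated in the quota tree (dquot->dq_off == 0), relying on dquot->dq_lock to serialize concurrent writes of the same dquot. A rough user-space analogue of that shared/exclusive split (hypothetical helpers, pthread rwlock standing in for the kernel rwsem):

```c
/* Illustrative sketch, not kernel code. */
#include <pthread.h>

static pthread_rwlock_t dqio_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Overwriting an already-allocated on-disk entry only shares the lock;
 * writers of *different* dquots can run concurrently. */
void write_dquot_in_place(void)
{
	pthread_rwlock_rdlock(&dqio_lock);
	/* ... overwrite the dquot's existing slot in the quota file ... */
	pthread_rwlock_unlock(&dqio_lock);
}

/* The first write must allocate a slot in the shared quota tree, so it
 * takes the lock exclusively. */
void write_dquot_alloc(void)
{
	pthread_rwlock_wrlock(&dqio_lock);
	/* ... insert into the tree, then write the entry ... */
	pthread_rwlock_unlock(&dqio_lock);
}
```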
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c9e7be58756b..93aece6891f2 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5215,7 +5215,7 @@ static int ext4_statfs_project(struct super_block *sb, dquot = dqget(sb, qid); if (IS_ERR(dquot)) return PTR_ERR(dquot); - spin_lock(&dq_data_lock); + spin_lock(&dquot->dq_dqb_lock); limit = (dquot->dq_dqb.dqb_bsoftlimit ? dquot->dq_dqb.dqb_bsoftlimit : @@ -5238,7 +5238,7 @@ static int ext4_statfs_project(struct super_block *sb, (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; } - spin_unlock(&dq_data_lock); + spin_unlock(&dquot->dq_dqb_lock); dqput(dquot); return 0; } @@ -5284,18 +5284,13 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -/* Helper function for writing quotas on sync - we need to start transaction - * before quota file is locked for write. Otherwise the are possible deadlocks: - * Process 1 Process 2 - * ext4_create() quota_sync() - * jbd2_journal_start() write_dquot() - * dquot_initialize() down(dqio_mutex) - * down(dqio_mutex) jbd2_journal_start() - * - */ #ifdef CONFIG_QUOTA +/* + * Helper functions so that transaction is started before we acquire dqio_sem + * to keep correct lock ordering of transaction > dqio_sem + */ static inline struct inode *dquot_to_inode(struct dquot *dquot) { return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; @@ -5430,6 +5425,13 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, ext4_msg(sb, KERN_WARNING, "Quota file not on filesystem root. " "Journaled quota will not work"); + sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY; + } else { + /* + * Clear the flag just in case mount options changed since + * last time. + */ + sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY; } /* @@ -5526,7 +5528,7 @@ static int ext4_enable_quotas(struct super_block *sb) test_opt(sb, PRJQUOTA), }; - sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; + sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; for (type = 0; type < EXT4_MAXQUOTAS; type++) { if (qf_inums[type]) { err = ext4_quota_enable(sb, type, QFMT_VFS_V1, diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index cec495a921e3..c94b6baaa551 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -33,7 +33,7 @@ * Locking of quotas with OCFS2 is rather complex. Here are rules that * should be obeyed by all the functions: * - any write of quota structure (either to local or global file) is protected - * by dqio_mutex or dquot->dq_lock. + * by dqio_sem or dquot->dq_lock. * - any modification of global quota file holds inode cluster lock, i_mutex, * and ip_alloc_sem of the global quota file (achieved by * ocfs2_lock_global_qf). It also has to hold qinfo_lock. 
@@ -42,9 +42,9 @@ * * A rough sketch of locking dependencies (lf = local file, gf = global file): * Normal filesystem operation: - * start_trans -> dqio_mutex -> write to lf + * start_trans -> dqio_sem -> write to lf * Syncing of local and global file: - * ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock -> + * ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock -> * write to gf * -> write to lf * Acquire dquot for the first time: @@ -60,7 +60,7 @@ * Recovery: * inode cluster lock of recovered lf * -> read bitmaps -> ip_alloc_sem of lf - * -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock -> + * -> ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock -> * write to gf */ @@ -443,13 +443,17 @@ static int __ocfs2_global_write_info(struct super_block *sb, int type) int ocfs2_global_write_info(struct super_block *sb, int type) { int err; - struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv; + struct quota_info *dqopt = sb_dqopt(sb); + struct ocfs2_mem_dqinfo *info = dqopt->info[type].dqi_priv; + down_write(&dqopt->dqio_sem); err = ocfs2_qinfo_lock(info, 1); if (err < 0) - return err; + goto out_sem; err = __ocfs2_global_write_info(sb, type); ocfs2_qinfo_unlock(info, 1); +out_sem: + up_write(&dqopt->dqio_sem); return err; } @@ -500,7 +504,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing) /* Update space and inode usage. Get also other information from * global quota file so that we don't overwrite any changes there. * We are */ - spin_lock(&dq_data_lock); + spin_lock(&dquot->dq_dqb_lock); spacechange = dquot->dq_dqb.dqb_curspace - OCFS2_DQUOT(dquot)->dq_origspace; inodechange = dquot->dq_dqb.dqb_curinodes - @@ -556,7 +560,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing) __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes; - spin_unlock(&dq_data_lock); + spin_unlock(&dquot->dq_dqb_lock); err = ocfs2_qinfo_lock(info, freeing); if (err < 0) { mlog(ML_ERROR, "Failed to lock quota info, losing quota write" @@ -611,7 +615,7 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type) mlog_errno(status); goto out_ilock; } - mutex_lock(&sb_dqopt(sb)->dqio_mutex); + down_write(&sb_dqopt(sb)->dqio_sem); status = ocfs2_sync_dquot(dquot); if (status < 0) mlog_errno(status); @@ -619,7 +623,7 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type) status = ocfs2_local_write_dquot(dquot); if (status < 0) mlog_errno(status); - mutex_unlock(&sb_dqopt(sb)->dqio_mutex); + up_write(&sb_dqopt(sb)->dqio_sem); ocfs2_commit_trans(osb, handle); out_ilock: ocfs2_unlock_global_qf(oinfo, 1); @@ -666,9 +670,9 @@ static int ocfs2_write_dquot(struct dquot *dquot) mlog_errno(status); goto out; } - mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex); + down_write(&sb_dqopt(dquot->dq_sb)->dqio_sem); status = ocfs2_local_write_dquot(dquot); - mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex); + up_write(&sb_dqopt(dquot->dq_sb)->dqio_sem); ocfs2_commit_trans(osb, handle); out: return status; @@ -920,10 +924,10 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot) /* In case user set some limits, sync dquot immediately to global * quota file so that information propagates quicker */ - spin_lock(&dq_data_lock); + spin_lock(&dquot->dq_dqb_lock); if (dquot->dq_flags & mask) sync = 1; - spin_unlock(&dq_data_lock); + spin_unlock(&dquot->dq_dqb_lock); /* This is a slight hack but 
we can't afford getting global quota * lock if we already have a transaction started. */ if (!sync || journal_current_handle()) { @@ -939,7 +943,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot) mlog_errno(status); goto out_ilock; } - mutex_lock(&sb_dqopt(sb)->dqio_mutex); + down_write(&sb_dqopt(sb)->dqio_sem); status = ocfs2_sync_dquot(dquot); if (status < 0) { mlog_errno(status); @@ -948,7 +952,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot) /* Now write updated local dquot structure */ status = ocfs2_local_write_dquot(dquot); out_dlock: - mutex_unlock(&sb_dqopt(sb)->dqio_mutex); + up_write(&sb_dqopt(sb)->dqio_sem); ocfs2_commit_trans(osb, handle); out_ilock: ocfs2_unlock_global_qf(oinfo, 1); diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 32c5a40c1257..aa700fd10610 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -520,8 +520,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, mlog_errno(status); goto out_drop_lock; } - mutex_lock(&sb_dqopt(sb)->dqio_mutex); - spin_lock(&dq_data_lock); + down_write(&sb_dqopt(sb)->dqio_sem); + spin_lock(&dquot->dq_dqb_lock); /* Add usage from quota entry into quota changes * of our node. Auxiliary variables are important * due to signedness */ @@ -529,7 +529,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, inodechange = le64_to_cpu(dqblk->dqb_inodemod); dquot->dq_dqb.dqb_curspace += spacechange; dquot->dq_dqb.dqb_curinodes += inodechange; - spin_unlock(&dq_data_lock); + spin_unlock(&dquot->dq_dqb_lock); /* We want to drop reference held by the crashed * node. Since we have our own reference we know * global structure actually won't be freed. */ @@ -553,7 +553,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, unlock_buffer(qbh); ocfs2_journal_dirty(handle, qbh); out_commit: - mutex_unlock(&sb_dqopt(sb)->dqio_mutex); + up_write(&sb_dqopt(sb)->dqio_sem); ocfs2_commit_trans(OCFS2_SB(sb), handle); out_drop_lock: ocfs2_unlock_global_qf(oinfo, 1); @@ -691,9 +691,6 @@ static int ocfs2_local_read_info(struct super_block *sb, int type) struct ocfs2_quota_recovery *rec; int locked = 0; - /* We don't need the lock and we have to acquire quota file locks - * which will later depend on this lock */ - mutex_unlock(&sb_dqopt(sb)->dqio_mutex); info->dqi_max_spc_limit = 0x7fffffffffffffffLL; info->dqi_max_ino_limit = 0x7fffffffffffffffLL; oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS); @@ -772,7 +769,6 @@ static int ocfs2_local_read_info(struct super_block *sb, int type) goto out_err; } - mutex_lock(&sb_dqopt(sb)->dqio_mutex); return 0; out_err: if (oinfo) { @@ -786,7 +782,6 @@ out_err: kfree(oinfo); } brelse(bh); - mutex_lock(&sb_dqopt(sb)->dqio_mutex); return -1; } @@ -882,12 +877,12 @@ static void olq_set_dquot(struct buffer_head *bh, void *private) dqblk->dqb_id = cpu_to_le64(from_kqid(&init_user_ns, od->dq_dquot.dq_id)); - spin_lock(&dq_data_lock); + spin_lock(&od->dq_dquot.dq_dqb_lock); dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace - od->dq_origspace); dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes - od->dq_originodes); - spin_unlock(&dq_data_lock); + spin_unlock(&od->dq_dquot.dq_dqb_lock); trace_olq_set_dquot( (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod), (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod), diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 566e6ef99f07..8381db9db6d9 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -82,16 +82,19 @@ #include <linux/uaccess.h> 
/* - * There are three quota SMP locks. dq_list_lock protects all lists with quotas - * and quota formats. - * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and - * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes. - * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly - * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects - * modifications of quota state (on quotaon and quotaoff) and readers who care - * about latest values take it as well. + * There are five quota SMP locks: + * * dq_list_lock protects all lists with quotas and quota formats. + * * dquot->dq_dqb_lock protects data from dq_dqb + * * inode->i_lock protects inode->i_blocks, i_bytes and also guards + * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that + * dquot_transfer() can stabilize amount it transfers + * * dq_data_lock protects mem_dqinfo structures and modifications of dquot + * pointers in the inode + * * dq_state_lock protects modifications of quota state (on quotaon and + * quotaoff) and readers who care about latest values take it as well. * - * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, + * The spinlock ordering is hence: + * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock, * dq_list_lock > dq_state_lock * * Note that some things (eg. sb pointer, type, id) doesn't change during @@ -110,17 +113,14 @@ * sure they cannot race with quotaon which first sets S_NOQUOTA flag and * then drops all pointers to dquots from an inode. * - * Each dquot has its dq_lock mutex. Locked dquots might not be referenced - * from inodes (dquot_alloc_space() and such don't check the dq_lock). - * Currently dquot is locked only when it is being read to memory (or space for - * it is being allocated) on the first dqget() and when it is being released on - * the last dqput(). The allocation and release oparations are serialized by - * the dq_lock and by checking the use count in dquot_release(). Write - * operations on dquots don't hold dq_lock as they copy data under dq_data_lock - * spinlock to internal buffers before writing. + * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to + * memory (or space for it is being allocated) on the first dqget(), when it is + * being written out, and when it is being released on the last dqput(). The + * allocation and release operations are serialized by the dq_lock and by + * checking the use count in dquot_release(). * * Lock ordering (including related VFS locks) is the following: - * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex + * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); @@ -129,6 +129,8 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); EXPORT_SYMBOL(dq_data_lock); DEFINE_STATIC_SRCU(dquot_srcu); +static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq); + void __quota_error(struct super_block *sb, const char *func, const char *fmt, ...) 
{ @@ -247,6 +249,7 @@ struct dqstats dqstats; EXPORT_SYMBOL(dqstats); static qsize_t inode_get_rsv_space(struct inode *inode); +static qsize_t __inode_get_rsv_space(struct inode *inode); static int __dquot_initialize(struct inode *inode, int type); static inline unsigned int @@ -342,6 +345,12 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) { int ret = 1; + if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) + return 0; + + if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY) + return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags); + /* If quota is dirty already, we don't have to acquire dq_list_lock */ if (test_bit(DQ_MOD_B, &dquot->dq_flags)) return 1; @@ -381,18 +390,26 @@ static inline void dqput_all(struct dquot **dquot) dqput(dquot[cnt]); } -/* This function needs dq_list_lock */ static inline int clear_dquot_dirty(struct dquot *dquot) { - if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) + if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY) + return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags); + + spin_lock(&dq_list_lock); + if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) { + spin_unlock(&dq_list_lock); return 0; + } list_del_init(&dquot->dq_dirty); + spin_unlock(&dq_list_lock); return 1; } void mark_info_dirty(struct super_block *sb, int type) { - set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags); + spin_lock(&dq_data_lock); + sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY; + spin_unlock(&dq_data_lock); } EXPORT_SYMBOL(mark_info_dirty); @@ -406,7 +423,6 @@ int dquot_acquire(struct dquot *dquot) struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); - mutex_lock(&dqopt->dqio_mutex); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot); if (ret < 0) @@ -436,7 +452,6 @@ int dquot_acquire(struct dquot *dquot) smp_mb__before_atomic(); set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: - mutex_unlock(&dqopt->dqio_mutex); mutex_unlock(&dquot->dq_lock); return ret; } @@ -450,21 +465,17 @@ int dquot_commit(struct dquot *dquot) int ret = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - mutex_lock(&dqopt->dqio_mutex); - spin_lock(&dq_list_lock); - if (!clear_dquot_dirty(dquot)) { - spin_unlock(&dq_list_lock); - goto out_sem; - } - spin_unlock(&dq_list_lock); + mutex_lock(&dquot->dq_lock); + if (!clear_dquot_dirty(dquot)) + goto out_lock; /* Inactive dquot can be only if there was error during read/init * => we have better not writing it */ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); else ret = -EIO; -out_sem: - mutex_unlock(&dqopt->dqio_mutex); +out_lock: + mutex_unlock(&dquot->dq_lock); return ret; } EXPORT_SYMBOL(dquot_commit); @@ -481,7 +492,6 @@ int dquot_release(struct dquot *dquot) /* Check whether we are not racing with some other dqget() */ if (atomic_read(&dquot->dq_count) > 1) goto out_dqlock; - mutex_lock(&dqopt->dqio_mutex); if (dqopt->ops[dquot->dq_id.type]->release_dqblk) { ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot); /* Write the info */ @@ -493,7 +503,6 @@ int dquot_release(struct dquot *dquot) ret = ret2; } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); - mutex_unlock(&dqopt->dqio_mutex); out_dqlock: mutex_unlock(&dquot->dq_lock); return ret; @@ -530,22 +539,18 @@ restart: continue; /* Wait for dquot users */ if (atomic_read(&dquot->dq_count)) { - DEFINE_WAIT(wait); - dqgrab(dquot); - prepare_to_wait(&dquot->dq_wait_unused, &wait, - TASK_UNINTERRUPTIBLE); spin_unlock(&dq_list_lock); - /* Once dqput() 
wakes us up, we know it's time to free + /* + * Once dqput() wakes us up, we know it's time to free * the dquot. * IMPORTANT: we rely on the fact that there is always * at most one process waiting for dquot to free. * Otherwise dq_count would be > 1 and we would never * wake up. */ - if (atomic_read(&dquot->dq_count) > 1) - schedule(); - finish_wait(&dquot->dq_wait_unused, &wait); + wait_event(dquot_ref_wq, + atomic_read(&dquot->dq_count) == 1); dqput(dquot); /* At this moment dquot() need not exist (it could be * reclaimed by prune_dqcache(). Hence we must @@ -629,11 +634,9 @@ int dquot_writeback_dquots(struct super_block *sb, int type) while (!list_empty(dirty)) { dquot = list_first_entry(dirty, struct dquot, dq_dirty); - /* Dirty and inactive can be only bad dquot... */ - if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { - clear_dquot_dirty(dquot); - continue; - } + + WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)); + /* Now we have active dquot from which someone is * holding reference so we can safely just increase * use count */ @@ -759,12 +762,12 @@ we_slept: /* Releasing dquot during quotaoff phase? */ if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) && atomic_read(&dquot->dq_count) == 1) - wake_up(&dquot->dq_wait_unused); + wake_up(&dquot_ref_wq); spin_unlock(&dq_list_lock); return; } /* Need to release dquot? */ - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { + if (dquot_dirty(dquot)) { spin_unlock(&dq_list_lock); /* Commit dquot before releasing */ ret = dquot->dq_sb->dq_op->write_dquot(dquot); @@ -776,14 +779,10 @@ we_slept: * We clear dirty bit anyway, so that we avoid * infinite loop here */ - spin_lock(&dq_list_lock); clear_dquot_dirty(dquot); - spin_unlock(&dq_list_lock); } goto we_slept; } - /* Clear flag in case dquot was inactive (something bad happened) */ - clear_dquot_dirty(dquot); if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { spin_unlock(&dq_list_lock); dquot->dq_sb->dq_op->release_dquot(dquot); @@ -818,10 +817,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) INIT_LIST_HEAD(&dquot->dq_inuse); INIT_HLIST_NODE(&dquot->dq_hash); INIT_LIST_HEAD(&dquot->dq_dirty); - init_waitqueue_head(&dquot->dq_wait_unused); dquot->dq_sb = sb; dquot->dq_id = make_kqid_invalid(type); atomic_set(&dquot->dq_count, 1); + spin_lock_init(&dquot->dq_dqb_lock); return dquot; } @@ -1079,42 +1078,6 @@ static void drop_dquot_ref(struct super_block *sb, int type) } } -static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_curinodes += number; -} - -static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_curspace += number; -} - -static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) -{ - dquot->dq_dqb.dqb_rsvspace += number; -} - -/* - * Claim reserved quota space - */ -static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number) -{ - if (dquot->dq_dqb.dqb_rsvspace < number) { - WARN_ON_ONCE(1); - number = dquot->dq_dqb.dqb_rsvspace; - } - dquot->dq_dqb.dqb_curspace += number; - dquot->dq_dqb.dqb_rsvspace -= number; -} - -static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number) -{ - if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number)) - number = dquot->dq_dqb.dqb_curspace; - dquot->dq_dqb.dqb_rsvspace += number; - dquot->dq_dqb.dqb_curspace -= number; -} - static inline void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) { @@ -1278,21 +1241,24 @@ static int ignore_hardlimit(struct dquot 
*dquot) !(info->dqi_flags & DQF_ROOT_SQUASH)); } -/* needs dq_data_lock */ -static int check_idq(struct dquot *dquot, qsize_t inodes, - struct dquot_warn *warn) +static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes, + struct dquot_warn *warn) { - qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes; + qsize_t newinodes; + int ret = 0; + spin_lock(&dquot->dq_dqb_lock); + newinodes = dquot->dq_dqb.dqb_curinodes + inodes; if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) - return 0; + goto add; if (dquot->dq_dqb.dqb_ihardlimit && newinodes > dquot->dq_dqb.dqb_ihardlimit && !ignore_hardlimit(dquot)) { prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN); - return -EDQUOT; + ret = -EDQUOT; + goto out; } if (dquot->dq_dqb.dqb_isoftlimit && @@ -1301,7 +1267,8 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime && !ignore_hardlimit(dquot)) { prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN); - return -EDQUOT; + ret = -EDQUOT; + goto out; } if (dquot->dq_dqb.dqb_isoftlimit && @@ -1311,30 +1278,40 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace; } +add: + dquot->dq_dqb.dqb_curinodes = newinodes; - return 0; +out: + spin_unlock(&dquot->dq_dqb_lock); + return ret; } -/* needs dq_data_lock */ -static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, - struct dquot_warn *warn) +static int dquot_add_space(struct dquot *dquot, qsize_t space, + qsize_t rsv_space, unsigned int flags, + struct dquot_warn *warn) { qsize_t tspace; struct super_block *sb = dquot->dq_sb; + int ret = 0; + spin_lock(&dquot->dq_dqb_lock); if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) - return 0; + goto add; tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace - + space; + + space + rsv_space; + + if (flags & DQUOT_SPACE_NOFAIL) + goto add; if (dquot->dq_dqb.dqb_bhardlimit && tspace > dquot->dq_dqb.dqb_bhardlimit && !ignore_hardlimit(dquot)) { - if (!prealloc) + if (flags & DQUOT_SPACE_WARN) prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); - return -EDQUOT; + ret = -EDQUOT; + goto out; } if (dquot->dq_dqb.dqb_bsoftlimit && @@ -1342,28 +1319,34 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, dquot->dq_dqb.dqb_btime && ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime && !ignore_hardlimit(dquot)) { - if (!prealloc) + if (flags & DQUOT_SPACE_WARN) prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); - return -EDQUOT; + ret = -EDQUOT; + goto out; } if (dquot->dq_dqb.dqb_bsoftlimit && tspace > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime == 0) { - if (!prealloc) { + if (flags & DQUOT_SPACE_WARN) { prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN); dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() + sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace; - } - else + } else { /* * We don't allow preallocation to exceed softlimit so exceeding will * be always printed */ - return -EDQUOT; + ret = -EDQUOT; + goto out; + } } - - return 0; +add: + dquot->dq_dqb.dqb_rsvspace += rsv_space; + dquot->dq_dqb.dqb_curspace += space; +out: + spin_unlock(&dquot->dq_dqb_lock); + return ret; } static int info_idq_free(struct dquot *dquot, qsize_t inodes) @@ -1502,8 +1485,15 @@ static int __dquot_initialize(struct inode *inode, int type) * did a write before quota was turned on */ rsv = 
inode_get_rsv_space(inode); - if (unlikely(rsv)) - dquot_resv_space(dquots[cnt], rsv); + if (unlikely(rsv)) { + spin_lock(&inode->i_lock); + /* Get reservation again under proper lock */ + rsv = __inode_get_rsv_space(inode); + spin_lock(&dquots[cnt]->dq_dqb_lock); + dquots[cnt]->dq_dqb.dqb_rsvspace += rsv; + spin_unlock(&dquots[cnt]->dq_dqb_lock); + spin_unlock(&inode->i_lock); + } } } out_lock: @@ -1598,39 +1588,12 @@ static qsize_t *inode_reserved_space(struct inode * inode) return inode->i_sb->dq_op->get_reserved_space(inode); } -void inode_add_rsv_space(struct inode *inode, qsize_t number) -{ - spin_lock(&inode->i_lock); - *inode_reserved_space(inode) += number; - spin_unlock(&inode->i_lock); -} -EXPORT_SYMBOL(inode_add_rsv_space); - -void inode_claim_rsv_space(struct inode *inode, qsize_t number) +static qsize_t __inode_get_rsv_space(struct inode *inode) { - spin_lock(&inode->i_lock); - *inode_reserved_space(inode) -= number; - __inode_add_bytes(inode, number); - spin_unlock(&inode->i_lock); -} -EXPORT_SYMBOL(inode_claim_rsv_space); - -void inode_reclaim_rsv_space(struct inode *inode, qsize_t number) -{ - spin_lock(&inode->i_lock); - *inode_reserved_space(inode) += number; - __inode_sub_bytes(inode, number); - spin_unlock(&inode->i_lock); -} -EXPORT_SYMBOL(inode_reclaim_rsv_space); - -void inode_sub_rsv_space(struct inode *inode, qsize_t number) -{ - spin_lock(&inode->i_lock); - *inode_reserved_space(inode) -= number; - spin_unlock(&inode->i_lock); + if (!inode->i_sb->dq_op->get_reserved_space) + return 0; + return *inode_reserved_space(inode); } -EXPORT_SYMBOL(inode_sub_rsv_space); static qsize_t inode_get_rsv_space(struct inode *inode) { @@ -1639,28 +1602,11 @@ static qsize_t inode_get_rsv_space(struct inode *inode) if (!inode->i_sb->dq_op->get_reserved_space) return 0; spin_lock(&inode->i_lock); - ret = *inode_reserved_space(inode); + ret = __inode_get_rsv_space(inode); spin_unlock(&inode->i_lock); return ret; } -static void inode_incr_space(struct inode *inode, qsize_t number, - int reserve) -{ - if (reserve) - inode_add_rsv_space(inode, number); - else - inode_add_bytes(inode, number); -} - -static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) -{ - if (reserve) - inode_sub_rsv_space(inode, number); - else - inode_sub_bytes(inode, number); -} - /* * This functions updates i_blocks+i_bytes fields and quota information * (together with appropriate checks). 
@@ -1682,7 +1628,13 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) struct dquot **dquots; if (!dquot_active(inode)) { - inode_incr_space(inode, number, reserve); + if (reserve) { + spin_lock(&inode->i_lock); + *inode_reserved_space(inode) += number; + spin_unlock(&inode->i_lock); + } else { + inode_add_bytes(inode, number); + } goto out; } @@ -1691,27 +1643,41 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); - spin_lock(&dq_data_lock); + spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!dquots[cnt]) continue; - ret = check_bdq(dquots[cnt], number, - !(flags & DQUOT_SPACE_WARN), &warn[cnt]); - if (ret && !(flags & DQUOT_SPACE_NOFAIL)) { - spin_unlock(&dq_data_lock); + if (flags & DQUOT_SPACE_RESERVE) { + ret = dquot_add_space(dquots[cnt], 0, number, flags, + &warn[cnt]); + } else { + ret = dquot_add_space(dquots[cnt], number, 0, flags, + &warn[cnt]); + } + if (ret) { + /* Back out changes we already did */ + for (cnt--; cnt >= 0; cnt--) { + if (!dquots[cnt]) + continue; + spin_lock(&dquots[cnt]->dq_dqb_lock); + if (flags & DQUOT_SPACE_RESERVE) { + dquots[cnt]->dq_dqb.dqb_rsvspace -= + number; + } else { + dquots[cnt]->dq_dqb.dqb_curspace -= + number; + } + spin_unlock(&dquots[cnt]->dq_dqb_lock); + } + spin_unlock(&inode->i_lock); goto out_flush_warn; } } - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (!dquots[cnt]) - continue; - if (reserve) - dquot_resv_space(dquots[cnt], number); - else - dquot_incr_space(dquots[cnt], number); - } - inode_incr_space(inode, number, reserve); - spin_unlock(&dq_data_lock); + if (reserve) + *inode_reserved_space(inode) += number; + else + __inode_add_bytes(inode, number); + spin_unlock(&inode->i_lock); if (reserve) goto out_flush_warn; @@ -1740,23 +1706,26 @@ int dquot_alloc_inode(struct inode *inode) dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); - spin_lock(&dq_data_lock); + spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!dquots[cnt]) continue; - ret = check_idq(dquots[cnt], 1, &warn[cnt]); - if (ret) + ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]); + if (ret) { + for (cnt--; cnt >= 0; cnt--) { + if (!dquots[cnt]) + continue; + /* Back out changes we already did */ + spin_lock(&dquots[cnt]->dq_dqb_lock); + dquots[cnt]->dq_dqb.dqb_curinodes--; + spin_unlock(&dquots[cnt]->dq_dqb_lock); + } goto warn_put_all; - } - - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (!dquots[cnt]) - continue; - dquot_incr_inodes(dquots[cnt], 1); + } } warn_put_all: - spin_unlock(&dq_data_lock); + spin_unlock(&inode->i_lock); if (ret == 0) mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); @@ -1774,21 +1743,33 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) int cnt, index; if (!dquot_active(inode)) { - inode_claim_rsv_space(inode, number); + spin_lock(&inode->i_lock); + *inode_reserved_space(inode) -= number; + __inode_add_bytes(inode, number); + spin_unlock(&inode->i_lock); return 0; } dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); - spin_lock(&dq_data_lock); + spin_lock(&inode->i_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (dquots[cnt]) - dquot_claim_reserved_space(dquots[cnt], number); + if (dquots[cnt]) { + struct dquot *dquot = dquots[cnt]; + + spin_lock(&dquot->dq_dqb_lock); + if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number)) + number = dquot->dq_dqb.dqb_rsvspace; + 
dquot->dq_dqb.dqb_curspace += number; + dquot->dq_dqb.dqb_rsvspace -= number; + spin_unlock(&dquot->dq_dqb_lock); + } } /* Update inode bytes */ - inode_claim_rsv_space(inode, number); - spin_unlock(&dq_data_lock); + *inode_reserved_space(inode) -= number; + __inode_add_bytes(inode, number); + spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); return 0; @@ -1804,21 +1785,33 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) int cnt, index; if (!dquot_active(inode)) { - inode_reclaim_rsv_space(inode, number); + spin_lock(&inode->i_lock); + *inode_reserved_space(inode) += number; + __inode_sub_bytes(inode, number); + spin_unlock(&inode->i_lock); return; } dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); - spin_lock(&dq_data_lock); + spin_lock(&inode->i_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (dquots[cnt]) - dquot_reclaim_reserved_space(dquots[cnt], number); + if (dquots[cnt]) { + struct dquot *dquot = dquots[cnt]; + + spin_lock(&dquot->dq_dqb_lock); + if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number)) + number = dquot->dq_dqb.dqb_curspace; + dquot->dq_dqb.dqb_rsvspace += number; + dquot->dq_dqb.dqb_curspace -= number; + spin_unlock(&dquot->dq_dqb_lock); + } } /* Update inode bytes */ - inode_reclaim_rsv_space(inode, number); - spin_unlock(&dq_data_lock); + *inode_reserved_space(inode) += number; + __inode_sub_bytes(inode, number); + spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); return; @@ -1836,19 +1829,26 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) int reserve = flags & DQUOT_SPACE_RESERVE, index; if (!dquot_active(inode)) { - inode_decr_space(inode, number, reserve); + if (reserve) { + spin_lock(&inode->i_lock); + *inode_reserved_space(inode) -= number; + spin_unlock(&inode->i_lock); + } else { + inode_sub_bytes(inode, number); + } return; } dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); - spin_lock(&dq_data_lock); + spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { int wtype; warn[cnt].w_type = QUOTA_NL_NOWARN; if (!dquots[cnt]) continue; + spin_lock(&dquots[cnt]->dq_dqb_lock); wtype = info_bdq_free(dquots[cnt], number); if (wtype != QUOTA_NL_NOWARN) prepare_warning(&warn[cnt], dquots[cnt], wtype); @@ -1856,9 +1856,13 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) dquot_free_reserved_space(dquots[cnt], number); else dquot_decr_space(dquots[cnt], number); + spin_unlock(&dquots[cnt]->dq_dqb_lock); } - inode_decr_space(inode, number, reserve); - spin_unlock(&dq_data_lock); + if (reserve) + *inode_reserved_space(inode) -= number; + else + __inode_sub_bytes(inode, number); + spin_unlock(&inode->i_lock); if (reserve) goto out_unlock; @@ -1884,19 +1888,21 @@ void dquot_free_inode(struct inode *inode) dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); - spin_lock(&dq_data_lock); + spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { int wtype; warn[cnt].w_type = QUOTA_NL_NOWARN; if (!dquots[cnt]) continue; + spin_lock(&dquots[cnt]->dq_dqb_lock); wtype = info_idq_free(dquots[cnt], 1); if (wtype != QUOTA_NL_NOWARN) prepare_warning(&warn[cnt], dquots[cnt], wtype); dquot_decr_inodes(dquots[cnt], 1); + spin_unlock(&dquots[cnt]->dq_dqb_lock); } - spin_unlock(&dq_data_lock); + spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); @@ 
-1917,7 +1923,7 @@ EXPORT_SYMBOL(dquot_free_inode); */ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) { - qsize_t space, cur_space; + qsize_t cur_space; qsize_t rsv_space = 0; qsize_t inode_usage = 1; struct dquot *transfer_from[MAXQUOTAS] = {}; @@ -1944,14 +1950,18 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) } spin_lock(&dq_data_lock); + spin_lock(&inode->i_lock); if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ + spin_unlock(&inode->i_lock); spin_unlock(&dq_data_lock); return 0; } - cur_space = inode_get_bytes(inode); - rsv_space = inode_get_rsv_space(inode); - space = cur_space + rsv_space; - /* Build the transfer_from list and check the limits */ + cur_space = __inode_get_bytes(inode); + rsv_space = __inode_get_rsv_space(inode); + /* + * Build the transfer_from list, check limits, and update usage in + * the target structures. + */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { /* * Skip changes for same uid or gid or for turned off quota-type. @@ -1963,28 +1973,33 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) continue; is_valid[cnt] = 1; transfer_from[cnt] = i_dquot(inode)[cnt]; - ret = check_idq(transfer_to[cnt], inode_usage, &warn_to[cnt]); + ret = dquot_add_inodes(transfer_to[cnt], inode_usage, + &warn_to[cnt]); if (ret) goto over_quota; - ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]); - if (ret) + ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0, + &warn_to[cnt]); + if (ret) { + dquot_decr_inodes(transfer_to[cnt], inode_usage); goto over_quota; + } } - /* - * Finally perform the needed transfer from transfer_from to transfer_to - */ + /* Decrease usage for source structures and update quota pointers */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!is_valid[cnt]) continue; /* Due to IO error we might not have transfer_from[] structure */ if (transfer_from[cnt]) { int wtype; + + spin_lock(&transfer_from[cnt]->dq_dqb_lock); wtype = info_idq_free(transfer_from[cnt], inode_usage); if (wtype != QUOTA_NL_NOWARN) prepare_warning(&warn_from_inodes[cnt], transfer_from[cnt], wtype); - wtype = info_bdq_free(transfer_from[cnt], space); + wtype = info_bdq_free(transfer_from[cnt], + cur_space + rsv_space); if (wtype != QUOTA_NL_NOWARN) prepare_warning(&warn_from_space[cnt], transfer_from[cnt], wtype); @@ -1992,14 +2007,11 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) dquot_decr_space(transfer_from[cnt], cur_space); dquot_free_reserved_space(transfer_from[cnt], rsv_space); + spin_unlock(&transfer_from[cnt]->dq_dqb_lock); } - - dquot_incr_inodes(transfer_to[cnt], inode_usage); - dquot_incr_space(transfer_to[cnt], cur_space); - dquot_resv_space(transfer_to[cnt], rsv_space); - i_dquot(inode)[cnt] = transfer_to[cnt]; } + spin_unlock(&inode->i_lock); spin_unlock(&dq_data_lock); mark_all_dquot_dirty(transfer_from); @@ -2013,6 +2025,17 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) transfer_to[cnt] = transfer_from[cnt]; return 0; over_quota: + /* Back out changes we already did */ + for (cnt--; cnt >= 0; cnt--) { + if (!is_valid[cnt]) + continue; + spin_lock(&transfer_to[cnt]->dq_dqb_lock); + dquot_decr_inodes(transfer_to[cnt], inode_usage); + dquot_decr_space(transfer_to[cnt], cur_space); + dquot_free_reserved_space(transfer_to[cnt], rsv_space); + spin_unlock(&transfer_to[cnt]->dq_dqb_lock); + } + spin_unlock(&inode->i_lock); spin_unlock(&dq_data_lock); flush_warnings(warn_to); return ret; @@ -2066,29 +2089,21 @@ 
EXPORT_SYMBOL(dquot_transfer); */ int dquot_commit_info(struct super_block *sb, int type) { - int ret; struct quota_info *dqopt = sb_dqopt(sb); - mutex_lock(&dqopt->dqio_mutex); - ret = dqopt->ops[type]->write_file_info(sb, type); - mutex_unlock(&dqopt->dqio_mutex); - return ret; + return dqopt->ops[type]->write_file_info(sb, type); } EXPORT_SYMBOL(dquot_commit_info); int dquot_get_next_id(struct super_block *sb, struct kqid *qid) { struct quota_info *dqopt = sb_dqopt(sb); - int err; if (!sb_has_quota_active(sb, qid->type)) return -ESRCH; if (!dqopt->ops[qid->type]->get_next_id) return -ENOSYS; - mutex_lock(&dqopt->dqio_mutex); - err = dqopt->ops[qid->type]->get_next_id(sb, qid); - mutex_unlock(&dqopt->dqio_mutex); - return err; + return dqopt->ops[qid->type]->get_next_id(sb, qid); } EXPORT_SYMBOL(dquot_get_next_id); @@ -2337,15 +2352,14 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, dqopt->info[type].dqi_format = fmt; dqopt->info[type].dqi_fmt_id = format_id; INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); - mutex_lock(&dqopt->dqio_mutex); error = dqopt->ops[type]->read_file_info(sb, type); - if (error < 0) { - mutex_unlock(&dqopt->dqio_mutex); + if (error < 0) goto out_file_init; - } - if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) + if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) { + spin_lock(&dq_data_lock); dqopt->info[type].dqi_flags |= DQF_SYS_FILE; - mutex_unlock(&dqopt->dqio_mutex); + spin_unlock(&dq_data_lock); + } spin_lock(&dq_state_lock); dqopt->flags |= dquot_state_flag(flags, type); spin_unlock(&dq_state_lock); @@ -2572,7 +2586,7 @@ static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) struct mem_dqblk *dm = &dquot->dq_dqb; memset(di, 0, sizeof(*di)); - spin_lock(&dq_data_lock); + spin_lock(&dquot->dq_dqb_lock); di->d_spc_hardlimit = dm->dqb_bhardlimit; di->d_spc_softlimit = dm->dqb_bsoftlimit; di->d_ino_hardlimit = dm->dqb_ihardlimit; @@ -2581,7 +2595,7 @@ static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) di->d_ino_count = dm->dqb_curinodes; di->d_spc_timer = dm->dqb_btime; di->d_ino_timer = dm->dqb_itime; - spin_unlock(&dq_data_lock); + spin_unlock(&dquot->dq_dqb_lock); } int dquot_get_dqblk(struct super_block *sb, struct kqid qid, @@ -2645,7 +2659,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) (di->d_ino_hardlimit > dqi->dqi_max_ino_limit))) return -ERANGE; - spin_lock(&dq_data_lock); + spin_lock(&dquot->dq_dqb_lock); if (di->d_fieldmask & QC_SPACE) { dm->dqb_curspace = di->d_space - dm->dqb_rsvspace; check_blim = 1; @@ -2711,7 +2725,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) clear_bit(DQ_FAKE_B, &dquot->dq_flags); else set_bit(DQ_FAKE_B, &dquot->dq_flags); - spin_unlock(&dq_data_lock); + spin_unlock(&dquot->dq_dqb_lock); mark_dquot_dirty(dquot); return 0; diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 0738972e8d3f..bb3f59bcfcf5 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -379,7 +379,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) if (!ddquot) return -ENOMEM; - /* dq_off is guarded by dqio_mutex */ + /* dq_off is guarded by dqio_sem */ if (!dquot->dq_off) { ret = dq_insert_tree(info, dquot); if (ret < 0) { @@ -389,9 +389,9 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) return ret; } } - spin_lock(&dq_data_lock); + spin_lock(&dquot->dq_dqb_lock); info->dqi_ops->mem2disk_dqblk(ddquot, dquot); - spin_unlock(&dq_data_lock); + spin_unlock(&dquot->dq_dqb_lock); ret = 
sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size, dquot->dq_off); if (ret != info->dqi_entry_size) { @@ -649,14 +649,14 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) kfree(ddquot); goto out; } - spin_lock(&dq_data_lock); + spin_lock(&dquot->dq_dqb_lock); info->dqi_ops->disk2mem_dqblk(dquot, ddquot); if (!dquot->dq_dqb.dqb_bhardlimit && !dquot->dq_dqb.dqb_bsoftlimit && !dquot->dq_dqb.dqb_ihardlimit && !dquot->dq_dqb.dqb_isoftlimit) set_bit(DQ_FAKE_B, &dquot->dq_flags); - spin_unlock(&dq_data_lock); + spin_unlock(&dquot->dq_dqb_lock); kfree(ddquot); out: dqstats_inc(DQST_READS); diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c index 8fe79beced5c..7ac5298aba70 100644 --- a/fs/quota/quota_v1.c +++ b/fs/quota/quota_v1.c @@ -56,8 +56,9 @@ static int v1_read_dqblk(struct dquot *dquot) { int type = dquot->dq_id.type; struct v1_disk_dqblk dqblk; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - if (!sb_dqopt(dquot->dq_sb)->files[type]) + if (!dqopt->files[type]) return -EINVAL; /* Set structure to 0s in case read fails/is after end of file */ @@ -160,6 +161,7 @@ static int v1_read_file_info(struct super_block *sb, int type) struct v1_disk_dqblk dqblk; int ret; + down_read(&dqopt->dqio_sem); ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0)); if (ret != sizeof(struct v1_disk_dqblk)) { @@ -176,6 +178,7 @@ static int v1_read_file_info(struct super_block *sb, int type) dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME; out: + up_read(&dqopt->dqio_sem); return ret; } @@ -185,7 +188,7 @@ static int v1_write_file_info(struct super_block *sb, int type) struct v1_disk_dqblk dqblk; int ret; - dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; + down_write(&dqopt->dqio_sem); ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0)); if (ret != sizeof(struct v1_disk_dqblk)) { @@ -193,8 +196,11 @@ static int v1_write_file_info(struct super_block *sb, int type) ret = -EIO; goto out; } + spin_lock(&dq_data_lock); + dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; dqblk.dqb_itime = dqopt->info[type].dqi_igrace; dqblk.dqb_btime = dqopt->info[type].dqi_bgrace; + spin_unlock(&dq_data_lock); ret = sb->s_op->quota_write(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0)); if (ret == sizeof(struct v1_disk_dqblk)) @@ -202,6 +208,7 @@ static int v1_write_file_info(struct super_block *sb, int type) else if (ret > 0) ret = -EIO; out: + up_write(&dqopt->dqio_sem); return ret; } diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index 2259329616b7..c0187cda2c1e 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -65,9 +65,11 @@ static int v2_read_header(struct super_block *sb, int type, if (size != sizeof(struct v2_disk_dqheader)) { quota_error(sb, "Failed header read: expected=%zd got=%zd", sizeof(struct v2_disk_dqheader), size); - return 0; + if (size < 0) + return size; + return -EIO; } - return 1; + return 0; } /* Check whether given file is really vfsv0 quotafile */ @@ -77,7 +79,7 @@ static int v2_check_quota_file(struct super_block *sb, int type) static const uint quota_magics[] = V2_INITQMAGICS; static const uint quota_versions[] = V2_INITQVERSIONS; - if (!v2_read_header(sb, type, &dqhead)) + if (v2_read_header(sb, type, &dqhead)) return 0; if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || le32_to_cpu(dqhead.dqh_version) > quota_versions[type]) @@ -90,28 +92,39 @@ static int v2_read_file_info(struct super_block *sb, int 
type) { struct v2_disk_dqinfo dinfo; struct v2_disk_dqheader dqhead; - struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct quota_info *dqopt = sb_dqopt(sb); + struct mem_dqinfo *info = &dqopt->info[type]; struct qtree_mem_dqinfo *qinfo; ssize_t size; unsigned int version; + int ret; - if (!v2_read_header(sb, type, &dqhead)) - return -1; + down_read(&dqopt->dqio_sem); + ret = v2_read_header(sb, type, &dqhead); + if (ret < 0) + goto out; version = le32_to_cpu(dqhead.dqh_version); if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) || - (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1)) - return -1; + (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1)) { + ret = -EINVAL; + goto out; + } size = sb->s_op->quota_read(sb, type, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); if (size != sizeof(struct v2_disk_dqinfo)) { quota_error(sb, "Can't read info structure"); - return -1; + if (size < 0) + ret = size; + else + ret = -EIO; + goto out; } info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); - if (!info->dqi_priv) - return -ENOMEM; - + if (!info->dqi_priv) { + ret = -ENOMEM; + goto out; + } qinfo = info->dqi_priv; if (version == 0) { /* limits are stored as unsigned 32-bit data */ @@ -145,17 +158,22 @@ static int v2_read_file_info(struct super_block *sb, int type) qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk); qinfo->dqi_ops = &v2r1_qtree_ops; } - return 0; + ret = 0; +out: + up_read(&dqopt->dqio_sem); + return ret; } /* Write information header to quota file */ static int v2_write_file_info(struct super_block *sb, int type) { struct v2_disk_dqinfo dinfo; - struct mem_dqinfo *info = sb_dqinfo(sb, type); + struct quota_info *dqopt = sb_dqopt(sb); + struct mem_dqinfo *info = &dqopt->info[type]; struct qtree_mem_dqinfo *qinfo = info->dqi_priv; ssize_t size; + down_write(&dqopt->dqio_sem); spin_lock(&dq_data_lock); info->dqi_flags &= ~DQF_INFO_DIRTY; dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); @@ -168,6 +186,7 @@ static int v2_write_file_info(struct super_block *sb, int type) dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); size = sb->s_op->quota_write(sb, type, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); + up_write(&dqopt->dqio_sem); if (size != sizeof(struct v2_disk_dqinfo)) { quota_error(sb, "Can't write info structure"); return -1; @@ -283,17 +302,51 @@ static int v2r1_is_id(void *dp, struct dquot *dquot) static int v2_read_dquot(struct dquot *dquot) { - return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + int ret; + + down_read(&dqopt->dqio_sem); + ret = qtree_read_dquot( + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, + dquot); + up_read(&dqopt->dqio_sem); + return ret; } static int v2_write_dquot(struct dquot *dquot) { - return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + int ret; + bool alloc = false; + + /* + * If space for dquot is already allocated, we don't need any + * protection as we'll only overwrite the place of dquot. We are + * still protected by concurrent writes of the same dquot by + * dquot->dq_lock. 
+ */ + if (!dquot->dq_off) { + alloc = true; + down_write(&dqopt->dqio_sem); + } + ret = qtree_write_dquot( + sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, + dquot); + if (alloc) + up_write(&dqopt->dqio_sem); + return ret; } static int v2_release_dquot(struct dquot *dquot) { - return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + int ret; + + down_write(&dqopt->dqio_sem); + ret = qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); + up_write(&dqopt->dqio_sem); + + return ret; } static int v2_free_file_info(struct super_block *sb, int type) @@ -304,7 +357,13 @@ static int v2_free_file_info(struct super_block *sb, int type) static int v2_get_next_id(struct super_block *sb, struct kqid *qid) { - return qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid); + struct quota_info *dqopt = sb_dqopt(sb); + int ret; + + down_read(&dqopt->dqio_sem); + ret = qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid); + up_read(&dqopt->dqio_sem); + return ret; } static const struct quota_format_ops v2_format_ops = { diff --git a/fs/stat.c b/fs/stat.c index c35610845ab1..8a6aa8caf891 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -710,7 +710,7 @@ loff_t inode_get_bytes(struct inode *inode) loff_t ret; spin_lock(&inode->i_lock); - ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes; + ret = __inode_get_bytes(inode); spin_unlock(&inode->i_lock); return ret; } diff --git a/fs/super.c b/fs/super.c index 6bc3352adcf3..221cfa1f4e92 100644 --- a/fs/super.c +++ b/fs/super.c @@ -242,7 +242,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags, atomic_set(&s->s_active, 1); mutex_init(&s->s_vfs_rename_mutex); lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); - mutex_init(&s->s_dquot.dqio_mutex); + init_rwsem(&s->s_dquot.dqio_sem); s->s_maxbytes = MAX_NON_LFS; s->s_op = &default_op; s->s_time_gran = 1000000000; |
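One of the listed commits ("quota: Allow disabling tracking of dirty dquots in a list") introduces DQUOT_NOLIST_DIRTY: with the flag set, dquot_mark_dquot_dirty() and clear_dquot_dirty() reduce to atomic bit operations on dq_flags instead of list manipulation under dq_list_lock, and ext4 enables it when quotas are journalled since those are never written back by walking the dirty list. A small C11 sketch of bit-only dirty tracking (mask-based and with illustrative names; the kernel uses test_and_set_bit() on the DQ_MOD_B bit number):

```c
#include <stdatomic.h>
#include <stdbool.h>

#define DQ_MOD_MASK 0x1u	/* illustrative stand-in for DQ_MOD_B */

struct dquot_sketch {
	atomic_uint flags;
};

/* Returns whether the dquot was already dirty, mirroring the
 * test_and_set_bit() result dquot_mark_dquot_dirty() returns in
 * DQUOT_NOLIST_DIRTY mode -- no global lock, no dirty list. */
bool mark_dirty_nolist(struct dquot_sketch *dq)
{
	return atomic_fetch_or(&dq->flags, DQ_MOD_MASK) & DQ_MOD_MASK;
}

/* Returns whether the dquot had been dirty, i.e. whether a writeback
 * is actually needed. */
bool clear_dirty_nolist(struct dquot_sketch *dq)
{
	return atomic_fetch_and(&dq->flags, ~DQ_MOD_MASK) & DQ_MOD_MASK;
}
```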