-rw-r--r-- Documentation/DocBook/kernel-locking.tmpl | 22
-rw-r--r-- Documentation/mutex-design.txt | 135
-rw-r--r-- arch/i386/mm/pageattr.c | 4
-rw-r--r-- arch/powerpc/platforms/cell/spufs/inode.c | 12
-rw-r--r-- drivers/block/loop.c | 31
-rw-r--r-- drivers/block/sx8.c | 12
-rw-r--r-- drivers/char/mem.c | 4
-rw-r--r-- drivers/char/sysrq.c | 19
-rw-r--r-- drivers/char/watchdog/cpu5wdt.c | 9
-rw-r--r-- drivers/ide/ide-probe.c | 4
-rw-r--r-- drivers/ide/ide.c | 8
-rw-r--r-- drivers/isdn/capi/capifs.c | 6
-rw-r--r-- drivers/md/dm.c | 4
-rw-r--r-- drivers/md/md.c | 8
-rw-r--r-- drivers/pci/proc.c | 4
-rw-r--r-- drivers/usb/core/inode.c | 28
-rw-r--r-- drivers/usb/gadget/file_storage.c | 4
-rw-r--r-- drivers/usb/gadget/inode.c | 4
-rw-r--r-- fs/affs/inode.c | 4
-rw-r--r-- fs/autofs/root.c | 4
-rw-r--r-- fs/autofs4/root.c | 4
-rw-r--r-- fs/binfmt_misc.c | 12
-rw-r--r-- fs/block_dev.c | 4
-rw-r--r-- fs/buffer.c | 6
-rw-r--r-- fs/cifs/cifsfs.c | 6
-rw-r--r-- fs/cifs/inode.c | 8
-rw-r--r-- fs/coda/dir.c | 4
-rw-r--r-- fs/coda/file.c | 8
-rw-r--r-- fs/configfs/dir.c | 54
-rw-r--r-- fs/configfs/file.c | 4
-rw-r--r-- fs/configfs/inode.c | 6
-rw-r--r-- fs/debugfs/inode.c | 8
-rw-r--r-- fs/devfs/base.c | 22
-rw-r--r-- fs/devpts/inode.c | 8
-rw-r--r-- fs/direct-io.c | 30
-rw-r--r-- fs/dquot.c | 16
-rw-r--r-- fs/exportfs/expfs.c | 12
-rw-r--r-- fs/ext2/acl.c | 10
-rw-r--r-- fs/ext2/ext2.h | 2
-rw-r--r-- fs/ext2/super.c | 4
-rw-r--r-- fs/ext2/xattr.c | 2
-rw-r--r-- fs/ext3/acl.c | 10
-rw-r--r-- fs/ext3/super.c | 6
-rw-r--r-- fs/ext3/xattr.c | 2
-rw-r--r-- fs/fat/dir.c | 4
-rw-r--r-- fs/fat/file.c | 4
-rw-r--r-- fs/fifo.c | 6
-rw-r--r-- fs/fuse/file.c | 4
-rw-r--r-- fs/hfs/inode.c | 4
-rw-r--r-- fs/hfsplus/bitmap.c | 8
-rw-r--r-- fs/hfsplus/inode.c | 4
-rw-r--r-- fs/hpfs/dir.c | 6
-rw-r--r-- fs/hppfs/hppfs_kern.c | 6
-rw-r--r-- fs/hugetlbfs/inode.c | 4
-rw-r--r-- fs/inode.c | 2
-rw-r--r-- fs/jffs/inode-v23.c | 2
-rw-r--r-- fs/jfs/jfs_incore.h | 4
-rw-r--r-- fs/libfs.c | 8
-rw-r--r-- fs/namei.c | 82
-rw-r--r-- fs/namespace.c | 12
-rw-r--r-- fs/nfs/dir.c | 10
-rw-r--r-- fs/nfsd/nfs4recover.c | 20
-rw-r--r-- fs/nfsd/vfs.c | 12
-rw-r--r-- fs/ntfs/attrib.c | 4
-rw-r--r-- fs/ntfs/dir.c | 8
-rw-r--r-- fs/ntfs/file.c | 18
-rw-r--r-- fs/ntfs/index.c | 6
-rw-r--r-- fs/ntfs/inode.c | 8
-rw-r--r-- fs/ntfs/namei.c | 6
-rw-r--r-- fs/ntfs/quota.c | 6
-rw-r--r-- fs/ntfs/super.c | 16
-rw-r--r-- fs/ocfs2/alloc.c | 24
-rw-r--r-- fs/ocfs2/cluster/nodemanager.c | 2
-rw-r--r-- fs/ocfs2/dir.c | 4
-rw-r--r-- fs/ocfs2/file.c | 8
-rw-r--r-- fs/ocfs2/inode.c | 12
-rw-r--r-- fs/ocfs2/journal.c | 14
-rw-r--r-- fs/ocfs2/localalloc.c | 6
-rw-r--r-- fs/ocfs2/super.c | 2
-rw-r--r-- fs/open.c | 24
-rw-r--r-- fs/pipe.c | 44
-rw-r--r-- fs/quota.c | 6
-rw-r--r-- fs/read_write.c | 4
-rw-r--r-- fs/readdir.c | 4
-rw-r--r-- fs/reiserfs/file.c | 10
-rw-r--r-- fs/reiserfs/inode.c | 14
-rw-r--r-- fs/reiserfs/ioctl.c | 4
-rw-r--r-- fs/reiserfs/super.c | 4
-rw-r--r-- fs/reiserfs/tail_conversion.c | 2
-rw-r--r-- fs/reiserfs/xattr.c | 34
-rw-r--r-- fs/reiserfs/xattr_acl.c | 6
-rw-r--r-- fs/relayfs/inode.c | 12
-rw-r--r-- fs/super.c | 2
-rw-r--r-- fs/sysfs/dir.c | 31
-rw-r--r-- fs/sysfs/file.c | 17
-rw-r--r-- fs/sysfs/inode.c | 8
-rw-r--r-- fs/sysfs/symlink.c | 5
-rw-r--r-- fs/ufs/super.c | 6
-rw-r--r-- fs/xattr.c | 8
-rw-r--r-- fs/xfs/linux-2.6/mutex.h | 10
-rw-r--r-- fs/xfs/linux-2.6/xfs_iops.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_lrw.c | 18
-rw-r--r-- fs/xfs/quota/xfs_dquot.c | 4
-rw-r--r-- fs/xfs/quota/xfs_qm.c | 10
-rw-r--r-- fs/xfs/quota/xfs_qm.h | 2
-rw-r--r-- fs/xfs/quota/xfs_qm_bhv.c | 2
-rw-r--r-- fs/xfs/quota/xfs_qm_syscalls.c | 8
-rw-r--r-- fs/xfs/quota/xfs_quota_priv.h | 2
-rw-r--r-- fs/xfs/support/uuid.c | 6
-rw-r--r-- fs/xfs/xfs_dmapi.h | 14
-rw-r--r-- fs/xfs/xfs_mount.c | 2
-rw-r--r-- fs/xfs/xfs_mount.h | 2
-rw-r--r-- include/asm-alpha/atomic.h | 1
-rw-r--r-- include/asm-alpha/mutex.h | 9
-rw-r--r-- include/asm-arm/atomic.h | 2
-rw-r--r-- include/asm-arm/mutex.h | 128
-rw-r--r-- include/asm-arm26/atomic.h | 2
-rw-r--r-- include/asm-cris/atomic.h | 2
-rw-r--r-- include/asm-cris/mutex.h | 9
-rw-r--r-- include/asm-frv/atomic.h | 1
-rw-r--r-- include/asm-frv/mutex.h | 9
-rw-r--r-- include/asm-generic/mutex-dec.h | 110
-rw-r--r-- include/asm-generic/mutex-null.h | 24
-rw-r--r-- include/asm-generic/mutex-xchg.h | 117
-rw-r--r-- include/asm-h8300/atomic.h | 2
-rw-r--r-- include/asm-h8300/mutex.h | 9
-rw-r--r-- include/asm-i386/atomic.h | 1
-rw-r--r-- include/asm-i386/mutex.h | 124
-rw-r--r-- include/asm-ia64/atomic.h | 1
-rw-r--r-- include/asm-ia64/mutex.h | 9
-rw-r--r-- include/asm-m32r/atomic.h | 1
-rw-r--r-- include/asm-m32r/mutex.h | 9
-rw-r--r-- include/asm-m68k/atomic.h | 1
-rw-r--r-- include/asm-m68k/mutex.h | 9
-rw-r--r-- include/asm-m68knommu/atomic.h | 1
-rw-r--r-- include/asm-m68knommu/mutex.h | 9
-rw-r--r-- include/asm-mips/atomic.h | 1
-rw-r--r-- include/asm-mips/mutex.h | 9
-rw-r--r-- include/asm-parisc/atomic.h | 1
-rw-r--r-- include/asm-parisc/mutex.h | 9
-rw-r--r-- include/asm-powerpc/atomic.h | 1
-rw-r--r-- include/asm-powerpc/mutex.h | 9
-rw-r--r-- include/asm-s390/atomic.h | 2
-rw-r--r-- include/asm-s390/mutex.h | 9
-rw-r--r-- include/asm-sh/atomic.h | 2
-rw-r--r-- include/asm-sh/mutex.h | 9
-rw-r--r-- include/asm-sh64/atomic.h | 2
-rw-r--r-- include/asm-sh64/mutex.h | 9
-rw-r--r-- include/asm-sparc/atomic.h | 1
-rw-r--r-- include/asm-sparc/mutex.h | 9
-rw-r--r-- include/asm-sparc64/atomic.h | 1
-rw-r--r-- include/asm-sparc64/mutex.h | 9
-rw-r--r-- include/asm-um/mutex.h | 9
-rw-r--r-- include/asm-v850/atomic.h | 2
-rw-r--r-- include/asm-v850/mutex.h | 9
-rw-r--r-- include/asm-x86_64/atomic.h | 1
-rw-r--r-- include/asm-x86_64/mutex.h | 113
-rw-r--r-- include/asm-xtensa/atomic.h | 1
-rw-r--r-- include/asm-xtensa/mutex.h | 9
-rw-r--r-- include/linux/ext3_fs_i.h | 2
-rw-r--r-- include/linux/fs.h | 13
-rw-r--r-- include/linux/ide.h | 5
-rw-r--r-- include/linux/jffs2_fs_i.h | 4
-rw-r--r-- include/linux/kernel.h | 9
-rw-r--r-- include/linux/loop.h | 4
-rw-r--r-- include/linux/mm.h | 4
-rw-r--r-- include/linux/mutex-debug.h | 21
-rw-r--r-- include/linux/mutex.h | 119
-rw-r--r-- include/linux/nfsd/nfsfh.h | 6
-rw-r--r-- include/linux/pipe_fs_i.h | 2
-rw-r--r-- include/linux/reiserfs_fs.h | 2
-rw-r--r-- include/linux/sched.h | 5
-rw-r--r-- ipc/mqueue.c | 8
-rw-r--r-- kernel/Makefile | 3
-rw-r--r-- kernel/cpuset.c | 10
-rw-r--r-- kernel/exit.c | 5
-rw-r--r-- kernel/fork.c | 4
-rw-r--r-- kernel/mutex-debug.c | 464
-rw-r--r-- kernel/mutex-debug.h | 134
-rw-r--r-- kernel/mutex.c | 325
-rw-r--r-- kernel/mutex.h | 35
-rw-r--r-- kernel/sched.c | 1
-rw-r--r-- lib/Kconfig.debug | 8
-rw-r--r-- mm/filemap.c | 30
-rw-r--r-- mm/filemap_xip.c | 6
-rw-r--r-- mm/memory.c | 4
-rw-r--r-- mm/msync.c | 2
-rw-r--r-- mm/page_alloc.c | 3
-rw-r--r-- mm/rmap.c | 8
-rw-r--r-- mm/shmem.c | 6
-rw-r--r-- mm/slab.c | 1
-rw-r--r-- mm/swapfile.c | 8
-rw-r--r-- mm/truncate.c | 2
-rw-r--r-- net/sunrpc/rpc_pipe.c | 58
-rw-r--r-- net/unix/af_unix.c | 4
-rw-r--r-- security/inode.c | 8
-rw-r--r-- sound/core/oss/pcm_oss.c | 2
-rw-r--r-- sound/core/seq/seq_memory.c | 4
198 files changed, 2754 insertions, 649 deletions
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 90dc2de8e0af..158ffe9bfade 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -222,7 +222,7 @@
<title>Two Main Types of Kernel Locks: Spinlocks and Semaphores</title>
<para>
- There are two main types of kernel locks. The fundamental type
+ There are three main types of kernel locks. The fundamental type
is the spinlock
(<filename class="headerfile">include/asm/spinlock.h</filename>),
which is a very simple single-holder lock: if you can't get the
@@ -230,16 +230,22 @@
very small and fast, and can be used anywhere.
</para>
<para>
- The second type is a semaphore
+ The second type is a mutex
+ (<filename class="headerfile">include/linux/mutex.h</filename>): it
+ is like a spinlock, but you may block holding a mutex.
+ If you can't lock a mutex, your task will suspend itself, and be woken
+ up when the mutex is released. This means the CPU can do something
+ else while you are waiting. There are many cases when you simply
+ can't sleep (see <xref linkend="sleeping-things"/>), and so have to
+ use a spinlock instead.
+ </para>
+ <para>
+ The third type is a semaphore
(<filename class="headerfile">include/asm/semaphore.h</filename>): it
can have more than one holder at any time (the number decided at
initialization time), although it is most commonly used as a
- single-holder lock (a mutex). If you can't get a semaphore,
- your task will put itself on the queue, and be woken up when the
- semaphore is released. This means the CPU will do something
- else while you are waiting, but there are many cases when you
- simply can't sleep (see <xref linkend="sleeping-things"/>), and so
- have to use a spinlock instead.
+ single-holder lock (a mutex). If you can't get a semaphore, your
+ task will be suspended and later on woken up - just like for mutexes.
</para>
<para>
Neither type of lock is recursive: see
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
new file mode 100644
index 000000000000..cbf79881a41c
--- /dev/null
+++ b/Documentation/mutex-design.txt
@@ -0,0 +1,135 @@
+Generic Mutex Subsystem
+
+started by Ingo Molnar <mingo@redhat.com>
+
+ "Why on earth do we need a new mutex subsystem, and what's wrong
+ with semaphores?"
+
+firstly, there's nothing wrong with semaphores. But if the simpler
+mutex semantics are sufficient for your code, then there are a couple
+of advantages of mutexes:
+
+ - 'struct mutex' is smaller on most architectures: e.g. on x86,
+ 'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
+ A smaller structure size means less RAM footprint, and better
+ CPU-cache utilization.
+
+ - tighter code. On x86 I get the following .text sizes when
+ switching all mutex-alike semaphores in the kernel to the mutex
+ subsystem:
+
+ text data bss dec hex filename
+ 3280380 868188 396860 4545428 455b94 vmlinux-semaphore
+ 3255329 865296 396732 4517357 44eded vmlinux-mutex
+
+ that's 25051 bytes of code saved, or a 0.76% win - off the hottest
+ codepaths of the kernel. (The .data savings are 2892 bytes, or 0.33%)
+ Smaller code means better icache footprint, which is one of the
+ major optimization goals in the Linux kernel currently.
+
+ - the mutex subsystem is slightly faster and has better scalability for
+ contended workloads. On an 8-way x86 system, running a mutex-based
+ kernel and testing creat+unlink+close (of separate, per-task files)
+ in /tmp with 16 parallel tasks, the average number of ops/sec is:
+
+ Semaphores: Mutexes:
+
+ $ ./test-mutex V 16 10 $ ./test-mutex V 16 10
+ 8 CPUs, running 16 tasks. 8 CPUs, running 16 tasks.
+ checking VFS performance. checking VFS performance.
+ avg loops/sec: 34713 avg loops/sec: 84153
+ CPU utilization: 63% CPU utilization: 22%
+
+ i.e. in this workload, the mutex based kernel was 2.4 times faster
+ than the semaphore based kernel, _and_ it also had 2.8 times less CPU
+ utilization. (In terms of 'ops per CPU cycle', the semaphore kernel
+ performed 551 ops/sec per 1% of CPU time used, while the mutex kernel
+ performed 3825 ops/sec per 1% of CPU time used - it was 6.9 times
+ more efficient.)
+
+ the scalability difference is visible even on a 2-way P4 HT box:
+
+ Semaphores: Mutexes:
+
+ $ ./test-mutex V 16 10 $ ./test-mutex V 16 10
+ 4 CPUs, running 16 tasks. 4 CPUs, running 16 tasks.
+ checking VFS performance. checking VFS performance.
+ avg loops/sec: 127659 avg loops/sec: 181082
+ CPU utilization: 100% CPU utilization: 34%
+
+ (the straight performance advantage of mutexes is 41%, the per-cycle
+ efficiency of mutexes is 4.1 times better.)
+
+ - there are no fastpath tradeoffs, the mutex fastpath is just as tight
+ as the semaphore fastpath. On x86, the locking fastpath is 2
+ instructions:
+
+ c0377ccb <mutex_lock>:
+ c0377ccb: f0 ff 08 lock decl (%eax)
+ c0377cce: 78 0e js c0377cde <.text.lock.mutex>
+ c0377cd0: c3 ret
+
+ the unlocking fastpath is equally tight:
+
+ c0377cd1 <mutex_unlock>:
+ c0377cd1: f0 ff 00 lock incl (%eax)
+ c0377cd4: 7e 0f jle c0377ce5 <.text.lock.mutex+0x7>
+ c0377cd6: c3 ret
+
+ - 'struct mutex' semantics are well-defined and are enforced if
+ CONFIG_DEBUG_MUTEXES is turned on. Semaphores on the other hand have
+ virtually no debugging code or instrumentation. The mutex subsystem
+ checks and enforces the following rules:
+
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in irq contexts
+
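+ as a quick illustration, here is a hedged sketch (not code from this
+ patch) of the kind of usage the debug code is designed to catch:
+
+	static DEFINE_MUTEX(lock);	/* correctly initialized via the API */
+
+	static void broken_usage(void)	/* hypothetical example function */
+	{
+		mutex_lock(&lock);
+		mutex_lock(&lock);	/* caught: recursive locking */
+		mutex_unlock(&lock);
+		mutex_unlock(&lock);	/* caught: multiple unlocks */
+	}
+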
+ furthermore, there are also convenience features in the debugging
+ code:
+
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+
+Disadvantages
+-------------
+
+The stricter mutex API means you cannot use mutexes the same way you
+can use semaphores: e.g. they cannot be used from an interrupt context,
+nor can they be unlocked from a context other than the one that
+acquired them. [ I'm not aware of any other (e.g. performance) disadvantages from
+using mutexes at the moment, please let me know if you find any. ]
+
+Implementation of mutexes
+-------------------------
+
+'struct mutex' is the new mutex type, defined in include/linux/mutex.h
+and implemented in kernel/mutex.c. It is a counter-based mutex with a
+spinlock and a wait-list. The counter has 3 states: 1 for "unlocked",
+0 for "locked" and negative numbers (usually -1) for "locked, potential
+waiters queued".
+
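+in rough C terms, the fastpath semantics can be sketched like this (a
+simplified model only - the real fastpath is the atomic instruction
+sequence shown above, and the slowpath helper names are illustrative):
+
+	/* lock: 1 -> 0 means we now own it; going negative means contention */
+	if (atomic_dec_return(&lock->count) < 0)
+		__mutex_lock_slowpath(lock);	/* queue on the wait-list, sleep */
+
+	/* unlock: 0 -> 1 is the fast case; <= 0 means waiters are queued */
+	if (atomic_inc_return(&lock->count) <= 0)
+		__mutex_unlock_slowpath(lock);	/* wake up a queued waiter */
+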
+the APIs of 'struct mutex' have been streamlined:
+
+ DEFINE_MUTEX(name);
+
+ mutex_init(mutex);
+
+ void mutex_lock(struct mutex *lock);
+ int mutex_lock_interruptible(struct mutex *lock);
+ int mutex_trylock(struct mutex *lock);
+ void mutex_unlock(struct mutex *lock);
+ int mutex_is_locked(struct mutex *lock);
+
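+a minimal usage sketch (illustrative only - the my_mutex and example_op
+names are made up for this document):
+
+	static DEFINE_MUTEX(my_mutex);
+
+	int example_op(void)
+	{
+		if (mutex_lock_interruptible(&my_mutex))
+			return -EINTR;	/* interrupted while sleeping */
+		/* ... critical section - it is fine to block here ... */
+		mutex_unlock(&my_mutex);	/* only the owner may unlock */
+		return 0;
+	}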
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index c30a16df6440..e8a53552b13d 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -222,6 +222,10 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
+ if (!enable)
+ mutex_debug_check_no_locks_freed(page_address(page),
+ page_address(page+numpages));
+
/* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.
*/
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1f3507c75e90..d2ba358c6e38 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -137,7 +137,7 @@ spufs_delete_inode(struct inode *inode)
static void spufs_prune_dir(struct dentry *dir)
{
struct dentry *dentry, *tmp;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
@@ -154,7 +154,7 @@ static void spufs_prune_dir(struct dentry *dir)
}
}
shrink_dcache_parent(dir);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
}
static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
@@ -162,15 +162,15 @@ static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
struct spu_context *ctx;
/* remove all entries */
- down(&root->i_sem);
+ mutex_lock(&root->i_mutex);
spufs_prune_dir(dir_dentry);
- up(&root->i_sem);
+ mutex_unlock(&root->i_mutex);
/* We have to give up the mm_struct */
ctx = SPUFS_I(dir_dentry->d_inode)->i_ctx;
spu_forget(ctx);
- /* XXX Do we need to hold i_sem here ? */
+ /* XXX Do we need to hold i_mutex here ? */
return simple_rmdir(root, dir_dentry);
}
@@ -330,7 +330,7 @@ long spufs_create_thread(struct nameidata *nd,
out_dput:
dput(dentry);
out_dir:
- up(&nd->dentry->d_inode->i_sem);
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
return ret;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a452b13620a2..864729046e22 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -215,7 +215,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
unsigned offset, bv_offs;
int len, ret;
- down(&mapping->host->i_sem);
+ mutex_lock(&mapping->host->i_mutex);
index = pos >> PAGE_CACHE_SHIFT;
offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
bv_offs = bvec->bv_offset;
@@ -278,7 +278,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
}
ret = 0;
out:
- up(&mapping->host->i_sem);
+ mutex_unlock(&mapping->host->i_mutex);
return ret;
unlock:
unlock_page(page);
@@ -527,12 +527,12 @@ static int loop_make_request(request_queue_t *q, struct bio *old_bio)
lo->lo_pending++;
loop_add_bio(lo, old_bio);
spin_unlock_irq(&lo->lo_lock);
- up(&lo->lo_bh_mutex);
+ complete(&lo->lo_bh_done);
return 0;
out:
if (lo->lo_pending == 0)
- up(&lo->lo_bh_mutex);
+ complete(&lo->lo_bh_done);
spin_unlock_irq(&lo->lo_lock);
bio_io_error(old_bio, old_bio->bi_size);
return 0;
@@ -593,23 +593,20 @@ static int loop_thread(void *data)
lo->lo_pending = 1;
/*
- * up sem, we are running
+ * complete it, we are running
*/
- up(&lo->lo_sem);
+ complete(&lo->lo_done);
for (;;) {
int pending;
- /*
- * interruptible just to not contribute to load avg
- */
- if (down_interruptible(&lo->lo_bh_mutex))
+ if (wait_for_completion_interruptible(&lo->lo_bh_done))
continue;
spin_lock_irq(&lo->lo_lock);
/*
- * could be upped because of tear-down, not pending work
+ * could be completed because of tear-down, not pending work
*/
if (unlikely(!lo->lo_pending)) {
spin_unlock_irq(&lo->lo_lock);
@@ -632,7 +629,7 @@ static int loop_thread(void *data)
break;
}
- up(&lo->lo_sem);
+ complete(&lo->lo_done);
return 0;
}
@@ -843,7 +840,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
set_blocksize(bdev, lo_blocksize);
kernel_thread(loop_thread, lo, CLONE_KERNEL);
- down(&lo->lo_sem);
+ wait_for_completion(&lo->lo_done);
return 0;
out_putf:
@@ -909,10 +906,10 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
lo->lo_state = Lo_rundown;
lo->lo_pending--;
if (!lo->lo_pending)
- up(&lo->lo_bh_mutex);
+ complete(&lo->lo_bh_done);
spin_unlock_irq(&lo->lo_lock);
- down(&lo->lo_sem);
+ wait_for_completion(&lo->lo_done);
lo->lo_backing_file = NULL;
@@ -1289,8 +1286,8 @@ static int __init loop_init(void)
if (!lo->lo_queue)
goto out_mem4;
init_MUTEX(&lo->lo_ctl_mutex);
- init_MUTEX_LOCKED(&lo->lo_sem);
- init_MUTEX_LOCKED(&lo->lo_bh_mutex);
+ init_completion(&lo->lo_done);
+ init_completion(&lo->lo_bh_done);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index c0cdc182a8b0..4bdf95716e2b 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -27,8 +27,8 @@
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
+#include <linux/completion.h>
#include <asm/io.h>
-#include <asm/semaphore.h>
#include <asm/uaccess.h>
#if 0
@@ -303,7 +303,7 @@ struct carm_host {
struct work_struct fsm_task;
- struct semaphore probe_sem;
+ struct completion probe_comp;
};
struct carm_response {
@@ -1346,7 +1346,7 @@ static void carm_fsm_task (void *_data)
}
case HST_PROBE_FINISHED:
- up(&host->probe_sem);
+ complete(&host->probe_comp);
break;
case HST_ERROR:
@@ -1622,7 +1622,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
INIT_WORK(&host->fsm_task, carm_fsm_task, host);
- init_MUTEX_LOCKED(&host->probe_sem);
+ init_completion(&host->probe_comp);
for (i = 0; i < ARRAY_SIZE(host->req); i++)
host->req[i].tag = i;
@@ -1691,8 +1691,8 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out_free_irq;
- DPRINTK("waiting for probe_sem\n");
- down(&host->probe_sem);
+ DPRINTK("waiting for probe_comp\n");
+ wait_for_completion(&host->probe_comp);
printk(KERN_INFO "%s: pci %s, ports %d, io %lx, irq %u, major %d\n",
host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 5b2d18035073..704c3c07f0ab 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -741,7 +741,7 @@ static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
loff_t ret;
- down(&file->f_dentry->d_inode->i_sem);
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (orig) {
case 0:
file->f_pos = offset;
@@ -756,7 +756,7 @@ static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
default:
ret = -EINVAL;
}
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return ret;
}
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 145275ebdd7e..5765f672e853 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -153,6 +153,21 @@ static struct sysrq_key_op sysrq_mountro_op = {
/* END SYNC SYSRQ HANDLERS BLOCK */
+#ifdef CONFIG_DEBUG_MUTEXES
+
+static void
+sysrq_handle_showlocks(int key, struct pt_regs *pt_regs, struct tty_struct *tty)
+{
+ mutex_debug_show_all_locks();
+}
+
+static struct sysrq_key_op sysrq_showlocks_op = {
+ .handler = sysrq_handle_showlocks,
+ .help_msg = "show-all-locks(D)",
+ .action_msg = "Show Locks Held",
+};
+
+#endif
/* SHOW SYSRQ HANDLERS BLOCK */
@@ -294,7 +309,11 @@ static struct sysrq_key_op *sysrq_key_table[SYSRQ_KEY_TABLE_LENGTH] = {
#else
/* c */ NULL,
#endif
+#ifdef CONFIG_DEBUG_MUTEXES
+/* d */ &sysrq_showlocks_op,
+#else
/* d */ NULL,
+#endif
/* e */ &sysrq_term_op,
/* f */ &sysrq_moom_op,
/* g */ NULL,
diff --git a/drivers/char/watchdog/cpu5wdt.c b/drivers/char/watchdog/cpu5wdt.c
index e75045fe2641..3e8410b5a65e 100644
--- a/drivers/char/watchdog/cpu5wdt.c
+++ b/drivers/char/watchdog/cpu5wdt.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
+#include <linux/completion.h>
#include <linux/jiffies.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -57,7 +58,7 @@ static int ticks = 10000;
/* some device data */
static struct {
- struct semaphore stop;
+ struct completion stop;
volatile int running;
struct timer_list timer;
volatile int queue;
@@ -85,7 +86,7 @@ static void cpu5wdt_trigger(unsigned long unused)
}
else {
/* ticks doesn't matter anyway */
- up(&cpu5wdt_device.stop);
+ complete(&cpu5wdt_device.stop);
}
}
@@ -239,7 +240,7 @@ static int __devinit cpu5wdt_init(void)
if ( !val )
printk(KERN_INFO PFX "sorry, was my fault\n");
- init_MUTEX_LOCKED(&cpu5wdt_device.stop);
+ init_completion(&cpu5wdt_device.stop);
cpu5wdt_device.queue = 0;
clear_bit(0, &cpu5wdt_device.inuse);
@@ -269,7 +270,7 @@ static void __devexit cpu5wdt_exit(void)
{
if ( cpu5wdt_device.queue ) {
cpu5wdt_device.queue = 0;
- down(&cpu5wdt_device.stop);
+ wait_for_completion(&cpu5wdt_device.stop);
}
misc_deregister(&cpu5wdt_misc);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 1ddaa71a8f45..7cb2d86601db 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -655,7 +655,7 @@ static void hwif_release_dev (struct device *dev)
{
ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
- up(&hwif->gendev_rel_sem);
+ complete(&hwif->gendev_rel_comp);
}
static void hwif_register (ide_hwif_t *hwif)
@@ -1327,7 +1327,7 @@ static void drive_release_dev (struct device *dev)
drive->queue = NULL;
spin_unlock_irq(&ide_lock);
- up(&drive->gendev_rel_sem);
+ complete(&drive->gendev_rel_comp);
}
/*
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index b069b13b75a7..ec5a4cb173b0 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -222,7 +222,7 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
hwif->mwdma_mask = 0x80; /* disable all mwdma */
hwif->swdma_mask = 0x80; /* disable all swdma */
- sema_init(&hwif->gendev_rel_sem, 0);
+ init_completion(&hwif->gendev_rel_comp);
default_hwif_iops(hwif);
default_hwif_transport(hwif);
@@ -245,7 +245,7 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
drive->is_flash = 0;
drive->vdma = 0;
INIT_LIST_HEAD(&drive->list);
- sema_init(&drive->gendev_rel_sem, 0);
+ init_completion(&drive->gendev_rel_comp);
}
}
@@ -602,7 +602,7 @@ void ide_unregister(unsigned int index)
}
spin_unlock_irq(&ide_lock);
device_unregister(&drive->gendev);
- down(&drive->gendev_rel_sem);
+ wait_for_completion(&drive->gendev_rel_comp);
spin_lock_irq(&ide_lock);
}
hwif->present = 0;
@@ -662,7 +662,7 @@ void ide_unregister(unsigned int index)
/* More messed up locking ... */
spin_unlock_irq(&ide_lock);
device_unregister(&hwif->gendev);
- down(&hwif->gendev_rel_sem);
+ wait_for_completion(&hwif->gendev_rel_comp);
/*
* Remove us from the kernel's knowledge
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 207cae366256..0a37aded4b54 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -138,7 +138,7 @@ static struct dentry *get_node(int num)
{
char s[10];
struct dentry *root = capifs_root;
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
return lookup_one_len(s, root, sprintf(s, "%d", num));
}
@@ -159,7 +159,7 @@ void capifs_new_ncci(unsigned int number, dev_t device)
dentry = get_node(number);
if (!IS_ERR(dentry) && !dentry->d_inode)
d_instantiate(dentry, inode);
- up(&capifs_root->d_inode->i_sem);
+ mutex_unlock(&capifs_root->d_inode->i_mutex);
}
void capifs_free_ncci(unsigned int number)
@@ -175,7 +175,7 @@ void capifs_free_ncci(unsigned int number)
}
dput(dentry);
}
- up(&capifs_root->d_inode->i_sem);
+ mutex_unlock(&capifs_root->d_inode->i_mutex);
}
static int __init capifs_init(void)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0e481512f918..5c210b0a4cb0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -837,9 +837,9 @@ static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
- down(&md->suspended_bdev->bd_inode->i_sem);
+ mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
- up(&md->suspended_bdev->bd_inode->i_sem);
+ mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e423a16ba3c9..0302723fa21f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3460,9 +3460,9 @@ static int update_size(mddev_t *mddev, unsigned long size)
bdev = bdget_disk(mddev->gendisk, 0);
if (bdev) {
- down(&bdev->bd_inode->i_sem);
+ mutex_lock(&bdev->bd_inode->i_mutex);
i_size_write(bdev->bd_inode, mddev->array_size << 10);
- up(&bdev->bd_inode->i_sem);
+ mutex_unlock(&bdev->bd_inode->i_mutex);
bdput(bdev);
}
}
@@ -3486,9 +3486,9 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
bdev = bdget_disk(mddev->gendisk, 0);
if (bdev) {
- down(&bdev->bd_inode->i_sem);
+ mutex_lock(&bdev->bd_inode->i_mutex);
i_size_write(bdev->bd_inode, mddev->array_size << 10);
- up(&bdev->bd_inode->i_sem);
+ mutex_unlock(&bdev->bd_inode->i_mutex);
bdput(bdev);
}
}
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 9eb465727fce..9cb6dd0834be 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -25,7 +25,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
loff_t new = -1;
struct inode *inode = file->f_dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
switch (whence) {
case 0:
new = off;
@@ -41,7 +41,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
new = -EINVAL;
else
file->f_pos = new;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return new;
}
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 4ddc453023a2..3cf945cc5b9a 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -184,13 +184,13 @@ static void update_bus(struct dentry *bus)
bus->d_inode->i_gid = busgid;
bus->d_inode->i_mode = S_IFDIR | busmode;
- down(&bus->d_inode->i_sem);
+ mutex_lock(&bus->d_inode->i_mutex);
list_for_each_entry(dev, &bus->d_subdirs, d_u.d_child)
if (dev->d_inode)
update_dev(dev);
- up(&bus->d_inode->i_sem);
+ mutex_unlock(&bus->d_inode->i_mutex);
}
static void update_sb(struct super_block *sb)
@@ -201,7 +201,7 @@ static void update_sb(struct super_block *sb)
if (!root)
return;
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
list_for_each_entry(bus, &root->d_subdirs, d_u.d_child) {
if (bus->d_inode) {
@@ -219,7 +219,7 @@ static void update_sb(struct super_block *sb)
}
}
- up(&root->d_inode->i_sem);
+ mutex_unlock(&root->d_inode->i_mutex);
}
static int remount(struct super_block *sb, int *flags, char *data)
@@ -333,10 +333,10 @@ static int usbfs_empty (struct dentry *dentry)
static int usbfs_unlink (struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
dentry->d_inode->i_nlink--;
dput(dentry);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
d_delete(dentry);
return 0;
}
@@ -346,7 +346,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
int error = -ENOTEMPTY;
struct inode * inode = dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
dentry_unhash(dentry);
if (usbfs_empty(dentry)) {
dentry->d_inode->i_nlink -= 2;
@@ -355,7 +355,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
dir->i_nlink--;
error = 0;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (!error)
d_delete(dentry);
dput(dentry);
@@ -380,7 +380,7 @@ static loff_t default_file_lseek (struct file *file, loff_t offset, int orig)
{
loff_t retval = -EINVAL;
- down(&file->f_dentry->d_inode->i_sem);
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch(orig) {
case 0:
if (offset > 0) {
@@ -397,7 +397,7 @@ static loff_t default_file_lseek (struct file *file, loff_t offset, int orig)
default:
break;
}
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return retval;
}
@@ -480,7 +480,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
}
*dentry = NULL;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
@@ -489,7 +489,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
error = usbfs_create (parent->d_inode, *dentry, mode);
} else
error = PTR_ERR(dentry);
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
return error;
}
@@ -528,7 +528,7 @@ static void fs_remove_file (struct dentry *dentry)
if (!parent || !parent->d_inode)
return;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
if (usbfs_positive(dentry)) {
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
@@ -538,7 +538,7 @@ static void fs_remove_file (struct dentry *dentry)
dput(dentry);
}
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
}
/* --------------------------------------------------------------------- */
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 0cea9782d7d4..de59c58896d6 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1891,7 +1891,7 @@ static int fsync_sub(struct lun *curlun)
return -EINVAL;
inode = filp->f_dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
current->flags |= PF_SYNCWRITE;
rc = filemap_fdatawrite(inode->i_mapping);
err = filp->f_op->fsync(filp, filp->f_dentry, 1);
@@ -1901,7 +1901,7 @@ static int fsync_sub(struct lun *curlun)
if (!rc)
rc = err;
current->flags &= ~PF_SYNCWRITE;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
VLDBG(curlun, "fdatasync -> %d\n", rc);
return rc;
}
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 5c40980a5bd9..c6c279de832e 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1562,10 +1562,10 @@ restart:
spin_unlock_irq (&dev->lock);
/* break link to dcache */
- down (&parent->i_sem);
+ mutex_lock (&parent->i_mutex);
d_delete (dentry);
dput (dentry);
- up (&parent->i_sem);
+ mutex_unlock (&parent->i_mutex);
/* fds may still be open */
goto restart;
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 9ebe881c6786..44d439cb69f4 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -244,10 +244,10 @@ affs_put_inode(struct inode *inode)
pr_debug("AFFS: put_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
affs_free_prealloc(inode);
if (atomic_read(&inode->i_count) == 1) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (inode->i_size != AFFS_I(inode)->mmu_private)
affs_truncate(inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
}
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index a1ab1c0ed215..808134a5a2fa 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -229,9 +229,9 @@ static struct dentry *autofs_root_lookup(struct inode *dir, struct dentry *dentr
dentry->d_flags |= DCACHE_AUTOFS_PENDING;
d_add(dentry, NULL);
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
autofs_revalidate(dentry, nd);
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
/*
* If we are still pending, check if we had to handle
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2241405ffc41..541b19e6fec9 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -489,9 +489,9 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
d_add(dentry, NULL);
if (dentry->d_op && dentry->d_op->d_revalidate) {
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
(dentry->d_op->d_revalidate)(dentry, nd);
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
}
/*
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 2568eb41cb3a..9ccc7d8275b8 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -588,11 +588,11 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
case 2: set_bit(Enabled, &e->flags);
break;
case 3: root = dget(file->f_vfsmnt->mnt_sb->s_root);
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
kill_node(e);
- up(&root->d_inode->i_sem);
+ mutex_unlock(&root->d_inode->i_mutex);
dput(root);
break;
default: return res;
@@ -622,7 +622,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
return PTR_ERR(e);
root = dget(sb->s_root);
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
dentry = lookup_one_len(e->name, root, strlen(e->name));
err = PTR_ERR(dentry);
if (IS_ERR(dentry))
@@ -658,7 +658,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
out2:
dput(dentry);
out:
- up(&root->d_inode->i_sem);
+ mutex_unlock(&root->d_inode->i_mutex);
dput(root);
if (err) {
@@ -703,12 +703,12 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer,
case 1: enabled = 0; break;
case 2: enabled = 1; break;
case 3: root = dget(file->f_vfsmnt->mnt_sb->s_root);
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
while (!list_empty(&entries))
kill_node(list_entry(entries.next, Node, list));
- up(&root->d_inode->i_sem);
+ mutex_unlock(&root->d_inode->i_mutex);
dput(root);
default: return res;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e0df94c37b7e..6e50346fb1ee 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -202,7 +202,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
loff_t size;
loff_t retval;
- down(&bd_inode->i_sem);
+ mutex_lock(&bd_inode->i_mutex);
size = i_size_read(bd_inode);
switch (origin) {
@@ -219,7 +219,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
}
retval = offset;
}
- up(&bd_inode->i_sem);
+ mutex_unlock(&bd_inode->i_mutex);
return retval;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index 55f0975a9b15..6466bc8a3dc7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -352,11 +352,11 @@ static long do_fsync(unsigned int fd, int datasync)
* We need to protect against concurrent writers,
* which could cause livelocks in fsync_buffers_list
*/
- down(&mapping->host->i_sem);
+ mutex_lock(&mapping->host->i_mutex);
err = file->f_op->fsync(file, file->f_dentry, datasync);
if (!ret)
ret = err;
- up(&mapping->host->i_sem);
+ mutex_unlock(&mapping->host->i_mutex);
err = filemap_fdatawait(mapping);
if (!ret)
ret = err;
@@ -2338,7 +2338,7 @@ int generic_commit_write(struct file *file, struct page *page,
__block_commit_write(inode,page,from,to);
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_sem.
+ * cannot change under us because we hold i_mutex.
*/
if (pos > inode->i_size) {
i_size_write(inode, pos);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2a13a2bac8f1..e10213b7541e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -860,9 +860,9 @@ static int cifs_oplock_thread(void * dummyarg)
DeleteOplockQEntry(oplock_item);
/* can not grab inode sem here since it would
deadlock when oplock received on delete
- since vfs_unlink holds the i_sem across
+ since vfs_unlink holds the i_mutex across
the call */
- /* down(&inode->i_sem);*/
+ /* mutex_lock(&inode->i_mutex);*/
if (S_ISREG(inode->i_mode)) {
rc = filemap_fdatawrite(inode->i_mapping);
if(CIFS_I(inode)->clientCanCacheRead == 0) {
@@ -871,7 +871,7 @@ static int cifs_oplock_thread(void * dummyarg)
}
} else
rc = 0;
- /* up(&inode->i_sem);*/
+ /* mutex_unlock(&inode->i_mutex);*/
if (rc)
CIFS_I(inode)->write_behind_rc = rc;
cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9558f51bca55..3ebce9430f4a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1040,9 +1040,9 @@ int cifs_revalidate(struct dentry *direntry)
}
/* can not grab this sem since kernel filesys locking documentation
- indicates i_sem may be taken by the kernel on lookup and rename
- which could deadlock if we grab the i_sem here as well */
-/* down(&direntry->d_inode->i_sem);*/
+ indicates i_mutex may be taken by the kernel on lookup and rename
+ which could deadlock if we grab the i_mutex here as well */
+/* mutex_lock(&direntry->d_inode->i_mutex);*/
/* need to write out dirty pages here */
if (direntry->d_inode->i_mapping) {
/* do we need to lock inode until after invalidate completes
@@ -1066,7 +1066,7 @@ int cifs_revalidate(struct dentry *direntry)
}
}
}
-/* up(&direntry->d_inode->i_sem); */
+/* mutex_unlock(&direntry->d_inode->i_mutex); */
kfree(full_path);
FreeXid(xid);
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 2391766e9c7c..8f1a517f8b4e 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -453,7 +453,7 @@ int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
coda_vfs_stat.readdir++;
host_inode = host_file->f_dentry->d_inode;
- down(&host_inode->i_sem);
+ mutex_lock(&host_inode->i_mutex);
host_file->f_pos = coda_file->f_pos;
if (!host_file->f_op->readdir) {
@@ -475,7 +475,7 @@ int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
}
out:
coda_file->f_pos = host_file->f_pos;
- up(&host_inode->i_sem);
+ mutex_unlock(&host_inode->i_mutex);
return ret;
}
diff --git a/fs/coda/file.c b/fs/coda/file.c
index e6bc022568f3..30b4630bd735 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -77,14 +77,14 @@ coda_file_write(struct file *coda_file, const char __user *buf, size_t count, lo
return -EINVAL;
host_inode = host_file->f_dentry->d_inode;
- down(&coda_inode->i_sem);
+ mutex_lock(&coda_inode->i_mutex);
ret = host_file->f_op->write(host_file, buf, count, ppos);
coda_inode->i_size = host_inode->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC;
- up(&coda_inode->i_sem);
+ mutex_unlock(&coda_inode->i_mutex);
return ret;
}
@@ -272,9 +272,9 @@ int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
if (host_file->f_op && host_file->f_op->fsync) {
host_dentry = host_file->f_dentry;
host_inode = host_dentry->d_inode;
- down(&host_inode->i_sem);
+ mutex_lock(&host_inode->i_mutex);
err = host_file->f_op->fsync(host_file, host_dentry, datasync);
- up(&host_inode->i_sem);
+ mutex_unlock(&host_inode->i_mutex);
}
if ( !err && !datasync ) {
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index e48b539243a1..b668ec61527e 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -288,10 +288,10 @@ static struct dentry * configfs_lookup(struct inode *dir,
/*
* Only subdirectories count here. Files (CONFIGFS_NOT_PINNED) are
- * attributes and are removed by rmdir(). We recurse, taking i_sem
+ * attributes and are removed by rmdir(). We recurse, taking i_mutex
* on all children that are candidates for default detach. If the
* result is clean, then configfs_detach_group() will handle dropping
- * i_sem. If there is an error, the caller will clean up the i_sem
+ * i_mutex. If there is an error, the caller will clean up the i_mutex
* holders via configfs_detach_rollback().
*/
static int configfs_detach_prep(struct dentry *dentry)
@@ -309,8 +309,8 @@ static int configfs_detach_prep(struct dentry *dentry)
if (sd->s_type & CONFIGFS_NOT_PINNED)
continue;
if (sd->s_type & CONFIGFS_USET_DEFAULT) {
- down(&sd->s_dentry->d_inode->i_sem);
- /* Mark that we've taken i_sem */
+ mutex_lock(&sd->s_dentry->d_inode->i_mutex);
+ /* Mark that we've taken i_mutex */
sd->s_type |= CONFIGFS_USET_DROPPING;
ret = configfs_detach_prep(sd->s_dentry);
@@ -327,7 +327,7 @@ out:
}
/*
- * Walk the tree, dropping i_sem wherever CONFIGFS_USET_DROPPING is
+ * Walk the tree, dropping i_mutex wherever CONFIGFS_USET_DROPPING is
* set.
*/
static void configfs_detach_rollback(struct dentry *dentry)
@@ -341,7 +341,7 @@ static void configfs_detach_rollback(struct dentry *dentry)
if (sd->s_type & CONFIGFS_USET_DROPPING) {
sd->s_type &= ~CONFIGFS_USET_DROPPING;
- up(&sd->s_dentry->d_inode->i_sem);
+ mutex_unlock(&sd->s_dentry->d_inode->i_mutex);
}
}
}
@@ -424,11 +424,11 @@ static void detach_groups(struct config_group *group)
/*
* From rmdir/unregister, a configfs_detach_prep() pass
- * has taken our i_sem for us. Drop it.
+ * has taken our i_mutex for us. Drop it.
* From mkdir/register cleanup, there is no sem held.
*/
if (sd->s_type & CONFIGFS_USET_DROPPING)
- up(&child->d_inode->i_sem);
+ mutex_unlock(&child->d_inode->i_mutex);
d_delete(child);
dput(child);
@@ -493,11 +493,11 @@ static int populate_groups(struct config_group *group)
/* FYI, we're faking mkdir here
* I'm not sure we need this semaphore, as we're called
* from our parent's mkdir. That holds our parent's
- * i_sem, so afaik lookup cannot continue through our
+ * i_mutex, so afaik lookup cannot continue through our
* parent to find us, let alone mess with our tree.
- * That said, taking our i_sem is closer to mkdir
+ * That said, taking our i_mutex is closer to mkdir
* emulation, and shouldn't hurt. */
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
for (i = 0; group->default_groups[i]; i++) {
new_group = group->default_groups[i];
@@ -507,7 +507,7 @@ static int populate_groups(struct config_group *group)
break;
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
}
if (ret)
@@ -856,7 +856,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
down_write(&configfs_rename_sem);
parent = item->parent->dentry;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
if (!IS_ERR(new_dentry)) {
@@ -872,7 +872,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
error = -EEXIST;
dput(new_dentry);
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
up_write(&configfs_rename_sem);
return error;
@@ -884,9 +884,9 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_dentry;
struct configfs_dirent * parent_sd = dentry->d_fsdata;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
file->private_data = configfs_new_dirent(parent_sd, NULL);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return file->private_data ? 0 : -ENOMEM;
@@ -897,9 +897,9 @@ static int configfs_dir_close(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_dentry;
struct configfs_dirent * cursor = file->private_data;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
list_del_init(&cursor->s_sibling);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
release_configfs_dirent(cursor);
@@ -975,7 +975,7 @@ static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
{
struct dentry * dentry = file->f_dentry;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += file->f_pos;
@@ -983,7 +983,7 @@ static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
if (offset >= 0)
break;
default:
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -1007,7 +1007,7 @@ static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
list_add_tail(&cursor->s_sibling, p);
}
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return offset;
}
@@ -1037,7 +1037,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
sd = configfs_sb->s_root->d_fsdata;
link_group(to_config_group(sd->s_element), group);
- down(&configfs_sb->s_root->d_inode->i_sem);
+ mutex_lock(&configfs_sb->s_root->d_inode->i_mutex);
name.name = group->cg_item.ci_name;
name.len = strlen(name.name);
@@ -1057,7 +1057,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
else
d_delete(dentry);
- up(&configfs_sb->s_root->d_inode->i_sem);
+ mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex);
if (dentry) {
dput(dentry);
@@ -1079,18 +1079,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
return;
}
- down(&configfs_sb->s_root->d_inode->i_sem);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&configfs_sb->s_root->d_inode->i_mutex);
+ mutex_lock(&dentry->d_inode->i_mutex);
if (configfs_detach_prep(dentry)) {
printk(KERN_ERR "configfs: Tried to unregister non-empty subsystem!\n");
}
configfs_detach_group(&group->cg_item);
dentry->d_inode->i_flags |= S_DEAD;
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
d_delete(dentry);
- up(&configfs_sb->s_root->d_inode->i_sem);
+ mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex);
dput(dentry);
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index af1ffc9a15c0..c26cd61f13af 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -336,9 +336,9 @@ int configfs_add_file(struct dentry * dir, const struct configfs_attribute * att
umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
int error = 0;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return error;
}
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 6b274c6d428f..6577c588de9d 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -122,7 +122,7 @@ const unsigned char * configfs_get_name(struct configfs_dirent *sd)
/*
* Unhashes the dentry corresponding to given configfs_dirent
- * Called with parent inode's i_sem held.
+ * Called with parent inode's i_mutex held.
*/
void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
{
@@ -145,7 +145,7 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
struct configfs_dirent * sd;
struct configfs_dirent * parent_sd = dir->d_fsdata;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
continue;
@@ -156,7 +156,7 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
break;
}
}
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index a86ac4aeaedb..d4f1a2cddd47 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -146,7 +146,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
}
*dentry = NULL;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
@@ -155,7 +155,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
error = debugfs_create(parent->d_inode, *dentry, mode);
} else
error = PTR_ERR(dentry);
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
return error;
}
@@ -273,7 +273,7 @@ void debugfs_remove(struct dentry *dentry)
if (!parent || !parent->d_inode)
return;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
if (debugfs_positive(dentry)) {
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
@@ -283,7 +283,7 @@ void debugfs_remove(struct dentry *dentry)
dput(dentry);
}
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
EXPORT_SYMBOL_GPL(debugfs_remove);
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index 1274422a5384..b621521e09d4 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
@@ -2162,27 +2162,27 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
*
* make sure that
* d_instantiate always runs under lock
- * we release i_sem lock before going to sleep
+ * we release i_mutex lock before going to sleep
*
* unfortunately sometimes d_revalidate is called with
- * and sometimes without i_sem lock held. The following checks
+ * and sometimes without i_mutex lock held. The following checks
* attempt to deduce when we need to add (and drop resp.) lock
* here. This relies on current (2.6.2) calling conventions:
*
- * lookup_hash is always run under i_sem and is passing NULL
+ * lookup_hash is always run under i_mutex and is passing NULL
* as nd
*
- * open(...,O_CREATE,...) calls _lookup_hash under i_sem
+ * open(...,O_CREATE,...) calls _lookup_hash under i_mutex
* and sets flags to LOOKUP_OPEN|LOOKUP_CREATE
*
* all other invocations of ->d_revalidate seem to happen
- * outside of i_sem
+ * outside of i_mutex
*/
need_lock = nd &&
(!(nd->flags & LOOKUP_CREATE) || (nd->flags & LOOKUP_PARENT));
if (need_lock)
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
if (is_devfsd_or_child(fs_info)) {
devfs_handle_t de = lookup_info->de;
@@ -2221,9 +2221,9 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
add_wait_queue(&lookup_info->wait_queue, &wait);
read_unlock(&parent->u.dir.lock);
/* at this point it is always (hopefully) locked */
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
schedule();
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
/*
* This does not need nor should remove wait from wait_queue.
* Wait queue head is never reused - nothing is ever added to it
@@ -2238,7 +2238,7 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
out:
if (need_lock)
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
return 1;
} /* End Function devfs_d_revalidate_wait */
@@ -2284,9 +2284,9 @@ static struct dentry *devfs_lookup(struct inode *dir, struct dentry *dentry,
/* Unlock directory semaphore, which will release any waiters. They
will get the hashed dentry, and may be forced to wait for
revalidation */
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
wait_for_devfsd_finished(fs_info); /* If I'm not devfsd, must wait */
- down(&dir->i_sem); /* Grab it again because them's the rules */
+ mutex_lock(&dir->i_mutex); /* Grab it again because them's the rules */
de = lookup_info.de;
/* If someone else has been so kind as to make the inode, we go home
early */
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index f2be44d4491f..bfb8a230bac9 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -130,7 +130,7 @@ static struct dentry *get_node(int num)
{
char s[12];
struct dentry *root = devpts_root;
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
return lookup_one_len(s, root, sprintf(s, "%d", num));
}
@@ -161,7 +161,7 @@ int devpts_pty_new(struct tty_struct *tty)
if (!IS_ERR(dentry) && !dentry->d_inode)
d_instantiate(dentry, inode);
- up(&devpts_root->d_inode->i_sem);
+ mutex_unlock(&devpts_root->d_inode->i_mutex);
return 0;
}
@@ -178,7 +178,7 @@ struct tty_struct *devpts_get_tty(int number)
dput(dentry);
}
- up(&devpts_root->d_inode->i_sem);
+ mutex_unlock(&devpts_root->d_inode->i_mutex);
return tty;
}
@@ -196,7 +196,7 @@ void devpts_pty_kill(int number)
}
dput(dentry);
}
- up(&devpts_root->d_inode->i_sem);
+ mutex_unlock(&devpts_root->d_inode->i_mutex);
}
static int __init init_devpts_fs(void)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3931e7f1e6bf..30dbbd1df511 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -56,7 +56,7 @@
* lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
* This determines whether we need to do the fancy locking which prevents
* direct-IO from being able to read uninitialised disk blocks. If its zero
- * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_sem is
+ * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_mutex is
* not held for the entire direct write (taken briefly, initially, during a
* direct read though, but its never held for the duration of a direct-IO).
*/
@@ -930,7 +930,7 @@ out:
}
/*
- * Releases both i_sem and i_alloc_sem
+ * Releases both i_mutex and i_alloc_sem
*/
static ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
@@ -1062,11 +1062,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
/*
* All block lookups have been performed. For READ requests
- * we can let i_sem go now that its achieved its purpose
+ * we can let i_mutex go now that it's achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
- up(&dio->inode->i_sem);
+ mutex_unlock(&dio->inode->i_mutex);
/*
* OK, all BIOs are submitted, so we can decrement bio_count to truly
@@ -1145,18 +1145,18 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
* The locking rules are governed by the dio_lock_type parameter.
*
* DIO_NO_LOCKING (no locking, for raw block device access)
- * For writes, i_sem is not held on entry; it is never taken.
+ * For writes, i_mutex is not held on entry; it is never taken.
*
* DIO_LOCKING (simple locking for regular files)
- * For writes we are called under i_sem and return with i_sem held, even though
+ * For writes we are called under i_mutex and return with i_mutex held, even though
* it is internally dropped.
- * For reads, i_sem is not held on entry, but it is taken and dropped before
+ * For reads, i_mutex is not held on entry, but it is taken and dropped before
* returning.
*
* DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
* uninitialised data, allowing parallel direct readers and writers)
- * For writes we are called without i_sem, return without it, never touch it.
- * For reads, i_sem is held on entry and will be released before returning.
+ * For writes we are called without i_mutex, return without it, never touch it.
+ * For reads, i_mutex is held on entry and will be released before returning.
*
* Additional i_alloc_sem locking requirements described inline below.
*/
@@ -1214,11 +1214,11 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
* For block device access DIO_NO_LOCKING is used,
* neither readers nor writers do any locking at all
* For regular files using DIO_LOCKING,
- * readers need to grab i_sem and i_alloc_sem
- * writers need to grab i_alloc_sem only (i_sem is already held)
+ * readers need to grab i_mutex and i_alloc_sem
+ * writers need to grab i_alloc_sem only (i_mutex is already held)
* For regular files using DIO_OWN_LOCKING,
* neither readers nor writers take any locks here
- * (i_sem is already held and release for writers here)
+ * (i_mutex is already held and released for writers here)
*/
dio->lock_type = dio_lock_type;
if (dio_lock_type != DIO_NO_LOCKING) {
@@ -1228,7 +1228,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
mapping = iocb->ki_filp->f_mapping;
if (dio_lock_type != DIO_OWN_LOCKING) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
reader_with_isem = 1;
}
@@ -1240,7 +1240,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
if (dio_lock_type == DIO_OWN_LOCKING) {
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
reader_with_isem = 0;
}
}
@@ -1266,7 +1266,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
out:
if (reader_with_isem)
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (rw & WRITE)
current->flags &= ~PF_SYNCWRITE;
return retval;
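
Taken together, the direct-io.c hunks leave DIO_LOCKING with the following overall shape (a condensed sketch spanning __blockdev_direct_IO() and direct_io_worker(), not the literal code):

	mutex_lock(&inode->i_mutex);		/* taken in __blockdev_direct_IO()
						 * for DIO_LOCKING readers */
	retval = do_direct_IO(dio);		/* all block lookups happen here */
	if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
		mutex_unlock(&inode->i_mutex);	/* readers drop it once lookups
						 * are done; writers entered with
						 * it held and keep it */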
diff --git a/fs/dquot.c b/fs/dquot.c
index 2a62b3dc20ec..cb6d5bfbdfd5 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -100,7 +100,7 @@
* operation is just reading pointers from inode (or not using them at all) the
* read lock is enough. If pointers are altered function must hold write lock
* (these locking rules also apply for S_NOQUOTA flag in the inode - note that
- * for altering the flag i_sem is also needed). If operation is holding
+ * for altering the flag i_mutex is also needed). If operation is holding
* reference to dquot in other way (e.g. quotactl ops) it must be guarded by
* dqonoff_sem.
* This locking assures that:
@@ -117,9 +117,9 @@
* spinlock to internal buffers before writing.
*
* Lock ordering (including related VFS locks) is the following:
- * i_sem > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
+ * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
* > dquot->dq_lock > dqio_sem
- * i_sem on quota files is special (it's below dqio_sem)
+ * i_mutex on quota files is special (it's below dqio_sem)
*/
static DEFINE_SPINLOCK(dq_list_lock);
@@ -1369,11 +1369,11 @@ int vfs_quota_off(struct super_block *sb, int type)
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_enabled(sb, cnt)) {
- down(&toputinode[cnt]->i_sem);
+ mutex_lock(&toputinode[cnt]->i_mutex);
toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
S_NOATIME | S_NOQUOTA);
truncate_inode_pages(&toputinode[cnt]->i_data, 0);
- up(&toputinode[cnt]->i_sem);
+ mutex_unlock(&toputinode[cnt]->i_mutex);
mark_inode_dirty(toputinode[cnt]);
iput(toputinode[cnt]);
}
@@ -1417,7 +1417,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
write_inode_now(inode, 1);
/* And now flush the block cache so that kernel sees the changes */
invalidate_bdev(sb->s_bdev, 0);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
down(&dqopt->dqonoff_sem);
if (sb_has_quota_enabled(sb, type)) {
error = -EBUSY;
@@ -1449,7 +1449,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
goto out_file_init;
}
up(&dqopt->dqio_sem);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
set_enable_flags(dqopt, type);
add_dquot_ref(sb, type);
@@ -1470,7 +1470,7 @@ out_lock:
inode->i_flags |= oldflags;
up_write(&dqopt->dqptr_sem);
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out_fmt:
put_quota_format(fmt);
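
Read against the lock-ordering comment above, the quota-on path keeps the documented hierarchy intact after the conversion. An illustrative restatement (acquisition follows the hierarchy, release reverses it, error paths dropped):

	mutex_lock(&inode->i_mutex);	/* outermost, per the ordering comment */
	down(&dqopt->dqonoff_sem);	/* nests inside i_mutex */
	down(&dqopt->dqio_sem);		/* innermost of the three */
	/* ... quota file setup ... */
	up(&dqopt->dqio_sem);
	up(&dqopt->dqonoff_sem);
	mutex_unlock(&inode->i_mutex);

(The quota file's own i_mutex remains the documented special case, nesting below dqio_sem.)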
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index c49d6254379a..5bfe40085fbc 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -177,9 +177,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
struct dentry *ppd;
struct dentry *npd;
- down(&pd->d_inode->i_sem);
+ mutex_lock(&pd->d_inode->i_mutex);
ppd = CALL(nops,get_parent)(pd);
- up(&pd->d_inode->i_sem);
+ mutex_unlock(&pd->d_inode->i_mutex);
if (IS_ERR(ppd)) {
err = PTR_ERR(ppd);
@@ -201,9 +201,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
break;
}
dprintk("find_exported_dentry: found name: %s\n", nbuf);
- down(&ppd->d_inode->i_sem);
+ mutex_lock(&ppd->d_inode->i_mutex);
npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
- up(&ppd->d_inode->i_sem);
+ mutex_unlock(&ppd->d_inode->i_mutex);
if (IS_ERR(npd)) {
err = PTR_ERR(npd);
dprintk("find_exported_dentry: lookup failed: %d\n", err);
@@ -242,9 +242,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
struct dentry *nresult;
err = CALL(nops,get_name)(target_dir, nbuf, result);
if (!err) {
- down(&target_dir->d_inode->i_sem);
+ mutex_lock(&target_dir->d_inode->i_mutex);
nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
- up(&target_dir->d_inode->i_sem);
+ mutex_unlock(&target_dir->d_inode->i_mutex);
if (!IS_ERR(nresult)) {
if (nresult->d_inode) {
dput(result);
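
All three call sites above are instances of one contract: lookup_one_len() must run under the parent directory's i_mutex. Stripped of error handling, the recurring shape is:

	mutex_lock(&parent->d_inode->i_mutex);	/* required by lookup_one_len() */
	dentry = lookup_one_len(name, parent, strlen(name));
	mutex_unlock(&parent->d_inode->i_mutex);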
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 6af2f4130290..239133d01d91 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -149,7 +149,7 @@ ext2_iset_acl(struct inode *inode, struct posix_acl **i_acl,
}
/*
- * inode->i_sem: don't care
+ * inode->i_mutex: don't care
*/
static struct posix_acl *
ext2_get_acl(struct inode *inode, int type)
@@ -211,7 +211,7 @@ ext2_get_acl(struct inode *inode, int type)
}
/*
- * inode->i_sem: down
+ * inode->i_mutex: down
*/
static int
ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
@@ -301,8 +301,8 @@ ext2_permission(struct inode *inode, int mask, struct nameidata *nd)
/*
* Initialize the ACLs of a new inode. Called from ext2_new_inode.
*
- * dir->i_sem: down
- * inode->i_sem: up (access to inode is still exclusive)
+ * dir->i_mutex: down
+ * inode->i_mutex: up (access to inode is still exclusive)
*/
int
ext2_init_acl(struct inode *inode, struct inode *dir)
@@ -361,7 +361,7 @@ cleanup:
* for directories) are added. There are no more bits available in the
* file mode.
*
- * inode->i_sem: down
+ * inode->i_mutex: down
*/
int
ext2_acl_chmod(struct inode *inode)
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index e977f8566d14..00de0a7312a2 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -53,7 +53,7 @@ struct ext2_inode_info {
#ifdef CONFIG_EXT2_FS_XATTR
/*
* Extended attributes can be read independently of the main file
- * data. Taking i_sem even when reading would cause contention
+ * data. Taking i_mutex even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
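
Concretely, the split the comment describes means an EA reader never touches i_mutex at all; a minimal sketch of the reader side (using the standard EXT2_I() accessor):

	down_read(&EXT2_I(inode)->xattr_sem);	/* EA readers share the lock and
						 * never contend with i_mutex
						 * holders writing file data */
	/* ... copy the attribute value out ... */
	up_read(&EXT2_I(inode)->xattr_sem);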
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 522fa70dd8ea..8d6819846fc9 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1152,7 +1152,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
struct buffer_head tmp_bh;
struct buffer_head *bh;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -1189,7 +1189,7 @@ out:
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 0099462d4271..f7a3b5fee274 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -325,7 +325,7 @@ cleanup:
/*
* Inode operation listxattr()
*
- * dentry->d_inode->i_sem: don't care
+ * dentry->d_inode->i_mutex: don't care
*/
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 3ac38266fc9e..9ed132c96034 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -152,7 +152,7 @@ ext3_iset_acl(struct inode *inode, struct posix_acl **i_acl,
/*
* Inode operation get_posix_acl().
*
- * inode->i_sem: don't care
+ * inode->i_mutex: don't care
*/
static struct posix_acl *
ext3_get_acl(struct inode *inode, int type)
@@ -216,7 +216,7 @@ ext3_get_acl(struct inode *inode, int type)
/*
* Set the access or default ACL of an inode.
*
- * inode->i_sem: down unless called from ext3_new_inode
+ * inode->i_mutex: down unless called from ext3_new_inode
*/
static int
ext3_set_acl(handle_t *handle, struct inode *inode, int type,
@@ -306,8 +306,8 @@ ext3_permission(struct inode *inode, int mask, struct nameidata *nd)
/*
* Initialize the ACLs of a new inode. Called from ext3_new_inode.
*
- * dir->i_sem: down
- * inode->i_sem: up (access to inode is still exclusive)
+ * dir->i_mutex: down
+ * inode->i_mutex: up (access to inode is still exclusive)
*/
int
ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
@@ -368,7 +368,7 @@ cleanup:
* for directories) are added. There are no more bits available in the
* file mode.
*
- * inode->i_sem: down
+ * inode->i_mutex: down
*/
int
ext3_acl_chmod(struct inode *inode)
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 7c45acf94589..56bf76586019 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2150,7 +2150,7 @@ int ext3_force_commit(struct super_block *sb)
static void ext3_write_super (struct super_block * sb)
{
- if (down_trylock(&sb->s_lock) == 0)
+ if (mutex_trylock(&sb->s_lock) != 0)
BUG();
sb->s_dirt = 0;
}
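
Note the inverted comparison in this hunk: down_trylock() returns 0 when it succeeds in taking the lock, whereas mutex_trylock() returns 1 on success, so an assertion that the caller already holds the lock must flip its test. Side by side (illustrative only; the two forms never coexist):

	if (down_trylock(&sb->s_lock) == 0)	/* old: we acquired it, so the
						 * caller didn't hold it: bug */
		BUG();
	if (mutex_trylock(&sb->s_lock) != 0)	/* new: same meaning, success
						 * now reported as 1 */
		BUG();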
@@ -2601,7 +2601,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -2644,7 +2644,7 @@ out:
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ext3_mark_inode_dirty(handle, inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 430de9f63be3..238199d82ce5 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -140,7 +140,7 @@ ext3_xattr_handler(int name_index)
/*
* Inode operation listxattr()
*
- * dentry->d_inode->i_sem: don't care
+ * dentry->d_inode->i_mutex: don't care
*/
ssize_t
ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index eef1b81aa294..db0de5c621c7 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -729,13 +729,13 @@ static int fat_dir_ioctl(struct inode * inode, struct file * filp,
buf.dirent = d1;
buf.result = 0;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = -ENOENT;
if (!IS_DEADDIR(inode)) {
ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir,
short_only, both);
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret >= 0)
ret = buf.result;
return ret;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 9b07c328a6fc..d30876cf35f5 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -41,7 +41,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
if (err)
return err;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (IS_RDONLY(inode)) {
err = -EROFS;
@@ -103,7 +103,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
MSDOS_I(inode)->i_attrs = attr & ATTR_UNUSED;
mark_inode_dirty(inode);
up:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return err;
}
default:
diff --git a/fs/fifo.c b/fs/fifo.c
index 5455916241f0..923371b753ab 100644
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -35,7 +35,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
int ret;
ret = -ERESTARTSYS;
- if (down_interruptible(PIPE_SEM(*inode)))
+ if (mutex_lock_interruptible(PIPE_MUTEX(*inode)))
goto err_nolock_nocleanup;
if (!inode->i_pipe) {
@@ -119,7 +119,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
}
/* Ok! */
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
err_rd:
@@ -139,7 +139,7 @@ err:
free_pipe_info(inode);
err_nocleanup:
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
err_nolock_nocleanup:
return ret;
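
Unlike the trylock pair in the ext3_write_super() hunk above, the interruptible variants agree on their return convention (0 on success, -EINTR on a signal), so these call sites convert one-for-one without changing the test:

	if (mutex_lock_interruptible(PIPE_MUTEX(*inode)))
		goto err_nolock_nocleanup;	/* nonzero means -EINTR, exactly
						 * as with down_interruptible() */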
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 05dedddf4289..63d2980df5c9 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -560,9 +560,9 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
struct inode *inode = file->f_dentry->d_inode;
ssize_t res;
/* Don't allow parallel writes to the same file */
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
res = fuse_direct_io(file, buf, count, ppos, 1);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return res;
}
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index d499393a8ae7..050a49276499 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -547,13 +547,13 @@ static int hfs_file_release(struct inode *inode, struct file *file)
if (atomic_read(&file->f_count) != 0)
return 0;
if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
hfs_file_truncate(inode);
//if (inode->i_flags & S_DEAD) {
// hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
// hfs_delete_inode(inode);
//}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return 0;
}
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index c7d316455fa0..9fb51632303c 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -29,7 +29,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
return size;
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
- down(&HFSPLUS_SB(sb).alloc_file->i_sem);
+ mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
(filler_t *)mapping->a_ops->readpage, NULL);
@@ -143,7 +143,7 @@ done:
sb->s_dirt = 1;
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
- up(&HFSPLUS_SB(sb).alloc_file->i_sem);
+ mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
return start;
}
@@ -164,7 +164,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
return -2;
- down(&HFSPLUS_SB(sb).alloc_file->i_sem);
+ mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
pnr = offset / PAGE_CACHE_BITS;
page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
@@ -215,7 +215,7 @@ out:
kunmap(page);
HFSPLUS_SB(sb).free_blocks += len;
sb->s_dirt = 1;
- up(&HFSPLUS_SB(sb).alloc_file->i_sem);
+ mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
return 0;
}
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index fc98583cf045..983bcd02ac1c 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -276,13 +276,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
if (atomic_read(&file->f_count) != 0)
return 0;
if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
hfsplus_file_truncate(inode);
if (inode->i_flags & S_DEAD) {
hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
hfsplus_delete_inode(inode);
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return 0;
}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 0217c3a04441..5591f9623aa2 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -32,19 +32,19 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
/*printk("dir lseek\n");*/
if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
- down(&i->i_sem);
+ mutex_lock(&i->i_mutex);
pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
while (pos != new_off) {
if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
else goto fail;
if (pos == 12) goto fail;
}
- up(&i->i_sem);
+ mutex_unlock(&i->i_mutex);
ok:
unlock_kernel();
return filp->f_pos = new_off;
fail:
- up(&i->i_sem);
+ mutex_unlock(&i->i_mutex);
/*printk("illegal lseek: %016llx\n", new_off);*/
unlock_kernel();
return -ESPIPE;
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index 52930915bad8..a44dc5897399 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -171,12 +171,12 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
err = -ENOMEM;
parent = HPPFS_I(ino)->proc_dentry;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
proc_dentry = d_lookup(parent, &dentry->d_name);
if(proc_dentry == NULL){
proc_dentry = d_alloc(parent, &dentry->d_name);
if(proc_dentry == NULL){
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
goto out;
}
new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
@@ -186,7 +186,7 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
proc_dentry = new;
}
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
if(IS_ERR(proc_dentry))
return(proc_dentry);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8c41315a6e42..ff1b7d108bd0 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -118,7 +118,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
file_accessed(file);
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
vma->vm_ops = &hugetlb_vm_ops;
@@ -133,7 +133,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (inode->i_size < len)
inode->i_size = len;
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
diff --git a/fs/inode.c b/fs/inode.c
index fd568caf7f74..e08767fd57b0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -192,7 +192,7 @@ void inode_init_once(struct inode *inode)
INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_dentry);
INIT_LIST_HEAD(&inode->i_devices);
- sema_init(&inode->i_sem, 1);
+ mutex_init(&inode->i_mutex);
init_rwsem(&inode->i_alloc_sem);
INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
rwlock_init(&inode->i_data.tree_lock);
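
At this level the whole conversion is a change of initializer: the old code set up a counting semaphore with an initial count of 1, the new code uses the dedicated mutex initializer (which also enables the mutex debugging checks when configured). For comparison:

	sema_init(&inode->i_sem, 1);	/* old: semaphore with count 1 */
	mutex_init(&inode->i_mutex);	/* new: true mutex, with owner and
					 * debug checking when enabled */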
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 2559ee10beda..fc3855a1aef3 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -1415,7 +1415,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
* This will never trigger with sane page sizes. leave it in
* anyway, since I'm thinking about how to merge larger writes
* (the current idea is to poke a thread that does the actual
- * I/O and starts by doing a down(&inode->i_sem). then we
+ * I/O and starts by doing a mutex_lock(&inode->i_mutex). Then we
* would need to get the page cache pages and have a list of
* I/O requests and do write-merging here.
* -- prumpf
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index c0fd7b3eadc6..dc21a5bd54d4 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -58,7 +58,7 @@ struct jfs_inode_info {
/*
* rdwrlock serializes xtree between reads & writes and synchronizes
* changes to special inodes. Its use would be redundant on
- * directories since the i_sem taken in the VFS is sufficient.
+ * directories since the i_mutex taken in the VFS is sufficient.
*/
struct rw_semaphore rdwrlock;
/*
@@ -68,7 +68,7 @@ struct jfs_inode_info {
* inode is blocked in txBegin or TxBeginAnon
*/
struct semaphore commit_sem;
- /* xattr_sem allows us to access the xattrs without taking i_sem */
+ /* xattr_sem allows us to access the xattrs without taking i_mutex */
struct rw_semaphore xattr_sem;
lid_t xtlid; /* lid of xtree lock on directory */
#ifdef CONFIG_JFS_POSIX_ACL
diff --git a/fs/libfs.c b/fs/libfs.c
index 9c50523382e7..63c020e6589e 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -74,7 +74,7 @@ int dcache_dir_close(struct inode *inode, struct file *file)
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
{
- down(&file->f_dentry->d_inode->i_sem);
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += file->f_pos;
@@ -82,7 +82,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
if (offset >= 0)
break;
default:
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -106,7 +106,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
spin_unlock(&dcache_lock);
}
}
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return offset;
}
@@ -356,7 +356,7 @@ int simple_commit_write(struct file *file, struct page *page,
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold the i_sem.
+ * cannot change under us because we hold the i_mutex.
*/
if (pos > inode->i_size)
i_size_write(inode, pos);
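
The comment above encodes the general i_size rule: i_mutex is the writer-side lock for i_size, so a holder may read the field directly and may update it with i_size_write(); lockless readers elsewhere must use i_size_read(). Roughly:

	/* under i_mutex: direct access is safe, updates are permitted */
	if (pos > inode->i_size)
		i_size_write(inode, pos);

	/* without i_mutex: go through the seqcount-protected reader */
	size = i_size_read(inode);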
diff --git a/fs/namei.c b/fs/namei.c
index 300eae088d5f..0a8f073435af 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -438,7 +438,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
struct dentry * result;
struct inode *dir = parent->d_inode;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
/*
* First re-do the cached lookup just in case it was created
* while we waited for the directory semaphore..
@@ -464,7 +464,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
else
result = dentry;
}
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
return result;
}
@@ -472,7 +472,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
* Uhhuh! Nasty case: the cache was re-populated while
* we waited on the semaphore. Need to revalidate.
*/
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
if (result->d_op && result->d_op->d_revalidate) {
if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) {
dput(result);
@@ -1366,7 +1366,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
struct dentry *p;
if (p1 == p2) {
- down(&p1->d_inode->i_sem);
+ mutex_lock(&p1->d_inode->i_mutex);
return NULL;
}
@@ -1374,30 +1374,30 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
for (p = p1; p->d_parent != p; p = p->d_parent) {
if (p->d_parent == p2) {
- down(&p2->d_inode->i_sem);
- down(&p1->d_inode->i_sem);
+ mutex_lock(&p2->d_inode->i_mutex);
+ mutex_lock(&p1->d_inode->i_mutex);
return p;
}
}
for (p = p2; p->d_parent != p; p = p->d_parent) {
if (p->d_parent == p1) {
- down(&p1->d_inode->i_sem);
- down(&p2->d_inode->i_sem);
+ mutex_lock(&p1->d_inode->i_mutex);
+ mutex_lock(&p2->d_inode->i_mutex);
return p;
}
}
- down(&p1->d_inode->i_sem);
- down(&p2->d_inode->i_sem);
+ mutex_lock(&p1->d_inode->i_mutex);
+ mutex_lock(&p2->d_inode->i_mutex);
return NULL;
}
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
- up(&p1->d_inode->i_sem);
+ mutex_unlock(&p1->d_inode->i_mutex);
if (p1 != p2) {
- up(&p2->d_inode->i_sem);
+ mutex_unlock(&p2->d_inode->i_mutex);
up(&p1->d_inode->i_sb->s_vfs_rename_sem);
}
}
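
Callers of the pair above never order the two parent locks themselves; lock_rename() picks the deadlock-safe order (ancestor first, and under s_vfs_rename_sem when the parents differ) and unlock_rename() reverses it. A usage sketch:

	trap = lock_rename(new_parent, old_parent);	/* both parents' i_mutex
							 * now held, safely */
	/* ... lookups and the actual rename under both locks ... */
	unlock_rename(new_parent, old_parent);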
@@ -1563,14 +1563,14 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
dir = nd->dentry;
nd->flags &= ~LOOKUP_PARENT;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
path.dentry = lookup_hash(nd);
path.mnt = nd->mnt;
do_last:
error = PTR_ERR(path.dentry);
if (IS_ERR(path.dentry)) {
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
goto exit;
}
@@ -1579,7 +1579,7 @@ do_last:
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current->fs->umask;
error = vfs_create(dir->d_inode, path.dentry, mode, nd);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
dput(nd->dentry);
nd->dentry = path.dentry;
if (error)
@@ -1593,7 +1593,7 @@ do_last:
/*
* It already exists.
*/
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
error = -EEXIST;
if (flag & O_EXCL)
@@ -1665,7 +1665,7 @@ do_link:
goto exit;
}
dir = nd->dentry;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
path.dentry = lookup_hash(nd);
path.mnt = nd->mnt;
__putname(nd->last.name);
@@ -1680,13 +1680,13 @@ do_link:
* Simple function to lookup and return a dentry and create it
* if it doesn't exist. Is SMP-safe.
*
- * Returns with nd->dentry->d_inode->i_sem locked.
+ * Returns with nd->dentry->d_inode->i_mutex locked.
*/
struct dentry *lookup_create(struct nameidata *nd, int is_dir)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
- down(&nd->dentry->d_inode->i_sem);
+ mutex_lock(&nd->dentry->d_inode->i_mutex);
/*
* Yucky last component or no last component at all?
* (foo/., foo/.., /////)
@@ -1784,7 +1784,7 @@ asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev)
}
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(tmp);
@@ -1836,7 +1836,7 @@ asmlinkage long sys_mkdir(const char __user * pathname, int mode)
error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(tmp);
@@ -1885,7 +1885,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
DQUOT_INIT(dir);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
dentry_unhash(dentry);
if (d_mountpoint(dentry))
error = -EBUSY;
@@ -1897,7 +1897,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
dentry->d_inode->i_flags |= S_DEAD;
}
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
if (!error) {
d_delete(dentry);
}
@@ -1932,14 +1932,14 @@ asmlinkage long sys_rmdir(const char __user * pathname)
error = -EBUSY;
goto exit1;
}
- down(&nd.dentry->d_inode->i_sem);
+ mutex_lock(&nd.dentry->d_inode->i_mutex);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
error = vfs_rmdir(nd.dentry->d_inode, dentry);
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
exit1:
path_release(&nd);
exit:
@@ -1959,7 +1959,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
DQUOT_INIT(dir);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
if (d_mountpoint(dentry))
error = -EBUSY;
else {
@@ -1967,7 +1967,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
if (!error)
error = dir->i_op->unlink(dir, dentry);
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
@@ -1979,7 +1979,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
/*
* Make sure that the actual truncation of the file will occur outside its
- * directory's i_sem. Truncate can take a long time if there is a lot of
+ * directory's i_mutex. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
@@ -2001,7 +2001,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
error = -EISDIR;
if (nd.last_type != LAST_NORM)
goto exit1;
- down(&nd.dentry->d_inode->i_sem);
+ mutex_lock(&nd.dentry->d_inode->i_mutex);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
@@ -2015,7 +2015,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
exit2:
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
if (inode)
iput(inode); /* truncate the inode here */
exit1:
@@ -2075,7 +2075,7 @@ asmlinkage long sys_symlink(const char __user * oldname, const char __user * new
error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(to);
@@ -2113,10 +2113,10 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
if (error)
return error;
- down(&old_dentry->d_inode->i_sem);
+ mutex_lock(&old_dentry->d_inode->i_mutex);
DQUOT_INIT(dir);
error = dir->i_op->link(old_dentry, dir, new_dentry);
- up(&old_dentry->d_inode->i_sem);
+ mutex_unlock(&old_dentry->d_inode->i_mutex);
if (!error)
fsnotify_create(dir, new_dentry->d_name.name);
return error;
@@ -2157,7 +2157,7 @@ asmlinkage long sys_link(const char __user * oldname, const char __user * newnam
error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
dput(new_dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
out_release:
path_release(&nd);
out:
@@ -2178,7 +2178,7 @@ exit:
* sb->s_vfs_rename_sem. We might be more accurate, but that's another
* story.
* c) we have to lock _three_ objects - parents and victim (if it exists).
- * And that - after we got ->i_sem on parents (until then we don't know
+ * And that - after we got ->i_mutex on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_sem _and_ that parent of the object we
@@ -2195,9 +2195,9 @@ exit:
* stuff into VFS), but the former is not going away. Solution: the same
* trick as in rmdir().
* e) conversion from fhandle to dentry may come in the wrong moment - when
- * we are removing the target. Solution: we will have to grab ->i_sem
+ * we are removing the target. Solution: we will have to grab ->i_mutex
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
- * ->i_sem on parents, which works but leads to some truely excessive
+ * ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
@@ -2222,7 +2222,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
target = new_dentry->d_inode;
if (target) {
- down(&target->i_sem);
+ mutex_lock(&target->i_mutex);
dentry_unhash(new_dentry);
}
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
@@ -2232,7 +2232,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
if (target) {
if (!error)
target->i_flags |= S_DEAD;
- up(&target->i_sem);
+ mutex_unlock(&target->i_mutex);
if (d_unhashed(new_dentry))
d_rehash(new_dentry);
dput(new_dentry);
@@ -2255,7 +2255,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
dget(new_dentry);
target = new_dentry->d_inode;
if (target)
- down(&target->i_sem);
+ mutex_lock(&target->i_mutex);
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
error = -EBUSY;
else
@@ -2266,7 +2266,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
d_move(old_dentry, new_dentry);
}
if (target)
- up(&target->i_sem);
+ mutex_unlock(&target->i_mutex);
dput(new_dentry);
return error;
}
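
This is rule (c) of the long comment above in code form: only once both parents' i_mutex are held do we know whether a victim exists, and if it does, its i_mutex becomes the third lock taken. Condensed from the two helpers above:

	target = new_dentry->d_inode;
	if (target)
		mutex_lock(&target->i_mutex);	/* third lock, taken only if the
						 * victim actually exists */
	/* ... ->rename() ... */
	if (target)
		mutex_unlock(&target->i_mutex);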
diff --git a/fs/namespace.c b/fs/namespace.c
index 3e8fb61ad597..f0e353f5bc30 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -814,7 +814,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
return -ENOTDIR;
err = -ENOENT;
- down(&nd->dentry->d_inode->i_sem);
+ mutex_lock(&nd->dentry->d_inode->i_mutex);
if (IS_DEADDIR(nd->dentry->d_inode))
goto out_unlock;
@@ -826,7 +826,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
- up(&nd->dentry->d_inode->i_sem);
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
if (!err)
security_sb_post_addmount(mnt, nd);
return err;
@@ -962,7 +962,7 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
goto out;
err = -ENOENT;
- down(&nd->dentry->d_inode->i_sem);
+ mutex_lock(&nd->dentry->d_inode->i_mutex);
if (IS_DEADDIR(nd->dentry->d_inode))
goto out1;
@@ -1004,7 +1004,7 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
list_del_init(&old_nd.mnt->mnt_expire);
spin_unlock(&vfsmount_lock);
out1:
- up(&nd->dentry->d_inode->i_sem);
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
up_write(&namespace_sem);
if (!err)
@@ -1573,7 +1573,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
user_nd.dentry = dget(current->fs->root);
read_unlock(&current->fs->lock);
down_write(&namespace_sem);
- down(&old_nd.dentry->d_inode->i_sem);
+ mutex_lock(&old_nd.dentry->d_inode->i_mutex);
error = -EINVAL;
if (IS_MNT_SHARED(old_nd.mnt) ||
IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
@@ -1626,7 +1626,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
path_release(&root_parent);
path_release(&parent_nd);
out2:
- up(&old_nd.dentry->d_inode->i_sem);
+ mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
up_write(&namespace_sem);
path_release(&user_nd);
path_release(&old_nd);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e9255198f767..a1554bead692 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -194,7 +194,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
spin_unlock(&inode->i_lock);
/* Ensure consistent page alignment of the data.
* Note: assumes we have exclusive access to this mapping either
- * through inode->i_sem or some other mechanism.
+ * through inode->i_mutex or some other mechanism.
*/
if (page->index == 0)
invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1);
@@ -573,7 +573,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
{
- down(&filp->f_dentry->d_inode->i_sem);
+ mutex_lock(&filp->f_dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += filp->f_pos;
@@ -589,7 +589,7 @@ loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
((struct nfs_open_context *)filp->private_data)->dir_cookie = 0;
}
out:
- up(&filp->f_dentry->d_inode->i_sem);
+ mutex_unlock(&filp->f_dentry->d_inode->i_mutex);
return offset;
}
@@ -1001,7 +1001,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
openflags &= ~(O_CREAT|O_TRUNC);
/*
- * Note: we're not holding inode->i_sem and so may be racing with
+ * Note: we're not holding inode->i_mutex and so may be racing with
* operations that change the directory. We therefore save the
* change attribute *before* we do the RPC call.
*/
@@ -1051,7 +1051,7 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
return dentry;
if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
return NULL;
- /* Note: caller is already holding the dir->i_sem! */
+ /* Note: caller is already holding the dir->i_mutex! */
dentry = d_alloc(parent, &name);
if (dentry == NULL)
return NULL;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 954cf893d50c..be963a133aaa 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -121,9 +121,9 @@ out:
static void
nfsd4_sync_rec_dir(void)
{
- down(&rec_dir.dentry->d_inode->i_sem);
+ mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
nfsd_sync_dir(rec_dir.dentry);
- up(&rec_dir.dentry->d_inode->i_sem);
+ mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
}
int
@@ -143,7 +143,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
nfs4_save_user(&uid, &gid);
/* lock the parent */
- down(&rec_dir.dentry->d_inode->i_sem);
+ mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
if (IS_ERR(dentry)) {
@@ -159,7 +159,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
out_put:
dput(dentry);
out_unlock:
- up(&rec_dir.dentry->d_inode->i_sem);
+ mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
if (status == 0) {
clp->cl_firststate = 1;
nfsd4_sync_rec_dir();
@@ -259,9 +259,9 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
printk("nfsd4: non-file found in client recovery directory\n");
return -EINVAL;
}
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
status = vfs_unlink(dir->d_inode, dentry);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return status;
}
@@ -274,9 +274,9 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
* any regular files anyway, just in case the directory was created by
* a kernel from the future.... */
nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
status = vfs_rmdir(dir->d_inode, dentry);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return status;
}
@@ -288,9 +288,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
- down(&rec_dir.dentry->d_inode->i_sem);
+ mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
dentry = lookup_one_len(name, rec_dir.dentry, namlen);
- up(&rec_dir.dentry->d_inode->i_sem);
+ mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
return status;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index df4019f04560..bb36b4304491 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -390,12 +390,12 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
error = -EOPNOTSUPP;
if (inode->i_op && inode->i_op->setxattr) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
security_inode_setxattr(dentry, key, buf, len, 0);
error = inode->i_op->setxattr(dentry, key, buf, len, 0);
if (!error)
security_inode_post_setxattr(dentry, key, buf, len, 0);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
out:
kfree(buf);
@@ -739,9 +739,9 @@ nfsd_sync(struct file *filp)
int err;
struct inode *inode = filp->f_dentry->d_inode;
dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
err=nfsd_dosync(filp, filp->f_dentry, filp->f_op);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return err;
}
@@ -885,9 +885,9 @@ static void kill_suid(struct dentry *dentry)
struct iattr ia;
ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
notify_change(dentry, &ia);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
}
static inline int
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index eda056bac256..9480a0526cd3 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1532,7 +1532,7 @@ int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
* NOTE to self: No changes in the attribute list are required to move from
* a resident to a non-resident attribute.
*
- * Locking: - The caller must hold i_sem on the inode.
+ * Locking: - The caller must hold i_mutex on the inode.
*/
int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
{
@@ -1728,7 +1728,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
/*
* This needs to be last since the address space operations ->readpage
* and ->writepage can run concurrently with us as they are not
- * serialized on i_sem. Note, we are not allowed to fail once we flip
+ * serialized on i_mutex. Note, we are not allowed to fail once we flip
* this switch, which is another reason to do this last.
*/
NInoSetNonResident(ni);
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 795c3d1930f5..b0690d4c8906 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -69,7 +69,7 @@ ntfschar I30[5] = { const_cpu_to_le16('$'), const_cpu_to_le16('I'),
* work but we don't care for how quickly one can access them. This also fixes
* the dcache aliasing issues.
*
- * Locking: - Caller must hold i_sem on the directory.
+ * Locking: - Caller must hold i_mutex on the directory.
* - Each page cache page in the index allocation mapping must be
* locked whilst being accessed otherwise we may find a corrupt
* page due to it being under ->writepage at the moment which
@@ -1085,11 +1085,11 @@ static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
* While this will return the names in random order this doesn't matter for
* ->readdir but OTOH results in a faster ->readdir.
*
- * VFS calls ->readdir without BKL but with i_sem held. This protects the VFS
+ * VFS calls ->readdir without BKL but with i_mutex held. This protects the VFS
* parts (e.g. ->f_pos and ->i_size, and it also protects against directory
* modifications).
*
- * Locking: - Caller must hold i_sem on the directory.
+ * Locking: - Caller must hold i_mutex on the directory.
* - Each page cache page in the index allocation mapping must be
* locked whilst being accessed otherwise we may find a corrupt
* page due to it being under ->writepage at the moment which
@@ -1520,7 +1520,7 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp)
* Note: In the past @filp could be NULL so we ignore it as we don't need it
* anyway.
*
- * Locking: Caller must hold i_sem on the inode.
+ * Locking: Caller must hold i_mutex on the inode.
*
* TODO: We should probably also write all attribute/index inodes associated
* with this inode but since we have no simple way of getting to them we ignore
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 727533891813..30f71acdc1cb 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -106,7 +106,7 @@ static int ntfs_file_open(struct inode *vi, struct file *filp)
* this is the case, the necessary zeroing will also have happened and that all
* metadata is self-consistent.
*
- * Locking: i_sem on the vfs inode corrseponsind to the ntfs inode @ni must be
+ * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
* held by the caller.
*/
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
@@ -473,7 +473,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
* @bytes: number of bytes to be written
*
* This is called for non-resident attributes from ntfs_file_buffered_write()
- * with i_sem held on the inode (@pages[0]->mapping->host). There are
+ * with i_mutex held on the inode (@pages[0]->mapping->host). There are
* @nr_pages pages in @pages which are locked but not kmap()ped. The source
* data has not yet been copied into the @pages.
*
@@ -1637,7 +1637,7 @@ err_out:
* @pos: byte position in file at which the write begins
* @bytes: number of bytes to be written
*
- * This is called from ntfs_file_buffered_write() with i_sem held on the inode
+ * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
* (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
* locked but not kmap()ped. The source data has already been copied into the
* @page. ntfs_prepare_pages_for_non_resident_write() has been called before
@@ -1814,7 +1814,7 @@ err_out:
/**
* ntfs_file_buffered_write -
*
- * Locking: The vfs is holding ->i_sem on the inode.
+ * Locking: The vfs is holding ->i_mutex on the inode.
*/
static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
const struct iovec *iov, unsigned long nr_segs,
@@ -2196,9 +2196,9 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
BUG_ON(iocb->ki_pos != pos);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err = sync_page_range(inode, mapping, pos, ret);
if (err < 0)
@@ -2221,12 +2221,12 @@ static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
struct kiocb kiocb;
ssize_t ret;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
init_sync_kiocb(&kiocb, file);
ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
if (ret == -EIOCBQUEUED)
ret = wait_on_sync_kiocb(&kiocb);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err = sync_page_range(inode, mapping, *ppos - ret, ret);
if (err < 0)
@@ -2269,7 +2269,7 @@ static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
* Note: In the past @filp could be NULL so we ignore it as we don't need it
* anyway.
*
- * Locking: Caller must hold i_sem on the inode.
+ * Locking: Caller must hold i_mutex on the inode.
*
* TODO: We should probably also write all attribute/index inodes associated
* with this inode but since we have no simple way of getting to them we ignore
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 8f2d5727546f..9f5427c2d105 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -32,7 +32,7 @@
* Allocate a new index context, initialize it with @idx_ni and return it.
* Return NULL if allocation failed.
*
- * Locking: Caller must hold i_sem on the index inode.
+ * Locking: Caller must hold i_mutex on the index inode.
*/
ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
{
@@ -50,7 +50,7 @@ ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
*
* Release the index context @ictx, releasing all associated resources.
*
- * Locking: Caller must hold i_sem on the index inode.
+ * Locking: Caller must hold i_mutex on the index inode.
*/
void ntfs_index_ctx_put(ntfs_index_context *ictx)
{
@@ -106,7 +106,7 @@ void ntfs_index_ctx_put(ntfs_index_context *ictx)
* or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
* ensure that the changes are written to disk.
*
- * Locking: - Caller must hold i_sem on the index inode.
+ * Locking: - Caller must hold i_mutex on the index inode.
* - Each page cache page in the index allocation mapping must be
* locked whilst being accessed otherwise we may find a corrupt
* page due to it being under ->writepage at the moment which
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index b24f4c4b2c5c..bda7a08911a5 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2125,13 +2125,13 @@ void ntfs_put_inode(struct inode *vi)
ntfs_inode *ni = NTFS_I(vi);
if (NInoIndexAllocPresent(ni)) {
struct inode *bvi = NULL;
- down(&vi->i_sem);
+ mutex_lock(&vi->i_mutex);
if (atomic_read(&vi->i_count) == 2) {
bvi = ni->itype.index.bmp_ino;
if (bvi)
ni->itype.index.bmp_ino = NULL;
}
- up(&vi->i_sem);
+ mutex_unlock(&vi->i_mutex);
if (bvi)
iput(bvi);
}
@@ -2311,7 +2311,7 @@ static const char *es = " Leaving inconsistent metadata. Unmount and run "
*
* Returns 0 on success or -errno on error.
*
- * Called with ->i_sem held. In all but one case ->i_alloc_sem is held for
+ * Called with ->i_mutex held. In all but one case ->i_alloc_sem is held for
* writing. The only case in the kernel where ->i_alloc_sem is not held is
* mm/filemap.c::generic_file_buffered_write() where vmtruncate() is called
* with the current i_size as the offset. The analogous place in NTFS is in
@@ -2831,7 +2831,7 @@ void ntfs_truncate_vfs(struct inode *vi) {
* We also abort all changes of user, group, and mode as we do not implement
* the NTFS ACLs yet.
*
- * Called with ->i_sem held. For the ATTR_SIZE (i.e. ->truncate) case, also
+ * Called with ->i_mutex held. For the ATTR_SIZE (i.e. ->truncate) case, also
* called with ->i_alloc_sem held for writing.
*
* Basically this is a copy of generic notify_change() and inode_setattr()
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 351dbc3b6e40..5ea9eb93af62 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -96,7 +96,7 @@
* name. We then convert the name to the current NLS code page, and proceed
* searching for a dentry with this name, etc, as in case 2), above.
*
- * Locking: Caller must hold i_sem on the directory.
+ * Locking: Caller must hold i_mutex on the directory.
*/
static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
struct nameidata *nd)
@@ -254,7 +254,7 @@ handle_name:
nls_name.hash = full_name_hash(nls_name.name, nls_name.len);
/*
- * Note: No need for dent->d_lock lock as i_sem is held on the
+ * Note: No need for dent->d_lock lock as i_mutex is held on the
* parent inode.
*/
@@ -374,7 +374,7 @@ struct inode_operations ntfs_dir_inode_ops = {
* The code is based on the ext3 ->get_parent() implementation found in
* fs/ext3/namei.c::ext3_get_parent().
*
- * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_sem down.
+ * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down.
*
* Return the dentry of the parent directory on success or the error code on
* error (IS_ERR() is true).
diff --git a/fs/ntfs/quota.c b/fs/ntfs/quota.c
index 833df2a4e9fb..d0ef4182147b 100644
--- a/fs/ntfs/quota.c
+++ b/fs/ntfs/quota.c
@@ -48,7 +48,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
ntfs_error(vol->sb, "Quota inodes are not open.");
return FALSE;
}
- down(&vol->quota_q_ino->i_sem);
+ mutex_lock(&vol->quota_q_ino->i_mutex);
ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino));
if (!ictx) {
ntfs_error(vol->sb, "Failed to get index context.");
@@ -98,7 +98,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
ntfs_index_entry_mark_dirty(ictx);
set_done:
ntfs_index_ctx_put(ictx);
- up(&vol->quota_q_ino->i_sem);
+ mutex_unlock(&vol->quota_q_ino->i_mutex);
/*
* We set the flag so we do not try to mark the quotas out of date
* again on remount.
@@ -110,7 +110,7 @@ done:
err_out:
if (ictx)
ntfs_index_ctx_put(ictx);
- up(&vol->quota_q_ino->i_sem);
+ mutex_unlock(&vol->quota_q_ino->i_mutex);
return FALSE;
}
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 6c16db9e1a8a..280e383fc84e 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1213,10 +1213,10 @@ static int check_windows_hibernation_status(ntfs_volume *vol)
* Find the inode number for the hibernation file by looking up the
* filename hiberfil.sys in the root directory.
*/
- down(&vol->root_ino->i_sem);
+ mutex_lock(&vol->root_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->root_ino), hiberfil, 12,
&name);
- up(&vol->root_ino->i_sem);
+ mutex_unlock(&vol->root_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
ret = MREF_ERR(mref);
/* If the file does not exist, Windows is not hibernated. */
@@ -1307,10 +1307,10 @@ static BOOL load_and_init_quota(ntfs_volume *vol)
* Find the inode number for the quota file by looking up the filename
* $Quota in the extended system files directory $Extend.
*/
- down(&vol->extend_ino->i_sem);
+ mutex_lock(&vol->extend_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), Quota, 6,
&name);
- up(&vol->extend_ino->i_sem);
+ mutex_unlock(&vol->extend_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
/*
* If the file does not exist, quotas are disabled and have
@@ -1390,10 +1390,10 @@ static BOOL load_and_init_usnjrnl(ntfs_volume *vol)
* Find the inode number for the transaction log file by looking up the
* filename $UsnJrnl in the extended system files directory $Extend.
*/
- down(&vol->extend_ino->i_sem);
+ mutex_lock(&vol->extend_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), UsnJrnl, 8,
&name);
- up(&vol->extend_ino->i_sem);
+ mutex_unlock(&vol->extend_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
/*
* If the file does not exist, transaction logging is disabled,
@@ -2312,9 +2312,9 @@ static void ntfs_put_super(struct super_block *sb)
if (!list_empty(&sb->s_dirty)) {
const char *s1, *s2;
- down(&vol->mft_ino->i_sem);
+ mutex_lock(&vol->mft_ino->i_mutex);
truncate_inode_pages(vol->mft_ino->i_mapping, 0);
- up(&vol->mft_ino->i_sem);
+ mutex_unlock(&vol->mft_ino->i_mutex);
write_inode_now(vol->mft_ino, 1);
if (!list_empty(&sb->s_dirty)) {
static const char *_s1 = "inodes";
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 465f797451ee..6b9812db3779 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -966,7 +966,7 @@ static int ocfs2_truncate_log_append(struct ocfs2_super *osb,
mlog_entry("start_blk = %"MLFu64", num_clusters = %u\n", start_blk,
num_clusters);
- BUG_ON(!down_trylock(&tl_inode->i_sem));
+ BUG_ON(mutex_trylock(&tl_inode->i_mutex));
start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
@@ -1108,7 +1108,7 @@ bail:
return status;
}
-/* Expects you to already be holding tl_inode->i_sem */
+/* Expects you to already be holding tl_inode->i_mutex */
static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
{
int status;
@@ -1123,7 +1123,7 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
mlog_entry_void();
- BUG_ON(!down_trylock(&tl_inode->i_sem));
+ BUG_ON(mutex_trylock(&tl_inode->i_mutex));
di = (struct ocfs2_dinode *) tl_bh->b_data;
tl = &di->id2.i_dealloc;
@@ -1198,9 +1198,9 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
int status;
struct inode *tl_inode = osb->osb_tl_inode;
- down(&tl_inode->i_sem);
+ mutex_lock(&tl_inode->i_mutex);
status = __ocfs2_flush_truncate_log(osb);
- up(&tl_inode->i_sem);
+ mutex_unlock(&tl_inode->i_mutex);
return status;
}
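
The BUG_ON() conversions in this file rely on the same return-convention flip noted at the ext3_write_super() hunk above: mutex_trylock() succeeding proves nobody held the lock, which is precisely the condition the assertion wants to trap:

	/* "caller must already hold tl_inode->i_mutex", as an assertion */
	BUG_ON(mutex_trylock(&tl_inode->i_mutex));	/* was !down_trylock() */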
@@ -1363,7 +1363,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
mlog(0, "cleanup %u records from %"MLFu64"\n", num_recs,
tl_copy->i_blkno);
- down(&tl_inode->i_sem);
+ mutex_lock(&tl_inode->i_mutex);
for(i = 0; i < num_recs; i++) {
if (ocfs2_truncate_log_needs_flush(osb)) {
status = __ocfs2_flush_truncate_log(osb);
@@ -1395,7 +1395,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
}
bail_up:
- up(&tl_inode->i_sem);
+ mutex_unlock(&tl_inode->i_mutex);
mlog_exit(status);
return status;
@@ -1840,7 +1840,7 @@ start:
mlog(0, "clusters_to_del = %u in this pass\n", clusters_to_del);
- down(&tl_inode->i_sem);
+ mutex_lock(&tl_inode->i_mutex);
tl_sem = 1;
/* ocfs2_truncate_log_needs_flush guarantees us at least one
* record is free for use. If there isn't any, we flush to get
@@ -1875,7 +1875,7 @@ start:
goto bail;
}
- up(&tl_inode->i_sem);
+ mutex_unlock(&tl_inode->i_mutex);
tl_sem = 0;
ocfs2_commit_trans(handle);
@@ -1890,7 +1890,7 @@ bail:
ocfs2_schedule_truncate_log_flush(osb, 1);
if (tl_sem)
- up(&tl_inode->i_sem);
+ mutex_unlock(&tl_inode->i_mutex);
if (handle)
ocfs2_commit_trans(handle);
@@ -1994,7 +1994,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
goto bail;
}
- down(&ext_alloc_inode->i_sem);
+ mutex_lock(&ext_alloc_inode->i_mutex);
(*tc)->tc_ext_alloc_inode = ext_alloc_inode;
status = ocfs2_meta_lock(ext_alloc_inode,
@@ -2026,7 +2026,7 @@ static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
if (tc->tc_ext_alloc_locked)
ocfs2_meta_unlock(tc->tc_ext_alloc_inode, 1);
- up(&tc->tc_ext_alloc_inode->i_sem);
+ mutex_unlock(&tc->tc_ext_alloc_inode->i_mutex);
iput(tc->tc_ext_alloc_inode);
}
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 5fd60c105913..cf7828f23361 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -653,7 +653,7 @@ static struct config_group *o2nm_cluster_group_make_group(struct config_group *g
struct config_group *o2hb_group = NULL, *ret = NULL;
void *defs = NULL;
- /* this runs under the parent dir's i_sem; there can be only
+ /* this runs under the parent dir's i_mutex; there can be only
* one caller in here at a time */
if (o2nm_single_cluster)
goto out; /* ENOSPC */
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 856e20ae8263..57158fa75d91 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -202,7 +202,7 @@ bail:
}
/*
- * NOTE: this should always be called with parent dir i_sem taken.
+ * NOTE: this should always be called with parent dir i_mutex taken.
*/
int ocfs2_find_files_on_disk(const char *name,
int namelen,
@@ -245,7 +245,7 @@ leave:
* Return 0 if the name does not exist
* Return -EEXIST if the directory contains the name
*
- * Callers should have i_sem + a cluster lock on dir
+ * Callers should have i_mutex + a cluster lock on dir
*/
int ocfs2_check_dir_for_entry(struct inode *dir,
const char *name,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 72ae9e3306f4..ca5f9f90d794 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -492,7 +492,7 @@ restart_all:
}
/* blocks people in read/write from reading our allocation
- * until we're done changing it. We depend on i_sem to block
+ * until we're done changing it. We depend on i_mutex to block
* other extend/truncate calls while we're here. Ordering wrt
* start_trans is important here -- always do it before! */
down_write(&OCFS2_I(inode)->ip_alloc_sem);
@@ -958,8 +958,8 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
filp->f_flags &= ~O_DIRECT;
#endif
- down(&inode->i_sem);
- /* to match setattr's i_sem -> i_alloc_sem -> rw_lock ordering */
+ mutex_lock(&inode->i_mutex);
+ /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
if (filp->f_flags & O_DIRECT) {
have_alloc_sem = 1;
down_read(&inode->i_alloc_sem);
@@ -1123,7 +1123,7 @@ out:
up_read(&inode->i_alloc_sem);
if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
mlog_exit(ret);
return ret;
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index a91ba4dec936..d4ecc0627716 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -485,10 +485,10 @@ static int ocfs2_remove_inode(struct inode *inode,
goto bail;
}
- down(&inode_alloc_inode->i_sem);
+ mutex_lock(&inode_alloc_inode->i_mutex);
status = ocfs2_meta_lock(inode_alloc_inode, NULL, &inode_alloc_bh, 1);
if (status < 0) {
- up(&inode_alloc_inode->i_sem);
+ mutex_unlock(&inode_alloc_inode->i_mutex);
mlog_errno(status);
goto bail;
@@ -536,7 +536,7 @@ bail_commit:
ocfs2_commit_trans(handle);
bail_unlock:
ocfs2_meta_unlock(inode_alloc_inode, 1);
- up(&inode_alloc_inode->i_sem);
+ mutex_unlock(&inode_alloc_inode->i_mutex);
brelse(inode_alloc_bh);
bail:
iput(inode_alloc_inode);
@@ -567,10 +567,10 @@ static int ocfs2_wipe_inode(struct inode *inode,
/* Lock the orphan dir. The lock will be held for the entire
* delete_inode operation. We do this now to avoid races with
* recovery completion on other nodes. */
- down(&orphan_dir_inode->i_sem);
+ mutex_lock(&orphan_dir_inode->i_mutex);
status = ocfs2_meta_lock(orphan_dir_inode, NULL, &orphan_dir_bh, 1);
if (status < 0) {
- up(&orphan_dir_inode->i_sem);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
mlog_errno(status);
goto bail;
@@ -593,7 +593,7 @@ static int ocfs2_wipe_inode(struct inode *inode,
bail_unlock_dir:
ocfs2_meta_unlock(orphan_dir_inode, 1);
- up(&orphan_dir_inode->i_sem);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
brelse(orphan_dir_bh);
bail:
iput(orphan_dir_inode);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 04428042e5e5..303c8d96457f 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -216,7 +216,7 @@ void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
atomic_inc(&inode->i_count);
/* we're obviously changing it... */
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
/* sanity check */
BUG_ON(OCFS2_I(inode)->ip_handle);
@@ -241,7 +241,7 @@ static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)
OCFS2_I(inode)->ip_handle = NULL;
list_del_init(&OCFS2_I(inode)->ip_handle_list);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
iput(inode);
}
}
@@ -1433,10 +1433,10 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
goto out;
}
- down(&orphan_dir_inode->i_sem);
+ mutex_lock(&orphan_dir_inode->i_mutex);
status = ocfs2_meta_lock(orphan_dir_inode, NULL, NULL, 0);
if (status < 0) {
- up(&orphan_dir_inode->i_sem);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
mlog_errno(status);
goto out;
}
@@ -1451,7 +1451,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
if (!bh)
status = -EINVAL;
if (status < 0) {
- up(&orphan_dir_inode->i_sem);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
if (bh)
brelse(bh);
mlog_errno(status);
@@ -1465,7 +1465,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
if (!ocfs2_check_dir_entry(orphan_dir_inode,
de, bh, local)) {
- up(&orphan_dir_inode->i_sem);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
status = -EINVAL;
mlog_errno(status);
brelse(bh);
@@ -1509,7 +1509,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
}
brelse(bh);
}
- up(&orphan_dir_inode->i_sem);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
ocfs2_meta_unlock(orphan_dir_inode, 0);
have_disk_lock = 0;
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index fe373a2101d9..149b35181666 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -334,7 +334,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
goto bail;
}
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno,
&alloc_bh, 0, inode);
@@ -367,7 +367,7 @@ bail:
brelse(alloc_bh);
if (inode) {
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
iput(inode);
}
@@ -446,7 +446,7 @@ bail:
/*
* make sure we've got at least bitswanted contiguous bits in the
- * local alloc. You lose them when you drop i_sem.
+ * local alloc. You lose them when you drop i_mutex.
*
* We will add ourselves to the transaction passed in, but may start
* our own in order to shift windows.
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 48bf7f0ce544..364d64bd5f10 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -169,7 +169,7 @@ static match_table_t tokens = {
*/
static void ocfs2_write_super(struct super_block *sb)
{
- if (down_trylock(&sb->s_lock) == 0)
+ if (mutex_trylock(&sb->s_lock) != 0)
BUG();
sb->s_dirt = 0;
}
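
Note the inverted test in ocfs2_write_super(): down_trylock() returns 0 when it acquires the semaphore, while mutex_trylock() returns 1 when it acquires the mutex, so the assertion flips from == 0 to != 0. A sketch with placeholder names sem and mtx:

	if (down_trylock(&sem) == 0)	/* old: 0 means "we got it" */
		BUG();
	if (mutex_trylock(&mtx) != 0)	/* new: nonzero means "we got it" */
		BUG();

Either version fires only if s_lock was unexpectedly free; write_super is called with it already held.
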
diff --git a/fs/open.c b/fs/open.c
index 75f3329e8a67..a3b3a9b5c2ff 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -211,9 +211,9 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
newattrs.ia_valid |= ATTR_FILE;
}
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
err = notify_change(dentry, &newattrs);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return err;
}
@@ -398,9 +398,9 @@ asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times)
(error = vfs_permission(&nd, MAY_WRITE)) != 0)
goto dput_and_out;
}
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
error = notify_change(nd.dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
dput_and_out:
path_release(&nd);
out:
@@ -451,9 +451,9 @@ long do_utimes(char __user * filename, struct timeval * times)
(error = vfs_permission(&nd, MAY_WRITE)) != 0)
goto dput_and_out;
}
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
error = notify_change(nd.dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
dput_and_out:
path_release(&nd);
out:
@@ -620,13 +620,13 @@ asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
err = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto out_putf;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (mode == (mode_t) -1)
mode = inode->i_mode;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
err = notify_change(dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out_putf:
fput(file);
@@ -654,13 +654,13 @@ asmlinkage long sys_chmod(const char __user * filename, mode_t mode)
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto dput_and_out;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (mode == (mode_t) -1)
mode = inode->i_mode;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
error = notify_change(nd.dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
dput_and_out:
path_release(&nd);
@@ -696,9 +696,9 @@ static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
}
if (!S_ISDIR(inode->i_mode))
newattrs.ia_valid |= ATTR_KILL_SUID|ATTR_KILL_SGID;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
error = notify_change(dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out:
return error;
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 66aa0b938d6a..acb030b61fb0 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -44,10 +44,10 @@ void pipe_wait(struct inode * inode)
* is considered a noninteractive wait:
*/
prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
schedule();
finish_wait(PIPE_WAIT(*inode), &wait);
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
}
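
pipe_wait() shows why the conversion had to preserve the unlock-around-schedule() idiom: sleeping while holding the pipe mutex would deadlock the peer that is supposed to wake us. The general shape, with placeholder names wq, wait and lock:

	prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
	mutex_unlock(&lock);	/* let the other end make progress */
	schedule();		/* sleep until woken or signalled */
	finish_wait(&wq, &wait);
	mutex_lock(&lock);	/* re-taken; callers must re-check state */
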
static inline int
@@ -136,7 +136,7 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
do_wakeup = 0;
ret = 0;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
for (;;) {
int bufs = info->nrbufs;
@@ -200,7 +200,7 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
}
pipe_wait(inode);
}
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
@@ -237,7 +237,7 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
do_wakeup = 0;
ret = 0;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
if (!PIPE_READERS(*inode)) {
@@ -341,7 +341,7 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
PIPE_WAITING_WRITERS(*inode)--;
}
out:
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
@@ -381,7 +381,7 @@ pipe_ioctl(struct inode *pino, struct file *filp,
switch (cmd) {
case FIONREAD:
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
count = 0;
buf = info->curbuf;
@@ -390,7 +390,7 @@ pipe_ioctl(struct inode *pino, struct file *filp,
count += info->bufs[buf].len;
buf = (buf+1) & (PIPE_BUFFERS-1);
}
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return put_user(count, (int __user *)arg);
default:
return -EINVAL;
@@ -433,7 +433,7 @@ pipe_poll(struct file *filp, poll_table *wait)
static int
pipe_release(struct inode *inode, int decr, int decw)
{
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
PIPE_READERS(*inode) -= decr;
PIPE_WRITERS(*inode) -= decw;
if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
@@ -443,7 +443,7 @@ pipe_release(struct inode *inode, int decr, int decw)
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
}
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
}
@@ -454,9 +454,9 @@ pipe_read_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
if (retval < 0)
return retval;
@@ -471,9 +471,9 @@ pipe_write_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
if (retval < 0)
return retval;
@@ -488,14 +488,14 @@ pipe_rdwr_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
if (retval >= 0)
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
if (retval < 0)
return retval;
@@ -534,9 +534,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
{
/* We could have perhaps used atomic_t, but this and friends
below are the only places. So it doesn't seem worthwhile. */
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
PIPE_READERS(*inode)++;
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
}
@@ -544,9 +544,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
static int
pipe_write_open(struct inode *inode, struct file *filp)
{
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
PIPE_WRITERS(*inode)++;
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
}
@@ -554,12 +554,12 @@ pipe_write_open(struct inode *inode, struct file *filp)
static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
if (filp->f_mode & FMODE_READ)
PIPE_READERS(*inode)++;
if (filp->f_mode & FMODE_WRITE)
PIPE_WRITERS(*inode)++;
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
}
diff --git a/fs/quota.c b/fs/quota.c
index 612e04db4b93..d14d872646d4 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -168,7 +168,7 @@ static void quota_sync_sb(struct super_block *sb, int type)
sync_blockdev(sb->s_bdev);
/* Now when everything is written we can discard the pagecache so
- * that userspace sees the changes. We need i_sem and so we could
+ * that userspace sees the changes. We need i_mutex and so we could
 * not do it inside dqonoff_sem. Moreover we need to be careful
 * about races with quotaoff() (that is the reason why we have our own
* reference to inode). */
@@ -184,9 +184,9 @@ static void quota_sync_sb(struct super_block *sb, int type)
up(&sb_dqopt(sb)->dqonoff_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (discard[cnt]) {
- down(&discard[cnt]->i_sem);
+ mutex_lock(&discard[cnt]->i_mutex);
truncate_inode_pages(&discard[cnt]->i_data, 0);
- up(&discard[cnt]->i_sem);
+ mutex_unlock(&discard[cnt]->i_mutex);
iput(discard[cnt]);
}
}
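
The hunk above is a lock-ordering dance: i_mutex may not be taken inside dqonoff_sem, so quota_sync_sb() pins each quota inode with a reference while still under dqonoff_sem and truncates the pagecache only after dropping it. Condensed to a single inode, error handling elided:

	down(&sb_dqopt(sb)->dqonoff_sem);
	discard = igrab(inode);			/* pin the quota inode */
	up(&sb_dqopt(sb)->dqonoff_sem);

	if (discard) {
		mutex_lock(&discard->i_mutex);	/* safe: dqonoff_sem dropped */
		truncate_inode_pages(&discard->i_data, 0);
		mutex_unlock(&discard->i_mutex);
		iput(discard);
	}
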
diff --git a/fs/read_write.c b/fs/read_write.c
index df3468a22fea..3f7a1a62165f 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -33,7 +33,7 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
long long retval;
struct inode *inode = file->f_mapping->host;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
switch (origin) {
case 2:
offset += inode->i_size;
@@ -49,7 +49,7 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
}
retval = offset;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return retval;
}
diff --git a/fs/readdir.c b/fs/readdir.c
index b03579bc0210..b6109329b607 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -30,13 +30,13 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
if (res)
goto out;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
res = file->f_op->readdir(file, buf, filler);
file_accessed(file);
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out:
return res;
}
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 7892a865b58a..127e7d2cabdd 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -49,7 +49,7 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
}
reiserfs_write_lock(inode->i_sb);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
/* freeing preallocation only involves relogging blocks that
* are already in the current transaction. preallocation gets
* freed at the end of each transaction, so it is impossible for
@@ -100,7 +100,7 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
err = reiserfs_truncate_file(inode, 0);
}
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
return err;
}
@@ -1342,7 +1342,7 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
if (unlikely(!access_ok(VERIFY_READ, buf, count)))
return -EFAULT;
- down(&inode->i_sem); // locks the entire file for just us
+ mutex_lock(&inode->i_mutex); // locks the entire file for just us
pos = *ppos;
@@ -1532,12 +1532,12 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
generic_osync_inode(inode, file->f_mapping,
OSYNC_METADATA | OSYNC_DATA);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
reiserfs_async_progress_wait(inode->i_sb);
return (already_written != 0) ? already_written : res;
out:
- up(&inode->i_sem); // unlock the file on exit.
+ mutex_unlock(&inode->i_mutex); // unlock the file on exit.
return res;
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a5e3a0ddbe53..ffa34b861bdb 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -40,12 +40,12 @@ void reiserfs_delete_inode(struct inode *inode)
	/* The = 0 happens when we abort creating a new inode for some reason like lack of space... */
if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
reiserfs_delete_xattrs(inode);
if (journal_begin(&th, inode->i_sb, jbegin_count)) {
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
goto out;
}
reiserfs_update_inode_transaction(inode);
@@ -59,11 +59,11 @@ void reiserfs_delete_inode(struct inode *inode)
DQUOT_FREE_INODE(inode);
if (journal_end(&th, inode->i_sb, jbegin_count)) {
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
goto out;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
/* check return value from reiserfs_delete_object after
* ending the transaction
@@ -551,7 +551,7 @@ static int convert_tail_for_hole(struct inode *inode,
/* we don't have to make sure the conversion did not happen while
** we were locking the page because anyone that could convert
- ** must first take i_sem.
+ ** must first take i_mutex.
**
** We must fix the tail page for writing because it might have buffers
** that are mapped, but have a block number of 0. This indicates tail
@@ -586,7 +586,7 @@ static inline int _allocate_block(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
#ifdef REISERFS_PREALLOCATE
- if (!(flags & GET_BLOCK_NO_ISEM)) {
+ if (!(flags & GET_BLOCK_NO_IMUX)) {
return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
path, block);
}
@@ -2318,7 +2318,7 @@ static int map_block_for_writepage(struct inode *inode,
/* this is where we fill in holes in the file. */
if (use_get_block) {
retval = reiserfs_get_block(inode, block, bh_result,
- GET_BLOCK_CREATE | GET_BLOCK_NO_ISEM
+ GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
| GET_BLOCK_NO_DANGLE);
if (!retval) {
if (!buffer_mapped(bh_result)
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 81fc00285f60..ba8bf8df6dc7 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -120,7 +120,7 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp)
/* we need to make sure nobody is changing the file size beneath
** us
*/
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */
@@ -156,7 +156,7 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp)
page_cache_release(page);
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
return retval;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 42afb5bef111..397d9590c8f2 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2211,7 +2211,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
size_t towrite = len;
struct buffer_head tmp_bh, *bh;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -2250,7 +2250,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index c92e124f628e..196e971c03c9 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -205,7 +205,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in
1) * p_s_sb->s_blocksize;
pos1 = pos;
- // we are protected by i_sem. The tail can not disapper, not
+	// we are protected by i_mutex. The tail cannot disappear, and no
// append can be done either
// we are in truncate or packing tail in file_release
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 02091eaac0b4..f1895f0a278e 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -67,11 +67,11 @@ static struct dentry *create_xa_root(struct super_block *sb)
goto out;
} else if (!xaroot->d_inode) {
int err;
- down(&privroot->d_inode->i_sem);
+ mutex_lock(&privroot->d_inode->i_mutex);
err =
privroot->d_inode->i_op->mkdir(privroot->d_inode, xaroot,
0700);
- up(&privroot->d_inode->i_sem);
+ mutex_unlock(&privroot->d_inode->i_mutex);
if (err) {
dput(xaroot);
@@ -219,7 +219,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode,
} else if (flags & XATTR_REPLACE || flags & FL_READONLY) {
goto out;
} else {
- /* inode->i_sem is down, so nothing else can try to create
+	/* inode->i_mutex is held, so nothing else can try to create
* the same xattr */
err = xadir->d_inode->i_op->create(xadir->d_inode, xafile,
0700 | S_IFREG, NULL);
@@ -268,7 +268,7 @@ static struct file *open_xa_file(const struct inode *inode, const char *name,
* and don't mess with f->f_pos, but the idea is the same. Do some
* action on each and every entry in the directory.
*
- * we're called with i_sem held, so there are no worries about the directory
+ * we're called with i_mutex held, so there are no worries about the directory
* changing underneath us.
*/
static int __xattr_readdir(struct file *filp, void *dirent, filldir_t filldir)
@@ -426,7 +426,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
int res = -ENOTDIR;
if (!file->f_op || !file->f_op->readdir)
goto out;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
// down(&inode->i_zombie);
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
@@ -435,7 +435,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
unlock_kernel();
}
// up(&inode->i_zombie);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out:
return res;
}
@@ -480,7 +480,7 @@ static inline __u32 xattr_hash(const char *msg, int len)
/* Generic extended attribute operations that can be used by xa plugins */
/*
- * inode->i_sem: down
+ * inode->i_mutex: locked
*/
int
reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
@@ -535,7 +535,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
/* Resize it so we're ok to write there */
newattrs.ia_size = buffer_size;
newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
- down(&xinode->i_sem);
+ mutex_lock(&xinode->i_mutex);
err = notify_change(fp->f_dentry, &newattrs);
if (err)
goto out_filp;
@@ -598,7 +598,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
}
out_filp:
- up(&xinode->i_sem);
+ mutex_unlock(&xinode->i_mutex);
fput(fp);
out:
@@ -606,7 +606,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
}
/*
- * inode->i_sem: down
+ * inode->i_mutex: locked
*/
int
reiserfs_xattr_get(const struct inode *inode, const char *name, void *buffer,
@@ -793,7 +793,7 @@ reiserfs_delete_xattrs_filler(void *buf, const char *name, int namelen,
}
-/* This is called w/ inode->i_sem downed */
+/* This is called w/ inode->i_mutex held */
int reiserfs_delete_xattrs(struct inode *inode)
{
struct file *fp;
@@ -946,7 +946,7 @@ int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs)
/*
* Inode operation getxattr()
- * Preliminary locking: we down dentry->d_inode->i_sem
+ * Preliminary locking: we take dentry->d_inode->i_mutex
*/
ssize_t
reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
@@ -970,7 +970,7 @@ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
/*
* Inode operation setxattr()
*
- * dentry->d_inode->i_sem down
+ * dentry->d_inode->i_mutex held
*/
int
reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
@@ -1008,7 +1008,7 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
/*
* Inode operation removexattr()
*
- * dentry->d_inode->i_sem down
+ * dentry->d_inode->i_mutex held
*/
int reiserfs_removexattr(struct dentry *dentry, const char *name)
{
@@ -1091,7 +1091,7 @@ reiserfs_listxattr_filler(void *buf, const char *name, int namelen,
/*
* Inode operation listxattr()
*
- * Preliminary locking: we down dentry->d_inode->i_sem
+ * Preliminary locking: we take dentry->d_inode->i_mutex
*/
ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
{
@@ -1289,9 +1289,9 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
if (!IS_ERR(dentry)) {
if (!(mount_flags & MS_RDONLY) && !dentry->d_inode) {
struct inode *inode = dentry->d_parent->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
err = inode->i_op->mkdir(inode, dentry, 0700);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (err) {
dput(dentry);
dentry = NULL;
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index a47ac9aac8b2..2dc953504cc0 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -174,7 +174,7 @@ static void *posix_acl_to_disk(const struct posix_acl *acl, size_t * size)
/*
* Inode operation get_posix_acl().
*
- * inode->i_sem: down
+ * inode->i_mutex: locked
* BKL held [before 2.5.x]
*/
struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
@@ -237,7 +237,7 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
/*
* Inode operation set_posix_acl().
*
- * inode->i_sem: down
+ * inode->i_mutex: locked
* BKL held [before 2.5.x]
*/
static int
@@ -312,7 +312,7 @@ reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return error;
}
-/* dir->i_sem: down,
+/* dir->i_mutex: locked,
* inode is new and not released into the wild yet */
int
reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry,
diff --git a/fs/relayfs/inode.c b/fs/relayfs/inode.c
index 7b7f2cb5f0e1..383523011aad 100644
--- a/fs/relayfs/inode.c
+++ b/fs/relayfs/inode.c
@@ -109,7 +109,7 @@ static struct dentry *relayfs_create_entry(const char *name,
}
parent = dget(parent);
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
d = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(d)) {
d = NULL;
@@ -139,7 +139,7 @@ release_mount:
simple_release_fs(&relayfs_mount, &relayfs_mount_count);
exit:
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);
return d;
}
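
relayfs_create_entry() uses the standard create-under-parent idiom that recurs in sysfs below: lookup_one_len() requires the parent's i_mutex, so the parent dentry is pinned, its inode locked, and the lookup plus creation done under that lock. A condensed sketch, error handling elided:

	parent = dget(parent);			/* pin the parent dentry */
	mutex_lock(&parent->d_inode->i_mutex);
	d = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(d) && !d->d_inode)
		error = vfs_mkdir(parent->d_inode, d, mode);
	mutex_unlock(&parent->d_inode->i_mutex);
	dput(parent);
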
@@ -204,7 +204,7 @@ int relayfs_remove(struct dentry *dentry)
return -EINVAL;
parent = dget(parent);
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
error = simple_rmdir(parent->d_inode, dentry);
@@ -215,7 +215,7 @@ int relayfs_remove(struct dentry *dentry)
}
if (!error)
dput(dentry);
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);
if (!error)
@@ -476,7 +476,7 @@ static ssize_t relay_file_read(struct file *filp,
ssize_t ret = 0;
void *from;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if(!relay_file_read_avail(buf, *ppos))
goto out;
@@ -494,7 +494,7 @@ static ssize_t relay_file_read(struct file *filp,
relay_file_read_consume(buf, read_start, count);
*ppos = relay_file_read_end_pos(buf, read_start, count);
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
diff --git a/fs/super.c b/fs/super.c
index 0a30e51692cf..c177b92419c5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -72,7 +72,7 @@ static struct super_block *alloc_super(void)
INIT_HLIST_HEAD(&s->s_anon);
INIT_LIST_HEAD(&s->s_inodes);
init_rwsem(&s->s_umount);
- sema_init(&s->s_lock, 1);
+ mutex_init(&s->s_lock);
down_write(&s->s_umount);
s->s_count = S_BIAS;
atomic_set(&s->s_active, 1);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index d36780382176..49bd219275db 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -99,7 +99,7 @@ static int create_dir(struct kobject * k, struct dentry * p,
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
- down(&p->d_inode->i_sem);
+ mutex_lock(&p->d_inode->i_mutex);
*d = lookup_one_len(n, p, strlen(n));
if (!IS_ERR(*d)) {
error = sysfs_make_dirent(p->d_fsdata, *d, k, mode, SYSFS_DIR);
@@ -122,7 +122,7 @@ static int create_dir(struct kobject * k, struct dentry * p,
dput(*d);
} else
error = PTR_ERR(*d);
- up(&p->d_inode->i_sem);
+ mutex_unlock(&p->d_inode->i_mutex);
return error;
}
@@ -246,7 +246,7 @@ static void remove_dir(struct dentry * d)
struct dentry * parent = dget(d->d_parent);
struct sysfs_dirent * sd;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
d_delete(d);
sd = d->d_fsdata;
list_del_init(&sd->s_sibling);
@@ -257,7 +257,7 @@ static void remove_dir(struct dentry * d)
pr_debug(" o %s removing done (%d)\n",d->d_name.name,
atomic_read(&d->d_count));
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);
}
@@ -286,7 +286,7 @@ void sysfs_remove_dir(struct kobject * kobj)
return;
pr_debug("sysfs %s: removing dir\n",dentry->d_name.name);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
parent_sd = dentry->d_fsdata;
list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
if (!sd->s_element || !(sd->s_type & SYSFS_NOT_PINNED))
@@ -295,7 +295,7 @@ void sysfs_remove_dir(struct kobject * kobj)
sysfs_drop_dentry(sd, dentry);
sysfs_put(sd);
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
remove_dir(dentry);
/**
@@ -318,7 +318,7 @@ int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
down_write(&sysfs_rename_sem);
parent = kobj->parent->dentry;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
if (!IS_ERR(new_dentry)) {
@@ -334,7 +334,7 @@ int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
error = -EEXIST;
dput(new_dentry);
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
up_write(&sysfs_rename_sem);
return error;
@@ -345,9 +345,9 @@ static int sysfs_dir_open(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_dentry;
struct sysfs_dirent * parent_sd = dentry->d_fsdata;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
file->private_data = sysfs_new_dirent(parent_sd, NULL);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return file->private_data ? 0 : -ENOMEM;
@@ -358,9 +358,9 @@ static int sysfs_dir_close(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_dentry;
struct sysfs_dirent * cursor = file->private_data;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
list_del_init(&cursor->s_sibling);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
release_sysfs_dirent(cursor);
@@ -436,7 +436,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
{
struct dentry * dentry = file->f_dentry;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += file->f_pos;
@@ -444,7 +444,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
if (offset >= 0)
break;
default:
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -468,7 +468,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
list_add_tail(&cursor->s_sibling, p);
}
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return offset;
}
@@ -483,4 +483,3 @@ struct file_operations sysfs_dir_operations = {
EXPORT_SYMBOL_GPL(sysfs_create_dir);
EXPORT_SYMBOL_GPL(sysfs_remove_dir);
EXPORT_SYMBOL_GPL(sysfs_rename_dir);
-
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 4013d7905e84..d0e3d8495165 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -364,9 +364,9 @@ int sysfs_add_file(struct dentry * dir, const struct attribute * attr, int type)
umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
int error = 0;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
error = sysfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return error;
}
@@ -398,7 +398,7 @@ int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
struct dentry * victim;
int res = -ENOENT;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
victim = lookup_one_len(attr->name, dir, strlen(attr->name));
if (!IS_ERR(victim)) {
/* make sure dentry is really there */
@@ -420,7 +420,7 @@ int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
*/
dput(victim);
}
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return res;
}
@@ -441,22 +441,22 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
struct iattr newattrs;
int res = -ENOENT;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
victim = lookup_one_len(attr->name, dir, strlen(attr->name));
if (!IS_ERR(victim)) {
if (victim->d_inode &&
(victim->d_parent->d_inode == dir->d_inode)) {
inode = victim->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
newattrs.ia_mode = (mode & S_IALLUGO) |
(inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
res = notify_change(victim, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
dput(victim);
}
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return res;
}
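
sysfs_chmod_file() is one of the few places that nests two inode mutexes: the directory's (required by lookup_one_len()) and then the attribute inode's (required by notify_change()). Condensed:

	mutex_lock(&dir->d_inode->i_mutex);		/* outer: parent dir */
	victim = lookup_one_len(attr->name, dir, strlen(attr->name));
	if (!IS_ERR(victim) && victim->d_inode) {
		mutex_lock(&victim->d_inode->i_mutex);	/* inner: the file */
		res = notify_change(victim, &newattrs);
		mutex_unlock(&victim->d_inode->i_mutex);
		dput(victim);
	}
	mutex_unlock(&dir->d_inode->i_mutex);

Parent-then-child is the order the VFS itself uses, so no inversion is introduced.
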
@@ -480,4 +480,3 @@ void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
EXPORT_SYMBOL_GPL(sysfs_create_file);
EXPORT_SYMBOL_GPL(sysfs_remove_file);
EXPORT_SYMBOL_GPL(sysfs_update_file);
-
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 970a33f03299..c3133219941c 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -201,7 +201,7 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
/*
* Unhashes the dentry corresponding to given sysfs_dirent
- * Called with parent inode's i_sem held.
+ * Called with parent inode's i_mutex held.
*/
void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
{
@@ -232,7 +232,7 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
/* no inode means this hasn't been made visible yet */
return;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
continue;
@@ -243,7 +243,5 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
break;
}
}
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
}
-
-
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index de402fa915f2..e38d6338a20d 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -86,9 +86,9 @@ int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char
BUG_ON(!kobj || !kobj->dentry || !name);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
error = sysfs_add_link(dentry, name, target);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return error;
}
@@ -177,4 +177,3 @@ struct inode_operations sysfs_symlink_inode_operations = {
EXPORT_SYMBOL_GPL(sysfs_create_link);
EXPORT_SYMBOL_GPL(sysfs_remove_link);
-
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 2ba11a9aa995..e9a42c711a9e 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1275,7 +1275,7 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
size_t towrite = len;
struct buffer_head *bh;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -1297,7 +1297,7 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
}
out:
if (len == towrite) {
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return err;
}
if (inode->i_size < off+len-towrite)
@@ -1305,7 +1305,7 @@ out:
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/xattr.c b/fs/xattr.c
index bcc2156d4d28..386a532ee5a9 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -51,7 +51,7 @@ setxattr(struct dentry *d, char __user *name, void __user *value,
}
}
- down(&d->d_inode->i_sem);
+ mutex_lock(&d->d_inode->i_mutex);
error = security_inode_setxattr(d, kname, kvalue, size, flags);
if (error)
goto out;
@@ -73,7 +73,7 @@ setxattr(struct dentry *d, char __user *name, void __user *value,
fsnotify_xattr(d);
}
out:
- up(&d->d_inode->i_sem);
+ mutex_unlock(&d->d_inode->i_mutex);
kfree(kvalue);
return error;
}
@@ -323,9 +323,9 @@ removexattr(struct dentry *d, char __user *name)
error = security_inode_removexattr(d, kname);
if (error)
goto out;
- down(&d->d_inode->i_sem);
+ mutex_lock(&d->d_inode->i_mutex);
error = d->d_inode->i_op->removexattr(d, kname);
- up(&d->d_inode->i_sem);
+ mutex_unlock(&d->d_inode->i_mutex);
if (!error)
fsnotify_xattr(d);
}
diff --git a/fs/xfs/linux-2.6/mutex.h b/fs/xfs/linux-2.6/mutex.h
index ce773d89a923..d3369b6ca168 100644
--- a/fs/xfs/linux-2.6/mutex.h
+++ b/fs/xfs/linux-2.6/mutex.h
@@ -19,7 +19,7 @@
#define __XFS_SUPPORT_MUTEX_H__
#include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
/*
 * Map the mutexes from IRIX to Linux mutexes.
@@ -28,12 +28,8 @@
* callers.
*/
#define MUTEX_DEFAULT 0x0
-typedef struct semaphore mutex_t;
-#define mutex_init(lock, type, name) sema_init(lock, 1)
-#define mutex_destroy(lock) sema_init(lock, -99)
-#define mutex_lock(lock, num) down(lock)
-#define mutex_trylock(lock) (down_trylock(lock) ? 0 : 1)
-#define mutex_unlock(lock) up(lock)
+typedef struct mutex mutex_t;
+//#define mutex_destroy(lock) do{}while(0)
#endif /* __XFS_SUPPORT_MUTEX_H__ */
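
With the IRIX compatibility macros gone, mutex_init(), mutex_lock() and mutex_trylock() now resolve directly to the generic API from <linux/mutex.h>, which is why every XFS call site below drops the extra type/name arguments. Before and after, taken from the hunks that follow:

	mutex_init(&mp->m_ilock, MUTEX_DEFAULT, "xfs_ilock");	/* old */
	mutex_init(&mp->m_ilock);				/* new */

	mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);		/* old */
	mutex_lock(&(XFS_QI_QOFFLOCK(mp)));			/* new */
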
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 14215a7db59f..41c478bb1ffc 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -203,7 +203,7 @@ validate_fields(
ip->i_nlink = va.va_nlink;
ip->i_blocks = va.va_nblocks;
- /* we're under i_sem so i_size can't change under us */
+ /* we're under i_mutex so i_size can't change under us */
if (i_size_read(ip) != va.va_size)
i_size_write(ip, va.va_size);
}
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 279e9bc92aba..5675117ef227 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -254,7 +254,7 @@ xfs_read(
}
if (unlikely(ioflags & IO_ISDIRECT))
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
@@ -286,7 +286,7 @@ xfs_read(
unlock_isem:
if (unlikely(ioflags & IO_ISDIRECT))
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
@@ -655,7 +655,7 @@ relock:
iolock = XFS_IOLOCK_EXCL;
locktype = VRWLOCK_WRITE;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
} else {
iolock = XFS_IOLOCK_SHARED;
locktype = VRWLOCK_WRITE_DIRECT;
@@ -686,7 +686,7 @@ start:
int dmflags = FILP_DELAY_FLAG(file);
if (need_isem)
- dmflags |= DM_FLAGS_ISEM;
+ dmflags |= DM_FLAGS_IMUX;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
@@ -772,7 +772,7 @@ retry:
if (need_isem) {
/* demote the lock now the cached pages are gone */
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
iolock = XFS_IOLOCK_SHARED;
locktype = VRWLOCK_WRITE_DIRECT;
@@ -817,14 +817,14 @@ retry:
xfs_rwunlock(bdp, locktype);
if (need_isem)
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (error)
goto out_nounlocks;
if (need_isem)
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
xfs_rwlock(bdp, locktype);
pos = xip->i_d.di_size;
ret = 0;
@@ -926,7 +926,7 @@ retry:
xfs_rwunlock(bdp, locktype);
if (need_isem)
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
error = sync_page_range(inode, mapping, pos, ret);
if (!error)
@@ -938,7 +938,7 @@ retry:
xfs_rwunlock(bdp, locktype);
out_unlock_isem:
if (need_isem)
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out_nounlocks:
return -error;
}
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 00b5043dfa5a..772ac48329ea 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -104,7 +104,7 @@ xfs_qm_dqinit(
*/
if (brandnewdquot) {
dqp->dq_flnext = dqp->dq_flprev = dqp;
- mutex_init(&dqp->q_qlock, MUTEX_DEFAULT, "xdq");
+ mutex_init(&dqp->q_qlock);
initnsema(&dqp->q_flock, 1, "fdq");
sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
@@ -1382,7 +1382,7 @@ void
xfs_dqlock(
xfs_dquot_t *dqp)
{
- mutex_lock(&(dqp->q_qlock), PINOD);
+ mutex_lock(&(dqp->q_qlock));
}
void
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 5328a2937127..bb6991a7a617 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -167,7 +167,7 @@ xfs_Gqm_init(void)
xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
xqm->qm_nrefs = 0;
#ifdef DEBUG
- mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
+ xfs_mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
#endif
return xqm;
}
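
Note the one holdout: the DEBUG-only qcheck_lock keeps its three-argument initializer but is renamed to xfs_mutex_init(). That name is presumably a thin compatibility shim defined elsewhere in XFS; a hypothetical definition consistent with this call site would be:

	/* hypothetical shim: swallow the IRIX type/name arguments */
	#define xfs_mutex_init(lock, type, name)	mutex_init(lock)
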
@@ -1166,7 +1166,7 @@ xfs_qm_init_quotainfo(
qinf->qi_dqreclaims = 0;
/* mutex used to serialize quotaoffs */
- mutex_init(&qinf->qi_quotaofflock, MUTEX_DEFAULT, "qoff");
+ mutex_init(&qinf->qi_quotaofflock);
/* Precalc some constants */
qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -1285,7 +1285,7 @@ xfs_qm_list_init(
char *str,
int n)
{
- mutex_init(&list->qh_lock, MUTEX_DEFAULT, str);
+ mutex_init(&list->qh_lock);
list->qh_next = NULL;
list->qh_version = 0;
list->qh_nelems = 0;
@@ -2762,7 +2762,7 @@ STATIC void
xfs_qm_freelist_init(xfs_frlist_t *ql)
{
ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
- mutex_init(&ql->qh_lock, MUTEX_DEFAULT, "dqf");
+ mutex_init(&ql->qh_lock);
ql->qh_version = 0;
ql->qh_nelems = 0;
}
@@ -2772,7 +2772,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql)
{
xfs_dquot_t *dqp, *nextdqp;
- mutex_lock(&ql->qh_lock, PINOD);
+ mutex_lock(&ql->qh_lock);
for (dqp = ql->qh_next;
dqp != (xfs_dquot_t *)ql; ) {
xfs_dqlock(dqp);
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index 12da259f2fcb..4568deb6da86 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -165,7 +165,7 @@ typedef struct xfs_dquot_acct {
#define XFS_QM_IWARNLIMIT 5
#define XFS_QM_RTBWARNLIMIT 5
-#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock, PINOD))
+#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock))
#define XFS_QM_UNLOCK(xqm) (mutex_unlock(&xqm##_lock))
#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++)
#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index d9d2993de435..90402a1c3983 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -363,7 +363,7 @@ xfs_qm_init(void)
KERN_INFO "SGI XFS Quota Management subsystem\n";
printk(message);
- mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock");
+ mutex_init(&xfs_Gqm_lock);
vfs_bhv_set_custom(&xfs_qmops, &xfs_qmcore_xfs);
xfs_qm_init_procfs();
}
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 24690e1af659..86a1d09f48d5 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -233,7 +233,7 @@ xfs_qm_scall_quotaoff(
*/
ASSERT(mp->m_quotainfo);
if (mp->m_quotainfo)
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+ mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
ASSERT(mp->m_quotainfo);
@@ -508,7 +508,7 @@ xfs_qm_scall_quotaon(
/*
* Switch on quota enforcement in core.
*/
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+ mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
@@ -617,7 +617,7 @@ xfs_qm_scall_setqlim(
* a quotaoff from happening). (XXXThis doesn't currently happen
* because we take the vfslock before calling xfs_qm_sysent).
*/
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+ mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
/*
* Get the dquot (locked), and join it to the transaction.
@@ -1426,7 +1426,7 @@ xfs_qm_internalqcheck(
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
- mutex_lock(&qcheck_lock, PINOD);
+ mutex_lock(&qcheck_lock);
/* There should be absolutely no quota activity while this
is going on. */
qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
index 7a9f3beb818c..b7ddd04aae32 100644
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ b/fs/xfs/quota/xfs_quota_priv.h
@@ -51,7 +51,7 @@
#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next)
#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems)
-#define XQMLCK(h) (mutex_lock(&((h)->qh_lock), PINOD))
+#define XQMLCK(h) (mutex_lock(&((h)->qh_lock)))
#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock)))
#ifdef DEBUG
struct xfs_dqhash;
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c
index 70ce40914c8a..69ec4f540c3a 100644
--- a/fs/xfs/support/uuid.c
+++ b/fs/xfs/support/uuid.c
@@ -24,7 +24,7 @@ static uuid_t *uuid_table;
void
uuid_init(void)
{
- mutex_init(&uuid_monitor, MUTEX_DEFAULT, "uuid_monitor");
+ mutex_init(&uuid_monitor);
}
/*
@@ -94,7 +94,7 @@ uuid_table_insert(uuid_t *uuid)
{
int i, hole;
- mutex_lock(&uuid_monitor, PVFS);
+ mutex_lock(&uuid_monitor);
for (i = 0, hole = -1; i < uuid_table_size; i++) {
if (uuid_is_nil(&uuid_table[i])) {
hole = i;
@@ -122,7 +122,7 @@ uuid_table_remove(uuid_t *uuid)
{
int i;
- mutex_lock(&uuid_monitor, PVFS);
+ mutex_lock(&uuid_monitor);
for (i = 0; i < uuid_table_size; i++) {
if (uuid_is_nil(&uuid_table[i]))
continue;
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
index 864bf6955689..b4c7f2bc55a0 100644
--- a/fs/xfs/xfs_dmapi.h
+++ b/fs/xfs/xfs_dmapi.h
@@ -152,7 +152,7 @@ typedef enum {
#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
-#define DM_FLAGS_ISEM 0x004 /* thread holds i_sem */
+#define DM_FLAGS_IMUX 0x004 /* thread holds i_mutex */
#define DM_FLAGS_IALLOCSEM_RD 0x010 /* thread holds i_alloc_sem rd */
#define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */
@@ -161,21 +161,21 @@ typedef enum {
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
- DM_FLAGS_ISEM : 0)
-#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
+ DM_FLAGS_IMUX : 0)
+#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,22))
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
- DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_ISEM)
-#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
+ DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_IMUX)
+#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,21)
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
- 0 : DM_FLAGS_ISEM)
-#define DM_SEM_FLAG_WR (DM_FLAGS_ISEM)
+ 0 : DM_FLAGS_IMUX)
+#define DM_SEM_FLAG_WR (DM_FLAGS_IMUX)
#endif
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 541d5dd474be..303af86739bf 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -117,7 +117,7 @@ xfs_mount_init(void)
AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
spinlock_init(&mp->m_sb_lock, "xfs_sb");
- mutex_init(&mp->m_ilock, MUTEX_DEFAULT, "xfs_ilock");
+ mutex_init(&mp->m_ilock);
initnsema(&mp->m_growlock, 1, "xfs_grow");
/*
* Initialize the AIL.
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 08b2e0a5d807..3432fd5a3986 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -533,7 +533,7 @@ typedef struct xfs_mod_sb {
int msb_delta; /* Change to make to specified field */
} xfs_mod_sb_t;
-#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock), PINOD)
+#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock))
#define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock))
#define XFS_SB_LOCK(mp) mutex_spinlock(&(mp)->m_sb_lock)
#define XFS_SB_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_sb_lock,(s))
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index cb03bbe92cdf..fc77f7413083 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -176,6 +176,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
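
The atomic_xchg() definition is new on every architecture because the generic mutex fastpath headers depend on it: it atomically stores a new value into an atomic_t and returns the previous one. A minimal illustration:

	static atomic_t count = ATOMIC_INIT(1);

	int take_once(void)
	{
		/* returns 1 the first time, 0 on every later call */
		return atomic_xchg(&count, 0);
	}
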
diff --git a/include/asm-alpha/mutex.h b/include/asm-alpha/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-alpha/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
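
Each architecture's new <asm/mutex.h> simply selects a fastpath flavour; alpha, like most ports in this patch, starts with the dec-based generic:

	/* available flavours:
	 *   asm-generic/mutex-dec.h  - atomic_dec_return() based
	 *   asm-generic/mutex-xchg.h - atomic_xchg() based (e.g. pre-ARMv6)
	 *   asm-generic/mutex-null.h - no fastpath, slowpath only (debug)
	 */
	#include <asm-generic/mutex-dec.h>
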
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f72b63309bc5..3d7283d84405 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -175,6 +175,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#endif /* __LINUX_ARM_ARCH__ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
new file mode 100644
index 000000000000..6caa59f1f595
--- /dev/null
+++ b/include/asm-arm/mutex.h
@@ -0,0 +1,128 @@
+/*
+ * include/asm-arm/mutex.h
+ *
+ * ARM optimized mutex locking primitives
+ *
+ * Please look into asm-generic/mutex-xchg.h for a formal definition.
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#if __LINUX_ARM_ARCH__ < 6
+/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+# include <asm-generic/mutex-xchg.h>
+#else
+
+/*
+ * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+ * atomic decrement (it is not a reliable atomic decrement but it satisfies
+ * the defined semantics for our purpose, while being smaller and faster
+ * than a real atomic decrement or atomic swap). The idea is to attempt
+ * decrementing the lock value only once. If the decremented value isn't zero,
+ * or if its store-back fails due to contention on the exclusive store, we
+ * simply bail out immediately through the slow path where the lock will be
+ * reattempted until it succeeds.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ int __ex_flag, __res; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ ( \
+ "ldrex %0, [%2] \n" \
+ "sub %0, %0, #1 \n" \
+ "strex %1, %0, [%2] \n" \
+ \
+ : "=&r" (__res), "=&r" (__ex_flag) \
+ : "r" (&(count)->counter) \
+ : "cc","memory" ); \
+ \
+ if (unlikely(__res || __ex_flag)) \
+ fail_fn(count); \
+} while (0)
+
+#define __mutex_fastpath_lock_retval(count, fail_fn) \
+({ \
+ int __ex_flag, __res; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall int (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ ( \
+ "ldrex %0, [%2] \n" \
+ "sub %0, %0, #1 \n" \
+ "strex %1, %0, [%2] \n" \
+ \
+ : "=&r" (__res), "=&r" (__ex_flag) \
+ : "r" (&(count)->counter) \
+ : "cc","memory" ); \
+ \
+ __res |= __ex_flag; \
+ if (unlikely(__res != 0)) \
+ __res = fail_fn(count); \
+ __res; \
+})
+
+/*
+ * Same trick is used for the unlock fast path. However the original value,
+ * rather than the result, is used to test for success in order to have
+ * better generated assembly.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ int __ex_flag, __res, __orig; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ ( \
+ "ldrex %0, [%3] \n" \
+ "add %1, %0, #1 \n" \
+ "strex %2, %1, [%3] \n" \
+ \
+ : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) \
+ : "r" (&(count)->counter) \
+ : "cc","memory" ); \
+ \
+ if (unlikely(__orig || __ex_flag)) \
+ fail_fn(count); \
+} while (0)
+
+/*
+ * If the unlock was done on a contended lock, or if the unlock simply fails,
+ * then the mutex remains locked.
+ */
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/*
+ * For __mutex_fastpath_trylock we use another construct which could be
+ * described as a "single value cmpxchg".
+ *
+ * This provides the needed trylock semantics like cmpxchg would, but it is
+ * lighter and less generic than a true cmpxchg implementation.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ int __ex_flag, __res, __orig;
+
+ __asm__ (
+
+ "1: ldrex %0, [%3] \n"
+ "subs %1, %0, #1 \n"
+ "strexeq %2, %1, [%3] \n"
+ "movlt %0, #0 \n"
+ "cmpeq %2, #0 \n"
+ "bgt 1b \n"
+
+ : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+ : "r" (&count->counter)
+ : "cc", "memory" );
+
+ return __orig;
+}
+
+#endif
+#endif
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 3074b0e76343..1552c8653990 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -76,6 +76,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 2df2c7aa19b7..0b51a87e5532 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -136,6 +136,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
diff --git a/include/asm-cris/mutex.h b/include/asm-cris/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-cris/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 9c9e9499cfd8..a59f684b4f33 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -328,6 +328,7 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
#endif
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
diff --git a/include/asm-frv/mutex.h b/include/asm-frv/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-frv/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
new file mode 100644
index 000000000000..74b18cda169f
--- /dev/null
+++ b/include/asm-generic/mutex-dec.h
@@ -0,0 +1,110 @@
+/*
+ * asm-generic/mutex-dec.h
+ *
+ * Generic implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ */
+#ifndef _ASM_GENERIC_MUTEX_DEC_H
+#define _ASM_GENERIC_MUTEX_DEC_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ if (unlikely(atomic_dec_return(count) < 0)) \
+ fail_fn(count); \
+ else \
+ smp_mb(); \
+} while (0)
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else {
+ smp_mb();
+ return 0;
+ }
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ smp_mb(); \
+ if (unlikely(atomic_inc_return(count) <= 0)) \
+ fail_fn(count); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ /*
+ * We have two variants here. The cmpxchg based one is the best one
+	 * because it never induces a false contention state. It is included
+ * here because architectures using the inc/dec algorithms over the
+ * xchg ones are much more likely to support cmpxchg natively.
+ *
+	 * If not, we fall back to the spinlock based variant - that is
+ * just as efficient (and simpler) as a 'destructive' probing of
+ * the mutex state would be.
+ */
+#ifdef __HAVE_ARCH_CMPXCHG
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
+ smp_mb();
+ return 1;
+ }
+ return 0;
+#else
+ return fail_fn(count);
+#endif
+}
+
+#endif
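
These hooks are consumed by the core mutex code; a simplified sketch of how kernel/mutex.c is expected to wire them up (the real slowpath functions queue the task on the mutex's wait list):

	void mutex_lock(struct mutex *lock)
	{
		might_sleep();
		/* fast: count 1 -> 0; slow: block on the wait list */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}

	void mutex_unlock(struct mutex *lock)
	{
		/* fast: count 0 -> 1; slow: also wake up a waiter */
		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
	}
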
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
new file mode 100644
index 000000000000..5cf8b7ce0c45
--- /dev/null
+++ b/include/asm-generic/mutex-null.h
@@ -0,0 +1,24 @@
+/*
+ * asm-generic/mutex-null.h
+ *
+ * Generic implementation of the mutex fastpath, based on NOP :-)
+ *
+ * This is used by the mutex-debugging infrastructure, but it can also
+ * be used by architectures that (for whatever reason) want to use the
+ * spinlock based slowpath.
+ */
+#ifndef _ASM_GENERIC_MUTEX_NULL_H
+#define _ASM_GENERIC_MUTEX_NULL_H
+
+/* extra parameter only needed for mutex debugging: */
+#ifndef __IP__
+# define __IP__
+#endif
+
+#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count __RET_IP__)
+#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count __RET_IP__)
+#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count __RET_IP__)
+#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
+#define __mutex_slowpath_needs_to_unlock() 1
+
+#endif
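With the NULL fastpath, every operation funnels straight into the slowpath, which is exactly what the debugging code needs in order to observe every acquire and release. A sketch of the resulting expansion, assuming the __RET_IP__ definition from kernel/mutex-debug.c and a hypothetical __mutex_lock_slowpath caller:

/*
 * __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *
 * expands (in the debug case, where __RET_IP__ appends the caller's
 * instruction pointer) to:
 *
 * __mutex_lock_slowpath(&lock->count,
 *			 (unsigned long)__builtin_return_address(0));
 */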
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
new file mode 100644
index 000000000000..1d24f47e6c48
--- /dev/null
+++ b/include/asm-generic/mutex-xchg.h
@@ -0,0 +1,117 @@
+/*
+ * asm-generic/mutex-xchg.h
+ *
+ * Generic implementation of the mutex fastpath, based on xchg().
+ *
+ * NOTE: An xchg based implementation is less optimal than an atomic
+ * decrement/increment based implementation. If your architecture
+ * has a reasonable atomic dec/inc then you should probably use
+ * asm-generic/mutex-dec.h instead, or you could open-code an
+ * optimized version in asm/mutex.h.
+ */
+#ifndef _ASM_GENERIC_MUTEX_XCHG_H
+#define _ASM_GENERIC_MUTEX_XCHG_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ if (unlikely(atomic_xchg(count, 0) != 1)) \
+ fail_fn(count); \
+ else \
+ smp_mb(); \
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_xchg(count, 0) != 1))
+ return fail_fn(count);
+ else {
+ smp_mb();
+ return 0;
+ }
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1;
+ * otherwise it needs to return 0.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ smp_mb(); \
+ if (unlikely(atomic_xchg(count, 1) != 0)) \
+ fail_fn(count); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 0
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: spinlock based trylock implementation
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ int prev = atomic_xchg(count, 0);
+
+ if (unlikely(prev < 0)) {
+ /*
+ * The lock was marked contended so we must restore that
+ * state. If while doing so we get back a prev value of 1
+ * then we just own it.
+ *
+ * [ In the rare case of the mutex going to 1, to 0, to -1
+ * and then back to 0 in this few-instructions window,
+ * this has the potential to trigger the slowpath for the
+ * owner's unlock path needlessly, but that's not a problem
+ * in practice. ]
+ */
+ prev = atomic_xchg(count, prev);
+ if (prev < 0)
+ prev = 0;
+ }
+ smp_mb();
+
+ return prev;
+}
+
+#endif
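The restore dance in __mutex_fastpath_trylock() above is the subtle part of the xchg variant; a worked trace of the state transitions, assuming a contended mutex with count == -1 when we probe it:

/*
 * prev = atomic_xchg(count, 0);	count: -1 -> 0, prev == -1
 * prev = atomic_xchg(count, prev);	count: 0 -> -1 (restored)
 *
 * If the owner unlocked in between, the second xchg returns 1 and the
 * probing task simply owns the mutex (success). Otherwise the negative
 * prev is clamped to 0, i.e. failure, with the contended marking kept.
 */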
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index d891541e89c3..21f54428c86b 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -95,6 +95,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
diff --git a/include/asm-h8300/mutex.h b/include/asm-h8300/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-h8300/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
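All of the stub <asm-*/mutex.h> headers added below make the same default choice. A hypothetical architecture whose xchg() is cheaper than its atomic_dec_return() would instead do:

#include <asm-generic/mutex-xchg.h>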
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 7a5472d77091..de649d3aa2d4 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -216,6 +216,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
}
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
new file mode 100644
index 000000000000..4e5e3de1b9a6
--- /dev/null
+++ b/include/asm-i386/mutex.h
@@ -0,0 +1,124 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " decl (%%eax) \n" \
+ " js "#fail_fn" \n" \
+ \
+ :"=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+ int fastcall (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value
+ * to 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1;
+ * otherwise it needs to return 0.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " incl (%%eax) \n" \
+ " jle "#fail_fn" \n" \
+ \
+ :"=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ /*
+ * We have two variants here. The cmpxchg based one is the best one
+ * because it never induces a false contention state. It is included
+ * here because architectures using the inc/dec algorithms over the
+ * xchg ones are much more likely to support cmpxchg natively.
+ *
+ * If not, we fall back to the spinlock-based variant - that is
+ * just as efficient (and simpler) as a 'destructive' probing of
+ * the mutex state would be.
+ */
+#ifdef __HAVE_ARCH_CMPXCHG
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+ return 0;
+#else
+ return fail_fn(count);
+#endif
+}
+
+#endif
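One constraint worth spelling out: the assembly fastpath macros above stringify fail_fn straight into the instruction stream ('js "#fail_fn"'), so fail_fn must be the literal symbol of a fastcall function, never a function-pointer variable. A sketch of the expected caller shape (the slowpath name is an assumption, following kernel/mutex.c):

static void fastcall __mutex_lock_slowpath(atomic_t *count);

static inline void example_lock(struct mutex *lock)
{
	/* a 'js __mutex_lock_slowpath' is emitted into this function */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}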
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 15cf7984c48e..d3e0dfa99e1f 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -89,6 +89,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
}
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-ia64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index 70761278b6cb..3122fe106f05 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -243,6 +243,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
#define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-m32r/mutex.h b/include/asm-m32r/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-m32r/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index b8a4e75d679d..a4a84d5c65d5 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -140,6 +140,7 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
diff --git a/include/asm-m68k/mutex.h b/include/asm-m68k/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-m68k/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 1702dbe9318c..6c4e4b63e454 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -129,6 +129,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
diff --git a/include/asm-m68knommu/mutex.h b/include/asm-m68knommu/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-m68knommu/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 92256e43a938..94a95872d727 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -289,6 +289,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-mips/mutex.h b/include/asm-mips/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-mips/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 64ebd086c40d..2ca56d34aaad 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -165,6 +165,7 @@ static __inline__ int atomic_read(const atomic_t *v)
/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-parisc/mutex.h b/include/asm-parisc/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-parisc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index ae395a0632a6..248f9aec959c 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -165,6 +165,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-powerpc/mutex.h b/include/asm-powerpc/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-powerpc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index d82aedf616fe..be6fefe223d6 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -75,6 +75,8 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
__CS_LOOP(v, mask, "or");
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
__asm__ __volatile__(" cs %0,%3,0(%2)\n"
diff --git a/include/asm-s390/mutex.h b/include/asm-s390/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-s390/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 618d8e0de348..fb627de217f2 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -101,6 +101,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
diff --git a/include/asm-sh/mutex.h b/include/asm-sh/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sh/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index f3ce5c0df13a..28f2ea9b567b 100644
--- a/include/asm-sh64/atomic.h
+++ b/include/asm-sh64/atomic.h
@@ -113,6 +113,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
diff --git a/include/asm-sh64/mutex.h b/include/asm-sh64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sh64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index accb4967e9d2..e1033170bd3a 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -20,6 +20,7 @@ typedef struct { volatile int counter; } atomic_t;
extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
diff --git a/include/asm-sparc/mutex.h b/include/asm-sparc/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sparc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 11f5aa5d108c..25256bdc8aae 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,6 +72,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
diff --git a/include/asm-sparc64/mutex.h b/include/asm-sparc64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sparc64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-um/mutex.h b/include/asm-um/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-um/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index f5b9ab6f4e70..166df00457ea 100644
--- a/include/asm-v850/atomic.h
+++ b/include/asm-v850/atomic.h
@@ -104,6 +104,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
diff --git a/include/asm-v850/mutex.h b/include/asm-v850/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-v850/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 72eb071488c7..6b540237a2f8 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -389,6 +389,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
new file mode 100644
index 000000000000..818abfd262d1
--- /dev/null
+++ b/include/asm-x86_64/mutex.h
@@ -0,0 +1,113 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - decrement and call function if negative
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is negative
+ *
+ * Atomically decrements @v and calls <fail_fn> if the result is negative.
+ */
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " decl (%%rdi) \n" \
+ " js 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+ int fastcall (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - increment and call function if nonpositive
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is nonpositive
+ *
+ * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
+ */
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " incl (%%rdi) \n" \
+ " jle 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
+ * if it wasn't 1 originally. [the fallback function is never used on
+ * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+ else
+ return 0;
+}
+
+#endif
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index e2ce06b101ad..fe105a123924 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -224,6 +224,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
diff --git a/include/asm-xtensa/mutex.h b/include/asm-xtensa/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-xtensa/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 2914f7b07156..e71dd98dbcae 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -87,7 +87,7 @@ struct ext3_inode_info {
#ifdef CONFIG_EXT3_FS_XATTR
/*
* Extended attributes can be read independently of the main file
- * data. Taking i_sem even when reading would cause contention
+ * data. Taking i_mutex even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4c82219b0fae..92ae3e2067b0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -219,6 +219,7 @@ extern int dir_notify_enable;
#include <linux/prio_tree.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/mutex.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
@@ -484,7 +485,7 @@ struct inode {
unsigned long i_blocks;
unsigned short i_bytes;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
- struct semaphore i_sem;
+ struct mutex i_mutex;
struct rw_semaphore i_alloc_sem;
struct inode_operations *i_op;
struct file_operations *i_fop; /* former ->i_op->default_file_ops */
@@ -820,7 +821,7 @@ struct super_block {
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
- struct semaphore s_lock;
+ struct mutex s_lock;
int s_count;
int s_syncing;
int s_need_sync_fs;
@@ -892,13 +893,13 @@ static inline int has_fs_excl(void)
static inline void lock_super(struct super_block * sb)
{
get_fs_excl();
- down(&sb->s_lock);
+ mutex_lock(&sb->s_lock);
}
static inline void unlock_super(struct super_block * sb)
{
put_fs_excl();
- up(&sb->s_lock);
+ mutex_unlock(&sb->s_lock);
}
/*
@@ -1191,7 +1192,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc);
* directory. The name should be stored in the @name (with the
* understanding that it is already pointing to a %NAME_MAX+1 sized
* buffer. get_name() should return %0 on success, a negative error code
- * or error. @get_name will be called without @parent->i_sem held.
+ * or error. @get_name will be called without @parent->i_mutex held.
*
* get_parent:
* @get_parent should find the parent directory for the given @child which
@@ -1213,7 +1214,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc);
* nfsd_find_fh_dentry() in either the @obj or @parent parameters.
*
* Locking rules:
- * get_parent is called with child->d_inode->i_sem down
+ * get_parent is called with child->d_inode->i_mutex down
* get_name is not (which is possibly inconsistent)
*/
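The i_sem -> i_mutex changes across this patch all follow one mechanical pattern; a sketch of the mapping (with 'inode' standing for any struct inode *):

/*
 * down(&inode->i_sem);         ->  mutex_lock(&inode->i_mutex);
 * up(&inode->i_sem);           ->  mutex_unlock(&inode->i_mutex);
 * down_trylock(&inode->i_sem)  ->  !mutex_trylock(&inode->i_mutex)
 *
 * Note the inverted return convention in the trylock case; see the
 * NOTE in include/linux/mutex.h below.
 */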
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ef8d0cbb832f..9a8c05dbe4f3 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -18,6 +18,7 @@
#include <linux/bio.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/completion.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -638,7 +639,7 @@ typedef struct ide_drive_s {
int crc_count; /* crc counter to reduce drive speed */
struct list_head list;
struct device gendev;
- struct semaphore gendev_rel_sem; /* to deal with device release() */
+ struct completion gendev_rel_comp; /* to deal with device release() */
} ide_drive_t;
#define to_ide_device(dev)container_of(dev, ide_drive_t, gendev)
@@ -794,7 +795,7 @@ typedef struct hwif_s {
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
struct device gendev;
- struct semaphore gendev_rel_sem; /* To deal with device release() */
+ struct completion gendev_rel_comp; /* To deal with device release() */
void *hwif_data; /* extra hwif data */
diff --git a/include/linux/jffs2_fs_i.h b/include/linux/jffs2_fs_i.h
index ef85ab56302b..ad565bf9dcc1 100644
--- a/include/linux/jffs2_fs_i.h
+++ b/include/linux/jffs2_fs_i.h
@@ -8,11 +8,11 @@
#include <asm/semaphore.h>
struct jffs2_inode_info {
- /* We need an internal semaphore similar to inode->i_sem.
+ /* We need an internal mutex similar to inode->i_mutex.
Unfortunately, we can't use the existing one, because
either the GC would deadlock, or we'd have to release it
before letting GC proceed. Or we'd have to put ugliness
- into the GC code so it didn't attempt to obtain the i_sem
+ into the GC code so it didn't attempt to obtain the i_mutex
for the inode(s) which are already locked */
struct semaphore sem;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ca7ff8fdd090..d0e6ca3b00ef 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -286,6 +286,15 @@ extern void dump_stack(void);
1; \
})
+/*
+ * Check at compile time that 'function' is a certain type, or is a pointer
+ * to that type (needs to use a typedef for the function type).
+ */
+#define typecheck_fn(type,function) \
+({ typeof(type) __tmp = function; \
+ (void)__tmp; \
+})
+
#endif /* __KERNEL__ */
#define SI_LOAD_SHIFT 16
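A small illustration of what typecheck_fn() buys a macro author; the names here are hypothetical:

#define CALL_ON_INT(fn, arg)				\
do {							\
	typecheck_fn(void (*)(int), fn);		\
	fn(arg);					\
} while (0)

static void log_value(int v)
{
	printk("%d\n", v);
}

static void typecheck_example(void)
{
	CALL_ON_INT(log_value, 42);	/* compiles cleanly */
	/* CALL_ON_INT(printk, 42) would warn: incompatible pointer type */
}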
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40f63c9879d2..f96506782ebe 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -58,9 +58,9 @@ struct loop_device {
struct bio *lo_bio;
struct bio *lo_biotail;
int lo_state;
- struct semaphore lo_sem;
+ struct completion lo_done;
+ struct completion lo_bh_done;
struct semaphore lo_ctl_mutex;
- struct semaphore lo_bh_mutex;
int lo_pending;
request_queue_t *lo_queue;
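lo_sem and lo_bh_mutex were semaphores used purely as one-shot "done" signals, which is what struct completion expresses directly. A sketch of the replacement pattern (names and call sites illustrative):

static DECLARE_COMPLETION(lo_done_example);

static void loop_thread_exit_path(void)
{
	complete(&lo_done_example);		/* signal "I have exited" */
}

static void loop_teardown_path(void)
{
	wait_for_completion(&lo_done_example);	/* block until signalled */
}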
diff --git a/include/linux/mm.h b/include/linux/mm.h
index df80e63903b5..3f1fafc0245e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -13,6 +13,7 @@
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
+#include <linux/mutex.h>
struct mempolicy;
struct anon_vma;
@@ -1024,6 +1025,9 @@ static inline void vm_stat_account(struct mm_struct *mm,
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
+ if (!PageHighMem(page) && !enable)
+ mutex_debug_check_no_locks_freed(page_address(page),
+ page_address(page + numpages));
}
#endif
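A sketch of the bug class this hook catches, assuming CONFIG_DEBUG_PAGEALLOC and CONFIG_DEBUG_MUTEXES are both enabled (the type and call site are illustrative):

struct obj {
	struct mutex lock;
};

static void buggy_free(struct obj *o)
{
	mutex_lock(&o->lock);
	/* the page goes back to the allocator with the mutex still held;
	 * kernel_map_pages() scans the freed range and reports the lock: */
	free_pages((unsigned long)o, 0);
}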
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
new file mode 100644
index 000000000000..0ccd8f983b50
--- /dev/null
+++ b/include/linux/mutex-debug.h
@@ -0,0 +1,21 @@
+#ifndef __LINUX_MUTEX_DEBUG_H
+#define __LINUX_MUTEX_DEBUG_H
+
+/*
+ * Mutexes - debugging helpers:
+ */
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+ , .held_list = LIST_HEAD_INIT(lockname.held_list), \
+ .name = #lockname , .magic = &lockname
+
+#define mutex_init(sem) __mutex_init(sem, __FUNCTION__)
+
+extern void FASTCALL(mutex_destroy(struct mutex *lock));
+
+extern void mutex_debug_show_all_locks(void);
+extern void mutex_debug_show_held_locks(struct task_struct *filter);
+extern void mutex_debug_check_no_locks_held(struct task_struct *task);
+extern void mutex_debug_check_no_locks_freed(const void *from, const void *to);
+
+#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
new file mode 100644
index 000000000000..9bce0fee68d4
--- /dev/null
+++ b/include/linux/mutex.h
@@ -0,0 +1,119 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains the main data structure and API definitions.
+ */
+#ifndef __LINUX_MUTEX_H
+#define __LINUX_MUTEX_H
+
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+
+#include <asm/atomic.h>
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in irq contexts
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+ */
+struct mutex {
+ /* 1: unlocked, 0: locked, negative: locked, possible waiters */
+ atomic_t count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct thread_info *owner;
+ struct list_head held_list;
+ unsigned long acquire_ip;
+ const char *name;
+ void *magic;
+#endif
+};
+
+/*
+ * This is the control structure for tasks blocked on mutex,
+ * which resides on the blocked task's kernel stack:
+ */
+struct mutex_waiter {
+ struct list_head list;
+ struct task_struct *task;
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct mutex *lock;
+ void *magic;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_MUTEXES
+# include <linux/mutex-debug.h>
+#else
+# define __DEBUG_MUTEX_INITIALIZER(lockname)
+# define mutex_init(mutex) __mutex_init(mutex, NULL)
+# define mutex_destroy(mutex) do { } while (0)
+# define mutex_debug_show_all_locks() do { } while (0)
+# define mutex_debug_show_held_locks(p) do { } while (0)
+# define mutex_debug_check_no_locks_held(task) do { } while (0)
+# define mutex_debug_check_no_locks_freed(from, to) do { } while (0)
+#endif
+
+#define __MUTEX_INITIALIZER(lockname) \
+ { .count = ATOMIC_INIT(1) \
+ , .wait_lock = SPIN_LOCK_UNLOCKED \
+ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
+ __DEBUG_MUTEX_INITIALIZER(lockname) }
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void fastcall __mutex_init(struct mutex *lock, const char *name);
+
+/***
+ * mutex_is_locked - is the mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline int fastcall mutex_is_locked(struct mutex *lock)
+{
+ return atomic_read(&lock->count) != 1;
+}
+
+/*
+ * See kernel/mutex.c for detailed documentation of these APIs.
+ * Also see Documentation/mutex-design.txt.
+ */
+extern void fastcall mutex_lock(struct mutex *lock);
+extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+/*
+ * NOTE: mutex_trylock() follows the spin_trylock() convention,
+ * not the down_trylock() convention!
+ */
+extern int fastcall mutex_trylock(struct mutex *lock);
+extern void fastcall mutex_unlock(struct mutex *lock);
+
+#endif
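A basic usage sketch of the API declared above:

static DEFINE_MUTEX(example_mutex);

static void api_example(void)
{
	mutex_lock(&example_mutex);
	/* ... critical section ... */
	mutex_unlock(&example_mutex);

	if (mutex_trylock(&example_mutex)) {
		/* spin_trylock() convention: nonzero means acquired */
		mutex_unlock(&example_mutex);
	}

	if (!mutex_lock_interruptible(&example_mutex)) {
		/* 0 on success, nonzero if interrupted by a signal */
		mutex_unlock(&example_mutex);
	}
}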
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index bb842ea41033..0798b7781a6e 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -294,7 +294,7 @@ fill_post_wcc(struct svc_fh *fhp)
/*
* Lock a file handle/inode
* NOTE: both fh_lock and fh_unlock are done "by hand" in
- * vfs.c:nfsd_rename as it needs to grab 2 i_sem's at once
+ * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
* so, any changes here should be reflected there.
*/
static inline void
@@ -317,7 +317,7 @@ fh_lock(struct svc_fh *fhp)
}
inode = dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
fill_pre_wcc(fhp);
fhp->fh_locked = 1;
}
@@ -333,7 +333,7 @@ fh_unlock(struct svc_fh *fhp)
if (fhp->fh_locked) {
fill_post_wcc(fhp);
- up(&fhp->fh_dentry->d_inode->i_sem);
+ mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
fhp->fh_locked = 0;
}
}
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 1767073df26f..b12e59c75752 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -37,7 +37,7 @@ struct pipe_inode_info {
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
-#define PIPE_SEM(inode) (&(inode).i_sem)
+#define PIPE_MUTEX(inode) (&(inode).i_mutex)
#define PIPE_WAIT(inode) (&(inode).i_pipe->wait)
#define PIPE_READERS(inode) ((inode).i_pipe->readers)
#define PIPE_WRITERS(inode) ((inode).i_pipe->writers)
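Call sites keep dereferencing the inode, since the macro takes it by value-reference; an illustrative caller:

static void pipe_example(struct inode *inode)
{
	mutex_lock(PIPE_MUTEX(*inode));
	/* PIPE_READERS()/PIPE_WRITERS() may be inspected safely here */
	mutex_unlock(PIPE_MUTEX(*inode));
}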
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 001ab82df051..e276c5ba2bb7 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1857,7 +1857,7 @@ void padd_item(char *item, int total_length, int length);
#define GET_BLOCK_CREATE 1 /* add anything you need to find block */
#define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */
#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
-#define GET_BLOCK_NO_ISEM 8 /* i_sem is not held, don't preallocate */
+#define GET_BLOCK_NO_IMUX 8 /* i_mutex is not held, don't preallocate */
#define GET_BLOCK_NO_DANGLE 16 /* don't leave any transactions running */
int restart_transaction(struct reiserfs_transaction_handle *th,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 78eb92ae4d94..85b53f87c703 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -817,6 +817,11 @@ struct task_struct {
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
spinlock_t proc_lock;
+#ifdef CONFIG_DEBUG_MUTEXES
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+#endif
+
/* journalling filesystem info */
void *journal_info;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c8943b53d8e6..a8aa6152eea6 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -660,7 +660,7 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
if (fd < 0)
goto out_putname;
- down(&mqueue_mnt->mnt_root->d_inode->i_sem);
+ mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
@@ -697,7 +697,7 @@ out_putfd:
out_err:
fd = error;
out_upsem:
- up(&mqueue_mnt->mnt_root->d_inode->i_sem);
+ mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
putname(name);
return fd;
@@ -714,7 +714,7 @@ asmlinkage long sys_mq_unlink(const char __user *u_name)
if (IS_ERR(name))
return PTR_ERR(name);
- down(&mqueue_mnt->mnt_root->d_inode->i_sem);
+ mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -735,7 +735,7 @@ out_err:
dput(dentry);
out_unlock:
- up(&mqueue_mnt->mnt_root->d_inode->i_sem);
+ mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
putname(name);
if (inode)
iput(inode);
diff --git a/kernel/Makefile b/kernel/Makefile
index 4f5a1453093a..a940bac02837 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,8 +7,9 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o intermodule.o extable.o params.o posix-timers.o \
- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o
+ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o
+obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
obj-$(CONFIG_FUTEX) += futex.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index eab64e23bcae..2a75e44e1a41 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1513,7 +1513,7 @@ static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
struct dentry *dentry;
int error;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
dentry = cpuset_get_dentry(dir, cft->name);
if (!IS_ERR(dentry)) {
error = cpuset_create_file(dentry, 0644 | S_IFREG);
@@ -1522,7 +1522,7 @@ static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
dput(dentry);
} else
error = PTR_ERR(dentry);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return error;
}
@@ -1793,7 +1793,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
/*
* Release manage_sem before cpuset_populate_dir() because it
- * will down() this new directory's i_sem and if we race with
+ * will mutex_lock() this new directory's i_mutex and if we race with
* another mkdir, we might deadlock.
*/
up(&manage_sem);
@@ -1812,7 +1812,7 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
struct cpuset *c_parent = dentry->d_parent->d_fsdata;
- /* the vfs holds inode->i_sem already */
+ /* the vfs holds inode->i_mutex already */
return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
}
@@ -1823,7 +1823,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
struct cpuset *parent;
char *pathbuf = NULL;
- /* the vfs holds both inode->i_sem already */
+ /* the vfs holds both inode->i_mutex already */
down(&manage_sem);
cpuset_update_task_memory_state();
diff --git a/kernel/exit.c b/kernel/exit.c
index caceabf3f230..309a46fa16f8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -869,6 +870,10 @@ fastcall NORET_TYPE void do_exit(long code)
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
+ /*
+ * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+ */
+ mutex_debug_check_no_locks_held(tsk);
/* PF_DEAD causes final put_task_struct after we schedule. */
preempt_disable();
diff --git a/kernel/fork.c b/kernel/fork.c
index 72e3252c6763..b18d64554feb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -979,6 +979,10 @@ static task_t *copy_process(unsigned long clone_flags,
}
#endif
+#ifdef CONFIG_DEBUG_MUTEXES
+ p->blocked_on = NULL; /* not blocked yet */
+#endif
+
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
new file mode 100644
index 000000000000..4fcb051a8b9e
--- /dev/null
+++ b/kernel/mutex-debug.c
@@ -0,0 +1,464 @@
+/*
+ * kernel/mutex-debug.c
+ *
+ * Debugging code for mutexes
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * lock debugging, locking tree, deadlock detection started by:
+ *
+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ * Released under the General Public License (GPL).
+ */
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+
+#include <asm/mutex.h>
+
+#include "mutex-debug.h"
+
+/*
+ * We need a global lock when we walk through the multi-process
+ * lock tree. Only used in the deadlock-debugging case.
+ */
+DEFINE_SPINLOCK(debug_mutex_lock);
+
+/*
+ * All locks held by all tasks, in a single global list:
+ */
+LIST_HEAD(debug_mutex_held_locks);
+
+/*
+ * In the debug case we carry the caller's instruction pointer into
+ * other functions, but we don't want the function argument overhead
+ * in the nondebug case - hence these macros:
+ */
+#define __IP_DECL__ , unsigned long ip
+#define __IP__ , ip
+#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
+
+/*
+ * "mutex debugging enabled" flag. We turn it off when we detect
+ * the first problem because we don't want to recurse back
+ * into the tracing code when doing error printk or
+ * executing a BUG():
+ */
+int debug_mutex_on = 1;
+
+static void printk_task(struct task_struct *p)
+{
+ if (p)
+ printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ else
+ printk("<none>");
+}
+
+static void printk_ti(struct thread_info *ti)
+{
+ if (ti)
+ printk_task(ti->task);
+ else
+ printk("<none>");
+}
+
+static void printk_task_short(struct task_struct *p)
+{
+ if (p)
+ printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ else
+ printk("<none>");
+}
+
+static void printk_lock(struct mutex *lock, int print_owner)
+{
+ printk(" [%p] {%s}\n", lock, lock->name);
+
+ if (print_owner && lock->owner) {
+ printk(".. held by: ");
+ printk_ti(lock->owner);
+ printk("\n");
+ }
+ if (lock->owner) {
+ printk("... acquired at: ");
+ print_symbol("%s\n", lock->acquire_ip);
+ }
+}
+
+/*
+ * printk locks held by a task:
+ */
+static void show_task_locks(struct task_struct *p)
+{
+ switch (p->state) {
+ case TASK_RUNNING: printk("R"); break;
+ case TASK_INTERRUPTIBLE: printk("S"); break;
+ case TASK_UNINTERRUPTIBLE: printk("D"); break;
+ case TASK_STOPPED: printk("T"); break;
+ case EXIT_ZOMBIE: printk("Z"); break;
+ case EXIT_DEAD: printk("X"); break;
+ default: printk("?"); break;
+ }
+ printk_task(p);
+ if (p->blocked_on) {
+ struct mutex *lock = p->blocked_on->lock;
+
+ printk(" blocked on mutex:");
+ printk_lock(lock, 1);
+ } else
+ printk(" (not blocked on mutex)\n");
+}
+
+/*
+ * printk all locks held in the system (if filter == NULL),
+ * or all locks belonging to a single task (if filter != NULL):
+ */
+void show_held_locks(struct task_struct *filter)
+{
+ struct list_head *curr, *cursor = NULL;
+ struct mutex *lock;
+ struct thread_info *t;
+ unsigned long flags;
+ int count = 0;
+
+ if (filter) {
+ printk("------------------------------\n");
+ printk("| showing all locks held by: | (");
+ printk_task_short(filter);
+ printk("):\n");
+ printk("------------------------------\n");
+ } else {
+ printk("---------------------------\n");
+ printk("| showing all locks held: |\n");
+ printk("---------------------------\n");
+ }
+
+ /*
+ * Play safe and acquire the global trace lock. We
+ * cannot printk with that lock held so we iterate
+ * very carefully:
+ */
+next:
+ debug_spin_lock_save(&debug_mutex_lock, flags);
+ list_for_each(curr, &debug_mutex_held_locks) {
+ if (cursor && curr != cursor)
+ continue;
+ lock = list_entry(curr, struct mutex, held_list);
+ t = lock->owner;
+ if (filter && (t != filter->thread_info))
+ continue;
+ count++;
+ cursor = curr->next;
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+
+ printk("\n#%03d: ", count);
+ printk_lock(lock, filter ? 0 : 1);
+ goto next;
+ }
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+ printk("\n");
+}
+
+void mutex_debug_show_all_locks(void)
+{
+ struct task_struct *g, *p;
+ int count = 10;
+ int unlock = 1;
+
+ printk("\nShowing all blocking locks in the system:\n");
+
+ /*
+ * Here we try to get the tasklist_lock as hard as possible,
+ * if not successful after 2 seconds we ignore it (but keep
+ * trying). This is to enable a debug printout even if a
+ * tasklist_lock-holding task deadlocks or crashes.
+ */
+retry:
+ if (!read_trylock(&tasklist_lock)) {
+ if (count == 10)
+ printk("hm, tasklist_lock locked, retrying... ");
+ if (count) {
+ count--;
+ printk(" #%d", 10-count);
+ mdelay(200);
+ goto retry;
+ }
+ printk(" ignoring it.\n");
+ unlock = 0;
+ }
+ if (count != 10)
+ printk(" locked it.\n");
+
+ do_each_thread(g, p) {
+ show_task_locks(p);
+ if (!unlock)
+ if (read_trylock(&tasklist_lock))
+ unlock = 1;
+ } while_each_thread(g, p);
+
+ printk("\n");
+ show_held_locks(NULL);
+ printk("=============================================\n\n");
+
+ if (unlock)
+ read_unlock(&tasklist_lock);
+}
+
+static void report_deadlock(struct task_struct *task, struct mutex *lock,
+ struct mutex *lockblk, unsigned long ip)
+{
+ printk("\n%s/%d is trying to acquire this lock:\n",
+ current->comm, current->pid);
+ printk_lock(lock, 1);
+ printk("... trying at: ");
+ print_symbol("%s\n", ip);
+ show_held_locks(current);
+
+ if (lockblk) {
+ printk("but %s/%d is deadlocking current task %s/%d!\n\n",
+ task->comm, task->pid, current->comm, current->pid);
+ printk("\n%s/%d is blocked on this lock:\n",
+ task->comm, task->pid);
+ printk_lock(lockblk, 1);
+
+ show_held_locks(task);
+
+ printk("\n%s/%d's [blocked] stackdump:\n\n",
+ task->comm, task->pid);
+ show_stack(task, NULL);
+ }
+
+ printk("\n%s/%d's [current] stackdump:\n\n",
+ current->comm, current->pid);
+ dump_stack();
+ mutex_debug_show_all_locks();
+ printk("[ turning off deadlock detection. Please report this. ]\n\n");
+ local_irq_disable();
+}
+
+/*
+ * Recursively check for mutex deadlocks:
+ */
+static int check_deadlock(struct mutex *lock, int depth,
+ struct thread_info *ti, unsigned long ip)
+{
+ struct mutex *lockblk;
+ struct task_struct *task;
+
+ if (!debug_mutex_on)
+ return 0;
+
+ ti = lock->owner;
+ if (!ti)
+ return 0;
+
+ task = ti->task;
+ lockblk = NULL;
+ if (task->blocked_on)
+ lockblk = task->blocked_on->lock;
+
+ /* Self-deadlock: */
+ if (current == task) {
+ DEBUG_OFF();
+ if (depth)
+ return 1;
+ printk("\n==========================================\n");
+ printk( "[ BUG: lock recursion deadlock detected! |\n");
+ printk( "------------------------------------------\n");
+ report_deadlock(task, lock, NULL, ip);
+ return 0;
+ }
+
+ /* Ugh, something corrupted the lock data structure? */
+ if (depth > 20) {
+ DEBUG_OFF();
+ printk("\n===========================================\n");
+ printk( "[ BUG: infinite lock dependency detected!? |\n");
+ printk( "-------------------------------------------\n");
+ report_deadlock(task, lock, lockblk, ip);
+ return 0;
+ }
+
+ /* Recursively check for dependencies: */
+ if (lockblk && check_deadlock(lockblk, depth+1, ti, ip)) {
+ printk("\n============================================\n");
+ printk( "[ BUG: circular locking deadlock detected! ]\n");
+ printk( "--------------------------------------------\n");
+ report_deadlock(task, lock, lockblk, ip);
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Called when a task exits, this function checks whether the
+ * task is holding any locks, and reports the first one if so:
+ */
+void mutex_debug_check_no_locks_held(struct task_struct *task)
+{
+ struct list_head *curr, *next;
+ struct thread_info *t;
+ unsigned long flags;
+ struct mutex *lock;
+
+ if (!debug_mutex_on)
+ return;
+
+ debug_spin_lock_save(&debug_mutex_lock, flags);
+ list_for_each_safe(curr, next, &debug_mutex_held_locks) {
+ lock = list_entry(curr, struct mutex, held_list);
+ t = lock->owner;
+ if (t != task->thread_info)
+ continue;
+ list_del_init(curr);
+ DEBUG_OFF();
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+
+ printk("BUG: %s/%d, lock held at task exit time!\n",
+ task->comm, task->pid);
+ printk_lock(lock, 1);
+ if (lock->owner != task->thread_info)
+ printk("exiting task is not even the owner??\n");
+ return;
+ }
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+}
+
+/*
+ * Called when kernel memory is freed (or unmapped), or if a mutex
+ * is destroyed or reinitialized - this code checks whether there is
+ * any held lock in the memory range of <from> to <to>:
+ */
+void mutex_debug_check_no_locks_freed(const void *from, const void *to)
+{
+ struct list_head *curr, *next;
+ unsigned long flags;
+ struct mutex *lock;
+ void *lock_addr;
+
+ if (!debug_mutex_on)
+ return;
+
+ debug_spin_lock_save(&debug_mutex_lock, flags);
+ list_for_each_safe(curr, next, &debug_mutex_held_locks) {
+ lock = list_entry(curr, struct mutex, held_list);
+ lock_addr = lock;
+ if (lock_addr < from || lock_addr >= to)
+ continue;
+ list_del_init(curr);
+ DEBUG_OFF();
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+
+ printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
+ current->comm, current->pid, lock, from, to);
+ dump_stack();
+ printk_lock(lock, 1);
+ if (lock->owner != current_thread_info())
+ printk("freeing task is not even the owner??\n");
+ return;
+ }
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+}
+
+/*
+ * Must be called with lock->wait_lock held.
+ */
+void debug_mutex_set_owner(struct mutex *lock,
+ struct thread_info *new_owner __IP_DECL__)
+{
+ lock->owner = new_owner;
+ DEBUG_WARN_ON(!list_empty(&lock->held_list));
+ if (debug_mutex_on) {
+ list_add_tail(&lock->held_list, &debug_mutex_held_locks);
+ lock->acquire_ip = ip;
+ }
+}
+
+void debug_mutex_init_waiter(struct mutex_waiter *waiter)
+{
+ memset(waiter, 0x11, sizeof(*waiter));
+ waiter->magic = waiter;
+ INIT_LIST_HEAD(&waiter->list);
+}
+
+void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ DEBUG_WARN_ON(list_empty(&lock->wait_list));
+ DEBUG_WARN_ON(waiter->magic != waiter);
+ DEBUG_WARN_ON(list_empty(&waiter->list));
+}
+
+void debug_mutex_free_waiter(struct mutex_waiter *waiter)
+{
+ DEBUG_WARN_ON(!list_empty(&waiter->list));
+ memset(waiter, 0x22, sizeof(*waiter));
+}
+
+void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ struct thread_info *ti __IP_DECL__)
+{
+ SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ check_deadlock(lock, 0, ti, ip);
+ /* Mark the current thread as blocked on the lock: */
+ ti->task->blocked_on = waiter;
+ waiter->lock = lock;
+}
+
+void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ struct thread_info *ti)
+{
+ DEBUG_WARN_ON(list_empty(&waiter->list));
+ DEBUG_WARN_ON(waiter->task != ti->task);
+ DEBUG_WARN_ON(ti->task->blocked_on != waiter);
+ ti->task->blocked_on = NULL;
+
+ list_del_init(&waiter->list);
+ waiter->task = NULL;
+}
+
+void debug_mutex_unlock(struct mutex *lock)
+{
+ DEBUG_WARN_ON(lock->magic != lock);
+ DEBUG_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
+ DEBUG_WARN_ON(lock->owner != current_thread_info());
+ if (debug_mutex_on) {
+ DEBUG_WARN_ON(list_empty(&lock->held_list));
+ list_del_init(&lock->held_list);
+ }
+}
+
+void debug_mutex_init(struct mutex *lock, const char *name)
+{
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ mutex_debug_check_no_locks_freed((void *)lock, (void *)(lock + 1));
+ lock->owner = NULL;
+ INIT_LIST_HEAD(&lock->held_list);
+ lock->name = name;
+ lock->magic = lock;
+}
+
+/***
+ * mutex_destroy - mark a mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+void fastcall mutex_destroy(struct mutex *lock)
+{
+ DEBUG_WARN_ON(mutex_is_locked(lock));
+ lock->magic = NULL;
+}
+
+EXPORT_SYMBOL_GPL(mutex_destroy);
+
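For illustration, a minimal sketch (not part of the patch; the struct and
function names are hypothetical) of the bug class the held-locks registry
above catches -- freeing memory that still contains a locked mutex:

	struct my_dev {
		struct mutex lock;	/* hypothetical embedded mutex */
		int data;
	};

	static void buggy_teardown(struct my_dev *dev)
	{
		mutex_lock(&dev->lock);
		/*
		 * kfree() (hooked in the mm/slab.c hunk below) calls
		 * mutex_debug_check_no_locks_freed(), which finds
		 * dev->lock on debug_mutex_held_locks and reports
		 * "BUG: ... active lock [...] freed!" plus a stack dump.
		 */
		kfree(dev);
	}
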
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
new file mode 100644
index 000000000000..fd384050acb1
--- /dev/null
+++ b/kernel/mutex-debug.h
@@ -0,0 +1,134 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains mutex-debugging-related internal declarations,
+ * prototypes and inline functions, for the CONFIG_DEBUG_MUTEXES case.
+ * More details are in kernel/mutex-debug.c.
+ */
+
+extern spinlock_t debug_mutex_lock;
+extern struct list_head debug_mutex_held_locks;
+extern int debug_mutex_on;
+
+/*
+ * In the debug case we carry the caller's instruction pointer into
+ * other functions, but we don't want the function-argument overhead
+ * in the non-debug case - hence these macros:
+ */
+#define __IP_DECL__ , unsigned long ip
+#define __IP__ , ip
+#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
+
+/*
+ * This must be called with lock->wait_lock held.
+ */
+extern void debug_mutex_set_owner(struct mutex *lock,
+ struct thread_info *new_owner __IP_DECL__);
+
+static inline void debug_mutex_clear_owner(struct mutex *lock)
+{
+ lock->owner = NULL;
+}
+
+extern void debug_mutex_init_waiter(struct mutex_waiter *waiter);
+extern void debug_mutex_wake_waiter(struct mutex *lock,
+ struct mutex_waiter *waiter);
+extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+extern void debug_mutex_add_waiter(struct mutex *lock,
+ struct mutex_waiter *waiter,
+ struct thread_info *ti __IP_DECL__);
+extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ struct thread_info *ti);
+extern void debug_mutex_unlock(struct mutex *lock);
+extern void debug_mutex_init(struct mutex *lock, const char *name);
+
+#define debug_spin_lock(lock) \
+ do { \
+ local_irq_disable(); \
+ if (debug_mutex_on) \
+ spin_lock(lock); \
+ } while (0)
+
+#define debug_spin_unlock(lock) \
+ do { \
+ if (debug_mutex_on) \
+ spin_unlock(lock); \
+ local_irq_enable(); \
+ preempt_check_resched(); \
+ } while (0)
+
+#define debug_spin_lock_save(lock, flags) \
+ do { \
+ local_irq_save(flags); \
+ if (debug_mutex_on) \
+ spin_lock(lock); \
+ } while (0)
+
+#define debug_spin_lock_restore(lock, flags) \
+ do { \
+ if (debug_mutex_on) \
+ spin_unlock(lock); \
+ local_irq_restore(flags); \
+ preempt_check_resched(); \
+ } while (0)
+
+#define spin_lock_mutex(lock) \
+ do { \
+ struct mutex *l = container_of(lock, struct mutex, wait_lock); \
+ \
+ DEBUG_WARN_ON(in_interrupt()); \
+ debug_spin_lock(&debug_mutex_lock); \
+ spin_lock(lock); \
+ DEBUG_WARN_ON(l->magic != l); \
+ } while (0)
+
+#define spin_unlock_mutex(lock) \
+ do { \
+ spin_unlock(lock); \
+ debug_spin_unlock(&debug_mutex_lock); \
+ } while (0)
+
+#define DEBUG_OFF() \
+do { \
+ if (debug_mutex_on) { \
+ debug_mutex_on = 0; \
+ console_verbose(); \
+ if (spin_is_locked(&debug_mutex_lock)) \
+ spin_unlock(&debug_mutex_lock); \
+ } \
+} while (0)
+
+#define DEBUG_BUG() \
+do { \
+ if (debug_mutex_on) { \
+ DEBUG_OFF(); \
+ BUG(); \
+ } \
+} while (0)
+
+#define DEBUG_WARN_ON(c) \
+do { \
+ if (unlikely(c && debug_mutex_on)) { \
+ DEBUG_OFF(); \
+ WARN_ON(1); \
+ } \
+} while (0)
+
+# define DEBUG_BUG_ON(c) \
+do { \
+ if (unlikely(c)) \
+ DEBUG_BUG(); \
+} while (0)
+
+#ifdef CONFIG_SMP
+# define SMP_DEBUG_WARN_ON(c) DEBUG_WARN_ON(c)
+# define SMP_DEBUG_BUG_ON(c) DEBUG_BUG_ON(c)
+#else
+# define SMP_DEBUG_WARN_ON(c) do { } while (0)
+# define SMP_DEBUG_BUG_ON(c) do { } while (0)
+#endif
+
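To make the zero-overhead argument passing concrete, this is how the
__IP_DECL__ macro above changes a function signature in kernel/mutex.c
(a sketch; compare the empty definitions in kernel/mutex.h further below):

	/* CONFIG_DEBUG_MUTEXES=y -- __IP_DECL__ adds a real parameter: */
	static inline int __sched
	__mutex_lock_common(struct mutex *lock, long state , unsigned long ip);

	/* CONFIG_DEBUG_MUTEXES=n -- __IP_DECL__ expands to nothing: */
	static inline int __sched
	__mutex_lock_common(struct mutex *lock, long state);
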
diff --git a/kernel/mutex.c b/kernel/mutex.c
new file mode 100644
index 000000000000..7eb960661441
--- /dev/null
+++ b/kernel/mutex.c
@@ -0,0 +1,325 @@
+/*
+ * kernel/mutex.c
+ *
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
+ * David Howells for suggestions and improvements.
+ *
+ * Also see Documentation/mutex-design.txt.
+ */
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+/*
+ * In the DEBUG case we are using the "NULL fastpath" for mutexes,
+ * which forces all calls into the slowpath:
+ */
+#ifdef CONFIG_DEBUG_MUTEXES
+# include "mutex-debug.h"
+# include <asm-generic/mutex-null.h>
+#else
+# include "mutex.h"
+# include <asm/mutex.h>
+#endif
+
+/***
+ * mutex_init - initialize the mutex
+ * @lock: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+void fastcall __mutex_init(struct mutex *lock, const char *name)
+{
+ atomic_set(&lock->count, 1);
+ spin_lock_init(&lock->wait_lock);
+ INIT_LIST_HEAD(&lock->wait_list);
+
+ debug_mutex_init(lock, name);
+}
+
+EXPORT_SYMBOL(__mutex_init);
+
+/*
+ * We split the mutex lock/unlock logic into separate fastpath and
+ * slowpath functions, to reduce the register pressure on the fastpath.
+ * We also put the fastpath first in the kernel image, to make sure the
+ * branch is predicted by the CPU as default-untaken.
+ */
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
+
+/***
+ * mutex_lock - acquire the mutex
+ * @lock: the mutex to be acquired
+ *
+ * Lock the mutex exclusively for this task. If the mutex is not
+ * available right now, it will sleep until it can get it.
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. Recursive locking is not allowed. The task
+ * may not exit without first unlocking the mutex. Also, kernel
+ * memory where the mutex resides must not be freed with
+ * the mutex still locked. The mutex must first be initialized
+ * (or statically defined) before it can be locked. memset()-ing
+ * the mutex to 0 is not allowed.
+ *
+ * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
+ * checks that will enforce the restrictions and will also do
+ * deadlock debugging. )
+ *
+ * This function is similar to (but not equivalent to) down().
+ */
+void fastcall __sched mutex_lock(struct mutex *lock)
+{
+ /*
+ * The locking fastpath is the 1->0 transition from
+ * 'unlocked' into 'locked' state.
+ *
+ * NOTE: if asm/mutex.h is included, then some architectures
+ * rely on mutex_lock() having _no other code_ here but this
+ * fastpath. That allows the assembly fastpath to do
+ * tail-merging optimizations. (If you want to put test code
+ * here, do it under #ifndef CONFIG_DEBUG_MUTEXES.)
+ */
+ __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_lock);
+
+static void fastcall noinline __sched
+__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
+
+/***
+ * mutex_unlock - release the mutex
+ * @lock: the mutex to be released
+ *
+ * Unlock a mutex that has been locked by this task previously.
+ *
+ * This function must not be used in interrupt context. Unlocking
+ * an unlocked mutex is not allowed.
+ *
+ * This function is similar to (but not equivalent to) up().
+ */
+void fastcall __sched mutex_unlock(struct mutex *lock)
+{
+ /*
+ * The unlocking fastpath is the 0->1 transition from 'locked'
+ * into 'unlocked' state:
+ *
+ * NOTE: no other code must be here - see mutex_lock().
+ */
+ __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_unlock);
+
+/*
+ * Lock a mutex (possibly interruptible), slowpath:
+ */
+static inline int __sched
+__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
+{
+ struct task_struct *task = current;
+ struct mutex_waiter waiter;
+ unsigned int old_val;
+
+ debug_mutex_init_waiter(&waiter);
+
+ spin_lock_mutex(&lock->wait_lock);
+
+ debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
+
+ /* add waiting tasks to the end of the waitqueue (FIFO): */
+ list_add_tail(&waiter.list, &lock->wait_list);
+ waiter.task = task;
+
+ for (;;) {
+ /*
+ * Let's try to take the lock again - this is needed even if
+ * we get here for the first time (shortly after failing to
+ * acquire the lock), to make sure that we get a wakeup once
+ * it's unlocked. Later on, if we sleep, this is the
+ * operation that gives us the lock. We xchg it to -1, so
+ * that when we release the lock, we properly wake up the
+ * other waiters:
+ */
+ old_val = atomic_xchg(&lock->count, -1);
+ if (old_val == 1)
+ break;
+
+ /*
+ * got a signal? (This code gets eliminated in the
+ * TASK_UNINTERRUPTIBLE case.)
+ */
+ if (unlikely(state == TASK_INTERRUPTIBLE &&
+ signal_pending(task))) {
+ mutex_remove_waiter(lock, &waiter, task->thread_info);
+ spin_unlock_mutex(&lock->wait_lock);
+
+ debug_mutex_free_waiter(&waiter);
+ return -EINTR;
+ }
+ __set_task_state(task, state);
+
+ /* didn't get the lock, go to sleep: */
+ spin_unlock_mutex(&lock->wait_lock);
+ schedule();
+ spin_lock_mutex(&lock->wait_lock);
+ }
+
+ /* got the lock - rejoice! */
+ mutex_remove_waiter(lock, &waiter, task->thread_info);
+ debug_mutex_set_owner(lock, task->thread_info __IP__);
+
+ /* set it to 0 if there are no waiters left: */
+ if (likely(list_empty(&lock->wait_list)))
+ atomic_set(&lock->count, 0);
+
+ spin_unlock_mutex(&lock->wait_lock);
+
+ debug_mutex_free_waiter(&waiter);
+
+ DEBUG_WARN_ON(list_empty(&lock->held_list));
+ DEBUG_WARN_ON(lock->owner != task->thread_info);
+
+ return 0;
+}
+
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
+}
+
+/*
+ * Release the lock, slowpath:
+ */
+static fastcall noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ DEBUG_WARN_ON(lock->owner != current_thread_info());
+
+ spin_lock_mutex(&lock->wait_lock);
+
+ /*
+ * Some architectures leave the lock unlocked in the fastpath failure
+ * case; others need to leave it locked. In the latter case we have to
+ * unlock it here.
+ */
+ if (__mutex_slowpath_needs_to_unlock())
+ atomic_set(&lock->count, 1);
+
+ debug_mutex_unlock(lock);
+
+ if (!list_empty(&lock->wait_list)) {
+ /* get the first entry from the wait-list: */
+ struct mutex_waiter *waiter =
+ list_entry(lock->wait_list.next,
+ struct mutex_waiter, list);
+
+ debug_mutex_wake_waiter(lock, waiter);
+
+ wake_up_process(waiter->task);
+ }
+
+ debug_mutex_clear_owner(lock);
+
+ spin_unlock_mutex(&lock->wait_lock);
+}
+
+/*
+ * Here come the less common (and hence less performance-critical) APIs:
+ * mutex_lock_interruptible() and mutex_trylock().
+ */
+static int fastcall noinline __sched
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
+
+/***
+ * mutex_lock_interruptible - acquire the mutex, interruptible
+ * @lock: the mutex to be acquired
+ *
+ * Lock the mutex like mutex_lock(), and return 0 if the mutex has
+ * been acquired or sleep until the mutex becomes available. If a
+ * signal arrives while waiting for the lock then this function
+ * returns -EINTR.
+ *
+ * This function is similar to (but not equivalent to) down_interruptible().
+ */
+int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+{
+ /* NOTE: no other code must be here - see mutex_lock() */
+ return __mutex_fastpath_lock_retval
+ (&lock->count, __mutex_lock_interruptible_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_lock_interruptible);
+
+static int fastcall noinline __sched
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
+}
+
+/*
+ * Spinlock based trylock, we take the spinlock and check whether we
+ * can get the lock:
+ */
+static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+ int prev;
+
+ spin_lock_mutex(&lock->wait_lock);
+
+ prev = atomic_xchg(&lock->count, -1);
+ if (likely(prev == 1))
+ debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
+ /* Set it back to 0 if there are no waiters: */
+ if (likely(list_empty(&lock->wait_list)))
+ atomic_set(&lock->count, 0);
+
+ spin_unlock_mutex(&lock->wait_lock);
+
+ return prev == 1;
+}
+
+/***
+ * mutex_trylock - try to acquire the mutex, without waiting
+ * @lock: the mutex to be acquired
+ *
+ * Try to acquire the mutex atomically. Returns 1 if the mutex
+ * has been acquired successfully, and 0 on contention.
+ *
+ * NOTE: this function follows the spin_trylock() convention, so
+ * its return value is the inverse of down_trylock()'s! Be careful
+ * about this when converting semaphore users to mutexes.
+ *
+ * This function must not be used in interrupt context. The
+ * mutex must be released by the same task that acquired it.
+ */
+int fastcall mutex_trylock(struct mutex *lock)
+{
+ return __mutex_fastpath_trylock(&lock->count,
+ __mutex_trylock_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_trylock);
+
+
+
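A hypothetical caller, sketched here to show the calling conventions
documented above -- mutex_trylock() returns 1 on success (the
spin_trylock() convention), and mutex_lock_interruptible() returns
-EINTR when interrupted by a signal. DEFINE_MUTEX() is assumed to be
the static initializer from include/linux/mutex.h:

	static DEFINE_MUTEX(sample_lock);

	static int sample_update(void)
	{
		if (mutex_lock_interruptible(&sample_lock))
			return -EINTR;	/* signal arrived while sleeping */
		/* ... critical section ... */
		mutex_unlock(&sample_lock);	/* same task must unlock */
		return 0;
	}

	static int sample_poll(void)
	{
		if (!mutex_trylock(&sample_lock))
			return -EBUSY;	/* contended -- note: 1 == success */
		/* ... */
		mutex_unlock(&sample_lock);
		return 0;
	}
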
diff --git a/kernel/mutex.h b/kernel/mutex.h
new file mode 100644
index 000000000000..00fe84e7b672
--- /dev/null
+++ b/kernel/mutex.h
@@ -0,0 +1,35 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains mutex-debugging-related internal prototypes, for the
+ * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
+ */
+
+#define spin_lock_mutex(lock) spin_lock(lock)
+#define spin_unlock_mutex(lock) spin_unlock(lock)
+#define mutex_remove_waiter(lock, waiter, ti) \
+ __list_del((waiter)->list.prev, (waiter)->list.next)
+
+#define DEBUG_WARN_ON(c) do { } while (0)
+#define debug_mutex_set_owner(lock, new_owner) do { } while (0)
+#define debug_mutex_clear_owner(lock) do { } while (0)
+#define debug_mutex_init_waiter(waiter) do { } while (0)
+#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
+#define debug_mutex_free_waiter(waiter) do { } while (0)
+#define debug_mutex_add_waiter(lock, waiter, ti, ip) do { } while (0)
+#define debug_mutex_unlock(lock) do { } while (0)
+#define debug_mutex_init(lock, name) do { } while (0)
+
+/*
+ * Return-address parameters/declarations. They are very useful for
+ * debugging, but add overhead in the !DEBUG case - so we go to the
+ * trouble of using this not-too-elegant but zero-cost solution:
+ */
+#define __IP_DECL__
+#define __IP__
+#define __RET_IP__
+
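With the empty definitions above, the debug hooks in kernel/mutex.c vanish
entirely in the !DEBUG build -- for example (a sketch of the preprocessed
result):

	/* Source, in __mutex_lock_common(): */
	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);

	/* After preprocessing with the no-op macro above: */
	do { } while (0);

	/* ... so 'ip' is never evaluated and need not even exist. */
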
diff --git a/kernel/sched.c b/kernel/sched.c
index 92733091154c..34a945bcc022 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4386,6 +4386,7 @@ void show_state(void)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ mutex_debug_show_all_locks();
}
/**
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c48260fb8fd9..1fcd856edec1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -95,6 +95,14 @@ config DEBUG_PREEMPT
if kernel code uses it in a preemption-unsafe way. Also, the kernel
will detect preemption count underflows.
+config DEBUG_MUTEXES
+ bool "Mutex debugging, deadlock detection"
+ default y
+ depends on DEBUG_KERNEL
+ help
+ This allows mutex semantics violations and mutex-related deadlocks
+ (lockups) to be detected and reported automatically.
+
config DEBUG_SPINLOCK
bool "Spinlock debugging"
depends on DEBUG_KERNEL
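Enabling the new option in a .config looks like this (a sketch; it
requires DEBUG_KERNEL, per the dependency above, and defaults to y):

	CONFIG_DEBUG_KERNEL=y
	CONFIG_DEBUG_MUTEXES=y
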
diff --git a/mm/filemap.c b/mm/filemap.c
index 478f4c74cc31..5fca2737c971 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -61,7 +61,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
*
- * ->i_sem
+ * ->i_mutex
* ->i_mmap_lock (truncate->unmap_mapping_range)
*
* ->mmap_sem
@@ -73,9 +73,9 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
* ->lock_page (access_process_vm)
*
* ->mmap_sem
- * ->i_sem (msync)
+ * ->i_mutex (msync)
*
- * ->i_sem
+ * ->i_mutex
* ->i_alloc_sem (various)
*
* ->inode_lock
@@ -276,7 +276,7 @@ static int wait_on_page_writeback_range(struct address_space *mapping,
* integrity" operation. It waits upon in-flight writeout before starting and
* waiting upon new writeout. If there was an IO error, return it.
*
- * We need to re-take i_sem during the generic_osync_inode list walk because
+ * We need to re-take i_mutex during the generic_osync_inode list walk because
* it is otherwise livelockable.
*/
int sync_page_range(struct inode *inode, struct address_space *mapping,
@@ -290,9 +290,9 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
return 0;
ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
if (ret == 0) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
if (ret == 0)
ret = wait_on_page_writeback_range(mapping, start, end);
@@ -301,7 +301,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
EXPORT_SYMBOL(sync_page_range);
/*
- * Note: Holding i_sem across sync_page_range_nolock is not a good idea
+ * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
* as it forces O_SYNC writers to different parts of the same file
* to be serialised right until io completion.
*/
@@ -1892,7 +1892,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
/*
* Sync the fs metadata but not the minor inode changes and
* of course not the data as we did direct DMA for the IO.
- * i_sem is held, which protects generic_osync_inode() from
+ * i_mutex is held, which protects generic_osync_inode() from
* livelocking.
*/
if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
@@ -2195,10 +2195,10 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
BUG_ON(iocb->ki_pos != pos);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
&iocb->ki_pos);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
ssize_t err;
@@ -2220,9 +2220,9 @@ ssize_t generic_file_write(struct file *file, const char __user *buf,
struct iovec local_iov = { .iov_base = (void __user *)buf,
.iov_len = count };
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
ssize_t err;
@@ -2256,9 +2256,9 @@ ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
struct inode *inode = mapping->host;
ssize_t ret;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err;
@@ -2272,7 +2272,7 @@ ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
EXPORT_SYMBOL(generic_file_writev);
/*
- * Called under i_sem for writes to S_ISREG files. Returns -EIO if something
+ * Called under i_mutex for writes to S_ISREG files. Returns -EIO if something
* went wrong during pagecache shootdown.
*/
static ssize_t
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 9cf687e4a29a..e2b34e95913e 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -338,7 +338,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
*ppos = pos;
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_sem.
+ * cannot change under us because we hold i_mutex.
*/
if (pos > inode->i_size) {
i_size_write(inode, pos);
@@ -358,7 +358,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
loff_t pos;
ssize_t ret;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (!access_ok(VERIFY_READ, buf, len)) {
ret=-EFAULT;
@@ -390,7 +390,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
out_backing:
current->backing_dev_info = NULL;
out_up:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
diff --git a/mm/memory.c b/mm/memory.c
index 3944fec38012..7a11ddd5060f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1784,13 +1784,13 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
if (!inode->i_op || !inode->i_op->truncate_range)
return -ENOSYS;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
down_write(&inode->i_alloc_sem);
unmap_mapping_range(mapping, offset, (end - offset), 1);
truncate_inode_pages_range(mapping, offset, end);
inode->i_op->truncate_range(inode, offset, end);
up_write(&inode->i_alloc_sem);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return 0;
}
diff --git a/mm/msync.c b/mm/msync.c
index 1b5b6f662dcf..3563a56e1a51 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -137,7 +137,7 @@ static int msync_interval(struct vm_area_struct *vma,
ret = filemap_fdatawrite(mapping);
if (file->f_op && file->f_op->fsync) {
/*
- * We don't take i_sem here because mmap_sem
+ * We don't take i_mutex here because mmap_sem
* is already held.
*/
err = file->f_op->fsync(file,file->f_dentry,1);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0e84924171b..a5e6891f7bb6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -415,6 +415,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
int reserved = 0;
arch_free_page(page, order);
+ if (!PageHighMem(page))
+ mutex_debug_check_no_locks_freed(page_address(page),
+ page_address(page+(1<<order)));
#ifndef CONFIG_MMU
for (i = 1 ; i < (1 << order) ; ++i)
diff --git a/mm/rmap.c b/mm/rmap.c
index 66ec43053a4d..dfbb89f99a15 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -20,13 +20,13 @@
/*
* Lock ordering in mm:
*
- * inode->i_sem (while writing or truncating, not reading or faulting)
+ * inode->i_mutex (while writing or truncating, not reading or faulting)
* inode->i_alloc_sem
*
* When a page fault occurs in writing from user to file, down_read
- * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
- * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
- * taken together; in truncation, i_sem is taken outermost.
+ * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
+ * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
+ * taken together; in truncation, i_mutex is taken outermost.
*
* mm->mmap_sem
* page->flags PG_locked (lock_page)
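As a concrete instance of the ordering above, the vmtruncate_range() hunk
in mm/memory.c earlier in this patch takes i_mutex outermost:

	mutex_lock(&inode->i_mutex);		/* outermost in truncation */
	down_write(&inode->i_alloc_sem);
	/* ... unmap + truncate + i_op->truncate_range() ... */
	up_write(&inode->i_alloc_sem);
	mutex_unlock(&inode->i_mutex);
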
diff --git a/mm/shmem.c b/mm/shmem.c
index a1f2f02af724..343b3c0937e5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1370,7 +1370,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
pos = *ppos;
written = 0;
@@ -1455,7 +1455,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
if (written)
err = written;
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return err;
}
@@ -1491,7 +1491,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
/*
* We must evaluate after, since reads (unlike writes)
- * are called without i_sem protection against truncate
+ * are called without i_mutex protection against truncate
*/
nr = PAGE_CACHE_SIZE;
i_size = i_size_read(inode);
diff --git a/mm/slab.c b/mm/slab.c
index 1c46c6383552..33aab345cd4a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3071,6 +3071,7 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = page_get_cache(virt_to_page(objp));
+ mutex_debug_check_no_locks_freed(objp, objp+obj_reallen(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 80f948a2028b..6544565a7c0f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1187,9 +1187,9 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
set_blocksize(bdev, p->old_block_size);
bd_release(bdev);
} else {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
inode->i_flags &= ~S_SWAPFILE;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
err = 0;
@@ -1406,7 +1406,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
p->bdev = bdev;
} else if (S_ISREG(inode->i_mode)) {
p->bdev = inode->i_sb->s_bdev;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
did_down = 1;
if (IS_SWAPFILE(inode)) {
error = -EBUSY;
@@ -1596,7 +1596,7 @@ out:
if (did_down) {
if (!error)
inode->i_flags |= S_SWAPFILE;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return error;
}
diff --git a/mm/truncate.c b/mm/truncate.c
index b1a463d0fe71..6cb3fff25f67 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -196,7 +196,7 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
*
- * Called under (and serialised by) inode->i_sem.
+ * Called under (and serialised by) inode->i_mutex.
*/
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index e14c1cae7460..9764c80ab0b2 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -69,13 +69,13 @@ rpc_timeout_upcall_queue(void *data)
struct rpc_inode *rpci = (struct rpc_inode *)data;
struct inode *inode = &rpci->vfs_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops == NULL)
goto out;
if (rpci->nreaders == 0 && !list_empty(&rpci->pipe))
__rpc_purge_upcall(inode, -ETIMEDOUT);
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
int
@@ -84,7 +84,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
struct rpc_inode *rpci = RPC_I(inode);
int res = -EPIPE;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops == NULL)
goto out;
if (rpci->nreaders) {
@@ -100,7 +100,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
res = 0;
}
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
wake_up(&rpci->waitq);
return res;
}
@@ -116,7 +116,7 @@ rpc_close_pipes(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops != NULL) {
rpci->nreaders = 0;
__rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE);
@@ -127,7 +127,7 @@ rpc_close_pipes(struct inode *inode)
rpci->ops = NULL;
}
rpc_inode_setowner(inode, NULL);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
cancel_delayed_work(&rpci->queue_timeout);
flush_scheduled_work();
}
@@ -154,7 +154,7 @@ rpc_pipe_open(struct inode *inode, struct file *filp)
struct rpc_inode *rpci = RPC_I(inode);
int res = -ENXIO;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops != NULL) {
if (filp->f_mode & FMODE_READ)
rpci->nreaders ++;
@@ -162,7 +162,7 @@ rpc_pipe_open(struct inode *inode, struct file *filp)
rpci->nwriters ++;
res = 0;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return res;
}
@@ -172,7 +172,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
struct rpc_inode *rpci = RPC_I(inode);
struct rpc_pipe_msg *msg;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops == NULL)
goto out;
msg = (struct rpc_pipe_msg *)filp->private_data;
@@ -190,7 +190,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
if (rpci->ops->release_pipe)
rpci->ops->release_pipe(inode);
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return 0;
}
@@ -202,7 +202,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
struct rpc_pipe_msg *msg;
int res = 0;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops == NULL) {
res = -EPIPE;
goto out_unlock;
@@ -229,7 +229,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
rpci->ops->destroy_msg(msg);
}
out_unlock:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return res;
}
@@ -240,11 +240,11 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of
struct rpc_inode *rpci = RPC_I(inode);
int res;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
res = -EPIPE;
if (rpci->ops != NULL)
res = rpci->ops->downcall(filp, buf, len);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return res;
}
@@ -322,7 +322,7 @@ rpc_info_open(struct inode *inode, struct file *file)
if (!ret) {
struct seq_file *m = file->private_data;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
clnt = RPC_I(inode)->private;
if (clnt) {
atomic_inc(&clnt->cl_users);
@@ -331,7 +331,7 @@ rpc_info_open(struct inode *inode, struct file *file)
single_release(inode, file);
ret = -EINVAL;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return ret;
}
@@ -491,7 +491,7 @@ rpc_depopulate(struct dentry *parent)
struct dentry *dentry, *dvec[10];
int n = 0;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
repeat:
spin_lock(&dcache_lock);
list_for_each_safe(pos, next, &parent->d_subdirs) {
@@ -519,7 +519,7 @@ repeat:
} while (n);
goto repeat;
}
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
}
static int
@@ -532,7 +532,7 @@ rpc_populate(struct dentry *parent,
struct dentry *dentry;
int mode, i;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
for (i = start; i < eof; i++) {
dentry = d_alloc_name(parent, files[i].name);
if (!dentry)
@@ -552,10 +552,10 @@ rpc_populate(struct dentry *parent,
dir->i_nlink++;
d_add(dentry, inode);
}
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
return 0;
out_bad:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
__FILE__, __FUNCTION__, parent->d_name.name);
return -ENOMEM;
@@ -609,7 +609,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
if ((error = rpc_lookup_parent(path, nd)) != 0)
return ERR_PTR(error);
dir = nd->dentry->d_inode;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
dentry = lookup_hash(nd);
if (IS_ERR(dentry))
goto out_err;
@@ -620,7 +620,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
}
return dentry;
out_err:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(nd);
return dentry;
}
@@ -646,7 +646,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
if (error)
goto err_depopulate;
out:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
return dentry;
err_depopulate:
@@ -671,7 +671,7 @@ rpc_rmdir(char *path)
if ((error = rpc_lookup_parent(path, &nd)) != 0)
return error;
dir = nd.dentry->d_inode;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
dentry = lookup_hash(&nd);
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
@@ -681,7 +681,7 @@ rpc_rmdir(char *path)
error = __rpc_rmdir(dir, dentry);
dput(dentry);
out_release:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
return error;
}
@@ -710,7 +710,7 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
rpci->ops = ops;
inode_dir_notify(dir, DN_CREATE);
out:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
return dentry;
err_dput:
@@ -732,7 +732,7 @@ rpc_unlink(char *path)
if ((error = rpc_lookup_parent(path, &nd)) != 0)
return error;
dir = nd.dentry->d_inode;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
dentry = lookup_hash(&nd);
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
@@ -746,7 +746,7 @@ rpc_unlink(char *path)
dput(dentry);
inode_dir_notify(dir, DN_DELETE);
out_release:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
return error;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5f6ae79b8b16..1b5989b1b670 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -784,7 +784,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
if (err)
goto out_mknod_dput;
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
dput(nd.dentry);
nd.dentry = dentry;
@@ -823,7 +823,7 @@ out:
out_mknod_dput:
dput(dentry);
out_mknod_unlock:
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out_mknod_parent:
if (err==-EEXIST)
diff --git a/security/inode.c b/security/inode.c
index a5964502ae30..0f77b0223662 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -172,7 +172,7 @@ static int create_by_name(const char *name, mode_t mode,
return -EFAULT;
}
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
@@ -181,7 +181,7 @@ static int create_by_name(const char *name, mode_t mode,
error = create(parent->d_inode, *dentry, mode);
} else
error = PTR_ERR(dentry);
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
return error;
}
@@ -302,7 +302,7 @@ void securityfs_remove(struct dentry *dentry)
if (!parent || !parent->d_inode)
return;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
if (positive(dentry)) {
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
@@ -312,7 +312,7 @@ void securityfs_remove(struct dentry *dentry)
dput(dentry);
}
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
simple_release_fs(&mount, &mount_count);
}
EXPORT_SYMBOL_GPL(securityfs_remove);
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 16df1246a131..7fd072392c7e 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2135,9 +2135,7 @@ static ssize_t snd_pcm_oss_write(struct file *file, const char __user *buf, size
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
if (substream == NULL)
return -ENXIO;
- up(&file->f_dentry->d_inode->i_sem);
result = snd_pcm_oss_write1(substream, buf, count);
- down(&file->f_dentry->d_inode->i_sem);
#ifdef OSS_DEBUG
printk("pcm_oss: write %li bytes (wrote %li bytes)\n", (long)count, (long)result);
#endif
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 9ee6c177db0c..40b4f679c80e 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -32,10 +32,6 @@
#include "seq_info.h"
#include "seq_lock.h"
-/* semaphore in struct file record */
-#define semaphore_of(fp) ((fp)->f_dentry->d_inode->i_sem)
-
-
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
return pool->total_elements - atomic_read(&pool->counter);