author		Paul Mundt <lethal@linux-sh.org>	2010-10-14 03:49:15 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2010-10-14 03:49:15 +0900
commit		f7fcec93b619337feb9da829b8a9ab6ba86393bc (patch)
tree		3598a9c02d14252150ee9b8c1cc7988dd4636f19 /arch/sh/mm
parent		47da88f36639b8de57f6cdd680f8c27528ccd67c (diff)
download	linux-f7fcec93b619337feb9da829b8a9ab6ba86393bc.tar.bz2
sh: Fix up PMB locking.
This first converts the PMB locking over to raw spinlocks, and secondly
fixes up a nested locking issue that was triggering lockdep early on:

	swapper/0 is trying to acquire lock:
	 (&pmbe->lock){......}, at: [<806be9bc>] pmb_init+0xf4/0x4dc

	but task is already holding lock:
	 (&pmbe->lock){......}, at: [<806be98e>] pmb_init+0xc6/0x4dc

	other info that might help us debug this:
	1 lock held by swapper/0:
	 #0:  (&pmbe->lock){......}, at: [<806be98e>] pmb_init+0xc6/0x4dc

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
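For context, the pattern the fix relies on can be sketched outside the patch. When two locks of the same lock class must be held at once, as with the current and previous pmb_entry here, the inner acquisition needs an explicit nesting annotation; a plain lock call looks like recursive locking to lockdep and produces a report like the one quoted above. A minimal sketch, with a hypothetical struct entry standing in for pmb_entry:

#include <linux/spinlock.h>

/* Hypothetical stand-in for pmb_entry: all entries share one lock class. */
struct entry {
	raw_spinlock_t lock;
	struct entry *link;
};

static void link_entries(struct entry *prev, struct entry *cur)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&cur->lock, flags);
	/*
	 * prev->lock and cur->lock belong to the same lock class, so the
	 * inner lock must be taken with an explicit subclass; a plain
	 * raw_spin_lock() here would trip lockdep's self-deadlock check.
	 */
	raw_spin_lock_nested(&prev->lock, SINGLE_DEPTH_NESTING);
	prev->link = cur;
	raw_spin_unlock(&prev->lock);
	raw_spin_unlock_irqrestore(&cur->lock, flags);
}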
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/pmb.c | 31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 233c011c4d22..b20b1b3eee4b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
unsigned long flags;
unsigned long size;
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
memset(pmbe, 0, sizeof(struct pmb_entry));
- spin_lock_init(&pmbe->lock);
+ raw_spin_lock_init(&pmbe->lock);
pmbe->vpn = vpn;
pmbe->ppn = ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
{
unsigned long flags;
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
__set_pmb_entry(pmbe);
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
return PTR_ERR(pmbe);
}
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe->size = pmb_sizes[i].size;
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
* entries for easier tear-down.
*/
if (likely(pmbp)) {
- spin_lock(&pmbp->lock);
+ raw_spin_lock_nested(&pmbp->lock,
+ SINGLE_DEPTH_NESTING);
pmbp->link = pmbe;
- spin_unlock(&pmbp->lock);
+ raw_spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
i--;
mapped++;
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
} while (size >= SZ_16M);
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
continue;
}
- spin_lock_irqsave(&pmbe->lock, irqflags);
+ raw_spin_lock_irqsave(&pmbe->lock, irqflags);
for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
if (pmb_sizes[j].flag == size)
pmbe->size = pmb_sizes[j].size;
if (pmbp) {
- spin_lock(&pmbp->lock);
-
+ raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
/*
* Compare the previous entry against the current one to
* see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
*/
if (pmb_can_merge(pmbp, pmbe))
pmbp->link = pmbe;
-
- spin_unlock(&pmbp->lock);
+ raw_spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
- spin_unlock_irqrestore(&pmbe->lock, irqflags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
}
}
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
/*
* Found it, now resize it.
*/
- spin_lock_irqsave(&pmbe->lock, flags);
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe->size = SZ_16M;
pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
__set_pmb_entry(pmbe);
- spin_unlock_irqrestore(&pmbe->lock, flags);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
read_unlock(&pmb_rwlock);
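A side note on the spinlock_t to raw_spinlock_t conversion (general background, not taken from the patch itself): raw_spinlock_t always remains a true spinning lock, whereas spinlock_t can become a sleeping lock under PREEMPT_RT, which makes the raw variant the appropriate choice for low-level code like the PMB setup, which runs during early boot and programs hardware translation entries. A minimal sketch with a hypothetical lock name:

#include <linux/spinlock.h>

/* A raw spinlock stays a spinning lock even on PREEMPT_RT kernels. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void touch_hw_state(void)
{
	unsigned long flags;

	/* Disable local interrupts while the hardware state is updated. */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... program hardware registers here ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}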