author     Andi Kleen <ak@suse.de>  2005-12-12 22:17:09 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-12-12 22:31:16 -0800
commit     bf5421c309bb89e5106452bc840983b1b4754d61 (patch)
tree       dd1b97332a053197175b13a3fe7d62e568c6f3f1 /arch/x86_64
parent     5e9ef02ec00c70840661d174dc2f4862db471bb6 (diff)
[PATCH] i386/x86-64: Don't call change_page_attr with a spinlock held
It's illegal because it can sleep. Use a two-step lookup scheme instead: first look up the vm_struct, then change the direct mapping, then finally unmap it. That's ok because nobody can change the particular virtual address range as long as the vm_struct is still in the global list.

Also added some LinuxDoc documentation to iounmap.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
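As an illustration only, here is a minimal sketch of the locking pattern the message describes, detached from the kernel specifics: the names entry, entries, entries_lock, release_entry, slow_cleanup and remove_entry are hypothetical stand-ins for the vmlist lookup, change_page_attr/ioremap_change_attr, and remove_vm_area steps. The point is that the call that may sleep happens only after the lock is dropped, while the entry stays on the list so the address cannot be reused before the final removal.

#include <linux/spinlock.h>
#include <linux/slab.h>

struct entry { struct entry *next; void *addr; };

static struct entry *entries;              /* list protected by entries_lock */
static DEFINE_RWLOCK(entries_lock);

void release_entry(void *addr)
{
        struct entry *e;

        /* Step 1: lookup only, with the lock held.  The entry is left on
         * the list so its address cannot be reused while we work on it. */
        read_lock(&entries_lock);
        for (e = entries; e; e = e->next)
                if (e->addr == addr)
                        break;
        read_unlock(&entries_lock);
        if (!e)
                return;

        /* Step 2: the work that may sleep, with no spinlock held
         * (hypothetical helper standing in for change_page_attr). */
        slow_cleanup(e);

        /* Step 3: only now unlink and free the entry
         * (hypothetical helper standing in for remove_vm_area). */
        remove_entry(e);
        kfree(e);
}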
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/mm/ioremap.c  37
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ecf7acb5db9b..0d260e4492f7 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -247,9 +247,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
         return __ioremap(phys_addr, size, _PAGE_PCD);
 }
 
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-        struct vm_struct *p;
+        struct vm_struct *p, *o;
 
         if (addr <= high_memory)
                 return;
@@ -257,12 +263,31 @@ void iounmap(volatile void __iomem *addr)
             addr < phys_to_virt(ISA_END_ADDRESS))
                 return;
 
-        write_lock(&vmlist_lock);
-        p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
-        if (!p)
+        addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+        /* Use the vm area unlocked, assuming the caller
+           ensures there isn't another iounmap for the same address
+           in parallel. Reuse of the virtual address is prevented by
+           leaving it in the global lists until we're done with it.
+           cpa takes care of the direct mappings. */
+        read_lock(&vmlist_lock);
+        for (p = vmlist; p; p = p->next) {
+                if (p->addr == addr)
+                        break;
+        }
+        read_unlock(&vmlist_lock);
+
+        if (!p) {
                 printk("iounmap: bad address %p\n", addr);
-        else if (p->flags >> 20)
+                dump_stack();
+                return;
+        }
+
+        /* Reset the direct mapping. Can block */
+        if (p->flags >> 20)
                 ioremap_change_attr(p->phys_addr, p->size, 0);
-        write_unlock(&vmlist_lock);
+
+        /* Finally remove it */
+        o = remove_vm_area((void *)addr);
+        BUG_ON(p != o || o == NULL);
         kfree(p);
 }
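For context, a minimal sketch of how a caller is expected to honor the constraint stated in the new kerneldoc ("only one unmapping for the same pointer"), assuming a hypothetical driver: foo_probe, foo_remove, foo_regs, FOO_BAR_PHYS and FOO_BAR_LEN are made-up names and values, not part of this patch. The only point is that each successful ioremap_nocache() is balanced by exactly one iounmap(), and that serializing the unmap is the caller's job, not iounmap()'s.

#include <asm/io.h>
#include <linux/errno.h>

#define FOO_BAR_PHYS    0xfebf0000UL    /* hypothetical MMIO base */
#define FOO_BAR_LEN     0x1000UL        /* hypothetical MMIO length */

static void __iomem *foo_regs;

static int foo_probe(void)
{
        foo_regs = ioremap_nocache(FOO_BAR_PHYS, FOO_BAR_LEN);
        if (!foo_regs)
                return -ENOMEM;
        return 0;
}

static void foo_remove(void)
{
        /* Exactly one iounmap() per pointer returned by ioremap_nocache();
         * the driver's own probe/remove ordering guarantees there is no
         * concurrent unmap of the same address. */
        iounmap(foo_regs);
        foo_regs = NULL;
}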