author		David Howells <dhowells@redhat.com>	2006-10-16 14:10:49 +0100
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-16 08:32:29 -0700
commit		29da7eb0ec69245c6e9b4eb5bdaa04af685f5c4f
tree		14fb9ac86841af2cc17b999507b6879936616b1d
parent		8741ca71a3f626a56595b88200ebf952ce77ceef
[PATCH] FRV: Use the correct preemption primitives in kmap_atomic() and co
Use inc/dec_preempt_count() rather than preempt_disable/enable(), and manually
add the compiler barriers that the latter provided. This makes FRV consistent
with other archs.
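For context, a rough sketch of how these primitives expanded in kernels of this
era; this is an assumption for illustration (exact spellings vary by version and
CONFIG_PREEMPT), but it shows the barrier() that the raw count operations omit
and that this patch must therefore supply itself:

	/* Sketch of <linux/preempt.h>, circa 2.6.19 (assumed, for
	 * illustration): preempt_disable/enable() wrap the count
	 * operations in compiler barriers; inc/dec_preempt_count()
	 * alone do not. */
	#define inc_preempt_count()	add_preempt_count(1)
	#define dec_preempt_count()	sub_preempt_count(1)

	#define preempt_disable() \
	do { \
		inc_preempt_count(); \
		barrier(); \
	} while (0)

	#define preempt_enable() \
	do { \
		barrier(); \
		dec_preempt_count(); \
		preempt_check_resched(); \
	} while (0)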
Furthermore, the compiler barrier effects are now there unconditionally, at
least as far as preemption is concerned, because we don't want the compiler
moving memory accesses out of the section of code in which the mapping is in
force. In effect, kmap_atomic() must imply a LOCK-class barrier and
kunmap_atomic() an UNLOCK-class barrier to the compiler.
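The mechanism is the "memory" clobber added to each mapping and unmapping asm
statement; this is exactly what the kernel's own barrier() macro is built from.
A toy illustration (not from the patch):

	/* The kernel's compiler barrier: an empty asm whose "memory"
	 * clobber tells GCC the statement may read or write any memory,
	 * so memory accesses cannot be reordered across it or cached in
	 * registers over it. */
	#define barrier() __asm__ __volatile__("" : : : "memory")

	static int flag;

	static void example(void)
	{
		flag = 1;	/* without the barrier, GCC may drop this */
		barrier();	/* as a dead store; with it, both stores  */
		flag = 2;	/* must be emitted, in this order */
	}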
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 include/asm-frv/highmem.h | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index e2247c22a638..0f390f41f816 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -82,11 +82,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
 										\
 	if (type != __KM_CACHE)							\
-		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));		\
+		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory");	\
 	else									\
 		asm volatile("movgs %0,iampr"#ampr"\n"				\
 			     "movgs %0,dampr"#ampr"\n"				\
-			     :: "r"(dampr)					\
+			     :: "r"(dampr) : "memory"				\
 			     );							\
 										\
 	asm("movsg damlr"#ampr",%0" : "=r"(damlr));				\
@@ -104,7 +104,7 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	asm volatile("movgs %0,tplr \n"						\
 		     "movgs %1,tppr \n"						\
 		     "tlbpr %0,gr0,#2,#1"					\
-		     : : "r"(damlr), "r"(dampr));				\
+		     : : "r"(damlr), "r"(dampr) : "memory");			\
 										\
 	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/	\
 										\
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
-	preempt_disable();
+	inc_preempt_count();
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -138,16 +138,16 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	}
 }
 
-#define __kunmap_atomic_primary(type, ampr)				\
-do {									\
-	asm volatile("movgs gr0,dampr"#ampr"\n");			\
-	if (type == __KM_CACHE)						\
-		asm volatile("movgs gr0,iampr"#ampr"\n");		\
+#define __kunmap_atomic_primary(type, ampr)				\
+do {									\
+	asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");		\
+	if (type == __KM_CACHE)						\
+		asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");	\
 } while(0)
 
-#define __kunmap_atomic_secondary(slot, vaddr)				\
-do {									\
-	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));		\
+#define __kunmap_atomic_secondary(slot, vaddr)				\
+do {									\
+	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
 } while(0)
 
 static inline void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -170,7 +170,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	default:
 		BUG();
 	}
-	preempt_enable();
+	dec_preempt_count();
+	preempt_check_resched();
 }
 
 #endif /* !__ASSEMBLY__ */
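As a usage note, the point of the LOCK/UNLOCK-class semantics is that a
caller's accesses through the returned mapping stay pinned inside the mapped
window. A hypothetical caller (not part of the patch), using the km_type API
of this era:

	/* Hypothetical caller, for illustration only: the store through
	 * vaddr must not be moved before the kmap_atomic() or after the
	 * kunmap_atomic(); the "memory" clobbers added by this patch are
	 * what forbid the compiler from doing that. */
	static void zero_first_byte(struct page *page)
	{
		unsigned char *vaddr = kmap_atomic(page, KM_USER0);

		*vaddr = 0;
		kunmap_atomic(vaddr, KM_USER0);
	}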