author	Aaro Koskinen <aaro.koskinen@iki.fi>	2012-08-07 14:39:25 +0200
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-08-09 07:46:07 +0200
commit	e4ea6918c93b9f59d34e8ca2124b2b64b1afe73b (patch)
tree	11c9dcf2e73344dc118e71804182049333bb1f37 /arch
parent	39f78e70567a07a6fc0d7a4ca9e3331e44dd400d (diff)
ARM: dma-mapping: fix atomic allocation alignment
The alignment mask is calculated incorrectly. Fixing the calculation
makes strange hangs/lockups disappear during the boot with Amstrad E3
and 3.6-rc1 kernel.

Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
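The mistake is easiest to see with concrete numbers. The following stand-alone C sketch (not part of the commit; a 4 KiB PAGE_SIZE and a 16 KiB request are assumed purely for illustration) contrasts the old byte-sized "align" value with the new page-count mask that bitmap_find_next_zero_area() actually expects:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages, for illustration only */

int main(void)
{
	int order = 2;	/* get_order(16384) == 2 with 4 KiB pages */

	/* Old code: a byte count... */
	size_t align = PAGE_SIZE << order;	/* 16384 bytes */
	/* ...was then used as a shift amount: (1 << align) - 1 shifts 1 by
	 * 16384 bit positions, which is undefined behaviour in C, so the
	 * mask handed to bitmap_find_next_zero_area() was garbage. */

	/* New code: the mask is expressed directly in pages. */
	unsigned long align_mask = (1UL << order) - 1;	/* 0x3 = 4-page alignment */

	printf("old align = %zu bytes, new align_mask = 0x%lx (pages)\n",
	       align, align_mask);
	return 0;
}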
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/dma-mapping.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 334dd79ad5e6..4bdeccd24d93 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -423,7 +423,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	unsigned int pageno;
 	unsigned long flags;
 	void *ptr = NULL;
-	size_t align;
+	unsigned long align_mask;
 
 	if (!pool->vaddr) {
 		WARN(1, "coherent pool not initialised!\n");
@@ -435,11 +435,11 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	 * small, so align them to their order in pages, minimum is a page
 	 * size. This helps reduce fragmentation of the DMA space.
 	 */
-	align = PAGE_SIZE << get_order(size);
+	align_mask = (1 << get_order(size)) - 1;
 
 	spin_lock_irqsave(&pool->lock, flags);
 	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
-					    0, count, (1 << align) - 1);
+					    0, count, align_mask);
 	if (pageno < pool->nr_pages) {
 		bitmap_set(pool->bitmap, pageno, count);
 		ptr = pool->vaddr + PAGE_SIZE * pageno;
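For context on why the mask must be in page units: bitmap_find_next_zero_area() rounds each candidate start bit up with its align_mask argument, roughly index = (index + align_mask) & ~align_mask. A minimal user-space sketch of that rounding, assuming the same (1 << order) - 1 mask as the fix, is shown below; it illustrates the semantics only and is not the kernel implementation.

#include <stdio.h>

/* Round a candidate bitmap index up to the alignment implied by mask
 * (mask == (1 << order) - 1), mirroring how bitmap_find_next_zero_area()
 * interprets its align_mask argument. Illustration only. */
static unsigned long align_index(unsigned long index, unsigned long mask)
{
	return (index + mask) & ~mask;
}

int main(void)
{
	unsigned long mask = (1UL << 2) - 1;	/* order-2 request: 4-page alignment */

	/* Candidate bitmap indices 5, 8 and 13 round up to 8, 8 and 16. */
	printf("%lu %lu %lu\n",
	       align_index(5, mask),
	       align_index(8, mask),
	       align_index(13, mask));
	return 0;
}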