author:    Joonsoo Kim <iamjoonsoo.kim@lge.com>  2014-08-06 16:05:21 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 18:01:16 -0700
commit:    a15bc0b89e8812d0db297bc771a85812c4fa83c1
tree:      b13e084512724a270bc4ae3164f87fc0c02558a8 /drivers/base
parent:    3162bbd7e65b9cc57b660796dd3409807bfc9070
DMA, CMA: support alignment constraint on CMA region
PPC KVM's CMA area management needs an alignment constraint on its CMA
region, so support one here to prepare for generalizing the CMA area
management functionality.

Additionally, add comments explaining why an alignment constraint is
needed on a CMA region.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Gleb Natapov <gleb@kernel.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
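In this patch, only the internal helper gains the new alignment argument;
the exported dma_contiguous_reserve_area() keeps its signature and passes
0, which selects the default alignment. As a rough illustration of the
intended use, here is a hypothetical caller that is not part of this
patch; it assumes it lives inside drivers/base/dma-contiguous.c (the
helper is static __init) and uses SZ_256M/SZ_16M from <linux/sizes.h>:

        /*
         * Hypothetical sketch, not in this patch: reserve a 256 MiB CMA
         * region that must start on a 16 MiB boundary.  alignment must
         * be 0 (use the default) or a power of 2; anything else now
         * fails with -EINVAL.
         */
        static int __init example_reserve_aligned(void)
        {
                struct cma *cma;

                return __dma_contiguous_reserve_area(SZ_256M, 0, 0, SZ_16M,
                                                     &cma, false);
        }

This is the kind of constraint the commit message cites for PPC KVM's
CMA area management.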
Diffstat (limited to 'drivers/base')
-rw-r--r--    drivers/base/dma-contiguous.c    26
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 9021762227a7..5f62c284072c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -32,6 +32,7 @@
 #include <linux/swap.h>
 #include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
+#include <linux/log2.h>
 
 struct cma {
         unsigned long   base_pfn;
@@ -215,17 +216,16 @@ core_initcall(cma_init_reserved_areas);
 
 static int __init __dma_contiguous_reserve_area(phys_addr_t size,
                                 phys_addr_t base, phys_addr_t limit,
+                                phys_addr_t alignment,
                                 struct cma **res_cma, bool fixed)
 {
         struct cma *cma = &cma_areas[cma_area_count];
-        phys_addr_t alignment;
         int ret = 0;
 
-        pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
-                 (unsigned long)size, (unsigned long)base,
-                 (unsigned long)limit);
+        pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
+                __func__, (unsigned long)size, (unsigned long)base,
+                (unsigned long)limit, (unsigned long)alignment);
 
-        /* Sanity checks */
         if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                 pr_err("Not enough slots for CMA reserved regions!\n");
                 return -ENOSPC;
@@ -234,8 +234,17 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
         if (!size)
                 return -EINVAL;
 
-        /* Sanitise input arguments */
-        alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+        if (alignment && !is_power_of_2(alignment))
+                return -EINVAL;
+
+        /*
+         * Sanitise input arguments.
+         * Pages both ends in CMA area could be merged into adjacent unmovable
+         * migratetype page by page allocator's buddy algorithm. In the case,
+         * you couldn't get a contiguous memory, which is not what we want.
+         */
+        alignment = max(alignment,
+                (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
         base = ALIGN(base, alignment);
         size = ALIGN(size, alignment);
         limit &= ~(alignment - 1);
@@ -299,7 +308,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
         int ret;
 
-        ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
+        ret = __dma_contiguous_reserve_area(size, base, limit, 0,
+                                                res_cma, fixed);
         if (ret)
                 return ret;
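For readers who want to see the new sanitisation with concrete numbers,
here is a minimal standalone sketch in plain C of the checks the patch
adds. It is an illustration only: PAGE_SHIFT, MAX_ORDER and
PAGEBLOCK_ORDER are assumed example values for a 4 KiB-page
configuration, and ALIGN()/is_power_of_2() are local re-implementations
of the kernel helpers.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       ((uint64_t)1 << PAGE_SHIFT)
#define MAX_ORDER       11      /* assumed buddy allocator limit */
#define PAGEBLOCK_ORDER 9       /* assumed: 2 MiB pageblocks, 4 KiB pages */

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static int is_power_of_2(uint64_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        uint64_t alignment = (uint64_t)1 << 24; /* caller asks for 16 MiB */
        uint64_t base = 0x1234000, size = 0x500000, limit = 0x40000000;
        uint64_t min_align;

        /* reject a caller-supplied alignment that is not a power of 2 */
        if (alignment && !is_power_of_2(alignment))
                return 1;       /* -EINVAL in the kernel */

        /* never align below what the buddy allocator needs */
        min_align = PAGE_SIZE << (MAX_ORDER - 1 > PAGEBLOCK_ORDER ?
                                  MAX_ORDER - 1 : PAGEBLOCK_ORDER);
        if (alignment < min_align)
                alignment = min_align;

        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        printf("alignment %#llx base %#llx size %#llx limit %#llx\n",
               (unsigned long long)alignment, (unsigned long long)base,
               (unsigned long long)size, (unsigned long long)limit);
        return 0;
}

With these inputs the requested 16 MiB alignment passes the power-of-2
check and exceeds the 4 MiB minimum (PAGE_SIZE << (MAX_ORDER - 1)), so
base rounds up from 0x1234000 to 0x2000000 and size from 5 MiB to
16 MiB, mirroring the ALIGN() calls in __dma_contiguous_reserve_area().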