author | Jacopo Mondi <jacopo+renesas@jmondi.org> | 2018-04-18 11:42:35 +0200
---|---|---
committer | Rich Felker <dalias@libc.org> | 2018-05-05 16:51:46 -0400
commit | b9826a4929bbd4dcb245429fd7434145c4fcdc9b (patch) |
tree | 8749be4653afdecc99b127e272e27118e553157e /arch/sh |
parent | 6cb465972c4eb6741b3094a58a65e527fc63c100 (diff) |
download | linux-b9826a4929bbd4dcb245429fd7434145c4fcdc9b.tar.bz2 |
sh: mm: Fix unprotected access to struct device
Since commit ce88313069c36eef80f21fd7 ("arch/sh: make the DMA mapping
operations observe dev->dma_pfn_offset"), the generic DMA allocation
function that the SH 'dma_alloc_coherent()' implementation relies on
accesses the 'dma_pfn_offset' field of struct device.
Unfortunately, 'dma_generic_alloc_coherent()' is called from several
places with a NULL struct device argument, halting the CPU during the
boot process.
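(Illustrative numbers only: on a 4 KiB-page configuration (PAGE_SHIFT = 12), a
hypothetical dev->dma_pfn_offset of 0x40000 turns a buffer at physical address
0x48000000 into the bus address 0x48000000 - (0x40000 << 12) = 0x08000000.
With dev == NULL, the very first read of dev->dma_pfn_offset is a NULL-pointer
dereference, so the allocation path never gets as far as producing a handle.)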
This patch fixes the issue by protecting access to dev->dma_pfn_offset
with a trivial validity check. It also passes a valid 'struct device' in
'platform_resource_setup_memory()', the main user of
'dma_alloc_coherent()', and inserts a WARN_ON() check to remind future
(and existing) bogus users of this function to provide a valid
'struct device' whenever possible.
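The guard works because of how WARN_ON() behaves: it evaluates its condition,
prints a warning (with a backtrace) when the condition is true, and returns
that condition, so 'if (!WARN_ON(!dev))' both flags a NULL device and skips the
dereference. Below is a minimal userspace sketch of the idiom; the WARN_ON()
stand-in, the struct device stub and the map_handle() helper are illustrative,
not kernel code.

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PFN_PHYS(pfn) ((uint64_t)(pfn) << PAGE_SHIFT)

/* Stand-in for the kernel macro: report the condition and pass it back. */
#define WARN_ON(cond) \
	({ int __hit = !!(cond); \
	   if (__hit) fprintf(stderr, "WARNING: %s\n", #cond); \
	   __hit; })

struct device {
	unsigned long dma_pfn_offset;	/* PFN offset between CPU and bus view */
};

/* Mirrors the patched handle computation: apply the offset only for a valid dev. */
static uint64_t map_handle(const struct device *dev, uint64_t phys)
{
	uint64_t dma_handle = phys;

	if (!WARN_ON(!dev))
		dma_handle -= PFN_PHYS(dev->dma_pfn_offset);

	return dma_handle;
}

int main(void)
{
	struct device d = { .dma_pfn_offset = 0x40000 };

	printf("valid dev: %#llx\n", (unsigned long long)map_handle(&d, 0x48000000));
	printf("NULL dev:  %#llx\n", (unsigned long long)map_handle(NULL, 0x48000000));
	return 0;
}
```

Built with gcc, this prints the offset-adjusted handle for the valid device and
an unmodified handle plus a warning for the NULL one, which is exactly the
behaviour the patch gives the kernel paths.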
Fixes: ce88313069c36eef80f21fd7 ("arch/sh: make the DMA mapping operations observe dev->dma_pfn_offset")
Signed-off-by: Jacopo Mondi <jacopo+renesas@jmondi.org>
Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
Signed-off-by: Rich Felker <dalias@libc.org>
Diffstat (limited to 'arch/sh')
-rw-r--r-- | arch/sh/mm/consistent.c | 11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 8ce98691d822..f1b44697ad68 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -59,7 +59,9 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
 
-	*dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
+	*dma_handle = virt_to_phys(ret);
+	if (!WARN_ON(!dev))
+		*dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
 
 	return ret_nocache;
 }
@@ -69,9 +71,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 			       unsigned long attrs)
 {
 	int order = get_order(size);
-	unsigned long pfn = (dma_handle >> PAGE_SHIFT) + dev->dma_pfn_offset;
+	unsigned long pfn = dma_handle >> PAGE_SHIFT;
 	int k;
 
+	if (!WARN_ON(!dev))
+		pfn += dev->dma_pfn_offset;
+
 	for (k = 0; k < (1 << order); k++)
 		__free_pages(pfn_to_page(pfn + k), 0);
 
@@ -143,7 +148,7 @@ int __init platform_resource_setup_memory(struct platform_device *pdev,
 	if (!memsize)
 		return 0;
 
-	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
+	buf = dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL);
 	if (!buf) {
 		pr_warning("%s: unable to allocate memory\n", name);
 		return -ENOMEM;
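For completeness, a hedged caller-side sketch (not part of the patch; the
example_probe() driver and buffer size are invented) of the convention the new
WARN_ON() nudges users towards: hand dma_alloc_coherent() the device that owns
the mapping, as platform_resource_setup_memory() now does with &pdev->dev,
rather than NULL.

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/platform_device.h>

#define EXAMPLE_BUF_SIZE 4096	/* illustrative size only */

static int example_probe(struct platform_device *pdev)
{
	dma_addr_t dma_handle;
	void *buf;

	/* Passing a real 'struct device' lets the SH allocator apply
	 * dev->dma_pfn_offset and avoids the new WARN_ON() splat. */
	buf = dma_alloc_coherent(&pdev->dev, EXAMPLE_BUF_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dev_info(&pdev->dev, "coherent buffer at bus address %pad\n",
		 &dma_handle);

	/* Nothing else to do in this sketch: release the buffer again. */
	dma_free_coherent(&pdev->dev, EXAMPLE_BUF_SIZE, buf, dma_handle);
	return 0;
}
```

Callers that still pass NULL (the remaining "bogus users" the commit message
mentions) now get a loud warning and an unadjusted handle instead of an
early-boot crash.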