author	Robin Murphy <robin.murphy@arm.com>	2022-05-20 18:10:13 +0100
committer	Christoph Hellwig <hch@lst.de>	2022-05-23 15:25:40 +0200
commit	4a37f3dd9a83186cb88d44808ab35b78375082c9 (patch)
tree	ba5dc0010beb736ab375eecc72890bb71a4cb131 /kernel/dma
parent	82806744fd7dde603b64c151eeddaa4ee62193fd (diff)
download	linux-4a37f3dd9a83186cb88d44808ab35b78375082c9.tar.bz2
dma-direct: don't over-decrypt memory
The original x86 sev_alloc() only called set_memory_decrypted() on memory returned by alloc_pages_node(), so the page order calculation fell out of that logic. However, the common dma-direct code has several potential allocators, not all of which are guaranteed to round up the underlying allocation to a power-of-two size, so carrying over that calculation for the encryption/decryption size was a mistake. Fix it by rounding to a *number* of pages, rather than an order.

Until recently there was an even worse interaction with DMA_DIRECT_REMAP where we could have ended up decrypting part of the next adjacent vmalloc area, only averted by no architecture actually supporting both configs at once. Don't ask how I found that one out...

Fixes: c10f07aa27da ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David Rientjes <rientjes@google.com>
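To see how far apart the two sizings can get, here is a minimal userspace sketch (not kernel code: a 4 KiB page size is assumed, and get_order()/PFN_UP() are re-implemented purely for illustration) comparing the page counts for a non-power-of-two buffer:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12                     /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Round a byte count up to a whole number of pages, like the kernel's PFN_UP(). */
static unsigned long pfn_up(size_t size)
{
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Smallest order whose power-of-two page count covers size, like get_order(). */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	size_t size = 5 * PAGE_SIZE;	/* e.g. a 20 KiB coherent buffer */

	printf("pages actually needed:     %lu\n", pfn_up(size));           /* 5 */
	printf("pages (de)crypted pre-fix: %lu\n", 1UL << get_order(size)); /* 8 */
	return 0;
}

For the 20 KiB buffer above, the old order-based calculation touches 8 pages where only 5 may have been allocated; with DMA_DIRECT_REMAP the surplus pages could belong to the next adjacent vmalloc area, which is the worse interaction described in the commit message.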
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/direct.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 3e7f4aab740e..e978f36e6be8 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -79,7 +79,7 @@ static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
 {
 	if (!force_dma_unencrypted(dev))
 		return 0;
-	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
 }
 
 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
@@ -88,7 +88,7 @@ static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 
 	if (!force_dma_unencrypted(dev))
 		return 0;
-	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 	if (ret)
 		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
 	return ret;
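For reference, PFN_UP() rounds a byte count up to a whole number of page frames; its definition in include/linux/pfn.h is essentially:

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)

so the page count handed to set_memory_decrypted()/set_memory_encrypted() now covers exactly the size the caller mapped, regardless of how the underlying allocator rounded the allocation.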