author		Christoph Hellwig <hch@lst.de>	2022-05-11 08:24:10 +0200
committer	Christoph Hellwig <hch@lst.de>	2022-05-13 12:49:27 +0200
commit		1b8e5d1a53696d92374acce2b19a649427f1ec1e (patch)
tree		2cfd32b0c4fc1fa4206212b791983f394e55980e /kernel/dma
parent		a5e891321a219679d5a2828150a7dda29a47d8a6 (diff)
swiotlb: use the right nslabs-derived sizes in swiotlb_init_late

nslabs can shrink when allocations or the remap don't succeed, so make
sure to use it for all sizing.  For that remove the bytes value that
can get stale and replace it with local calculations and a boolean to
indicate if the originally requested size could not be allocated.

Fixes: 6424e31b1c05 ("swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
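For readers outside the swiotlb code, the bug is easiest to see in isolation. The sketch below is a minimal userspace illustration, not kernel code: fake_alloc, its 1 MiB failure cutoff, the starting request, and the simplified loop bound are invented for the example, and PAGE_SHIFT/IO_TLB_SHIFT are hardcoded to their usual values. It shows how a byte count captured before the shrinking retry loop goes stale once the loop reduces order, while re-deriving every size from nslabs stays correct:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT	12	/* 4 KiB pages, the common case */
	#define IO_TLB_SHIFT	11	/* 2 KiB swiotlb slabs */
	#define SLABS_PER_PAGE	(1UL << (PAGE_SHIFT - IO_TLB_SHIFT))

	/* Stand-in for __get_free_pages(): pretend anything above 1 MiB fails. */
	static void *fake_alloc(unsigned int order)
	{
		if (((size_t)1 << (order + PAGE_SHIFT)) > (1UL << 20))
			return NULL;
		return malloc((size_t)1 << (order + PAGE_SHIFT));
	}

	int main(void)
	{
		unsigned long nslabs = 4096;	/* hypothetical 8 MiB request */
		unsigned int order = 11;	/* get_order(nslabs << IO_TLB_SHIFT) */
		unsigned long bytes = nslabs << IO_TLB_SHIFT;	/* captured once: goes stale */
		bool retried = false;
		void *vstart = NULL;

		while (order > 0) {
			vstart = fake_alloc(order);
			if (vstart)
				break;
			order--;
			nslabs = SLABS_PER_PAGE << order;	/* the fix: re-derive from order */
			retried = true;
		}
		if (!vstart)
			return 1;

		if (retried)
			printf("only able to allocate %lu MB\n",
			       (unsigned long)(((size_t)1 << (order + PAGE_SHIFT)) >> 20));
		printf("stale bytes: %lu, nslabs-derived size: %lu\n",
		       bytes, nslabs << IO_TLB_SHIFT);
		free(vstart);
		return 0;
	}

Here the allocation shrinks from 8 MiB to 1 MiB, so the stale bytes value (8388608) no longer matches the nslabs-derived size (1048576). In the kernel this is exactly why set_memory_decrypted() must size its range from nslabs at the point of use: with the stale value it would operate on pages the function never allocated.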
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/swiotlb.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 113e1e8aaca3..d6e62a6a42ce 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -297,9 +297,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
-	unsigned long bytes;
 	unsigned char *vstart = NULL;
 	unsigned int order;
+	bool retried = false;
 	int rc = 0;
 
 	if (swiotlb_force_disable)
@@ -308,7 +308,6 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
-	bytes = nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
 		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
@@ -316,16 +315,13 @@ retry:
 		if (vstart)
 			break;
 		order--;
+		nslabs = SLABS_PER_PAGE << order;
+		retried = true;
 	}
 
 	if (!vstart)
 		return -ENOMEM;
 
-	if (order != get_order(bytes)) {
-		pr_warn("only able to allocate %ld MB\n",
-			(PAGE_SIZE << order) >> 20);
-		nslabs = SLABS_PER_PAGE << order;
-	}
 	if (remap)
 		rc = remap(vstart, nslabs);
 	if (rc) {
@@ -334,9 +330,15 @@ retry:
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
 		if (nslabs < IO_TLB_MIN_SLABS)
 			return rc;
+		retried = true;
 		goto retry;
 	}
 
+	if (retried) {
+		pr_warn("only able to allocate %ld MB\n",
+			(PAGE_SIZE << order) >> 20);
+	}
+
 	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 		get_order(array_size(sizeof(*mem->slots), nslabs)));
 	if (!mem->slots) {
@@ -344,7 +346,8 @@ retry:
 		return -ENOMEM;
 	}
 
-	set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT);
+	set_memory_decrypted((unsigned long)vstart,
+			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
 
 	swiotlb_print_info();