author     Linus Torvalds <torvalds@linux-foundation.org>   2019-03-22 14:10:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-03-22 14:10:27 -0700
commit     070c95d457267eefecd70f5dd434740201d5083c (patch)
tree       3a423f9556d886ef2b305f97adb072a5601737ae
parent     dcacc4864f03e138a8b757ce75142b602d7f4389 (diff)
parent     84c11e4df5aa4955acaa441f0cf1cb2e50daf64b (diff)
download   linux-070c95d457267eefecd70f5dd434740201d5083c.tar.bz2
Merge tag 'iommu-fixes-v5.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu fixes from Joerg Roedel:
- AMD IOMMU fix for sg-mapping with sg->offset > PAGE_SIZE (the arithmetic is sketched just after this list)
- Fix for IOVA code to trigger the slow-path less often
- Two fixes for Intel VT-d to avoid writing to read-only registers and
to flush the right domain id for the default domains in scalable mode
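
As a quick illustration of the first item, here is the arithmetic behind the AMD fix as a standalone user-space sketch, not the driver code itself: the PAGE_* constants and all values below are local stand-ins. The IOVA mapping starts at the page-aligned physical address of the entry's data, so only the sub-page remainder of sg->offset belongs in dma_address; adding the full offset double-counts whole pages whenever sg->offset > PAGE_SIZE.

#include <stdio.h>

/* Local stand-ins for the kernel's page constants (asm/page.h). */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical sg entry whose data starts two pages plus
	 * 16 bytes into its buffer, i.e. offset > PAGE_SIZE. */
	unsigned long offset  = 2 * PAGE_SIZE + 16;

	/* Made-up IOVA where the mapping of the page-aligned physical
	 * address (sg_phys(s) & PAGE_MASK) landed. */
	unsigned long address = 0xfff00000UL;

	/* Pre-fix: re-adds whole pages that the page-aligned physical
	 * address already covered -- points two pages past the data. */
	unsigned long buggy = address + offset;

	/* Post-fix: add back only the sub-page remainder. */
	unsigned long fixed = address + (offset & ~PAGE_MASK);

	printf("buggy dma_address: 0x%lx\n", buggy);	/* 0xfff02010 */
	printf("fixed dma_address: 0x%lx\n", fixed);	/* 0xfff00010 */
	return 0;
}
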
* tag 'iommu-fixes-v5.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Save the right domain ID used by hardware
  iommu/vt-d: Check capability before disabling protected memory
  iommu/iova: Fix tracking of recently failed iova address
  iommu/amd: fix sg->dma_address for sg->offset bigger than PAGE_SIZE
-rw-r--r--  drivers/iommu/amd_iommu.c   | 7 ++++++-
-rw-r--r--  drivers/iommu/intel-iommu.c | 5 ++++-
-rw-r--r--  drivers/iommu/iova.c        | 5 +++--
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b319e51c379b..21cb088d6687 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
 	/* Everything is mapped - write the right values into s->dma_address */
 	for_each_sg(sglist, s, nelems, i) {
-		s->dma_address += address + s->offset;
+		/*
+		 * Add in the remaining piece of the scatter-gather offset that
+		 * was masked out when we were determining the physical address
+		 * via (sg_phys(s) & PAGE_MASK) earlier.
+		 */
+		s->dma_address += address + (s->offset & ~PAGE_MASK);
 		s->dma_length = s->length;
 	}
 
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 87274b54febd..28cb713d728c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	u32 pmen;
 	unsigned long flags;
 
+	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+		return;
+
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
 	pmen &= ~DMA_PMEN_EPM;
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 
 	ctx_lo = context[0].lo;
 
-	sdev->did = domain->iommu_did[iommu->seq_id];
+	sdev->did = FLPT_DEFAULT_DID;
 	sdev->sid = PCI_DEVID(info->bus, info->devfn);
 
 	if (!(ctx_lo & CONTEXT_PASIDE)) {
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f8d3ba247523..2de8122e218f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		curr_iova = rb_entry(curr, struct iova, node);
 	} while (curr && new_pfn <= curr_iova->pfn_hi);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn)
+	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+		iovad->max32_alloc_size = size;
 		goto iova32_full;
+	}
 
 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	return 0;
 
 iova32_full:
-	iovad->max32_alloc_size = size;
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return -ENOMEM;
 }
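
Two notes on the diffs above, each with a small illustrative sketch rather than the actual driver code. First, the iommu_disable_protect_mem_regions() guard: the VT-d spec says DMAR_PMEN_REG should be treated as read-only when neither PLMR nor PHMR is reported in the capability register, so the function now bails out before touching it. A reduced model follows, with cap_plmr()/cap_phmr() reimplemented on the assumption that PLMR and PHMR are capability bits 5 and 6 (as in the kernel's intel-iommu.h) and the register file replaced by a plain struct:

#include <stdint.h>

/* Assumed bit positions, mirroring cap_plmr()/cap_phmr() in the
 * kernel's intel-iommu.h (PLMR = bit 5, PHMR = bit 6 of CAP_REG). */
static inline int cap_plmr(uint64_t cap) { return (cap >> 5) & 1; }
static inline int cap_phmr(uint64_t cap) { return (cap >> 6) & 1; }

#define DMA_PMEN_EPM	(1u << 31)	/* Enable Protected Memory bit */

/* Hypothetical stand-in for the IOMMU's register file. */
struct fake_iommu {
	uint64_t cap;	/* capability register */
	uint32_t pmen;	/* stands in for DMAR_PMEN_REG */
};

static void disable_protect_mem_regions(struct fake_iommu *iommu)
{
	/* Without PLMR/PHMR support the PMEN register is to be treated
	 * as read-only, so skip the write the spec says to avoid. */
	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
		return;

	iommu->pmen &= ~DMA_PMEN_EPM;	/* clear EPM, as the driver does */
}
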
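Second, the iova.c change: max32_alloc_size caches the smallest size that recently failed below the 32-bit boundary, and an earlier check in the same function (context not visible in the hunk) fast-fails any request at least that large. Pre-fix, the shared iova32_full label also ran on that fast-fail path and overwrote the cache with the current, larger size, so a later request of an in-between size would walk the tree again even though it was already known to fail. Moving the assignment inside the tree-walk failure branch keeps the cached value at the smallest failing size. A control-flow-only sketch, with the rbtree walk stubbed out and all names merely echoing drivers/iommu/iova.c:

#include <stdbool.h>

/* Reduced model of the allocator's fast-fail bookkeeping. */
struct iova_domain {
	unsigned long max32_alloc_size;	/* smallest recently failed size */
};

/* Stub for the rbtree walk: pretend only sizes below 8 pages fit. */
static bool tree_walk_fits(unsigned long size)
{
	return size < 8;
}

static int alloc_range(struct iova_domain *iovad, unsigned long size)
{
	/* Fast-fail: a request at least as big as a recently failed one
	 * cannot succeed either; do not touch max32_alloc_size here.
	 * (Pre-fix, the shared exit label overwrote it with this larger
	 * size, letting an in-between size walk the tree again.) */
	if (size >= iovad->max32_alloc_size)
		return -1;

	if (!tree_walk_fits(size)) {
		/* Post-fix: record the failure only when the walk
		 * itself came up empty. */
		iovad->max32_alloc_size = size;
		return -1;
	}
	return 0;
}

In the real allocator the cached size is also reset when an entry below the 32-bit boundary is freed, so the fast-fail path does not go permanently stale.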