Diffstat (limited to 'drivers/iommu')
 drivers/iommu/Kconfig               |   5
 drivers/iommu/amd_iommu.c           |  71
 drivers/iommu/amd_iommu_types.h     |   1
 drivers/iommu/dma-iommu.c           |  54
 drivers/iommu/dmar.c                |  10
 drivers/iommu/intel-iommu.c         | 101
 drivers/iommu/intel-svm.c           |   2
 drivers/iommu/intel_irq_remapping.c |   2
 drivers/iommu/iommu.c               |   7
 drivers/iommu/qcom_iommu.c          |   6
 drivers/iommu/rockchip-iommu.c      |  11
 drivers/iommu/tegra-gart.c          |  15
 12 files changed, 148 insertions(+), 137 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index df171cb85822..c76157e57f6b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,7 +23,7 @@ config IOMMU_IO_PGTABLE
config IOMMU_IO_PGTABLE_LPAE
bool "ARMv7/v8 Long Descriptor Format"
select IOMMU_IO_PGTABLE
- depends on HAS_DMA && (ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64))
+ depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
help
Enable support for the ARM long descriptor pagetable format.
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -42,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
config IOMMU_IO_PGTABLE_ARMV7S
bool "ARMv7/v8 Short Descriptor Format"
select IOMMU_IO_PGTABLE
- depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
+ depends on ARM || ARM64 || COMPILE_TEST
help
Enable support for the ARM Short-descriptor pagetable format.
This supports 32-bit virtual and physical addresses mapped using
@@ -376,7 +376,6 @@ config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
- depends on HAS_DMA
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2a99f0f14795..8cb28def43e3 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -83,7 +83,6 @@
static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
static DEFINE_SPINLOCK(pd_bitmap_lock);
-static DEFINE_SPINLOCK(iommu_table_lock);
/* List of all available dev_data structures */
static LLIST_HEAD(dev_data_list);
@@ -545,7 +544,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
struct device *dev = iommu->iommu.dev;
- int type, devid, domid, flags;
+ int type, devid, pasid, flags, tag;
volatile u32 *event = __evt;
int count = 0;
u64 address;
@@ -553,7 +552,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+ pasid = PPR_PASID(*(u64 *)&event[0]);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
@@ -568,7 +567,7 @@ retry:
}
if (type == EVENT_TYPE_IO_FAULT) {
- amd_iommu_report_page_fault(devid, domid, address, flags);
+ amd_iommu_report_page_fault(devid, pasid, address, flags);
return;
} else {
dev_err(dev, "AMD-Vi: Event logged [");
@@ -576,10 +575,9 @@ retry:
switch (type) {
case EVENT_TYPE_ILL_DEV:
- dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
+ dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ pasid, address, flags);
dump_dte_entry(devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
@@ -589,34 +587,38 @@ retry:
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
- "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+ dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- domid, address, flags);
+ pasid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
dump_command(address);
break;
case EVENT_TYPE_CMD_HARD_ERR:
- dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx "
- "flags=0x%04x]\n", address, flags);
+ dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx flags=0x%04x]\n",
+ address, flags);
break;
case EVENT_TYPE_IOTLB_INV_TO:
- dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
- "address=0x%016llx]\n",
+ dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%016llx]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address);
break;
case EVENT_TYPE_INV_DEV_REQ:
- dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
+ dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ pasid, address, flags);
+ break;
+ case EVENT_TYPE_INV_PPR_REQ:
+ pasid = ((event[0] >> 16) & 0xFFFF)
+ | ((event[1] << 6) & 0xF0000);
+ tag = event[1] & 0x03FF;
+ dev_err(dev, "INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pasid, address, flags);
break;
default:
- dev_err(dev, KERN_ERR "UNKNOWN event[0]=0x%08x event[1]=0x%08x "
- "event[2]=0x%08x event[3]=0x%08x\n",
+ dev_err(dev, "UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
event[0], event[1], event[2], event[3]);
}
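The new INVALID_PPR_REQUEST case reassembles the 20-bit PASID from two event-log words. As a standalone illustration (not part of the patch; the helper name is invented):

    /* PASID[15:0] sits in bits 31:16 of event[0]; PASID[19:16] comes
     * from bits 13:10 of event[1], shifted into place by the << 6. */
    static inline u32 ppr_event_pasid(const u32 *event)
    {
    	u32 pasid = (event[0] >> 16) & 0xFFFF;

    	pasid |= (event[1] << 6) & 0xF0000;
    	return pasid;
    }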
@@ -1912,15 +1914,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct amd_iommu *iommu;
u16 alias;
- /*
- * First check if the device is still attached. It might already
- * be detached from its domain because the generic
- * iommu_detach_group code detached it and we try again here in
- * our alias handling.
- */
- if (!dev_data->domain)
- return;
-
iommu = amd_iommu_rlookup_table[dev_data->devid];
alias = dev_data->alias;
@@ -1940,8 +1933,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
}
/*
- * If a device is not yet associated with a domain, this function does
- * assigns it visible for the hardware
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
*/
static int __attach_device(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
@@ -2062,8 +2055,8 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
}
/*
- * If a device is not yet associated with a domain, this function
- * assigns it visible for the hardware
+ * If a device is not yet associated with a domain, this function makes the
+ * device visible in the domain
*/
static int attach_device(struct device *dev,
struct protection_domain *domain)
@@ -2125,9 +2118,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
*/
WARN_ON(!irqs_disabled());
- if (WARN_ON(!dev_data->domain))
- return;
-
domain = dev_data->domain;
spin_lock(&domain->lock);
@@ -2149,6 +2139,15 @@ static void detach_device(struct device *dev)
dev_data = get_dev_data(dev);
domain = dev_data->domain;
+ /*
+ * First check if the device is still attached. It might already
+ * be detached from its domain because the generic
+ * iommu_detach_group code detached it and we try again here in
+ * our alias handling.
+ */
+ if (WARN_ON(!dev_data->domain))
+ return;
+
/* lock device table */
spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
__detach_device(dev_data);
@@ -2794,6 +2793,7 @@ static void cleanup_domain(struct protection_domain *domain)
while (!list_empty(&domain->dev_list)) {
entry = list_first_entry(&domain->dev_list,
struct iommu_dev_data, list);
+ BUG_ON(!entry->domain);
__detach_device(entry);
}
@@ -3562,6 +3562,7 @@ EXPORT_SYMBOL(amd_iommu_device_info);
*****************************************************************************/
static struct irq_chip amd_ir_chip;
+static DEFINE_SPINLOCK(iommu_table_lock);
static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
{
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 1c9b080276c9..986cbe0cc189 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -133,6 +133,7 @@
#define EVENT_TYPE_CMD_HARD_ERR 0x6
#define EVENT_TYPE_IOTLB_INV_TO 0x7
#define EVENT_TYPE_INV_DEV_REQ 0x8
+#define EVENT_TYPE_INV_PPR_REQ 0x9
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK 0xffff
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f05f3cf90756..ddcbbdb5d658 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -167,40 +167,16 @@ EXPORT_SYMBOL(iommu_put_dma_cookie);
* @list: Reserved region list from iommu_get_resv_regions()
*
* IOMMU drivers can use this to implement their .get_resv_regions callback
- * for general non-IOMMU-specific reservations. Currently, this covers host
- * bridge windows for PCI devices and GICv3 ITS region reservation on ACPI
- * based ARM platforms that may require HW MSI reservation.
+ * for general non-IOMMU-specific reservations. Currently, this covers GICv3
+ * ITS region reservation on ACPI based ARM platforms that may require HW MSI
+ * reservation.
*/
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
- struct pci_host_bridge *bridge;
- struct resource_entry *window;
-
- if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) &&
- iort_iommu_msi_get_resv_regions(dev, list) < 0)
- return;
-
- if (!dev_is_pci(dev))
- return;
-
- bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
- resource_list_for_each_entry(window, &bridge->windows) {
- struct iommu_resv_region *region;
- phys_addr_t start;
- size_t length;
-
- if (resource_type(window->res) != IORESOURCE_MEM)
- continue;
- start = window->res->start - window->offset;
- length = window->res->end - window->res->start + 1;
- region = iommu_alloc_resv_region(start, length, 0,
- IOMMU_RESV_RESERVED);
- if (!region)
- return;
+ if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
+ iort_iommu_msi_get_resv_regions(dev, list);
- list_add_tail(&region->list, list);
- }
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -229,6 +205,23 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
return 0;
}
+static void iova_reserve_pci_windows(struct pci_dev *dev,
+ struct iova_domain *iovad)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+ struct resource_entry *window;
+ unsigned long lo, hi;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ if (resource_type(window->res) != IORESOURCE_MEM)
+ continue;
+
+ lo = iova_pfn(iovad, window->res->start - window->offset);
+ hi = iova_pfn(iovad, window->res->end - window->offset);
+ reserve_iova(iovad, lo, hi);
+ }
+}
+
static int iova_reserve_iommu_regions(struct device *dev,
struct iommu_domain *domain)
{
@@ -238,6 +231,9 @@ static int iova_reserve_iommu_regions(struct device *dev,
LIST_HEAD(resv_regions);
int ret = 0;
+ if (dev_is_pci(dev))
+ iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+
iommu_get_resv_regions(dev, &resv_regions);
list_for_each_entry(region, &resv_regions, list) {
unsigned long lo, hi;
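Rather than publishing host-bridge windows as reserved regions, the PCI windows are now carved straight out of the IOVA allocator, which keeps them from ever being handed out as DMA addresses without cluttering the generic reserved-region list. A minimal sketch of the two calls involved, assuming an already-initialised iova_domain and an example window at 0x80000000..0x8fffffff:

    #include <linux/iova.h>

    static void example_reserve_window(struct iova_domain *iovad)
    {
    	/* Convert bus addresses to IOVA page frame numbers ... */
    	unsigned long lo = iova_pfn(iovad, 0x80000000);
    	unsigned long hi = iova_pfn(iovad, 0x8fffffff);

    	/* ... and punch a permanent hole in the allocator. */
    	reserve_iova(iovad, lo, hi);
    }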
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index accf58388bdb..4321f7704b23 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1345,7 +1345,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
struct qi_desc desc;
if (mask) {
- BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+ WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
} else
@@ -1618,17 +1618,13 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
int reg, fault_index;
u32 fault_status;
unsigned long flag;
- bool ratelimited;
static DEFINE_RATELIMIT_STATE(rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- /* Disable printing, simply clear the fault when ratelimited */
- ratelimited = !__ratelimit(&rs);
-
raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- if (fault_status && !ratelimited)
+ if (fault_status && __ratelimit(&rs))
pr_err("DRHD: handling fault status reg %x\n", fault_status);
/* TBD: ignore advanced fault log currently */
@@ -1638,6 +1634,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
fault_index = dma_fsts_fault_record_index(fault_status);
reg = cap_fault_reg_offset(iommu->cap);
while (1) {
+ /* Disable printing, simply clear the fault when ratelimited */
+ bool ratelimited = !__ratelimit(&rs);
u8 fault_reason;
u16 source_id;
u64 guest_addr;
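The dmar_fault() rework moves the ratelimit test from one evaluation per interrupt to one per fault record, so a burst of faults can no longer be silenced (or printed) wholesale on the strength of a single check. The pattern in isolation (sketch only):

    #include <linux/ratelimit.h>

    static void report_dma_fault(u64 addr)
    {
    	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
    				      DEFAULT_RATELIMIT_BURST);

    	/* True while under the limit; when it flips to false the
    	 * caller still clears the fault, just without printing. */
    	if (__ratelimit(&rs))
    		pr_err("DMA fault at 0x%llx\n", addr);
    }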
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 749d8f235346..d79e3ebbe437 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -485,37 +485,14 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
-static int intel_iommu_pasid28;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-/* Broadwell and Skylake have broken ECS support — normal so-called "second
- * level" translation of DMA requests-without-PASID doesn't actually happen
- * unless you also set the NESTE bit in an extended context-entry. Which of
- * course means that SVM doesn't work because it's trying to do nested
- * translation of the physical addresses it finds in the process page tables,
- * through the IOVA->phys mapping found in the "second level" page tables.
- *
- * The VT-d specification was retroactively changed to change the definition
- * of the capability bits and pretend that Broadwell/Skylake never happened...
- * but unfortunately the wrong bit was changed. It's ECS which is broken, but
- * for some reason it was the PASID capability bit which was redefined (from
- * bit 28 on BDW/SKL to bit 40 in future).
- *
- * So our test for ECS needs to eschew those implementations which set the old
- * PASID capability bit 28, since those are the ones on which ECS is broken.
- * Unless we are working around the 'pasid28' limitations, that is, by putting
- * the device into passthrough mode for normal DMA and thus masking the bug.
- */
-#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
- (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
-/* PASID support is thus enabled if ECS is enabled and *either* of the old
- * or new capability bits are set. */
-#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
- (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap))
+#define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -578,11 +555,6 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable extended context table support\n");
intel_iommu_ecs = 0;
- } else if (!strncmp(str, "pasid28", 7)) {
- printk(KERN_INFO
- "Intel-IOMMU: enable pre-production PASID support\n");
- intel_iommu_pasid28 = 1;
- iommu_identity_mapping |= IDENTMAP_GFX;
} else if (!strncmp(str, "tboot_noforce", 13)) {
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -1606,6 +1578,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
iommu_flush_dev_iotlb(domain, addr, mask);
}
+/* Notification for newly created mappings */
+static inline void __mapping_notify_one(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages)
+{
+ /* It's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
+ else
+ iommu_flush_write_buffer(iommu);
+}
+
static void iommu_flush_iova(struct iova_domain *iovad)
{
struct dmar_domain *domain;
@@ -2340,18 +2324,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return 0;
}
+static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long phys_pfn,
+ unsigned long nr_pages, int prot)
+{
+ int ret;
+ struct intel_iommu *iommu;
+
+ /* Do the real mapping first */
+ ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+ if (ret)
+ return ret;
+
+ /* Notify about the new mapping */
+ if (domain_type_is_vm(domain)) {
+ /* VM typed domains can have more than one IOMMUs */
+ int iommu_id;
+ for_each_domain_iommu(iommu_id, domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+ } else {
+ /* General domains only have one IOMMU */
+ iommu = domain_get_iommu(domain);
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+
+ return 0;
+}
+
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long nr_pages,
int prot)
{
- return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+ return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}
static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages,
int prot)
{
- return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+ return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
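domain_mapping() pairs every successful __domain_mapping() with the flush decision in __mapping_notify_one(), a rule the call sites used to duplicate: on a non-present-to-present transition, hardware running in caching mode (CM=1, typical of virtual VT-d) may have cached the not-present entry and needs a targeted IOTLB flush, while bare metal only needs its write buffer drained. In outline:

    /* After installing new PTEs (non-present -> present): */
    if (cap_caching_mode(iommu->cap))
    	iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
    else
    	iommu_flush_write_buffer(iommu);

Note that iommu_domain_identity_map() below is switched to the bare __domain_mapping(), deliberately opting out of this notification.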
@@ -2533,7 +2546,7 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
struct device_domain_info *info = NULL;
struct dmar_domain *domain = NULL;
struct intel_iommu *iommu;
- u16 req_id, dma_alias;
+ u16 dma_alias;
unsigned long flags;
u8 bus, devfn;
@@ -2541,8 +2554,6 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
if (!iommu)
return NULL;
- req_id = ((u16)bus << 8) | devfn;
-
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2656,9 +2667,9 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
*/
dma_pte_clear_range(domain, first_vpfn, last_vpfn);
- return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
- last_vpfn - first_vpfn + 1,
- DMA_PTE_READ|DMA_PTE_WRITE);
+ return __domain_mapping(domain, first_vpfn, NULL,
+ first_vpfn, last_vpfn - first_vpfn + 1,
+ DMA_PTE_READ|DMA_PTE_WRITE);
}
static int domain_prepare_identity_map(struct device *dev,
@@ -3625,14 +3636,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
if (ret)
goto error;
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova_pfn),
- size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
-
start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
return start_paddr;
@@ -3819,12 +3822,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
return 0;
}
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
-
return nelems;
}
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index e8cd984cf9c8..45f6e581cd56 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -319,7 +319,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
} else
pasid_max = 1 << 20;
- if ((flags & SVM_FLAG_SUPERVISOR_MODE)) {
+ if (flags & SVM_FLAG_SUPERVISOR_MODE) {
if (!ecap_srs(iommu->ecap))
return -EINVAL;
} else if (pasid) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 66f69af2c219..3062a154a9fb 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1136,7 +1136,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
irte->dest_id = IRTE_DEST(cfg->dest_apicid);
/* Update the hardware only if the interrupt is in remapped mode. */
- if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+ if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
modify_irte(&ir_data->irq_2_iommu, irte);
}
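The one-word intel_irq_remapping fix inverts a botched short-circuit: the old test skipped the hardware update precisely when force was set, and wrote it on every non-forced call. Spelled out:

    /*
     * force  mode == IRQ_REMAPPING | old: !force || remap   new: force || remap
     * false  false                 |        write                 skip
     * false  true                  |        write                 write
     * true   false                 |        skip                  write
     * true   true                  |        write                 write
     */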
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d2aa23202bb9..63b37563db7e 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -116,9 +116,11 @@ static void __iommu_detach_group(struct iommu_domain *domain,
static int __init iommu_set_def_domain_type(char *str)
{
bool pt;
+ int ret;
- if (!str || strtobool(str, &pt))
- return -EINVAL;
+ ret = kstrtobool(str, &pt);
+ if (ret)
+ return ret;
iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
return 0;
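strtobool() is a thin alias for kstrtobool(), and calling the latter directly also makes the explicit NULL test redundant, since kstrtobool() rejects a NULL string itself with -EINVAL. A sketch of the accepted inputs:

    bool pt;

    kstrtobool("on", &pt);	/* 0, pt == true  */
    kstrtobool("0", &pt);	/* 0, pt == false */
    kstrtobool(NULL, &pt);	/* -EINVAL, pt untouched */

It also understands y/Y/1, n/N/0 and "off".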
@@ -322,7 +324,6 @@ static struct kobj_type iommu_group_ktype = {
/**
* iommu_group_alloc - Allocate a new group
- * @name: Optional name to associate with group, visible in sysfs
*
* This function is called by an iommu driver to allocate a new iommu
* group. The iommu group represents the minimum granularity of the iommu.
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 65b9c99707f8..fe88a4880d3a 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -885,16 +885,14 @@ static int qcom_iommu_device_remove(struct platform_device *pdev)
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
return qcom_iommu_enable_clocks(qcom_iommu);
}
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
+ struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
qcom_iommu_disable_clocks(qcom_iommu);
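The qcom_iommu cleanup leans on platform_get_drvdata() being pure sugar: it is defined as dev_get_drvdata(&pdev->dev), so a PM callback that already holds the struct device can skip the to_platform_device() round trip. Side by side:

    /* Before: two hops through the platform_device wrapper */
    struct qcom_iommu_dev *q = platform_get_drvdata(to_platform_device(dev));

    /* After: same pointer, fetched directly */
    struct qcom_iommu_dev *q = dev_get_drvdata(dev);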
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 5fc8656c60f9..0468acfa131f 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1098,7 +1098,7 @@ static int rk_iommu_of_xlate(struct device *dev,
data->iommu = platform_get_drvdata(iommu_dev);
dev->archdata.iommu = data;
- of_dev_put(iommu_dev);
+ platform_device_put(iommu_dev);
return 0;
}
@@ -1175,8 +1175,15 @@ static int rk_iommu_probe(struct platform_device *pdev)
for (i = 0; i < iommu->num_clocks; ++i)
iommu->clocks[i].id = rk_iommu_clocks[i];
+ /*
+ * iommu clocks should be present for all new devices and devicetrees
+ * but there are older devicetrees without clocks out in the wild.
+ * So treat clocks as optional for the time being.
+ */
err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
- if (err)
+ if (err == -ENOENT)
+ iommu->num_clocks = 0;
+ else if (err)
return err;
err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
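Mapping -ENOENT to num_clocks = 0 works because every clk_bulk_* helper takes the count as its first argument, so the rest of the driver needs no "do we have clocks?" branches; with a zero count the calls are successful no-ops:

    /* All return immediately / succeed trivially when the count is 0. */
    clk_bulk_prepare(0, NULL);
    clk_bulk_enable(0, NULL);
    clk_bulk_disable(0, NULL);
    clk_bulk_unprepare(0, NULL);

(Later kernels added devm_clk_bulk_get_optional() for exactly this situation.)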
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index b62f790ad1ba..89ec24c6952c 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -72,6 +72,8 @@ struct gart_domain {
static struct gart_device *gart_handle; /* unique for a system */
+static bool gart_debug;
+
#define GART_PTE(_pfn) \
(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
@@ -271,6 +273,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
struct gart_device *gart = gart_domain->gart;
unsigned long flags;
unsigned long pfn;
+ unsigned long pte;
if (!gart_iova_range_valid(gart, iova, bytes))
return -EINVAL;
@@ -282,6 +285,14 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
spin_unlock_irqrestore(&gart->pte_lock, flags);
return -EINVAL;
}
+ if (gart_debug) {
+ pte = gart_read_pte(gart, iova);
+ if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
+ spin_unlock_irqrestore(&gart->pte_lock, flags);
+ dev_err(gart->dev, "Page entry is in-use\n");
+ return -EBUSY;
+ }
+ }
gart_set_pte(gart, iova, GART_PTE(pfn));
FLUSH_GART_REGS(gart);
spin_unlock_irqrestore(&gart->pte_lock, flags);
@@ -302,7 +313,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
gart_set_pte(gart, iova, 0);
FLUSH_GART_REGS(gart);
spin_unlock_irqrestore(&gart->pte_lock, flags);
- return 0;
+ return bytes;
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
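Returning bytes instead of 0 matters because the generic unmap path reads the driver's return value as forward progress. Simplified from iommu_unmap() in this era's drivers/iommu/iommu.c:

    while (unmapped < size) {
    	size_t unmapped_page = domain->ops->unmap(domain, iova, pgsize);

    	if (!unmapped_page)
    		break;	/* 0 means "nothing unmapped" -- a failure */

    	iova += unmapped_page;
    	unmapped += unmapped_page;
    }

so the old "return 0" made every GART unmap look like an immediate failure.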
@@ -515,7 +526,9 @@ static void __exit tegra_gart_exit(void)
subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);
+module_param(gart_debug, bool, 0644);
+MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
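With the new gart_debug knob the in-use check above is opt-in. Since this driver is normally built in, the parameter would be given on the kernel command line, presumably as tegra_gart.gart_debug=1 (built-in parameters take the module-name prefix with dashes mapped to underscores), or flipped at runtime through /sys/module/tegra_gart/parameters/gart_debug thanks to the 0644 mode.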