| field | value | date |
|---|---|---|
| author | Steven L Kinney <Steven.Kinney@amd.com> | 2013-06-05 16:11:48 -0500 |
| committer | Ingo Molnar <mingo@kernel.org> | 2013-06-19 13:04:52 +0200 |
| commit | 30861ddc9cca479a7fc6a5efef4e5c69d6b274f4 (patch) | |
| tree | 69a11531b19c72b818c68a6b16e8854e563b7143 /drivers/iommu/amd_iommu_init.c | |
| parent | ae0def05ed856343181bf1eca4fab3e09056df6d (diff) | |
| download | linux-30861ddc9cca479a7fc6a5efef4e5c69d6b274f4.tar.bz2 | |
perf/x86/amd: Add IOMMU Performance Counter resource management
Add functionality to check the availability of the AMD IOMMU Performance
Counters and export it to other core drivers, such as, in this case, a perf
AMD IOMMU PMU. This feature is not bound to any specific AMD family/model
other than requiring an IOMMU with performance counters (P-C) enabled.
The AMD IOMMU P-C supports static counting only at this time.
Signed-off-by: Steven Kinney <steven.kinney@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1370466709-3212-2-git-send-email-suravee.suthikulpanit@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
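
As a rough sketch of how a consumer might use the interface this patch exports, the hypothetical snippet below (an assumption for illustration, not code from this patch or from the perf PMU driver) probes the counter resources via amd_iommu_pc_supported(), amd_iommu_pc_get_max_banks()/amd_iommu_pc_get_max_counters(), and then reads one counter through amd_iommu_pc_get_set_reg_val(). The extern declarations simply mirror the symbols exported in the diff below; example_probe_iommu_pc() and the devid value are made up for the example.

```c
/*
 * Hypothetical consumer sketch (not part of this patch): probe the
 * IOMMU performance-counter resources exported below.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

extern bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_banks(u16 devid);
extern u8 amd_iommu_pc_get_max_counters(u16 devid);
extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
					u64 *value, bool is_write);

static int example_probe_iommu_pc(u16 devid)
{
	u64 ctr = 0;
	int ret;

	if (!amd_iommu_pc_supported())
		return -ENODEV;

	pr_info("example: devid 0x%04x: %d bank(s) x %d counter(s)\n",
		devid, amd_iommu_pc_get_max_banks(devid),
		amd_iommu_pc_get_max_counters(devid));

	/* Read back bank 0, counter 0, register function 0 */
	ret = amd_iommu_pc_get_set_reg_val(devid, 0, 0, 0, &ctr, false);
	if (ret)
		return ret;

	pr_info("example: counter 0/0 reads 0x%016llx\n", ctr);

	return 0;
}
```

The same get/set helper is what init_iommu_perf_ctr() in the diff uses at boot to verify that the counters are actually writable before advertising P-C support.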
Diffstat (limited to 'drivers/iommu/amd_iommu_init.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | drivers/iommu/amd_iommu_init.c | 140 |

1 file changed, 131 insertions(+), 9 deletions(-)
```diff
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bf51abb78dee..7acbf351e9af 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -99,7 +99,7 @@ struct ivhd_header {
 	u64 mmio_phys;
 	u16 pci_seg;
 	u16 info;
-	u32 reserved;
+	u32 efr;
 } __attribute__((packed));
 
 /*
@@ -154,6 +154,7 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
 u32 amd_iommu_max_pasids __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
+bool amd_iommu_pc_present __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
@@ -369,23 +370,23 @@ static void iommu_disable(struct amd_iommu *iommu)
  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
  * the system has one.
  */
-static u8 __iomem * __init iommu_map_mmio_space(u64 address)
+static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
 {
-	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
-		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
-			address);
+	if (!request_mem_region(address, end, "amd_iommu")) {
+		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
+			address, end);
 		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
 		return NULL;
 	}
 
-	return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
+	return (u8 __iomem *)ioremap_nocache(address, end);
 }
 
 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
 {
 	if (iommu->mmio_base)
 		iounmap(iommu->mmio_base);
-	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
+	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
 }
 
 /****************************************************************************
@@ -1085,7 +1086,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	iommu->cap_ptr = h->cap_ptr;
 	iommu->pci_seg = h->pci_seg;
 	iommu->mmio_phys = h->mmio_phys;
-	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
+
+	/* Check if IVHD EFR contains proper max banks/counters */
+	if ((h->efr != 0) &&
+	    ((h->efr & (0xF << 13)) != 0) &&
+	    ((h->efr & (0x3F << 17)) != 0)) {
+		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+	} else {
+		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+	}
+
+	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
+						iommu->mmio_phys_end);
 	if (!iommu->mmio_base)
 		return -ENOMEM;
 
@@ -1160,6 +1172,33 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	return 0;
 }
 
+
+static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+{
+	u64 val = 0xabcd, val2 = 0;
+
+	if (!iommu_feature(iommu, FEATURE_PC))
+		return;
+
+	amd_iommu_pc_present = true;
+
+	/* Check if the performance counters can be written to */
+	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
+	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+	    (val != val2)) {
+		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
+		amd_iommu_pc_present = false;
+		return;
+	}
+
+	pr_info("AMD-Vi: IOMMU performance counters supported\n");
+
+	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
+	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
+	iommu->max_counters = (u8) ((val >> 7) & 0xf);
+}
+
+
 static int iommu_init_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
@@ -1226,6 +1265,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
 		amd_iommu_np_cache = true;
 
+	init_iommu_perf_ctr(iommu);
+
 	if (is_rd890_iommu(iommu->dev)) {
 		int i, j;
 
@@ -1278,7 +1319,7 @@ static void print_iommu_info(void)
 			if (iommu_feature(iommu, (1ULL << i)))
 				pr_cont(" %s", feat_str[i]);
 			}
-		pr_cont("\n");
+			pr_cont("\n");
 		}
 	}
 	if (irq_remapping_enabled)
@@ -2232,3 +2273,84 @@ bool amd_iommu_v2_supported(void)
 	return amd_iommu_v2_present;
 }
 EXPORT_SYMBOL(amd_iommu_v2_supported);
+
+/****************************************************************************
+ *
+ * IOMMU EFR Performance Counter support functionality. This code allows
+ * access to the IOMMU PC functionality.
+ *
+ ****************************************************************************/
+
+u8 amd_iommu_pc_get_max_banks(u16 devid)
+{
+	struct amd_iommu *iommu;
+	u8 ret = 0;
+
+	/* locate the iommu governing the devid */
+	iommu = amd_iommu_rlookup_table[devid];
+	if (iommu)
+		ret = iommu->max_banks;
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
+
+bool amd_iommu_pc_supported(void)
+{
+	return amd_iommu_pc_present;
+}
+EXPORT_SYMBOL(amd_iommu_pc_supported);
+
+u8 amd_iommu_pc_get_max_counters(u16 devid)
+{
+	struct amd_iommu *iommu;
+	u8 ret = 0;
+
+	/* locate the iommu governing the devid */
+	iommu = amd_iommu_rlookup_table[devid];
+	if (iommu)
+		ret = iommu->max_counters;
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
+
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+				    u64 *value, bool is_write)
+{
+	struct amd_iommu *iommu;
+	u32 offset;
+	u32 max_offset_lim;
+
+	/* Make sure the IOMMU PC resource is available */
+	if (!amd_iommu_pc_present)
+		return -ENODEV;
+
+	/* Locate the iommu associated with the device ID */
+	iommu = amd_iommu_rlookup_table[devid];
+
+	/* Check for valid iommu and pc register indexing */
+	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+		return -ENODEV;
+
+	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
+
+	/* Limit the offset to the hw defined mmio region aperture */
+	max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
+				(iommu->max_counters << 8) | 0x28);
+	if ((offset < MMIO_CNTR_REG_OFFSET) ||
+	    (offset > max_offset_lim))
+		return -EINVAL;
+
+	if (is_write) {
+		writel((u32)*value, iommu->mmio_base + offset);
+		writel((*value >> 32), iommu->mmio_base + offset + 4);
+	} else {
+		*value = readl(iommu->mmio_base + offset + 4);
+		*value <<= 32;
+		*value = readl(iommu->mmio_base + offset);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
```
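
The bank/counter/function triple accepted by amd_iommu_pc_get_set_reg_val() is packed into an MMIO offset as ((0x40 | bank) << 12) | (cntr << 8) | fxn, so bank 0 / counter 0 / function 0 lands at 0x40000, which is exactly the lower bound the code enforces through MMIO_CNTR_REG_OFFSET. The standalone snippet below is only an illustration of that arithmetic, not kernel code; the sample bank/counter/function values are arbitrary.

```c
/*
 * Illustration (not from the patch) of the MMIO offset arithmetic used by
 * amd_iommu_pc_get_set_reg_val().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t iommu_pc_offset(uint8_t bank, uint8_t cntr, uint8_t fxn)
{
	/* Same formula as the patch: ((0x40 | bank) << 12) | (cntr << 8) | fxn */
	return ((uint32_t)(0x40 | bank) << 12) | ((uint32_t)cntr << 8) | fxn;
}

int main(void)
{
	/* Bank 0, counter 0, function 0x00 -> 0x40000, the start of the
	 * counter aperture (the MMIO_CNTR_REG_OFFSET lower bound). */
	printf("0x%05x\n", iommu_pc_offset(0, 0, 0x00));

	/* Bank 1, counter 2, function 0x08 -> 0x41208 */
	printf("0x%05x\n", iommu_pc_offset(1, 2, 0x08));

	return 0;
}
```

Each additional bank therefore moves the offset by 0x1000 and each additional counter by 0x100, which matches the max_offset_lim clamp computed from iommu->max_banks and iommu->max_counters in the function above.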