From a2daa27c0c6137481226aee5b3136e453c642929 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Feb 2022 11:44:42 +0100 Subject: swiotlb: simplify swiotlb_max_segment Remove the bogus Xen override that was usually larger than the actual size and just calculate the value on demand. Note that swiotlb_max_segment still doesn't make sense as an interface and should eventually be removed. Signed-off-by: Christoph Hellwig Reviewed-by: Anshuman Khandual Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- include/linux/swiotlb.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index f6c3638255d5..9fb3a568f0c5 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -164,7 +164,6 @@ static inline void swiotlb_adjust_size(unsigned long size) #endif /* CONFIG_SWIOTLB */ extern void swiotlb_print_info(void); -extern void swiotlb_set_max_segment(unsigned int); #ifdef CONFIG_DMA_RESTRICTED_POOL struct page *swiotlb_alloc(struct device *dev, size_t size); -- cgit v1.2.3 From 0d5ffd9a256d8995764f9d4a35a8c3917839d169 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Feb 2022 11:07:28 +0100 Subject: swiotlb: rename swiotlb_late_init_with_default_size swiotlb_late_init_with_default_size is an overly verbose name that doesn't even catch what the function is doing, given that the size is not just a default but the actual requested size. Rename it to swiotlb_init_late. Signed-off-by: Christoph Hellwig Reviewed-by: Anshuman Khandual Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- arch/x86/pci/sta2x11-fixup.c | 2 +- include/linux/swiotlb.h | 2 +- kernel/dma/swiotlb.c | 6 ++---- 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 101081ad64b6..e0c039a75b2d 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev) int size = STA2X11_SWIOTLB_SIZE; /* First instance: register your own swiotlb area */ dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size); - if (swiotlb_late_init_with_default_size(size)) + if (swiotlb_init_late(size)) dev_emerg(&pdev->dev, "init swiotlb failed\n"); } list_add(&instance->list, &sta2x11_instance_list); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 9fb3a568f0c5..b48b26bfa0ed 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -40,7 +40,7 @@ extern void swiotlb_init(int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); -extern int swiotlb_late_init_with_default_size(size_t default_size); +int swiotlb_init_late(size_t size); extern void __init swiotlb_update_mem_attributes(void); phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index e0127e397335..9a4fe6e48a07 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -281,11 +281,9 @@ fail: * initialize the swiotlb later using the slab allocator if needed. * This should be just like above, but with some error catching. 
*/ -int -swiotlb_late_init_with_default_size(size_t default_size) +int swiotlb_init_late(size_t size) { - unsigned long nslabs = - ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); + unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); unsigned long bytes; unsigned char *vstart = NULL; unsigned int order; -- cgit v1.2.3 From 78013eaadf696d2105982abb4018fbae394ca08f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Feb 2022 14:11:44 +0100 Subject: x86: remove the IOMMU table infrastructure The IOMMU table tries to separate the different IOMMUs into different backends, but actually requires various cross calls. Rewrite the code to do the generic swiotlb/swiotlb-xen setup directly in pci-dma.c and then just call into the IOMMU drivers. Signed-off-by: Christoph Hellwig Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- arch/ia64/include/asm/iommu_table.h | 7 --- arch/x86/include/asm/dma-mapping.h | 1 - arch/x86/include/asm/gart.h | 5 +- arch/x86/include/asm/iommu.h | 6 ++ arch/x86/include/asm/iommu_table.h | 102 ------------------------------- arch/x86/include/asm/swiotlb.h | 30 --------- arch/x86/include/asm/xen/swiotlb-xen.h | 2 - arch/x86/kernel/Makefile | 2 - arch/x86/kernel/amd_gart_64.c | 5 +- arch/x86/kernel/aperture_64.c | 14 ++--- arch/x86/kernel/pci-dma.c | 107 ++++++++++++++++++++++++++------- arch/x86/kernel/pci-iommu_table.c | 77 ------------------------ arch/x86/kernel/pci-swiotlb.c | 77 ------------------------ arch/x86/kernel/tboot.c | 1 - arch/x86/kernel/vmlinux.lds.S | 12 ---- arch/x86/xen/Makefile | 2 - arch/x86/xen/pci-swiotlb-xen.c | 96 ----------------------------- drivers/iommu/amd/init.c | 6 -- drivers/iommu/amd/iommu.c | 5 +- drivers/iommu/intel/dmar.c | 6 +- include/linux/dmar.h | 6 +- 21 files changed, 110 insertions(+), 459 deletions(-) delete mode 100644 arch/ia64/include/asm/iommu_table.h delete mode 100644 arch/x86/include/asm/iommu_table.h delete mode 100644 arch/x86/include/asm/swiotlb.h delete mode 100644 arch/x86/kernel/pci-iommu_table.c delete mode 100644 arch/x86/kernel/pci-swiotlb.c delete mode 100644 arch/x86/xen/pci-swiotlb-xen.c (limited to 'include') diff --git a/arch/ia64/include/asm/iommu_table.h b/arch/ia64/include/asm/iommu_table.h deleted file mode 100644 index cc96116ac276..000000000000 --- a/arch/ia64/include/asm/iommu_table.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_IA64_IOMMU_TABLE_H -#define _ASM_IA64_IOMMU_TABLE_H - -#define IOMMU_INIT_POST(_detect) - -#endif /* _ASM_IA64_IOMMU_TABLE_H */ diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index bb1654fe0ce7..256fd8115223 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -9,7 +9,6 @@ #include #include -#include extern int iommu_merge; extern int panic_on_overflow; diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 318556574345..5af8088a10df 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h @@ -38,7 +38,7 @@ extern int gart_iommu_aperture_disabled; extern void early_gart_iommu_check(void); extern int gart_iommu_init(void); extern void __init gart_parse_options(char *); -extern int gart_iommu_hole_init(void); +void gart_iommu_hole_init(void); #else #define gart_iommu_aperture 0 @@ -51,9 +51,8 @@ static inline void early_gart_iommu_check(void) static inline void gart_parse_options(char *options) { } -static inline int gart_iommu_hole_init(void) +static inline void 
gart_iommu_hole_init(void) { - return -ENODEV; } #endif diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index bf1ed2ddc74b..dba89ed40d38 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -9,6 +9,12 @@ extern int force_iommu, no_iommu; extern int iommu_detected; +#ifdef CONFIG_SWIOTLB +extern bool x86_swiotlb_enable; +#else +#define x86_swiotlb_enable false +#endif + /* 10 seconds */ #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h deleted file mode 100644 index 1fb3fd1a83c2..000000000000 --- a/arch/x86/include/asm/iommu_table.h +++ /dev/null @@ -1,102 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_IOMMU_TABLE_H -#define _ASM_X86_IOMMU_TABLE_H - -#include - -/* - * History lesson: - * The execution chain of IOMMUs in 2.6.36 looks as so: - * - * [xen-swiotlb] - * | - * +----[swiotlb *]--+ - * / | \ - * / | \ - * [GART] [Calgary] [Intel VT-d] - * / - * / - * [AMD-Vi] - * - * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip - * over the rest of IOMMUs and unconditionally initialize the SWIOTLB. - * Also it would surreptitiously initialize set the swiotlb=1 if there were - * more than 4GB and if the user did not pass in 'iommu=off'. The swiotlb - * flag would be turned off by all IOMMUs except the Calgary one. - * - * The IOMMU_INIT* macros allow a similar tree (or more complex if desired) - * to be built by defining who we depend on. - * - * And all that needs to be done is to use one of the macros in the IOMMU - * and the pci-dma.c will take care of the rest. - */ - -struct iommu_table_entry { - initcall_t detect; - initcall_t depend; - void (*early_init)(void); /* No memory allocate available. */ - void (*late_init)(void); /* Yes, can allocate memory. */ -#define IOMMU_FINISH_IF_DETECTED (1<<0) -#define IOMMU_DETECTED (1<<1) - int flags; -}; -/* - * Macro fills out an entry in the .iommu_table that is equivalent - * to the fields that 'struct iommu_table_entry' has. The entries - * that are put in the .iommu_table section are not put in any order - * hence during boot-time we will have to resort them based on - * dependency. */ - - -#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\ - static const struct iommu_table_entry \ - __iommu_entry_##_detect __used \ - __attribute__ ((unused, __section__(".iommu_table"), \ - aligned((sizeof(void *))))) \ - = {_detect, _depend, _early_init, _late_init, \ - _finish ? IOMMU_FINISH_IF_DETECTED : 0} -/* - * The simplest IOMMU definition. Provide the detection routine - * and it will be run after the SWIOTLB and the other IOMMUs - * that utilize this macro. If the IOMMU is detected (ie, the - * detect routine returns a positive value), the other IOMMUs - * are also checked. You can use IOMMU_INIT_POST_FINISH if you prefer - * to stop detecting the other IOMMUs after yours has been detected. - */ -#define IOMMU_INIT_POST(_detect) \ - __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 0) - -#define IOMMU_INIT_POST_FINISH(detect) \ - __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 1) - -/* - * A more sophisticated version of IOMMU_INIT. This variant requires: - * a). A detection routine function. - * b). The name of the detection routine we depend on to get called - * before us. - * c). The init routine which gets called if the detection routine - * returns a positive value from the pci_iommu_alloc. 
This means - * no presence of a memory allocator. - * d). Similar to the 'init', except that this gets called from pci_iommu_init - * where we do have a memory allocator. - * - * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant - * in that the former will continue detecting other IOMMUs in the call - * list after the detection routine returns a positive number, while the - * latter will stop the execution chain upon first successful detection. - * Both variants will still call the 'init' and 'late_init' functions if - * they are set. - */ -#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init) \ - __IOMMU_INIT(_detect, _depend, _init, _late_init, 1) - -#define IOMMU_INIT(_detect, _depend, _init, _late_init) \ - __IOMMU_INIT(_detect, _depend, _init, _late_init, 0) - -void sort_iommu_table(struct iommu_table_entry *start, - struct iommu_table_entry *finish); - -void check_iommu_entries(struct iommu_table_entry *start, - struct iommu_table_entry *finish); - -#endif /* _ASM_X86_IOMMU_TABLE_H */ diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h deleted file mode 100644 index ff6c92eff035..000000000000 --- a/arch/x86/include/asm/swiotlb.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_SWIOTLB_H -#define _ASM_X86_SWIOTLB_H - -#include - -#ifdef CONFIG_SWIOTLB -extern int swiotlb; -extern int __init pci_swiotlb_detect_override(void); -extern int __init pci_swiotlb_detect_4gb(void); -extern void __init pci_swiotlb_init(void); -extern void __init pci_swiotlb_late_init(void); -#else -#define swiotlb 0 -static inline int pci_swiotlb_detect_override(void) -{ - return 0; -} -static inline int pci_swiotlb_detect_4gb(void) -{ - return 0; -} -static inline void pci_swiotlb_init(void) -{ -} -static inline void pci_swiotlb_late_init(void) -{ -} -#endif -#endif /* _ASM_X86_SWIOTLB_H */ diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h index 66b4ddde7743..e5a90b42e4dd 100644 --- a/arch/x86/include/asm/xen/swiotlb-xen.h +++ b/arch/x86/include/asm/xen/swiotlb-xen.h @@ -3,10 +3,8 @@ #define _ASM_X86_SWIOTLB_XEN_H #ifdef CONFIG_SWIOTLB_XEN -extern int __init pci_xen_swiotlb_detect(void); extern int pci_xen_swiotlb_init_late(void); #else -#define pci_xen_swiotlb_detect NULL static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; } #endif diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index c41ef42adbe8..e17b7e92a3fa 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -68,7 +68,6 @@ obj-y += bootflag.o e820.o obj-y += pci-dma.o quirks.o topology.o kdebugfs.o obj-y += alternative.o i8253.o hw_breakpoint.o obj-y += tsc.o tsc_msr.o io_delay.o rtc.o -obj-y += pci-iommu_table.o obj-y += resource.o obj-y += irqflags.o obj-y += static_call.o @@ -134,7 +133,6 @@ obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o -obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o obj-$(CONFIG_OF) += devicetree.o obj-$(CONFIG_UPROBES) += uprobes.o diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index ed837383de5c..194d54eed537 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -38,11 +38,9 @@ #include #include #include -#include #include #include #include -#include static unsigned long iommu_bus_base; /* GART remapping area (physical) */ static unsigned long iommu_size; /* size of remapping area bytes */ @@ -808,7 +806,7 @@ int __init gart_iommu_init(void) 
flush_gart(); dma_ops = &gart_dma_ops; x86_platform.iommu_shutdown = gart_iommu_shutdown; - swiotlb = 0; + x86_swiotlb_enable = false; return 0; } @@ -842,4 +840,3 @@ void __init gart_parse_options(char *p) } } } -IOMMU_INIT_POST(gart_iommu_hole_init); diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index af3ba08b684b..7a5630d904b2 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -392,7 +392,7 @@ void __init early_gart_iommu_check(void) static int __initdata printed_gart_size_msg; -int __init gart_iommu_hole_init(void) +void __init gart_iommu_hole_init(void) { u32 agp_aper_base = 0, agp_aper_order = 0; u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0; @@ -401,11 +401,11 @@ int __init gart_iommu_hole_init(void) int i, node; if (!amd_gart_present()) - return -ENODEV; + return; if (gart_iommu_aperture_disabled || !fix_aperture || !early_pci_allowed()) - return -ENODEV; + return; pr_info("Checking aperture...\n"); @@ -491,10 +491,8 @@ out: * and fixed up the northbridge */ exclude_from_core(last_aper_base, last_aper_order); - - return 1; } - return 0; + return; } if (!fallback_aper_force) { @@ -527,7 +525,7 @@ out: panic("Not enough memory for aperture"); } } else { - return 0; + return; } /* @@ -561,6 +559,4 @@ out: } set_up_gart_resume(aper_order, aper_alloc); - - return 1; } diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index de234e7a8962..df96926421be 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -7,13 +7,16 @@ #include #include #include +#include #include #include #include #include #include -#include + +#include +#include static bool disable_dac_quirk __read_mostly; @@ -34,24 +37,83 @@ int no_iommu __read_mostly; /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly = 0; -extern struct iommu_table_entry __iommu_table[], __iommu_table_end[]; +#ifdef CONFIG_SWIOTLB +bool x86_swiotlb_enable; + +static void __init pci_swiotlb_detect(void) +{ + /* don't initialize swiotlb if iommu=off (no_iommu=1) */ + if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN) + x86_swiotlb_enable = true; + + /* + * Set swiotlb to 1 so that bounce buffers are allocated and used for + * devices that can't support DMA to encrypted memory. + */ + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) + x86_swiotlb_enable = true; + + if (swiotlb_force == SWIOTLB_FORCE) + x86_swiotlb_enable = true; +} +#else +static inline void __init pci_swiotlb_detect(void) +{ +} +#endif /* CONFIG_SWIOTLB */ + +#ifdef CONFIG_SWIOTLB_XEN +static bool xen_swiotlb; + +static void __init pci_xen_swiotlb_init(void) +{ + if (!xen_initial_domain() && !x86_swiotlb_enable && + swiotlb_force != SWIOTLB_FORCE) + return; + x86_swiotlb_enable = true; + xen_swiotlb = true; + xen_swiotlb_init_early(); + dma_ops = &xen_swiotlb_dma_ops; + if (IS_ENABLED(CONFIG_PCI)) + pci_request_acs(); +} + +int pci_xen_swiotlb_init_late(void) +{ + int rc; + + if (xen_swiotlb) + return 0; + + rc = xen_swiotlb_init(); + if (rc) + return rc; + + /* XXX: this switches the dma ops under live devices! 
*/ + dma_ops = &xen_swiotlb_dma_ops; + if (IS_ENABLED(CONFIG_PCI)) + pci_request_acs(); + return 0; +} +EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late); +#else +static inline void __init pci_xen_swiotlb_init(void) +{ +} +#endif /* CONFIG_SWIOTLB_XEN */ void __init pci_iommu_alloc(void) { - struct iommu_table_entry *p; - - sort_iommu_table(__iommu_table, __iommu_table_end); - check_iommu_entries(__iommu_table, __iommu_table_end); - - for (p = __iommu_table; p < __iommu_table_end; p++) { - if (p && p->detect && p->detect() > 0) { - p->flags |= IOMMU_DETECTED; - if (p->early_init) - p->early_init(); - if (p->flags & IOMMU_FINISH_IF_DETECTED) - break; - } + if (xen_pv_domain()) { + pci_xen_swiotlb_init(); + return; } + pci_swiotlb_detect(); + gart_iommu_hole_init(); + amd_iommu_detect(); + detect_intel_iommu(); + if (x86_swiotlb_enable) + swiotlb_init(0); } /* @@ -102,7 +164,7 @@ static __init int iommu_setup(char *p) } #ifdef CONFIG_SWIOTLB if (!strncmp(p, "soft", 4)) - swiotlb = 1; + x86_swiotlb_enable = true; #endif if (!strncmp(p, "pt", 2)) iommu_set_default_passthrough(true); @@ -121,14 +183,17 @@ early_param("iommu", iommu_setup); static int __init pci_iommu_init(void) { - struct iommu_table_entry *p; - x86_init.iommu.iommu_init(); - for (p = __iommu_table; p < __iommu_table_end; p++) { - if (p && (p->flags & IOMMU_DETECTED) && p->late_init) - p->late_init(); +#ifdef CONFIG_SWIOTLB + /* An IOMMU turned us off. */ + if (x86_swiotlb_enable) { + pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); + swiotlb_print_info(); + } else { + swiotlb_exit(); } +#endif return 0; } diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c deleted file mode 100644 index 42e92ec62973..000000000000 --- a/arch/x86/kernel/pci-iommu_table.c +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include - -static struct iommu_table_entry * __init -find_dependents_of(struct iommu_table_entry *start, - struct iommu_table_entry *finish, - struct iommu_table_entry *q) -{ - struct iommu_table_entry *p; - - if (!q) - return NULL; - - for (p = start; p < finish; p++) - if (p->detect == q->depend) - return p; - - return NULL; -} - - -void __init sort_iommu_table(struct iommu_table_entry *start, - struct iommu_table_entry *finish) { - - struct iommu_table_entry *p, *q, tmp; - - for (p = start; p < finish; p++) { -again: - q = find_dependents_of(start, finish, p); - /* We are bit sneaky here. We use the memory address to figure - * out if the node we depend on is past our point, if so, swap. - */ - if (q > p) { - tmp = *p; - memmove(p, q, sizeof(*p)); - *q = tmp; - goto again; - } - } - -} - -#ifdef DEBUG -void __init check_iommu_entries(struct iommu_table_entry *start, - struct iommu_table_entry *finish) -{ - struct iommu_table_entry *p, *q, *x; - - /* Simple cyclic dependency checker. */ - for (p = start; p < finish; p++) { - q = find_dependents_of(start, finish, p); - x = find_dependents_of(start, finish, q); - if (p == x) { - printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n", - p->detect, q->detect); - /* Heavy handed way..*/ - x->depend = NULL; - } - } - - for (p = start; p < finish; p++) { - q = find_dependents_of(p, finish, p); - if (q && q > p) { - printk(KERN_ERR "EXECUTION ORDER INVALID! 
%pS should be called before %pS!\n", - p->detect, q->detect); - } - } -} -#else -void __init check_iommu_entries(struct iommu_table_entry *start, - struct iommu_table_entry *finish) -{ -} -#endif diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c deleted file mode 100644 index 814ab46a0dad..000000000000 --- a/arch/x86/kernel/pci-swiotlb.c +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -int swiotlb __read_mostly; - -/* - * pci_swiotlb_detect_override - set swiotlb to 1 if necessary - * - * This returns non-zero if we are forced to use swiotlb (by the boot - * option). - */ -int __init pci_swiotlb_detect_override(void) -{ - if (swiotlb_force == SWIOTLB_FORCE) - swiotlb = 1; - - return swiotlb; -} -IOMMU_INIT_FINISH(pci_swiotlb_detect_override, - pci_xen_swiotlb_detect, - pci_swiotlb_init, - pci_swiotlb_late_init); - -/* - * If 4GB or more detected (and iommu=off not set) or if SME is active - * then set swiotlb to 1 and return 1. - */ -int __init pci_swiotlb_detect_4gb(void) -{ - /* don't initialize swiotlb if iommu=off (no_iommu=1) */ - if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN) - swiotlb = 1; - - /* - * Set swiotlb to 1 so that bounce buffers are allocated and used for - * devices that can't support DMA to encrypted memory. - */ - if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) - swiotlb = 1; - - return swiotlb; -} -IOMMU_INIT(pci_swiotlb_detect_4gb, - pci_swiotlb_detect_override, - pci_swiotlb_init, - pci_swiotlb_late_init); - -void __init pci_swiotlb_init(void) -{ - if (swiotlb) - swiotlb_init(0); -} - -void __init pci_swiotlb_late_init(void) -{ - /* An IOMMU turned us off. */ - if (!swiotlb) - swiotlb_exit(); - else { - printk(KERN_INFO "PCI-DMA: " - "Using software bounce buffering for IO (SWIOTLB)\n"); - swiotlb_print_info(); - } -} diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index f9af561c3cd4..0c1154a1c403 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 7fda7f27e762..f5f6dc2e8007 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -315,18 +315,6 @@ SECTIONS *(.altinstr_replacement) } - /* - * struct iommu_table_entry entries are injected in this section. - * It is an array of IOMMUs which during run time gets sorted depending - * on its dependency order. After rootfs_initcall is complete - * this section can be safely removed. - */ - .iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) { - __iommu_table = .; - *(.iommu_table) - __iommu_table_end = .; - } - . 
= ALIGN(8); .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) { __apicdrivers = .; diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 4953260e281c..3c5b52fbe4a7 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -47,6 +47,4 @@ obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o obj-$(CONFIG_XEN_PV_DOM0) += vga.o -obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o - obj-$(CONFIG_XEN_EFI) += efi.o diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c deleted file mode 100644 index 46df59aeaa06..000000000000 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ /dev/null @@ -1,96 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* Glue code to lib/swiotlb-xen.c */ - -#include -#include -#include - -#include -#include -#include - - -#include -#ifdef CONFIG_X86_64 -#include -#include -#endif -#include - -static int xen_swiotlb __read_mostly; - -/* - * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary - * - * This returns non-zero if we are forced to use xen_swiotlb (by the boot - * option). - */ -int __init pci_xen_swiotlb_detect(void) -{ - - if (!xen_pv_domain()) - return 0; - - /* If running as PV guest, either iommu=soft, or swiotlb=force will - * activate this IOMMU. If running as PV privileged, activate it - * irregardless. - */ - if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE) - xen_swiotlb = 1; - - /* If we are running under Xen, we MUST disable the native SWIOTLB. - * Don't worry about swiotlb_force flag activating the native, as - * the 'swiotlb' flag is the only one turning it on. */ - swiotlb = 0; - -#ifdef CONFIG_X86_64 - /* pci_swiotlb_detect_4gb turns on native SWIOTLB if no_iommu == 0 - * (so no iommu=X command line over-writes). - * Considering that PV guests do not want the *native SWIOTLB* but - * only Xen SWIOTLB it is not useful to us so set no_iommu=1 here. - */ - if (max_pfn > MAX_DMA32_PFN) - no_iommu = 1; -#endif - return xen_swiotlb; -} - -static void __init pci_xen_swiotlb_init(void) -{ - if (xen_swiotlb) { - xen_swiotlb_init_early(); - dma_ops = &xen_swiotlb_dma_ops; - -#ifdef CONFIG_PCI - /* Make sure ACS will be enabled */ - pci_request_acs(); -#endif - } -} - -int pci_xen_swiotlb_init_late(void) -{ - int rc; - - if (xen_swiotlb) - return 0; - - rc = xen_swiotlb_init(); - if (rc) - return rc; - - dma_ops = &xen_swiotlb_dma_ops; -#ifdef CONFIG_PCI - /* Make sure ACS will be enabled */ - pci_request_acs(); -#endif - - return 0; -} -EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late); - -IOMMU_INIT_FINISH(pci_xen_swiotlb_detect, - NULL, - pci_xen_swiotlb_init, - NULL); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index b4a798c7b347..1a3ad58ba846 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -3257,11 +3256,6 @@ __setup("ivrs_ioapic", parse_ivrs_ioapic); __setup("ivrs_hpet", parse_ivrs_hpet); __setup("ivrs_acpihid", parse_ivrs_acpihid); -IOMMU_INIT_FINISH(amd_iommu_detect, - gart_iommu_hole_init, - NULL, - NULL); - bool amd_iommu_v2_supported(void) { return amd_iommu_v2_present; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index a1ada7bff44e..b47220ac09ea 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -1840,7 +1840,10 @@ void amd_iommu_domain_update(struct protection_domain *domain) static void __init amd_iommu_init_dma_ops(void) { - swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 
1 : 0; + if (iommu_default_passthrough() || sme_me_mask) + x86_swiotlb_enable = true; + else + x86_swiotlb_enable = false; } int __init amd_iommu_init_api(void) diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 4de960834a1b..592c1e1a5d4b 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include "../irq_remapping.h" @@ -912,7 +911,7 @@ dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg) return 0; } -int __init detect_intel_iommu(void) +void __init detect_intel_iommu(void) { int ret; struct dmar_res_callback validate_drhd_cb = { @@ -945,8 +944,6 @@ int __init detect_intel_iommu(void) dmar_tbl = NULL; } up_write(&dmar_global_lock); - - return ret ? ret : 1; } static void unmap_iommu(struct intel_iommu *iommu) @@ -2164,7 +2161,6 @@ static int __init dmar_free_unused_resources(void) } late_initcall(dmar_free_unused_resources); -IOMMU_INIT_POST(detect_intel_iommu); /* * DMAR Hotplug Support diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 45e903d84733..cbd714a198a0 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -121,7 +121,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, struct dmar_dev_scope *devices, int count); /* Intel IOMMU detection */ -extern int detect_intel_iommu(void); +void detect_intel_iommu(void); extern int enable_drhd_fault_handling(void); extern int dmar_device_add(acpi_handle handle); extern int dmar_device_remove(acpi_handle handle); @@ -197,6 +197,10 @@ static inline bool dmar_platform_optin(void) return false; } +static inline void detect_intel_iommu(void) +{ +} + #endif /* CONFIG_DMAR_TABLE */ struct irte { -- cgit v1.2.3 From c6af2aa9ffc9763826607bc2664ef3ea4475ed18 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 29 Mar 2022 17:27:33 +0200 Subject: swiotlb: make the swiotlb_init interface more useful Pass a boolean flag to indicate if swiotlb needs to be enabled based on the addressing needs, and replace the verbose argument with a set of flags, including one to force enable bounce buffering. Note that this patch removes the possibility to force xen-swiotlb use with the swiotlb=force parameter on the command line on x86 (arm and arm64 never supported that), but this interface will be restored shortly. 
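For orientation, a minimal sketch of the reworked interface, distilled from the arch conversions in this patch (the calls below mirror the riscv/arm64 and s390 hunks; nothing beyond what the diff itself introduces): the bool states whether the architecture's DMA addressing limits require bounce buffering, and the flags word replaces the old verbose argument.

	/* common case: enable swiotlb only when memory exceeds the DMA limit */
	swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);

	/* protected-guest case (s390 PV): unconditionally enable and force bouncing */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);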
Signed-off-by: Christoph Hellwig Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- arch/arm/mm/init.c | 6 +----- arch/arm64/mm/init.c | 6 +----- arch/ia64/mm/init.c | 4 +--- arch/mips/cavium-octeon/dma-octeon.c | 2 +- arch/mips/loongson64/dma.c | 2 +- arch/mips/sibyte/common/dma.c | 2 +- arch/powerpc/mm/mem.c | 3 ++- arch/powerpc/platforms/pseries/setup.c | 3 --- arch/riscv/mm/init.c | 8 +------- arch/s390/mm/init.c | 3 +-- arch/x86/kernel/pci-dma.c | 15 +++++++-------- drivers/xen/swiotlb-xen.c | 4 ++-- include/linux/swiotlb.h | 15 +++++++-------- include/trace/events/swiotlb.h | 29 ++++++++++------------------ kernel/dma/swiotlb.c | 35 ++++++++++++++++++---------------- 15 files changed, 55 insertions(+), 82 deletions(-) (limited to 'include') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index fe249ea91908..ce64bdb55a16 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -271,11 +271,7 @@ static void __init free_highpages(void) void __init mem_init(void) { #ifdef CONFIG_ARM_LPAE - if (swiotlb_force == SWIOTLB_FORCE || - max_pfn > arm_dma_pfn_limit) - swiotlb_init(1); - else - swiotlb_force = SWIOTLB_NO_FORCE; + swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE); #endif set_max_mapnr(pfn_to_page(max_pfn) - mem_map); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1e7b1550e2fc..bd4095b7fb40 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -398,11 +398,7 @@ void __init bootmem_init(void) */ void __init mem_init(void) { - if (swiotlb_force == SWIOTLB_FORCE || - max_pfn > PFN_DOWN(arm64_dma_phys_limit)) - swiotlb_init(1); - else if (!xen_swiotlb_detect()) - swiotlb_force = SWIOTLB_NO_FORCE; + swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE); /* this will put all unused low memory onto the freelists */ memblock_free_all(); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 5d165607bf35..3c3e15b22608 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -437,9 +437,7 @@ mem_init (void) if (iommu_detected) break; #endif -#ifdef CONFIG_SWIOTLB - swiotlb_init(1); -#endif + swiotlb_init(true, SWIOTLB_VERBOSE); } while (0); #ifdef CONFIG_FLATMEM diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index fb7547e21726..9fbba6a8fa4c 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -235,5 +235,5 @@ void __init plat_swiotlb_setup(void) #endif swiotlb_adjust_size(swiotlbsize); - swiotlb_init(1); + swiotlb_init(true, SWIOTLB_VERBOSE); } diff --git a/arch/mips/loongson64/dma.c b/arch/mips/loongson64/dma.c index 364f2f27c872..8220a1bc0db6 100644 --- a/arch/mips/loongson64/dma.c +++ b/arch/mips/loongson64/dma.c @@ -24,5 +24,5 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) void __init plat_swiotlb_setup(void) { - swiotlb_init(1); + swiotlb_init(true, SWIOTLB_VERBOSE); } diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c index eb47a94f3583..c5c2c782aff6 100644 --- a/arch/mips/sibyte/common/dma.c +++ b/arch/mips/sibyte/common/dma.c @@ -10,5 +10,5 @@ void __init plat_swiotlb_setup(void) { - swiotlb_init(1); + swiotlb_init(true, SWIOTLB_VERBOSE); } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 4d221d033804..74ca516c3e7e 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -251,7 +252,7 @@ void __init mem_init(void) if (is_secure_guest()) svm_swiotlb_init(); else - 
swiotlb_init(0); + swiotlb_init(ppc_swiotlb_enable, 0); #endif high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 955ff8aa1644..0f74b2284773 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -849,9 +849,6 @@ static void __init pSeries_setup_arch(void) } ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare; - - if (swiotlb_force == SWIOTLB_FORCE) - ppc_swiotlb_enable = 1; } static void pseries_panic(char *str) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 9535bea8688c..181ffd322eaf 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -120,13 +120,7 @@ void __init mem_init(void) BUG_ON(!mem_map); #endif /* CONFIG_FLATMEM */ -#ifdef CONFIG_SWIOTLB - if (swiotlb_force == SWIOTLB_FORCE || - max_pfn > PFN_DOWN(dma32_phys_limit)) - swiotlb_init(1); - else - swiotlb_force = SWIOTLB_NO_FORCE; -#endif + swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE); memblock_free_all(); print_vm_layout(); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 86ffd0d51fd5..6fb6bf64326f 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -185,8 +185,7 @@ static void pv_init(void) return; /* make sure bounce buffers are shared */ - swiotlb_force = SWIOTLB_FORCE; - swiotlb_init(1); + swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE); swiotlb_update_mem_attributes(); } diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 04140e20ef1a..a705a199bf8a 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -39,6 +39,7 @@ int iommu_detected __read_mostly = 0; #ifdef CONFIG_SWIOTLB bool x86_swiotlb_enable; +static unsigned int x86_swiotlb_flags; static void __init pci_swiotlb_detect(void) { @@ -58,16 +59,16 @@ static void __init pci_swiotlb_detect(void) * bounce buffers as the hypervisor can't access arbitrary VM memory * that is not explicitly shared with it. */ - if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) - swiotlb_force = SWIOTLB_FORCE; - - if (swiotlb_force == SWIOTLB_FORCE) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { x86_swiotlb_enable = true; + x86_swiotlb_flags |= SWIOTLB_FORCE; + } } #else static inline void __init pci_swiotlb_detect(void) { } +#define x86_swiotlb_flags 0 #endif /* CONFIG_SWIOTLB */ #ifdef CONFIG_SWIOTLB_XEN @@ -75,8 +76,7 @@ static bool xen_swiotlb; static void __init pci_xen_swiotlb_init(void) { - if (!xen_initial_domain() && !x86_swiotlb_enable && - swiotlb_force != SWIOTLB_FORCE) + if (!xen_initial_domain() && !x86_swiotlb_enable) return; x86_swiotlb_enable = true; xen_swiotlb = true; @@ -120,8 +120,7 @@ void __init pci_iommu_alloc(void) gart_iommu_hole_init(); amd_iommu_detect(); detect_intel_iommu(); - if (x86_swiotlb_enable) - swiotlb_init(0); + swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags); } /* diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 485cd06ed39e..c2da3eb4826e 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -251,7 +251,7 @@ retry: panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc); } - if (swiotlb_init_with_tbl(start, nslabs, true)) + if (swiotlb_init_with_tbl(start, nslabs, SWIOTLB_VERBOSE)) panic("Cannot allocate SWIOTLB buffer"); } #endif /* CONFIG_X86 */ @@ -376,7 +376,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, /* * Oh well, have to allocate and map a bounce buffer. 
*/ - trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); + trace_swiotlb_bounced(dev, dev_addr, size); map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs); if (map == (phys_addr_t)DMA_MAPPING_ERROR) diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index b48b26bfa0ed..ae0407173e84 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -13,11 +13,8 @@ struct device; struct page; struct scatterlist; -enum swiotlb_force { - SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */ - SWIOTLB_FORCE, /* swiotlb=force */ - SWIOTLB_NO_FORCE, /* swiotlb=noforce */ -}; +#define SWIOTLB_VERBOSE (1 << 0) /* verbose initialization */ +#define SWIOTLB_FORCE (1 << 1) /* force bounce buffering */ /* * Maximum allowable number of contiguous slabs to map, @@ -36,8 +33,7 @@ enum swiotlb_force { /* default to 64MB */ #define IO_TLB_DEFAULT_SIZE (64UL<<20) -extern void swiotlb_init(int verbose); -int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); +int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags); unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); int swiotlb_init_late(size_t size); @@ -126,13 +122,16 @@ static inline bool is_swiotlb_force_bounce(struct device *dev) return mem && mem->force_bounce; } +void swiotlb_init(bool addressing_limited, unsigned int flags); void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); bool is_swiotlb_active(struct device *dev); void __init swiotlb_adjust_size(unsigned long size); #else -#define swiotlb_force SWIOTLB_NO_FORCE +static inline void swiotlb_init(bool addressing_limited, unsigned int flags) +{ +} static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) { return false; diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h index 705be43b71ab..da05c9ebd224 100644 --- a/include/trace/events/swiotlb.h +++ b/include/trace/events/swiotlb.h @@ -8,20 +8,15 @@ #include TRACE_EVENT(swiotlb_bounced, - - TP_PROTO(struct device *dev, - dma_addr_t dev_addr, - size_t size, - enum swiotlb_force swiotlb_force), - - TP_ARGS(dev, dev_addr, size, swiotlb_force), + TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), + TP_ARGS(dev, dev_addr, size), TP_STRUCT__entry( - __string( dev_name, dev_name(dev) ) - __field( u64, dma_mask ) - __field( dma_addr_t, dev_addr ) - __field( size_t, size ) - __field( enum swiotlb_force, swiotlb_force ) + __string(dev_name, dev_name(dev)) + __field(u64, dma_mask) + __field(dma_addr_t, dev_addr) + __field(size_t, size) + __field(bool, force) ), TP_fast_assign( @@ -29,19 +24,15 @@ TRACE_EVENT(swiotlb_bounced, __entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0); __entry->dev_addr = dev_addr; __entry->size = size; - __entry->swiotlb_force = swiotlb_force; + __entry->force = is_swiotlb_force_bounce(dev); ), - TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx " - "size=%zu %s", + TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx size=%zu %s", __get_str(dev_name), __entry->dma_mask, (unsigned long long)__entry->dev_addr, __entry->size, - __print_symbolic(__entry->swiotlb_force, - { SWIOTLB_NORMAL, "NORMAL" }, - { SWIOTLB_FORCE, "FORCE" }, - { SWIOTLB_NO_FORCE, "NO_FORCE" })) + __entry->force ? 
"FORCE" : "NORMAL") ); #endif /* _TRACE_SWIOTLB_H */ diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 9a4fe6e48a07..86e877a96b82 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -62,7 +62,8 @@ #define INVALID_PHYS_ADDR (~(phys_addr_t)0) -enum swiotlb_force swiotlb_force; +static bool swiotlb_force_bounce; +static bool swiotlb_force_disable; struct io_tlb_mem io_tlb_default_mem; @@ -81,9 +82,9 @@ setup_io_tlb_npages(char *str) if (*str == ',') ++str; if (!strcmp(str, "force")) - swiotlb_force = SWIOTLB_FORCE; + swiotlb_force_bounce = true; else if (!strcmp(str, "noforce")) - swiotlb_force = SWIOTLB_NO_FORCE; + swiotlb_force_disable = true; return 0; } @@ -202,7 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, mem->index = 0; mem->late_alloc = late_alloc; - if (swiotlb_force == SWIOTLB_FORCE) + if (swiotlb_force_bounce) mem->force_bounce = true; spin_lock_init(&mem->lock); @@ -224,12 +225,13 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, return; } -int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) +int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, + unsigned int flags) { struct io_tlb_mem *mem = &io_tlb_default_mem; size_t alloc_size; - if (swiotlb_force == SWIOTLB_NO_FORCE) + if (swiotlb_force_disable) return 0; /* protect against double initialization */ @@ -243,8 +245,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) __func__, alloc_size, PAGE_SIZE); swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); + mem->force_bounce = flags & SWIOTLB_FORCE; - if (verbose) + if (flags & SWIOTLB_VERBOSE) swiotlb_print_info(); return 0; } @@ -253,20 +256,21 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) * Statically reserve bounce buffer space and initialize bounce buffer data * structures for the software IO TLB used to implement the DMA API. 
*/ -void __init -swiotlb_init(int verbose) +void __init swiotlb_init(bool addressing_limit, unsigned int flags) { size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT); void *tlb; - if (swiotlb_force == SWIOTLB_NO_FORCE) + if (!addressing_limit && !swiotlb_force_bounce) + return; + if (swiotlb_force_disable) return; /* Get IO TLB memory from the low pages */ tlb = memblock_alloc_low(bytes, PAGE_SIZE); if (!tlb) goto fail; - if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose)) + if (swiotlb_init_with_tbl(tlb, default_nslabs, flags)) goto fail_free_mem; return; @@ -289,7 +293,7 @@ int swiotlb_init_late(size_t size) unsigned int order; int rc = 0; - if (swiotlb_force == SWIOTLB_NO_FORCE) + if (swiotlb_force_disable) return 0; /* @@ -328,7 +332,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long bytes = nslabs << IO_TLB_SHIFT; - if (swiotlb_force == SWIOTLB_NO_FORCE) + if (swiotlb_force_disable) return 0; /* protect against double initialization */ @@ -353,7 +357,7 @@ void __init swiotlb_exit(void) unsigned long tbl_vaddr; size_t tbl_size, slots_size; - if (swiotlb_force == SWIOTLB_FORCE) + if (swiotlb_force_bounce) return; if (!mem->nslabs) @@ -704,8 +708,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, phys_addr_t swiotlb_addr; dma_addr_t dma_addr; - trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size, - swiotlb_force); + trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size); swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir, attrs); -- cgit v1.2.3 From 8ba2ed1be90fc210126f68186564707478552c95 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 28 Feb 2022 13:36:57 +0200 Subject: swiotlb: add a SWIOTLB_ANY flag to lift the low memory restriction Power SVM wants to allocate a swiotlb buffer that is not restricted to low memory for the trusted hypervisor scheme. Consolidate the support for this into the swiotlb_init interface by adding a new flag. Signed-off-by: Christoph Hellwig Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- arch/powerpc/include/asm/svm.h | 4 ---- arch/powerpc/include/asm/swiotlb.h | 1 + arch/powerpc/kernel/dma-swiotlb.c | 1 + arch/powerpc/mm/mem.c | 5 +---- arch/powerpc/platforms/pseries/svm.c | 26 +------------------------- include/linux/swiotlb.h | 1 + kernel/dma/swiotlb.c | 11 +++++++++-- 7 files changed, 14 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h index 7546402d796a..85580b30aba4 100644 --- a/arch/powerpc/include/asm/svm.h +++ b/arch/powerpc/include/asm/svm.h @@ -15,8 +15,6 @@ static inline bool is_secure_guest(void) return mfmsr() & MSR_S; } -void __init svm_swiotlb_init(void); - void dtl_cache_ctor(void *addr); #define get_dtl_cache_ctor() (is_secure_guest() ? 
dtl_cache_ctor : NULL) @@ -27,8 +25,6 @@ static inline bool is_secure_guest(void) return false; } -static inline void svm_swiotlb_init(void) {} - #define get_dtl_cache_ctor() NULL #endif /* CONFIG_PPC_SVM */ diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h index 3c1a1cd16128..4203b5e0a88e 100644 --- a/arch/powerpc/include/asm/swiotlb.h +++ b/arch/powerpc/include/asm/swiotlb.h @@ -9,6 +9,7 @@ #include extern unsigned int ppc_swiotlb_enable; +extern unsigned int ppc_swiotlb_flags; #ifdef CONFIG_SWIOTLB void swiotlb_detect_4g(void); diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index fc7816126a40..ba256c37bcc0 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -10,6 +10,7 @@ #include unsigned int ppc_swiotlb_enable; +unsigned int ppc_swiotlb_flags; void __init swiotlb_detect_4g(void) { diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 74ca516c3e7e..46fb78e3bb36 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -249,10 +249,7 @@ void __init mem_init(void) * back to to-down. */ memblock_set_bottom_up(true); - if (is_secure_guest()) - svm_swiotlb_init(); - else - swiotlb_init(ppc_swiotlb_enable, 0); + swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags); #endif high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c index c5228f4969eb..3b4045d508ec 100644 --- a/arch/powerpc/platforms/pseries/svm.c +++ b/arch/powerpc/platforms/pseries/svm.c @@ -28,7 +28,7 @@ static int __init init_svm(void) * need to use the SWIOTLB buffer for DMA even if dma_capable() says * otherwise. */ - swiotlb_force = SWIOTLB_FORCE; + ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE; /* Share the SWIOTLB buffer with the host. */ swiotlb_update_mem_attributes(); @@ -37,30 +37,6 @@ static int __init init_svm(void) } machine_early_initcall(pseries, init_svm); -/* - * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it - * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have - * any addressing limitation, we don't need to allocate it in low addresses. 
- */ -void __init svm_swiotlb_init(void) -{ - unsigned char *vstart; - unsigned long bytes, io_tlb_nslabs; - - io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT); - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); - - bytes = io_tlb_nslabs << IO_TLB_SHIFT; - - vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE); - if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false)) - return; - - - memblock_free(vstart, PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); - panic("SVM: Cannot allocate SWIOTLB buffer"); -} - int set_memory_encrypted(unsigned long addr, int numpages) { if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT)) diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index ae0407173e84..eabdd8998702 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -15,6 +15,7 @@ struct scatterlist; #define SWIOTLB_VERBOSE (1 << 0) /* verbose initialization */ #define SWIOTLB_FORCE (1 << 1) /* force bounce buffering */ +#define SWIOTLB_ANY (1 << 2) /* allow any memory for the buffer */ /* * Maximum allowable number of contiguous slabs to map, diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 86e877a96b82..f6e091424af3 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -266,8 +266,15 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags) if (swiotlb_force_disable) return; - /* Get IO TLB memory from the low pages */ - tlb = memblock_alloc_low(bytes, PAGE_SIZE); + /* + * By default allocate the bounce buffer memory from low memory, but + * allow to pick a location everywhere for hypervisors with guest + * memory encryption. + */ + if (flags & SWIOTLB_ANY) + tlb = memblock_alloc(bytes, PAGE_SIZE); + else + tlb = memblock_alloc_low(bytes, PAGE_SIZE); if (!tlb) goto fail; if (swiotlb_init_with_tbl(tlb, default_nslabs, flags)) -- cgit v1.2.3 From 742519538e6b07250c8085bbff4bd358bc03bf16 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Feb 2022 11:12:59 +0100 Subject: swiotlb: pass a gfp_mask argument to swiotlb_init_late Let the caller chose a zone to allocate from. This will be used later on by the xen-swiotlb initialization on arm. 
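A sketch of the updated call site, mirroring the sta2x11 conversion below (GFP_DMA preserves the previous low-memory allocation; callers without that restriction may pass GFP_KERNEL instead):

	/* First instance: register your own swiotlb area */
	if (swiotlb_init_late(size, GFP_DMA))
		dev_emerg(&pdev->dev, "init swiotlb failed\n");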
Signed-off-by: Christoph Hellwig Reviewed-by: Anshuman Khandual Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- arch/x86/pci/sta2x11-fixup.c | 2 +- include/linux/swiotlb.h | 2 +- kernel/dma/swiotlb.c | 7 ++----- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index e0c039a75b2d..c7e6faf59a86 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev) int size = STA2X11_SWIOTLB_SIZE; /* First instance: register your own swiotlb area */ dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size); - if (swiotlb_init_late(size)) + if (swiotlb_init_late(size, GFP_DMA)) dev_emerg(&pdev->dev, "init swiotlb failed\n"); } list_add(&instance->list, &sta2x11_instance_list); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index eabdd8998702..ee655f2e4d28 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -37,7 +37,7 @@ struct scatterlist; int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags); unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); -int swiotlb_init_late(size_t size); +int swiotlb_init_late(size_t size, gfp_t gfp_mask); extern void __init swiotlb_update_mem_attributes(void); phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index f6e091424af3..119187afc65e 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -292,7 +292,7 @@ fail: * initialize the swiotlb later using the slab allocator if needed. * This should be just like above, but with some error catching. */ -int swiotlb_init_late(size_t size) +int swiotlb_init_late(size_t size, gfp_t gfp_mask) { unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); unsigned long bytes; @@ -303,15 +303,12 @@ int swiotlb_init_late(size_t size) if (swiotlb_force_disable) return 0; - /* - * Get IO TLB memory from the low pages - */ order = get_order(nslabs << IO_TLB_SHIFT); nslabs = SLABS_PER_PAGE << order; bytes = nslabs << IO_TLB_SHIFT; while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { - vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, + vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN, order); if (vstart) break; -- cgit v1.2.3 From 7374153d294eb51de5a81ac38ff1c4fef8927bec Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Mar 2022 08:02:57 +0100 Subject: swiotlb: provide swiotlb_init variants that remap the buffer To shared more code between swiotlb and xen-swiotlb, offer a swiotlb_init_remap interface and add a remap callback to swiotlb_init_late that will allow Xen to remap the buffer without duplicating much of the logic. 
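A sketch of the two entry points added here, shown with the Xen remap callback that the follow-up patch wires up (the xen_swiotlb_fixup argument is therefore illustrative at this point; passing NULL keeps the plain swiotlb behaviour):

	/* early boot: memblock-backed buffer with an optional remap hook */
	swiotlb_init_remap(true, SWIOTLB_ANY | SWIOTLB_VERBOSE, xen_swiotlb_fixup);

	/* late init from the page allocator, same optional hook */
	rc = swiotlb_init_late(swiotlb_size_or_default(), GFP_KERNEL,
			       xen_swiotlb_fixup);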
Signed-off-by: Christoph Hellwig Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- arch/x86/pci/sta2x11-fixup.c | 2 +- include/linux/swiotlb.h | 5 ++++- kernel/dma/swiotlb.c | 36 +++++++++++++++++++++++++++++++++--- 3 files changed, 38 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index c7e6faf59a86..7368afc03998 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev) int size = STA2X11_SWIOTLB_SIZE; /* First instance: register your own swiotlb area */ dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size); - if (swiotlb_init_late(size, GFP_DMA)) + if (swiotlb_init_late(size, GFP_DMA, NULL)) dev_emerg(&pdev->dev, "init swiotlb failed\n"); } list_add(&instance->list, &sta2x11_instance_list); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index ee655f2e4d28..7b50c82f84ce 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -36,8 +36,11 @@ struct scatterlist; int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags); unsigned long swiotlb_size_or_default(void); +void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, + int (*remap)(void *tlb, unsigned long nslabs)); +int swiotlb_init_late(size_t size, gfp_t gfp_mask, + int (*remap)(void *tlb, unsigned long nslabs)); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); -int swiotlb_init_late(size_t size, gfp_t gfp_mask); extern void __init swiotlb_update_mem_attributes(void); phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 119187afc65e..f6acfc7a41bf 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -256,9 +256,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, * Statically reserve bounce buffer space and initialize bounce buffer data * structures for the software IO TLB used to implement the DMA API. */ -void __init swiotlb_init(bool addressing_limit, unsigned int flags) +void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, + int (*remap)(void *tlb, unsigned long nslabs)) { - size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT); + unsigned long nslabs = default_nslabs; + size_t bytes; void *tlb; if (!addressing_limit && !swiotlb_force_bounce) @@ -271,12 +273,23 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags) * allow to pick a location everywhere for hypervisors with guest * memory encryption. */ +retry: + bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT); if (flags & SWIOTLB_ANY) tlb = memblock_alloc(bytes, PAGE_SIZE); else tlb = memblock_alloc_low(bytes, PAGE_SIZE); if (!tlb) goto fail; + if (remap && remap(tlb, nslabs) < 0) { + memblock_free(tlb, PAGE_ALIGN(bytes)); + + nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE); + if (nslabs < IO_TLB_MIN_SLABS) + panic("%s: Failed to remap %zu bytes\n", + __func__, bytes); + goto retry; + } if (swiotlb_init_with_tbl(tlb, default_nslabs, flags)) goto fail_free_mem; return; @@ -287,12 +300,18 @@ fail: pr_warn("Cannot allocate buffer"); } +void __init swiotlb_init(bool addressing_limit, unsigned int flags) +{ + return swiotlb_init_remap(addressing_limit, flags, NULL); +} + /* * Systems with larger DMA zones (those that don't support ISA) can * initialize the swiotlb later using the slab allocator if needed. 
* This should be just like above, but with some error catching. */ -int swiotlb_init_late(size_t size, gfp_t gfp_mask) +int swiotlb_init_late(size_t size, gfp_t gfp_mask, + int (*remap)(void *tlb, unsigned long nslabs)) { unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); unsigned long bytes; @@ -303,6 +322,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask) if (swiotlb_force_disable) return 0; +retry: order = get_order(nslabs << IO_TLB_SHIFT); nslabs = SLABS_PER_PAGE << order; bytes = nslabs << IO_TLB_SHIFT; @@ -323,6 +343,16 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask) (PAGE_SIZE << order) >> 20); nslabs = SLABS_PER_PAGE << order; } + if (remap) + rc = remap(vstart, nslabs); + if (rc) { + free_pages((unsigned long)vstart, order); + + nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE); + if (nslabs < IO_TLB_MIN_SLABS) + return rc; + goto retry; + } rc = swiotlb_late_init_with_tbl(vstart, nslabs); if (rc) free_pages((unsigned long)vstart, order); -- cgit v1.2.3 From 3f70356edf5611c28a68d8d5a9c2b442c9eb81e6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Mar 2022 07:58:45 +0100 Subject: swiotlb: merge swiotlb-xen initialization into swiotlb Reuse the generic swiotlb initialization for xen-swiotlb. For ARM/ARM64 this works trivially, while for x86 xen_swiotlb_fixup needs to be passed as the remap argument to swiotlb_init_remap/swiotlb_init_late. Note that the lower bound of the swiotlb size is changed to the smaller IO_TLB_MIN_SLABS based value with this patch, but that is fine as the 2MB value used in Xen before was just an optimization and is not the hard lower bound. Signed-off-by: Christoph Hellwig Reviewed-by: Stefano Stabellini Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- arch/arm/xen/mm.c | 21 +++---- arch/x86/include/asm/xen/page.h | 5 -- arch/x86/kernel/pci-dma.c | 20 +++---- drivers/xen/swiotlb-xen.c | 128 +--------------------------------------- include/xen/arm/page.h | 1 - include/xen/swiotlb-xen.h | 8 ++- 6 files changed, 28 insertions(+), 155 deletions(-) (limited to 'include') diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index 28c207060253..ff05a7899cb8 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -23,22 +23,20 @@ #include #include -unsigned long xen_get_swiotlb_free_pages(unsigned int order) +static gfp_t xen_swiotlb_gfp(void) { phys_addr_t base; - gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM; u64 i; for_each_mem_range(i, &base, NULL) { if (base < (phys_addr_t)0xffffffff) { if (IS_ENABLED(CONFIG_ZONE_DMA32)) - flags |= __GFP_DMA32; - else - flags |= __GFP_DMA; - break; + return __GFP_DMA32; + return __GFP_DMA; } } - return __get_free_pages(flags, order); + + return GFP_KERNEL; } static bool hypercall_cflush = false; @@ -140,10 +138,13 @@ static int __init xen_mm_init(void) if (!xen_swiotlb_detect()) return 0; - rc = xen_swiotlb_init(); /* we can work with the default swiotlb */ - if (rc < 0 && rc != -EEXIST) - return rc; + if (!io_tlb_default_mem.nslabs) { + rc = swiotlb_init_late(swiotlb_size_or_default(), + xen_swiotlb_gfp(), NULL); + if (rc < 0) + return rc; + } cflush.op = 0; cflush.a.dev_bus_addr = 0; diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index e989bc2269f5..1fc67df50014 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -357,9 +357,4 @@ static inline bool xen_arch_need_swiotlb(struct device *dev, return false; } -static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order) -{ - return 
__get_free_pages(__GFP_NOWARN, order); -} - #endif /* _ASM_X86_XEN_PAGE_H */ diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index a705a199bf8a..30bbe4abb5d6 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -72,15 +72,13 @@ static inline void __init pci_swiotlb_detect(void) #endif /* CONFIG_SWIOTLB */ #ifdef CONFIG_SWIOTLB_XEN -static bool xen_swiotlb; - static void __init pci_xen_swiotlb_init(void) { if (!xen_initial_domain() && !x86_swiotlb_enable) return; x86_swiotlb_enable = true; - xen_swiotlb = true; - xen_swiotlb_init_early(); + x86_swiotlb_flags |= SWIOTLB_ANY; + swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup); dma_ops = &xen_swiotlb_dma_ops; if (IS_ENABLED(CONFIG_PCI)) pci_request_acs(); @@ -88,14 +86,16 @@ static void __init pci_xen_swiotlb_init(void) int pci_xen_swiotlb_init_late(void) { - int rc; - - if (xen_swiotlb) + if (dma_ops == &xen_swiotlb_dma_ops) return 0; - rc = xen_swiotlb_init(); - if (rc) - return rc; + /* we can work with the default swiotlb */ + if (!io_tlb_default_mem.nslabs) { + int rc = swiotlb_init_late(swiotlb_size_or_default(), + GFP_KERNEL, xen_swiotlb_fixup); + if (rc < 0) + return rc; + } /* XXX: this switches the dma ops under live devices! */ dma_ops = &xen_swiotlb_dma_ops; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index c2da3eb4826e..df8085b50df1 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -104,7 +104,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr) return 0; } -static int xen_swiotlb_fixup(void *buf, unsigned long nslabs) +int xen_swiotlb_fixup(void *buf, unsigned long nslabs) { int rc; unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT); @@ -130,132 +130,6 @@ static int xen_swiotlb_fixup(void *buf, unsigned long nslabs) return 0; } -enum xen_swiotlb_err { - XEN_SWIOTLB_UNKNOWN = 0, - XEN_SWIOTLB_ENOMEM, - XEN_SWIOTLB_EFIXUP -}; - -static const char *xen_swiotlb_error(enum xen_swiotlb_err err) -{ - switch (err) { - case XEN_SWIOTLB_ENOMEM: - return "Cannot allocate Xen-SWIOTLB buffer\n"; - case XEN_SWIOTLB_EFIXUP: - return "Failed to get contiguous memory for DMA from Xen!\n"\ - "You either: don't have the permissions, do not have"\ - " enough free memory under 4GB, or the hypervisor memory"\ - " is too fragmented!"; - default: - break; - } - return ""; -} - -int xen_swiotlb_init(void) -{ - enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN; - unsigned long bytes = swiotlb_size_or_default(); - unsigned long nslabs = bytes >> IO_TLB_SHIFT; - unsigned int order, repeat = 3; - int rc = -ENOMEM; - char *start; - - if (io_tlb_default_mem.nslabs) { - pr_warn("swiotlb buffer already initialized\n"); - return -EEXIST; - } - -retry: - m_ret = XEN_SWIOTLB_ENOMEM; - order = get_order(bytes); - - /* - * Get IO TLB memory from any location. - */ -#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) -#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) - while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { - start = (void *)xen_get_swiotlb_free_pages(order); - if (start) - break; - order--; - } - if (!start) - goto exit; - if (order != get_order(bytes)) { - pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n", - (PAGE_SIZE << order) >> 20); - nslabs = SLABS_PER_PAGE << order; - bytes = nslabs << IO_TLB_SHIFT; - } - - /* - * And replace that memory with pages under 4GB. 
- */ - rc = xen_swiotlb_fixup(start, nslabs); - if (rc) { - free_pages((unsigned long)start, order); - m_ret = XEN_SWIOTLB_EFIXUP; - goto error; - } - rc = swiotlb_late_init_with_tbl(start, nslabs); - if (rc) - return rc; - return 0; -error: - if (nslabs > 1024 && repeat--) { - /* Min is 2MB */ - nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE)); - bytes = nslabs << IO_TLB_SHIFT; - pr_info("Lowering to %luMB\n", bytes >> 20); - goto retry; - } -exit: - pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc); - return rc; -} - -#ifdef CONFIG_X86 -void __init xen_swiotlb_init_early(void) -{ - unsigned long bytes = swiotlb_size_or_default(); - unsigned long nslabs = bytes >> IO_TLB_SHIFT; - unsigned int repeat = 3; - char *start; - int rc; - -retry: - /* - * Get IO TLB memory from any location. - */ - start = memblock_alloc(PAGE_ALIGN(bytes), - IO_TLB_SEGSIZE << IO_TLB_SHIFT); - if (!start) - panic("%s: Failed to allocate %lu bytes\n", - __func__, PAGE_ALIGN(bytes)); - - /* - * And replace that memory with pages under 4GB. - */ - rc = xen_swiotlb_fixup(start, nslabs); - if (rc) { - memblock_free(start, PAGE_ALIGN(bytes)); - if (nslabs > 1024 && repeat--) { - /* Min is 2MB */ - nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE)); - bytes = nslabs << IO_TLB_SHIFT; - pr_info("Lowering to %luMB\n", bytes >> 20); - goto retry; - } - panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc); - } - - if (swiotlb_init_with_tbl(start, nslabs, SWIOTLB_VERBOSE)) - panic("Cannot allocate SWIOTLB buffer"); -} -#endif /* CONFIG_X86 */ - static void * xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h index ac1b65470563..7e199c6656b9 100644 --- a/include/xen/arm/page.h +++ b/include/xen/arm/page.h @@ -115,6 +115,5 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) bool xen_arch_need_swiotlb(struct device *dev, phys_addr_t phys, dma_addr_t dev_addr); -unsigned long xen_get_swiotlb_free_pages(unsigned int order); #endif /* _ASM_ARM_XEN_PAGE_H */ diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index b3e647f86e3e..590ceb923f0c 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -10,8 +10,12 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir); -int xen_swiotlb_init(void); -void __init xen_swiotlb_init_early(void); +#ifdef CONFIG_SWIOTLB_XEN +int xen_swiotlb_fixup(void *buf, unsigned long nslabs); +#else +#define xen_swiotlb_fixup NULL +#endif + extern const struct dma_map_ops xen_swiotlb_dma_ops; #endif /* __LINUX_SWIOTLB_XEN_H */ -- cgit v1.2.3 From 6424e31b1c050a25aea033206d5f626f3523448c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Mar 2022 07:41:04 +0100 Subject: swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl No users left. 
Signed-off-by: Christoph Hellwig Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Boris Ostrovsky --- include/linux/swiotlb.h | 2 -- kernel/dma/swiotlb.c | 77 +++++++++++++------------------------------------ 2 files changed, 20 insertions(+), 59 deletions(-) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 7b50c82f84ce..7ed35dd3de6e 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -34,13 +34,11 @@ struct scatterlist; /* default to 64MB */ #define IO_TLB_DEFAULT_SIZE (64UL<<20) -int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags); unsigned long swiotlb_size_or_default(void); void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, int (*remap)(void *tlb, unsigned long nslabs)); int swiotlb_init_late(size_t size, gfp_t gfp_mask, int (*remap)(void *tlb, unsigned long nslabs)); -extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); extern void __init swiotlb_update_mem_attributes(void); phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index f6acfc7a41bf..e2ef0864eb1e 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -225,33 +225,6 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, return; } -int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, - unsigned int flags) -{ - struct io_tlb_mem *mem = &io_tlb_default_mem; - size_t alloc_size; - - if (swiotlb_force_disable) - return 0; - - /* protect against double initialization */ - if (WARN_ON_ONCE(mem->nslabs)) - return -ENOMEM; - - alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); - mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); - if (!mem->slots) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, alloc_size, PAGE_SIZE); - - swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); - mem->force_bounce = flags & SWIOTLB_FORCE; - - if (flags & SWIOTLB_VERBOSE) - swiotlb_print_info(); - return 0; -} - /* * Statically reserve bounce buffer space and initialize bounce buffer data * structures for the software IO TLB used to implement the DMA API. 
@@ -259,7 +232,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, int (*remap)(void *tlb, unsigned long nslabs)) { + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long nslabs = default_nslabs; + size_t alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); size_t bytes; void *tlb; @@ -280,7 +255,8 @@ retry: else tlb = memblock_alloc_low(bytes, PAGE_SIZE); if (!tlb) - goto fail; + panic("%s: failed to allocate tlb structure\n", __func__); + if (remap && remap(tlb, nslabs) < 0) { memblock_free(tlb, PAGE_ALIGN(bytes)); @@ -290,14 +266,17 @@ retry: __func__, bytes); goto retry; } - if (swiotlb_init_with_tbl(tlb, default_nslabs, flags)) - goto fail_free_mem; - return; -fail_free_mem: - memblock_free(tlb, bytes); -fail: - pr_warn("Cannot allocate buffer"); + mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); + if (!mem->slots) + panic("%s: Failed to allocate %zu bytes align=0x%lx\n", + __func__, alloc_size, PAGE_SIZE); + + swiotlb_init_io_tlb_mem(mem, __pa(tlb), default_nslabs, false); + mem->force_bounce = flags & SWIOTLB_FORCE; + + if (flags & SWIOTLB_VERBOSE) + swiotlb_print_info(); } void __init swiotlb_init(bool addressing_limit, unsigned int flags) @@ -313,6 +292,7 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags) int swiotlb_init_late(size_t size, gfp_t gfp_mask, int (*remap)(void *tlb, unsigned long nslabs)) { + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); unsigned long bytes; unsigned char *vstart = NULL; @@ -353,33 +333,16 @@ retry: return rc; goto retry; } - rc = swiotlb_late_init_with_tbl(vstart, nslabs); - if (rc) - free_pages((unsigned long)vstart, order); - - return rc; -} - -int -swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) -{ - struct io_tlb_mem *mem = &io_tlb_default_mem; - unsigned long bytes = nslabs << IO_TLB_SHIFT; - - if (swiotlb_force_disable) - return 0; - - /* protect against double initialization */ - if (WARN_ON_ONCE(mem->nslabs)) - return -ENOMEM; mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(array_size(sizeof(*mem->slots), nslabs))); - if (!mem->slots) + if (!mem->slots) { + free_pages((unsigned long)vstart, order); return -ENOMEM; + } - set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); - swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true); + set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT); + swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true); swiotlb_print_info(); return 0; -- cgit v1.2.3 From 566fb90e050dfa2132340bbdab9533b727dda6f1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 22 Apr 2022 06:37:57 +0200 Subject: swiotlb-xen: fix DMA_ATTR_NO_KERNEL_MAPPING on arm swiotlb-xen uses very different ways to allocate coherent memory on x86 vs arm. On the former it allocates memory from the page allocator, while on the latter it reuses the dma-direct allocator that handles the complexities of non-coherent DMA on arm platforms. Unfortunately the complexities of trying to deal with the two cases in the swiotlb-xen.c code lead to a bug in the handling of DMA_ATTR_NO_KERNEL_MAPPING on arm. With the DMA_ATTR_NO_KERNEL_MAPPING flag the coherent memory allocator does not actually allocate coherent memory, but just a DMA handle for some memory that is DMA addressable by the device, but which does not have to have a kernel mapping.
Thus dereferencing the return value will lead to kernel crashes and memory corruption. Fix this by using the dma-direct allocator directly for arm, which works perfectly fine because on arm swiotlb-xen is only used when the domain is 1:1 mapped, and then simplifying the remaining code to only cater for the x86 case with DMA coherent devices. Reported-by: Rahul Singh Signed-off-by: Christoph Hellwig Reviewed-by: Rahul Singh Reviewed-by: Stefano Stabellini Tested-by: Rahul Singh --- arch/arm/include/asm/xen/page-coherent.h | 2 - arch/arm/xen/mm.c | 14 ----- arch/arm64/include/asm/xen/page-coherent.h | 2 - arch/x86/include/asm/xen/page-coherent.h | 24 -------- arch/x86/include/asm/xen/swiotlb-xen.h | 6 ++ arch/x86/xen/mmu_pv.c | 1 + drivers/xen/swiotlb-xen.c | 99 ++++++++++-------------------- include/xen/arm/page-coherent.h | 20 ------ include/xen/swiotlb-xen.h | 6 -- include/xen/xen-ops.h | 7 --- 10 files changed, 41 insertions(+), 140 deletions(-) delete mode 100644 arch/arm/include/asm/xen/page-coherent.h delete mode 100644 arch/arm64/include/asm/xen/page-coherent.h delete mode 100644 arch/x86/include/asm/xen/page-coherent.h delete mode 100644 include/xen/arm/page-coherent.h (limited to 'include') diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h deleted file mode 100644 index 27e984977402..000000000000 --- a/arch/arm/include/asm/xen/page-coherent.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index ff05a7899cb8..3d826c0b5fee 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -116,20 +116,6 @@ bool xen_arch_need_swiotlb(struct device *dev, !dev_is_dma_coherent(dev)); } -int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, - unsigned int address_bits, - dma_addr_t *dma_handle) -{ - /* the domain is 1:1 mapped to use swiotlb-xen */ - *dma_handle = pstart; - return 0; -} - -void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) -{ - return; -} - static int __init xen_mm_init(void) { struct gnttab_cache_flush cflush; diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h deleted file mode 100644 index 27e984977402..000000000000 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h deleted file mode 100644 index 63cd41b2e17a..000000000000 --- a/arch/x86/include/asm/xen/page-coherent.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_XEN_PAGE_COHERENT_H -#define _ASM_X86_XEN_PAGE_COHERENT_H - -#include -#include - -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs) -{ - void *vstart = (void*)__get_free_pages(flags, get_order(size)); - *dma_handle = virt_to_phys(vstart); - return vstart; -} - -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - unsigned long attrs) -{ - free_pages((unsigned long) cpu_addr, get_order(size)); -} - -#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */ diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h index e5a90b42e4dd..77a2d19cc990 100644 --- a/arch/x86/include/asm/xen/swiotlb-xen.h +++ b/arch/x86/include/asm/xen/swiotlb-xen.h @@ -8,4 +8,10 @@
extern int pci_xen_swiotlb_init_late(void); static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; } #endif +int xen_swiotlb_fixup(void *buf, unsigned long nslabs); +int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, + unsigned int address_bits, + dma_addr_t *dma_handle); +void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); + #endif /* _ASM_X86_SWIOTLB_XEN_H */ diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 00354866921b..ee29fb558f2e 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -80,6 +80,7 @@ #include #include #include +#include #include "multicalls.h" #include "mmu.h" diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index df8085b50df1..67aa74d20162 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -36,7 +36,6 @@ #include #include -#include #include #define MAX_DMA_BITS 32 @@ -104,6 +103,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr) return 0; } +#ifdef CONFIG_X86 int xen_swiotlb_fixup(void *buf, unsigned long nslabs) { int rc; @@ -131,94 +131,58 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs) } static void * -xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs) +xen_swiotlb_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { - void *ret; + u64 dma_mask = dev->coherent_dma_mask; int order = get_order(size); - u64 dma_mask = DMA_BIT_MASK(32); phys_addr_t phys; - dma_addr_t dev_addr; - - /* - * Ignore region specifiers - the kernel's ideas of - * pseudo-phys memory layout has nothing to do with the - * machine physical layout. We can't allocate highmem - * because we can't return a pointer to it. - */ - flags &= ~(__GFP_DMA | __GFP_HIGHMEM); + void *ret; - /* Convert the size to actually allocated. */ + /* Align the allocation to the Xen page size */ size = 1UL << (order + XEN_PAGE_SHIFT); - /* On ARM this function returns an ioremap'ped virtual address for - * which virt_to_phys doesn't return the corresponding physical - * address. In fact on ARM virt_to_phys only works for kernel direct - * mapped RAM memory. Also see comment below. - */ - ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs); - + ret = (void *)__get_free_pages(flags, get_order(size)); if (!ret) return ret; - - if (hwdev && hwdev->coherent_dma_mask) - dma_mask = hwdev->coherent_dma_mask; - - /* At this point dma_handle is the dma address, next we are - * going to set it to the machine address. - * Do not use virt_to_phys(ret) because on ARM it doesn't correspond - * to *dma_handle. 
*/ - phys = dma_to_phys(hwdev, *dma_handle); - dev_addr = xen_phys_to_dma(hwdev, phys); - if (((dev_addr + size - 1 <= dma_mask)) && - !range_straddles_page_boundary(phys, size)) - *dma_handle = dev_addr; - else { - if (xen_create_contiguous_region(phys, order, - fls64(dma_mask), dma_handle) != 0) { - xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); - return NULL; - } - *dma_handle = phys_to_dma(hwdev, *dma_handle); + phys = virt_to_phys(ret); + + *dma_handle = xen_phys_to_dma(dev, phys); + if (*dma_handle + size - 1 > dma_mask || + range_straddles_page_boundary(phys, size)) { + if (xen_create_contiguous_region(phys, order, fls64(dma_mask), + dma_handle) != 0) + goto out_free_pages; SetPageXenRemapped(virt_to_page(ret)); } + memset(ret, 0, size); return ret; + +out_free_pages: + free_pages((unsigned long)ret, get_order(size)); + return NULL; } static void -xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dev_addr, unsigned long attrs) +xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) { + phys_addr_t phys = virt_to_phys(vaddr); int order = get_order(size); - phys_addr_t phys; - u64 dma_mask = DMA_BIT_MASK(32); - struct page *page; - - if (hwdev && hwdev->coherent_dma_mask) - dma_mask = hwdev->coherent_dma_mask; - - /* do not use virt_to_phys because on ARM it doesn't return you the - * physical address */ - phys = xen_dma_to_phys(hwdev, dev_addr); /* Convert the size to actually allocated. */ size = 1UL << (order + XEN_PAGE_SHIFT); - if (is_vmalloc_addr(vaddr)) - page = vmalloc_to_page(vaddr); - else - page = virt_to_page(vaddr); + if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) || + WARN_ON_ONCE(range_straddles_page_boundary(phys, size))) + return; - if (!WARN_ON((dev_addr + size - 1 > dma_mask) || - range_straddles_page_boundary(phys, size)) && - TestClearPageXenRemapped(page)) + if (TestClearPageXenRemapped(virt_to_page(vaddr))) xen_destroy_contiguous_region(phys, order); - - xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys), - attrs); + free_pages((unsigned long)vaddr, get_order(size)); } +#endif /* CONFIG_X86 */ /* * Map a single buffer of the indicated size for DMA in streaming mode. 
The @@ -421,8 +385,13 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) } const struct dma_map_ops xen_swiotlb_dma_ops = { +#ifdef CONFIG_X86 .alloc = xen_swiotlb_alloc_coherent, .free = xen_swiotlb_free_coherent, +#else + .alloc = dma_direct_alloc, + .free = dma_direct_free, +#endif .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, .sync_single_for_device = xen_swiotlb_sync_single_for_device, .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h deleted file mode 100644 index b9cc11e887ed..000000000000 --- a/include/xen/arm/page-coherent.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _XEN_ARM_PAGE_COHERENT_H -#define _XEN_ARM_PAGE_COHERENT_H - -#include -#include - -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) -{ - return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs); -} - -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) -{ - dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs); -} - -#endif /* _XEN_ARM_PAGE_COHERENT_H */ diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index 590ceb923f0c..808d17ad8d57 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -10,12 +10,6 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir); -#ifdef CONFIG_SWIOTLB_XEN -int xen_swiotlb_fixup(void *buf, unsigned long nslabs); -#else -#define xen_swiotlb_fixup NULL -#endif - extern const struct dma_map_ops xen_swiotlb_dma_ops; #endif /* __LINUX_SWIOTLB_XEN_H */ diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index a3584a357f35..c7c1b46ff4cd 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -42,13 +42,6 @@ int xen_setup_shutdown_event(void); extern unsigned long *xen_contiguous_bitmap; -#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) -int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, - unsigned int address_bits, - dma_addr_t *dma_handle); -void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); -#endif - #if defined(CONFIG_XEN_PV) int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot, -- cgit v1.2.3
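For readers following the interface changes in the patches above, the sketch below shows how an architecture's setup code might plug a buffer fixup into the new swiotlb_init_remap()/swiotlb_init_late() entry points, mirroring the Xen conversion in arch/x86/kernel/pci-dma.c. It is an illustration only, not code from the series: my_remap_fixup(), my_platform_exchange_pages() and my_arch_pci_dma_init() are hypothetical names standing in for a platform-specific fixup such as xen_swiotlb_fixup().

#include <linux/init.h>
#include <linux/swiotlb.h>

/*
 * Minimal sketch, not part of the series above; my_platform_exchange_pages()
 * is a hypothetical platform hook.  The remap callback runs right after the
 * bounce buffer is allocated; a negative return makes swiotlb_init_remap()
 * free the buffer, halve nslabs (aligned to IO_TLB_SEGSIZE, down to
 * IO_TLB_MIN_SLABS) and retry the allocation.
 */
static int __init my_remap_fixup(void *tlb, unsigned long nslabs)
{
	/* e.g. hand the pages to a hypervisor, as xen_swiotlb_fixup() does */
	if (my_platform_exchange_pages(tlb, nslabs << IO_TLB_SHIFT))
		return -ENOMEM;
	return 0;
}

void __init my_arch_pci_dma_init(void)
{
	/* SWIOTLB_ANY lifts the low-memory restriction, as in the Xen path */
	swiotlb_init_remap(true, SWIOTLB_VERBOSE | SWIOTLB_ANY, my_remap_fixup);
}

The late-boot variant is symmetric: swiotlb_init_late(swiotlb_size_or_default(), GFP_KERNEL, my_remap_fixup) applies the same halving-and-retry loop, which is how pci_xen_swiotlb_init_late() is converted above.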