Diffstat (limited to 'drivers/pci'): 41 files changed, 1409 insertions, 371 deletions
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 79c4a2ef269a..46935695cfb9 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops); static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait); static noinline void pci_wait_cfg(struct pci_dev *dev) + __must_hold(&pci_lock) { - DECLARE_WAITQUEUE(wait, current); - - __add_wait_queue(&pci_cfg_wait, &wait); do { - set_current_state(TASK_UNINTERRUPTIBLE); raw_spin_unlock_irq(&pci_lock); - schedule(); + wait_event(pci_cfg_wait, !dev->block_cfg_access); raw_spin_lock_irq(&pci_lock); } while (dev->block_cfg_access); - __remove_wait_queue(&pci_cfg_wait, &wait); } /* Returns 0 on success, negative values indicate error. */ @@ -409,7 +405,7 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) *val = 0; if (pos & 1) - return -EINVAL; + return PCIBIOS_BAD_REGISTER_NUMBER; if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); @@ -444,7 +440,7 @@ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) *val = 0; if (pos & 3) - return -EINVAL; + return PCIBIOS_BAD_REGISTER_NUMBER; if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); @@ -469,7 +465,7 @@ EXPORT_SYMBOL(pcie_capability_read_dword); int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) { if (pos & 1) - return -EINVAL; + return PCIBIOS_BAD_REGISTER_NUMBER; if (!pcie_capability_reg_implemented(dev, pos)) return 0; @@ -481,7 +477,7 @@ EXPORT_SYMBOL(pcie_capability_write_word); int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) { if (pos & 3) - return -EINVAL; + return PCIBIOS_BAD_REGISTER_NUMBER; if (!pcie_capability_reg_implemented(dev, pos)) return 0; diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index b761c1f72f67..647e097530a8 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -325,6 +325,21 @@ int pci_prg_resp_pasid_required(struct pci_dev *pdev) return pdev->pasid_required; } + +/** + * pci_pri_supported - Check if PRI is supported. + * @pdev: PCI device structure + * + * Returns true if PRI capability is present, false otherwise. + */ +bool pci_pri_supported(struct pci_dev *pdev) +{ + /* VFs share the PF PRI */ + if (pci_physfn(pdev)->pri_cap) + return true; + return false; +} +EXPORT_SYMBOL_GPL(pci_pri_supported); #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 8e40b3e6da77..3cef835b375f 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -322,12 +322,8 @@ void pci_bus_add_device(struct pci_dev *dev) dev->match_driver = true; retval = device_attach(&dev->dev); - if (retval < 0 && retval != -EPROBE_DEFER) { + if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); - pci_proc_detach_device(dev); - pci_remove_sysfs_dev_files(dev); - return; - } pci_dev_assign_added(dev, true); } diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index b76b3cf55ce5..5d30564190e1 100644 --- a/drivers/pci/controller/cadence/Kconfig +++ b/drivers/pci/controller/cadence/Kconfig @@ -42,4 +42,27 @@ config PCIE_CADENCE_PLAT_EP endpoint mode. This PCIe controller may be embedded into many different vendors SoCs. 
+config PCI_J721E + bool + +config PCI_J721E_HOST + bool "TI J721E PCIe platform host controller" + depends on OF + select PCIE_CADENCE_HOST + select PCI_J721E + help + Say Y here if you want to support the TI J721E PCIe platform + controller in host mode. TI J721E PCIe controller uses Cadence PCIe + core. + +config PCI_J721E_EP + bool "TI J721E PCIe platform endpoint controller" + depends on OF + depends on PCI_ENDPOINT + select PCIE_CADENCE_EP + select PCI_J721E + help + Say Y here if you want to support the TI J721E PCIe platform + controller in endpoint mode. TI J721E PCIe controller uses Cadence PCIe + core. endmenu diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile index 232a3f20876a..9bac5fb2f13d 100644 --- a/drivers/pci/controller/cadence/Makefile +++ b/drivers/pci/controller/cadence/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o +obj-$(CONFIG_PCI_J721E) += pci-j721e.o diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c new file mode 100644 index 000000000000..23ad8fa699c4 --- /dev/null +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -0,0 +1,493 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * pci-j721e - PCIe controller driver for TI's J721E SoCs + * + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com + * Author: Kishon Vijay Abraham I <kishon@ti.com> + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/io.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/syscon.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include "../../pci.h" +#include "pcie-cadence.h" + +#define ENABLE_REG_SYS_2 0x108 +#define STATUS_REG_SYS_2 0x508 +#define STATUS_CLR_REG_SYS_2 0x708 +#define LINK_DOWN BIT(1) + +#define J721E_PCIE_USER_CMD_STATUS 0x4 +#define LINK_TRAINING_ENABLE BIT(0) + +#define J721E_PCIE_USER_LINKSTATUS 0x14 +#define LINK_STATUS GENMASK(1, 0) + +enum link_status { + NO_RECEIVERS_DETECTED, + LINK_TRAINING_IN_PROGRESS, + LINK_UP_DL_IN_PROGRESS, + LINK_UP_DL_COMPLETED, +}; + +#define J721E_MODE_RC BIT(7) +#define LANE_COUNT_MASK BIT(8) +#define LANE_COUNT(n) ((n) << 8) + +#define GENERATION_SEL_MASK GENMASK(1, 0) + +#define MAX_LANES 2 + +struct j721e_pcie { + struct device *dev; + u32 mode; + u32 num_lanes; + struct cdns_pcie *cdns_pcie; + void __iomem *user_cfg_base; + void __iomem *intd_cfg_base; +}; + +enum j721e_pcie_mode { + PCI_MODE_RC, + PCI_MODE_EP, +}; + +struct j721e_pcie_data { + enum j721e_pcie_mode mode; +}; + +static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset) +{ + return readl(pcie->user_cfg_base + offset); +} + +static inline void j721e_pcie_user_writel(struct j721e_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->user_cfg_base + offset); +} + +static inline u32 j721e_pcie_intd_readl(struct j721e_pcie *pcie, u32 offset) +{ + return readl(pcie->intd_cfg_base + offset); +} + +static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->intd_cfg_base + offset); +} + +static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv) +{ + struct j721e_pcie *pcie = priv; + struct device *dev = pcie->dev; + u32 reg; + 
+ reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2); + if (!(reg & LINK_DOWN)) + return IRQ_NONE; + + dev_err(dev, "LINK DOWN!\n"); + + j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN); + return IRQ_HANDLED; +} + +static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie) +{ + u32 reg; + + reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2); + reg |= LINK_DOWN; + j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg); +} + +static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS); + reg |= LINK_TRAINING_ENABLE; + j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg); + + return 0; +} + +static void j721e_pcie_stop_link(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS); + reg &= ~LINK_TRAINING_ENABLE; + j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg); +} + +static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS); + reg &= LINK_STATUS; + if (reg == LINK_UP_DL_COMPLETED) + return true; + + return false; +} + +static const struct cdns_pcie_ops j721e_pcie_ops = { + .start_link = j721e_pcie_start_link, + .stop_link = j721e_pcie_stop_link, + .link_up = j721e_pcie_link_up, +}; + +static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon) +{ + struct device *dev = pcie->dev; + u32 mask = J721E_MODE_RC; + u32 mode = pcie->mode; + u32 val = 0; + int ret = 0; + + if (mode == PCI_MODE_RC) + val = J721E_MODE_RC; + + ret = regmap_update_bits(syscon, 0, mask, val); + if (ret) + dev_err(dev, "failed to set pcie mode\n"); + + return ret; +} + +static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie, + struct regmap *syscon) +{ + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; + int link_speed; + u32 val = 0; + int ret; + + link_speed = of_pci_get_max_link_speed(np); + if (link_speed < 2) + link_speed = 2; + + val = link_speed - 1; + ret = regmap_update_bits(syscon, 0, GENERATION_SEL_MASK, val); + if (ret) + dev_err(dev, "failed to set link speed\n"); + + return ret; +} + +static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, + struct regmap *syscon) +{ + struct device *dev = pcie->dev; + u32 lanes = pcie->num_lanes; + u32 val = 0; + int ret; + + val = LANE_COUNT(lanes - 1); + ret = regmap_update_bits(syscon, 0, LANE_COUNT_MASK, val); + if (ret) + dev_err(dev, "failed to set link count\n"); + + return ret; +} + +static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node; + struct regmap *syscon; + int ret; + + syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl"); + if (IS_ERR(syscon)) { + dev_err(dev, "Unable to get ti,syscon-pcie-ctrl regmap\n"); + return PTR_ERR(syscon); + } + + ret = j721e_pcie_set_mode(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set pci mode\n"); + return ret; + } + + ret = j721e_pcie_set_link_speed(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set link speed\n"); + return ret; + } + + ret = j721e_pcie_set_lane_count(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set num-lanes\n"); + return ret; + } + + return 0; +} + +static int cdns_ti_pcie_config_read(struct 
pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + struct pci_host_bridge *bridge = pci_find_host_bridge(bus); + struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); + unsigned int busn = bus->number; + + if (busn == rc->bus_range->start) + return pci_generic_config_read32(bus, devfn, where, size, + value); + + return pci_generic_config_read(bus, devfn, where, size, value); +} + +static int cdns_ti_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + struct pci_host_bridge *bridge = pci_find_host_bridge(bus); + struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); + unsigned int busn = bus->number; + + if (busn == rc->bus_range->start) + return pci_generic_config_write32(bus, devfn, where, size, + value); + + return pci_generic_config_write(bus, devfn, where, size, value); +} + +static struct pci_ops cdns_ti_pcie_host_ops = { + .map_bus = cdns_pci_map_bus, + .read = cdns_ti_pcie_config_read, + .write = cdns_ti_pcie_config_write, +}; + +static const struct j721e_pcie_data j721e_pcie_rc_data = { + .mode = PCI_MODE_RC, +}; + +static const struct j721e_pcie_data j721e_pcie_ep_data = { + .mode = PCI_MODE_EP, +}; + +static const struct of_device_id of_j721e_pcie_match[] = { + { + .compatible = "ti,j721e-pcie-host", + .data = &j721e_pcie_rc_data, + }, + { + .compatible = "ti,j721e-pcie-ep", + .data = &j721e_pcie_ep_data, + }, + {}, +}; + +static int j721e_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct pci_host_bridge *bridge; + struct j721e_pcie_data *data; + struct cdns_pcie *cdns_pcie; + struct j721e_pcie *pcie; + struct cdns_pcie_rc *rc; + struct cdns_pcie_ep *ep; + struct gpio_desc *gpiod; + void __iomem *base; + u32 num_lanes; + u32 mode; + int ret; + int irq; + + data = (struct j721e_pcie_data *)of_device_get_match_data(dev); + if (!data) + return -EINVAL; + + mode = (u32)data->mode; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = dev; + pcie->mode = mode; + + base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg"); + if (IS_ERR(base)) + return PTR_ERR(base); + pcie->intd_cfg_base = base; + + base = devm_platform_ioremap_resource_byname(pdev, "user_cfg"); + if (IS_ERR(base)) + return PTR_ERR(base); + pcie->user_cfg_base = base; + + ret = of_property_read_u32(node, "num-lanes", &num_lanes); + if (ret || num_lanes > MAX_LANES) + num_lanes = 1; + pcie->num_lanes = num_lanes; + + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48))) + return -EINVAL; + + irq = platform_get_irq_byname(pdev, "link_state"); + if (irq < 0) + return irq; + + dev_set_drvdata(dev, pcie); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } + + ret = j721e_pcie_ctrl_init(pcie); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } + + ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0, + "j721e-pcie-link-down-irq", pcie); + if (ret < 0) { + dev_err(dev, "failed to request link state IRQ %d\n", irq); + goto err_get_sync; + } + + j721e_pcie_config_link_irq(pcie); + + switch (mode) { + case PCI_MODE_RC: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) { + ret = -ENODEV; + goto err_get_sync; + } + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); + if (!bridge) { + ret = -ENOMEM; + goto err_get_sync; + } + + bridge->ops = &cdns_ti_pcie_host_ops; + rc = 
pci_host_bridge_priv(bridge); + + cdns_pcie = &rc->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + + gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get reset GPIO\n"); + goto err_get_sync; + } + + ret = cdns_pcie_init_phy(dev, cdns_pcie); + if (ret) { + dev_err(dev, "Failed to init phy\n"); + goto err_get_sync; + } + + /* + * "Power Sequencing and Reset Signal Timings" table in + * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0 + * indicates PERST# should be deasserted after minimum of 100us + * once REFCLK is stable. The REFCLK to the connector in RC + * mode is selected while enabling the PHY. So deassert PERST# + * after 100 us. + */ + if (gpiod) { + usleep_range(100, 200); + gpiod_set_value_cansleep(gpiod, 1); + } + + ret = cdns_pcie_host_setup(rc); + if (ret < 0) + goto err_pcie_setup; + + break; + case PCI_MODE_EP: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) { + ret = -ENODEV; + goto err_get_sync; + } + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) { + ret = -ENOMEM; + goto err_get_sync; + } + + cdns_pcie = &ep->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + + ret = cdns_pcie_init_phy(dev, cdns_pcie); + if (ret) { + dev_err(dev, "Failed to init phy\n"); + goto err_get_sync; + } + + ret = cdns_pcie_ep_setup(ep); + if (ret < 0) + goto err_pcie_setup; + + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + } + + return 0; + +err_pcie_setup: + cdns_pcie_disable_phy(cdns_pcie); + +err_get_sync: + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return ret; +} + +static int j721e_pcie_remove(struct platform_device *pdev) +{ + struct j721e_pcie *pcie = platform_get_drvdata(pdev); + struct cdns_pcie *cdns_pcie = pcie->cdns_pcie; + struct device *dev = &pdev->dev; + + cdns_pcie_disable_phy(cdns_pcie); + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return 0; +} + +static struct platform_driver j721e_pcie_driver = { + .probe = j721e_pcie_probe, + .remove = j721e_pcie_remove, + .driver = { + .name = "j721e-pcie", + .of_match_table = of_j721e_pcie_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(j721e_pcie_driver); diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 1c15c8352125..ec1306da301f 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -8,7 +8,6 @@ #include <linux/of.h> #include <linux/pci-epc.h> #include <linux/platform_device.h> -#include <linux/pm_runtime.h> #include <linux/sizes.h> #include "pcie-cadence.h" @@ -52,6 +51,7 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, struct pci_epf_bar *epf_bar) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie_epf *epf = &ep->epf[fn]; struct cdns_pcie *pcie = &ep->pcie; dma_addr_t bar_phys = epf_bar->phys_addr; enum pci_barno bar = epf_bar->barno; @@ -112,6 +112,8 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); cdns_pcie_writel(pcie, reg, cfg); + epf->epf_bar[bar] = epf_bar; + return 0; } @@ -119,6 +121,7 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, struct pci_epf_bar *epf_bar) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie_epf *epf = &ep->epf[fn]; struct cdns_pcie *pcie = &ep->pcie; 
enum pci_barno bar = epf_bar->barno; u32 reg, cfg, b, ctrl; @@ -140,6 +143,8 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); + + epf->epf_bar[bar] = NULL; } static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, @@ -225,10 +230,55 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) return mme; } +static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 val, reg; + + reg = cap + PCI_MSIX_FLAGS; + val = cdns_pcie_ep_fn_readw(pcie, func_no, reg); + if (!(val & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + val &= PCI_MSIX_FLAGS_QSIZE; + + return val; +} + +static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts, + enum pci_barno bir, u32 offset) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 val, reg; + + reg = cap + PCI_MSIX_FLAGS; + val = cdns_pcie_ep_fn_readw(pcie, fn, reg); + val &= ~PCI_MSIX_FLAGS_QSIZE; + val |= interrupts; + cdns_pcie_ep_fn_writew(pcie, fn, reg, val); + + /* Set MSIX BAR and offset */ + reg = cap + PCI_MSIX_TABLE; + val = offset | bir; + cdns_pcie_ep_fn_writel(pcie, fn, reg, val); + + /* Set PBA BAR and offset. BAR must match MSIX BAR */ + reg = cap + PCI_MSIX_PBA; + val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + cdns_pcie_ep_fn_writel(pcie, fn, reg, val); + + return 0; +} + static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, bool is_asserted) { struct cdns_pcie *pcie = &ep->pcie; + unsigned long flags; u32 offset; u16 status; u8 msg_code; @@ -253,11 +303,13 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, msg_code = MSG_CODE_DEASSERT_INTA + intx; } + spin_lock_irqsave(&ep->lock, flags); status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS); if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { status ^= PCI_STATUS_INTERRUPT; cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status); } + spin_unlock_irqrestore(&ep->lock, flags); offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | @@ -331,6 +383,51 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, return 0; } +static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, + u16 interrupt_num) +{ + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 tbl_offset, msg_data, reg; + struct cdns_pcie *pcie = &ep->pcie; + struct pci_epf_msix_tbl *msix_tbl; + struct cdns_pcie_epf *epf; + u64 pci_addr_mask = 0xff; + u64 msg_addr; + u16 flags; + u8 bir; + + /* Check whether the MSI-X feature has been enabled by the PCI host. */ + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS); + if (!(flags & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + reg = cap + PCI_MSIX_TABLE; + tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg); + bir = tbl_offset & PCI_MSIX_TABLE_BIR; + tbl_offset &= PCI_MSIX_TABLE_OFFSET; + + epf = &ep->epf[fn]; + msix_tbl = epf->epf_bar[bir]->addr + tbl_offset; + msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; + msg_data = msix_tbl[(interrupt_num - 1)].msg_data; + + /* Set the outbound region if needed. 
*/ + if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) || + ep->irq_pci_fn != fn) { + /* First region was reserved for IRQ writes. */ + cdns_pcie_set_outbound_region(pcie, fn, 0, + false, + ep->irq_phys_addr, + msg_addr & ~pci_addr_mask, + pci_addr_mask + 1); + ep->irq_pci_addr = (msg_addr & ~pci_addr_mask); + ep->irq_pci_fn = fn; + } + writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask)); + + return 0; +} + static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, enum pci_epc_irq_type type, u16 interrupt_num) @@ -344,6 +441,9 @@ static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, case PCI_EPC_IRQ_MSI: return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); + case PCI_EPC_IRQ_MSIX: + return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num); + default: break; } @@ -355,8 +455,10 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; + struct device *dev = pcie->dev; struct pci_epf *epf; u32 cfg; + int ret; /* * BIT(0) is hardwired to 1, hence function 0 is always enabled @@ -367,13 +469,19 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) cfg |= BIT(epf->func_no); cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); + ret = cdns_pcie_start_link(pcie); + if (ret) { + dev_err(dev, "Failed to start link\n"); + return ret; + } + return 0; } static const struct pci_epc_features cdns_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, - .msix_capable = false, + .msix_capable = true, }; static const struct pci_epc_features* @@ -390,6 +498,8 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = { .unmap_addr = cdns_pcie_ep_unmap_addr, .set_msi = cdns_pcie_ep_set_msi, .get_msi = cdns_pcie_ep_get_msi, + .set_msix = cdns_pcie_ep_set_msix, + .get_msix = cdns_pcie_ep_get_msix, .raise_irq = cdns_pcie_ep_raise_irq, .start = cdns_pcie_ep_start, .get_features = cdns_pcie_ep_get_features, @@ -440,8 +550,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops); if (IS_ERR(epc)) { dev_err(dev, "failed to create epc device\n"); - ret = PTR_ERR(epc); - goto err_init; + return PTR_ERR(epc); } epc_set_drvdata(epc, ep); @@ -449,11 +558,16 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) epc->max_functions = 1; + ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf), + GFP_KERNEL); + if (!ep->epf) + return -ENOMEM; + ret = pci_epc_mem_init(epc, pcie->mem_res->start, resource_size(pcie->mem_res), PAGE_SIZE); if (ret < 0) { dev_err(dev, "failed to initialize the memory space\n"); - goto err_init; + return ret; } ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, @@ -466,14 +580,12 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; /* Reserve region 0 for IRQs */ set_bit(0, &ep->ob_region_map); + spin_lock_init(&ep->lock); return 0; free_epc_mem: pci_epc_mem_exit(epc); - err_init: - pm_runtime_put_sync(dev); - return ret; } diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index 8c2543f28ba0..8d86560196aa 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -3,16 +3,28 @@ // Cadence PCIe host controller driver. 
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> +#include <linux/delay.h> #include <linux/kernel.h> +#include <linux/list_sort.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/platform_device.h> -#include <linux/pm_runtime.h> #include "pcie-cadence.h" -static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, - int where) +static u64 bar_max_size[] = { + [RP_BAR0] = _ULL(128 * SZ_2G), + [RP_BAR1] = SZ_2G, + [RP_NO_BAR] = _BITULL(63), +}; + +static u8 bar_aperture_mask[] = { + [RP_BAR0] = 0x1F, + [RP_BAR1] = 0xF, +}; + +void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) { struct pci_host_bridge *bridge = pci_find_host_bridge(bus); struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); @@ -70,6 +82,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; u32 value, ctrl; + u32 id; /* * Set the root complex BAR configuration register: @@ -89,8 +102,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); /* Set root port configuration space */ - if (rc->vendor_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + if (rc->vendor_id != 0xffff) { + id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | + CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); + } + if (rc->device_id != 0xffff) cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); @@ -101,18 +118,229 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) return 0; } +static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc, + enum cdns_pcie_rp_bar bar, + u64 cpu_addr, u64 size, + unsigned long flags) +{ + struct cdns_pcie *pcie = &rc->pcie; + u32 addr0, addr1, aperture, value; + + if (!rc->avail_ib_bar[bar]) + return -EBUSY; + + rc->avail_ib_bar[bar] = false; + + aperture = ilog2(size); + addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1); + + if (bar == RP_NO_BAR) + return 0; + + value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG); + value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) | + LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) | + LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) | + LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) | + LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2)); + if (size + cpu_addr >= SZ_4G) { + if (!(flags & IORESOURCE_PREFETCH)) + value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar); + value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar); + } else { + if (!(flags & IORESOURCE_PREFETCH)) + value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar); + value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar); + } + + value |= LM_RC_BAR_CFG_APERTURE(bar, aperture); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); + + return 0; +} + +static enum cdns_pcie_rp_bar +cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size) +{ + enum cdns_pcie_rp_bar bar, sel_bar; + + sel_bar = RP_BAR_UNDEFINED; + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (!rc->avail_ib_bar[bar]) + continue; + + if (size <= bar_max_size[bar]) { + if (sel_bar == RP_BAR_UNDEFINED) { + sel_bar = bar; + continue; + } + + if (bar_max_size[bar] < bar_max_size[sel_bar]) + sel_bar = bar; + } + } + + return sel_bar; +} + +static enum cdns_pcie_rp_bar +cdns_pcie_host_find_max_bar(struct 
cdns_pcie_rc *rc, u64 size) +{ + enum cdns_pcie_rp_bar bar, sel_bar; + + sel_bar = RP_BAR_UNDEFINED; + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (!rc->avail_ib_bar[bar]) + continue; + + if (size >= bar_max_size[bar]) { + if (sel_bar == RP_BAR_UNDEFINED) { + sel_bar = bar; + continue; + } + + if (bar_max_size[bar] > bar_max_size[sel_bar]) + sel_bar = bar; + } + } + + return sel_bar; +} + +static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc, + struct resource_entry *entry) +{ + u64 cpu_addr, pci_addr, size, winsize; + struct cdns_pcie *pcie = &rc->pcie; + struct device *dev = pcie->dev; + enum cdns_pcie_rp_bar bar; + unsigned long flags; + int ret; + + cpu_addr = entry->res->start; + pci_addr = entry->res->start - entry->offset; + flags = entry->res->flags; + size = resource_size(entry->res); + + if (entry->offset) { + dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n", + pci_addr, cpu_addr); + return -EINVAL; + } + + while (size > 0) { + /* + * Try to find a minimum BAR whose size is greater than + * or equal to the remaining resource_entry size. This will + * fail if the size of each of the available BARs is less than + * the remaining resource_entry size. + * If a minimum BAR is found, IB ATU will be configured and + * exited. + */ + bar = cdns_pcie_host_find_min_bar(rc, size); + if (bar != RP_BAR_UNDEFINED) { + ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, + size, flags); + if (ret) + dev_err(dev, "IB BAR: %d config failed\n", bar); + return ret; + } + + /* + * If the control reaches here, it would mean the remaining + * resource_entry size cannot be fitted in a single BAR. So we + * find a maximum BAR whose size is less than or equal to the + * remaining resource_entry size and split the resource entry + * so that part of resource entry is fitted inside the maximum + * BAR. The remaining size would be fitted during the next + * iteration of the loop. + * If a maximum BAR is not found, there is no way we can fit + * this resource_entry, so we error out. 
+ */ + bar = cdns_pcie_host_find_max_bar(rc, size); + if (bar == RP_BAR_UNDEFINED) { + dev_err(dev, "No free BAR to map cpu_addr %llx\n", + cpu_addr); + return -EINVAL; + } + + winsize = bar_max_size[bar]; + ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize, + flags); + if (ret) { + dev_err(dev, "IB BAR: %d config failed\n", bar); + return ret; + } + + size -= winsize; + cpu_addr += winsize; + } + + return 0; +} + +static int cdns_pcie_host_dma_ranges_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct resource_entry *entry1, *entry2; + + entry1 = container_of(a, struct resource_entry, node); + entry2 = container_of(b, struct resource_entry, node); + + return resource_size(entry2->res) - resource_size(entry1->res); +} + +static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; + struct pci_host_bridge *bridge; + struct resource_entry *entry; + u32 no_bar_nbits = 32; + int err; + + bridge = pci_host_bridge_from_priv(rc); + if (!bridge) + return -ENOMEM; + + if (list_empty(&bridge->dma_ranges)) { + of_property_read_u32(np, "cdns,no-bar-match-nbits", + &no_bar_nbits); + err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0, + (u64)1 << no_bar_nbits, 0); + if (err) + dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR); + return err; + } + + list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp); + + resource_list_for_each_entry(entry, &bridge->dma_ranges) { + err = cdns_pcie_host_bar_config(rc, entry); + if (err) { + dev_err(dev, "Fail to configure IB using dma-ranges\n"); + return err; + } + } + + return 0; +} + static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; - struct resource *mem_res = pcie->mem_res; struct resource *bus_range = rc->bus_range; struct resource *cfg_res = rc->cfg_res; struct device *dev = pcie->dev; struct device_node *np = dev->of_node; struct of_pci_range_parser parser; + u64 cpu_addr = cfg_res->start; struct of_pci_range range; u32 addr0, addr1, desc1; - u64 cpu_addr; int r, err; /* @@ -125,7 +353,9 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); - cpu_addr = cfg_res->start - mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); @@ -154,16 +384,9 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) r++; } - /* - * Set Root Port no BAR match Inbound Translation registers: - * needed for MSI and DMA. - * Root Port BAR0 and BAR1 are disabled, hence no need to set their - * inbound translation registers. 
- */ - addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits); - addr1 = 0; - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1); + err = cdns_pcie_host_map_dma_ranges(rc); + if (err) + return err; return 0; } @@ -173,10 +396,16 @@ static int cdns_pcie_host_init(struct device *dev, struct cdns_pcie_rc *rc) { struct resource *bus_range = NULL; + struct pci_host_bridge *bridge; int err; + bridge = pci_host_bridge_from_priv(rc); + if (!bridge) + return -ENOMEM; + /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range); + err = pci_parse_request_of_pci_ranges(dev, resources, + &bridge->dma_ranges, &bus_range); if (err) return err; @@ -198,6 +427,23 @@ static int cdns_pcie_host_init(struct device *dev, return err; } +static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie) +{ + struct device *dev = pcie->dev; + int retries; + + /* Check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (cdns_pcie_link_up(pcie)) { + dev_info(dev, "Link up\n"); + return 0; + } + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + return -ETIMEDOUT; +} + int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) { struct device *dev = rc->pcie.dev; @@ -205,6 +451,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) struct device_node *np = dev->of_node; struct pci_host_bridge *bridge; struct list_head resources; + enum cdns_pcie_rp_bar bar; struct cdns_pcie *pcie; struct resource *res; int ret; @@ -216,9 +463,6 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) pcie = &rc->pcie; pcie->is_rc = true; - rc->no_bar_nbits = 32; - of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits); - rc->vendor_id = 0xffff; of_property_read_u32(np, "vendor-id", &rc->vendor_id); @@ -240,22 +484,28 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) } rc->cfg_res = res; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); - if (!res) { - dev_err(dev, "missing \"mem\"\n"); - return -EINVAL; + ret = cdns_pcie_start_link(pcie); + if (ret) { + dev_err(dev, "Failed to start link\n"); + return ret; } - pcie->mem_res = res; + ret = cdns_pcie_host_wait_for_link(pcie); + if (ret) + dev_dbg(dev, "PCIe link never came up\n"); + + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) + rc->avail_ib_bar[bar] = true; ret = cdns_pcie_host_init(dev, &resources, rc); if (ret) - goto err_init; + return ret; list_splice_init(&resources, &bridge->windows); bridge->dev.parent = dev; bridge->busnr = pcie->bus; - bridge->ops = &cdns_pcie_host_ops; + if (!bridge->ops) + bridge->ops = &cdns_pcie_host_ops; bridge->map_irq = of_irq_parse_and_map_pci; bridge->swizzle_irq = pci_common_swizzle; @@ -268,8 +518,5 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) err_host_probe: pci_free_resource_list(&resources); - err_init: - pm_runtime_put_sync(dev); - return ret; } diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c index f5c6bf6dfcb8..6f5f07b3eed1 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-plat.c +++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c @@ -13,6 +13,8 @@ #include <linux/of_device.h> #include "pcie-cadence.h" +#define CDNS_PLAT_CPU_TO_BUS_ADDR 0x0FFFFFFF + /** * struct cdns_plat_pcie - private data for this PCIe platform driver * @pcie: Cadence PCIe controller @@ -30,6 +32,15 @@ struct cdns_plat_pcie_of_data { 
static const struct of_device_id cdns_plat_pcie_of_match[]; +static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr) +{ + return cpu_addr & CDNS_PLAT_CPU_TO_BUS_ADDR; +} + +static const struct cdns_pcie_ops cdns_plat_ops = { + .cpu_addr_fixup = cdns_plat_cpu_addr_fixup, +}; + static int cdns_plat_pcie_probe(struct platform_device *pdev) { const struct cdns_plat_pcie_of_data *data; @@ -66,6 +77,7 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) rc = pci_host_bridge_priv(bridge); rc->pcie.dev = dev; + rc->pcie.ops = &cdns_plat_ops; cdns_plat_pcie->pcie = &rc->pcie; cdns_plat_pcie->is_rc = is_rc; @@ -93,6 +105,7 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) return -ENOMEM; ep->pcie.dev = dev; + ep->pcie.ops = &cdns_plat_ops; cdns_plat_pcie->pcie = &ep->pcie; cdns_plat_pcie->is_rc = is_rc; diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c index cd795f6fc1e2..8a02981fd456 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.c +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -73,7 +73,9 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); /* Set the CPU address */ - cpu_addr -= pcie->mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); @@ -100,7 +102,9 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, } /* Set the CPU address */ - cpu_addr -= pcie->mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index df14ad002fe9..00e44256c3e8 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -10,6 +10,11 @@ #include <linux/pci.h> #include <linux/phy/phy.h> +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + /* * Local Management Registers */ @@ -87,6 +92,20 @@ #define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6 #define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 +#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \ + (((aperture) - 2) << ((bar) * 8)) /* * Endpoint Function Registers (PCI configuration space for endpoint functions) @@ -94,6 +113,7 @@ #define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90 +#define 
CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0 /* * Root Port Registers (PCI configuration space for the root port function) @@ -170,11 +190,19 @@ #define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824) enum cdns_pcie_rp_bar { + RP_BAR_UNDEFINED = -1, RP_BAR0, RP_BAR1, RP_NO_BAR }; +#define CDNS_PCIE_RP_MAX_IB 0x3 + +struct cdns_pcie_rp_ib_bar { + u64 size; + bool free; +}; + /* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ #define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) @@ -223,12 +251,21 @@ enum cdns_pcie_msg_routing { MSG_ROUTING_GATHER, }; +struct cdns_pcie_ops { + int (*start_link)(struct cdns_pcie *pcie); + void (*stop_link)(struct cdns_pcie *pcie); + bool (*link_up)(struct cdns_pcie *pcie); + u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr); +}; + /** * struct cdns_pcie - private data for Cadence PCIe controller drivers * @reg_base: IO mapped register base * @mem_res: start/end offsets in the physical system memory to map PCI accesses * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint. * @bus: In Root Complex mode, the bus number + * @ops: Platform specific ops to control various inputs from Cadence PCIe + * wrapper */ struct cdns_pcie { void __iomem *reg_base; @@ -239,7 +276,7 @@ struct cdns_pcie { int phy_count; struct phy **phy; struct device_link **link; - const struct cdns_pcie_common_ops *ops; + const struct cdns_pcie_ops *ops; }; /** @@ -251,19 +288,27 @@ struct cdns_pcie { * @bus_range: first/last buses behind the PCIe host controller * @cfg_base: IO mapped window to access the PCI configuration space of a * single function at a time - * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address - * translation (nbits sets into the "no BAR match" register) * @vendor_id: PCI vendor ID * @device_id: PCI device ID + * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or + * available */ struct cdns_pcie_rc { struct cdns_pcie pcie; struct resource *cfg_res; struct resource *bus_range; void __iomem *cfg_base; - u32 no_bar_nbits; u32 vendor_id; u32 device_id; + bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB]; +}; + +/** + * struct cdns_pcie_epf - Structure to hold info about endpoint function + * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers + */ +struct cdns_pcie_epf { + struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; }; /** @@ -282,6 +327,10 @@ struct cdns_pcie_rc { * @irq_pci_fn: the latest PCI function that has updated the mapping of * the MSI/legacy IRQ dedicated outbound region. * @irq_pending: bitmask of asserted legacy IRQs. 
+ * @lock: spin lock to disable interrupts while modifying PCIe controller + * registers fields (RMW) accessible by both remote RC and EP to + * minimize time between read and write + * @epf: Structure to hold info about endpoint function */ struct cdns_pcie_ep { struct cdns_pcie pcie; @@ -293,54 +342,95 @@ struct cdns_pcie_ep { u64 irq_pci_addr; u8 irq_pci_fn; u8 irq_pending; + /* protect writing to PCI_STATUS while raising legacy interrupts */ + spinlock_t lock; + struct cdns_pcie_epf *epf; }; /* Register access */ -static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) +static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) { - writeb(value, pcie->reg_base + reg); + writel(value, pcie->reg_base + reg); } -static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value) +static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) { - writew(value, pcie->reg_base + reg); + return readl(pcie->reg_base + reg); } -static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) +static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size) { - writel(value, pcie->reg_base + reg); + void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4); + unsigned int offset = (unsigned long)addr & 0x3; + u32 val = readl(aligned_addr); + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + pr_warn("Address %p and size %d are not aligned\n", addr, size); + return 0; + } + + if (size > 2) + return val; + + return (val >> (8 * offset)) & ((1 << (size * 8)) - 1); } -static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) +static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value) { - return readl(pcie->reg_base + reg); + void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4); + unsigned int offset = (unsigned long)addr & 0x3; + u32 mask; + u32 val; + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + pr_warn("Address %p and size %d are not aligned\n", addr, size); + return; + } + + if (size > 2) { + writel(value, addr); + return; + } + + mask = ~(((1 << (size * 8)) - 1) << (offset * 8)); + val = readl(aligned_addr) & mask; + val |= value << (offset * 8); + writel(val, aligned_addr); } /* Root Port register access */ static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) { - writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg; + + cdns_pcie_write_sz(addr, 0x1, value); } static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie, u32 reg, u16 value) { - writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg; + + cdns_pcie_write_sz(addr, 0x2, value); } /* Endpoint Function register access */ static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn, u32 reg, u8 value) { - writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + cdns_pcie_write_sz(addr, 0x1, value); } static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn, u32 reg, u16 value) { - writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + cdns_pcie_write_sz(addr, 0x2, value); } static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, @@ -349,14 +439,11 @@ static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, writel(value, pcie->reg_base + 
CDNS_PCIE_EP_FUNC_BASE(fn) + reg); } -static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg) -{ - return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg) { - return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + return cdns_pcie_read_sz(addr, 0x2); } static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) @@ -364,13 +451,43 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); } +static inline int cdns_pcie_start_link(struct cdns_pcie *pcie) +{ + if (pcie->ops->start_link) + return pcie->ops->start_link(pcie); + + return 0; +} + +static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie) +{ + if (pcie->ops->stop_link) + pcie->ops->stop_link(pcie); +} + +static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie) +{ + if (pcie->ops->link_up) + return pcie->ops->link_up(pcie); + + return true; +} + #ifdef CONFIG_PCIE_CADENCE_HOST int cdns_pcie_host_setup(struct cdns_pcie_rc *rc); +void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where); #else static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) { return 0; } + +static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + return NULL; +} #endif #ifdef CONFIG_PCIE_CADENCE_EP diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index e5d0c7ac09b9..a896b9b6e274 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -2,7 +2,7 @@ /* * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs * - * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com * * Authors: Kishon Vijay Abraham I <kishon@ti.com> */ diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index c5043d951e80..a075eba45abb 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Samsung Exynos SoCs * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 8f08ae53f53e..4e5c379ae418 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Freescale i.MX6 SoCs * * Copyright (C) 2013 Kosagi - * http://www.kosagi.com + * https://www.kosagi.com * * Author: Sean Cross <xobs@kosagi.com> */ diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 00279002102e..0b7ed21cf8d0 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Texas Instruments Keystone SoCs * * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
- * http://www.ti.com + * https://www.ti.com * * Author: Murali Karicheri <m-karicheri2@ti.com> * Implementation based on pci-exynos.c and pcie-designware.c diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 0a4a5aa6fe46..2cb286a49293 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index c92496e36fd5..b723e0cc41fb 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 656e00f8fbeb..90915dc89e08 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index e5e765038686..9adaa65c581f 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Kirin Phone SoCs * * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. - * http://www.huawei.com + * https://www.huawei.com * * Author: Xiaowei Song <songxiaowei@huawei.com> */ diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 90ff291c24f0..1258f45cbc11 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -644,6 +644,13 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) return false; + /* + * If the link goes down after we check for link-up, nothing bad + * happens but the config access times out. 
+ */ + if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie)) + return false; + return true; } @@ -688,8 +695,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, advk_writel(pcie, 1, PIO_START); ret = advk_pcie_wait_pio(pcie); - if (ret < 0) + if (ret < 0) { + *val = 0xffffffff; return PCIBIOS_SET_FAILED; + } advk_pcie_check_pio_status(pcie); diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index c5eb509c72f0..f979b7098acf 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -352,7 +352,7 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) * -ENODEV Not a valid drc_name * -EIO Internal PCI Error */ -int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) +static int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) { struct pci_bus *bus; struct slot *slot; @@ -458,7 +458,7 @@ static inline int is_dlpar_capable(void) return (int) (rc != RTAS_UNKNOWN_SERVICE); } -int __init rpadlpar_io_init(void) +static int __init rpadlpar_io_init(void) { if (!is_dlpar_capable()) { @@ -470,7 +470,7 @@ int __init rpadlpar_io_init(void) return dlpar_sysfs_init(); } -void rpadlpar_io_exit(void) +static void __exit rpadlpar_io_exit(void) { dlpar_sysfs_exit(); } diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index a1de501a2729..12ecd0aaa28d 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -6,61 +6,11 @@ * Copyright (C) 2017 Christoph Hellwig. */ -#include <linux/acpi.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> -static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) -{ - struct pci_dev *parent = to_pci_dev(pdev->dev.parent); - - pci_err(pdev, "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", - dev_name(&parent->dev), parent->vendor, parent->device); - pci_err(pdev, "%s\n", reason); - pci_err(pdev, "Please report to linux-kernel@vger.kernel.org\n"); - WARN_ON(1); -} - -/** - * pci_lost_interrupt - reports a lost PCI interrupt - * @pdev: device whose interrupt is lost - * - * The primary function of this routine is to report a lost interrupt - * in a standard way which users can recognise (instead of blaming the - * driver). - * - * Returns: - * a suggestion for fixing it (although the driver is not required to - * act on this). 
- */ -enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev) -{ - if (pdev->msi_enabled || pdev->msix_enabled) { - enum pci_lost_interrupt_reason ret; - - if (pdev->msix_enabled) { - pci_note_irq_problem(pdev, "MSIX routing failure"); - ret = PCI_LOST_IRQ_DISABLE_MSIX; - } else { - pci_note_irq_problem(pdev, "MSI routing failure"); - ret = PCI_LOST_IRQ_DISABLE_MSI; - } - return ret; - } -#ifdef CONFIG_ACPI - if (!(acpi_disabled || acpi_noirq)) { - pci_note_irq_problem(pdev, "Potential ACPI misrouting please reboot with acpi=noirq"); - /* currently no way to fix acpi on the fly */ - return PCI_LOST_IRQ_DISABLE_ACPI; - } -#endif - pci_note_irq_problem(pdev, "unknown cause (not MSI or ACPI)"); - return PCI_LOST_IRQ_NO_INFORMATION; -} -EXPORT_SYMBOL(pci_lost_interrupt); - /** * pci_request_irq - allocate an interrupt line for a PCI device * @dev: PCI device to operate on diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 6b43a5455c7a..cade9be68b09 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1191,8 +1191,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, struct irq_affinity *affd) { struct irq_affinity msi_default_affd = {0}; - int msix_vecs = -ENOSPC; - int msi_vecs = -ENOSPC; + int nvecs = -ENOSPC; if (flags & PCI_IRQ_AFFINITY) { if (!affd) @@ -1203,17 +1202,16 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } if (flags & PCI_IRQ_MSIX) { - msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, - max_vecs, affd, flags); - if (msix_vecs > 0) - return msix_vecs; + nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, + affd, flags); + if (nvecs > 0) + return nvecs; } if (flags & PCI_IRQ_MSI) { - msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, - affd); - if (msi_vecs > 0) - return msi_vecs; + nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); + if (nvecs > 0) + return nvecs; } /* use legacy IRQ if allowed */ @@ -1231,9 +1229,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } } - if (msix_vecs == -ENOSPC) - return -ENOSPC; - return msi_vecs; + return nvecs; } EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 27839cd2459f..22727fc9558d 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -42,7 +42,7 @@ void pci_set_bus_of_node(struct pci_bus *bus) } else { node = of_node_get(bus->self->dev.of_node); if (node && of_property_read_bool(node, "external-facing")) - bus->self->untrusted = true; + bus->self->external_facing = true; } bus->dev.of_node = node; diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index e8e444eeb1cd..64ebed129dbf 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -253,7 +253,7 @@ static int pci_bridge_has_acs_redir(struct pci_dev *pdev) int pos; u16 ctrl; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); + pos = pdev->acs_cap; if (!pos) return 0; @@ -273,6 +273,19 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev) seq_buf_printf(buf, "%s;", pci_name(pdev)); } +static bool cpu_supports_p2pdma(void) +{ +#ifdef CONFIG_X86 + struct cpuinfo_x86 *c = &cpu_data(0); + + /* Any AMD CPU whose family ID is Zen or newer supports p2pdma */ + if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) + return true; +#endif + + return false; +} + static const struct pci_p2pdma_whitelist_entry { unsigned short vendor; unsigned short device; @@ -280,11 +293,6 @@ static const struct pci_p2pdma_whitelist_entry { 
REQ_SAME_HOST_BRIDGE = 1 << 0, } flags; } pci_p2pdma_whitelist[] = { - /* AMD ZEN */ - {PCI_VENDOR_ID_AMD, 0x1450, 0}, - {PCI_VENDOR_ID_AMD, 0x15d0, 0}, - {PCI_VENDOR_ID_AMD, 0x1630, 0}, - /* Intel Xeon E5/Core i7 */ {PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE}, {PCI_VENDOR_ID_INTEL, 0x3c01, REQ_SAME_HOST_BRIDGE}, @@ -473,7 +481,8 @@ upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client, acs_redirects, acs_list); if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) { - if (!host_bridge_whitelist(provider, client)) + if (!cpu_supports_p2pdma() && + !host_bridge_whitelist(provider, client)) map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED; } diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 7224b1e5f2a8..54520d34e27e 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -1213,7 +1213,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev, ACPI_FREE(obj); } -static void pci_acpi_set_untrusted(struct pci_dev *dev) +static void pci_acpi_set_external_facing(struct pci_dev *dev) { u8 val; @@ -1224,11 +1224,10 @@ static void pci_acpi_set_untrusted(struct pci_dev *dev) /* * These root ports expose PCIe (including DMA) outside of the - * system so make sure we treat them and everything behind as - * untrusted. + * system. Everything downstream from them is external. */ if (val) - dev->untrusted = 1; + dev->external_facing = 1; } static void pci_acpi_setup(struct device *dev) @@ -1240,7 +1239,7 @@ static void pci_acpi_setup(struct device *dev) return; pci_acpi_optimize_delay(pci_dev, adev->handle); - pci_acpi_set_untrusted(pci_dev); + pci_acpi_set_external_facing(pci_dev); pci_acpi_add_edr_notifier(pci_dev); pci_acpi_add_pm_notifier(adev, pci_dev); diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index 707dd9808676..781e45cf60d1 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -18,7 +18,7 @@ * the instance number and string from the type 41 record and exports * it to sysfs. * - * Please see http://linux.dell.com/files/biosdevname/ for more + * Please see https://linux.dell.com/files/biosdevname/ for more * information. */ diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index ce096272f52b..501035cc0409 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -777,6 +777,133 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) return 0; } +static int pci_acs_enable; + +/** + * pci_request_acs - ask for ACS to be enabled if supported + */ +void pci_request_acs(void) +{ + pci_acs_enable = 1; +} + +static const char *disable_acs_redir_param; + +/** + * pci_disable_acs_redir - disable ACS redirect capabilities + * @dev: the PCI device + * + * For only devices specified in the disable_acs_redir parameter. 
+ */ +static void pci_disable_acs_redir(struct pci_dev *dev) +{ + int ret = 0; + const char *p; + int pos; + u16 ctrl; + + if (!disable_acs_redir_param) + return; + + p = disable_acs_redir_param; + while (*p) { + ret = pci_dev_str_match(dev, p, &p); + if (ret < 0) { + pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", + disable_acs_redir_param); + + break; + } else if (ret == 1) { + /* Found a match */ + break; + } + + if (*p != ';' && *p != ',') { + /* End of param or invalid format */ + break; + } + p++; + } + + if (ret != 1) + return; + + if (!pci_dev_specific_disable_acs_redir(dev)) + return; + + pos = dev->acs_cap; + if (!pos) { + pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); + return; + } + + pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); + + /* P2P Request & Completion Redirect */ + ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); + + pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); + + pci_info(dev, "disabled ACS redirect\n"); +} + +/** + * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities + * @dev: the PCI device + */ +static void pci_std_enable_acs(struct pci_dev *dev) +{ + int pos; + u16 cap; + u16 ctrl; + + pos = dev->acs_cap; + if (!pos) + return; + + pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); + pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); + + /* Source Validation */ + ctrl |= (cap & PCI_ACS_SV); + + /* P2P Request Redirect */ + ctrl |= (cap & PCI_ACS_RR); + + /* P2P Completion Redirect */ + ctrl |= (cap & PCI_ACS_CR); + + /* Upstream Forwarding */ + ctrl |= (cap & PCI_ACS_UF); + + pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); +} + +/** + * pci_enable_acs - enable ACS if hardware support it + * @dev: the PCI device + */ +static void pci_enable_acs(struct pci_dev *dev) +{ + if (!pci_acs_enable) + goto disable_acs_redir; + + if (!pci_dev_specific_enable_acs(dev)) + goto disable_acs_redir; + + pci_std_enable_acs(dev); + +disable_acs_redir: + /* + * Note: pci_disable_acs_redir() must be called even if ACS was not + * enabled by the kernel because it may have been enabled by + * platform firmware. So if we are told to disable it, we should + * always disable it after setting the kernel's default + * preferences. + */ + pci_disable_acs_redir(dev); +} + /** * pci_restore_bars - restore a device's BAR values (e.g. after wake-up) * @dev: PCI device to have its BARs restored @@ -2046,6 +2173,14 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) } EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); +void pcie_clear_device_status(struct pci_dev *dev) +{ + u16 sta; + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); + pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); +} + /** * pcie_clear_root_pme_status - Clear root port PME interrupt status. * @dev: PCIe root port or event collector. @@ -3230,139 +3365,12 @@ void pci_configure_ari(struct pci_dev *dev) } } -static int pci_acs_enable; - -/** - * pci_request_acs - ask for ACS to be enabled if supported - */ -void pci_request_acs(void) -{ - pci_acs_enable = 1; -} - -static const char *disable_acs_redir_param; - -/** - * pci_disable_acs_redir - disable ACS redirect capabilities - * @dev: the PCI device - * - * For only devices specified in the disable_acs_redir parameter. 
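The new pcie_clear_device_status() above relies on the PCIe rule that Device Status bits are RW1C (write 1 to clear), so writing back the value just read clears exactly the bits that were set. A short sketch of the same pattern applied to an arbitrary capability register; demo_clear_rw1c_word() is an illustrative name only:

#include <linux/pci.h>

/* Illustrative only: clear whatever RW1C bits are currently set at @pos. */
static void demo_clear_rw1c_word(struct pci_dev *dev, int pos)
{
	u16 sta;

	pcie_capability_read_word(dev, pos, &sta);
	pcie_capability_write_word(dev, pos, sta);
}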
- */ -static void pci_disable_acs_redir(struct pci_dev *dev) -{ - int ret = 0; - const char *p; - int pos; - u16 ctrl; - - if (!disable_acs_redir_param) - return; - - p = disable_acs_redir_param; - while (*p) { - ret = pci_dev_str_match(dev, p, &p); - if (ret < 0) { - pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", - disable_acs_redir_param); - - break; - } else if (ret == 1) { - /* Found a match */ - break; - } - - if (*p != ';' && *p != ',') { - /* End of param or invalid format */ - break; - } - p++; - } - - if (ret != 1) - return; - - if (!pci_dev_specific_disable_acs_redir(dev)) - return; - - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); - if (!pos) { - pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); - return; - } - - pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); - - /* P2P Request & Completion Redirect */ - ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); - - pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); - - pci_info(dev, "disabled ACS redirect\n"); -} - -/** - * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities - * @dev: the PCI device - */ -static void pci_std_enable_acs(struct pci_dev *dev) -{ - int pos; - u16 cap; - u16 ctrl; - - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); - if (!pos) - return; - - pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); - pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); - - /* Source Validation */ - ctrl |= (cap & PCI_ACS_SV); - - /* P2P Request Redirect */ - ctrl |= (cap & PCI_ACS_RR); - - /* P2P Completion Redirect */ - ctrl |= (cap & PCI_ACS_CR); - - /* Upstream Forwarding */ - ctrl |= (cap & PCI_ACS_UF); - - pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); -} - -/** - * pci_enable_acs - enable ACS if hardware support it - * @dev: the PCI device - */ -void pci_enable_acs(struct pci_dev *dev) -{ - if (!pci_acs_enable) - goto disable_acs_redir; - - if (!pci_dev_specific_enable_acs(dev)) - goto disable_acs_redir; - - pci_std_enable_acs(dev); - -disable_acs_redir: - /* - * Note: pci_disable_acs_redir() must be called even if ACS was not - * enabled by the kernel because it may have been enabled by - * platform firmware. So if we are told to disable it, we should - * always disable it after setting the kernel's default - * preferences. 
- */ - pci_disable_acs_redir(dev); -} - static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) { int pos; u16 cap, ctrl; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); + pos = pdev->acs_cap; if (!pos) return false; @@ -3488,6 +3496,18 @@ bool pci_acs_path_enabled(struct pci_dev *start, } /** + * pci_acs_init - Initialize ACS if hardware supports it + * @dev: the PCI device + */ +void pci_acs_init(struct pci_dev *dev) +{ + dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + + if (dev->acs_cap) + pci_enable_acs(dev); +} + +/** * pci_rebar_find_pos - find position of resize ctrl reg for BAR * @pdev: PCI device * @bar: BAR to find @@ -5688,6 +5708,7 @@ EXPORT_SYMBOL(pcie_get_readrq); int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; + int ret; if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; @@ -5706,8 +5727,10 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; - return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); + + return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_readrq); @@ -5738,6 +5761,7 @@ EXPORT_SYMBOL(pcie_get_mps); int pcie_set_mps(struct pci_dev *dev, int mps) { u16 v; + int ret; if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) return -EINVAL; @@ -5747,8 +5771,10 @@ int pcie_set_mps(struct pci_dev *dev, int mps) return -EINVAL; v <<= 5; - return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_PAYLOAD, v); + + return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_mps); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6d3f75867106..42b95dba772a 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -92,6 +92,7 @@ void pci_refresh_power_state(struct pci_dev *dev); int pci_power_up(struct pci_dev *dev); void pci_disable_enabled_device(struct pci_dev *dev); int pci_finish_runtime_suspend(struct pci_dev *dev); +void pcie_clear_device_status(struct pci_dev *dev); void pcie_clear_root_pme_status(struct pci_dev *dev); bool pci_check_pme_status(struct pci_dev *dev); void pci_pme_wakeup_bus(struct pci_bus *bus); @@ -532,7 +533,7 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, return resource_alignment(res); } -void pci_enable_acs(struct pci_dev *dev); +void pci_acs_init(struct pci_dev *dev); #ifdef CONFIG_PCI_QUIRKS int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); int pci_dev_specific_enable_acs(struct pci_dev *dev); @@ -555,7 +556,7 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) /* PCI error reporting and recovery */ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, pci_ers_result_t (*reset_link)(struct pci_dev *pdev)); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); @@ -658,7 +659,6 @@ void pci_aer_init(struct pci_dev *dev); void pci_aer_exit(struct pci_dev *dev); extern const struct attribute_group aer_stats_attr_group; void pci_aer_clear_fatal_status(struct pci_dev *dev); -void pci_aer_clear_device_status(struct pci_dev *dev); int pci_aer_clear_status(struct pci_dev *dev); int pci_aer_raw_clear_status(struct pci_dev *dev); #else @@ -666,7 +666,6 @@ static inline void pci_no_aer(void) { } static inline void pci_aer_init(struct pci_dev *d) { } static inline void pci_aer_exit(struct pci_dev *d) { } static inline void 
pci_aer_clear_fatal_status(struct pci_dev *dev) { } -static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } static inline int pci_aer_clear_status(struct pci_dev *dev) { return -EINVAL; } static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL; } #endif diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 9cd31331aee9..3946555a6042 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -43,7 +43,7 @@ config PCIEAER_INJECT error injection can fake almost all kinds of errors with the help of a user space helper tool aer-inject, which can be gotten from: - http://www.kernel.org/pub/linux/utils/pci/aer-inject/ + https://www.kernel.org/pub/linux/utils/pci/aer-inject/ # # PCI Express ECRC diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 3acf56683915..87283cda3990 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -224,31 +224,28 @@ int pcie_aer_is_native(struct pci_dev *dev) int pci_enable_pcie_error_reporting(struct pci_dev *dev) { + int rc; + if (!pcie_aer_is_native(dev)) return -EIO; - return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + rc = pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + return pcibios_err_to_errno(rc); } EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); int pci_disable_pcie_error_reporting(struct pci_dev *dev) { + int rc; + if (!pcie_aer_is_native(dev)) return -EIO; - return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, - PCI_EXP_AER_FLAGS); + rc = pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + return pcibios_err_to_errno(rc); } EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); -void pci_aer_clear_device_status(struct pci_dev *dev) -{ - u16 sta; - - pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); - pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); -} - int pci_aer_clear_nonfatal_status(struct pci_dev *dev) { int aer = dev->aer_cap; @@ -447,7 +444,7 @@ static const char *aer_error_layer[] = { "Transaction Layer" }; -static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = { +static const char *aer_correctable_error_string[] = { "RxErr", /* Bit Position 0 */ NULL, NULL, @@ -464,9 +461,25 @@ static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = { "NonFatalErr", /* Bit Position 13 */ "CorrIntErr", /* Bit Position 14 */ "HeaderOF", /* Bit Position 15 */ + NULL, /* Bit Position 16 */ + NULL, /* Bit Position 17 */ + NULL, /* Bit Position 18 */ + NULL, /* Bit Position 19 */ + NULL, /* Bit Position 20 */ + NULL, /* Bit Position 21 */ + NULL, /* Bit Position 22 */ + NULL, /* Bit Position 23 */ + NULL, /* Bit Position 24 */ + NULL, /* Bit Position 25 */ + NULL, /* Bit Position 26 */ + NULL, /* Bit Position 27 */ + NULL, /* Bit Position 28 */ + NULL, /* Bit Position 29 */ + NULL, /* Bit Position 30 */ + NULL, /* Bit Position 31 */ }; -static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = { +static const char *aer_uncorrectable_error_string[] = { "Undefined", /* Bit Position 0 */ NULL, NULL, @@ -494,6 +507,11 @@ static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = { "AtomicOpBlocked", /* Bit Position 24 */ "TLPBlockedErr", /* Bit Position 25 */ "PoisonTLPBlocked", /* Bit Position 26 */ + NULL, /* Bit Position 27 */ + NULL, /* Bit Position 28 */ + NULL, /* Bit Position 29 */ + NULL, /* Bit Position 30 */ + NULL, /* Bit Position 31 */ }; static const char *aer_agent_string[] = { @@ -650,24 +668,26 @@ static 
void __print_tlp_header(struct pci_dev *dev, static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { + const char **strings; unsigned long status = info->status & ~info->mask; - const char *errmsg = NULL; + const char *level, *errmsg; int i; + if (info->severity == AER_CORRECTABLE) { + strings = aer_correctable_error_string; + level = KERN_WARNING; + } else { + strings = aer_uncorrectable_error_string; + level = KERN_ERR; + } + for_each_set_bit(i, &status, 32) { - if (info->severity == AER_CORRECTABLE) - errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ? - aer_correctable_error_string[i] : NULL; - else - errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ? - aer_uncorrectable_error_string[i] : NULL; + errmsg = strings[i]; + if (!errmsg) + errmsg = "Unknown Error Bit"; - if (errmsg) - pci_err(dev, " [%2d] %-22s%s\n", i, errmsg, + pci_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg, info->first_error == i ? " (First)" : ""); - else - pci_err(dev, " [%2d] Unknown Error Bit%s\n", - i, info->first_error == i ? " (First)" : ""); } pci_dev_aer_stats_incr(dev, info); } @@ -676,6 +696,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { int layer, agent; int id = ((dev->bus->number << 8) | dev->devfn); + const char *level; if (!info->status) { pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n", @@ -686,13 +707,14 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) layer = AER_GET_LAYER_ERROR(info->severity, info->status); agent = AER_GET_AGENT(info->severity, info->status); - pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n", - aer_error_severity_string[info->severity], - aer_error_layer[layer], aer_agent_string[agent]); + level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR; + + pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n", + aer_error_severity_string[info->severity], + aer_error_layer[layer], aer_agent_string[agent]); - pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n", - dev->vendor, dev->device, - info->status, info->mask); + pci_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n", + dev->vendor, dev->device, info->status, info->mask); __aer_print_error(dev, info); @@ -922,7 +944,8 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info) if (aer) pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS, info->status); - pci_aer_clear_device_status(dev); + if (pcie_aer_is_native(dev)) + pcie_clear_device_status(dev); } else if (info->severity == AER_NONFATAL) pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset); else if (info->severity == AER_FATAL) diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 21cc3d3387f7..c2cbf425afc5 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -6,7 +6,7 @@ * trigger various real hardware errors. Software based error * injection can fake almost all kinds of errors with the help of a * user space helper tool aer-inject, which can be gotten from: - * http://www.kernel.org/pub/linux/utils/pci/aer-inject/ + * https://www.kernel.org/pub/linux/utils/pci/aer-inject/ * * Copyright 2009 Intel Corporation. 
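The __aer_print_error() rewrite above can index the string tables directly with for_each_set_bit() because both tables now span all 32 bit positions, with NULL marking reserved bits. A small self-contained sketch of that lookup pattern, using a hypothetical demo table (only bit 0, "RxErr", is taken from the real correctable table):

#include <linux/bitops.h>
#include <linux/printk.h>

static const char *const demo_error_strings[32] = {
	[0] = "RxErr",	/* from the correctable table above */
	/* unnamed positions stay NULL and print as "Unknown Error Bit" */
};

static void demo_print_status_bits(unsigned long status)
{
	int i;

	for_each_set_bit(i, &status, 32)
		pr_info(" [%2d] %s\n", i,
			demo_error_strings[i] ? demo_error_strings[i]
					      : "Unknown Error Bit");
}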
* Huang Ying <ying.huang@intel.com> diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index b17e5ffd31b1..253c30cc1967 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1182,6 +1182,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); else cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); + cnt += sprintf(buffer + cnt, "\n"); return cnt; } diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 14bb8f54723e..c543f419d8f9 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -46,7 +46,7 @@ static pci_ers_result_t merge_result(enum pci_ers_result orig, } static int report_error_detected(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, enum pci_ers_result *result) { pci_ers_result_t vote; @@ -147,7 +147,7 @@ out: } pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, pci_ers_result_t (*reset_link)(struct pci_dev *pdev)) { pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; @@ -197,7 +197,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, pci_dbg(dev, "broadcast resume message\n"); pci_walk_bus(bus, report_resume, &status); - pci_aer_clear_device_status(dev); + if (pcie_aer_is_native(dev)) + pcie_clear_device_status(dev); pci_aer_clear_nonfatal_status(dev); pci_info(dev, "device recovery successful\n"); return status; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 3acf151ae015..3a3ce40ae1ab 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -146,7 +146,7 @@ static void pcie_portdrv_remove(struct pci_dev *dev) } static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, - enum pci_channel_state error) + pci_channel_state_t error) { /* Root Port has no impact. Always recovers. */ return PCI_ERS_RESULT_CAN_RECOVER; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 2f66988cea25..494333df46af 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1552,7 +1552,7 @@ static void set_pcie_untrusted(struct pci_dev *dev) * untrusted as well. 
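The err.c and portdrv hunks above switch the recovery paths from enum pci_channel_state to the pci_channel_state_t typedef; driver error handlers use the same typedef in their callbacks. A hedged sketch of a hypothetical driver-side .error_detected() callback, for comparison only:

#include <linux/pci.h>

/* Hypothetical driver callback, illustrating the typedef in a signature. */
static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_NEED_RESET;
}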
*/ parent = pci_upstream_bridge(dev); - if (parent && parent->untrusted) + if (parent && (parent->untrusted || parent->external_facing)) dev->untrusted = true; } @@ -1802,9 +1802,6 @@ int pci_setup_device(struct pci_dev *dev) dev->revision = class & 0xff; dev->class = class >> 8; /* upper 3 bytes */ - pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", - dev->vendor, dev->device, dev->hdr_type, dev->class); - if (pci_early_dump) early_dump_pci_device(dev); @@ -1822,6 +1819,9 @@ int pci_setup_device(struct pci_dev *dev) /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); + pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", + dev->vendor, dev->device, dev->hdr_type, dev->class); + /* Device class may be changed after fixup */ class = dev->class >> 8; @@ -2390,7 +2390,7 @@ static void pci_init_capabilities(struct pci_dev *dev) pci_ats_init(dev); /* Address Translation Services */ pci_pri_init(dev); /* Page Request Interface */ pci_pasid_init(dev); /* Process Address Space ID */ - pci_enable_acs(dev); /* Enable ACS P2P upstream forwarding */ + pci_acs_init(dev); /* Access Control Services */ pci_ptm_init(dev); /* Precision Time Measurement */ pci_aer_init(dev); /* Advanced Error Reporting */ pci_dpc_init(dev); /* Downstream Port Containment */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 812bfc32ecb8..a81315eeb092 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3549,7 +3549,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev) * The device will throw a Link Down error on AER-capable systems and * regardless of AER, config space of the device is never accessible again * and typically causes the system to hang or reset when access is attempted. - * http://www.spinics.net/lists/linux-pci/msg34797.html + * https://lore.kernel.org/r/20140923210318.498dacbd@dualc.maya.org/ */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); @@ -4378,9 +4378,9 @@ static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena) * redirect (CR) since all transactions are redirected to the upstream * root complex. 
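pci_acs_init() above caches the ACS extended capability offset in dev->acs_cap during enumeration, so the quirk and p2pdma sites touched by this series become a field read plus a config access instead of a capability-list walk. A minimal sketch under that assumption (the acs_cap field added to struct pci_dev by this series); demo_read_acs_ctrl() is a hypothetical helper:

#include <linux/pci.h>

/* Hypothetical helper: return the current ACS control value, or 0 if absent. */
static u16 demo_read_acs_ctrl(struct pci_dev *pdev)
{
	u16 ctrl = 0;

	if (pdev->acs_cap)
		pci_read_config_word(pdev, pdev->acs_cap + PCI_ACS_CTRL, &ctrl);

	return ctrl;
}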
* - * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086 - * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102 - * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402 + * https://lore.kernel.org/r/201207111426.q6BEQTbh002928@mail.maya.org/ + * https://lore.kernel.org/r/20120711165854.GM25282@amd.com/ + * https://lore.kernel.org/r/20121005130857.GX4009@amd.com/ * * 1002:4385 SBx00 SMBus Controller * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller @@ -4409,6 +4409,8 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) if (ACPI_FAILURE(status)) return -ENODEV; + acpi_put_table(header); + /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); @@ -4620,11 +4622,11 @@ static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags) * * 0x9d10-0x9d1b PCI Express Root port #{1-12} * - * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html - * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html - * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html - * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html - * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html + * [1] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html + * [2] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html + * [3] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html + * [4] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html + * [5] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html */ @@ -4653,7 +4655,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -4961,7 +4963,7 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -4988,7 +4990,7 @@ static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -5192,7 +5194,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if (pdev->device == 0x7340 && pdev->revision != 0xc5) + if ((pdev->device == 0x7312 && pdev->revision != 0x00) || + (pdev->device == 0x7340 && pdev->revision != 0xc5)) return; pci_info(pdev, "disabling ATS\n"); @@ -5203,6 +5206,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, 
quirk_amd_harvest_no_ats); +/* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ @@ -5355,7 +5360,7 @@ int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout) bool found; struct pci_dev *bridge = bus->self; - pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS); + pos = bridge->acs_cap; /* Disable ACS SV before initial config reads */ if (pos) { diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 9b94b1f16d80..4d870ed89385 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -152,7 +152,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) - panic("pdev_sort_resources(): kmalloc() failed!\n"); + panic("%s: kzalloc() failed!\n", __func__); tmp->res = r; tmp->dev = dev; diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index d21fa04fa44d..43eda101fcf4 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -73,7 +73,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) /* * Apparently some Matrox devices have ROM BARs that read * as zero when disabled, so don't update ROM BARs unless - * they're enabled. See https://lkml.org/lkml/2005/8/30/138. + * they're enabled. See + * https://lore.kernel.org/r/43147B3D.1030309@vc.cvut.cz/ */ if (!(res->flags & IORESOURCE_ROM_ENABLE)) return; diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index cc386ef2fa12..3861505741e6 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -268,13 +268,16 @@ placeholder: slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + kfree(slot); goto err; } err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); - if (err) + if (err) { + kobject_put(&slot->kobj); goto err; + } INIT_LIST_HEAD(&slot->list); list_add(&slot->list, &parent->slots); @@ -293,7 +296,6 @@ out: mutex_unlock(&pci_slot_mutex); return slot; err: - kfree(slot); slot = ERR_PTR(err); goto out; } diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 850cfeb74608..ba52459928f7 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -940,7 +940,7 @@ static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev, size_t off; if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS) - return ERR_PTR(-EINVAL); + return (u32 __iomem *)ERR_PTR(-EINVAL); off = event_regs[event_id].offset; @@ -948,10 +948,10 @@ static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev, if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX) index = stdev->partition; else if (index < 0 || index >= stdev->partition_count) - return ERR_PTR(-EINVAL); + return (u32 __iomem *)ERR_PTR(-EINVAL); } else if (event_regs[event_id].map_reg == pff_ev_reg) { if (index < 0 || index >= stdev->pff_csr_count) - return ERR_PTR(-EINVAL); + return (u32 __iomem *)ERR_PTR(-EINVAL); } return event_regs[event_id].map_reg(stdev, off, index); @@ -1057,11 +1057,11 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev, } static int ioctl_pff_to_port(struct switchtec_dev *stdev, - struct switchtec_ioctl_pff_port *up) + struct switchtec_ioctl_pff_port __user *up) { int i, part; u32 reg; - struct part_cfg_regs *pcfg; + struct part_cfg_regs __iomem *pcfg; struct switchtec_ioctl_pff_port p; if (copy_from_user(&p, up, sizeof(p))) @@ -1104,10 +1104,10 @@ 
static int ioctl_pff_to_port(struct switchtec_dev *stdev, } static int ioctl_port_to_pff(struct switchtec_dev *stdev, - struct switchtec_ioctl_pff_port *up) + struct switchtec_ioctl_pff_port __user *up) { struct switchtec_ioctl_pff_port p; - struct part_cfg_regs *pcfg; + struct part_cfg_regs __iomem *pcfg; if (copy_from_user(&p, up, sizeof(p))) return -EFAULT; @@ -1484,7 +1484,7 @@ static void init_pff(struct switchtec_dev *stdev) { int i; u32 reg; - struct part_cfg_regs *pcfg = stdev->mmio_part_cfg; + struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg; for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) { reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id); |
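The pci/slot.c fix above follows the kobject rule that once kobject_init_and_add() has run, cleanup must go through kobject_put() so the ktype's release() frees the object and its name; a bare kfree() is only valid before that point. A self-contained sketch of the rule, with hypothetical demo_* names not taken from the patch:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct demo_obj {
	struct kobject kobj;
};

static void demo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct demo_obj, kobj));
}

static struct kobj_type demo_ktype = {
	.release = demo_release,
};

static struct demo_obj *demo_create(struct kobject *parent, const char *name)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int err;

	if (!obj)
		return NULL;	/* nothing initialized yet: plain failure */

	err = kobject_init_and_add(&obj->kobj, &demo_ktype, parent, "%s", name);
	if (err) {
		kobject_put(&obj->kobj);	/* not kfree(): release() runs */
		return NULL;
	}

	return obj;
}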