Diffstat (limited to 'drivers/pci')
-rw-r--r-- drivers/pci/Kconfig | 12
-rw-r--r-- drivers/pci/Makefile | 1
-rw-r--r-- drivers/pci/ats.c | 3
-rw-r--r-- drivers/pci/dwc/Kconfig | 78
-rw-r--r-- drivers/pci/dwc/pci-dra7xx.c | 19
-rw-r--r-- drivers/pci/dwc/pci-imx6.c | 2
-rw-r--r-- drivers/pci/dwc/pci-keystone.c | 2
-rw-r--r-- drivers/pci/dwc/pcie-armada8k.c | 21
-rw-r--r-- drivers/pci/dwc/pcie-artpec6.c | 6
-rw-r--r-- drivers/pci/dwc/pcie-designware-ep.c | 19
-rw-r--r-- drivers/pci/dwc/pcie-designware-host.c | 80
-rw-r--r-- drivers/pci/dwc/pcie-designware-plat.c | 155
-rw-r--r-- drivers/pci/dwc/pcie-designware.c | 22
-rw-r--r-- drivers/pci/dwc/pcie-designware.h | 1
-rw-r--r-- drivers/pci/dwc/pcie-qcom.c | 13
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-test.c | 35
-rw-r--r-- drivers/pci/endpoint/pci-epf-core.c | 23
-rw-r--r-- drivers/pci/host/Kconfig | 34
-rw-r--r-- drivers/pci/host/pci-aardvark.c | 7
-rw-r--r-- drivers/pci/host/pci-ftpci100.c | 6
-rw-r--r-- drivers/pci/host/pci-host-common.c | 13
-rw-r--r-- drivers/pci/host/pci-host-generic.c | 1
-rw-r--r-- drivers/pci/host/pci-hyperv.c | 162
-rw-r--r-- drivers/pci/host/pci-mvebu.c | 2
-rw-r--r-- drivers/pci/host/pci-rcar-gen2.c | 2
-rw-r--r-- drivers/pci/host/pci-tegra.c | 2
-rw-r--r-- drivers/pci/host/pci-v3-semi.c | 5
-rw-r--r-- drivers/pci/host/pci-versatile.c | 5
-rw-r--r-- drivers/pci/host/pci-xgene.c | 5
-rw-r--r-- drivers/pci/host/pcie-altera.c | 7
-rw-r--r-- drivers/pci/host/pcie-iproc-platform.c | 5
-rw-r--r-- drivers/pci/host/pcie-mediatek.c | 236
-rw-r--r-- drivers/pci/host/pcie-mobiveil.c | 866
-rw-r--r-- drivers/pci/host/pcie-rcar.c | 2
-rw-r--r-- drivers/pci/host/pcie-rockchip.c | 6
-rw-r--r-- drivers/pci/host/pcie-xilinx-nwl.c | 6
-rw-r--r-- drivers/pci/host/pcie-xilinx.c | 6
-rw-r--r-- drivers/pci/hotplug/Kconfig | 5
-rw-r--r-- drivers/pci/hotplug/acpi_pcihp.c | 45
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 84
-rw-r--r-- drivers/pci/hotplug/ibmphp_core.c | 2
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 2
-rw-r--r-- drivers/pci/hotplug/pciehp_core.c | 2
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 84
-rw-r--r-- drivers/pci/hotplug/pnv_php.c | 8
-rw-r--r-- drivers/pci/hotplug/shpchp.h | 12
-rw-r--r-- drivers/pci/hotplug/shpchp_core.c | 14
-rw-r--r-- drivers/pci/hotplug/shpchp_ctrl.c | 8
-rw-r--r-- drivers/pci/iov.c | 42
-rw-r--r-- drivers/pci/of.c | 63
-rw-r--r-- drivers/pci/pci-acpi.c | 55
-rw-r--r-- drivers/pci/pci-driver.c | 2
-rw-r--r-- drivers/pci/pci-pf-stub.c | 54
-rw-r--r-- drivers/pci/pci-sysfs.c | 15
-rw-r--r-- drivers/pci/pci.c | 89
-rw-r--r-- drivers/pci/pci.h | 45
-rw-r--r-- drivers/pci/pcie/Makefile | 2
-rw-r--r-- drivers/pci/pcie/aer/aerdrv.c | 11
-rw-r--r-- drivers/pci/pcie/aer/aerdrv.h | 32
-rw-r--r-- drivers/pci/pcie/aer/aerdrv_core.c | 397
-rw-r--r-- drivers/pci/pcie/aer/aerdrv_errprint.c | 38
-rw-r--r-- drivers/pci/pcie/aspm.c | 9
-rw-r--r-- drivers/pci/pcie/dpc.c | 74
-rw-r--r-- drivers/pci/pcie/err.c | 388
-rw-r--r-- drivers/pci/pcie/portdrv.h | 5
-rw-r--r-- drivers/pci/pcie/portdrv_acpi.c | 57
-rw-r--r-- drivers/pci/pcie/portdrv_core.c | 71
-rw-r--r-- drivers/pci/probe.c | 96
-rw-r--r-- drivers/pci/quirks.c | 24
-rw-r--r-- drivers/pci/setup-bus.c | 82
70 files changed, 2651 insertions(+), 1136 deletions(-)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 34b56a8f8480..cdef2a2a9bc5 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -71,6 +71,18 @@ config PCI_STUB
When in doubt, say N.
+config PCI_PF_STUB
+ tristate "PCI PF Stub driver"
+ depends on PCI
+ depends on PCI_IOV
+ help
+ Say Y or M here if you want to enable support for devices that
+ require SR-IOV support, while at the same time the PF itself is
+ not providing any actual services on the host itself such as
+ storage or networking.
+
+ When in doubt, say N.
+
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
depends on PCI && X86 && XEN
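The new pci-pf-stub.c listed in the diffstat (54 lines) is, by design, little more than an ID table bound to the core's generic SR-IOV enable path. A rough, hedged sketch of what such a stub PF driver looks like — assuming the pci_sriov_configure_simple() helper that the iov.c change in this series provides, and using a made-up device ID:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id pf_stub_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device, for illustration only */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, pf_stub_ids);

static int pf_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	/* Nothing to set up: the PF exists only so that its VFs can be enabled. */
	return 0;
}

static struct pci_driver pf_stub_driver = {
	.name			= "pci-pf-stub",
	.id_table		= pf_stub_ids,
	.probe			= pf_stub_probe,
	.sriov_configure	= pci_sriov_configure_simple,	/* core handles pci_enable_sriov() */
};
module_pci_driver(pf_stub_driver);

MODULE_LICENSE("GPL");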
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 952addc7bacf..84c9eef6b1c3 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PCI_LABEL) += pci-label.o
obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
+obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 89305b569d3d..4923a2a8e14b 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -20,6 +20,9 @@ void pci_ats_init(struct pci_dev *dev)
{
int pos;
+ if (pci_ats_disabled())
+ return;
+
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
if (!pos)
return;
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index 2f3f5c50aa48..7445de139fb4 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
menu "DesignWare PCI Core Support"
+ depends on PCI
config PCIE_DW
bool
config PCIE_DW_HOST
bool
- depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
@@ -22,7 +22,7 @@ config PCI_DRA7XX
config PCI_DRA7XX_HOST
bool "TI DRA7xx PCIe controller Host Mode"
depends on SOC_DRA7XX || COMPILE_TEST
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
depends on OF && HAS_IOMEM && TI_PIPE3
select PCIE_DW_HOST
select PCI_DRA7XX
@@ -51,38 +51,55 @@ config PCI_DRA7XX_EP
This uses the DesignWare core.
config PCIE_DW_PLAT
- bool "Platform bus based DesignWare PCIe Controller"
- depends on PCI
- depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_DW_HOST
- ---help---
- This selects the DesignWare PCIe controller support. Select this if
- you have a PCIe controller on Platform bus.
+ bool
- If you have a controller with this interface, say Y or M here.
+config PCIE_DW_PLAT_HOST
+ bool "Platform bus based DesignWare PCIe Controller - Host mode"
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ select PCIE_DW_PLAT
+ default y
+ help
+ Enables support for the PCIe controller in the Designware IP to
+ work in host mode. There are two instances of PCIe controller in
+ Designware IP.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
+ order to enable device-specific features PCI_DW_PLAT_EP must be
+ selected.
- If unsure, say N.
+config PCIE_DW_PLAT_EP
+ bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_DW_PLAT
+ help
+ Enables support for the PCIe controller in the Designware IP to
+ work in endpoint mode. There are two instances of PCIe controller
+ in Designware IP.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
+ order to enable device-specific features PCI_DW_PLAT_EP must be
+ selected.
config PCI_EXYNOS
bool "Samsung Exynos PCIe controller"
- depends on PCI
- depends on SOC_EXYNOS5440
+ depends on SOC_EXYNOS5440 || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
config PCI_IMX6
bool "Freescale i.MX6 PCIe controller"
- depends on PCI
- depends on SOC_IMX6Q
+ depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
- depends on PCI
- depends on ARCH_SPEAR13XX
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
@@ -91,8 +108,7 @@ config PCIE_SPEAR13XX
config PCI_KEYSTONE
bool "TI Keystone PCIe controller"
- depends on PCI
- depends on ARCH_KEYSTONE
+ depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
@@ -104,8 +120,7 @@ config PCI_KEYSTONE
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
- depends on PCI
- depends on OF && (ARM || ARCH_LAYERSCAPE)
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
select PCIE_DW_HOST
@@ -113,9 +128,8 @@ config PCI_LAYERSCAPE
Say Y here if you want PCIe controller support on Layerscape SoCs.
config PCI_HISI
- depends on OF && ARM64
+ depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
- depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
@@ -126,8 +140,7 @@ config PCI_HISI
config PCIE_QCOM
bool "Qualcomm PCIe controller"
- depends on PCI
- depends on ARCH_QCOM && OF
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
@@ -138,8 +151,7 @@ config PCIE_QCOM
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
- depends on PCI
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
@@ -154,8 +166,8 @@ config PCIE_ARTPEC6
config PCIE_ARTPEC6_HOST
bool "Axis ARTPEC-6 PCIe controller Host Mode"
- depends on MACH_ARTPEC6
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on MACH_ARTPEC6 || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
select PCIE_ARTPEC6
@@ -165,7 +177,7 @@ config PCIE_ARTPEC6_HOST
config PCIE_ARTPEC6_EP
bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
- depends on MACH_ARTPEC6
+ depends on MACH_ARTPEC6 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCIE_ARTPEC6
@@ -174,10 +186,9 @@ config PCIE_ARTPEC6_EP
endpoint mode. This uses the DesignWare core.
config PCIE_KIRIN
- depends on OF && ARM64
+ depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Kirin series SoCs PCIe controllers"
depends on PCI_MSI_IRQ_DOMAIN
- depends on PCI
select PCIEPORTBUS
select PCIE_DW_HOST
help
@@ -186,8 +197,7 @@ config PCIE_KIRIN
config PCIE_HISI_STB
bool "HiSilicon STB SoCs PCIe controllers"
- depends on ARCH_HISI
- depends on PCI
+ depends on ARCH_HISI || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index ed8558d638e5..f688204e50c5 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -27,6 +27,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "../pci.h"
#include "pcie-designware.h"
/* PCIe controller wrapper DRA7XX configuration registers */
@@ -406,14 +407,14 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
ep->ops = &pcie_ep_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
- pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base)
- return -ENOMEM;
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
- pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base2)
- return -ENOMEM;
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
@@ -459,9 +460,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
- pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base)
- return -ENOMEM;
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
pp->ops = &dra7xx_pcie_host_ops;
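The conversions above from devm_ioremap() to devm_ioremap_resource() follow the standard idiom: the helper validates the resource (including a NULL one), requests the memory region, and returns an ERR_PTR() on failure, so the separate NULL check and -ENOMEM path go away. A generic sketch of the pattern, with a hypothetical resource name:

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	base = devm_ioremap_resource(&pdev->dev, res);	/* copes with res == NULL as well */
	if (IS_ERR(base))
		return PTR_ERR(base);	/* e.g. -EINVAL or -EBUSY, not just -ENOMEM */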
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 4818ef875f8a..80f604602783 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -338,7 +338,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
- case IMX6QP: /* FALLTHROUGH */
+ case IMX6QP: /* FALLTHROUGH */
case IMX6Q:
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index d55ae0716adf..3722a5f31e5e 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -89,7 +89,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pci)) {
- dev_err(dev, "Link already up\n");
+ dev_info(dev, "Link already up\n");
return 0;
}
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index b587352f8b9f..072fd7ecc29f 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -28,6 +28,7 @@
struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
+ struct clk *clk_reg;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -229,26 +230,38 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pcie->clk_reg = devm_clk_get(dev, "reg");
+ if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto fail;
+ }
+ if (!IS_ERR(pcie->clk_reg)) {
+ ret = clk_prepare_enable(pcie->clk_reg);
+ if (ret)
+ goto fail_clkreg;
+ }
+
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "couldn't remap regs base %p\n", base);
ret = PTR_ERR(pci->dbi_base);
- goto fail;
+ goto fail_clkreg;
}
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
- goto fail;
+ goto fail_clkreg;
return 0;
+fail_clkreg:
+ clk_disable_unprepare(pcie->clk_reg);
fail:
- if (!IS_ERR(pcie->clk))
- clk_disable_unprepare(pcie->clk);
+ clk_disable_unprepare(pcie->clk);
return ret;
}
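The new "reg" clock above is treated as optional: only -EPROBE_DEFER from devm_clk_get() aborts the probe (the clock provider may simply not be ready yet), while any other error means the clock is absent and is ignored. Condensed, the pattern is:

	clk = devm_clk_get(dev, "reg");		/* optional clock */
	if (clk == ERR_PTR(-EPROBE_DEFER))
		return -EPROBE_DEFER;		/* try again once the provider shows up */
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
	}

The shared fail_clkreg error path in the patch works because clk_disable_unprepare() is a no-op for NULL and error pointers.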
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index e66cede2b5b7..321b56cfd5d0 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -463,9 +463,9 @@ static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
ep->ops = &pcie_ep_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base2)
- return -ENOMEM;
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
index f07678bf7cfc..1eec4415a77f 100644
--- a/drivers/pci/dwc/pcie-designware-ep.c
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -75,7 +75,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
if (free_win >= ep->num_ib_windows) {
- dev_err(pci->dev, "no free inbound window\n");
+ dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
@@ -100,7 +100,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
if (free_win >= ep->num_ob_windows) {
- dev_err(pci->dev, "no free outbound window\n");
+ dev_err(pci->dev, "No free outbound window\n");
return -EINVAL;
}
@@ -204,7 +204,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
if (ret) {
- dev_err(pci->dev, "failed to enable address\n");
+ dev_err(pci->dev, "Failed to enable address\n");
return ret;
}
@@ -348,21 +348,21 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {
- dev_err(dev, "unable to read *num-ib-windows* property\n");
+ dev_err(dev, "Unable to read *num-ib-windows* property\n");
return ret;
}
if (ep->num_ib_windows > MAX_IATU_IN) {
- dev_err(dev, "invalid *num-ib-windows*\n");
+ dev_err(dev, "Invalid *num-ib-windows*\n");
return -EINVAL;
}
ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
if (ret < 0) {
- dev_err(dev, "unable to read *num-ob-windows* property\n");
+ dev_err(dev, "Unable to read *num-ob-windows* property\n");
return ret;
}
if (ep->num_ob_windows > MAX_IATU_OUT) {
- dev_err(dev, "invalid *num-ob-windows*\n");
+ dev_err(dev, "Invalid *num-ob-windows*\n");
return -EINVAL;
}
@@ -389,7 +389,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
- dev_err(dev, "failed to create epc device\n");
+ dev_err(dev, "Failed to create epc device\n");
return PTR_ERR(epc);
}
@@ -411,6 +411,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
}
+ epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
+ EPC_FEATURE_SET_BAR(epc->features, BAR_0);
+
ep->epc = epc;
epc_set_drvdata(epc, ep);
dw_pcie_setup(pci);
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 6c409079d514..cba1432e395d 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -15,6 +15,7 @@
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
+#include "../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
@@ -83,18 +84,23 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
- &val);
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, &val);
if (!val)
continue;
ret = IRQ_HANDLED;
pos = 0;
- while ((pos = find_next_bit((unsigned long *) &val, 32,
- pos)) != 32) {
- irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+ while ((pos = find_next_bit((unsigned long *) &val,
+ MAX_MSI_IRQS_PER_CTRL,
+ pos)) != MAX_MSI_IRQS_PER_CTRL) {
+ irq = irq_find_mapping(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
generic_handle_irq(irq);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE),
4, 1 << pos);
pos++;
}
@@ -157,9 +163,9 @@ static void dw_pci_bottom_mask(struct irq_data *data)
if (pp->ops->msi_clear_irq) {
pp->ops->msi_clear_irq(pp, data->hwirq);
} else {
- ctrl = data->hwirq / 32;
- res = ctrl * 12;
- bit = data->hwirq % 32;
+ ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] &= ~(1 << bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -180,9 +186,9 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
if (pp->ops->msi_set_irq) {
pp->ops->msi_set_irq(pp, data->hwirq);
} else {
- ctrl = data->hwirq / 32;
- res = ctrl * 12;
- bit = data->hwirq % 32;
+ ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -248,8 +254,10 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
+
bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
order_base_2(nr_irqs));
+
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -266,7 +274,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
if (!pp->irq_domain) {
- dev_err(pci->dev, "failed to create IRQ domain\n");
+ dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -274,7 +282,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
&dw_pcie_msi_domain_info,
pp->irq_domain);
if (!pp->msi_domain) {
- dev_err(pci->dev, "failed to create MSI domain\n");
+ dev_err(pci->dev, "Failed to create MSI domain\n");
irq_domain_remove(pp->irq_domain);
return -ENOMEM;
}
@@ -301,13 +309,13 @@ void dw_pcie_msi_init(struct pcie_port *pp)
page = alloc_page(GFP_KERNEL);
pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, pp->msi_data)) {
- dev_err(dev, "failed to map MSI data\n");
+ dev_err(dev, "Failed to map MSI data\n");
__free_page(page);
return;
}
msi_target = (u64)pp->msi_data;
- /* program the msi_data */
+ /* Program the msi_data */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
lower_32_bits(msi_target));
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
@@ -330,19 +338,19 @@ int dw_pcie_host_init(struct pcie_port *pp)
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res) / 2;
- pp->cfg1_size = resource_size(cfg_res) / 2;
+ pp->cfg0_size = resource_size(cfg_res) >> 1;
+ pp->cfg1_size = resource_size(cfg_res) >> 1;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;
} else if (!pp->va_cfg0_base) {
- dev_err(dev, "missing *config* reg space\n");
+ dev_err(dev, "Missing *config* reg space\n");
}
bridge = pci_alloc_host_bridge(0);
if (!bridge)
return -ENOMEM;
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&bridge->windows, &pp->io_base);
if (ret)
return ret;
@@ -357,7 +365,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
case IORESOURCE_IO:
ret = pci_remap_iospace(win->res, pp->io_base);
if (ret) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
+ dev_warn(dev, "Error %d: failed to map resource %pR\n",
ret, win->res);
resource_list_destroy_entry(win);
} else {
@@ -375,8 +383,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
break;
case 0:
pp->cfg = win->res;
- pp->cfg0_size = resource_size(pp->cfg) / 2;
- pp->cfg1_size = resource_size(pp->cfg) / 2;
+ pp->cfg0_size = resource_size(pp->cfg) >> 1;
+ pp->cfg1_size = resource_size(pp->cfg) >> 1;
pp->cfg0_base = pp->cfg->start;
pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
break;
@@ -391,7 +399,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg->start,
resource_size(pp->cfg));
if (!pci->dbi_base) {
- dev_err(dev, "error with ioremap\n");
+ dev_err(dev, "Error with ioremap\n");
ret = -ENOMEM;
goto error;
}
@@ -403,7 +411,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
- dev_err(dev, "error with ioremap in function\n");
+ dev_err(dev, "Error with ioremap in function\n");
ret = -ENOMEM;
goto error;
}
@@ -414,7 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_base,
pp->cfg1_size);
if (!pp->va_cfg1_base) {
- dev_err(dev, "error with ioremap\n");
+ dev_err(dev, "Error with ioremap\n");
ret = -ENOMEM;
goto error;
}
@@ -586,7 +594,7 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
return 0;
}
- /* access only one slot on each root port */
+ /* Access only one slot on each root port */
if (bus->number == pp->root_bus_nr && dev > 0)
return 0;
@@ -650,13 +658,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
- &pp->irq_status[ctrl]);
- /* setup RC BARs */
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, &pp->irq_status[ctrl]);
+
+ /* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
- /* setup interrupt pins */
+ /* Setup interrupt pins */
dw_pcie_dbi_ro_wr_en(pci);
val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
@@ -664,13 +674,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
dw_pcie_dbi_ro_wr_dis(pci);
- /* setup bus numbers */
+ /* Setup bus numbers */
val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00ff0100;
dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
- /* setup command register */
+ /* Setup command register */
val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -683,7 +693,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
- /* get iATU unroll support */
+ /* Get iATU unroll support */
pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
dev_dbg(pci->dev, "iATU unroll: %s\n",
pci->iatu_unroll_enabled ? "enabled" : "disabled");
@@ -701,7 +711,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Enable write permission for the DBI read-only register */
dw_pcie_dbi_ro_wr_en(pci);
- /* program correct class for RC */
+ /* Program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
/* Better disable write permission right after the update */
dw_pcie_dbi_ro_wr_dis(pci);
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 5416aa8a07a5..5937fed4c938 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -12,19 +12,29 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
+#include <linux/regmap.h>
#include "pcie-designware.h"
struct dw_plat_pcie {
- struct dw_pcie *pci;
+ struct dw_pcie *pci;
+ struct regmap *regmap;
+ enum dw_pcie_device_mode mode;
};
+struct dw_plat_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[];
+
static int dw_plat_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -38,13 +48,63 @@ static int dw_plat_pcie_host_init(struct pcie_port *pp)
return 0;
}
+static void dw_plat_set_num_vectors(struct pcie_port *pp)
+{
+ pp->num_vectors = MAX_MSI_IRQS;
+}
+
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
.host_init = dw_plat_pcie_host_init,
+ .set_num_vectors = dw_plat_set_num_vectors,
+};
+
+static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = dw_plat_pcie_establish_link,
};
-static int dw_plat_add_pcie_port(struct pcie_port *pp,
+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u8 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dw_plat_pcie_ep_init,
+ .raise_irq = dw_plat_pcie_ep_raise_irq,
+};
+
+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
struct platform_device *pdev)
{
+ struct dw_pcie *pci = dw_plat_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -63,15 +123,44 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(dev, "failed to initialize host\n");
+ dev_err(dev, "Failed to initialize host\n");
return ret;
}
return 0;
}
-static const struct dw_pcie_ops dw_pcie_ops = {
-};
+static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = dw_plat_pcie->pci;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize endpoint\n");
+ return ret;
+ }
+ return 0;
+}
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
@@ -80,6 +169,16 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct resource *res; /* Resource from DT */
int ret;
+ const struct of_device_id *match;
+ const struct dw_plat_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+
+ match = of_match_device(dw_plat_pcie_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct dw_plat_pcie_of_data *)match->data;
+ mode = (enum dw_pcie_device_mode)data->mode;
dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
if (!dw_plat_pcie)
@@ -93,23 +192,59 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
+ dw_plat_pcie->mode = mode;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pci->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
platform_set_drvdata(pdev, dw_plat_pcie);
- ret = dw_plat_add_pcie_port(&pci->pp, pdev);
- if (ret < 0)
- return ret;
+ switch (dw_plat_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
+ return -ENODEV;
+
+ ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
+ return -ENODEV;
+
+ ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ }
return 0;
}
+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id dw_plat_pcie_of_match[] = {
- { .compatible = "snps,dw-pcie", },
+ {
+ .compatible = "snps,dw-pcie",
+ .data = &dw_plat_pcie_rc_of_data,
+ },
+ {
+ .compatible = "snps,dw-pcie-ep",
+ .data = &dw_plat_pcie_ep_of_data,
+ },
{},
};
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 1b7282e5b494..778c4f76a884 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -69,7 +69,7 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
ret = dw_pcie_read(base + reg, size, &val);
if (ret)
- dev_err(pci->dev, "read DBI address failed\n");
+ dev_err(pci->dev, "Read DBI address failed\n");
return val;
}
@@ -86,7 +86,7 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
ret = dw_pcie_write(base + reg, size, val);
if (ret)
- dev_err(pci->dev, "write DBI address failed\n");
+ dev_err(pci->dev, "Write DBI address failed\n");
}
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -137,7 +137,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "outbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
@@ -180,7 +180,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "outbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -238,7 +238,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "inbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
return -EBUSY;
}
@@ -284,7 +284,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "inbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
return -EBUSY;
}
@@ -313,16 +313,16 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
int retries;
- /* check if the link is up or not */
+ /* Check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "link up\n");
+ dev_info(pci->dev, "Link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(pci->dev, "phy link never came up\n");
+ dev_err(pci->dev, "Phy link never came up\n");
return -ETIMEDOUT;
}
@@ -351,7 +351,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
if (ret)
lanes = 0;
- /* set the number of lanes */
+ /* Set the number of lanes */
val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
switch (lanes) {
@@ -373,7 +373,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
}
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
- /* set link width speed control register */
+ /* Set link width speed control register */
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (lanes) {
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index fe811dbc12cf..bee4e2535a61 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -110,6 +110,7 @@
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE 12
#define MSI_DEF_NUM_VECTORS 32
/* Maximum number of inbound/outbound iATUs */
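The new MSI_REG_CTRL_BLOCK_SIZE gives a name to the bare "12" the host driver had been using when indexing the per-controller MSI registers: each controller handles MAX_MSI_IRQS_PER_CTRL (32) vectors and owns a 12-byte block of ENABLE/MASK/STATUS registers. A small illustrative helper (not part of the patch) showing how a hardware IRQ number maps onto a register offset and bit:

	static void dw_msi_hwirq_to_reg(unsigned int hwirq,
					unsigned int *reg_off, unsigned int *bit)
	{
		unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;	/* which 32-vector controller */

		*reg_off = ctrl * MSI_REG_CTRL_BLOCK_SIZE;	/* 12-byte register stride */
		*bit = hwirq % MAX_MSI_IRQS_PER_CTRL;		/* bit within that controller */
	}

For example, hwirq 45 lands in controller 1, at register offset 12, bit 13 — exactly the arithmetic now spelled out in dw_pci_bottom_mask()/unmask() and dw_handle_msi_irq().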
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5897af7d3355..a1d0198081a6 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -10,7 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -19,6 +19,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
@@ -869,7 +870,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= !BIT(0);
+ val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
/* change DBI base address */
@@ -1088,6 +1089,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
int ret;
+ pm_runtime_get_sync(pci->dev);
qcom_ep_reset_assert(pcie);
ret = pcie->ops->init(pcie);
@@ -1124,6 +1126,7 @@ err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
pcie->ops->deinit(pcie);
+ pm_runtime_put(pci->dev);
return ret;
}
@@ -1212,6 +1215,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ pm_runtime_enable(dev);
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1257,14 +1261,17 @@ static int qcom_pcie_probe(struct platform_device *pdev)
}
ret = phy_init(pcie->phy);
- if (ret)
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
return ret;
+ }
platform_set_drvdata(pdev, pcie);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
+ pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 7cef85124325..63ed706445b9 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -87,7 +87,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
if (!src_addr) {
- dev_err(dev, "failed to allocate source address\n");
+ dev_err(dev, "Failed to allocate source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -96,14 +96,14 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map source address\n");
+ dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
goto err_src_addr;
}
dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
if (!dst_addr) {
- dev_err(dev, "failed to allocate destination address\n");
+ dev_err(dev, "Failed to allocate destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
ret = -ENOMEM;
goto err_src_map_addr;
@@ -112,7 +112,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map destination address\n");
+ dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
goto err_dst_addr;
}
@@ -149,7 +149,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
if (!src_addr) {
- dev_err(dev, "failed to allocate address\n");
+ dev_err(dev, "Failed to allocate address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -158,7 +158,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map address\n");
+ dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
goto err_addr;
}
@@ -201,7 +201,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
if (!dst_addr) {
- dev_err(dev, "failed to allocate address\n");
+ dev_err(dev, "Failed to allocate address\n");
reg->status = STATUS_DST_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -210,7 +210,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map address\n");
+ dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
goto err_addr;
}
@@ -230,7 +230,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
* wait 1ms inorder for the write to complete. Without this delay L3
* error in observed in the host system.
*/
- mdelay(1);
+ usleep_range(1000, 2000);
kfree(buf);
@@ -379,7 +379,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar);
- dev_err(dev, "failed to set BAR%d\n", bar);
+ dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
}
@@ -406,7 +406,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
test_reg_bar);
if (!base) {
- dev_err(dev, "failed to allocated register space\n");
+ dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
}
epf_test->reg[test_reg_bar] = base;
@@ -416,7 +416,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar);
if (!base)
- dev_err(dev, "failed to allocate space for BAR%d\n",
+ dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
epf_test->reg[bar] = base;
}
@@ -435,9 +435,16 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
+ if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
+ epf_test->linkup_notifier = false;
+ else
+ epf_test->linkup_notifier = true;
+
+ epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
+
ret = pci_epc_write_header(epc, epf->func_no, header);
if (ret) {
- dev_err(dev, "configuration header write failed\n");
+ dev_err(dev, "Configuration header write failed\n");
return ret;
}
@@ -519,7 +526,7 @@ static int __init pci_epf_test_init(void)
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
ret = pci_epf_register_driver(&test_driver);
if (ret) {
- pr_err("failed to register pci epf test driver --> %d\n", ret);
+ pr_err("Failed to register pci epf test driver --> %d\n", ret);
return ret;
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 465b5f058b6d..523a8cab3bfb 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -15,6 +15,8 @@
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>
+static DEFINE_MUTEX(pci_epf_mutex);
+
static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
@@ -143,7 +145,13 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
*/
void pci_epf_unregister_driver(struct pci_epf_driver *driver)
{
- pci_ep_cfs_remove_epf_group(driver->group);
+ struct config_group *group;
+
+ mutex_lock(&pci_epf_mutex);
+ list_for_each_entry(group, &driver->epf_group, group_entry)
+ pci_ep_cfs_remove_epf_group(group);
+ list_del(&driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
@@ -159,6 +167,8 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner)
{
int ret;
+ struct config_group *group;
+ const struct pci_epf_device_id *id;
if (!driver->ops)
return -EINVAL;
@@ -173,7 +183,16 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
if (ret)
return ret;
- driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
+ INIT_LIST_HEAD(&driver->epf_group);
+
+ id = driver->id_table;
+ while (id->name[0]) {
+ group = pci_ep_cfs_add_epf_group(id->name);
+ mutex_lock(&pci_epf_mutex);
+ list_add_tail(&group->group_entry, &driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+ id++;
+ }
return 0;
}
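With this change an endpoint function driver gets one configfs group per entry in its id_table instead of a single group named after the driver, and those groups are torn down again on unregister. A hedged sketch of the kind of table a function driver now supplies (names are hypothetical):

	static const struct pci_epf_device_id my_epf_ids[] = {
		{ .name = "pci_epf_test" },
		{ .name = "pci_epf_test_alt" },	/* a second personality, for illustration */
		{ },
	};

	static struct pci_epf_driver my_epf_driver = {
		.driver.name	= "pci_epf_test",
		.probe		= my_epf_probe,
		.id_table	= my_epf_ids,
		.ops		= &my_epf_ops,
		.owner		= THIS_MODULE,
	};

pci_epf_register_driver() then walks the table and creates one directory under pci_ep/functions/ in configfs per entry.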
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 0d0177ce436c..746eaf8a6e8f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -5,13 +5,14 @@ menu "PCI host controller drivers"
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
- depends on ARCH_MVEBU || ARCH_DOVE
+ depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
+ depends on MVEBU_MBUS
depends on ARM
depends on OF
config PCI_AARDVARK
bool "Aardvark PCIe controller"
- depends on ARCH_MVEBU && ARM64
+ depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
help
@@ -21,7 +22,7 @@ config PCI_AARDVARK
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
- depends on ARCH_ZYNQMP
+ depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say 'Y' here if you want kernel support for Xilinx
@@ -32,12 +33,11 @@ config PCIE_XILINX_NWL
config PCI_FTPCI100
bool "Faraday Technology FTPCI100 PCI controller"
depends on OF
- depends on ARM
default ARCH_GEMINI
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want support for the PCIe host controller found
@@ -45,8 +45,8 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARM
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARM
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
@@ -54,7 +54,7 @@ config PCI_RCAR_GEN2
config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
- depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe controller support on R-Car SoCs.
@@ -65,23 +65,24 @@ config PCI_HOST_COMMON
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
- depends on (ARM || ARM64) && OF
+ depends on OF
select PCI_HOST_COMMON
select IRQ_DOMAIN
+ select PCI_DOMAINS
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
- depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC)
+ depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
config PCI_XGENE
bool "X-Gene PCIe controller"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCIEPORTBUS
help
@@ -101,7 +102,7 @@ config PCI_XGENE_MSI
config PCI_V3_SEMI
bool "V3 Semiconductor PCI controller"
depends on OF
- depends on ARM
+ depends on ARM || COMPILE_TEST
default ARCH_INTEGRATOR_AP
config PCI_VERSATILE
@@ -147,8 +148,7 @@ config PCIE_IPROC_MSI
config PCIE_ALTERA
bool "Altera PCIe controller"
- depends on ARM || NIOS2
- depends on OF_PCI
+ depends on ARM || NIOS2 || COMPILE_TEST
select PCI_DOMAINS
help
Say Y here if you want to enable PCIe controller support on Altera
@@ -164,7 +164,7 @@ config PCIE_ALTERA_MSI
config PCI_HOST_THUNDER_PEM
bool "Cavium Thunder PCIe controller to off-chip devices"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
@@ -172,7 +172,7 @@ config PCI_HOST_THUNDER_PEM
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
@@ -191,9 +191,9 @@ config PCIE_ROCKCHIP
config PCIE_MEDIATEK
bool "MediaTek PCIe controller"
- depends on (ARM || ARM64) && (ARCH_MEDIATEK || COMPILE_TEST)
+ depends on ARCH_MEDIATEK || COMPILE_TEST
depends on OF
- depends on PCI
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
help
Say Y here if you want to enable PCIe controller support on
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index b04d37b3c5de..bac37d22b2b6 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -19,6 +19,8 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include "../pci.h"
+
/* PCIe core registers */
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
@@ -815,14 +817,13 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct resource_entry *win, *tmp;
resource_size_t iobase;
INIT_LIST_HEAD(&pcie->resources);
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
- &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, &iobase);
if (err)
return err;
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 5008fd87956a..a1ebe9ed441f 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -28,6 +28,8 @@
#include <linux/irq.h>
#include <linux/clk.h>
+#include "../pci.h"
+
/*
* Special configuration registers directly in the first few words
* in I/O space.
@@ -476,8 +478,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
- &res, &io_base);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &res, &io_base);
if (ret)
return ret;
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 5d028f53fdcd..d8f10451f273 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -101,5 +101,18 @@ int pci_host_common_probe(struct platform_device *pdev,
return ret;
}
+ platform_set_drvdata(pdev, bridge->bus);
+ return 0;
+}
+
+int pci_host_common_remove(struct platform_device *pdev)
+{
+ struct pci_bus *bus = platform_get_drvdata(pdev);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+ pci_unlock_rescan_remove();
+
return 0;
}
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 45319ee3b484..dea3ec7592a2 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -95,5 +95,6 @@ static struct platform_driver gen_pci_driver = {
.suppress_bind_attrs = true,
},
.probe = gen_pci_probe,
+ .remove = pci_host_common_remove,
};
builtin_platform_driver(gen_pci_driver);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 50cdefe3f6d3..6cc5036ac83c 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -433,7 +433,7 @@ enum hv_pcibus_state {
struct hv_pcibus_device {
struct pci_sysdata sysdata;
enum hv_pcibus_state state;
- atomic_t remove_lock;
+ refcount_t remove_lock;
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -488,17 +488,6 @@ enum hv_pcichild_state {
hv_pcichild_maximum
};
-enum hv_pcidev_ref_reason {
- hv_pcidev_ref_invalid = 0,
- hv_pcidev_ref_initial,
- hv_pcidev_ref_by_slot,
- hv_pcidev_ref_packet,
- hv_pcidev_ref_pnp,
- hv_pcidev_ref_childlist,
- hv_pcidev_irqdata,
- hv_pcidev_ref_max
-};
-
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
@@ -548,14 +537,41 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
u32 wslot);
-static void get_pcichild(struct hv_pci_dev *hv_pcidev,
- enum hv_pcidev_ref_reason reason);
-static void put_pcichild(struct hv_pci_dev *hv_pcidev,
- enum hv_pcidev_ref_reason reason);
+
+static void get_pcichild(struct hv_pci_dev *hpdev)
+{
+ refcount_inc(&hpdev->refs);
+}
+
+static void put_pcichild(struct hv_pci_dev *hpdev)
+{
+ if (refcount_dec_and_test(&hpdev->refs))
+ kfree(hpdev);
+}
static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+/*
+ * There is no good way to get notified from vmbus_onoffer_rescind(),
+ * so let's use polling here, since this is not a hot path.
+ */
+static int wait_for_response(struct hv_device *hdev,
+ struct completion *comp)
+{
+ while (true) {
+ if (hdev->channel->rescind) {
+ dev_warn_once(&hdev->device, "The device is gone.\n");
+ return -ENODEV;
+ }
+
+ if (wait_for_completion_timeout(comp, HZ / 10))
+ break;
+ }
+
+ return 0;
+}
+
/**
* devfn_to_wslot() - Convert from Linux PCI slot to Windows
* @devfn: The Linux representation of PCI slot
@@ -762,7 +778,7 @@ static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
_hv_pcifront_read_config(hpdev, where, size, val);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return PCIBIOS_SUCCESSFUL;
}
@@ -790,7 +806,7 @@ static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
_hv_pcifront_write_config(hpdev, where, size, val);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return PCIBIOS_SUCCESSFUL;
}
@@ -856,7 +872,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
}
hv_int_desc_free(hpdev, int_desc);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
}
static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
@@ -1186,13 +1202,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_lo = comp.int_desc.address & 0xffffffff;
msg->data = comp.int_desc.data;
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return;
free_int_desc:
kfree(int_desc);
drop_reference:
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return_null_message:
msg->address_hi = 0;
msg->address_lo = 0;
@@ -1283,7 +1299,6 @@ static u64 get_bar_size(u64 bar_val)
*/
static void survey_child_resources(struct hv_pcibus_device *hbus)
{
- struct list_head *iter;
struct hv_pci_dev *hpdev;
resource_size_t bar_size = 0;
unsigned long flags;
@@ -1309,8 +1324,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
* for a child device are a power of 2 in size and aligned in memory,
* so it's sufficient to just add them up without tracking alignment.
*/
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev, list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
for (i = 0; i < 6; i++) {
if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device,
@@ -1363,7 +1377,6 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
resource_size_t low_base = 0;
resource_size_t bar_size;
struct hv_pci_dev *hpdev;
- struct list_head *iter;
unsigned long flags;
u64 bar_val;
u32 command;
@@ -1385,9 +1398,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
/* Pick addresses for the BARs. */
do {
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
for (i = 0; i < 6; i++) {
bar_val = hpdev->probed_bar[i];
if (bar_val == 0)
@@ -1508,19 +1519,6 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
complete(&completion->host_event);
}
-static void get_pcichild(struct hv_pci_dev *hpdev,
- enum hv_pcidev_ref_reason reason)
-{
- refcount_inc(&hpdev->refs);
-}
-
-static void put_pcichild(struct hv_pci_dev *hpdev,
- enum hv_pcidev_ref_reason reason)
-{
- if (refcount_dec_and_test(&hpdev->refs))
- kfree(hpdev);
-}
-
/**
* new_pcichild_device() - Create a new child device
* @hbus: The internal struct tracking this root PCI bus.
@@ -1568,24 +1566,14 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
if (ret)
goto error;
- wait_for_completion(&comp_pkt.host_event);
+ if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
+ goto error;
hpdev->desc = *desc;
refcount_set(&hpdev->refs, 1);
- get_pcichild(hpdev, hv_pcidev_ref_childlist);
+ get_pcichild(hpdev);
spin_lock_irqsave(&hbus->device_list_lock, flags);
- /*
- * When a device is being added to the bus, we set the PCI domain
- * number to be the device serial number, which is non-zero and
- * unique on the same VM. The serial numbers start with 1, and
- * increase by 1 for each device. So device names including this
- * can have shorter names than based on the bus instance UUID.
- * Only the first device serial number is used for domain, so the
- * domain number will not change after the first device is added.
- */
- if (list_empty(&hbus->children))
- hbus->sysdata.domain = desc->ser;
list_add_tail(&hpdev->list_entry, &hbus->children);
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
return hpdev;
@@ -1618,7 +1606,7 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
list_for_each_entry(iter, &hbus->children, list_entry) {
if (iter->desc.win_slot.slot == wslot) {
hpdev = iter;
- get_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ get_pcichild(hpdev);
break;
}
}
@@ -1654,7 +1642,6 @@ static void pci_devices_present_work(struct work_struct *work)
{
u32 child_no;
bool found;
- struct list_head *iter;
struct pci_function_description *new_desc;
struct hv_pci_dev *hpdev;
struct hv_pcibus_device *hbus;
@@ -1691,10 +1678,8 @@ static void pci_devices_present_work(struct work_struct *work)
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
- hpdev->reported_missing = true;
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ hpdev->reported_missing = true;
}
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
@@ -1704,11 +1689,8 @@ static void pci_devices_present_work(struct work_struct *work)
new_desc = &dr->func[child_no];
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
- if ((hpdev->desc.win_slot.slot ==
- new_desc->win_slot.slot) &&
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
(hpdev->desc.v_id == new_desc->v_id) &&
(hpdev->desc.d_id == new_desc->d_id) &&
(hpdev->desc.ser == new_desc->ser)) {
@@ -1730,12 +1712,10 @@ static void pci_devices_present_work(struct work_struct *work)
spin_lock_irqsave(&hbus->device_list_lock, flags);
do {
found = false;
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
if (hpdev->reported_missing) {
found = true;
- put_pcichild(hpdev, hv_pcidev_ref_childlist);
+ put_pcichild(hpdev);
list_move_tail(&hpdev->list_entry, &removed);
break;
}
@@ -1748,7 +1728,7 @@ static void pci_devices_present_work(struct work_struct *work)
hpdev = list_first_entry(&removed, struct hv_pci_dev,
list_entry);
list_del(&hpdev->list_entry);
- put_pcichild(hpdev, hv_pcidev_ref_initial);
+ put_pcichild(hpdev);
}
switch (hbus->state) {
@@ -1883,8 +1863,8 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
- put_pcichild(hpdev, hv_pcidev_ref_childlist);
- put_pcichild(hpdev, hv_pcidev_ref_pnp);
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
put_hvpcibus(hpdev->hbus);
}
@@ -1899,7 +1879,7 @@ static void hv_eject_device_work(struct work_struct *work)
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
hpdev->state = hv_pcichild_ejecting;
- get_pcichild(hpdev, hv_pcidev_ref_pnp);
+ get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
get_hvpcibus(hpdev->hbus);
queue_work(hpdev->hbus->wq, &hpdev->wrk);
@@ -1999,8 +1979,7 @@ static void hv_pci_onchannelcallback(void *context)
dev_message->wslot.slot);
if (hpdev) {
hv_pci_eject_device(hpdev);
- put_pcichild(hpdev,
- hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
}
break;
@@ -2069,15 +2048,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
sizeof(struct pci_version_request),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
if (ret) {
dev_err(&hdev->device,
- "PCI Pass-through VSP failed sending version reqquest: %#x",
+ "PCI Pass-through VSP failed to request version: %d",
ret);
goto exit;
}
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status >= 0) {
pci_protocol_version = pci_protocol_versions[i];
dev_info(&hdev->device,
@@ -2286,11 +2266,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
if (ret)
goto exit;
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2330,11 +2311,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
0, VM_PKT_DATA_INBAND, 0);
- if (ret)
- return ret;
+ if (!ret)
+ ret = wait_for_response(hdev, &comp);
- wait_for_completion(&comp);
- return 0;
+ return ret;
}
/**
@@ -2398,17 +2378,17 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
ret = vmbus_sendpacket(hdev->channel, &pkt->message,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
if (ret)
break;
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status < 0) {
ret = -EPROTO;
dev_err(&hdev->device,
@@ -2446,7 +2426,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
pkt.message_type.type = PCI_RESOURCES_RELEASED;
pkt.wslot.slot = hpdev->desc.win_slot.slot;
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
VM_PKT_DATA_INBAND, 0);
@@ -2459,12 +2439,12 @@ static int hv_send_resources_released(struct hv_device *hdev)
static void get_hvpcibus(struct hv_pcibus_device *hbus)
{
- atomic_inc(&hbus->remove_lock);
+ refcount_inc(&hbus->remove_lock);
}
static void put_hvpcibus(struct hv_pcibus_device *hbus)
{
- if (atomic_dec_and_test(&hbus->remove_lock))
+ if (refcount_dec_and_test(&hbus->remove_lock))
complete(&hbus->remove_event);
}
@@ -2508,7 +2488,7 @@ static int hv_pci_probe(struct hv_device *hdev,
hdev->dev_instance.b[8] << 8;
hbus->hdev = hdev;
- atomic_inc(&hbus->remove_lock);
+ refcount_set(&hbus->remove_lock, 1);
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
INIT_LIST_HEAD(&hbus->resources_for_children);
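Editorial note: the pci-hyperv.c hunks above drop the unused enum hv_pcidev_ref_reason argument from the child-device reference helpers, convert the bus removal lock from atomic_t to refcount_t, and replace open-coded wait_for_completion() calls with wait_for_response(), which judging by its use here can also fail (for instance if the channel goes away) instead of blocking forever. The reworked helpers themselves are defined outside this excerpt; based on the removed versions, they presumably reduce to the following sketch, which is not part of the patch:

/*
 * Hedged sketch of the simplified reference helpers implied by the hunks
 * above; the real definitions live elsewhere in the patch.
 */
static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}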
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 5d4dccfc9d81..23e270839e6a 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -21,6 +21,8 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include "../pci.h"
+
/*
* PCIe unit register offsets.
*/
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index dd4f1a6b57c5..326171cb1a97 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -21,6 +21,8 @@
#include <linux/sizes.h>
#include <linux/slab.h>
+#include "../pci.h"
+
/* AHB-PCI Bridge PCI communication registers */
#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 389e74be846c..f4f53d092e00 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -40,6 +40,8 @@
#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>
+#include "../pci.h"
+
#define INT_PCI_MSI_NR (8 * 32)
/* register definitions */
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
index 0a4dea796663..68b8bfbdb867 100644
--- a/drivers/pci/host/pci-v3-semi.c
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -33,6 +33,8 @@
#include <linux/regmap.h>
#include <linux/clk.h>
+#include "../pci.h"
+
#define V3_PCI_VENDOR 0x00000000
#define V3_PCI_DEVICE 0x00000002
#define V3_PCI_CMD 0x00000004
@@ -791,7 +793,8 @@ static int v3_pci_probe(struct platform_device *pdev)
if (IS_ERR(v3->config_base))
return PTR_ERR(v3->config_base);
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &io_base);
if (ret)
return ret;
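Editorial note: the pci-v3-semi.c change above is the first of several identical conversions in this series (versatile, xgene, altera, iproc, rockchip, xilinx-nwl, xilinx and the new mobiveil driver follow the same pattern). Each driver gains a #include "../pci.h" (the new helper appears to be declared in drivers/pci/pci.h rather than a public header) and switches from of_pci_get_host_bridge_resources() to the device-managed devm_of_pci_get_host_bridge_resources(), which takes the struct device rather than its of_node so the parsed bridge windows are released automatically on driver detach. A minimal sketch of the resulting probe-time pattern, with placeholder names that belong to no particular driver:

/*
 * Hypothetical example; only the two devm_* helpers shown are taken from
 * the series itself.
 */
static int example_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	resource_size_t iobase;
	LIST_HEAD(res);
	int err;

	/* parse the host bridge windows from the "ranges" property */
	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, &iobase);
	if (err)
		return err;

	/* claim the parsed windows, also device-managed */
	err = devm_request_pci_bus_resources(dev, &res);
	if (err)
		return err;

	/* ... map config space, register the host bridge, scan the bus ... */
	return 0;
}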
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 5b3876f5312b..994f32061b32 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -15,6 +15,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
static void __iomem *versatile_pci_base;
static void __iomem *versatile_cfg_base[2];
@@ -64,11 +66,10 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *res)
{
int err, mem = 1, res_valid = 0;
- struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
if (err)
return err;
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 0a0d7ee6d3c9..d854d67e873c 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -22,6 +22,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define PCIECORE_CTLANDSTATUS 0x50
#define PIM1_1L 0x80
#define IBAR2 0x98
@@ -632,7 +634,8 @@ static int xgene_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (ret)
return ret;
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index a6af62e0256d..7d05e51205b3 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -17,6 +17,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define RP_TX_REG0 0x2000
#define RP_TX_REG1 0x2004
#define RP_TX_CNTRL 0x2008
@@ -488,11 +490,10 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct resource_entry *win;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
- NULL);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, NULL);
if (err)
return err;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index e764a2a2693c..f30f5f3fb5c1 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#include "../pci.h"
#include "pcie-iproc.h"
static const struct of_device_id iproc_pcie_of_match_table[] = {
@@ -99,8 +100,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->phy = NULL;
}
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
- &iobase);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
+ &iobase);
if (ret) {
dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
index a8b20c5012a9..0baabe30858f 100644
--- a/drivers/pci/host/pcie-mediatek.c
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -11,8 +11,10 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
@@ -22,6 +24,8 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include "../pci.h"
+
/* PCIe shared registers */
#define PCIE_SYS_CFG 0x00
#define PCIE_INT_ENABLE 0x0c
@@ -66,6 +70,10 @@
/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR 0x0c0
+
+#define PCIE_CONF_VEND_ID 0x100
+#define PCIE_CONF_CLASS_ID 0x106
+
#define PCIE_INT_MASK 0x420
#define INTX_MASK GENMASK(19, 16)
#define INTX_SHIFT 16
@@ -125,13 +133,13 @@ struct mtk_pcie_port;
/**
* struct mtk_pcie_soc - differentiate between host generations
- * @has_msi: whether this host supports MSI interrupts or not
+ * @need_fix_class_id: whether this host's class ID needs to be fixed or not
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
* @setup_irq: pointer to initialize IRQ functions
*/
struct mtk_pcie_soc {
- bool has_msi;
+ bool need_fix_class_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
@@ -155,7 +163,9 @@ struct mtk_pcie_soc {
* @lane: lane count
* @slot: port slot
* @irq_domain: legacy INTx IRQ domain
+ * @inner_domain: inner IRQ domain
* @msi_domain: MSI IRQ domain
+ * @lock: protect the msi_irq_in_use bitmap
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
struct mtk_pcie_port {
@@ -173,7 +183,9 @@ struct mtk_pcie_port {
u32 lane;
u32 slot;
struct irq_domain *irq_domain;
+ struct irq_domain *inner_domain;
struct irq_domain *msi_domain;
+ struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
@@ -375,6 +387,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
struct resource *mem = &pcie->mem;
+ const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
size_t size;
int err;
@@ -403,6 +416,15 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
PCIE_MAC_SRSTB | PCIE_CRSTB;
writel(val, port->base + PCIE_RST_CTRL);
+ /* Set up vendor ID and class code */
+ if (soc->need_fix_class_id) {
+ val = PCI_VENDOR_ID_MEDIATEK;
+ writew(val, port->base + PCIE_CONF_VEND_ID);
+
+ val = PCI_CLASS_BRIDGE_HOST;
+ writew(val, port->base + PCIE_CONF_CLASS_ID);
+ }
+
/* 100ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
!!(val & PCIE_PORT_LINKUP_V2), 20,
@@ -430,103 +452,130 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
return 0;
}
-static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port)
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- int msi;
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr;
- msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
- if (msi < MTK_MSI_IRQS_NUM)
- set_bit(msi, port->msi_irq_in_use);
- else
- return -ENOSPC;
+ /* MT2712/MT7622 only support 32-bit MSI addresses */
+ addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+ msg->address_hi = 0;
+ msg->address_lo = lower_32_bits(addr);
+
+ msg->data = data->hwirq;
- return msi;
+ dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
+static int mtk_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- clear_bit(hwirq, port->msi_irq_in_use);
+ return -EINVAL;
}
-static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
- struct pci_dev *pdev, struct msi_desc *desc)
+static void mtk_msi_ack_irq(struct irq_data *data)
{
- struct mtk_pcie_port *port;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
- phys_addr_t msg_addr;
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ u32 hwirq = data->hwirq;
- port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
- if (!port)
- return -EINVAL;
+ writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
+}
- hwirq = mtk_pcie_msi_alloc(port);
- if (hwirq < 0)
- return hwirq;
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+ .name = "MTK MSI",
+ .irq_compose_msi_msg = mtk_compose_msi_msg,
+ .irq_set_affinity = mtk_msi_set_affinity,
+ .irq_ack = mtk_msi_ack_irq,
+};
- irq = irq_create_mapping(port->msi_domain, hwirq);
- if (!irq) {
- mtk_pcie_msi_free(port, hwirq);
- return -EINVAL;
- }
+static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&port->lock);
- chip->dev = &pdev->dev;
+ bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+ if (bit >= MTK_MSI_IRQS_NUM) {
+ mutex_unlock(&port->lock);
+ return -ENOSPC;
+ }
- irq_set_msi_desc(irq, desc);
+ __set_bit(bit, port->msi_irq_in_use);
- /* MT2712/MT7622 only support 32-bit MSI addresses */
- msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
- msg.address_hi = 0;
- msg.address_lo = lower_32_bits(msg_addr);
- msg.data = hwirq;
+ mutex_unlock(&port->lock);
- pci_write_msi_msg(irq, &msg);
+ irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq,
+ NULL, NULL);
return 0;
}
-static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- struct pci_dev *pdev = to_pci_dev(chip->dev);
- struct irq_data *d = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct mtk_pcie_port *port;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
- port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
- if (!port)
- return;
+ mutex_lock(&port->lock);
- irq_dispose_mapping(irq);
- mtk_pcie_msi_free(port, hwirq);
+ if (!test_bit(d->hwirq, port->msi_irq_in_use))
+ dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ else
+ __clear_bit(d->hwirq, port->msi_irq_in_use);
+
+ mutex_unlock(&port->lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static struct msi_controller mtk_pcie_msi_chip = {
- .setup_irq = mtk_pcie_msi_setup_irq,
- .teardown_irq = mtk_msi_teardown_irq,
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mtk_pcie_irq_domain_alloc,
+ .free = mtk_pcie_irq_domain_free,
};
static struct irq_chip mtk_msi_irq_chip = {
- .name = "MTK PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+ .name = "MTK PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
-static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &mtk_msi_irq_chip,
+};
+
+static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+
+ mutex_init(&port->lock);
+
+ port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
+ &msi_domain_ops, port);
+ if (!port->inner_domain) {
+ dev_err(port->pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
+ port->inner_domain);
+ if (!port->msi_domain) {
+ dev_err(port->pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(port->inner_domain);
+ return -ENOMEM;
+ }
return 0;
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = mtk_pcie_msi_map,
-};
-
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
u32 val;
@@ -559,6 +608,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
{
struct device *dev = port->pcie->dev;
struct device_node *pcie_intc_node;
+ int ret;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
@@ -575,27 +625,28 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM,
- &msi_domain_ops,
- &mtk_pcie_msi_chip);
- if (!port->msi_domain) {
- dev_err(dev, "failed to create MSI IRQ domain\n");
- return -ENODEV;
- }
+ ret = mtk_pcie_allocate_msi_domains(port);
+ if (ret)
+ return ret;
+
mtk_pcie_enable_msi(port);
}
return 0;
}
-static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
+static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
- struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
+ struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
u32 virq;
u32 bit = INTX_SHIFT;
- while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
+ chained_irq_enter(irqchip, desc);
+
+ status = readl(port->base + PCIE_INT_STATUS);
+ if (status & INTX_MASK) {
for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
/* Clear the INTx */
writel(1 << bit, port->base + PCIE_INT_STATUS);
@@ -606,14 +657,12 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
+ if (status & MSI_STATUS) {
unsigned long imsi_status;
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
- /* Clear the MSI */
- writel(1 << bit, port->base + PCIE_IMSI_STATUS);
- virq = irq_find_mapping(port->msi_domain, bit);
+ virq = irq_find_mapping(port->inner_domain, bit);
generic_handle_irq(virq);
}
}
@@ -622,7 +671,9 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
}
}
- return IRQ_HANDLED;
+ chained_irq_exit(irqchip, desc);
+
+ return;
}
static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
@@ -633,20 +684,15 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
struct platform_device *pdev = to_platform_device(dev);
int err, irq;
- irq = platform_get_irq(pdev, port->slot);
- err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
- IRQF_SHARED, "mtk-pcie", port);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", irq);
- return err;
- }
-
err = mtk_pcie_init_irq_domain(port, node);
if (err) {
dev_err(dev, "failed to init PCIe IRQ domain\n");
return err;
}
+ irq = platform_get_irq(pdev, port->slot);
+ irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+
return 0;
}
@@ -1080,8 +1126,6 @@ static int mtk_pcie_register_host(struct pci_host_bridge *host)
host->map_irq = of_irq_parse_and_map_pci;
host->swizzle_irq = pci_common_swizzle;
host->sysdata = pcie;
- if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
- host->msi = &mtk_pcie_msi_chip;
err = pci_scan_root_bus_bridge(host);
if (err < 0)
@@ -1142,8 +1186,14 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
.startup = mtk_pcie_startup_port,
};
-static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
- .has_msi = true,
+static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
+ .need_fix_class_id = true,
.ops = &mtk_pcie_ops_v2,
.startup = mtk_pcie_startup_port_v2,
.setup_irq = mtk_pcie_setup_irq,
@@ -1152,8 +1202,8 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
static const struct of_device_id mtk_pcie_ids[] = {
{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
- { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 },
- { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 },
+ { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
+ { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
{},
};
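Editorial note: two structural changes dominate the pcie-mediatek.c diff above. The MSI code moves from the legacy struct msi_controller callbacks to a hierarchical IRQ domain (an inner domain created with irq_domain_create_linear() backing pci_msi_create_irq_domain()), and the port interrupt is no longer requested with devm_request_irq() but installed as a chained flow handler. A minimal, hedged skeleton of that chained-handler shape, using placeholder foo_ names that are not part of the patch:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct foo_pcie_port {			/* placeholder per-port state */
	struct irq_domain *inner_domain;
	void __iomem *base;
};

static void foo_pcie_irq_handler(struct irq_desc *desc)
{
	struct foo_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);	/* ack/mask the parent interrupt */
	/*
	 * Read the port's status registers here and dispatch each pending
	 * hwirq with irq_find_mapping() + generic_handle_irq().
	 */
	chained_irq_exit(chip, desc);	/* eoi/unmask the parent interrupt */
}

static void foo_pcie_setup_irq(struct foo_pcie_port *port, int irq)
{
	/* the handler above runs as a flow handler, so no request_irq() */
	irq_set_chained_handler_and_data(irq, foo_pcie_irq_handler, port);
}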
diff --git a/drivers/pci/host/pcie-mobiveil.c b/drivers/pci/host/pcie-mobiveil.c
new file mode 100644
index 000000000000..4d6c20e47bed
--- /dev/null
+++ b/drivers/pci/host/pcie-mobiveil.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* register offsets and bit positions */
+
+/*
+ * translation tables are grouped into windows, each window registers are
+ * grouped into blocks of 4 or 16 registers each
+ */
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
+
+#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
+
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_EN 0xc00
+#define PAGE_SEL_OFFSET_SHIFT 10
+
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
+
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
+
+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_INTX_MASK 0x01e0
+#define PAB_INTP_MSI_MASK 0x8
+
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+
+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
+#define AXI_WINDOW_ALIGN_MASK 3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS 0x474
+
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START 5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI 16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+struct mobiveil_msi { /* MSI information */
+ struct mutex lock; /* protect bitmap variable */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ phys_addr_t msi_pages_phys;
+ int num_of_vectors;
+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie {
+ struct platform_device *pdev;
+ struct list_head resources;
+ void __iomem *config_axi_slave_base; /* endpoint config base */
+ void __iomem *csr_axi_slave_base; /* root port config base */
+ void __iomem *apb_csr_base; /* MSI register base */
+ void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */
+ struct irq_domain *intx_domain;
+ raw_spinlock_t intx_mask_lock;
+ int irq;
+ int apio_wins;
+ int ppio_wins;
+ int ob_wins_configured; /* configured outbound windows */
+ int ib_wins_configured; /* configured inbound windows */
+ struct resource *ob_io_res;
+ char root_bus_nr;
+ struct mobiveil_msi msi;
+};
+
+static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->csr_axi_slave_base + reg);
+}
+
+static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->csr_axi_slave_base + reg);
+}
+
+static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+ return (csr_readl(pcie, LTSSM_STATUS) &
+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+
+ /* Only one device down on each root port */
+ if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly
+ * attached to RC
+ */
+ if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
+ return false;
+
+ return true;
+}
+
+/*
+ * mobiveil_pcie_map_bus - routine to get the configuration base of either
+ * root port or endpoint
+ */
+static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+
+ if (!mobiveil_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ if (bus->number == pcie->root_bus_nr) {
+ /* RC config access */
+ return pcie->csr_axi_slave_base + where;
+ }
+
+ /*
+ * EP config access (in Config/APIO space)
+ * Program PEX Address base (31..16 bits) with appropriate value
+ * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
+ * Relies on pci_lock serialization
+ */
+ csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
+ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
+ PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ return pcie->config_axi_slave_base + where;
+}
+
+static struct pci_ops mobiveil_pcie_ops = {
+ .map_bus = mobiveil_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void mobiveil_pcie_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct device *dev = &pcie->pdev->dev;
+ struct mobiveil_msi *msi = &pcie->msi;
+ u32 msi_data, msi_addr_lo, msi_addr_hi;
+ u32 intr_status, msi_status;
+ unsigned long shifted_status;
+ u32 bit, virq, val, mask;
+
+ /*
+ * The core provides a single interrupt for both INTx/MSI messages.
+ * So we'll read both INTx and MSI status
+ */
+
+ chained_irq_enter(chip, desc);
+
+ /* read INTx status */
+ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ intr_status = val & mask;
+
+ /* Handle INTx */
+ if (intr_status & PAB_INTP_INTX_MASK) {
+ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >>
+ PAB_INTX_START;
+ do {
+ for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
+ virq = irq_find_mapping(pcie->intx_domain,
+ bit + 1);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err_ratelimited(dev,
+ "unexpected IRQ, INT%d\n", bit);
+
+ /* clear interrupt */
+ csr_writel(pcie,
+ shifted_status << PAB_INTX_START,
+ PAB_INTP_AMBA_MISC_STAT);
+ }
+ } while ((shifted_status >> PAB_INTX_START) != 0);
+ }
+
+ /* read extra MSI status register */
+ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
+
+ /* handle MSI interrupts */
+ while (msi_status & 1) {
+ msi_data = readl_relaxed(pcie->apb_csr_base
+ + MSI_DATA_OFFSET);
+
+ /*
+ * The MSI_STATUS_OFFSET register only clears to zero once
+ * both the MSI data and the MSI address have been popped
+ * from the MSI hardware FIFO, so keep the following two
+ * dummy reads.
+ */
+ msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_L_OFFSET);
+ msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_H_OFFSET);
+ dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
+ msi_data, msi_addr_hi, msi_addr_lo);
+
+ virq = irq_find_mapping(msi->dev_domain, msi_data);
+ if (virq)
+ generic_handle_irq(virq);
+
+ msi_status = readl_relaxed(pcie->apb_csr_base +
+ MSI_STATUS_OFFSET);
+ }
+
+ /* Clear the interrupt status */
+ csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ chained_irq_exit(chip, desc);
+}
+
+static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct platform_device *pdev = pcie->pdev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ const char *type;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ /* map config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "config_axi_slave");
+ pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->config_axi_slave_base))
+ return PTR_ERR(pcie->config_axi_slave_base);
+ pcie->ob_io_res = res;
+
+ /* map csr resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "csr_axi_slave");
+ pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->csr_axi_slave_base))
+ return PTR_ERR(pcie->csr_axi_slave_base);
+ pcie->pcie_reg_base = res->start;
+
+ /* map MSI config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->apb_csr_base))
+ return PTR_ERR(pcie->apb_csr_base);
+
+ /* read the number of windows requested */
+ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
+ pcie->apio_wins = MAX_PIO_WINDOWS;
+
+ if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
+ pcie->ppio_wins = MAX_PIO_WINDOWS;
+
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq <= 0) {
+ dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
+
+ return 0;
+}
+
+/*
+ * select_paged_register - select the page for a paged root complex register
+ *
+ * Root complex registers are paged: the upper 6 bits of the offset are
+ * written to the pg_sel field of the PAB_CTRL register, while the lower
+ * 10 bits, combined with PAGE_SEL_EN, are used as the register offset
+ * within the selected page.
+ */
+static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+ int pab_ctrl_dw, pg_sel;
+
+ /* clear pg_sel field */
+ pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
+ pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));
+
+ /* set pg_sel field */
+ pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
+ pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
+ csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
+}
+
+static void write_paged_register(struct mobiveil_pcie *pcie,
+ u32 val, u32 offset)
+{
+ u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+ select_paged_register(pcie, offset);
+ csr_writel(pcie, val, off);
+}
+
+static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+ u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+ select_paged_register(pcie, offset);
+ return csr_readl(pcie, off);
+}
+
+static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+ int pci_addr, u32 type, u64 size)
+{
+ int pio_ctrl_val;
+ int amap_ctrl_dw;
+ u64 size64 = ~(size - 1);
+
+ if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max inbound windows reached !\n");
+ return;
+ }
+
+ pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ csr_writel(pcie,
+ pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
+ amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
+ amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));
+
+ write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
+ PAB_PEX_AMAP_CTRL(win_num));
+
+ write_paged_register(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+ write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
+ write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));
+}
+
+/*
+ * routine to program the outbound windows
+ */
+static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
+{
+
+ u32 value, type;
+ u64 size64 = ~(size - 1);
+
+ if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max outbound windows reached !\n");
+ return;
+ }
+
+ /*
+ * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
+ * to 4 KB in PAB_AXI_AMAP_CTRL register
+ */
+ type = config_io_bit;
+ value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));
+
+ write_paged_register(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+ /*
+ * program AXI window base with appropriate value in
+ * PAB_AXI_AMAP_AXI_WIN0 register
+ */
+ value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
+ csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+
+ value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ob_wins_configured++;
+}
+
+static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (mobiveil_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ }
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+ return -ETIMEDOUT;
+}
+
+static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
+{
+ phys_addr_t msg_addr = pcie->pcie_reg_base;
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ pcie->msi.num_of_vectors = PCI_NUM_MSI;
+ msi->msi_pages_phys = (phys_addr_t)msg_addr;
+
+ writel_relaxed(lower_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+ writel_relaxed(upper_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+ writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
+ writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
+}
+
+static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+{
+ u32 value, pab_ctrl, type = 0;
+ int err;
+ struct resource_entry *win, *tmp;
+
+ err = mobiveil_bringup_link(pcie);
+ if (err) {
+ dev_info(&pcie->pdev->dev, "link bring-up failed\n");
+ return err;
+ }
+
+ /*
+ * program Bus Master Enable Bit in Command Register in PAB Config
+ * Space
+ */
+ value = csr_readl(pcie, PCI_COMMAND);
+ csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER, PCI_COMMAND);
+
+ /*
+ * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
+ * register
+ */
+ pab_ctrl = csr_readl(pcie, PAB_CTRL);
+ csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
+ (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);
+
+ csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
+
+ /*
+ * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
+ * PAB_AXI_PIO_CTRL Register
+ */
+ value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);
+
+ /*
+ * Program one outbound window for config reads and one default
+ * inbound window for all upstream traffic; the remaining outbound
+ * windows are configured according to the "ranges" property in the
+ * device tree.
+ */
+
+ /* config outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
+ resource_size(pcie->ob_io_res));
+
+ /* memory inbound translation window */
+ program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
+ type = 0;
+ if (resource_type(win->res) == IORESOURCE_MEM)
+ type = MEM_WINDOW_TYPE;
+ if (resource_type(win->res) == IORESOURCE_IO)
+ type = IO_WINDOW_TYPE;
+ if (type) {
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ win->res->start, 0, type,
+ resource_size(win->res));
+ }
+ }
+
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
+
+ return err;
+}
+
+static void mobiveil_mask_intx_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct mobiveil_pcie *pcie;
+ unsigned long flags;
+ u32 mask, shifted_val;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static void mobiveil_unmask_intx_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct mobiveil_pcie *pcie;
+ unsigned long flags;
+ u32 shifted_val, mask;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+ .name = "mobiveil_pcie:intx",
+ .irq_enable = mobiveil_unmask_intx_irq,
+ .irq_disable = mobiveil_mask_intx_irq,
+ .irq_mask = mobiveil_mask_intx_irq,
+ .irq_unmask = mobiveil_unmask_intx_irq,
+};
+
+/* routine to setup the INTx related data */
+static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ return 0;
+}
+
+/* INTx domain operations structure */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mobiveil_pcie_intx_map,
+};
+
+static struct irq_chip mobiveil_msi_irq_chip = {
+ .name = "Mobiveil PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mobiveil_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .chip = &mobiveil_msi_irq_chip,
+};
+
+static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip mobiveil_msi_bottom_irq_chip = {
+ .name = "Mobiveil MSI",
+ .irq_compose_msi_msg = mobiveil_compose_msi_msg,
+ .irq_set_affinity = mobiveil_msi_set_affinity,
+};
+
+static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs, void *args)
+{
+ struct mobiveil_pcie *pcie = domain->host_data;
+ struct mobiveil_msi *msi = &pcie->msi;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&msi->lock);
+
+ bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
+ if (bit >= msi->num_of_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->msi_irq_in_use);
+
+ mutex_unlock(&msi->lock);
+
+ irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
+ domain->host_data, handle_level_irq,
+ NULL, NULL);
+ return 0;
+}
+
+static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
+ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ } else {
+ __clear_bit(d->hwirq, msi->msi_irq_in_use);
+ }
+
+ mutex_unlock(&msi->lock);
+}
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mobiveil_irq_msi_domain_alloc,
+ .free = mobiveil_irq_msi_domain_free,
+};
+
+static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ mutex_init(&pcie->msi.lock);
+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &mobiveil_msi_domain_info, msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ /* setup INTx */
+ pcie->intx_domain = irq_domain_add_linear(node,
+ PCI_NUM_INTX, &intx_domain_ops, pcie);
+
+ if (!pcie->intx_domain) {
+ dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ raw_spin_lock_init(&pcie->intx_mask_lock);
+
+ /* setup MSI */
+ ret = mobiveil_allocate_msi_domains(pcie);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+ struct mobiveil_pcie *pcie;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+ resource_size_t iobase;
+ int ret;
+
+ /* allocate the PCIe port */
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+
+ ret = mobiveil_pcie_parse_dt(pcie);
+ if (ret) {
+ dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&pcie->resources);
+
+ /* parse the host bridge base addresses from the device tree file */
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, &iobase);
+ if (ret) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * configure all inbound and outbound windows and prepare the RC for
+ * config access
+ */
+ ret = mobiveil_host_init(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize host\n");
+ goto error;
+ }
+
+ /* fixup for PCIe class register */
+ csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
+
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ goto error;
+ }
+
+ ret = devm_request_pci_bus_resources(dev, &pcie->resources);
+ if (ret)
+ goto error;
+
+ /* Initialize bridge */
+ list_splice_init(&pcie->resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = pcie->root_bus_nr;
+ bridge->ops = &mobiveil_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ /* setup the kernel resources for the newly added PCIe root bus */
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret)
+ goto error;
+
+ bus = bridge->bus;
+
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+
+ return 0;
+error:
+ pci_free_resource_list(&pcie->resources);
+ return ret;
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+ {.compatible = "mbvl,gpex40-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+ .probe = mobiveil_pcie_probe,
+ .driver = {
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 636c3c5095d2..874d75c9ee4a 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -30,6 +30,8 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE BIT(31)
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index f1e8f97ea1fb..cd85de5fa928 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -36,6 +36,8 @@
#include <linux/reset.h>
#include <linux/regmap.h>
+#include "../pci.h"
+
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
* bits. This allows atomic updates of the register without locking.
@@ -1560,8 +1562,8 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err < 0)
goto err_deinit_port;
- err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
- &res, &io_base);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &res, &io_base);
if (err)
goto err_remove_irq_domain;
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4839ae578711..6a4bbb5b3de0 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
+#include "../pci.h"
+
/* Bridge core config registers */
#define BRCFG_PCIE_RX0 0x00000000
#define BRCFG_INTERRUPT 0x00000010
@@ -825,7 +827,6 @@ static const struct of_device_id nwl_pcie_of_match[] = {
static int nwl_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
struct nwl_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
@@ -855,7 +856,8 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
- err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 0ad188effc09..b110a3a814e3 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -23,6 +23,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
/* Register definitions */
#define XILINX_PCIE_REG_BIR 0x00000130
#define XILINX_PCIE_REG_IDR 0x00000138
@@ -643,8 +645,8 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
- err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, &res,
- &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index a8f21d051e0c..e9f78eb390d2 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -104,14 +104,11 @@ config HOTPLUG_PCI_CPCI_GENERIC
When in doubt, say N.
config HOTPLUG_PCI_SHPC
- tristate "SHPC PCI Hotplug driver"
+ bool "SHPC PCI Hotplug driver"
help
Say Y here if you have a motherboard with a SHPC PCI Hotplug
controller.
- To compile this driver as a module, choose M here: the
- module will be called shpchp.
-
When in doubt, say N.
config HOTPLUG_PCI_POWERNV
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index c9816166978e..3979f89b250a 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -63,22 +63,17 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
/**
* acpi_get_hp_hw_control_from_firmware
* @dev: the pci_dev of the bridge that has a hotplug controller
- * @flags: requested control bits for _OSC
*
* Attempt to take hotplug control from firmware.
*/
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
{
+ const struct pci_host_bridge *host;
+ const struct acpi_pci_root *root;
acpi_status status;
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- if (!flags) {
- err("Invalid flags %u specified!\n", flags);
- return -EINVAL;
- }
-
/*
* Per PCI firmware specification, we should run the ACPI _OSC
* method to get control of hotplug hardware before using it. If
@@ -88,25 +83,20 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
* OSHP within the scope of the hotplug controller and its parents,
* up to the host bridge under which this controller exists.
*/
- handle = acpi_find_root_bridge_handle(pdev);
- if (handle) {
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
- dbg("Trying to get hotplug control for %s\n",
- (char *)string.pointer);
- status = acpi_pci_osc_control_set(handle, &flags, flags);
- if (ACPI_SUCCESS(status))
- goto got_one;
- if (status == AE_SUPPORT)
- goto no_control;
- kfree(string.pointer);
- string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
- }
+ if (shpchp_is_native(pdev))
+ return 0;
+
+ /* If _OSC exists, we should not evaluate OSHP */
+ host = pci_find_host_bridge(pdev->bus);
+ root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
+ if (root->osc_support_set)
+ goto no_control;
handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
- * space at all. Try to get acpi handle of parent pci bus.
+ * space at all. Try to get ACPI handle of parent PCI bus.
*/
struct pci_bus *pbus;
for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
@@ -118,8 +108,8 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
while (handle) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
- dbg("Trying to get hotplug control for %s\n",
- (char *)string.pointer);
+ pci_info(pdev, "Requesting control of SHPC hotplug via OSHP (%s)\n",
+ (char *)string.pointer);
status = acpi_run_oshp(handle);
if (ACPI_SUCCESS(status))
goto got_one;
@@ -131,13 +121,12 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
break;
}
no_control:
- dbg("Cannot get control of hotplug hardware for pci %s\n",
- pci_name(pdev));
+ pci_info(pdev, "Cannot get control of SHPC hotplug\n");
kfree(string.pointer);
return -ENODEV;
got_one:
- dbg("Gained control for hotplug HW for pci %s (%s)\n",
- pci_name(pdev), (char *)string.pointer);
+ pci_info(pdev, "Gained control of SHPC hotplug (%s)\n",
+ (char *)string.pointer);
kfree(string.pointer);
return 0;
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index b45b375c0e6c..3a17b290df5d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -287,11 +287,12 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
/*
* Expose slots to user space for functions that have _EJ0 or _RMV or
* are located in dock stations. Do not expose them for devices handled
- * by the native PCIe hotplug (PCIeHP), becuase that code is supposed to
- * expose slots to user space in those cases.
+ * by the native PCIe hotplug (PCIeHP) or standard PCI hotplug
+ * (SHPCHP), because that code is supposed to expose slots to user
+ * space in those cases.
*/
if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
- && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) {
+ && !(pdev && hotplug_is_native(pdev))) {
unsigned long long sun;
int retval;
@@ -430,6 +431,29 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
}
+static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
+{
+ struct pci_bus *bus = bridge->subordinate;
+ struct pci_dev *dev;
+ int max;
+
+ if (!bus)
+ return;
+
+ max = bus->busn_res.start;
+ /* Scan already configured non-hotplug bridges */
+ for_each_pci_bridge(dev, bus) {
+ if (!hotplug_is_native(dev))
+ max = pci_scan_bridge(bus, dev, max, 0);
+ }
+
+ /* Scan non-hotplug bridges that need to be reconfigured */
+ for_each_pci_bridge(dev, bus) {
+ if (!hotplug_is_native(dev))
+ max = pci_scan_bridge(bus, dev, max, 1);
+ }
+}
+
/**
* enable_slot - enable, configure a slot
* @slot: slot to be enabled
@@ -442,25 +466,42 @@ static void enable_slot(struct acpiphp_slot *slot)
struct pci_dev *dev;
struct pci_bus *bus = slot->bus;
struct acpiphp_func *func;
- int max, pass;
- LIST_HEAD(add_list);
- acpiphp_rescan_slot(slot);
- max = acpiphp_max_busnr(bus);
- for (pass = 0; pass < 2; pass++) {
+ if (bus->self && hotplug_is_native(bus->self)) {
+ /*
+ * If native hotplug is used, it will take care of hotplug
+ * slot management and resource allocation for hotplug
+ * bridges. However, ACPI hotplug may still be used for
+ * non-hotplug bridges to bring in additional devices such
+ * as a Thunderbolt host controller.
+ */
for_each_pci_bridge(dev, bus) {
- if (PCI_SLOT(dev->devfn) != slot->device)
- continue;
-
- max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate) {
- check_hotplug_bridge(slot, dev);
- pcibios_resource_survey_bus(dev->subordinate);
- __pci_bus_size_bridges(dev->subordinate, &add_list);
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ acpiphp_native_scan_bridge(dev);
+ }
+ pci_assign_unassigned_bridge_resources(bus->self);
+ } else {
+ LIST_HEAD(add_list);
+ int max, pass;
+
+ acpiphp_rescan_slot(slot);
+ max = acpiphp_max_busnr(bus);
+ for (pass = 0; pass < 2; pass++) {
+ for_each_pci_bridge(dev, bus) {
+ if (PCI_SLOT(dev->devfn) != slot->device)
+ continue;
+
+ max = pci_scan_bridge(bus, dev, max, pass);
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
+ }
}
}
+ __pci_bus_assign_resources(bus, &add_list, NULL);
}
- __pci_bus_assign_resources(bus, &add_list, NULL);
acpiphp_sanitize_bus(bus);
pcie_bus_configure_settings(bus);
@@ -481,7 +522,7 @@ static void enable_slot(struct acpiphp_slot *slot)
if (!dev) {
/* Do not set SLOT_ENABLED flag if some funcs
are not added. */
- slot->flags &= (~SLOT_ENABLED);
+ slot->flags &= ~SLOT_ENABLED;
continue;
}
}
@@ -510,7 +551,7 @@ static void disable_slot(struct acpiphp_slot *slot)
list_for_each_entry(func, &slot->funcs, sibling)
acpi_bus_trim(func_to_acpi_device(func));
- slot->flags &= (~SLOT_ENABLED);
+ slot->flags &= ~SLOT_ENABLED;
}
static bool slot_no_hotplug(struct acpiphp_slot *slot)
@@ -608,6 +649,11 @@ static void trim_stale_devices(struct pci_dev *dev)
alive = pci_device_is_present(dev);
if (!alive) {
+ pci_dev_set_disconnected(dev, NULL);
+ if (pci_has_subordinate(dev))
+ pci_walk_bus(dev->subordinate, pci_dev_set_disconnected,
+ NULL);
+
pci_stop_and_remove_bus_device(dev);
if (adev)
acpi_bus_trim(adev);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index b81ca3fa0e84..1869b0411ce0 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -379,7 +379,7 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_max_bus_speed(struct slot *slot)
{
- int rc;
+ int rc = 0;
u8 mode = 0;
enum pci_bus_speed speed;
struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 88e917c9120f..5f892065585e 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -121,7 +121,7 @@ struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
-void pcie_enable_notification(struct controller *ctrl);
+void pcie_reenable_notification(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 332b723ff9e6..44a6a63802d5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -283,7 +283,7 @@ static int pciehp_resume(struct pcie_device *dev)
ctrl = get_service_data(dev);
/* reinitialize the chipset's event detection logic */
- pcie_enable_notification(ctrl);
+ pcie_reenable_notification(ctrl);
slot = ctrl->slot;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 18a42f8f5dc5..718b6073afad 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -10,7 +10,6 @@
* All rights reserved.
*
* Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
- *
*/
#include <linux/kernel.h>
@@ -147,25 +146,22 @@ static void pcie_wait_cmd(struct controller *ctrl)
else
rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
- /*
- * Controllers with errata like Intel CF118 don't generate
- * completion notifications unless the power/indicator/interlock
- * control bits are changed. On such controllers, we'll emit this
- * timeout message when we wait for completion of commands that
- * don't change those bits, e.g., commands that merely enable
- * interrupts.
- */
if (!rc)
ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
ctrl->slot_ctrl,
jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
+#define CC_ERRATUM_MASK (PCI_EXP_SLTCTL_PCC | \
+ PCI_EXP_SLTCTL_PIC | \
+ PCI_EXP_SLTCTL_AIC | \
+ PCI_EXP_SLTCTL_EIC)
+
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
u16 mask, bool wait)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 slot_ctrl;
+ u16 slot_ctrl_orig, slot_ctrl;
mutex_lock(&ctrl->ctrl_lock);
@@ -180,6 +176,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
goto out;
}
+ slot_ctrl_orig = slot_ctrl;
slot_ctrl &= ~mask;
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
@@ -189,6 +186,17 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
ctrl->slot_ctrl = slot_ctrl;
/*
+ * Controllers with the Intel CF118 and similar errata advertise
+ * Command Completed support, but they only set Command Completed
+ * if we change the "Control" bits for power, power indicator,
+ * attention indicator, or interlock. If we only change the
+ * "Enable" bits, they never set the Command Completed bit.
+ */
+ if (pdev->broken_cmd_compl &&
+ (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
+ ctrl->cmd_busy = 0;
+
+ /*
* Optionally wait for the hardware to be ready for a new command,
* indicating completion of the above issued command.
*/
@@ -231,25 +239,11 @@ bool pciehp_check_link_active(struct controller *ctrl)
return ret;
}
-static void __pcie_wait_link_active(struct controller *ctrl, bool active)
-{
- int timeout = 1000;
-
- if (pciehp_check_link_active(ctrl) == active)
- return;
- while (timeout > 0) {
- msleep(10);
- timeout -= 10;
- if (pciehp_check_link_active(ctrl) == active)
- return;
- }
- ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
- active ? "set" : "cleared");
-}
-
static void pcie_wait_link_active(struct controller *ctrl)
{
- __pcie_wait_link_active(ctrl, true);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+
+ pcie_wait_for_link(pdev, true);
}
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
@@ -659,7 +653,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return handled;
}
-void pcie_enable_notification(struct controller *ctrl)
+static void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -697,6 +691,17 @@ void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
+void pcie_reenable_notification(struct controller *ctrl)
+{
+ /*
+ * Clear both Presence and Data Link Layer Changed to make sure
+ * those events still fire after we have re-enabled them.
+ */
+ pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_enable_notification(ctrl);
+}
+
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
@@ -861,7 +866,7 @@ struct controller *pcie_init(struct pcie_device *dev)
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
PCI_EXP_SLTSTA_DLLSC);
- ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
@@ -872,7 +877,8 @@ struct controller *pcie_init(struct pcie_device *dev)
FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
- FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
+ FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
+ pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
if (pcie_init_slot(ctrl))
goto abort_ctrl;
@@ -891,3 +897,21 @@ void pciehp_release_ctrl(struct controller *ctrl)
pcie_cleanup_slot(ctrl);
kfree(ctrl);
}
+
+static void quirk_cmd_compl(struct pci_dev *pdev)
+{
+ u32 slot_cap;
+
+ if (pci_is_pcie(pdev)) {
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
+ if (slot_cap & PCI_EXP_SLTCAP_HPC &&
+ !(slot_cap & PCI_EXP_SLTCAP_NCCS))
+ pdev->broken_cmd_compl = 1;
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
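
Editorial note: the quirk above flags hotplug bridges that advertise Command Completed support (NCCS clear in Slot Capabilities) yet never set Command Completed for writes that only touch the "Enable" bits. If another affected port were identified, it would be added with one more fixup line in the same pattern; the IDs below are purely illustrative, not a real affected device.

/* Hypothetical example only: 0x1234/0x5678 is not a known affected port. */
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1234, 0x5678,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);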
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index d44100687dfe..6c2e8d7307c6 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -220,12 +220,16 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs,
for_each_child_of_node(dn, child) {
ret = of_changeset_attach_node(ocs, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
ret = pnv_php_populate_changeset(ocs, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
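
Editorial note: the pnv_php fix follows the usual device-tree iteration rule: for_each_child_of_node() holds a reference on the current child, and that reference must be dropped with of_node_put() whenever the loop is left early. A minimal sketch of the pattern, with a hypothetical do_something() callback, looks like this:

#include <linux/of.h>

/* Illustrative helper only; shows the put-on-early-exit rule. */
static int walk_children(struct device_node *parent,
			 int (*do_something)(struct device_node *child))
{
	struct device_node *child;
	int ret = 0;

	for_each_child_of_node(parent, child) {
		ret = do_something(child);
		if (ret) {
			/* Drop the reference taken by the iterator. */
			of_node_put(child);
			break;
		}
	}
	return ret;
}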
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index c55730b61c9a..516e4835019c 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -105,7 +105,6 @@ struct controller {
};
/* Define AMD SHPC ID */
-#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
/* AMD PCI-X bridge registers */
@@ -173,17 +172,6 @@ static inline const char *slot_name(struct slot *slot)
return hotplug_slot_name(slot->hotplug_slot);
}
-#ifdef CONFIG_ACPI
-#include <linux/pci-acpi.h>
-static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
- u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- return acpi_get_hp_hw_control_from_firmware(dev, flags);
-}
-#else
-#define get_hp_hw_control_from_firmware(dev) (0)
-#endif
-
struct ctrl_reg {
volatile u32 base_offset;
volatile u32 slot_avail1;
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 1f0f96908b5a..e91be287f292 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -270,24 +270,12 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static int is_shpc_capable(struct pci_dev *dev)
-{
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
- return 1;
- if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
- return 0;
- if (get_hp_hw_control_from_firmware(dev))
- return 0;
- return 1;
-}
-
static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
struct controller *ctrl;
- if (!is_shpc_capable(pdev))
+ if (acpi_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index bedda5bda910..1047b56e5730 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -585,13 +585,13 @@ static int shpchp_enable_slot (struct slot *p_slot)
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
- if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
- (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458))
+ if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
+ p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
&& p_slot->ctrl->num_slots == 1) {
- /* handle amd pogo errata; this must be done before enable */
+ /* handle AMD POGO errata; this must be done before enable */
amd_pogo_errata_save_misc_reg(p_slot);
retval = board_added(p_slot);
- /* handle amd pogo errata; this must be done after enable */
+ /* handle AMD POGO errata; this must be done after enable */
amd_pogo_errata_restore_misc_reg(p_slot);
} else
retval = board_added(p_slot);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 8adf4a64f291..d0d73dbbd5ca 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -469,6 +469,7 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
+ iov->driver_max_VFs = total;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
iov->pgsz = pgsz;
iov->self = dev;
@@ -827,9 +828,42 @@ int pci_sriov_get_totalvfs(struct pci_dev *dev)
if (!dev->is_physfn)
return 0;
- if (dev->sriov->driver_max_VFs)
- return dev->sriov->driver_max_VFs;
-
- return dev->sriov->total_VFs;
+ return dev->sriov->driver_max_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
+
+/**
+ * pci_sriov_configure_simple - helper to configure SR-IOV
+ * @dev: the PCI device
+ * @nr_virtfn: number of virtual functions to enable, 0 to disable
+ *
+ * Enable or disable SR-IOV for devices that don't require any PF setup
+ * before enabling SR-IOV. Return value is negative on error, or number of
+ * VFs allocated on success.
+ */
+int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
+{
+ int rc;
+
+ might_sleep();
+
+ if (!dev->is_physfn)
+ return -ENODEV;
+
+ if (pci_vfs_assigned(dev)) {
+ pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ if (nr_virtfn == 0) {
+ sriov_disable(dev);
+ return 0;
+ }
+
+ rc = sriov_enable(dev, nr_virtfn);
+ if (rc < 0)
+ return rc;
+
+ return nr_virtfn;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
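
Editorial note: pci_sriov_configure_simple() is meant to be wired directly into a PF driver's sriov_configure hook when no PF-side setup is needed before enabling VFs; the pci-pf-stub driver added later in this series does exactly that. A minimal sketch, with an illustrative driver name, device ID, and an example_probe() assumed to exist elsewhere:

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical PF */
	{ 0 }
};

static struct pci_driver example_pf_driver = {
	.name            = "example-pf",
	.id_table        = example_ids,
	.probe           = example_probe,	/* assumed elsewhere */
	.sriov_configure = pci_sriov_configure_simple,
};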
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index a28355c273ae..d088c9147f10 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -244,8 +244,9 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
#if defined(CONFIG_OF_ADDRESS)
/**
- * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
- * @dev: device node of the host bridge having the range property
+ * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
+ * host bridge resources from DT
+ * @dev: host bridge device
* @busno: bus number associated with the bridge root bus
* @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
@@ -253,8 +254,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* address for the start of the I/O range. Can be NULL if the caller doesn't
* expect I/O ranges to be present in the device tree.
*
- * It is the caller's job to free the @resources list.
- *
* This function will parse the "ranges" property of a PCI host bridge device
* node and setup the resource mapping based on its content. It is expected
* that the property conforms with the Power ePAPR document.
@@ -262,11 +261,11 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* It returns zero if the range parsing has been successful or a standard error
* value if it failed.
*/
-int of_pci_get_host_bridge_resources(struct device_node *dev,
+int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base)
{
- struct resource_entry *window;
+ struct device_node *dev_node = dev->of_node;
struct resource *res;
struct resource *bus_range;
struct of_pci_range range;
@@ -277,19 +276,19 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
if (io_base)
*io_base = (resource_size_t)OF_BAD_ADDR;
- bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
+ bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
if (!bus_range)
return -ENOMEM;
- pr_info("host bridge %pOF ranges:\n", dev);
+ dev_info(dev, "host bridge %pOF ranges:\n", dev_node);
- err = of_pci_parse_bus_range(dev, bus_range);
+ err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
bus_range->start = busno;
bus_range->end = bus_max;
bus_range->flags = IORESOURCE_BUS;
- pr_info(" No bus range found for %pOF, using %pR\n",
- dev, bus_range);
+ dev_info(dev, " No bus range found for %pOF, using %pR\n",
+ dev_node, bus_range);
} else {
if (bus_range->end > bus_range->start + bus_max)
bus_range->end = bus_range->start + bus_max;
@@ -297,11 +296,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
pci_add_resource(resources, bus_range);
/* Check for ranges property */
- err = of_pci_range_parser_init(&parser, dev);
+ err = of_pci_range_parser_init(&parser, dev_node);
if (err)
- goto parse_failed;
+ goto failed;
- pr_debug("Parsing ranges property...\n");
+ dev_dbg(dev, "Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
@@ -310,9 +309,9 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
snprintf(range_type, 4, "MEM");
else
snprintf(range_type, 4, "err");
- pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type,
- range.cpu_addr, range.cpu_addr + range.size - 1,
- range.pci_addr);
+ dev_info(dev, " %s %#010llx..%#010llx -> %#010llx\n",
+ range_type, range.cpu_addr,
+ range.cpu_addr + range.size - 1, range.pci_addr);
/*
* If we failed translation or got a zero-sized region
@@ -321,28 +320,28 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
- res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
- goto parse_failed;
+ goto failed;
}
- err = of_pci_range_to_resource(&range, dev, res);
+ err = of_pci_range_to_resource(&range, dev_node, res);
if (err) {
- kfree(res);
+ devm_kfree(dev, res);
continue;
}
if (resource_type(res) == IORESOURCE_IO) {
if (!io_base) {
- pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
- dev);
+ dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
+ dev_node);
err = -EINVAL;
- goto conversion_failed;
+ goto failed;
}
if (*io_base != (resource_size_t)OF_BAD_ADDR)
- pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
- dev);
+ dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
+ dev_node);
*io_base = range.cpu_addr;
}
@@ -351,15 +350,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
return 0;
-conversion_failed:
- kfree(res);
-parse_failed:
- resource_list_for_each_entry(window, resources)
- kfree(window->res);
+failed:
pci_free_resource_list(resources);
return err;
}
-EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
+EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
/**
@@ -599,12 +594,12 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
struct resource **bus_range)
{
int err, res_valid = 0;
- struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(resources);
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
+ &iobase);
if (err)
return err;
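
Editorial note: with the devm_* conversion, host controller drivers no longer free the parsed resources themselves; everything allocated here is tied to the bridge device's lifetime. A sketch of a probe-time call, mirroring what pci_parse_request_of_pci_ranges() above does (error handling trimmed, "dev" is the bridge's struct device):

LIST_HEAD(resources);
resource_size_t iobase;
int err;

err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
					     &resources, &iobase);
if (err)
	return err;
/*
 * "resources" now holds the bus range plus the MEM/IO windows parsed
 * from the "ranges" property; no manual freeing is needed on teardown.
 */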
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1abdbf267c19..65113b6eed14 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -370,26 +370,57 @@ EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
* pciehp_is_native - Check whether a hotplug port is handled by the OS
- * @pdev: Hotplug port to check
+ * @bridge: Hotplug port to check
*
- * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
- * and return the value of the "PCI Express Native Hot Plug control" bit.
- * On failure to obtain the _OSC Control Field return %false.
+ * Returns true if the given @bridge is handled by the native PCIe hotplug
+ * driver.
*/
-bool pciehp_is_native(struct pci_dev *pdev)
+bool pciehp_is_native(struct pci_dev *bridge)
{
- struct acpi_pci_root *root;
- acpi_handle handle;
+ const struct pci_host_bridge *host;
+ u32 slot_cap;
- handle = acpi_find_root_bridge_handle(pdev);
- if (!handle)
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
- root = acpi_pci_find_root(handle);
- if (!root)
+ pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
+ if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
+ return false;
+
+ if (pcie_ports_native)
+ return true;
+
+ host = pci_find_host_bridge(bridge->bus);
+ return host->native_pcie_hotplug;
+}
+
+/**
+ * shpchp_is_native - Check whether a hotplug port is handled by the OS
+ * @bridge: Hotplug port to check
+ *
+ * Returns true if the given @bridge is handled by the native SHPC hotplug
+ * driver.
+ */
+bool shpchp_is_native(struct pci_dev *bridge)
+{
+ const struct pci_host_bridge *host;
+
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
+ return false;
+
+ /*
+ * It is assumed that AMD GOLAM chips support SHPC but they do not
+ * have SHPC capability.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_AMD &&
+ bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
+ return true;
+
+ if (!pci_find_capability(bridge, PCI_CAP_ID_SHPC))
return false;
- return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+ host = pci_find_host_bridge(bridge->bus);
+ return host->native_shpc_hotplug;
}
/**
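
Editorial note: pciehp_is_native() and shpchp_is_native() now decide purely from the port's capabilities and the host bridge's negotiated hotplug ownership. The hotplug_is_native() helper used in the acpiphp_glue.c hunk above is presumably defined alongside them as the OR of the two checks; a sketch of such a wrapper:

/* Assumed shape of the combined check used by acpiphp above. */
static inline bool hotplug_is_native(struct pci_dev *bridge)
{
	return pciehp_is_native(bridge) || shpchp_is_native(bridge);
}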
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6ace47099fc5..ffb956457b4a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1535,7 +1535,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/**
* pci_uevent_ers - emit a uevent during recovery path of PCI device
* @pdev: PCI device undergoing error recovery
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
new file mode 100644
index 000000000000..9795649fc6f9
--- /dev/null
+++ b/drivers/pci/pci-pf-stub.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* pci-pf-stub - simple stub driver for PCI SR-IOV PF device
+ *
+ * This driver is meant to act as a "whitelist" for devices that provide
+ * SR-IOV functionality while at the same time not actually needing a
+ * driver of their own.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+/**
+ * pci_pf_stub_whitelist - White list of devices to bind pci-pf-stub onto
+ *
+ * This table provides the list of IDs this driver is supposed to bind
+ * onto. You could think of this as a list of "quirked" devices where we
+ * are adding support for SR-IOV here since there are no other drivers
+ * that they would be running under.
+ */
+static const struct pci_device_id pci_pf_stub_whitelist[] = {
+ { PCI_VDEVICE(AMAZON, 0x0053) },
+ /* required last entry */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, pci_pf_stub_whitelist);
+
+static int pci_pf_stub_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ pci_info(dev, "claimed by pci-pf-stub\n");
+ return 0;
+}
+
+static struct pci_driver pf_stub_driver = {
+ .name = "pci-pf-stub",
+ .id_table = pci_pf_stub_whitelist,
+ .probe = pci_pf_stub_probe,
+ .sriov_configure = pci_sriov_configure_simple,
+};
+
+static int __init pci_pf_stub_init(void)
+{
+ return pci_register_driver(&pf_stub_driver);
+}
+
+static void __exit pci_pf_stub_exit(void)
+{
+ pci_unregister_driver(&pf_stub_driver);
+}
+
+module_init(pci_pf_stub_init);
+module_exit(pci_pf_stub_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 366d93af051d..788a200fb2dc 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -288,13 +288,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!val) {
- if (pci_is_enabled(pdev))
- pci_disable_device(pdev);
- else
- result = -EIO;
- } else
+ device_lock(dev);
+ if (dev->driver)
+ result = -EBUSY;
+ else if (val)
result = pci_enable_device(pdev);
+ else if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+ else
+ result = -EIO;
+ device_unlock(dev);
return result < 0 ? result : count;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e597655a5643..b345d20227b8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -112,6 +112,14 @@ unsigned int pcibios_max_latency = 255;
/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
+/* If set, the PCIe ATS capability will not be used. */
+static bool pcie_ats_disabled;
+
+bool pci_ats_disabled(void)
+{
+ return pcie_ats_disabled;
+}
+
/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
@@ -4138,6 +4146,35 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait until the link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ int timeout = 1000;
+ bool ret;
+ u16 lnk_status;
+
+ for (;;) {
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ if (ret == active)
+ return true;
+ if (timeout <= 0)
+ break;
+ msleep(10);
+ timeout -= 10;
+ }
+
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+
+ return false;
+}
void pci_reset_secondary_bus(struct pci_dev *dev)
{
@@ -5070,49 +5107,6 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
EXPORT_SYMBOL(pcie_set_mps);
/**
- * pcie_get_minimum_link - determine minimum link settings of a PCI device
- * @dev: PCI device to query
- * @speed: storage for minimum speed
- * @width: storage for minimum width
- *
- * This function will walk up the PCI device chain and determine the minimum
- * link width and speed of the device.
- */
-int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width)
-{
- int ret;
-
- *speed = PCI_SPEED_UNKNOWN;
- *width = PCIE_LNK_WIDTH_UNKNOWN;
-
- while (dev) {
- u16 lnksta;
- enum pci_bus_speed next_speed;
- enum pcie_link_width next_width;
-
- ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
- if (ret)
- return ret;
-
- next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
- next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
-
- if (next_speed < *speed)
- *speed = next_speed;
-
- if (next_width < *width)
- *width = next_width;
-
- dev = dev->bus->self;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(pcie_get_minimum_link);
-
-/**
* pcie_bandwidth_available - determine minimum link settings of a PCIe
* device and its bandwidth limitation
* @dev: PCI device to query
@@ -5702,15 +5696,14 @@ static void pci_no_domains(void)
#endif
}
-#ifdef CONFIG_PCI_DOMAINS
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);
-int pci_get_new_domain_nr(void)
+static int pci_get_new_domain_nr(void)
{
return atomic_inc_return(&__domain_nr);
}
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
static int of_pci_bus_find_domain_nr(struct device *parent)
{
static int use_dt_domains = -1;
@@ -5765,7 +5758,6 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
acpi_pci_bus_find_domain_nr(bus);
}
#endif
-#endif
/**
* pci_ext_cfg_avail - can we access extended PCI config space?
@@ -5793,6 +5785,9 @@ static int __init pci_setup(char *str)
if (*str && (str = pcibios_setup(str)) && *str) {
if (!strcmp(str, "nomsi")) {
pci_no_msi();
+ } else if (!strncmp(str, "noats", 5)) {
+ pr_info("PCIe: ATS is disabled\n");
+ pcie_ats_disabled = true;
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
} else if (!strncmp(str, "realloc=", 8)) {
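
Editorial note: pcie_wait_for_link() replaces two open-coded polling loops: pciehp waits for the link to come up (active = true) and DPC waits for it to go down (active = false). A hedged sketch of a caller waiting for the link after a secondary bus reset:

/* Illustrative only: wait up to ~1 s for the link to come back. */
pci_reset_secondary_bus(bridge);
if (!pcie_wait_for_link(bridge, true))
	pci_warn(bridge, "link did not come back after reset\n");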
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 023f7cf25bff..c358e7a07f3f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -353,6 +353,11 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
void pci_enable_acs(struct pci_dev *dev);
+/* PCI error reporting and recovery */
+void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
+void pcie_do_nonfatal_recovery(struct pci_dev *dev);
+
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
@@ -407,4 +412,44 @@ static inline u64 pci_rebar_size_to_bytes(int size)
return 1ULL << (size + 20);
}
+struct device_node;
+
+#ifdef CONFIG_OF
+int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+int of_get_pci_domain_nr(struct device_node *node);
+int of_pci_get_max_link_speed(struct device_node *node);
+
+#else
+static inline int
+of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+{
+ return -EINVAL;
+}
+
+static inline int
+of_get_pci_domain_nr(struct device_node *node)
+{
+ return -1;
+}
+
+static inline int
+of_pci_get_max_link_speed(struct device_node *node)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+#if defined(CONFIG_OF_ADDRESS)
+int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base);
+#else
+static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base)
+{
+ return -EINVAL;
+}
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 800e1d404a45..03f4e0b3a140 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for PCI Express features and port driver
-pcieportdrv-y := portdrv_core.o portdrv_pci.o
+pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 779b3879b1b5..9735c19bf39c 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -94,7 +94,7 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
*/
static void aer_enable_rootport(struct aer_rpc *rpc)
{
- struct pci_dev *pdev = rpc->rpd->port;
+ struct pci_dev *pdev = rpc->rpd;
int aer_pos;
u16 reg16;
u32 reg32;
@@ -136,7 +136,7 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
*/
static void aer_disable_rootport(struct aer_rpc *rpc)
{
- struct pci_dev *pdev = rpc->rpd->port;
+ struct pci_dev *pdev = rpc->rpd;
u32 reg32;
int pos;
@@ -232,7 +232,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
/* Initialize Root lock access, e_lock, to Root Error Status Reg */
spin_lock_init(&rpc->e_lock);
- rpc->rpd = dev;
+ rpc->rpd = dev->port;
INIT_WORK(&rpc->dpc_handler, aer_isr);
mutex_init(&rpc->rpc_mutex);
@@ -353,10 +353,7 @@ static void aer_error_resume(struct pci_dev *dev)
pos = dev->aer_cap;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
- if (dev->error_state == pci_channel_io_normal)
- status &= ~mask; /* Clear corresponding nonfatal bits */
- else
- status &= mask; /* Clear corresponding fatal bits */
+ status &= ~mask; /* Clear corresponding nonfatal bits */
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 08b4584f62fe..6e0ad9a68fd9 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -58,7 +58,7 @@ struct aer_err_source {
};
struct aer_rpc {
- struct pcie_device *rpd; /* Root Port device */
+ struct pci_dev *rpd; /* Root Port device */
struct work_struct dpc_handler;
struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
struct aer_err_info e_info;
@@ -76,36 +76,6 @@ struct aer_rpc {
*/
};
-struct aer_broadcast_data {
- enum pci_channel_state state;
- enum pci_ers_result result;
-};
-
-static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
- enum pci_ers_result new)
-{
- if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
- return PCI_ERS_RESULT_NO_AER_DRIVER;
-
- if (new == PCI_ERS_RESULT_NONE)
- return orig;
-
- switch (orig) {
- case PCI_ERS_RESULT_CAN_RECOVER:
- case PCI_ERS_RESULT_RECOVERED:
- orig = new;
- break;
- case PCI_ERS_RESULT_DISCONNECT:
- if (new == PCI_ERS_RESULT_NEED_RESET)
- orig = PCI_ERS_RESULT_NEED_RESET;
- break;
- default:
- break;
- }
-
- return orig;
-}
-
extern struct bus_type pcie_port_bus_type;
void aer_isr(struct work_struct *work);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 0ea5acc40323..42d4f3f32282 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/kfifo.h>
#include "aerdrv.h"
+#include "../../pci.h"
#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
@@ -227,329 +228,14 @@ static bool find_source_device(struct pci_dev *parent,
return true;
}
-static int report_error_detected(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- dev->error_state = result_data->state;
-
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->error_detected) {
- if (result_data->state == pci_channel_io_frozen &&
- dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- /*
- * In case of fatal recovery, if one of down-
- * stream device has no driver. We might be
- * unable to recover because a later insmod
- * of a driver for this device is unaware of
- * its hw state.
- */
- pci_printk(KERN_DEBUG, dev, "device has %s\n",
- dev->driver ?
- "no AER-aware driver" : "no driver");
- }
-
- /*
- * If there's any device in the subtree that does not
- * have an error_detected callback, returning
- * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
- * the subsequent mmio_enabled/slot_reset/resume
- * callbacks of "any" device in the subtree. All the
- * devices in the subtree are left in the error state
- * without recovery.
- */
-
- if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
- vote = PCI_ERS_RESULT_NO_AER_DRIVER;
- else
- vote = PCI_ERS_RESULT_NONE;
- } else {
- err_handler = dev->driver->err_handler;
- vote = err_handler->error_detected(dev, result_data->state);
- pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
- }
-
- result_data->result = merge_result(result_data->result, vote);
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_mmio_enabled(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->mmio_enabled)
- goto out;
-
- err_handler = dev->driver->err_handler;
- vote = err_handler->mmio_enabled(dev);
- result_data->result = merge_result(result_data->result, vote);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_slot_reset(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->slot_reset)
- goto out;
-
- err_handler = dev->driver->err_handler;
- vote = err_handler->slot_reset(dev);
- result_data->result = merge_result(result_data->result, vote);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_resume(struct pci_dev *dev, void *data)
-{
- const struct pci_error_handlers *err_handler;
-
- device_lock(&dev->dev);
- dev->error_state = pci_channel_io_normal;
-
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->resume)
- goto out;
-
- err_handler = dev->driver->err_handler;
- err_handler->resume(dev);
- pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-/**
- * broadcast_error_message - handle message broadcast to downstream drivers
- * @dev: pointer to from where in a hierarchy message is broadcasted down
- * @state: error state
- * @error_mesg: message to print
- * @cb: callback to be broadcasted
- *
- * Invoked during error recovery process. Once being invoked, the content
- * of error severity will be broadcasted to all downstream drivers in a
- * hierarchy in question.
- */
-static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
- enum pci_channel_state state,
- char *error_mesg,
- int (*cb)(struct pci_dev *, void *))
-{
- struct aer_broadcast_data result_data;
-
- pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
- result_data.state = state;
- if (cb == report_error_detected)
- result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
- else
- result_data.result = PCI_ERS_RESULT_RECOVERED;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- if (cb == report_error_detected)
- dev->error_state = state;
- pci_walk_bus(dev->subordinate, cb, &result_data);
- if (cb == report_resume) {
- pci_cleanup_aer_uncorrect_error_status(dev);
- dev->error_state = pci_channel_io_normal;
- }
- } else {
- /*
- * If the error is reported by an end point, we think this
- * error is related to the upstream link of the end point.
- */
- if (state == pci_channel_io_normal)
- /*
- * the error is non fatal so the bus is ok, just invoke
- * the callback for the function that logged the error.
- */
- cb(dev, &result_data);
- else
- pci_walk_bus(dev->bus, cb, &result_data);
- }
-
- return result_data.result;
-}
-
-/**
- * default_reset_link - default reset function
- * @dev: pointer to pci_dev data structure
- *
- * Invoked when performing link reset on a Downstream Port or a
- * Root Port with no aer driver.
- */
-static pci_ers_result_t default_reset_link(struct pci_dev *dev)
-{
- pci_reset_bridge_secondary_bus(dev);
- pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static int find_aer_service_iter(struct device *device, void *data)
-{
- struct pcie_port_service_driver *service_driver, **drv;
-
- drv = (struct pcie_port_service_driver **) data;
-
- if (device->bus == &pcie_port_bus_type && device->driver) {
- service_driver = to_service_driver(device->driver);
- if (service_driver->service == PCIE_PORT_SERVICE_AER) {
- *drv = service_driver;
- return 1;
- }
- }
-
- return 0;
-}
-
-static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
-{
- struct pcie_port_service_driver *drv = NULL;
-
- device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
-
- return drv;
-}
-
-static pci_ers_result_t reset_link(struct pci_dev *dev)
-{
- struct pci_dev *udev;
- pci_ers_result_t status;
- struct pcie_port_service_driver *driver;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /* Reset this port for all subordinates */
- udev = dev;
- } else {
- /* Reset the upstream component (likely downstream port) */
- udev = dev->bus->self;
- }
-
- /* Use the aer driver of the component firstly */
- driver = find_aer_service(udev);
-
- if (driver && driver->reset_link) {
- status = driver->reset_link(udev);
- } else if (udev->has_secondary_link) {
- status = default_reset_link(udev);
- } else {
- pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
- pci_name(udev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- if (status != PCI_ERS_RESULT_RECOVERED) {
- pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
- pci_name(udev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return status;
-}
-
-/**
- * do_recovery - handle nonfatal/fatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- * @severity: error severity type
- *
- * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
- * error detected message to all downstream drivers within a hierarchy in
- * question and return the returned code.
- */
-static void do_recovery(struct pci_dev *dev, int severity)
-{
- pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
- enum pci_channel_state state;
-
- if (severity == AER_FATAL)
- state = pci_channel_io_frozen;
- else
- state = pci_channel_io_normal;
-
- status = broadcast_error_message(dev,
- state,
- "error_detected",
- report_error_detected);
-
- if (severity == AER_FATAL) {
- result = reset_link(dev);
- if (result != PCI_ERS_RESULT_RECOVERED)
- goto failed;
- }
-
- if (status == PCI_ERS_RESULT_CAN_RECOVER)
- status = broadcast_error_message(dev,
- state,
- "mmio_enabled",
- report_mmio_enabled);
-
- if (status == PCI_ERS_RESULT_NEED_RESET) {
- /*
- * TODO: Should call platform-specific
- * functions to reset slot before calling
- * drivers' slot_reset callbacks?
- */
- status = broadcast_error_message(dev,
- state,
- "slot_reset",
- report_slot_reset);
- }
-
- if (status != PCI_ERS_RESULT_RECOVERED)
- goto failed;
-
- broadcast_error_message(dev,
- state,
- "resume",
- report_resume);
-
- pci_info(dev, "AER: Device recovery successful\n");
- return;
-
-failed:
- pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
- /* TODO: Should kernel panic here? */
- pci_info(dev, "AER: Device recovery failed\n");
-}
-
/**
* handle_error_source - handle logging error into an event log
- * @aerdev: pointer to pcie_device data structure of the root port
* @dev: pointer to pci_dev data structure of error source device
* @info: comprehensive error information
*
* Invoked when an error being detected by Root Port.
*/
-static void handle_error_source(struct pcie_device *aerdev,
- struct pci_dev *dev,
- struct aer_err_info *info)
+static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
int pos;
@@ -562,12 +248,13 @@ static void handle_error_source(struct pcie_device *aerdev,
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
- } else
- do_recovery(dev, info->severity);
+ } else if (info->severity == AER_NONFATAL)
+ pcie_do_nonfatal_recovery(dev);
+ else if (info->severity == AER_FATAL)
+ pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
-static void aer_recover_work_func(struct work_struct *work);
#define AER_RECOVER_RING_ORDER 4
#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
@@ -582,6 +269,30 @@ struct aer_recover_entry {
static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
AER_RECOVER_RING_SIZE);
+
+static void aer_recover_work_func(struct work_struct *work)
+{
+ struct aer_recover_entry entry;
+ struct pci_dev *pdev;
+
+ while (kfifo_get(&aer_recover_ring, &entry)) {
+ pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
+ entry.devfn);
+ if (!pdev) {
+ pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ continue;
+ }
+ cper_print_aer(pdev, entry.severity, entry.regs);
+ if (entry.severity == AER_NONFATAL)
+ pcie_do_nonfatal_recovery(pdev);
+ else if (entry.severity == AER_FATAL)
+ pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
+ pci_dev_put(pdev);
+ }
+}
+
/*
* Mutual exclusion for writers of aer_recover_ring, reader side don't
* need lock, because there is only one reader and lock is not needed
@@ -611,27 +322,6 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
-
-static void aer_recover_work_func(struct work_struct *work)
-{
- struct aer_recover_entry entry;
- struct pci_dev *pdev;
-
- while (kfifo_get(&aer_recover_ring, &entry)) {
- pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
- entry.devfn);
- if (!pdev) {
- pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
- entry.domain, entry.bus,
- PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
- continue;
- }
- cper_print_aer(pdev, entry.severity, entry.regs);
- if (entry.severity != AER_CORRECTABLE)
- do_recovery(pdev, entry.severity);
- pci_dev_put(pdev);
- }
-}
#endif
/**
@@ -695,8 +385,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return 1;
}
-static inline void aer_process_err_devices(struct pcie_device *p_device,
- struct aer_err_info *e_info)
+static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
int i;
@@ -707,19 +396,19 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (get_device_error_info(e_info->dev[i], e_info))
- handle_error_source(p_device, e_info->dev[i], e_info);
+ handle_error_source(e_info->dev[i], e_info);
}
}
/**
* aer_isr_one_error - consume an error detected by root port
- * @p_device: pointer to error root port service device
+ * @rpc: pointer to the root port which holds an error
* @e_src: pointer to an error source
*/
-static void aer_isr_one_error(struct pcie_device *p_device,
+static void aer_isr_one_error(struct aer_rpc *rpc,
struct aer_err_source *e_src)
{
- struct aer_rpc *rpc = get_service_data(p_device);
+ struct pci_dev *pdev = rpc->rpd;
struct aer_err_info *e_info = &rpc->e_info;
/*
@@ -734,11 +423,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
e_info->multi_error_valid = 1;
else
e_info->multi_error_valid = 0;
+ aer_print_port_info(pdev, e_info);
- aer_print_port_info(p_device->port, e_info);
-
- if (find_source_device(p_device->port, e_info))
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(pdev, e_info))
+ aer_process_err_devices(e_info);
}
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
@@ -754,10 +442,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
else
e_info->multi_error_valid = 0;
- aer_print_port_info(p_device->port, e_info);
+ aer_print_port_info(pdev, e_info);
- if (find_source_device(p_device->port, e_info))
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(pdev, e_info))
+ aer_process_err_devices(e_info);
}
}
@@ -799,11 +487,10 @@ static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
void aer_isr(struct work_struct *work)
{
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
- struct pcie_device *p_device = rpc->rpd;
struct aer_err_source uninitialized_var(e_src);
mutex_lock(&rpc->rpc_mutex);
while (get_e_source(rpc, &e_src))
- aer_isr_one_error(p_device, &e_src);
+ aer_isr_one_error(rpc, &e_src);
mutex_unlock(&rpc->rpc_mutex);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index cfc89dd57831..4985bdf64c2e 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -163,17 +163,17 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
int id = ((dev->bus->number << 8) | dev->devfn);
if (!info->status) {
- pci_err(dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n",
- aer_error_severity_string[info->severity], id);
+ pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
+ aer_error_severity_string[info->severity]);
goto out;
}
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
- aer_error_layer[layer], id, aer_agent_string[agent]);
+ aer_error_layer[layer], aer_agent_string[agent]);
pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device,
@@ -186,17 +186,21 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
- pci_err(dev, " Error of this Agent(%04x) is reported first\n", id);
+ pci_err(dev, " Error of this Agent is reported first\n");
trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
- info->severity);
+ info->severity, info->tlp_header_valid, &info->tlp);
}
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
{
- pci_info(dev, "AER: %s%s error received: id=%04x\n",
+ u8 bus = info->id >> 8;
+ u8 devfn = info->id & 0xff;
+
+ pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity], info->id);
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -216,28 +220,30 @@ EXPORT_SYMBOL_GPL(cper_severity_to_aer);
void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer)
{
- int layer, agent, status_strs_size, tlp_header_valid = 0;
+ int layer, agent, tlp_header_valid = 0;
u32 status, mask;
- const char **status_strs;
+ struct aer_err_info info;
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
- status_strs = aer_correctable_error_string;
- status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
} else {
status = aer->uncor_status;
mask = aer->uncor_mask;
- status_strs = aer_uncorrectable_error_string;
- status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
tlp_header_valid = status & AER_LOG_TLP_MASKS;
}
layer = AER_GET_LAYER_ERROR(aer_severity, status);
agent = AER_GET_AGENT(aer_severity, status);
+ memset(&info, 0, sizeof(info));
+ info.severity = aer_severity;
+ info.status = status;
+ info.mask = mask;
+ info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
+
pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
- cper_print_bits("", status, status_strs, status_strs_size);
+ __aer_print_error(dev, &info);
pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
aer_error_layer[layer], aer_agent_string[agent]);
@@ -249,6 +255,6 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
__print_tlp_header(dev, &aer->header_log);
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
- aer_severity);
+ aer_severity, tlp_header_valid, &aer->header_log);
}
#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f76eb7704f64..c687c817b47d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -400,6 +400,15 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
info->l1ss_cap = 0;
return;
}
+
+ /*
+ * If we don't have LTR for the entire path from the Root Complex
+ * to this device, we can't use ASPM L1.2 because it relies on the
+ * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+ */
+ if (!pdev->ltr_path)
+ info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
&info->l1ss_ctl1);
pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 8c57d607e603..d6436681c535 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -68,44 +68,35 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
static void dpc_wait_link_inactive(struct dpc_dev *dpc)
{
- unsigned long timeout = jiffies + HZ;
struct pci_dev *pdev = dpc->dev->port;
- struct device *dev = &dpc->dev->device;
- u16 lnk_status;
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- while (lnk_status & PCI_EXP_LNKSTA_DLLLA &&
- !time_after(jiffies, timeout)) {
- msleep(10);
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- }
- if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
- dev_warn(dev, "Link state not disabled for DPC event\n");
+ pcie_wait_for_link(pdev, false);
}
-static void dpc_work(struct work_struct *work)
+static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
- struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
- struct pci_dev *dev, *temp, *pdev = dpc->dev->port;
- struct pci_bus *parent = pdev->subordinate;
- u16 cap = dpc->cap_pos, ctl;
-
- pci_lock_rescan_remove();
- list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
- bus_list) {
- pci_dev_get(dev);
- pci_dev_set_disconnected(dev, NULL);
- if (pci_has_subordinate(dev))
- pci_walk_bus(dev->subordinate,
- pci_dev_set_disconnected, NULL);
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
- pci_unlock_rescan_remove();
-
+ struct dpc_dev *dpc;
+ struct pcie_device *pciedev;
+ struct device *devdpc;
+ u16 cap, ctl;
+
+ /*
+ * DPC disables the Link automatically in hardware, so it has
+ * already been reset by the time we get here.
+ */
+ devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC);
+ pciedev = to_pcie_device(devdpc);
+ dpc = get_service_data(pciedev);
+ cap = dpc->cap_pos;
+
+ /*
+ * Wait until the Link is inactive, then clear DPC Trigger Status
+ * to allow the Port to leave DPC.
+ */
dpc_wait_link_inactive(dpc);
+
if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
- return;
+ return PCI_ERS_RESULT_DISCONNECT;
if (dpc->rp_extensions && dpc->rp_pio_status) {
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
dpc->rp_pio_status);
@@ -113,11 +104,22 @@ static void dpc_work(struct work_struct *work)
}
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
- PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
+ PCI_EXP_DPC_STATUS_TRIGGER);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
ctl | PCI_EXP_DPC_CTL_INT_EN);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void dpc_work(struct work_struct *work)
+{
+ struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
+ struct pci_dev *pdev = dpc->dev->port;
+
+ /* We configure DPC so it only triggers on ERR_FATAL */
+ pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
}
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
@@ -223,6 +225,9 @@ static irqreturn_t dpc_irq(int irq, void *context)
if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
dpc_process_rp_pio_error(dpc);
+ pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_INTERRUPT);
+
schedule_work(&dpc->work);
return IRQ_HANDLED;
@@ -270,7 +275,7 @@ static int dpc_probe(struct pcie_device *dev)
}
}
- ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
+ ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
@@ -288,7 +293,7 @@ static void dpc_remove(struct pcie_device *dev)
u16 ctl;
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
- ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
+ ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
}
@@ -298,6 +303,7 @@ static struct pcie_port_service_driver dpcdriver = {
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
.remove = dpc_remove,
+ .reset_link = dpc_reset_link,
};
static int __init dpc_service_init(void)
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
new file mode 100644
index 000000000000..f7ce0cb0b0b7
--- /dev/null
+++ b/drivers/pci/pcie/err.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file implements the error recovery as a core part of PCIe error
+ * reporting. When a PCIe error is delivered, an error message is
+ * collected and printed to the console, and then an error recovery
+ * procedure is executed following the PCI error recovery rules.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/aer.h>
+#include "portdrv.h"
+#include "../pci.h"
+
+struct aer_broadcast_data {
+ enum pci_channel_state state;
+ enum pci_ers_result result;
+};
+
+static pci_ers_result_t merge_result(enum pci_ers_result orig,
+ enum pci_ers_result new)
+{
+ if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
+ return PCI_ERS_RESULT_NO_AER_DRIVER;
+
+ if (new == PCI_ERS_RESULT_NONE)
+ return orig;
+
+ switch (orig) {
+ case PCI_ERS_RESULT_CAN_RECOVER:
+ case PCI_ERS_RESULT_RECOVERED:
+ orig = new;
+ break;
+ case PCI_ERS_RESULT_DISCONNECT:
+ if (new == PCI_ERS_RESULT_NEED_RESET)
+ orig = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ default:
+ break;
+ }
+
+ return orig;
+}
+
+static int report_error_detected(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ dev->error_state = result_data->state;
+
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->error_detected) {
+ if (result_data->state == pci_channel_io_frozen &&
+ dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * In case of fatal recovery, if one of down-
+ * stream device has no driver. We might be
+ * unable to recover because a later insmod
+ * of a driver for this device is unaware of
+ * its hw state.
+ */
+ pci_printk(KERN_DEBUG, dev, "device has %s\n",
+ dev->driver ?
+ "no AER-aware driver" : "no driver");
+ }
+
+ /*
+ * If there's any device in the subtree that does not
+ * have an error_detected callback, returning
+ * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
+ * the subsequent mmio_enabled/slot_reset/resume
+ * callbacks of "any" device in the subtree. All the
+ * devices in the subtree are left in the error state
+ * without recovery.
+ */
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+ vote = PCI_ERS_RESULT_NO_AER_DRIVER;
+ else
+ vote = PCI_ERS_RESULT_NONE;
+ } else {
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->error_detected(dev, result_data->state);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
+ }
+
+ result_data->result = merge_result(result_data->result, vote);
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->mmio_enabled)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->mmio_enabled(dev);
+ result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_slot_reset(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->slot_reset)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->slot_reset(dev);
+ result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_resume(struct pci_dev *dev, void *data)
+{
+ const struct pci_error_handlers *err_handler;
+
+ device_lock(&dev->dev);
+ dev->error_state = pci_channel_io_normal;
+
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->resume)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ err_handler->resume(dev);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
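
The four report_* walkers above only have an effect when the bound driver exposes a struct pci_error_handlers. As a hedged sketch (the driver and function names are hypothetical; only the pci_error_handlers callbacks and their signatures come from the PCI core), an endpoint driver that participates in this recovery flow would look roughly like:

	static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
						     enum pci_channel_state state)
	{
		/* Stop I/O; if the link is frozen, ask for a slot reset. */
		if (state == pci_channel_io_frozen)
			return PCI_ERS_RESULT_NEED_RESET;
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
	{
		/* Re-initialize the device after the link/slot reset. */
		return PCI_ERS_RESULT_RECOVERED;
	}

	static void mydrv_resume(struct pci_dev *pdev)
	{
		/* Restart I/O once recovery has completed. */
	}

	static const struct pci_error_handlers mydrv_err_handler = {
		.error_detected	= mydrv_error_detected,
		.slot_reset	= mydrv_slot_reset,
		.resume		= mydrv_resume,
	};
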
+
+/**
+ * default_reset_link - default reset function
+ * @dev: pointer to pci_dev data structure
+ *
+ * Invoked when performing link reset on a Downstream Port or a
+ * Root Port with no aer driver.
+ */
+static pci_ers_result_t default_reset_link(struct pci_dev *dev)
+{
+ pci_reset_bridge_secondary_bus(dev);
+ pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
+{
+ struct pci_dev *udev;
+ pci_ers_result_t status;
+ struct pcie_port_service_driver *driver = NULL;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ /* Reset this port for all subordinates */
+ udev = dev;
+ } else {
+ /* Reset the upstream component (likely downstream port) */
+ udev = dev->bus->self;
+ }
+
+ /* Try the AER driver of the component first */
+ driver = pcie_port_find_service(udev, service);
+
+ if (driver && driver->reset_link) {
+ status = driver->reset_link(udev);
+ } else if (udev->has_secondary_link) {
+ status = default_reset_link(udev);
+ } else {
+ pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (status != PCI_ERS_RESULT_RECOVERED) {
+ pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return status;
+}
+
+/**
+ * broadcast_error_message - handle message broadcast to downstream drivers
+ * @dev: pointer to the device from which the message is broadcast down the hierarchy
+ * @state: error state
+ * @error_mesg: message to print
+ * @cb: callback to be invoked for each device in the hierarchy
+ *
+ * Invoked during the error recovery process. The error severity is
+ * broadcast to all downstream drivers in the hierarchy in question.
+ */
+static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
+ enum pci_channel_state state,
+ char *error_mesg,
+ int (*cb)(struct pci_dev *, void *))
+{
+ struct aer_broadcast_data result_data;
+
+ pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
+ result_data.state = state;
+ if (cb == report_error_detected)
+ result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
+ else
+ result_data.result = PCI_ERS_RESULT_RECOVERED;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * If the error is reported by a bridge, we think this error
+ * is related to the downstream link of the bridge, so we
+ * do error recovery on all subordinates of the bridge instead
+ * of the bridge and clear the error status of the bridge.
+ */
+ if (cb == report_error_detected)
+ dev->error_state = state;
+ pci_walk_bus(dev->subordinate, cb, &result_data);
+ if (cb == report_resume) {
+ pci_cleanup_aer_uncorrect_error_status(dev);
+ dev->error_state = pci_channel_io_normal;
+ }
+ } else {
+ /*
+ * If the error is reported by an endpoint, we think this
+ * error is related to the upstream link of the endpoint.
+ */
+ if (state == pci_channel_io_normal)
+ /*
+ * The error is non-fatal so the bus is OK; just invoke
+ * the callback for the function that logged the error.
+ */
+ cb(dev, &result_data);
+ else
+ pci_walk_bus(dev->bus, cb, &result_data);
+ }
+
+ return result_data.result;
+}
+
+/**
+ * pcie_do_fatal_recovery - handle fatal error recovery process
+ * @dev: pointer to a pci_dev data structure of the agent detecting an error
+ * @service: PCIe port service (e.g. AER or DPC) handling the error
+ *
+ * Invoked when an error is fatal. Removes the devices beneath this
+ * AER agent, resets the link (e.g. via a secondary bus reset), and then
+ * re-enumerates the devices.
+ */
+void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
+{
+ struct pci_dev *udev;
+ struct pci_bus *parent;
+ struct pci_dev *pdev, *temp;
+ pci_ers_result_t result;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ udev = dev;
+ else
+ udev = dev->bus->self;
+
+ parent = udev->subordinate;
+ pci_lock_rescan_remove();
+ list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
+ bus_list) {
+ pci_dev_get(pdev);
+ pci_dev_set_disconnected(pdev, NULL);
+ if (pci_has_subordinate(pdev))
+ pci_walk_bus(pdev->subordinate,
+ pci_dev_set_disconnected, NULL);
+ pci_stop_and_remove_bus_device(pdev);
+ pci_dev_put(pdev);
+ }
+
+ result = reset_link(udev, service);
+
+ if ((service == PCIE_PORT_SERVICE_AER) &&
+ (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
+ /*
+ * If the error is reported by a bridge, we think this error
+ * is related to the downstream link of the bridge, so we
+ * do error recovery on all subordinates of the bridge instead
+ * of the bridge and clear the error status of the bridge.
+ */
+ pci_cleanup_aer_uncorrect_error_status(dev);
+ }
+
+ if (result == PCI_ERS_RESULT_RECOVERED) {
+ if (pcie_wait_for_link(udev, true))
+ pci_rescan_bus(udev->bus);
+ pci_info(dev, "Device recovery from fatal error successful\n");
+ } else {
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+ pci_info(dev, "Device recovery from fatal error failed\n");
+ }
+
+ pci_unlock_rescan_remove();
+}
+
+/**
+ * pcie_do_nonfatal_recovery - handle nonfatal error recovery process
+ * @dev: pointer to a pci_dev data structure of the agent detecting an error
+ *
+ * Invoked when an error is nonfatal. Broadcasts the error_detected
+ * message to all downstream drivers within the hierarchy in question
+ * and walks the remaining recovery steps based on their results.
+ */
+void pcie_do_nonfatal_recovery(struct pci_dev *dev)
+{
+ pci_ers_result_t status;
+ enum pci_channel_state state;
+
+ state = pci_channel_io_normal;
+
+ status = broadcast_error_message(dev,
+ state,
+ "error_detected",
+ report_error_detected);
+
+ if (status == PCI_ERS_RESULT_CAN_RECOVER)
+ status = broadcast_error_message(dev,
+ state,
+ "mmio_enabled",
+ report_mmio_enabled);
+
+ if (status == PCI_ERS_RESULT_NEED_RESET) {
+ /*
+ * TODO: Should call platform-specific
+ * functions to reset slot before calling
+ * drivers' slot_reset callbacks?
+ */
+ status = broadcast_error_message(dev,
+ state,
+ "slot_reset",
+ report_slot_reset);
+ }
+
+ if (status != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
+
+ broadcast_error_message(dev,
+ state,
+ "resume",
+ report_resume);
+
+ pci_info(dev, "AER: Device recovery successful\n");
+ return;
+
+failed:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+
+ /* TODO: Should kernel panic here? */
+ pci_info(dev, "AER: Device recovery failed\n");
+}
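
err.c therefore exposes two severity-specific entry points. A hedged sketch of how a reporting driver might dispatch between them (the real AER and DPC call sites are elsewhere in this patch series and may differ in detail; the function name here is illustrative):

	/* Sketch only: hand off to the core recovery paths by severity. */
	static void handle_error(struct pci_dev *dev, bool fatal)
	{
		if (fatal)
			pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
		else
			pcie_do_nonfatal_recovery(dev);
	}
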
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d0c6783dbfe3..2bb5db7b53e6 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,8 +11,6 @@
#include <linux/compiler.h>
-extern bool pcie_ports_native;
-
/* Service Type */
#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
@@ -112,4 +110,7 @@ static inline bool pcie_pme_no_msi(void) { return false; }
static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
+struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
+ u32 service);
+struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
deleted file mode 100644
index 8ab5d434b9c6..000000000000
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PCIe Port Native Services Support, ACPI-Related Part
- *
- * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-
-#include "aer/aerdrv.h"
-#include "../pci.h"
-#include "portdrv.h"
-
-/**
- * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
- * @port: PCIe Port service for a root port or event collector.
- * @srv_mask: Bit mask of services that can be enabled for @port.
- *
- * Invoked when @port is identified as a PCIe port device. To avoid conflicts
- * with the BIOS PCIe port native services support requires the BIOS to yield
- * control of these services to the kernel. The mask of services that the BIOS
- * allows to be enabled for @port is written to @srv_mask.
- *
- * NOTE: It turns out that we cannot do that for individual port services
- * separately, because that would make some systems work incorrectly.
- */
-void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
-{
- struct acpi_pci_root *root;
- acpi_handle handle;
- u32 flags;
-
- if (acpi_pci_disabled)
- return;
-
- handle = acpi_find_root_bridge_handle(port);
- if (!handle)
- return;
-
- root = acpi_pci_find_root(handle);
- if (!root)
- return;
-
- flags = root->osc_control_set;
-
- *srv_mask = 0;
- if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_HP;
- if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_PME;
- if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
-}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index c9c0663db282..e0261ad4bcdd 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -19,6 +19,12 @@
#include "../pci.h"
#include "portdrv.h"
+struct portdrv_service_data {
+ struct pcie_port_service_driver *drv;
+ struct device *dev;
+ u32 service;
+};
+
/**
* release_pcie_device - free PCI Express port service device structure
* @dev: Port service device to release
@@ -199,7 +205,7 @@ static int get_port_device_capability(struct pci_dev *dev)
int services = 0;
if (dev->is_hotplug_bridge &&
- (pcie_ports_native || host->native_hotplug)) {
+ (pcie_ports_native || host->native_pcie_hotplug)) {
services |= PCIE_PORT_SERVICE_HP;
/*
@@ -398,6 +404,69 @@ static int remove_iter(struct device *dev, void *data)
return 0;
}
+static int find_service_iter(struct device *device, void *data)
+{
+ struct pcie_port_service_driver *service_driver;
+ struct portdrv_service_data *pdrvs;
+ u32 service;
+
+ pdrvs = (struct portdrv_service_data *) data;
+ service = pdrvs->service;
+
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ service_driver = to_service_driver(device->driver);
+ if (service_driver->service == service) {
+ pdrvs->drv = service_driver;
+ pdrvs->dev = device;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pcie_port_find_service - find the service driver
+ * @dev: PCI Express port the service is associated with
+ * @service: Service to find
+ *
+ * Find the PCI Express port service driver associated with the given service
+ */
+struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
+ u32 service)
+{
+ struct pcie_port_service_driver *drv;
+ struct portdrv_service_data pdrvs;
+
+ pdrvs.drv = NULL;
+ pdrvs.service = service;
+ device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
+
+ drv = pdrvs.drv;
+ return drv;
+}
+
+/**
+ * pcie_port_find_device - find the struct device
+ * @dev: PCI Express port the service is associated with
+ * @service: Service to find
+ *
+ * Find the struct device associated with the given service on a pci_dev
+ */
+struct device *pcie_port_find_device(struct pci_dev *dev,
+ u32 service)
+{
+ struct device *device;
+ struct portdrv_service_data pdrvs;
+
+ pdrvs.dev = NULL;
+ pdrvs.service = service;
+ device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
+
+ device = pdrvs.dev;
+ return device;
+}
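
These lookups let code outside portdrv reach the service driver or struct device bound under a given port; reset_link() in err.c above uses pcie_port_find_service() for exactly that. A small hedged usage sketch (the wrapper name is illustrative, not part of the patch):

	/* Sketch only: prefer a service-specific link reset if one exists. */
	static pci_ers_result_t try_service_reset(struct pci_dev *port)
	{
		struct pcie_port_service_driver *drv;

		drv = pcie_port_find_service(port, PCIE_PORT_SERVICE_DPC);
		if (drv && drv->reset_link)
			return drv->reset_link(port);

		return PCI_ERS_RESULT_NONE;
	}
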
+
/**
* pcie_port_device_remove - unregister PCI Express port service devices
* @dev: PCI Express port the service devices to unregister are associated with
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ac91b6fd0bcd..ac876e32de4b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -526,12 +526,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
if (bridge->release_fn)
bridge->release_fn(bridge);
+
+ pci_free_resource_list(&bridge->windows);
}
static void pci_release_host_bridge_dev(struct device *dev)
{
devm_pci_release_host_bridge_dev(dev);
- pci_free_host_bridge(to_pci_host_bridge(dev));
+ kfree(to_pci_host_bridge(dev));
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -552,8 +554,10 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
* OS from interfering.
*/
bridge->native_aer = 1;
- bridge->native_hotplug = 1;
+ bridge->native_pcie_hotplug = 1;
+ bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
+ bridge->native_ltr = 1;
return bridge;
}
@@ -882,6 +886,45 @@ free:
return err;
}
+static bool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge)
+{
+ int pos;
+ u32 status;
+
+ /*
+ * If extended config space isn't accessible on a bridge's primary
+ * bus, we certainly can't access it on the secondary bus.
+ */
+ if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
+ return false;
+
+ /*
+ * PCIe Root Ports and switch ports are PCIe on both sides, so if
+ * extended config space is accessible on the primary, it's also
+ * accessible on the secondary.
+ */
+ if (pci_is_pcie(bridge) &&
+ (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM ||
+ pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM))
+ return true;
+
+ /*
+ * For the other bridge types:
+ * - PCI-to-PCI bridges
+ * - PCIe-to-PCI/PCI-X forward bridges
+ * - PCI/PCI-X-to-PCIe reverse bridges
+ * extended config space on the secondary side is only accessible
+ * if the bridge supports PCI-X Mode 2.
+ */
+ pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
+ if (!pos)
+ return false;
+
+ pci_read_config_dword(bridge, pos + PCI_X_STATUS, &status);
+ return status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ);
+}
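
Once pci_alloc_child_bus() marks a child bus with PCI_BUS_FLAGS_NO_EXTCFG (see the hunk below), any path that touches config space above the first 256 bytes can check the flag before issuing the access, as pci_cfg_space_size() now does. A trivial hedged sketch of such a guard (the helper name is illustrative):

	/* Sketch: skip extended config space behind a conventional PCI bridge. */
	static bool can_use_ext_cfg(struct pci_dev *dev)
	{
		return !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG);
	}
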
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -923,6 +966,16 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
pci_set_bus_of_node(child);
pci_set_bus_speed(child);
+ /*
+ * Check whether extended config space is accessible on the child
+ * bus. Note that we currently assume it is always accessible on
+ * the root bus.
+ */
+ if (!pci_bridge_child_ext_cfg_accessible(bridge)) {
+ child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG;
+ pci_info(child, "extended config space not accessible\n");
+ }
+
/* Set up default resource pointers and names */
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
@@ -998,6 +1051,8 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* already configured by the BIOS and after we are done with all of
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
+ *
+ * Return: New subordinate number covering all buses behind this bridge.
*/
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
int max, unsigned int available_buses,
@@ -1188,20 +1243,15 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
pci_domain_nr(bus), child->number);
- /* Has only triggered on CardBus, fixup is in yenta_socket */
+ /* Check that all devices are accessible */
while (bus->parent) {
if ((child->busn_res.end > bus->busn_res.end) ||
(child->number > bus->busn_res.end) ||
(child->number < bus->number) ||
(child->busn_res.end < bus->number)) {
- dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
- &child->busn_res,
- (bus->number > child->busn_res.end &&
- bus->busn_res.end < child->number) ?
- "wholly" : "partially",
- bus->self->transparent ? " transparent" : "",
- dev_name(&bus->dev),
- &bus->busn_res);
+ dev_info(&dev->dev, "devices behind bridge are unusable because %pR cannot be assigned for them\n",
+ &child->busn_res);
+ break;
}
bus = bus->parent;
}
@@ -1230,6 +1280,8 @@ out:
* already configured by the BIOS and after we are done with all of
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
+ *
+ * Return: New subordinate number covering all buses behind this bridge.
*/
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
@@ -1393,6 +1445,9 @@ int pci_cfg_space_size(struct pci_dev *dev)
u32 status;
u16 class;
+ if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
+ return PCI_CFG_SPACE_SIZE;
+
class = dev->class >> 8;
if (class == PCI_CLASS_BRIDGE_HOST)
return pci_cfg_space_size_ext(dev);
@@ -1954,9 +2009,13 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
static void pci_configure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
u32 cap;
struct pci_dev *bridge;
+ if (!host->native_ltr)
+ return;
+
if (!pci_is_pcie(dev))
return;
@@ -2638,7 +2697,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
for_each_pci_bridge(dev, bus) {
cmax = max;
max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
- used_buses += cmax - max;
+
+ /*
+ * Reserve one bus for each bridge now to avoid extending
+ * hotplug bridges too much during the second scan below.
+ */
+ used_buses++;
+ if (cmax - max > 1)
+ used_buses += cmax - max - 1;
}
/* Scan bridges that need to be reconfigured */
@@ -2661,12 +2727,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* bridges if any.
*/
buses = available_buses / hotplug_bridges;
- buses = min(buses, available_buses - used_buses);
+ buses = min(buses, available_buses - used_buses + 1);
}
cmax = max;
max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
- used_buses += max - cmax;
+ /* One bus is already accounted so don't add it again */
+ if (max - cmax > 1)
+ used_buses += max - cmax - 1;
}
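
To make the bus-number accounting concrete: the first pass reserves one bus per bridge up front; in this second pass, if rescanning a hotplug bridge extends max from, say, 32 to 36 (max - cmax == 4), only the 3 buses beyond the one already reserved are charged to used_buses, and a hotplug bridge whose rescan finds nothing new (max == cmax) adds nothing beyond its reserved bus.
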
/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2990ad1e7c99..341d3a138217 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4230,11 +4230,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
* 0xa290-0xa29f PCI Express Root port #{0-16}
* 0xa2e7-0xa2ee PCI Express Root port #{17-24}
*
+ * Mobile chipsets are also affected; the 7th & 8th Generation
+ * Specification Update confirms ACS erratum 22, status no fix: (7th Generation
+ * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
+ * Processor Family I/O for U Quad Core Platforms Specification Update,
+ * August 2017, Revision 002, Document#: 334660-002)[6]
+ * Device IDs from the I/O datasheet: (7th Generation Intel Processor Family I/O
+ * for U/Y Platforms and 8th Generation Intel Processor Family I/O for U
+ * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
+ *
+ * 0x9d10-0x9d1b PCI Express Root port #{1-12}
+ *
+ * The 300 series chipset suffers from the same bug so include those root
+ * ports here as well.
+ *
+ * 0xa32c-0xa343 PCI Express Root port #{0-24}
+ *
* [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
* [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
* [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
* [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
* [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
+ * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
+ * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
*/
static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
{
@@ -4244,6 +4262,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
switch (dev->device) {
case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
+ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
+ case 0xa32c ... 0xa343: /* 300 series */
return true;
}
@@ -4361,8 +4381,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
/* QCOM QDF2xxx root ports */
- { 0x17cb, 0x400, pci_quirk_qcom_rp_acs },
- { 0x17cb, 0x401, pci_quirk_qcom_rp_acs },
+ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
+ { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 072784f55ea5..79b1824e83b4 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1943,56 +1943,56 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
/*
+ * There is only one bridge on the bus so it gets all available
+ * resources which it can then distribute to the possible
+ * hotplug bridges below.
+ */
+ if (hotplug_bridges + normal_bridges == 1) {
+ dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
+ if (dev->subordinate) {
+ pci_bus_distribute_available_resources(dev->subordinate,
+ add_list, available_io, available_mmio,
+ available_mmio_pref);
+ }
+ return;
+ }
+
+ /*
* Go over devices on this bus and distribute the remaining
* resource space between hotplug bridges.
*/
for_each_pci_bridge(dev, bus) {
+ resource_size_t align, io, mmio, mmio_pref;
struct pci_bus *b;
b = dev->subordinate;
- if (!b)
+ if (!b || !dev->is_hotplug_bridge)
continue;
- if (!hotplug_bridges && normal_bridges == 1) {
- /*
- * There is only one bridge on the bus (upstream
- * port) so it gets all available resources
- * which it can then distribute to the possible
- * hotplug bridges below.
- */
- pci_bus_distribute_available_resources(b, add_list,
- available_io, available_mmio,
- available_mmio_pref);
- } else if (dev->is_hotplug_bridge) {
- resource_size_t align, io, mmio, mmio_pref;
-
- /*
- * Distribute available extra resources equally
- * between hotplug-capable downstream ports
- * taking alignment into account.
- *
- * Here hotplug_bridges is always != 0.
- */
- align = pci_resource_alignment(bridge, io_res);
- io = div64_ul(available_io, hotplug_bridges);
- io = min(ALIGN(io, align), remaining_io);
- remaining_io -= io;
-
- align = pci_resource_alignment(bridge, mmio_res);
- mmio = div64_ul(available_mmio, hotplug_bridges);
- mmio = min(ALIGN(mmio, align), remaining_mmio);
- remaining_mmio -= mmio;
-
- align = pci_resource_alignment(bridge, mmio_pref_res);
- mmio_pref = div64_ul(available_mmio_pref,
- hotplug_bridges);
- mmio_pref = min(ALIGN(mmio_pref, align),
- remaining_mmio_pref);
- remaining_mmio_pref -= mmio_pref;
-
- pci_bus_distribute_available_resources(b, add_list, io,
- mmio, mmio_pref);
- }
+ /*
+ * Distribute available extra resources equally between
+ * hotplug-capable downstream ports taking alignment into
+ * account.
+ *
+ * Here hotplug_bridges is always != 0.
+ */
+ align = pci_resource_alignment(bridge, io_res);
+ io = div64_ul(available_io, hotplug_bridges);
+ io = min(ALIGN(io, align), remaining_io);
+ remaining_io -= io;
+
+ align = pci_resource_alignment(bridge, mmio_res);
+ mmio = div64_ul(available_mmio, hotplug_bridges);
+ mmio = min(ALIGN(mmio, align), remaining_mmio);
+ remaining_mmio -= mmio;
+
+ align = pci_resource_alignment(bridge, mmio_pref_res);
+ mmio_pref = div64_ul(available_mmio_pref, hotplug_bridges);
+ mmio_pref = min(ALIGN(mmio_pref, align), remaining_mmio_pref);
+ remaining_mmio_pref -= mmio_pref;
+
+ pci_bus_distribute_available_resources(b, add_list, io, mmio,
+ mmio_pref);
}
}
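
The per-bridge share computed in the loop above is just an aligned division capped by what is still unclaimed. A small illustrative fragment with made-up numbers (two hotplug bridges sharing 30 MB of spare MMIO with 1 MB window alignment), mirroring the kernel helpers used in the patch:

	/* Illustrative only: distributing 30 MB of spare MMIO over two bridges. */
	resource_size_t available_mmio = 30 * SZ_1M, remaining_mmio = 30 * SZ_1M;
	resource_size_t align = SZ_1M, mmio;
	unsigned int hotplug_bridges = 2;

	mmio = div64_ul(available_mmio, hotplug_bridges); /* 15 MB per bridge */
	mmio = min(ALIGN(mmio, align), remaining_mmio);   /* aligned, capped at what is left */
	remaining_mmio -= mmio;                           /* 15 MB remains for the second bridge */
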