Diffstat (limited to 'drivers')
-rw-r--r-- drivers/ata/ahci_tegra.c | 2
-rw-r--r-- drivers/ata/ahci_xgene.c | 4
-rw-r--r-- drivers/ata/libata-core.c | 2
-rw-r--r-- drivers/ata/pata_samsung_cf.c | 2
-rw-r--r-- drivers/ata/pata_scc.c | 15
-rw-r--r-- drivers/bus/arm-ccn.c | 2
-rw-r--r-- drivers/firmware/efi/vars.c | 8
-rw-r--r-- drivers/gpio/devres.c | 2
-rw-r--r-- drivers/gpio/gpio-lynxpoint.c | 18
-rw-r--r-- drivers/gpio/gpio-zynq.c | 36
-rw-r--r-- drivers/gpio/gpiolib-of.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 33
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 33
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 39
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 33
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 43
-rw-r--r-- drivers/gpu/drm/radeon/cik_sdma.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_dma.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/kv_dpm.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/r200.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/r600_dma.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 34
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ib.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_semaphore.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vce.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dma.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/si_dma.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/trinity_dpm.c | 24
-rw-r--r-- drivers/gpu/drm/radeon/uvd_v1_0.c | 4
-rw-r--r-- drivers/hid/hid-cherry.c | 2
-rw-r--r-- drivers/hid/hid-huion.c | 128
-rw-r--r-- drivers/hid/hid-kye.c | 2
-rw-r--r-- drivers/hid/hid-lg.c | 4
-rw-r--r-- drivers/hid/hid-lg4ff.c | 4
-rw-r--r-- drivers/hid/hid-logitech-dj.c | 15
-rw-r--r-- drivers/hid/hid-monterey.c | 2
-rw-r--r-- drivers/hid/hid-petalynx.c | 2
-rw-r--r-- drivers/hid/hid-rmi.c | 13
-rw-r--r-- drivers/hid/hid-sensor-hub.c | 33
-rw-r--r-- drivers/hid/hid-sunplus.c | 2
-rw-r--r-- drivers/i2c/Kconfig | 15
-rw-r--r-- drivers/i2c/Makefile | 2
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 2
-rw-r--r-- drivers/i2c/i2c-acpi.c | 2
-rw-r--r-- drivers/iommu/amd_iommu.c | 10
-rw-r--r-- drivers/iommu/intel-iommu.c | 8
-rw-r--r-- drivers/iommu/iommu.c | 2
-rw-r--r-- drivers/isdn/hardware/eicon/xdi_msg.h | 2
-rw-r--r-- drivers/md/raid10.c | 7
-rw-r--r-- drivers/md/raid5.c | 4
-rw-r--r-- drivers/net/can/c_can/c_can_platform.c | 2
-rw-r--r-- drivers/net/can/flexcan.c | 9
-rw-r--r-- drivers/net/can/sja1000/sja1000.c | 62
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 20
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 92
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 32
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 18
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 33
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 18
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 44
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 35
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 57
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 16
-rw-r--r-- drivers/net/macvlan.c | 10
-rw-r--r-- drivers/net/phy/bcm7xxx.c | 42
-rw-r--r-- drivers/net/phy/smsc.c | 33
-rw-r--r-- drivers/of/fdt.c | 2
-rw-r--r-- drivers/of/irq.c | 17
-rw-r--r-- drivers/of/selftest.c | 40
-rw-r--r-- drivers/pci/host/Kconfig | 11
-rw-r--r-- drivers/pci/host/Makefile | 1
-rw-r--r-- drivers/pci/host/pci-dra7xx.c | 458
-rw-r--r-- drivers/pci/host/pci-tegra.c | 118
-rw-r--r-- drivers/pci/host/pcie-designware.c | 134
-rw-r--r-- drivers/pci/host/pcie-designware.h | 11
-rw-r--r-- drivers/platform/x86/toshiba_acpi.c | 2
-rw-r--r-- drivers/pwm/core.c | 8
-rw-r--r-- drivers/scsi/scsi.c | 12
-rw-r--r-- drivers/scsi/scsi_lib.c | 2
-rw-r--r-- drivers/sh/Makefile | 3
-rw-r--r-- drivers/sh/intc/Kconfig | 6
-rw-r--r-- drivers/spi/spi-au1550.c | 2
-rw-r--r-- drivers/spi/spi-davinci.c | 16
-rw-r--r-- drivers/spi/spi-dw-pci.c | 2
-rw-r--r-- drivers/spi/spi-dw.c | 2
-rw-r--r-- drivers/spi/spi-omap2-mcspi.c | 3
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 1
-rw-r--r-- drivers/spi/spi-rockchip.c | 4
-rw-r--r-- drivers/spi/spi-rspi.c | 94
-rw-r--r-- drivers/spi/spi.c | 1
122 files changed, 1714 insertions(+), 631 deletions(-)
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index fc3df47fca35..f1fef74e503c 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -24,8 +24,8 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/tegra-powergate.h>
#include <linux/regulator/consumer.h>
+#include <soc/tegra/pmc.h>
#include "ahci.h"
#define SATA_CONFIGURATION_0 0x180
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index bc281115490b..c6962300b93c 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -344,7 +344,7 @@ static struct ata_port_operations xgene_ahci_ops = {
};
static const struct ata_port_info xgene_ahci_port_info = {
- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &xgene_ahci_ops,
@@ -480,7 +480,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
/* Configure the host controller */
xgene_ahci_hw_init(hpriv);
- hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+ hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
if (rc)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index dbdc5d32343f..f3e7b9f894cd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4228,7 +4228,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 2578fc16960a..1a24a5dc3940 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -360,7 +360,7 @@ static int pata_s3c_wait_after_reset(struct ata_link *link,
/*
* pata_s3c_bus_softreset - PATA device software reset
*/
-static unsigned int pata_s3c_bus_softreset(struct ata_port *ap,
+static int pata_s3c_bus_softreset(struct ata_port *ap,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 4e006d74bef8..7f4cb76ed9fa 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -585,7 +585,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
* Note: Original code is ata_bus_softreset().
*/
-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -599,9 +599,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
udelay(20);
out_be32(ioaddr->ctl_addr, ap->ctl);
- scc_wait_after_reset(&ap->link, devmask, deadline);
-
- return 0;
+ return scc_wait_after_reset(&ap->link, devmask, deadline);
}
/**
@@ -618,7 +616,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
{
struct ata_port *ap = link->ap;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- unsigned int devmask = 0, err_mask;
+ unsigned int devmask = 0;
+ int rc;
u8 err;
DPRINTK("ENTER\n");
@@ -634,9 +633,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
/* issue bus reset */
DPRINTK("about to softreset, devmask=%x\n", devmask);
- err_mask = scc_bus_softreset(ap, devmask, deadline);
- if (err_mask) {
- ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
+ rc = scc_bus_softreset(ap, devmask, deadline);
+ if (rc) {
+ ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
return -EIO;
}
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 3266f8ff9311..6f550d9e7a2d 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -662,7 +662,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
}
if (e->num_vcs && vc >= e->num_vcs) {
dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
- port, node_xp);
+ vc, node_xp);
return -EINVAL;
}
valid = 1;
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index f0a43646a2f3..5abe943e3404 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
*/
static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
{
- WARN_ON(!spin_is_locked(&__efivars->lock));
+ lockdep_assert_held(&__efivars->lock);
list_del(&entry->list);
spin_unlock_irq(&__efivars->lock);
@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
- WARN_ON(!spin_is_locked(&__efivars->lock));
+ lockdep_assert_held(&__efivars->lock);
status = ops->set_variable(entry->var.VariableName,
&entry->var.VendorGuid,
@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
int strsize1, strsize2;
bool found = false;
- WARN_ON(!spin_is_locked(&__efivars->lock));
+ lockdep_assert_held(&__efivars->lock);
list_for_each_entry_safe(entry, n, head, list) {
strsize1 = ucs2_strsize(name, 1024);
@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
- WARN_ON(!spin_is_locked(&__efivars->lock));
+ lockdep_assert_held(&__efivars->lock);
status = ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid,
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 41b2f40578d5..954b9f6b0ef8 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -90,7 +90,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
struct gpio_desc **dr;
struct gpio_desc *desc;
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpiod_desc *),
+ dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!dr)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index ff9eb911b5e4..fa945ec9ccff 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -407,9 +407,27 @@ static int lp_gpio_runtime_resume(struct device *dev)
return 0;
}
+static int lp_gpio_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lp_gpio *lg = platform_get_drvdata(pdev);
+ unsigned long reg;
+ int i;
+
+ /* on some hardware suspend clears input sensing, re-enable it here */
+ for (i = 0; i < lg->chip.ngpio; i++) {
+ if (gpiochip_is_requested(&lg->chip, i) != NULL) {
+ reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
+ outl(inl(reg) & ~GPINDIS_BIT, reg);
+ }
+ }
+ return 0;
+}
+
static const struct dev_pm_ops lp_gpio_pm_ops = {
.runtime_suspend = lp_gpio_runtime_suspend,
.runtime_resume = lp_gpio_runtime_resume,
+ .resume = lp_gpio_resume,
};
static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index c3145f91fda3..31ad5df5dbc9 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -95,6 +95,9 @@ struct zynq_gpio {
struct clk *clk;
};
+static struct irq_chip zynq_gpio_level_irqchip;
+static struct irq_chip zynq_gpio_edge_irqchip;
+
/**
* zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
* for a given pin in the GPIO device
@@ -410,6 +413,15 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
writel_relaxed(int_any,
gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+
+ if (type & IRQ_TYPE_LEVEL_MASK) {
+ __irq_set_chip_handler_name_locked(irq_data->irq,
+ &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
+ } else {
+ __irq_set_chip_handler_name_locked(irq_data->irq,
+ &zynq_gpio_edge_irqchip, handle_level_irq, NULL);
+ }
+
return 0;
}
@@ -424,9 +436,21 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
}
/* irq chip descriptor */
-static struct irq_chip zynq_gpio_irqchip = {
+static struct irq_chip zynq_gpio_level_irqchip = {
.name = DRIVER_NAME,
.irq_enable = zynq_gpio_irq_enable,
+ .irq_eoi = zynq_gpio_irq_ack,
+ .irq_mask = zynq_gpio_irq_mask,
+ .irq_unmask = zynq_gpio_irq_unmask,
+ .irq_set_type = zynq_gpio_set_irq_type,
+ .irq_set_wake = zynq_gpio_set_wake,
+ .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+};
+
+static struct irq_chip zynq_gpio_edge_irqchip = {
+ .name = DRIVER_NAME,
+ .irq_enable = zynq_gpio_irq_enable,
+ .irq_ack = zynq_gpio_irq_ack,
.irq_mask = zynq_gpio_irq_mask,
.irq_unmask = zynq_gpio_irq_unmask,
.irq_set_type = zynq_gpio_set_irq_type,
@@ -469,10 +493,6 @@ static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
offset);
generic_handle_irq(gpio_irq);
}
-
- /* clear IRQ in HW */
- writel_relaxed(int_sts, gpio->base_addr +
- ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
}
}
@@ -610,14 +630,14 @@ static int zynq_gpio_probe(struct platform_device *pdev)
writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
- ret = gpiochip_irqchip_add(chip, &zynq_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0,
+ handle_level_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(&pdev->dev, "Failed to add irq chip\n");
goto err_rm_gpiochip;
}
- gpiochip_set_chained_irqchip(chip, &zynq_gpio_irqchip, irq,
+ gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, irq,
zynq_gpio_irqhandler);
pm_runtime_set_active(&pdev->dev);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 7cfdc2278905..604dbe60bdee 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -307,7 +307,5 @@ void of_gpiochip_add(struct gpio_chip *chip)
void of_gpiochip_remove(struct gpio_chip *chip)
{
gpiochip_remove_pin_ranges(chip);
-
- if (chip->of_node)
- of_node_put(chip->of_node);
+ of_node_put(chip->of_node);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ec96f9a9724c..e27cdbe9d524 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return true;
}
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ dev_priv->long_hpd_port_mask = 0;
+ dev_priv->short_hpd_port_mask = 0;
+ dev_priv->hpd_event_bits = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ cancel_work_sync(&dev_priv->dig_port_work);
+ cancel_work_sync(&dev_priv->hotplug_work);
+ cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_encoder *encoder;
+
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (intel_encoder->suspend)
+ intel_encoder->suspend(intel_encoder);
+ }
+ drm_modeset_unlock_all(dev);
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_disable_interrupts(dev);
+ intel_hpd_cancel_work(dev_priv);
+
+ intel_suspend_encoders(dev_priv);
intel_suspend_gt_powersave(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4412f6a4383b..7a830eac5ba3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1458,7 +1458,7 @@ struct drm_i915_private {
} hpd_mark;
} hpd_stats[HPD_NUM_PINS];
u32 hpd_event_bits;
- struct timer_list hotplug_reenable_timer;
+ struct delayed_work hotplug_reenable_work;
struct i915_fbc fbc;
struct i915_drrs drrs;
@@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
extern void intel_console_resume(struct work_struct *work);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 390ccc2a3096..0050ee9470f1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
* some connectors */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
- mod_timer(&dev_priv->hotplug_reenable_timer,
- jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+ mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+ msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
-{
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
-}
-
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
gen8_irq_reset(dev);
}
@@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
- intel_hpd_irq_uninstall(dev_priv);
-
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
ironlake_irq_reset(dev);
}
@@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- intel_hpd_irq_uninstall(dev_priv);
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void intel_hpd_irq_reenable(unsigned long data)
+static void intel_hpd_irq_reenable(struct work_struct *work)
{
- struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
unsigned long irqflags;
int i;
+ intel_runtime_pm_get(dev_priv);
+
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
struct drm_connector *connector;
@@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data)
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ intel_runtime_pm_put(dev_priv);
}
void intel_irq_init(struct drm_device *dev)
@@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev)
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
(unsigned long) dev);
- setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
- (unsigned long) dev_priv);
+ INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+ intel_hpd_irq_reenable);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2efaf8e8d9c4..e8abfce40976 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)
goto out;
}
+ drm_modeset_acquire_init(&ctx, 0);
+
/* for pre-945g platforms use load detect */
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(connector, &tmp, &ctx);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
status = connector_status_unknown;
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
out:
intel_display_power_put(dev_priv, power_domain);
return status;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 018fb7222f60..d074d704f458 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8462,8 +8462,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
connector->base.id, connector->name,
encoder->base.id, encoder->name);
- drm_modeset_acquire_init(ctx, 0);
-
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
@@ -8502,10 +8500,14 @@ retry:
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
- if (!possible_crtc->enabled) {
- crtc = possible_crtc;
- break;
- }
+ if (possible_crtc->enabled)
+ continue;
+ /* This can occur when applying the pipe A quirk on resume. */
+ if (to_intel_crtc(possible_crtc)->new_enabled)
+ continue;
+
+ crtc = possible_crtc;
+ break;
}
/*
@@ -8574,15 +8576,11 @@ fail_unlock:
goto retry;
}
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
-
return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx)
+ struct intel_load_detect_pipe *old)
{
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
@@ -8606,17 +8604,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
drm_framebuffer_unreference(old->release_fb);
}
- goto unlock;
return;
}
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
-
-unlock:
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11700,8 +11693,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
};
const struct drm_rect clip = {
/* integer pixels */
- .x2 = intel_crtc->config.pipe_src_w,
- .y2 = intel_crtc->config.pipe_src_h,
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
bool visible;
int ret;
@@ -12659,7 +12652,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
struct intel_connector *connector;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
- struct drm_modeset_acquire_ctx ctx;
+ struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
@@ -12676,10 +12669,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
if (!crt)
return;
- if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
- intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
-
-
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
}
static bool
@@ -13112,7 +13103,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
* experience fancy races otherwise.
*/
drm_irq_uninstall(dev);
- cancel_work_sync(&dev_priv->hotplug_work);
+ intel_hpd_cancel_work(dev_priv);
dev_priv->pm._irqs_disabled = true;
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ee3942f0b068..67cfed6d911a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3553,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (WARN_ON(!intel_encoder->base.crtc))
return;
+ if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+ return;
+
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp, link_status)) {
return;
@@ -4003,6 +4006,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
+static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ if (!is_edp(intel_dp))
+ return;
+
+ edp_panel_vdd_off_sync(intel_dp);
+}
+
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
@@ -4037,15 +4050,21 @@ bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ enum intel_display_power_domain power_domain;
+ bool ret = true;
+
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
long_hpd ? "long" : "short");
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
if (long_hpd) {
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
@@ -4061,8 +4080,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
} else {
if (intel_dp->is_mst) {
- ret = intel_dp_check_mst_status(intel_dp);
- if (ret == -EINVAL)
+ if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
goto mst_fail;
}
@@ -4076,7 +4094,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
- return false;
+ ret = false;
+ goto put_power;
mst_fail:
/* if we were in MST mode, and device is not there get out of MST mode */
if (intel_dp->is_mst) {
@@ -4084,7 +4103,10 @@ mst_fail:
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
}
- return true;
+put_power:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -4722,6 +4744,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->disable = intel_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
if (IS_CHERRYVIEW(dev)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4b2664bd5b81..b8c8bbd8e5f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -153,6 +153,12 @@ struct intel_encoder {
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_config *pipe_config);
+ /*
+ * Called during system suspend after all pending requests for the
+ * encoder are flushed (for example for DP AUX transactions) and
+ * device interrupts are disabled.
+ */
+ void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
};
@@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
+ struct intel_load_detect_pipe *old);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *pipelined);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index e211eef4b7e4..32186a656816 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
+ drm_modeset_acquire_init(&ctx, 0);
+
if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp, &ctx);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
return connector_status_unknown;
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
} else
return connector->status;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 0013ad0db9ef..f77b7135ee4c 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
- si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
+ si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 022561e28707..d416bb2ff48d 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif
+ rdev->pm.dpm.thermal.min_temp = low_temp;
+ rdev->pm.dpm.thermal.max_temp = high_temp;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b625646bf3e2..79a5a5519bd6 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 hdp_host_path_cntl;
u32 tmp;
- int i, j, k;
+ int i, j;
switch (rdev->family) {
case CHIP_BONAIRE:
@@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x130B) ||
(rdev->pdev->device == 0x130E) ||
(rdev->pdev->device == 0x1315) ||
+ (rdev->pdev->device == 0x1318) ||
(rdev->pdev->device == 0x131B)) {
rdev->config.cik.max_cu_per_sh = 4;
rdev->config.cik.max_backends_per_se = 1;
@@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev)
rdev->config.cik.max_sh_per_se,
rdev->config.cik.max_backends_per_se);
+ rdev->config.cik.active_cus = 0;
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
- for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
- rdev->config.cik.active_cus +=
- hweight32(cik_get_cu_active_bitmap(rdev, i, j));
- }
+ rdev->config.cik.active_cus +=
+ hweight32(cik_get_cu_active_bitmap(rdev, i, j));
}
}
@@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
@@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
}
+/**
+ * cik_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+ if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+ /* Prevent the PFP from running ahead of the semaphore wait */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
+ }
+
return true;
}
@@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
@@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
@@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
return 0;
}
@@ -5958,14 +5975,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* update SH_MEM_* regs */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, VMID(vm->id));
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SH_MEM_BASES >> 2);
radeon_ring_write(ring, 0);
@@ -5976,7 +5993,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
radeon_ring_write(ring, 0);
@@ -5987,7 +6004,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index bcf480510ac2..192278bc993c 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
@@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
radeon_ring_write(ring, 1); /* number of DWs to follow */
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = readl(ptr);
@@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4fedd14e670a..dbca60c7d097 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 478caefe0fef..afaba388c36d 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9ef8c38f2d66..8b58e11b64fa 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1438,14 +1438,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
return kv_enable_uvd_dpm(rdev, !gate);
}
-static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
u8 i;
struct radeon_vce_clock_voltage_dependency_table *table =
&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
for (i = 0; i < table->count; i++) {
- if (table->entries[i].evclk >= 0) /* XXX */
+ if (table->entries[i].evclk >= evclk)
break;
}
@@ -1468,7 +1468,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
- pi->vce_boot_level = kv_get_vce_boot_level(rdev);
+ pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
ret = kv_copy_bytes_to_smc(rdev,
pi->dpm_table_start +
@@ -2726,7 +2726,10 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->caps_sclk_ds = true;
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
- pi->bapm_enable = true;
+ if (radeon_bapm == 0)
+ pi->bapm_enable = false;
+ else
+ pi->bapm_enable = true;
pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 327b85f7fd0d..ba89375f197f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1505,7 +1505,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
cayman_cp_enable(rdev, true);
@@ -1547,7 +1547,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
/* XXX init other rings */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 04b5940b8923..4c5ec44ff328 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev,
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
return r;
}
@@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
@@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
radeon_ring_write(ring, PACKET0(scratch, 0));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
@@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[6] = PACKET2(0);
ib.ptr[7] = PACKET2(0);
ib.length_dw = 8;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 58f0473aa73f..67780374a652 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
return r;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 75b30338c226..1bc4704034ce 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
static void r300_errata(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 802b19220a21..2828605aef3f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(ring, rdev->config.r300.resync_scratch);
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FINISH);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c70a504d96af..e8bf0ea2dade 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2547,7 +2547,7 @@ int r600_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2683,7 +2683,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
@@ -2753,6 +2753,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -2768,6 +2779,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+ /* PFP_SYNC_ME packet only exists on 7xx+ */
+ if (emit_wait && (rdev->family >= CHIP_RV770)) {
+ /* Prevent the PFP from running ahead of the semaphore wait */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
+ }
+
return true;
}
@@ -2845,7 +2863,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
@@ -3165,7 +3183,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 4969cef44a19..51fd98553eaf 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = readl(ptr);
@@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[3] = 0xDEADBEEF;
ib.length_dw = 4;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f94e7a9afe75..0c4a7d8d93e0 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1597,6 +1597,7 @@
*/
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+#define PACKET3_PFP_SYNC_ME 0x42 /* r7xx+ only */
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9e1732eb402c..b281886f6f51 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -105,6 +105,7 @@ extern int radeon_vm_size;
extern int radeon_vm_block_size;
extern int radeon_deep_color;
extern int radeon_use_pflipirq;
+extern int radeon_bapm;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -967,7 +968,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib);
+ struct radeon_ib *const_ib, bool hdp_flush);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -977,8 +978,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+ bool hdp_flush);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+ bool hdp_flush);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ee712c199b25..83f382e8e40e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
* the buffers used for read only, which doubles the range
* to 0 to 31. 32 is reserved for the kernel driver.
*/
- priority = (r->flags & 0xf) * 2 + !!r->write_domain;
+ priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+ + !!r->write_domain;
/* the first reloc of an UVD job is the msg and that must be in
VRAM, also but everything into VRAM on AGP cards to avoid
@@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
radeon_vce_note_usage(rdev);
radeon_cs_sync_rings(parser);
- r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
@@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
- r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+ r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
} else {
- r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
}
out:
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c8ea050c8fa4..6a219bcee66d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1680,8 +1680,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
- radeon_pm_suspend(rdev);
radeon_suspend(rdev);
+ radeon_hpd_fini(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1726,9 +1726,39 @@ retry:
}
}
- radeon_pm_resume(rdev);
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ /* do dpm late init */
+ r = radeon_pm_late_init(rdev);
+ if (r) {
+ rdev->pm.dpm_enabled = false;
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+ }
+ } else {
+ /* resume old pm late */
+ radeon_pm_resume(rdev);
+ }
+
+ /* init dig PHYs, disp eng pll */
+ if (rdev->is_atom_bios) {
+ radeon_atom_encoder_init(rdev);
+ radeon_atom_disp_eng_pll_init(rdev);
+ /* turn on the BL */
+ if (rdev->mode_info.bl_encoder) {
+ u8 bl_level = radeon_get_backlight_level(rdev,
+ rdev->mode_info.bl_encoder);
+ radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+ bl_level);
+ }
+ }
+ /* reset hpd state */
+ radeon_hpd_init(rdev);
+
drm_helper_resume_force_mode(rdev->ddev);
+ /* set the power state here in case we are a PX system or headless */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+ radeon_pm_compute_clocks(rdev);
+
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 092d067f93e1..8df888908833 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -180,6 +180,7 @@ int radeon_vm_size = 8;
int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
+int radeon_bapm = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -259,6 +260,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444);
MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
+MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(bapm, radeon_bapm, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 65b0c213488d..5bf2c0a05827 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
* @rdev: radeon_device pointer
* @ib: IB object to schedule
* @const_ib: Const IB to schedule (SI only)
+ * @hdp_flush: Whether or not to perform an HDP cache flush
*
* Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure.
@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
* to SI there was just a DE IB.
*/
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib)
+ struct radeon_ib *const_ib, bool hdp_flush)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
int r = 0;
@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (ib->vm)
radeon_vm_fence(rdev, ib->vm, ib->fence);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, hdp_flush);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 23314be49480..164898b0010c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return snprintf(buf, PAGE_SIZE, "off\n");
-
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
- /* Can't set dpm state when the card is off */
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
goto fail;
}
mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
+
+ /* Can't set dpm state when the card is off */
+ if (!(rdev->flags & RADEON_IS_PX) ||
+ (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
+ radeon_pm_compute_clocks(rdev);
+
fail:
return count;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 5b4e0cf231a0..d65607902537 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
*
* Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics).
*/
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+ bool hdp_flush)
{
/* If we are emitting the HDP flush via the ring buffer, we need to
* do it before padding.
*/
- if (rdev->asic->ring[ring->idx]->hdp_flush)
+ if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
@@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
/* If we are emitting the HDP flush via MMIO, we need to do it after
* all CPU writes to VRAM finished.
*/
- if (rdev->asic->mmio_hdp_flush)
+ if (hdp_flush && rdev->asic->mmio_hdp_flush)
rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
}
@@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
+ * @hdp_flush: Whether or not to perform an HDP cache flush
*
* Call radeon_ring_commit() then unlock the ring (all asics).
*/
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
+ bool hdp_flush)
{
- radeon_ring_commit(rdev, ring);
+ radeon_ring_commit(rdev, ring, hdp_flush);
mutex_unlock(&rdev->ring_lock);
}
@@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, data[i]);
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
kfree(data);
return 0;
}
@@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
- (rdev->flags & RADEON_IS_PCIE) ?
- RADEON_GEM_GTT_WC : 0,
+ RADEON_GEM_DOMAIN_GTT, 0,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index dbd6bcde92de..56d9fd66d8ae 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
continue;
}
- radeon_ring_commit(rdev, &rdev->ring[i]);
+ radeon_ring_commit(rdev, &rdev->ring[i], false);
radeon_fence_note_sync(fence, ring);
semaphore->gpu_addr += 8;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5adf4207453d..17bc3dced9f1 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
return r;
}
radeon_fence_emit(rdev, fence, ring->idx);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
return 0;
}
@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA);
+ radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
if (r)
@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA);
+ radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
if (r)
@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB);
+ radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_fence_wait(fence1, false);
if (r) {
@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB);
+ radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_fence_wait(fence2, false);
if (r) {
@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA);
+ radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
if (r)
@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB);
+ radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
if (r)
goto out_cleanup;
@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringC);
+ radeon_ring_unlock_commit(rdev, ringC, false);
for (i = 0; i < 30; ++i) {
mdelay(100);
@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringC);
+ radeon_ring_unlock_commit(rdev, ringC, false);
mdelay(1000);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6bf55ec85b62..341848a14376 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
ib.ptr[i] = PACKET2(0);
ib.length_dw = 16;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r)
goto err;
ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index f9b70a43aa52..c7190aadbd89 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
for (i = ib.length_dw; i < ib_size_dw; ++i)
ib.ptr[i] = 0x0;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
}
@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
for (i = ib.length_dw; i < ib_size_dw; ++i)
ib.ptr[i] = 0x0;
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
}
@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
radeon_ring_write(ring, VCE_CMD_END);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
if (vce_v1_0_get_rptr(rdev, ring) != rptr)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ccae4d9dc3de..088ffdc2f577 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > 64);
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r)
goto error;
@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
/* add a clone of the bo_va to clear the old address */
struct radeon_bo_va *tmp;
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ return -ENOMEM;
+ }
tmp->it.start = bo_va->it.start;
tmp->it.last = bo_va->it.last;
tmp->vm = vm;
@@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw);
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
@@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
WARN_ON(ib.length_dw > ndw);
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
- r = radeon_ib_schedule(rdev, &ib, NULL);
+ r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 3e21e869015f..8a477bf1fdb3 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(ring, PACKET0(0x20C8, 0));
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index bbf2e076ee45..74426ac2bb5c 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 011779bd2b3d..a1274a31405c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
u32 sx_debug_1;
u32 hdp_host_path_cntl;
u32 tmp;
- int i, j, k;
+ int i, j;
switch (rdev->family) {
case CHIP_TAHITI:
@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.max_sh_per_se,
rdev->config.si.max_cu_per_sh);
+ rdev->config.si.active_cus = 0;
for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
- for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
- rdev->config.si.active_cus +=
- hweight32(si_get_cu_active_bitmap(rdev, i, j));
- }
+ rdev->config.si.active_cus +=
+ hweight32(si_get_cu_active_bitmap(rdev, i, j));
}
}
@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
radeon_ring_write(ring, 0xc000);
radeon_ring_write(ring, 0xe000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
si_cp_enable(rdev, true);
@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
return 0;
@@ -5028,7 +5027,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
radeon_ring_write(ring, 0);
@@ -5036,7 +5035,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 716505129450..7c22baaf94db 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence);
return r;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 32e50be9c4ac..57f780053b3e 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- /* There are stability issues reported on with
- * bapm enabled when switching between AC and battery
- * power. At the same time, some MSI boards hang
- * if it's not enabled and dpm is enabled. Just enable
- * it for MSI boards right now.
- */
- if (rdev->pdev->subsystem_vendor == 0x1462)
- pi->enable_bapm = true;
- else
+ if (radeon_bapm == -1) {
+ /* There are reported stability issues with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some MSI boards hang
+ * if it's not enabled and dpm is enabled. Just enable
+ * it for MSI boards right now.
+ */
+ if (rdev->pdev->subsystem_vendor == 0x1462)
+ pi->enable_bapm = true;
+ else
+ pi->enable_bapm = false;
+ } else if (radeon_bapm == 0) {
pi->enable_bapm = false;
+ } else {
+ pi->enable_bapm = true;
+ }
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index be42c8125203..cda391347286 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
radeon_ring_write(ring, 3);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
done:
/* lower clocks again */
@@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(UVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index 1bdcccc54a1d..f745d2c1325e 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -28,7 +28,7 @@
static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+ if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
rdesc[11] = rdesc[16] = 0xff;
rdesc[12] = rdesc[17] = 0x03;
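The size checks in this and the following report-descriptor fixups are off-by-one corrections: a fixup that writes rdesc[N] must require *rsize >= N + 1. A minimal sketch of the pattern, using the Cherry fixup's indices for illustration:

/* Illustrative pattern only: bound-check against the highest index that
 * will be read or written before patching the descriptor.
 */
static __u8 *example_fixup(struct hid_device *hdev, __u8 *rdesc,
			   unsigned int *rsize)
{
	if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
		rdesc[11] = rdesc[16] = 0xff;
		rdesc[12] = rdesc[17] = 0x03;	/* index 17 -> *rsize >= 18 */
	}
	return rdesc;
}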
diff --git a/drivers/hid/hid-huion.c b/drivers/hid/hid-huion.c
index 60f44cd1b0ed..61b68ca27790 100644
--- a/drivers/hid/hid-huion.c
+++ b/drivers/hid/hid-huion.c
@@ -84,6 +84,15 @@ static const __u8 huion_tablet_rdesc_template[] = {
0xC0 /* End Collection */
};
+/* Parameter indices */
+enum huion_prm {
+ HUION_PRM_X_LM = 1,
+ HUION_PRM_Y_LM = 2,
+ HUION_PRM_PRESSURE_LM = 4,
+ HUION_PRM_RESOLUTION = 5,
+ HUION_PRM_NUM
+};
+
/* Driver data */
struct huion_drvdata {
__u8 *rdesc;
@@ -115,7 +124,12 @@ static int huion_tablet_enable(struct hid_device *hdev)
int rc;
struct usb_device *usb_dev = hid_to_usb_dev(hdev);
struct huion_drvdata *drvdata = hid_get_drvdata(hdev);
- __le16 buf[6];
+ __le16 *buf = NULL;
+ size_t len;
+ s32 params[HUION_PH_ID_NUM];
+ s32 resolution;
+ __u8 *p;
+ s32 v;
/*
* Read string descriptor containing tablet parameters. The specific
@@ -123,65 +137,79 @@ static int huion_tablet_enable(struct hid_device *hdev)
* driver traffic.
* NOTE: This enables fully-functional tablet mode.
*/
+ len = HUION_PRM_NUM * sizeof(*buf);
+ buf = kmalloc(len, GFP_KERNEL);
+ if (buf == NULL) {
+ hid_err(hdev, "failed to allocate parameter buffer\n");
+ rc = -ENOMEM;
+ goto cleanup;
+ }
rc = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
(USB_DT_STRING << 8) + 0x64,
- 0x0409, buf, sizeof(buf),
+ 0x0409, buf, len,
USB_CTRL_GET_TIMEOUT);
- if (rc == -EPIPE)
- hid_warn(hdev, "device parameters not found\n");
- else if (rc < 0)
- hid_warn(hdev, "failed to get device parameters: %d\n", rc);
- else if (rc != sizeof(buf))
- hid_warn(hdev, "invalid device parameters\n");
- else {
- s32 params[HUION_PH_ID_NUM];
- s32 resolution;
- __u8 *p;
- s32 v;
+ if (rc == -EPIPE) {
+ hid_err(hdev, "device parameters not found\n");
+ rc = -ENODEV;
+ goto cleanup;
+ } else if (rc < 0) {
+ hid_err(hdev, "failed to get device parameters: %d\n", rc);
+ rc = -ENODEV;
+ goto cleanup;
+ } else if (rc != len) {
+ hid_err(hdev, "invalid device parameters\n");
+ rc = -ENODEV;
+ goto cleanup;
+ }
- /* Extract device parameters */
- params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[1]);
- params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[2]);
- params[HUION_PH_ID_PRESSURE_LM] = le16_to_cpu(buf[4]);
- resolution = le16_to_cpu(buf[5]);
- if (resolution == 0) {
- params[HUION_PH_ID_X_PM] = 0;
- params[HUION_PH_ID_Y_PM] = 0;
- } else {
- params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] *
- 1000 / resolution;
- params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] *
- 1000 / resolution;
- }
+ /* Extract device parameters */
+ params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[HUION_PRM_X_LM]);
+ params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[HUION_PRM_Y_LM]);
+ params[HUION_PH_ID_PRESSURE_LM] =
+ le16_to_cpu(buf[HUION_PRM_PRESSURE_LM]);
+ resolution = le16_to_cpu(buf[HUION_PRM_RESOLUTION]);
+ if (resolution == 0) {
+ params[HUION_PH_ID_X_PM] = 0;
+ params[HUION_PH_ID_Y_PM] = 0;
+ } else {
+ params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] *
+ 1000 / resolution;
+ params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] *
+ 1000 / resolution;
+ }
- /* Allocate fixed report descriptor */
- drvdata->rdesc = devm_kmalloc(&hdev->dev,
- sizeof(huion_tablet_rdesc_template),
- GFP_KERNEL);
- if (drvdata->rdesc == NULL) {
- hid_err(hdev, "failed to allocate fixed rdesc\n");
- return -ENOMEM;
- }
- drvdata->rsize = sizeof(huion_tablet_rdesc_template);
+ /* Allocate fixed report descriptor */
+ drvdata->rdesc = devm_kmalloc(&hdev->dev,
+ sizeof(huion_tablet_rdesc_template),
+ GFP_KERNEL);
+ if (drvdata->rdesc == NULL) {
+ hid_err(hdev, "failed to allocate fixed rdesc\n");
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ drvdata->rsize = sizeof(huion_tablet_rdesc_template);
- /* Format fixed report descriptor */
- memcpy(drvdata->rdesc, huion_tablet_rdesc_template,
- drvdata->rsize);
- for (p = drvdata->rdesc;
- p <= drvdata->rdesc + drvdata->rsize - 4;) {
- if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
- p[3] < sizeof(params)) {
- v = params[p[3]];
- put_unaligned(cpu_to_le32(v), (s32 *)p);
- p += 4;
- } else {
- p++;
- }
+ /* Format fixed report descriptor */
+ memcpy(drvdata->rdesc, huion_tablet_rdesc_template,
+ drvdata->rsize);
+ for (p = drvdata->rdesc;
+ p <= drvdata->rdesc + drvdata->rsize - 4;) {
+ if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
+ p[3] < sizeof(params)) {
+ v = params[p[3]];
+ put_unaligned(cpu_to_le32(v), (s32 *)p);
+ p += 4;
+ } else {
+ p++;
}
}
- return 0;
+ rc = 0;
+
+cleanup:
+ kfree(buf);
+ return rc;
}
static int huion_probe(struct hid_device *hdev, const struct hid_device_id *id)
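Worth noting about the rework above (hedged; the rationale is inferred from common USB driver practice rather than stated in the hunk): buffers handed to usb_control_msg() may be used for DMA, so the parameter buffer is now heap-allocated rather than kept on the stack, and every exit path funnels through a single cleanup label. A condensed sketch of that shape:

/* Illustrative only: allocate a DMA-safe transfer buffer, read the string
 * descriptor carrying the tablet parameters, free the buffer on all paths.
 */
static int example_read_params(struct usb_device *usb_dev)
{
	size_t len = HUION_PRM_NUM * sizeof(__le16);
	__le16 *buf = kmalloc(len, GFP_KERNEL);
	int rc;

	if (!buf)
		return -ENOMEM;
	rc = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			     USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
			     (USB_DT_STRING << 8) + 0x64, 0x0409,
			     buf, len, USB_CTRL_GET_TIMEOUT);
	kfree(buf);
	return rc < 0 ? rc : 0;
}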
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index e77696367591..b92bf01a1ae8 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* - change the button usage range to 4-7 for the extra
* buttons
*/
- if (*rsize >= 74 &&
+ if (*rsize >= 75 &&
rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index a976f48263f6..f91ff145db9a 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
struct usb_device_descriptor *udesc;
__u16 bcdDevice, rev_maj, rev_min;
- if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
+ if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
hid_info(hdev,
"fixing up Logitech keyboard report descriptor\n");
rdesc[84] = rdesc[89] = 0x4d;
rdesc[85] = rdesc[90] = 0x10;
}
- if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
+ if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
rdesc[49] == 0x81 && rdesc[50] == 0x06) {
hid_info(hdev,
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index cc2bd2022198..7835717bc020 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -451,13 +451,13 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
hid_err(hid, "Private driver data not found!\n");
- return 0;
+ return -EINVAL;
}
entry = drv_data->device_props;
if (!entry) {
hid_err(hid, "Device properties not found!\n");
- return 0;
+ return -EINVAL;
}
if (range == 0)
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 486dbde2ba2d..b7ba82960c79 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -238,13 +238,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
return;
}
- if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
- (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
- dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
- __func__, dj_report->device_index);
- return;
- }
-
if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
/* The device is already known. No need to reallocate it. */
dbg_hid("%s: device is already known\n", __func__);
@@ -557,7 +550,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
if (!out_buf)
return -ENOMEM;
- if (count < DJREPORT_SHORT_LENGTH - 2)
+ if (count > DJREPORT_SHORT_LENGTH - 2)
count = DJREPORT_SHORT_LENGTH - 2;
out_buf[0] = REPORT_ID_DJ_SHORT;
@@ -690,6 +683,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
* device (via hid_input_report() ) and return 1 so hid-core does not do
* anything else with it.
*/
+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+ __func__, dj_report->device_index);
+ return false;
+ }
spin_lock_irqsave(&djrcv_dev->lock, flags);
if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index 9e14c00eb1b6..25daf28b26bd 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -24,7 +24,7 @@
static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
+ if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
rdesc[30] = 0x0c;
}
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 736b2502df4f..6aca4f2554bf 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -25,7 +25,7 @@
static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
+ if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 0dc25142f451..8389e8109218 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -909,10 +909,15 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
- if (!test_bit(RMI_STARTED, &data->flags)) {
- hid_hw_stop(hdev);
- return -EIO;
- }
+ if (!test_bit(RMI_STARTED, &data->flags))
+ /*
+ * The device may be in the bootloader if rmi_input_configured
+ * failed to find F11 in the PDT. Print an error, but don't
+ * return an error from rmi_probe so that hidraw will be
+ * accessible from userspace. That way a userspace tool
+ * can be used to reload working firmware on the touchpad.
+ */
+ hid_err(hdev, "Device failed to be properly configured\n");
return 0;
}
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index e244e449cbba..2ac25760a9a9 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -604,9 +604,9 @@ static int sensor_hub_probe(struct hid_device *hdev,
ret = -EINVAL;
goto err_stop_hw;
}
- sd->hid_sensor_hub_client_devs = kzalloc(dev_cnt *
- sizeof(struct mfd_cell),
- GFP_KERNEL);
+ sd->hid_sensor_hub_client_devs = devm_kzalloc(&hdev->dev, dev_cnt *
+ sizeof(struct mfd_cell),
+ GFP_KERNEL);
if (sd->hid_sensor_hub_client_devs == NULL) {
hid_err(hdev, "Failed to allocate memory for mfd cells\n");
ret = -ENOMEM;
@@ -618,11 +618,12 @@ static int sensor_hub_probe(struct hid_device *hdev,
if (collection->type == HID_COLLECTION_PHYSICAL) {
- hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+ hsdev = devm_kzalloc(&hdev->dev, sizeof(*hsdev),
+ GFP_KERNEL);
if (!hsdev) {
hid_err(hdev, "cannot allocate hid_sensor_hub_device\n");
ret = -ENOMEM;
- goto err_no_mem;
+ goto err_stop_hw;
}
hsdev->hdev = hdev;
hsdev->vendor_id = hdev->vendor;
@@ -631,13 +632,13 @@ static int sensor_hub_probe(struct hid_device *hdev,
if (last_hsdev)
last_hsdev->end_collection_index = i;
last_hsdev = hsdev;
- name = kasprintf(GFP_KERNEL, "HID-SENSOR-%x",
- collection->usage);
+ name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "HID-SENSOR-%x",
+ collection->usage);
if (name == NULL) {
hid_err(hdev, "Failed MFD device name\n");
ret = -ENOMEM;
- kfree(hsdev);
- goto err_no_mem;
+ goto err_stop_hw;
}
sd->hid_sensor_hub_client_devs[
sd->hid_sensor_client_cnt].id =
@@ -661,16 +662,10 @@ static int sensor_hub_probe(struct hid_device *hdev,
ret = mfd_add_devices(&hdev->dev, 0, sd->hid_sensor_hub_client_devs,
sd->hid_sensor_client_cnt, NULL, 0, NULL);
if (ret < 0)
- goto err_no_mem;
+ goto err_stop_hw;
return ret;
-err_no_mem:
- for (i = 0; i < sd->hid_sensor_client_cnt; ++i) {
- kfree(sd->hid_sensor_hub_client_devs[i].name);
- kfree(sd->hid_sensor_hub_client_devs[i].platform_data);
- }
- kfree(sd->hid_sensor_hub_client_devs);
err_stop_hw:
hid_hw_stop(hdev);
@@ -681,7 +676,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
{
struct sensor_hub_data *data = hid_get_drvdata(hdev);
unsigned long flags;
- int i;
hid_dbg(hdev, " hardware removed\n");
hid_hw_close(hdev);
@@ -691,11 +685,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
complete(&data->pending.ready);
spin_unlock_irqrestore(&data->lock, flags);
mfd_remove_devices(&hdev->dev);
- for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
- kfree(data->hid_sensor_hub_client_devs[i].name);
- kfree(data->hid_sensor_hub_client_devs[i].platform_data);
- }
- kfree(data->hid_sensor_hub_client_devs);
hid_set_drvdata(hdev, NULL);
mutex_destroy(&data->mutex);
}
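The conversions above move the per-collection allocations to device-managed memory, which is why the explicit kfree() loops in the error and remove paths can be dropped: devm_* allocations are released automatically when the HID device is unbound. A short sketch of the idiom, with the cell array as the example:

/* Illustrative only: memory from devm_kzalloc() is tied to &hdev->dev and
 * needs no matching kfree() in error paths or in sensor_hub_remove().
 */
sd->hid_sensor_hub_client_devs =
	devm_kzalloc(&hdev->dev, dev_cnt * sizeof(struct mfd_cell), GFP_KERNEL);
if (!sd->hid_sensor_hub_client_devs)
	return -ENOMEM;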
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index 87fc91e1c8de..91072fa54663 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -24,7 +24,7 @@
static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
+ if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
rdesc[106] == 0x03) {
hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
rdesc[105] = rdesc[110] = 0x03;
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 3e3b680dc007..b51a402752c4 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -23,17 +23,14 @@ config I2C
This I2C support can also be built as a module. If so, the module
will be called i2c-core.
-config I2C_ACPI
- bool "I2C ACPI support"
- select I2C
- depends on ACPI
+config ACPI_I2C_OPREGION
+ bool "ACPI I2C Operation region support"
+ depends on I2C=y && ACPI
default y
help
- Say Y here if you want to enable ACPI I2C support. This includes support
- for automatic enumeration of I2C slave devices and support for ACPI I2C
- Operation Regions. Operation Regions allow firmware (BIOS) code to
- access I2C slave devices, such as smart batteries through an I2C host
- controller driver.
+ Say Y here if you want to enable ACPI I2C operation region support.
+ Operation Regions allow firmware (BIOS) code to access I2C slave devices,
+ such as smart batteries, through an I2C host controller driver.
if I2C
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index a1f590cbb435..e0228b228256 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -3,7 +3,7 @@
#
i2ccore-y := i2c-core.o
-i2ccore-$(CONFIG_I2C_ACPI) += i2c-acpi.o
+i2ccore-$(CONFIG_ACPI) += i2c-acpi.o
obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
obj-$(CONFIG_I2C) += i2ccore.o
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 2994690b26e9..10467a327749 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -164,6 +164,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
+#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
@@ -828,6 +829,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/i2c-acpi.c b/drivers/i2c/i2c-acpi.c
index e8b61967334b..0dbc18c15c43 100644
--- a/drivers/i2c/i2c-acpi.c
+++ b/drivers/i2c/i2c-acpi.c
@@ -126,6 +126,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adap)
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
+#ifdef CONFIG_ACPI_I2C_OPREGION
static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
u8 cmd, u8 *data, u8 data_len)
{
@@ -360,3 +361,4 @@ void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
acpi_bus_detach_private_data(handle);
}
+#endif
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 18405314168b..ecb0109a5360 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3149,14 +3149,16 @@ free_domains:
static void cleanup_domain(struct protection_domain *domain)
{
- struct iommu_dev_data *dev_data, *next;
+ struct iommu_dev_data *entry;
unsigned long flags;
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
- __detach_device(dev_data);
- atomic_set(&dev_data->bind, 0);
+ while (!list_empty(&domain->dev_list)) {
+ entry = list_first_entry(&domain->dev_list,
+ struct iommu_dev_data, list);
+ __detach_device(entry);
+ atomic_set(&entry->bind, 0);
}
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d1f5caad04f9..5619f264862d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3869,6 +3869,14 @@ static int device_notifier(struct notifier_block *nb,
action != BUS_NOTIFY_DEL_DEVICE)
return 0;
+ /*
+ * If the device is still attached to a device driver we can't
+ * tear down the domain yet as DMA mappings may still be in use.
+ * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
+ */
+ if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
+ return 0;
+
domain = find_domain(dev);
if (!domain)
return 0;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 169836020208..ac4adb337038 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -995,7 +995,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
size_t orig_size = size;
int ret = 0;
- if (unlikely(domain->ops->unmap == NULL ||
+ if (unlikely(domain->ops->map == NULL ||
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
diff --git a/drivers/isdn/hardware/eicon/xdi_msg.h b/drivers/isdn/hardware/eicon/xdi_msg.h
index 58368f7b5cba..2498c349a32e 100644
--- a/drivers/isdn/hardware/eicon/xdi_msg.h
+++ b/drivers/isdn/hardware/eicon/xdi_msg.h
@@ -1,6 +1,6 @@
/* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */
-#ifndef __DIVA_XDI_UM_CFG_MESSSGE_H__
+#ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__
#define __DIVA_XDI_UM_CFG_MESSAGE_H__
/*
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b08c18871323..6703751d87d7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2953,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
*/
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
end_reshape(conf);
+ close_sync(conf);
return 0;
}
@@ -3081,6 +3082,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
}
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ r10_bio->state = 0;
raise_barrier(conf, rb2 != NULL);
atomic_set(&r10_bio->remaining, 0);
@@ -3269,6 +3271,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (sync_blocks < max_sync)
max_sync = sync_blocks;
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ r10_bio->state = 0;
r10_bio->mddev = mddev;
atomic_set(&r10_bio->remaining, 0);
@@ -4384,6 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
read_more:
/* Now schedule reads for blocks from sector_nr to last */
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ r10_bio->state = 0;
raise_barrier(conf, sectors_done != 0);
atomic_set(&r10_bio->remaining, 0);
r10_bio->mddev = mddev;
@@ -4398,6 +4402,7 @@ read_more:
* on all the target devices.
*/
// FIXME
+ mempool_free(r10_bio, conf->r10buf_pool);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
return sectors_done;
}
@@ -4410,7 +4415,7 @@ read_more:
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_sync_read;
read_bio->bi_rw = READ;
- read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+ read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
read_bio->bi_vcnt = 0;
read_bio->bi_iter.bi_size = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 6234b2e84587..183588b11fc1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2922,7 +2922,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
(!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
!test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
(sh->raid_conf->level == 6 && s->failed && s->to_write &&
- s->to_write < sh->raid_conf->raid_disks - 2 &&
+ s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 &&
(!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
/* we would like to get this block, possibly by computing it,
* otherwise read it if the backing disk is insync
@@ -3817,6 +3817,8 @@ static void handle_stripe(struct stripe_head *sh)
set_bit(R5_Wantwrite, &dev->flags);
if (prexor)
continue;
+ if (s.failed > 1)
+ continue;
if (!test_bit(R5_Insync, &dev->flags) ||
((i == sh->pd_idx || i == sh->qd_idx) &&
s.failed == 0))
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 5dede6e64376..109cb44291f5 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -280,7 +280,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
+ if (!priv->raminit_ctrlreg || priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
priv->raminit = c_can_hw_raminit_ti;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index f425ec2c7839..944aa5d3af6e 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -549,6 +549,13 @@ static void do_state(struct net_device *dev,
/* process state changes depending on the new state */
switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ netdev_dbg(dev, "Error Warning\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ break;
case CAN_STATE_ERROR_ACTIVE:
netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
@@ -852,6 +859,8 @@ static int flexcan_chip_start(struct net_device *dev)
if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
+ else
+ reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK;
/* save for later use */
priv->reg_ctrl_default = reg_ctrl;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index d1692154ed1b..b27ac6074afb 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -172,6 +172,35 @@ static void set_normal_mode(struct net_device *dev)
netdev_err(dev, "setting SJA1000 into normal mode failed!\n");
}
+/*
+ * initialize SJA1000 chip:
+ * - reset chip
+ * - set output mode
+ * - set baudrate
+ * - enable interrupts
+ * - start operating mode
+ */
+static void chipset_init(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+ /* set clock divider and output control register */
+ priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
+
+ /* set acceptance filter (accept all) */
+ priv->write_reg(priv, SJA1000_ACCC0, 0x00);
+ priv->write_reg(priv, SJA1000_ACCC1, 0x00);
+ priv->write_reg(priv, SJA1000_ACCC2, 0x00);
+ priv->write_reg(priv, SJA1000_ACCC3, 0x00);
+
+ priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
+ priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
+ priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
+ priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
+
+ priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
+}
+
static void sja1000_start(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
@@ -180,6 +209,10 @@ static void sja1000_start(struct net_device *dev)
if (priv->can.state != CAN_STATE_STOPPED)
set_reset_mode(dev);
+ /* Initialize chip if uninitialized at this stage */
+ if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
+ chipset_init(dev);
+
/* Clear error counters and error code capture */
priv->write_reg(priv, SJA1000_TXERR, 0x0);
priv->write_reg(priv, SJA1000_RXERR, 0x0);
@@ -237,35 +270,6 @@ static int sja1000_get_berr_counter(const struct net_device *dev,
}
/*
- * initialize SJA1000 chip:
- * - reset chip
- * - set output mode
- * - set baudrate
- * - enable interrupts
- * - start operating mode
- */
-static void chipset_init(struct net_device *dev)
-{
- struct sja1000_priv *priv = netdev_priv(dev);
-
- /* set clock divider and output control register */
- priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
-
- /* set acceptance filter (accept all) */
- priv->write_reg(priv, SJA1000_ACCC0, 0x00);
- priv->write_reg(priv, SJA1000_ACCC1, 0x00);
- priv->write_reg(priv, SJA1000_ACCC2, 0x00);
- priv->write_reg(priv, SJA1000_ACCC3, 0x00);
-
- priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
- priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
- priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
- priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
-
- priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
-}
-
-/*
* transmit a CAN message
* message layout in the sk_buff should be like this:
* xx xx xx xx ff ll 00 11 22 33 44 55 66 77
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index e1a8f4e19983..e4222af2baa6 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -563,15 +563,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
struct xgene_enet_desc_ring *ring;
ring = pdata->tx_ring;
- if (ring && ring->cp_ring && ring->cp_ring->cp_skb)
- devm_kfree(dev, ring->cp_ring->cp_skb);
- xgene_enet_free_desc_ring(ring);
+ if (ring) {
+ if (ring->cp_ring && ring->cp_ring->cp_skb)
+ devm_kfree(dev, ring->cp_ring->cp_skb);
+ xgene_enet_free_desc_ring(ring);
+ }
ring = pdata->rx_ring;
- if (ring && ring->buf_pool && ring->buf_pool->rx_skb)
- devm_kfree(dev, ring->buf_pool->rx_skb);
- xgene_enet_free_desc_ring(ring->buf_pool);
- xgene_enet_free_desc_ring(ring);
+ if (ring) {
+ if (ring->buf_pool) {
+ if (ring->buf_pool->rx_skb)
+ devm_kfree(dev, ring->buf_pool->rx_skb);
+ xgene_enet_free_desc_ring(ring->buf_pool);
+ }
+ xgene_enet_free_desc_ring(ring);
+ }
}
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4e6c82e20224..4ccc806b1150 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -483,11 +483,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
-#ifdef _ASM_GENERIC_INT_L64_H
- DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
-#endif
fp->tpa_queue_used);
#endif
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c13364b6cc19..900cab420810 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10052,6 +10052,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
}
#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
+ 0x1848 + ((f) << 4))
#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
@@ -10059,8 +10061,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
#define BCM_5710_UNDI_FW_MF_VERS (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
{
@@ -10079,72 +10079,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
return false;
}
-static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
-{
- u8 major, minor, version;
- u32 fw;
-
- /* Must check that FW is loaded */
- if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
- MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
- BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
- return false;
- }
-
- /* Read Currently loaded FW version */
- fw = REG_RD(bp, XSEM_REG_PRAM);
- major = fw & 0xff;
- minor = (fw >> 0x8) & 0xff;
- version = (fw >> 0x10) & 0xff;
- BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
- fw, major, minor, version);
-
- if (major > BCM_5710_UNDI_FW_MF_MAJOR)
- return true;
-
- if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
- (minor > BCM_5710_UNDI_FW_MF_MINOR))
- return true;
-
- if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
- (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
- (version >= BCM_5710_UNDI_FW_MF_VERS))
- return true;
-
- return false;
-}
-
-static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
-{
- int i;
-
- /* Due to legacy (FW) code, the first function on each engine has a
- * different offset macro from the rest of the functions.
- * Setting this for all 8 functions is harmless regardless of whether
- * this is actually a multi-function device.
- */
- for (i = 0; i < 2; i++)
- REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
-
- for (i = 2; i < 8; i++)
- REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
-
- BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
-}
-
-static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
+static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
{
u16 rcq, bd;
- u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
+ u32 addr, tmp_reg;
+ if (BP_FUNC(bp) < 2)
+ addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
+ else
+ addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
+
+ tmp_reg = REG_RD(bp, addr);
rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
- REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
+ REG_WR(bp, addr, tmp_reg);
- BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
- port, bd, rcq);
+ BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+ BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
}
static int bnx2x_prev_mcp_done(struct bnx2x *bp)
@@ -10383,7 +10336,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Reset should be performed after BRB is emptied */
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
u32 timer_count = 1000;
- bool need_write = true;
/* Close the MAC Rx to prevent BRB from filling up */
bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10420,20 +10372,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
else
timer_count--;
- /* New UNDI FW supports MF and contains better
- * cleaning methods - might be redundant but harmless.
- */
- if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
- if (need_write) {
- bnx2x_prev_unload_undi_mf(bp);
- need_write = false;
- }
- } else if (prev_undi) {
- /* If UNDI resides in memory,
- * manually increment it
- */
- bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
- }
+ /* If UNDI resides in memory, manually increment it */
+ if (prev_undi)
+ bnx2x_prev_unload_undi_inc(bp, 1);
+
udelay(10);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d57282172ea5..c067b7888ac4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -652,6 +652,7 @@ struct adapter {
struct tid_info tids;
void **tid_release_head;
spinlock_t tid_release_lock;
+ struct workqueue_struct *workq;
struct work_struct tid_release_task;
struct work_struct db_full_task;
struct work_struct db_drop_task;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1afee70ce856..18fb9c61d7ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -643,8 +643,6 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
return ret;
}
-static struct workqueue_struct *workq;
-
/**
* link_start - enable a port
* @dev: the port to enable
@@ -3340,7 +3338,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
adap->tid_release_head = (void **)((uintptr_t)p | chan);
if (!adap->tid_release_task_busy) {
adap->tid_release_task_busy = true;
- queue_work(workq, &adap->tid_release_task);
+ queue_work(adap->workq, &adap->tid_release_task);
}
spin_unlock_bh(&adap->tid_release_lock);
}
@@ -4140,7 +4138,7 @@ void t4_db_full(struct adapter *adap)
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
- queue_work(workq, &adap->db_full_task);
+ queue_work(adap->workq, &adap->db_full_task);
}
}
@@ -4150,7 +4148,7 @@ void t4_db_dropped(struct adapter *adap)
disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
}
- queue_work(workq, &adap->db_drop_task);
+ queue_work(adap->workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -6517,6 +6515,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ adapter->workq = create_singlethread_workqueue("cxgb4");
+ if (!adapter->workq) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
@@ -6715,6 +6719,9 @@ sriov:
out_unmap_bar0:
iounmap(adapter->regs);
out_free_adapter:
+ if (adapter->workq)
+ destroy_workqueue(adapter->workq);
+
kfree(adapter);
out_disable_device:
pci_disable_pcie_error_reporting(pdev);
@@ -6736,6 +6743,11 @@ static void remove_one(struct pci_dev *pdev)
if (adapter) {
int i;
+ /* Tear down per-adapter Work Queue first since it can contain
+ * references to our adapter data structure.
+ */
+ destroy_workqueue(adapter->workq);
+
if (is_offload(adapter))
detach_ulds(adapter);
@@ -6788,20 +6800,14 @@ static int __init cxgb4_init_module(void)
{
int ret;
- workq = create_singlethread_workqueue("cxgb4");
- if (!workq)
- return -ENOMEM;
-
/* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!cxgb4_debugfs_root)
pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
- if (ret < 0) {
+ if (ret < 0)
debugfs_remove(cxgb4_debugfs_root);
- destroy_workqueue(workq);
- }
register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
@@ -6813,8 +6819,6 @@ static void __exit cxgb4_cleanup_module(void)
unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
pci_unregister_driver(&cxgb4_driver);
debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
- flush_workqueue(workq);
- destroy_workqueue(workq);
}
module_init(cxgb4_init_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b0bba32d69d5..d22d728d4e5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2303,7 +2303,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
- c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
+ c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE |
+ FW_EQ_ETH_CMD_VIID(pi->viid));
c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO(1) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0549170d7e2e..5f2729ebadbe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1227,6 +1227,7 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30)
#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
struct fw_eq_ctrl_cmd {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index bdfa80ca5e31..a5fb9493dee8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2250,7 +2250,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
FW_EQ_ETH_CMD_EQSTART |
FW_LEN16(cmd));
- cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
+ cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE |
+ FW_EQ_ETH_CMD_VIID(pi->viid));
cmd.fetchszm_to_iqid =
cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 9f7fa644a397..ee41d98b44b6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -275,6 +275,9 @@ struct fec_enet_private {
struct clk *clk_enet_out;
struct clk *clk_ptp;
+ bool ptp_clk_on;
+ struct mutex ptp_clk_mutex;
+
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
@@ -335,7 +338,7 @@ struct fec_enet_private {
u32 cycle_speed;
int hwts_rx_en;
int hwts_tx_en;
- struct timer_list time_keep;
+ struct delayed_work time_keep;
struct regulator *reg_phy;
};
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4f87dffcb9b2..89355a719625 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1611,17 +1611,27 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
goto failed_clk_enet_out;
}
if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
ret = clk_prepare_enable(fep->clk_ptp);
- if (ret)
+ if (ret) {
+ mutex_unlock(&fep->ptp_clk_mutex);
goto failed_clk_ptp;
+ } else {
+ fep->ptp_clk_on = true;
+ }
+ mutex_unlock(&fep->ptp_clk_mutex);
}
} else {
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
- if (fep->clk_ptp)
+ if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
clk_disable_unprepare(fep->clk_ptp);
+ fep->ptp_clk_on = false;
+ mutex_unlock(&fep->ptp_clk_mutex);
+ }
}
return 0;
@@ -2625,6 +2635,8 @@ fec_probe(struct platform_device *pdev)
if (IS_ERR(fep->clk_enet_out))
fep->clk_enet_out = NULL;
+ fep->ptp_clk_on = false;
+ mutex_init(&fep->ptp_clk_mutex);
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
fep->bufdesc_ex =
pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
@@ -2715,10 +2727,10 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ cancel_delayed_work_sync(&fep->time_keep);
cancel_work_sync(&fep->tx_timeout_work);
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
- del_timer_sync(&fep->time_keep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
if (fep->ptp_clock)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 82386b29914a..cca3617a2321 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -245,12 +245,20 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
u64 ns;
unsigned long flags;
+ mutex_lock(&fep->ptp_clk_mutex);
+ /* Check the ptp clock */
+ if (!fep->ptp_clk_on) {
+ mutex_unlock(&fep->ptp_clk_mutex);
+ return -EINVAL;
+ }
+
ns = ts->tv_sec * 1000000000ULL;
ns += ts->tv_nsec;
spin_lock_irqsave(&fep->tmreg_lock, flags);
timecounter_init(&fep->tc, &fep->cc, ns);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
return 0;
}
@@ -338,17 +346,22 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
* fec_time_keep - call timecounter_read every second to avoid timer overrun
* because ENET just support 32bit counter, will timeout in 4s
*/
-static void fec_time_keep(unsigned long _data)
+static void fec_time_keep(struct work_struct *work)
{
- struct fec_enet_private *fep = (struct fec_enet_private *)_data;
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
u64 ns;
unsigned long flags;
- spin_lock_irqsave(&fep->tmreg_lock, flags);
- ns = timecounter_read(&fep->tc);
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_lock(&fep->ptp_clk_mutex);
+ if (fep->ptp_clk_on) {
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+ mutex_unlock(&fep->ptp_clk_mutex);
- mod_timer(&fep->time_keep, jiffies + HZ);
+ schedule_delayed_work(&fep->time_keep, HZ);
}
/**
@@ -386,15 +399,13 @@ void fec_ptp_init(struct platform_device *pdev)
fec_ptp_start_cyclecounter(ndev);
- init_timer(&fep->time_keep);
- fep->time_keep.data = (unsigned long)fep;
- fep->time_keep.function = fec_time_keep;
- fep->time_keep.expires = jiffies + HZ;
- add_timer(&fep->time_keep);
+ INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
if (IS_ERR(fep->ptp_clock)) {
fep->ptp_clock = NULL;
pr_err("ptp_clock_register failed\n");
}
+
+ schedule_delayed_work(&fep->time_keep, HZ);
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c9127562bd22..21978cc019e7 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -292,6 +292,18 @@ failure:
atomic_add(buffers_added, &(pool->available));
}
+/*
+ * The final 8 bytes of the buffer list are a counter of frames dropped
+ * because no buffer in the buffer list was capable of holding
+ * the frame.
+ */
+static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
+{
+ __be64 *p = adapter->buffer_list_addr + 4096 - 8;
+
+ adapter->rx_no_buffer = be64_to_cpup(p);
+}
+
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
@@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
ibmveth_replenish_buffer_pool(adapter, pool);
}
- adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
- 4096 - 8);
+ ibmveth_update_rx_no_buffer(adapter);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
@@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev)
free_irq(netdev->irq, netdev);
- adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
- 4096 - 8);
+ ibmveth_update_rx_no_buffer(adapter);
ibmveth_cleanup(adapter);
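
The ibmveth_update_rx_no_buffer() helper above removes the duplicated open-coded read and adds the missing big-endian conversion of the counter that lives in the last 8 bytes of the 4 KiB buffer list page. A small standalone sketch of the same read, using glibc's be64toh() in place of be64_to_cpup() and an invented page layout:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read a big-endian 64-bit drop counter stored in the last 8 bytes
     * of a 4 KiB page, the way the new helper does. */
    static uint64_t read_drop_counter(const unsigned char *page)
    {
            uint64_t be;

            memcpy(&be, page + 4096 - 8, sizeof(be));
            return be64toh(be);             /* like be64_to_cpup(p) */
    }

    int main(void)
    {
            unsigned char page[4096] = { 0 };
            uint64_t val = htobe64(1234);

            memcpy(page + 4096 - 8, &val, sizeof(val));
            printf("%llu\n", (unsigned long long)read_drop_counter(page));
            return 0;
    }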
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index bb7fe98b3a6c..537b6216971d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -247,7 +247,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
u32 prttsyn_stat;
int n;
- if (pf->flags & I40E_FLAG_PTP)
+ if (!(pf->flags & I40E_FLAG_PTP))
return;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 89672551dce9..3ac6a0d2f143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1003,11 +1003,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int abs_vf_id;
i40e_status aq_ret;
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return -EINVAL;
+
+ pf = vf->pf;
+ hw = &pf->hw;
+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
/* single place to detect unsuccessful return values */
if (v_retval) {
vf->num_invalid_msgs++;
@@ -1928,17 +1936,20 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf = pf->vf;
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ /* Not all VFs are enabled, so skip the ones that are not */
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ continue;
+
/* Ignore return value on purpose - a given VF may fail, but
* we need to keep going and send to all of them
*/
i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
- vf++;
- abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
}
}
@@ -1954,12 +1965,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf = pf->vf;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
int i;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
@@ -1972,8 +1983,6 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe),
NULL);
- vf++;
- abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
}
}
@@ -2002,7 +2011,18 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
struct i40e_virtchnl_pf_event pfe;
- int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+ int abs_vf_id;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify if the VF is in either init or active before proceeding */
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ return;
+
+ abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
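
Both broadcast loops above had the same shape of bug: abs_vf_id was computed once from the first VF before the loop, and the trailing vf++ plus recompute at the bottom read one element past the array on the final pass. The fix advances the cursor in the for statement and derives the id per iteration. A trivial standalone illustration of that loop shape (names invented):

    #include <stdio.h>

    struct vf { int vf_id; };

    int main(void)
    {
            struct vf vfs[3] = { { 0 }, { 1 }, { 2 } };
            struct vf *vf = vfs;
            int base = 16, i;

            /* Advance the cursor in the for statement and compute the
             * per-VF value inside the body, never after the last element. */
            for (i = 0; i < 3; i++, vf++) {
                    int abs_vf_id = vf->vf_id + base;

                    printf("vf %d -> abs id %d\n", i, abs_vf_id);
            }
            return 0;
    }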
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 16039d1497b8..b84f5ea3d659 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -268,7 +268,7 @@ struct qlcnic_fdt {
u16 cksum;
u16 unused;
u8 model[16];
- u16 mfg_id;
+ u8 mfg_id;
u16 id;
u8 flag;
u8 erase_cmd;
@@ -2362,6 +2362,19 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
return QLC_DEFAULT_VNIC_COUNT;
}
+static inline void qlcnic_swap32_buffer(u32 *buffer, int count)
+{
+#if defined(__BIG_ENDIAN)
+ u32 *tmp = buffer;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ *tmp = swab32(*tmp);
+ tmp++;
+ }
+#endif
+}
+
#ifdef CONFIG_QLCNIC_HWMON
void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
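
qlcnic_swap32_buffer() compiles to a no-op on little-endian hosts and byte-swaps every 32-bit word on big-endian ones, so flash and register dumps reach user space in a consistent layout. A standalone sketch of the same helper using the compiler's byte-order macros and glibc's bswap_32():

    #include <byteswap.h>
    #include <stdint.h>
    #include <stdio.h>

    static void swap32_buffer(uint32_t *buf, int count)
    {
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            int i;

            for (i = 0; i < count; i++)
                    buf[i] = bswap_32(buf[i]);      /* swab32() equivalent */
    #endif
    }

    int main(void)
    {
            uint32_t words[2] = { 0x11223344, 0xdeadbeef };

            swap32_buffer(words, 2);
            printf("0x%08x 0x%08x\n", words[0], words[1]);
            return 0;
    }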
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index a4a4ec0b68f8..476e4998ef99 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2603,7 +2603,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
}
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
- (addr));
+ (addr & 0xFFFF0000));
range = flash_offset + (count * sizeof(u32));
/* Check if data is spread across multiple sectors */
@@ -2753,7 +2753,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
(u8 *)&adapter->ahw->fdt,
count);
-
+ qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count);
qlcnic_83xx_unlock_flash(adapter);
return ret;
}
@@ -2788,7 +2788,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
addr1 = (sector_start_addr & 0xFF) << 16;
addr2 = (sector_start_addr & 0xFF0000) >> 16;
- reversed_addr = addr1 | addr2;
+ reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00);
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
reversed_addr);
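
The erase fix above keeps the middle byte of the 24-bit sector address; the old expression swapped only the top and bottom bytes and silently dropped bits 8-15. A standalone sketch of the corrected byte reversal:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reverse_flash_addr(uint32_t addr)
    {
            uint32_t lo = (addr & 0x0000FF) << 16;
            uint32_t hi = (addr & 0xFF0000) >> 16;

            return lo | hi | (addr & 0x00FF00);     /* middle byte preserved */
    }

    int main(void)
    {
            printf("0x%06x -> 0x%06x\n", 0x123456, reverse_flash_addr(0x123456));
            return 0;
    }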
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f33559b72528..86783e1afcf7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1378,31 +1378,45 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
{
struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
const struct firmware *fw = fw_info->fw;
- u32 dest, *p_cache;
+ u32 dest, *p_cache, *temp;
int i, ret = -EIO;
+ __le32 *temp_le;
u8 data[16];
size_t size;
u64 addr;
+ temp = kzalloc(fw->size, GFP_KERNEL);
+ if (!temp) {
+ release_firmware(fw);
+ fw_info->fw = NULL;
+ return -ENOMEM;
+ }
+
+ temp_le = (__le32 *)fw->data;
+
+ /* The FW image in the file is little endian; swap the data to nullify
+ * the effect of the writel() operation on big endian platforms.
+ */
+ for (i = 0; i < fw->size / sizeof(u32); i++)
+ temp[i] = __le32_to_cpu(temp_le[i]);
+
dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
size = (fw->size & ~0xF);
- p_cache = (u32 *)fw->data;
+ p_cache = temp;
addr = (u64)dest;
ret = qlcnic_ms_mem_write128(adapter, addr,
p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
- release_firmware(fw);
- fw_info->fw = NULL;
- return -EIO;
+ goto exit;
}
/* alignment check */
if (fw->size & 0xF) {
addr = dest + size;
for (i = 0; i < (fw->size & 0xF); i++)
- data[i] = fw->data[size + i];
+ data[i] = temp[size + i];
for (; i < 16; i++)
data[i] = 0;
ret = qlcnic_ms_mem_write128(adapter, addr,
@@ -1410,15 +1424,16 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
if (ret) {
dev_err(&adapter->pdev->dev,
"MS memory write failed\n");
- release_firmware(fw);
- fw_info->fw = NULL;
- return -EIO;
+ goto exit;
}
}
+
+exit:
release_firmware(fw);
fw_info->fw = NULL;
+ kfree(temp);
- return 0;
+ return ret;
}
static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
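
qlcnic_83xx_copy_fw_file() now copies the little-endian firmware image into a host-order scratch buffer before handing it to the write path, instead of pushing fw->data through writel() as-is. A minimal userspace sketch of that conversion step, with an invented four-word image standing in for fw->data:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            uint32_t image_le[4];           /* stands in for fw->data (__le32) */
            uint32_t *temp;
            size_t i, words = 4;

            for (i = 0; i < words; i++)
                    image_le[i] = htole32(0x1000 + i);

            temp = calloc(words, sizeof(*temp));
            if (!temp)
                    return 1;

            for (i = 0; i < words; i++)
                    temp[i] = le32toh(image_le[i]); /* mirrors __le32_to_cpu() */

            for (i = 0; i < words; i++)
                    printf("word %zu: 0x%08x\n", i, (unsigned)temp[i]);

            free(temp);
            return 0;
    }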
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index e46fc39d425d..c9f57fb84b9e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -47,15 +47,26 @@ struct qlcnic_common_entry_hdr {
u32 type;
u32 offset;
u32 cap_size;
+#if defined(__LITTLE_ENDIAN)
u8 mask;
u8 rsvd[2];
u8 flags;
+#else
+ u8 flags;
+ u8 rsvd[2];
+ u8 mask;
+#endif
} __packed;
struct __crb {
u32 addr;
+#if defined(__LITTLE_ENDIAN)
u8 stride;
u8 rsvd1[3];
+#else
+ u8 rsvd1[3];
+ u8 stride;
+#endif
u32 data_size;
u32 no_ops;
u32 rsvd2[4];
@@ -63,15 +74,28 @@ struct __crb {
struct __ctrl {
u32 addr;
+#if defined(__LITTLE_ENDIAN)
u8 stride;
u8 index_a;
u16 timeout;
+#else
+ u16 timeout;
+ u8 index_a;
+ u8 stride;
+#endif
u32 data_size;
u32 no_ops;
+#if defined(__LITTLE_ENDIAN)
u8 opcode;
u8 index_v;
u8 shl_val;
u8 shr_val;
+#else
+ u8 shr_val;
+ u8 shl_val;
+ u8 index_v;
+ u8 opcode;
+#endif
u32 val1;
u32 val2;
u32 val3;
@@ -79,16 +103,27 @@ struct __ctrl {
struct __cache {
u32 addr;
+#if defined(__LITTLE_ENDIAN)
u16 stride;
u16 init_tag_val;
+#else
+ u16 init_tag_val;
+ u16 stride;
+#endif
u32 size;
u32 no_ops;
u32 ctrl_addr;
u32 ctrl_val;
u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
u8 read_addr_stride;
u8 read_addr_num;
u8 rsvd1[2];
+#else
+ u8 rsvd1[2];
+ u8 read_addr_num;
+ u8 read_addr_stride;
+#endif
} __packed;
struct __ocm {
@@ -122,23 +157,39 @@ struct __mux {
struct __queue {
u32 sel_addr;
+#if defined(__LITTLE_ENDIAN)
u16 stride;
u8 rsvd[2];
+#else
+ u8 rsvd[2];
+ u16 stride;
+#endif
u32 size;
u32 no_ops;
u8 rsvd2[8];
u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
u8 read_addr_stride;
u8 read_addr_cnt;
u8 rsvd3[2];
+#else
+ u8 rsvd3[2];
+ u8 read_addr_cnt;
+ u8 read_addr_stride;
+#endif
} __packed;
struct __pollrd {
u32 sel_addr;
u32 read_addr;
u32 sel_val;
+#if defined(__LITTLE_ENDIAN)
u16 sel_val_stride;
u16 no_ops;
+#else
+ u16 no_ops;
+ u16 sel_val_stride;
+#endif
u32 poll_wait;
u32 poll_mask;
u32 data_size;
@@ -153,9 +204,15 @@ struct __mux2 {
u32 no_ops;
u32 sel_val_mask;
u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
u8 sel_val_stride;
u8 data_size;
u8 rsvd[2];
+#else
+ u8 rsvd[2];
+ u8 data_size;
+ u8 sel_val_stride;
+#endif
} __packed;
struct __pollrdmwr {
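
The minidump header structs above overlay 32-bit words read from the adapter, so the order of their sub-word fields has to be flipped on big-endian hosts for the same bytes to land in the same fields. A small sketch of why, with an invented header and the compiler's byte-order macros:

    #include <stdint.h>
    #include <stdio.h>

    struct hdr {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            uint8_t mask;
            uint8_t rsvd[2];
            uint8_t flags;
    #else
            uint8_t flags;
            uint8_t rsvd[2];
            uint8_t mask;
    #endif
    } __attribute__((packed));

    int main(void)
    {
            /* The same 32-bit word decodes identically on either host:
             * mask comes from the low byte, flags from the high byte. */
            union { uint32_t word; struct hdr h; } u = { .word = 0x04000001 };

            printf("mask=%u flags=%u\n", u.h.mask, u.h.flags);
            return 0;
    }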
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index f5786d5792df..59a721fba018 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -280,6 +280,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
if (ret != 0)
return ret;
qlcnic_read_crb(adapter, buf, offset, size);
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
}
@@ -296,6 +297,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
if (ret != 0)
return ret;
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
qlcnic_write_crb(adapter, buf, offset, size);
return size;
}
@@ -329,6 +331,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
return -EIO;
memcpy(buf, &data, size);
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
}
@@ -346,6 +349,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
if (ret != 0)
return ret;
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
memcpy(&data, buf, size);
if (qlcnic_pci_mem_write_2M(adapter, offset, data))
@@ -412,6 +416,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
if (rem)
return QL_STATUS_INVALID_PARAM;
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
ret = validate_pm_config(adapter, pm_cfg, count);
@@ -474,6 +479,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
}
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
}
@@ -555,6 +561,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
if (rem)
return QL_STATUS_INVALID_PARAM;
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
ret = validate_esw_config(adapter, esw_cfg, count);
if (ret)
@@ -649,6 +656,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
}
@@ -688,6 +696,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (rem)
return QL_STATUS_INVALID_PARAM;
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
ret = validate_npar_config(adapter, np_cfg, count);
if (ret)
@@ -759,6 +768,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
}
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
}
@@ -916,6 +926,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
count = size / sizeof(struct qlcnic_pci_func_cfg);
+ qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32));
for (i = 0; i < count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
@@ -969,6 +980,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
}
qlcnic_83xx_unlock_flash(adapter);
+ qlcnic_swap32_buffer((u32 *)p_read_buf, count);
memcpy(buf, p_read_buf, size);
kfree(p_read_buf);
@@ -986,9 +998,10 @@ static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
if (!p_cache)
return -ENOMEM;
+ count = size / sizeof(u32);
+ qlcnic_swap32_buffer((u32 *)buf, count);
memcpy(p_cache, buf, size);
p_src = p_cache;
- count = size / sizeof(u32);
if (qlcnic_83xx_lock_flash(adapter) != 0) {
kfree(p_cache);
@@ -1053,6 +1066,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
if (!p_cache)
return -ENOMEM;
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
memcpy(p_cache, buf, size);
p_src = p_cache;
count = size / sizeof(u32);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 60e4ca01ccbb..a96955597755 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -739,7 +739,10 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct macvlan_dev *vlan = netdev_priv(dev);
int err = -EINVAL;
- if (!vlan->port->passthru)
+ /* Support unicast filter only on passthru devices.
+ * Multicast filter should be allowed on all devices.
+ */
+ if (!vlan->port->passthru && is_unicast_ether_addr(addr))
return -EOPNOTSUPP;
if (flags & NLM_F_REPLACE)
@@ -760,7 +763,10 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct macvlan_dev *vlan = netdev_priv(dev);
int err = -EINVAL;
- if (!vlan->port->passthru)
+ /* Support unicast filter only on passthru devices.
+ * Multicast filter should be allowed on all devices.
+ */
+ if (!vlan->port->passthru && is_unicast_ether_addr(addr))
return -EOPNOTSUPP;
if (is_unicast_ether_addr(addr))
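
The macvlan change narrows the passthru-only restriction to unicast addresses; multicast fdb entries are harmless on any port type. The distinction rests on the I/G bit of the first octet, which is what is_unicast_ether_addr() checks. A standalone sketch of the resulting filter decision (helper and addresses invented):

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_unicast(const unsigned char *addr)
    {
            return !(addr[0] & 1);          /* I/G bit clear => unicast */
    }

    int main(void)
    {
            unsigned char ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
            unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            bool passthru = false;

            /* Mirrors: if (!passthru && is_unicast_ether_addr(addr)) reject */
            printf("unicast rejected:   %d\n", !passthru && is_unicast(ucast));
            printf("multicast rejected: %d\n", !passthru && is_unicast(mcast));
            return 0;
    }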
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 526b94cea569..fdce1ea28790 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -157,6 +157,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
return bcm7xxx_28nm_afe_config_init(phydev);
}
+static int bcm7xxx_28nm_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Re-apply workarounds coming out of suspend/resume */
+ ret = bcm7xxx_28nm_config_init(phydev);
+ if (ret)
+ return ret;
+
+ /* 28nm Gigabit PHYs come out of reset without any half-duplex
+ * or "hub" compliant advertised mode, fix that. This does not
+ * cause any problems with the PHY library since genphy_config_aneg()
+ * gracefully handles auto-negotiated and forced modes.
+ */
+ return genphy_config_aneg(phydev);
+}
+
static int phy_set_clr_bits(struct phy_device *dev, int location,
int set_mask, int clr_mask)
{
@@ -212,7 +229,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
}
/* Workaround for putting the PHY in IDDQ mode, required
- * for all BCM7XXX PHYs
+ * for all BCM7XXX 40nm and 65nm PHYs
*/
static int bcm7xxx_suspend(struct phy_device *phydev)
{
@@ -257,8 +274,7 @@ static struct phy_driver bcm7xxx_driver[] = {
.config_init = bcm7xxx_28nm_afe_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_28nm_afe_config_init,
+ .resume = bcm7xxx_28nm_resume,
.driver = { .owner = THIS_MODULE },
}, {
.phy_id = PHY_ID_BCM7439,
@@ -270,8 +286,7 @@ static struct phy_driver bcm7xxx_driver[] = {
.config_init = bcm7xxx_28nm_afe_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_28nm_afe_config_init,
+ .resume = bcm7xxx_28nm_resume,
.driver = { .owner = THIS_MODULE },
}, {
.phy_id = PHY_ID_BCM7445,
@@ -283,21 +298,7 @@ static struct phy_driver bcm7xxx_driver[] = {
.config_init = bcm7xxx_28nm_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_28nm_config_init,
- .driver = { .owner = THIS_MODULE },
-}, {
- .name = "Broadcom BCM7XXX 28nm",
- .phy_id = PHY_ID_BCM7XXX_28,
- .phy_id_mask = PHY_BCM_OUI_MASK,
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
- .flags = PHY_IS_INTERNAL,
- .config_init = bcm7xxx_28nm_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_28nm_config_init,
+ .resume = bcm7xxx_28nm_afe_config_init,
.driver = { .owner = THIS_MODULE },
}, {
.phy_id = PHY_BCM_OUI_4,
@@ -331,7 +332,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7366, 0xfffffff0, },
{ PHY_ID_BCM7439, 0xfffffff0, },
{ PHY_ID_BCM7445, 0xfffffff0, },
- { PHY_ID_BCM7XXX_28, 0xfffffc00 },
{ PHY_BCM_OUI_4, 0xffff0000 },
{ PHY_BCM_OUI_5, 0xffffff00 },
{ }
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 180c49479c42..a4b08198fb9f 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -43,6 +43,22 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+
+ if (rc < 0)
+ return rc;
+
+ /* Enable energy detect mode for these SMSC transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+
+ return smsc_phy_ack_interrupt(phydev);
+}
+
+static int smsc_phy_reset(struct phy_device *phydev)
+{
int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES);
if (rc < 0)
return rc;
@@ -66,18 +82,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
rc = phy_read(phydev, MII_BMCR);
} while (rc & BMCR_RESET);
}
-
- rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
- if (rc < 0)
- return rc;
-
- /* Enable energy detect mode for this SMSC Transceivers */
- rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
- rc | MII_LAN83C185_EDPWRDOWN);
- if (rc < 0)
- return rc;
-
- return smsc_phy_ack_interrupt (phydev);
+ return 0;
}
static int lan911x_config_init(struct phy_device *phydev)
@@ -142,6 +147,7 @@ static struct phy_driver smsc_phy_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.config_init = smsc_phy_config_init,
+ .soft_reset = smsc_phy_reset,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
@@ -164,6 +170,7 @@ static struct phy_driver smsc_phy_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.config_init = smsc_phy_config_init,
+ .soft_reset = smsc_phy_reset,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
@@ -186,6 +193,7 @@ static struct phy_driver smsc_phy_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.config_init = smsc_phy_config_init,
+ .soft_reset = smsc_phy_reset,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
@@ -230,6 +238,7 @@ static struct phy_driver smsc_phy_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
+ .soft_reset = smsc_phy_reset,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index f46a24ffa3fe..79cb8313c7d8 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -453,7 +453,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
- if (base && size &&
+ if (size &&
early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 3e06a699352d..1471e0a223a5 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -301,16 +301,17 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
/* Get the reg property (if any) */
addr = of_get_property(device, "reg", NULL);
+ /* Try the new-style interrupts-extended first */
+ res = of_parse_phandle_with_args(device, "interrupts-extended",
+ "#interrupt-cells", index, out_irq);
+ if (!res)
+ return of_irq_parse_raw(addr, out_irq);
+
/* Get the interrupts property */
intspec = of_get_property(device, "interrupts", &intlen);
- if (intspec == NULL) {
- /* Try the new-style interrupts-extended */
- res = of_parse_phandle_with_args(device, "interrupts-extended",
- "#interrupt-cells", index, out_irq);
- if (res)
- return -EINVAL;
- return of_irq_parse_raw(addr, out_irq);
- }
+ if (intspec == NULL)
+ return -EINVAL;
+
intlen /= sizeof(*intspec);
pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index d41002667833..a737cb5974de 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -27,6 +27,7 @@ static struct selftest_results {
#define NO_OF_NODES 2
static struct device_node *nodes[NO_OF_NODES];
static int last_node_index;
+static bool selftest_live_tree;
#define selftest(result, fmt, ...) { \
if (!(result)) { \
@@ -630,13 +631,6 @@ static int attach_node_and_children(struct device_node *np)
{
struct device_node *next, *root = np, *dup;
- if (!np) {
- pr_warn("%s: No tree to attach; not running tests\n",
- __func__);
- return -ENODATA;
- }
-
-
/* skip root node */
np = np->child;
/* storing a copy in temporary node */
@@ -672,12 +666,12 @@ static int attach_node_and_children(struct device_node *np)
static int __init selftest_data_add(void)
{
void *selftest_data;
- struct device_node *selftest_data_node;
+ struct device_node *selftest_data_node, *np;
extern uint8_t __dtb_testcases_begin[];
extern uint8_t __dtb_testcases_end[];
const int size = __dtb_testcases_end - __dtb_testcases_begin;
- if (!size || !of_allnodes) {
+ if (!size) {
pr_warn("%s: No testcase data to attach; not running tests\n",
__func__);
return -ENODATA;
@@ -692,6 +686,22 @@ static int __init selftest_data_add(void)
return -ENOMEM;
}
of_fdt_unflatten_tree(selftest_data, &selftest_data_node);
+ if (!selftest_data_node) {
+ pr_warn("%s: No tree to attach; not running tests\n", __func__);
+ return -ENODATA;
+ }
+
+ if (!of_allnodes) {
+ /* enabling flag for removing nodes */
+ selftest_live_tree = true;
+ of_allnodes = selftest_data_node;
+
+ for_each_of_allnodes(np)
+ __of_attach_node_sysfs(np);
+ of_aliases = of_find_node_by_path("/aliases");
+ of_chosen = of_find_node_by_path("/chosen");
+ return 0;
+ }
/* attach the sub-tree to live tree */
return attach_node_and_children(selftest_data_node);
@@ -723,6 +733,18 @@ static void selftest_data_remove(void)
struct device_node *np;
struct property *prop;
+ if (selftest_live_tree) {
+ of_node_put(of_aliases);
+ of_node_put(of_chosen);
+ of_aliases = NULL;
+ of_chosen = NULL;
+ for_each_child_of_node(of_allnodes, np)
+ detach_node_and_children(np);
+ __of_detach_node_sysfs(of_allnodes);
+ of_allnodes = NULL;
+ return;
+ }
+
while (last_node_index >= 0) {
if (nodes[last_node_index]) {
np = of_find_node_by_path(nodes[last_node_index]->full_name);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 2d8a4d05d78f..8922c376456a 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -1,9 +1,18 @@
menu "PCI host controller drivers"
depends on PCI
+config PCI_DRA7XX
+ bool "TI DRA7xx PCIe controller"
+ select PCIE_DW
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC. There
+ are two instances of the PCIe controller in DRA7xx. The controller
+ can act as both EP and RC, and reuses the DesignWare core.
+
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
- depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD
+ depends on ARCH_MVEBU || ARCH_DOVE
depends on OF
config PCIE_DW
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 0daec7941aba..d0e88f114ff9 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
new file mode 100644
index 000000000000..52b34fee07fd
--- /dev/null
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -0,0 +1,458 @@
+/*
+ * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024
+#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028
+#define ERR_SYS BIT(0)
+#define ERR_FATAL BIT(1)
+#define ERR_NONFATAL BIT(2)
+#define ERR_COR BIT(3)
+#define ERR_AXI BIT(4)
+#define ERR_ECRC BIT(5)
+#define PME_TURN_OFF BIT(8)
+#define PME_TO_ACK BIT(9)
+#define PM_PME BIT(10)
+#define LINK_REQ_RST BIT(11)
+#define LINK_UP_EVT BIT(12)
+#define CFG_BME_EVT BIT(13)
+#define CFG_MSE_EVT BIT(14)
+#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+ ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
+ LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
+
+#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034
+#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038
+#define INTA BIT(0)
+#define INTB BIT(1)
+#define INTC BIT(2)
+#define INTD BIT(3)
+#define MSI BIT(4)
+#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+
+#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
+#define LTSSM_EN 0x1
+
+#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
+#define LINK_UP BIT(16)
+
+struct dra7xx_pcie {
+ void __iomem *base;
+ struct phy **phy;
+ int phy_count;
+ struct device *dev;
+ struct pcie_port pp;
+};
+
+#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp)
+
+static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
+{
+ return readl(pcie->base + offset);
+}
+
+static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
+ u32 value)
+{
+ writel(value, pcie->base + offset);
+}
+
+static int dra7xx_pcie_link_up(struct pcie_port *pp)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+
+ return !!(reg & LINK_UP);
+}
+
+static int dra7xx_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 reg;
+ unsigned int retries = 1000;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "link is already up\n");
+ return 0;
+ }
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg |= LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+ while (retries--) {
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+ if (reg & LINK_UP)
+ break;
+ usleep_range(10, 20);
+ }
+
+ if (retries == 0) {
+ dev_err(pp->dev, "link is not up\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+ ~INTERRUPTS);
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
+ ~LEG_EP_INTERRUPTS & ~MSI);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, MSI);
+ else
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+ LEG_EP_INTERRUPTS);
+}
+
+static void dra7xx_pcie_host_init(struct pcie_port *pp)
+{
+ dw_pcie_setup_rc(pp);
+ dra7xx_pcie_establish_link(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+ dra7xx_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops dra7xx_pcie_host_ops = {
+ .link_up = dra7xx_pcie_link_up,
+ .host_init = dra7xx_pcie_host_init,
+};
+
+static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = dra7xx_pcie_intx_map,
+};
+
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+ struct device *dev = pp->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+ &intx_domain_ops, pp);
+ if (!pp->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return PTR_ERR(pp->irq_domain);
+ }
+
+ return 0;
+}
+
+static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+
+ switch (reg) {
+ case MSI:
+ dw_handle_msi_irq(pp);
+ break;
+ case INTA:
+ case INTB:
+ case INTC:
+ case INTD:
+ generic_handle_irq(irq_find_mapping(pp->irq_domain, ffs(reg)));
+ break;
+ }
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
+
+ return IRQ_HANDLED;
+}
+
+
+static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct dra7xx_pcie *dra7xx = arg;
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
+
+ if (reg & ERR_SYS)
+ dev_dbg(dra7xx->dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_dbg(dra7xx->dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dra7xx->dev, "Non Fatal Error\n");
+
+ if (reg & ERR_COR)
+ dev_dbg(dra7xx->dev, "Correctable Error\n");
+
+ if (reg & ERR_AXI)
+ dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_ECRC)
+ dev_dbg(dra7xx->dev, "ECRC Error\n");
+
+ if (reg & PME_TURN_OFF)
+ dev_dbg(dra7xx->dev,
+ "Power Management Event Turn-Off message received\n");
+
+ if (reg & PME_TO_ACK)
+ dev_dbg(dra7xx->dev,
+ "Power Management Turn-Off Ack message received\n");
+
+ if (reg & PM_PME)
+ dev_dbg(dra7xx->dev,
+ "PM Power Management Event message received\n");
+
+ if (reg & LINK_REQ_RST)
+ dev_dbg(dra7xx->dev, "Link Request Reset\n");
+
+ if (reg & LINK_UP_EVT)
+ dev_dbg(dra7xx->dev, "Link-up state change\n");
+
+ if (reg & CFG_BME_EVT)
+ dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
+
+ if (reg & CFG_MSE_EVT)
+ dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
+
+ return IRQ_HANDLED;
+}
+
+static int add_pcie_port(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct pcie_port *pp;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ pp = &dra7xx->pp;
+ pp->dev = dev;
+ pp->ops = &dra7xx_pcie_host_ops;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0) {
+ dev_err(dev, "missing IRQ resource\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->irq,
+ dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
+ "dra7-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+
+ if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+ ret = dra7xx_pcie_init_irq_domain(pp);
+ if (ret < 0)
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
+ pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pp->dbi_base)
+ return -ENOMEM;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dra7xx->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init dra7xx_pcie_probe(struct platform_device *pdev)
+{
+ u32 reg;
+ int ret;
+ int irq;
+ int i;
+ int phy_count;
+ struct phy **phy;
+ void __iomem *base;
+ struct resource *res;
+ struct dra7xx_pcie *dra7xx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ char name[10];
+
+ dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
+ if (!dra7xx)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "missing IRQ resource\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+ IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
+ base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ phy_count = of_property_count_strings(np, "phy-names");
+ if (phy_count < 0) {
+ dev_err(dev, "unable to find the strings\n");
+ return phy_count;
+ }
+
+ phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ for (i = 0; i < phy_count; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_get(dev, name);
+ if (IS_ERR(phy[i]))
+ return PTR_ERR(phy[i]);
+
+ ret = phy_init(phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(phy[i]);
+ if (ret < 0) {
+ phy_exit(phy[i]);
+ goto err_phy;
+ }
+ }
+
+ dra7xx->base = base;
+ dra7xx->phy = phy;
+ dra7xx->dev = dev;
+ dra7xx->phy_count = phy_count;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_phy;
+ }
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg &= ~LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+ platform_set_drvdata(pdev, dra7xx);
+
+ ret = add_pcie_port(dra7xx, pdev);
+ if (ret < 0)
+ goto err_add_port;
+
+ return 0;
+
+err_add_port:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(phy[i]);
+ phy_exit(phy[i]);
+ }
+
+ return ret;
+}
+
+static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
+{
+ struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev);
+ struct pcie_port *pp = &dra7xx->pp;
+ struct device *dev = &pdev->dev;
+ int count = dra7xx->phy_count;
+
+ if (pp->irq_domain)
+ irq_domain_remove(pp->irq_domain);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ while (count--) {
+ phy_power_off(dra7xx->phy[count]);
+ phy_exit(dra7xx->phy[count]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+ { .compatible = "ti,dra7-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
+
+static struct platform_driver dra7xx_pcie_driver = {
+ .remove = __exit_p(dra7xx_pcie_remove),
+ .driver = {
+ .name = "dra7-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = of_dra7xx_pcie_match,
+ },
+};
+
+module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_DESCRIPTION("TI PCIe controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index abd65784618d..0fb0fdb223d5 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -25,6 +25,7 @@
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
@@ -276,6 +277,7 @@ struct tegra_pcie {
unsigned int num_supplies;
const struct tegra_pcie_soc_data *soc_data;
+ struct dentry *debugfs;
};
struct tegra_pcie_port {
@@ -1739,6 +1741,115 @@ static const struct of_device_id tegra_pcie_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
+static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct tegra_pcie *pcie = s->private;
+
+ if (list_empty(&pcie->ports))
+ return NULL;
+
+ seq_printf(s, "Index Status\n");
+
+ return seq_list_start(&pcie->ports, *pos);
+}
+
+static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct tegra_pcie *pcie = s->private;
+
+ return seq_list_next(v, &pcie->ports, pos);
+}
+
+static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
+{
+ bool up = false, active = false;
+ struct tegra_pcie_port *port;
+ unsigned int value;
+
+ port = list_entry(v, struct tegra_pcie_port, list);
+
+ value = readl(port->base + RP_VEND_XP);
+
+ if (value & RP_VEND_XP_DL_UP)
+ up = true;
+
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+ if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+ active = true;
+
+ seq_printf(s, "%2u ", port->index);
+
+ if (up)
+ seq_printf(s, "up");
+
+ if (active) {
+ if (up)
+ seq_printf(s, ", ");
+
+ seq_printf(s, "active");
+ }
+
+ seq_printf(s, "\n");
+ return 0;
+}
+
+static const struct seq_operations tegra_pcie_ports_seq_ops = {
+ .start = tegra_pcie_ports_seq_start,
+ .next = tegra_pcie_ports_seq_next,
+ .stop = tegra_pcie_ports_seq_stop,
+ .show = tegra_pcie_ports_seq_show,
+};
+
+static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
+{
+ struct tegra_pcie *pcie = inode->i_private;
+ struct seq_file *s;
+ int err;
+
+ err = seq_open(file, &tegra_pcie_ports_seq_ops);
+ if (err)
+ return err;
+
+ s = file->private_data;
+ s->private = pcie;
+
+ return 0;
+}
+
+static const struct file_operations tegra_pcie_ports_ops = {
+ .owner = THIS_MODULE,
+ .open = tegra_pcie_ports_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
+{
+ struct dentry *file;
+
+ pcie->debugfs = debugfs_create_dir("pcie", NULL);
+ if (!pcie->debugfs)
+ return -ENOMEM;
+
+ file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
+ pcie, &tegra_pcie_ports_ops);
+ if (!file)
+ goto remove;
+
+ return 0;
+
+remove:
+ debugfs_remove_recursive(pcie->debugfs);
+ pcie->debugfs = NULL;
+ return -ENOMEM;
+}
+
static int tegra_pcie_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -1793,6 +1904,13 @@ static int tegra_pcie_probe(struct platform_device *pdev)
goto disable_msi;
}
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_pcie_debugfs_init(pcie);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
+ err);
+ }
+
platform_set_drvdata(pdev, pcie);
return 0;
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 1eaf4df3618a..52bd3a143563 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -20,6 +20,7 @@
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include "pcie-designware.h"
@@ -217,27 +218,47 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
return 0;
}
+static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+ unsigned int res, bit, val;
+
+ res = (irq / 32) * 12;
+ bit = irq % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val &= ~(1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
unsigned int nvec, unsigned int pos)
{
- unsigned int i, res, bit, val;
+ unsigned int i;
for (i = 0; i < nvec; i++) {
irq_set_msi_desc_off(irq_base, i, NULL);
clear_bit(pos + i, pp->msi_irq_in_use);
/* Disable corresponding interrupt on MSI controller */
- res = ((pos + i) / 32) * 12;
- bit = (pos + i) % 32;
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
- val &= ~(1 << bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ if (pp->ops->msi_clear_irq)
+ pp->ops->msi_clear_irq(pp, pos + i);
+ else
+ dw_pcie_msi_clear_irq(pp, pos + i);
}
}
+static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+ unsigned int res, bit, val;
+
+ res = (irq / 32) * 12;
+ bit = irq % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val |= 1 << bit;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
- int res, bit, irq, pos0, pos1, i;
- u32 val;
+ int irq, pos0, pos1, i;
struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
if (!pp) {
@@ -281,11 +302,10 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
}
set_bit(pos0 + i, pp->msi_irq_in_use);
/*Enable corresponding interrupt in MSI interrupt controller */
- res = ((pos0 + i) / 32) * 12;
- bit = (pos0 + i) % 32;
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
- val |= 1 << bit;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+ if (pp->ops->msi_set_irq)
+ pp->ops->msi_set_irq(pp, pos0 + i);
+ else
+ dw_pcie_msi_set_irq(pp, pos0 + i);
}
*pos = pos0;
@@ -353,7 +373,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
*/
desc->msi_attrib.multiple = msgvec;
- msg.address_lo = virt_to_phys((void *)pp->msi_data);
+ if (pp->ops->get_msi_data)
+ msg.address_lo = pp->ops->get_msi_data(pp);
+ else
+ msg.address_lo = virt_to_phys((void *)pp->msi_data);
msg.address_hi = 0x0;
msg.data = pos;
write_msi_msg(irq, &msg);
@@ -396,10 +419,35 @@ static const struct irq_domain_ops msi_domain_ops = {
int __init dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
+ struct platform_device *pdev = to_platform_device(pp->dev);
struct of_pci_range range;
struct of_pci_range_parser parser;
- u32 val;
- int i;
+ struct resource *cfg_res;
+ u32 val, na, ns;
+ const __be32 *addrp;
+ int i, index;
+
+ /* Find the address cell size and the number of cells in order to get
+ * the untranslated address.
+ */
+ of_property_read_u32(np, "#address-cells", &na);
+ ns = of_n_size_cells(np);
+
+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (cfg_res) {
+ pp->config.cfg0_size = resource_size(cfg_res)/2;
+ pp->config.cfg1_size = resource_size(cfg_res)/2;
+ pp->cfg0_base = cfg_res->start;
+ pp->cfg1_base = cfg_res->start + pp->config.cfg0_size;
+
+ /* Find the untranslated configuration space address */
+ index = of_property_match_string(np, "reg-names", "config");
+ addrp = of_get_address(np, index, false, false);
+ pp->cfg0_mod_base = of_read_number(addrp, ns);
+ pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size;
+ } else {
+ dev_err(pp->dev, "missing *config* reg space\n");
+ }
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pp->dev, "missing ranges property\n");
@@ -422,17 +470,33 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
pp->config.io_size = resource_size(&pp->io);
pp->config.io_bus_addr = range.pci_addr;
pp->io_base = range.cpu_addr;
+
+ /* Find the untranslated IO space address */
+ pp->io_mod_base = of_read_number(parser.range -
+ parser.np + na, ns);
}
if (restype == IORESOURCE_MEM) {
of_pci_range_to_resource(&range, np, &pp->mem);
pp->mem.name = "MEM";
pp->config.mem_size = resource_size(&pp->mem);
pp->config.mem_bus_addr = range.pci_addr;
+
+ /* Find the untranslated MEM space address */
+ pp->mem_mod_base = of_read_number(parser.range -
+ parser.np + na, ns);
}
if (restype == 0) {
of_pci_range_to_resource(&range, np, &pp->cfg);
pp->config.cfg0_size = resource_size(&pp->cfg)/2;
pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+ pp->cfg0_base = pp->cfg.start;
+ pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+
+ /* Find the untranslated configuration space address */
+ pp->cfg0_mod_base = of_read_number(parser.range -
+ parser.np + na, ns);
+ pp->cfg1_mod_base = pp->cfg0_mod_base +
+ pp->config.cfg0_size;
}
}
@@ -445,8 +509,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
}
}
- pp->cfg0_base = pp->cfg.start;
- pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
pp->mem_base = pp->mem.start;
pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
@@ -509,9 +571,9 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
/* Program viewport 0 : OUTBOUND : CFG0 */
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
PCIE_ATU_VIEWPORT);
- dw_pcie_writel_rc(pp, pp->cfg0_base, PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
+ dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -525,9 +587,9 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
+ dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -540,9 +602,9 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+ dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
@@ -556,9 +618,9 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+ dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
@@ -656,7 +718,11 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
}
if (bus->number != pp->root_bus_nr)
- ret = dw_pcie_rd_other_conf(pp, bus, devfn,
+ if (pp->ops->rd_other_conf)
+ ret = pp->ops->rd_other_conf(pp, bus, devfn,
+ where, size, val);
+ else
+ ret = dw_pcie_rd_other_conf(pp, bus, devfn,
where, size, val);
else
ret = dw_pcie_rd_own_conf(pp, where, size, val);
@@ -679,7 +745,11 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number != pp->root_bus_nr)
- ret = dw_pcie_wr_other_conf(pp, bus, devfn,
+ if (pp->ops->wr_other_conf)
+ ret = pp->ops->wr_other_conf(pp, bus, devfn,
+ where, size, val);
+ else
+ ret = dw_pcie_wr_other_conf(pp, bus, devfn,
where, size, val);
else
ret = dw_pcie_wr_own_conf(pp, where, size, val);
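
dw_pcie_msi_set_irq()/dw_pcie_msi_clear_irq() factor out the bookkeeping that the DesignWare core now skips when a glue driver such as pci-dra7xx supplies its own msi_set_irq/msi_clear_irq ops: each bank of 32 vectors has its own enable register spaced 12 bytes apart, and the register offset and bit fall straight out of the vector number. A standalone sketch of that arithmetic (the 0x828 base is illustrative):

    #include <stdio.h>

    #define MSI_INTR0_ENABLE        0x828   /* illustrative base offset */

    int main(void)
    {
            unsigned int irq, banks[2] = { 0 };

            for (irq = 0; irq < 40; irq += 13) {
                    unsigned int res = (irq / 32) * 12;     /* register offset */
                    unsigned int bit = irq % 32;            /* bit within it  */

                    banks[res / 12] |= 1u << bit;           /* "enable" the vector */
                    printf("irq %2u -> reg 0x%03x bit %2u\n",
                           irq, MSI_INTR0_ENABLE + res, bit);
            }
            printf("bank0=0x%08x bank1=0x%08x\n", banks[0], banks[1]);
            return 0;
    }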
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index 77f592faa7bf..daf81f922cda 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -36,11 +36,15 @@ struct pcie_port {
u8 root_bus_nr;
void __iomem *dbi_base;
u64 cfg0_base;
+ u64 cfg0_mod_base;
void __iomem *va_cfg0_base;
u64 cfg1_base;
+ u64 cfg1_mod_base;
void __iomem *va_cfg1_base;
u64 io_base;
+ u64 io_mod_base;
u64 mem_base;
+ u64 mem_mod_base;
struct resource cfg;
struct resource io;
struct resource mem;
@@ -61,8 +65,15 @@ struct pcie_host_ops {
u32 val, void __iomem *dbi_base);
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+ int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val);
+ int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val);
int (*link_up)(struct pcie_port *pp);
void (*host_init)(struct pcie_port *pp);
+ void (*msi_set_irq)(struct pcie_port *pp, int irq);
+ void (*msi_clear_irq)(struct pcie_port *pp, int irq);
+ u32 (*get_msi_data)(struct pcie_port *pp);
};
int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index e4da61bcbf8b..b062d3d7b373 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1258,7 +1258,7 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
int mode = -1;
int time = -1;
- if (sscanf(buf, "%i", &mode) != 1 || (mode != 2 || mode != 1))
+ if (sscanf(buf, "%i", &mode) != 1 && (mode != 2 || mode != 1))
return -EINVAL;
/* Set the Keyboard Backlight Mode where:
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 4b66bf09ee55..d2c35920ff08 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -606,6 +606,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
unsigned int best = 0;
struct pwm_lookup *p;
unsigned int match;
+ unsigned int period;
+ enum pwm_polarity polarity;
/* look up via DT first */
if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
@@ -653,6 +655,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
if (match > best) {
chip = pwmchip_find_by_name(p->provider);
index = p->index;
+ period = p->period;
+ polarity = p->polarity;
if (match != 3)
best = match;
@@ -668,8 +672,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
if (IS_ERR(pwm))
return pwm;
- pwm_set_period(pwm, p->period);
- pwm_set_polarity(pwm, p->polarity);
+ pwm_set_period(pwm, period);
+ pwm_set_polarity(pwm, polarity);
return pwm;
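
The pwm_get() fix captures period and polarity from the lookup entry at the moment it matches; once the list walk finishes, p no longer points at the best match, so the old p->period/p->polarity reads were bogus. A minimal sketch of the capture-while-valid pattern over a plain array (scoring invented):

    #include <stdio.h>

    struct lookup { const char *provider; unsigned int period; };

    int main(void)
    {
            struct lookup table[] = {
                    { "pwm-a", 1000000 },
                    { "pwm-b", 5000000 },
            };
            unsigned int best = 0, period = 0;
            struct lookup *p;

            for (p = table; p < table + 2; p++) {
                    unsigned int match = (p == table) ? 3 : 1;      /* fake scoring */

                    if (match > best) {
                            period = p->period;     /* capture while p points at the match */
                            best = match;
                    }
            }
            /* Reading p->period here would use the loop cursor, not the match. */
            printf("period = %u\n", period);
            return 0;
    }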
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index df3306019a7e..d81f3cc43ff1 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -377,6 +377,10 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
pool->slab_flags |= SLAB_CACHE_DMA;
pool->gfp_mask = __GFP_DMA;
}
+
+ if (hostt->cmd_size)
+ hostt->cmd_pool = pool;
+
return pool;
}
@@ -421,8 +425,10 @@ out:
out_free_slab:
kmem_cache_destroy(pool->cmd_slab);
out_free_pool:
- if (hostt->cmd_size)
+ if (hostt->cmd_size) {
scsi_free_host_cmd_pool(pool);
+ hostt->cmd_pool = NULL;
+ }
goto out;
}
@@ -444,8 +450,10 @@ static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
if (!--pool->users) {
kmem_cache_destroy(pool->cmd_slab);
kmem_cache_destroy(pool->sense_slab);
- if (hostt->cmd_size)
+ if (hostt->cmd_size) {
scsi_free_host_cmd_pool(pool);
+ hostt->cmd_pool = NULL;
+ }
}
mutex_unlock(&host_cmd_pool_mutex);
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9c44392b748f..ce62e8798cc8 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1774,7 +1774,7 @@ static void scsi_request_fn(struct request_queue *q)
blk_requeue_request(q, req);
atomic_dec(&sdev->device_busy);
out_delay:
- if (atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
+ if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
blk_delay_queue(q, SCSI_QUEUE_DELAY);
}
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 788ed9b59b4e..114203f32843 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,8 +1,7 @@
#
# Makefile for the SuperH specific drivers.
#
-obj-$(CONFIG_SUPERH) += intc/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/
+obj-$(CONFIG_SH_INTC) += intc/
ifneq ($(CONFIG_COMMON_CLK),y)
obj-$(CONFIG_HAVE_CLK) += clk/
endif
diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig
index 60228fae943f..6a1b05ddc8c9 100644
--- a/drivers/sh/intc/Kconfig
+++ b/drivers/sh/intc/Kconfig
@@ -1,7 +1,9 @@
config SH_INTC
- def_bool y
+ bool
select IRQ_DOMAIN
+if SH_INTC
+
comment "Interrupt controller options"
config INTC_USERIMASK
@@ -37,3 +39,5 @@ config INTC_MAPPING_DEBUG
between system IRQs and the per-controller id tables.
If in doubt, say N.
+
+endif
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 40c3d43c9292..f40b34cdf2fc 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -945,7 +945,7 @@ static int au1550_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&hw->bitbang);
free_irq(hw->irq, hw);
iounmap((void __iomem *)hw->regs);
- release_mem_region(r->start, sizeof(psc_spi_t));
+ release_mem_region(hw->ioarea->start, sizeof(psc_spi_t));
if (hw->usedma) {
au1550_spi_dma_rxtmp_free(hw);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 276a3884fb3c..48f1d26e6ad9 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -417,16 +417,16 @@ static int davinci_spi_setup(struct spi_device *spi)
flags, dev_name(&spi->dev));
internal_cs = false;
}
- }
- if (retval) {
- dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
- spi->cs_gpio, retval);
- return retval;
- }
+ if (retval) {
+ dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
+ spi->cs_gpio, retval);
+ return retval;
+ }
- if (internal_cs)
- set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
+ if (internal_cs)
+ set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
+ }
if (spi->mode & SPI_READY)
set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 3f3dc1226edf..e14960470d8d 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -62,6 +62,8 @@ static int spi_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
+ dws->regs = pcim_iomap_table(pdev)[pci_bar];
+
dws->bus_num = 0;
dws->num_cs = 4;
dws->irq = pdev->irq;
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 29f33143b795..670f0627f3bf 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -271,7 +271,7 @@ static void giveback(struct dw_spi *dws)
transfer_list);
if (!last_transfer->cs_change)
- spi_chip_sel(dws, dws->cur_msg->spi, 0);
+ spi_chip_sel(dws, msg->spi, 0);
spi_finalize_current_message(dws->master);
}
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 68441fa448de..352eed7463ac 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -329,7 +329,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
disable_fifo:
if (t->rx_buf != NULL)
chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
- else
+
+ if (t->tx_buf != NULL)
chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
mcspi_write_chconf0(spi, chconf);
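A transfer may carry both an RX and a TX buffer, so the original 'else' could leave one of the two FIFO-enable bits set on the disable path; the hunk clears each bit independently. A sketch of the resulting logic (bit values are placeholders, not the real CHCONF register layout):

#define FFER (1u << 1)	/* placeholder for OMAP2_MCSPI_CHCONF_FFER */
#define FFET (1u << 0)	/* placeholder for OMAP2_MCSPI_CHCONF_FFET */

static unsigned int disable_fifo_bits(unsigned int chconf,
				      const void *rx_buf, const void *tx_buf)
{
	if (rx_buf != NULL)
		chconf &= ~FFER;
	if (tx_buf != NULL)
		chconf &= ~FFET;
	return chconf;
}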
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fe792106bdc5..46f45ca2c694 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1074,6 +1074,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ "INT3430", 0 },
{ "INT3431", 0 },
{ "80860F0E", 0 },
+ { "8086228E", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index c0743604b906..cd0e08b0c9f6 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -499,7 +499,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
}
/* div doesn't support odd number */
- div = rs->max_freq / rs->speed;
+ div = max_t(u32, rs->max_freq / rs->speed, 1);
div = (div + 1) & 0xfffe;
spi_enable_chip(rs, 0);
@@ -678,7 +678,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
rs->dma_tx.direction = DMA_MEM_TO_DEV;
- rs->dma_tx.direction = DMA_DEV_TO_MEM;
+ rs->dma_rx.direction = DMA_DEV_TO_MEM;
master->can_dma = rockchip_spi_can_dma;
master->dma_tx = rs->dma_tx.ch;
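The max_t() clamp matters because the divider is rounded up to an even value afterwards, and (0 + 1) & 0xfffe is 0, so a requested speed above max_freq would otherwise yield a zero divider. A standalone model of the calculation (not driver code):

static unsigned int spi_div(unsigned int max_freq, unsigned int speed)
{
	unsigned int div = max_freq / speed;

	if (div < 1)
		div = 1;		/* the max_t(u32, ..., 1) clamp above */
	return (div + 1) & 0xfffe;	/* round odd up to even: 1->2, 3->4, 4->4 */
}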
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index c850dfdfa9e3..ad87a98f8f68 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -472,25 +472,52 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
dma_cookie_t cookie;
int ret;
- if (tx) {
- desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
- tx->sgl, tx->nents, DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_tx)
- goto no_dma;
-
- irq_mask |= SPCR_SPTIE;
- }
+ /* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
rx->sgl, rx->nents, DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_rx)
- goto no_dma;
+ if (!desc_rx) {
+ ret = -EAGAIN;
+ goto no_dma_rx;
+ }
+
+ desc_rx->callback = rspi_dma_complete;
+ desc_rx->callback_param = rspi;
+ cookie = dmaengine_submit(desc_rx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_rx;
+ }
irq_mask |= SPCR_SPRIE;
}
+ if (tx) {
+ desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
+ tx->sgl, tx->nents, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ ret = -EAGAIN;
+ goto no_dma_tx;
+ }
+
+ if (rx) {
+ /* No callback */
+ desc_tx->callback = NULL;
+ } else {
+ desc_tx->callback = rspi_dma_complete;
+ desc_tx->callback_param = rspi;
+ }
+ cookie = dmaengine_submit(desc_tx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_tx;
+ }
+
+ irq_mask |= SPCR_SPTIE;
+ }
+
/*
* DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
* called. So, this driver disables the IRQ while DMA transfer.
@@ -503,34 +530,24 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
rspi_enable_irq(rspi, irq_mask);
rspi->dma_callbacked = 0;
- if (rx) {
- desc_rx->callback = rspi_dma_complete;
- desc_rx->callback_param = rspi;
- cookie = dmaengine_submit(desc_rx);
- if (dma_submit_error(cookie))
- return cookie;
+ /* Now start DMA */
+ if (rx)
dma_async_issue_pending(rspi->master->dma_rx);
- }
- if (tx) {
- if (rx) {
- /* No callback */
- desc_tx->callback = NULL;
- } else {
- desc_tx->callback = rspi_dma_complete;
- desc_tx->callback_param = rspi;
- }
- cookie = dmaengine_submit(desc_tx);
- if (dma_submit_error(cookie))
- return cookie;
+ if (tx)
dma_async_issue_pending(rspi->master->dma_tx);
- }
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
if (ret > 0 && rspi->dma_callbacked)
ret = 0;
- else if (!ret)
+ else if (!ret) {
+ dev_err(&rspi->master->dev, "DMA timeout\n");
ret = -ETIMEDOUT;
+ if (tx)
+ dmaengine_terminate_all(rspi->master->dma_tx);
+ if (rx)
+ dmaengine_terminate_all(rspi->master->dma_rx);
+ }
rspi_disable_irq(rspi, irq_mask);
@@ -541,11 +558,16 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
return ret;
-no_dma:
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&rspi->master->dev),
- dev_name(&rspi->master->dev));
- return -EAGAIN;
+no_dma_tx:
+ if (rx)
+ dmaengine_terminate_all(rspi->master->dma_rx);
+no_dma_rx:
+ if (ret == -EAGAIN) {
+ pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+ dev_driver_string(&rspi->master->dev),
+ dev_name(&rspi->master->dev));
+ }
+ return ret;
}
static void rspi_receive_init(const struct rspi_data *rspi)
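The reordering above follows the usual dmaengine pattern: everything that can fail (prep and submit) is done up front, and only then are interrupts unmasked and the transfers issued, so the error paths never have to undo a half-armed controller. A condensed sketch of that pattern for a single channel (hypothetical helper, requires <linux/dmaengine.h>; the real function additionally defers dma_async_issue_pending() until after the IRQ mask is set up):

static int submit_one(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int nents, enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EAGAIN;			/* caller falls back to PIO */

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return cookie;			/* nothing to unwind yet */

	dma_async_issue_pending(chan);		/* point of no return */
	return 0;
}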
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e0531baf2782..ca935df80c88 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -848,6 +848,7 @@ out:
/**
* spi_finalize_current_transfer - report completion of a transfer
+ * @master: the master reporting completion
*
* Called by SPI drivers using the core transfer_one_message()
* implementation to notify it that the current interrupt driven