path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/Kconfig                           |  1
-rw-r--r--  drivers/block/zram/zram_drv.c                   |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c                | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c               |  2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c          |  7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c          | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c              | 17
-rw-r--r--  drivers/input/mouse/synaptics.c                 |  7
-rw-r--r--  drivers/iommu/intel-iommu.c                     | 18
-rw-r--r--  drivers/md/md.c                                 | 14
-rw-r--r--  drivers/md/raid10.c                             |  1
-rw-r--r--  drivers/md/raid5.c                              |  1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c  | 20
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c     | 11
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c       |  9
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c        |  4
16 files changed, 112 insertions(+), 37 deletions(-)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index eb1fed5bd516..3ccef9eba6f9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
config BLK_DEV_PMEM
tristate "Persistent memory block device support"
+ depends on HAS_IOMEM
help
Saying Y here will allow you to use a contiguous range of reserved
memory as one or more persistent block devices.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8dcbced0eafd..6e134f4759c0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
zram->max_comp_streams = 1;
+
set_capacity(zram->disk, 0);
+ part_stat_set_all(&zram->disk->part0, 0);
up_write(&zram->init_lock);
/* I/O operation under all of CPU are done so let's free */
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 56e437e31580..ae628001fd97 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset;
+ int i = 0, inc, try = 0, reg_offset;
int ret = 0;
intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
reg_offset = dev_priv->gpio_mmio_base;
+retry:
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
- for (i = 0; i < num; i++) {
+ for (; i < num; i += inc) {
+ inc = 1;
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
- i += 1; /* set i to the index of the read xfer */
+ inc = 2; /* an index read is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
@@ -525,6 +527,18 @@ clear_err:
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ /*
+ * Passive adapters sometimes NAK the first probe. Retry the first
+ * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+ * has retries internally. See also the retry loop in
+ * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+ */
+ if (ret == -ENXIO && i == 0 && try++ == 0) {
+ DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
+ goto retry;
+ }
+
goto out;
timeout:
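
The retry added above is easiest to see outside the driver. Below is a minimal userspace sketch of the same pattern, where do_transfer() is a made-up stand-in that fails with -ENXIO when the device NAKs: only a NAK on the very first message is retried, exactly once, and the loop index is not reset, so progress on later messages is kept.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical transfer of one message; NAKs the very first attempt. */
    static int do_transfer(int msg)
    {
        static int attempts;
        if (msg == 0 && attempts++ == 0)
            return -ENXIO;  /* passive adapter NAKs the first probe */
        return 0;
    }

    int main(void)
    {
        int i = 0, try = 0, ret = 0, num = 3;

    retry:
        for (; i < num; i++) {
            ret = do_transfer(i);
            if (ret)
                break;
        }
        /* Retry only a NAK on the very first message, and only once. */
        if (ret == -ENXIO && i == 0 && try++ == 0) {
            fprintf(stderr, "NAK on first message, retrying\n");
            goto retry;
        }
        printf("done, ret=%d after %d message(s)\n", ret, i);
        return 0;
    }
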
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e87d2f418de4..987b81f31b0e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
DRM_DEBUG_KMS("initialising analog device %d\n", device);
- intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index e597ffc26563..dac78ad24b31 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
- /* if there is no audio, set MINM_OVER_MAXP */
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
- radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
@@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
if ((crtc->mode.clock == test_crtc->mode.clock) &&
(adjusted_clock == test_adjusted_clock) &&
(radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
- (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
- (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
- drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
+ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
return test_radeon_crtc->pll_id;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b7ca4c514621..a7fdfa4f0857 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ /*
+ * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
+ * after the CP ring have chew one packet at least. Hence here we stop
+ * and restart DPM after the radeon_ib_ring_tests().
+ */
+ if (rdev->pm.dpm_enabled &&
+ (rdev->pm.pm_method == PM_METHOD_DPM) &&
+ (rdev->family == CHIP_TURKS) &&
+ (rdev->flags & RADEON_IS_MOBILITY)) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_dpm_disable(rdev);
+ radeon_dpm_enable(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
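
The quirk above amounts to bouncing a power-management state machine under its lock once the ring is known to work, and only for the affected configuration. A rough sketch of that shape, with made-up dpm_disable()/dpm_enable() stand-ins and a pthread mutex in place of rdev->pm.mutex:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool dpm_enabled = true;

    static void dpm_disable(void) { dpm_enabled = false; puts("dpm off"); }
    static void dpm_enable(void)  { dpm_enabled = true;  puts("dpm on"); }

    /* Restart DPM only for the affected configuration (mobility Turks here). */
    static void maybe_restart_dpm(bool is_turks, bool is_mobility)
    {
        if (!dpm_enabled || !is_turks || !is_mobility)
            return;
        pthread_mutex_lock(&pm_mutex);
        dpm_disable();
        dpm_enable();
        pthread_mutex_unlock(&pm_mutex);
    }

    int main(void)
    {
        maybe_restart_dpm(true, true);
        return 0;
    }
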
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index de42fc4a22b8..9c3377ca17b7 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
/* make sure object fit at this offset */
eoffset = soffset + size;
if (soffset >= eoffset) {
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
if (last_pfn > rdev->vm_manager.max_pfn) {
dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
} else {
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unreserve;
}
}
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto error_unreserve;
}
tmp->it.start = bo_va->it.start;
tmp->it.last = bo_va->it.last;
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
r = radeon_vm_clear_bo(rdev, pt);
if (r) {
radeon_bo_unref(&pt);
- radeon_bo_reserve(bo_va->bo, false);
return r;
}
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
mutex_unlock(&vm->mutex);
return 0;
+
+error_unreserve:
+ radeon_bo_unreserve(bo_va->bo);
+ return r;
}
/**
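
The conversion above is the standard single-exit error path: every failure now funnels through one label that drops the reservation the caller took. A compact sketch of the pattern, using a hypothetical unreserve() in place of radeon_bo_unreserve() and simplified checks:

    #include <errno.h>
    #include <stdio.h>

    static void unreserve(const char *what) { printf("unreserve %s\n", what); }

    static int set_addr(unsigned long soffset, unsigned long size,
                        unsigned long max)
    {
        unsigned long eoffset = soffset + size;
        int r;

        /* The object is assumed to arrive already reserved by the caller. */
        if (soffset >= eoffset) {
            r = -EINVAL;
            goto error_unreserve;
        }
        if (eoffset > max) {
            r = -EINVAL;
            goto error_unreserve;
        }
        return 0;

    error_unreserve:
        unreserve("bo");    /* single exit: undo the reservation once */
        return r;
    }

    int main(void)
    {
        printf("%d\n", set_addr(10, 0, 100));   /* empty range fails */
        printf("%d\n", set_addr(0, 200, 100));  /* above the limit fails */
        printf("%d\n", set_addr(0, 50, 100));   /* success, no unreserve here */
        return 0;
    }
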
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 630af73e98c4..35c8d0ceabee 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -151,6 +151,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1024, 5112, 2024, 4832
},
{
+ (const char * const []){"LEN2000", NULL},
+ {ANY_BOARD_ID, ANY_BOARD_ID},
+ 1024, 5113, 2021, 4832
+ },
+ {
(const char * const []){"LEN2001", NULL},
{ANY_BOARD_ID, ANY_BOARD_ID},
1024, 5022, 2508, 4832
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0045",
"LEN0047",
"LEN0049",
- "LEN2000",
+ "LEN2000", /* S540 */
"LEN2001", /* Edge E431 */
"LEN2002", /* Edge E531 */
"LEN2003",
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2ffe58969944..5ecfaf29933a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations — the ones with
+ * PASID support on bit 28 — have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+ ecap_pasid(iommu->ecap))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable supported super page\n");
intel_iommu_superpage = 0;
+ } else if (!strncmp(str, "ecs_off", 7)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: disable extended context table support\n");
+ intel_iommu_ecs = 0;
}
str += strcspn(str, ",");
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
struct context_entry *context;
u64 *entry;
- if (ecap_ecs(iommu->ecap)) {
+ if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
entry = &root->hi;
@@ -806,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu)
if (context)
free_pgtable_page(context);
- if (!ecap_ecs(iommu->ecap))
+ if (!ecs_enabled(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1141,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
unsigned long flag;
addr = virt_to_phys(iommu->root_entry);
- if (ecap_ecs(iommu->ecap))
+ if (ecs_enabled(iommu))
addr |= DMA_RTADDR_RTT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
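
The new ecs_enabled() check combines a boot-time switch with two hardware capability bits, so any one of the three can veto the feature. A small standalone sketch of that gating style, with made-up capability masks standing in for ecap_ecs()/ecap_pasid() and a simplified option parser standing in for the intel_iommu= handling:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define CAP_ECS   (1u << 0)   /* stand-in for ecap_ecs() */
    #define CAP_PASID (1u << 1)   /* stand-in for ecap_pasid() */

    static int use_ecs = 1;       /* cleared by an "ecs_off" style option */

    static bool ecs_enabled(unsigned int ecap)
    {
        return use_ecs && (ecap & CAP_ECS) && (ecap & CAP_PASID);
    }

    /* Parse a comma-separated option string, as a boot parameter would be. */
    static void parse_options(const char *str)
    {
        if (strstr(str, "ecs_off"))
            use_ecs = 0;
    }

    int main(void)
    {
        unsigned int ecap = CAP_ECS | CAP_PASID;

        printf("default:  %d\n", ecs_enabled(ecap));      /* 1 */
        printf("no pasid: %d\n", ecs_enabled(CAP_ECS));   /* 0 */
        parse_options("sp_off,ecs_off");
        printf("ecs_off:  %d\n", ecs_enabled(ecap));      /* 0 */
        return 0;
    }
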
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 27506302eb7a..4dbed4a67aaf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
err = -EBUSY;
}
spin_unlock(&mddev->lock);
- return err;
+ return err ?: len;
}
err = mddev_lock(mddev);
if (err)
@@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- flush_workqueue(md_misc_wq);
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- if (mddev_lock(mddev) == 0) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+ mddev_lock(mddev) == 0) {
+ flush_workqueue(md_misc_wq);
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
- mddev_unlock(mddev);
}
+ mddev_unlock(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -8261,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
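
The `return err ?: len;` above uses the GNU C conditional with the middle operand omitted: it evaluates to err when err is non-zero and to len otherwise, which matches the usual sysfs store convention of returning either a negative errno or the number of bytes consumed. A tiny sketch of the idiom (GNU extension, so gcc/clang only), with a hypothetical store() helper:

    #include <errno.h>
    #include <stdio.h>

    /* Returns a negative errno on failure, else the byte count it consumed. */
    static long store(int fail, long len)
    {
        long err = fail ? -EBUSY : 0;

        return err ?: len;  /* GNU ?: with the middle operand omitted */
    }

    int main(void)
    {
        printf("%ld\n", store(1, 4));   /* -16, i.e. -EBUSY */
        printf("%ld\n", store(0, 4));   /* 4 */
        return 0;
    }
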
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e793ab6b3570..f55c3f35b746 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 553d54b87052..b6793d2e051f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 73874b2575bf..f3f1601a76f3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_devcmd_fw_info *fw_info;
+ int err;
- enic_dev_fw_info(enic, &fw_info);
+ err = enic_dev_fw_info(enic, &fw_info);
+ /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
+ * For other failures, like devcmd failure, we return previously
+ * recorded info.
+ */
+ if (err == -ENOMEM)
+ return;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
-
- enic_dev_stats_dump(enic, &vstats);
+ int err;
+
+ err = enic_dev_stats_dump(enic, &vstats);
+ /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ * For other failures, like devcmd failure, we return previously
+ * recorded stats.
+ */
+ if (err == -ENOMEM)
+ return;
for (i = 0; i < enic_n_tx_stats; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
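
Both hunks above apply the same policy: only an allocation failure (-ENOMEM) aborts the ethtool callback, while any other devcmd error falls through and reports whatever the previous successful dump left behind. A hedged sketch of that policy, with a hypothetical dump() filling a caller-visible cache:

    #include <errno.h>
    #include <stdio.h>

    struct stats { unsigned long tx_frames; };

    static struct stats cached = { .tx_frames = 42 };

    /* Hypothetical firmware dump: may fail with -ENOMEM or another errno. */
    static int dump(struct stats **out, int simulated_err)
    {
        *out = &cached;
        if (simulated_err == 0)
            cached.tx_frames++;   /* a fresh sample landed in the cache */
        return simulated_err;
    }

    static void report(int simulated_err)
    {
        struct stats *s;
        int err = dump(&s, simulated_err);

        /* Bail out only when the buffer itself could not be allocated. */
        if (err == -ENOMEM)
            return;
        printf("tx_frames=%lu (err=%d)\n", s->tx_frames, err);
    }

    int main(void)
    {
        report(0);         /* fresh value */
        report(-EIO);      /* devcmd failed: stale but still valid value */
        report(-ENOMEM);   /* nothing printed */
        return 0;
    }
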
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 204bd182473b..eadae1b412c6 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
+ int err;
- enic_dev_stats_dump(enic, &stats);
+ err = enic_dev_stats_dump(enic, &stats);
+ /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ * For other failures, like devcmd failure, we return previously
+ * recorded stats.
+ */
+ if (err == -ENOMEM)
+ return net_stats;
net_stats->tx_packets = stats->tx.tx_frames_ok;
net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
+ enic_poll_unlock_napi(&enic->rq[rq]);
if (work_done < work_to_do) {
/* Some work done, but not enough to stay in polling,
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
}
- enic_poll_unlock_napi(&enic->rq[rq]);
return work_done;
}
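
The enic_poll_msix_rq change is purely about ordering: the NAPI busy-poll lock is now released before the interrupt is unmasked, presumably so that an interrupt fired right after the unmask never finds the queue still held by the just-finished poll. A rough sketch of that ordering only (flags in place of the enic locking primitives):

    #include <stdbool.h>
    #include <stdio.h>

    static bool napi_locked;
    static bool intr_masked = true;

    static void poll_unlock(void) { napi_locked = false; }
    static void intr_unmask(void) { intr_masked = false; }

    /* Release the poll lock before re-enabling the interrupt, so a newly
     * raised interrupt never observes the queue as still being polled. */
    static void finish_poll(int work_done, int budget)
    {
        poll_unlock();
        if (work_done < budget)
            intr_unmask();
        printf("locked=%d masked=%d\n", napi_locked, intr_masked);
    }

    int main(void)
    {
        napi_locked = true;
        finish_poll(3, 16);
        return 0;
    }
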
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 36a2ed606c91..c4b2183bf352 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq,
struct vnic_rq_buf *buf;
u32 fetch_index;
unsigned int count = rq->ring.desc_count;
+ int i;
buf = rq->to_clean;
- while (vnic_rq_desc_used(rq) > 0) {
-
+ for (i = 0; i < rq->ring.desc_count; i++) {
(*buf_clean)(rq, buf);
-
- buf = rq->to_clean = buf->next;
- rq->ring.desc_avail++;
+ buf = buf->next;
}
+ rq->ring.desc_avail = rq->ring.desc_count - 1;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
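
The rewritten loop walks every descriptor in the ring by count instead of stopping when the "used" counter reaches zero, and only afterwards sets desc_avail to count - 1 in one step, mirroring how the ring keeps one slot in reserve. A small sketch of the same walk over a circular list of hypothetical buf nodes:

    #include <stdio.h>

    #define DESC_COUNT 4

    struct buf {
        int id;
        struct buf *next;
    };

    int main(void)
    {
        struct buf bufs[DESC_COUNT];
        unsigned int desc_avail;
        struct buf *b;
        int i;

        /* Link the descriptors into a ring. */
        for (i = 0; i < DESC_COUNT; i++) {
            bufs[i].id = i;
            bufs[i].next = &bufs[(i + 1) % DESC_COUNT];
        }

        /* Clean every descriptor, not just the ones currently "used". */
        b = &bufs[0];
        for (i = 0; i < DESC_COUNT; i++) {
            printf("clean buf %d\n", b->id);
            b = b->next;
        }

        /* One slot is held back, matching the count - 1 in the hunk above. */
        desc_avail = DESC_COUNT - 1;
        printf("desc_avail=%u\n", desc_avail);
        return 0;
    }
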
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e3b9b63ad010..c3a9392cbc19 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
igb->perout[i].period.tv_sec = ts.tv_sec;
igb->perout[i].period.tv_nsec = ts.tv_nsec;
- wr32(trgttiml, rq->perout.start.sec);
- wr32(trgttimh, rq->perout.start.nsec);
+ wr32(trgttimh, rq->perout.start.sec);
+ wr32(trgttiml, rq->perout.start.nsec);
tsauxc |= tsauxc_mask;
tsim |= tsim_mask;
} else {
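
The last hunk only swaps which value lands in which register: the seconds part of the programmed start time now goes to the high target-time register (trgttimh) and the nanoseconds part to the low one (trgttiml), the reverse of what the old code wrote. A toy sketch of splitting a start time that way, with plain variables as placeholder "registers":

    #include <stdio.h>

    /* Placeholder "registers"; in hardware these would be MMIO writes. */
    static unsigned int trgttiml, trgttimh;

    static void program_target_time(unsigned int sec, unsigned int nsec)
    {
        trgttimh = sec;    /* seconds belong in the high register */
        trgttiml = nsec;   /* nanoseconds belong in the low register */
    }

    int main(void)
    {
        program_target_time(1700000000u, 500000000u);
        printf("TRGTTIMH=%u TRGTTIML=%u\n", trgttimh, trgttiml);
        return 0;
    }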