Diffstat (limited to 'drivers')
-rw-r--r-- drivers/ata/libata-core.c | 1
-rw-r--r-- drivers/block/null_blk.h | 11
-rw-r--r-- drivers/block/null_blk_main.c | 30
-rw-r--r-- drivers/block/null_blk_zoned.c | 57
-rw-r--r-- drivers/block/skd_main.c | 4
-rw-r--r-- drivers/block/xen-blkfront.c | 3
-rw-r--r-- drivers/block/z2ram.c | 3
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 22
-rw-r--r-- drivers/bus/mvebu-mbus.c | 2
-rw-r--r-- drivers/cdrom/gdrom.c | 8
-rw-r--r-- drivers/clk/mvebu/clk-cpu.c | 4
-rw-r--r-- drivers/edac/cpc925_edac.c | 20
-rw-r--r-- drivers/firmware/scpi_pm_domain.c | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.c | 5
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 5
-rw-r--r-- drivers/iommu/Kconfig | 13
-rw-r--r-- drivers/iommu/Makefile | 1
-rw-r--r-- drivers/iommu/amd_iommu.c | 2
-rw-r--r-- drivers/iommu/amd_iommu_init.c | 2
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 140
-rw-r--r-- drivers/iommu/arm-smmu.c | 106
-rw-r--r-- drivers/iommu/dma-iommu.c | 55
-rw-r--r-- drivers/iommu/fsl_pamu.c | 2
-rw-r--r-- drivers/iommu/fsl_pamu_domain.c | 119
-rw-r--r-- drivers/iommu/intel-iommu-debugfs.c | 314
-rw-r--r-- drivers/iommu/intel-iommu.c | 32
-rw-r--r-- drivers/iommu/intel_irq_remapping.c | 2
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 11
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 23
-rw-r--r-- drivers/iommu/io-pgtable.h | 5
-rw-r--r-- drivers/iommu/iommu.c | 58
-rw-r--r-- drivers/iommu/iova.c | 22
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 5
-rw-r--r-- drivers/iommu/of_iommu.c | 25
-rw-r--r-- drivers/macintosh/adb-iop.c | 50
-rw-r--r-- drivers/macintosh/adb.c | 8
-rw-r--r-- drivers/macintosh/adbhid.c | 53
-rw-r--r-- drivers/macintosh/macio_asic.c | 8
-rw-r--r-- drivers/macintosh/macio_sysfs.c | 8
-rw-r--r-- drivers/macintosh/via-cuda.c | 35
-rw-r--r-- drivers/macintosh/via-macii.c | 352
-rw-r--r-- drivers/macintosh/via-pmu.c | 33
-rw-r--r-- drivers/macintosh/windfarm_smu_controls.c | 4
-rw-r--r-- drivers/macintosh/windfarm_smu_sat.c | 25
-rw-r--r-- drivers/md/Kconfig | 11
-rw-r--r-- drivers/md/dm-cache-policy-smq.c | 2
-rw-r--r-- drivers/md/dm-core.h | 10
-rw-r--r-- drivers/md/dm-crypt.c | 15
-rw-r--r-- drivers/md/dm-flakey.c | 30
-rw-r--r-- drivers/md/dm-ioctl.c | 18
-rw-r--r-- drivers/md/dm-linear.c | 35
-rw-r--r-- drivers/md/dm-mpath.c | 26
-rw-r--r-- drivers/md/dm-raid.c | 2
-rw-r--r-- drivers/md/dm-rq.c | 316
-rw-r--r-- drivers/md/dm-rq.h | 4
-rw-r--r-- drivers/md/dm-sysfs.c | 3
-rw-r--r-- drivers/md/dm-table.c | 56
-rw-r--r-- drivers/md/dm-thin.c | 8
-rw-r--r-- drivers/md/dm-writecache.c | 5
-rw-r--r-- drivers/md/dm-zoned-metadata.c | 80
-rw-r--r-- drivers/md/dm-zoned-target.c | 23
-rw-r--r-- drivers/md/dm.c | 194
-rw-r--r-- drivers/md/dm.h | 1
-rw-r--r-- drivers/md/md-bitmap.c | 9
-rw-r--r-- drivers/md/md-cluster.c | 234
-rw-r--r-- drivers/md/md-cluster.h | 2
-rw-r--r-- drivers/md/md.c | 113
-rw-r--r-- drivers/md/md.h | 1
-rw-r--r-- drivers/md/raid1.c | 1
-rw-r--r-- drivers/md/raid10.c | 109
-rw-r--r-- drivers/md/raid5-cache.c | 2
-rw-r--r-- drivers/md/raid5.c | 12
-rw-r--r-- drivers/misc/ocxl/config.c | 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 2
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 10
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 52
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_controlq.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devids.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 116
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 1
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 12
-rw-r--r-- drivers/net/phy/phy-c45.c | 2
-rw-r--r-- drivers/nfc/nfcmrvl/uart.c | 5
-rw-r--r-- drivers/of/base.c | 149
-rw-r--r-- drivers/of/device.c | 5
-rw-r--r-- drivers/of/irq.c | 5
-rw-r--r-- drivers/of/of_mdio.c | 12
-rw-r--r-- drivers/of/of_numa.c | 19
-rw-r--r-- drivers/of/of_private.h | 8
-rw-r--r-- drivers/of/overlay.c | 4
-rw-r--r-- drivers/of/platform.c | 8
-rw-r--r-- drivers/of/unittest-data/overlay_15.dts | 4
-rw-r--r-- drivers/of/unittest-data/tests-overlay.dtsi | 4
-rw-r--r-- drivers/of/unittest.c | 29
-rw-r--r-- drivers/pci/hotplug/pnv_php.c | 2
-rw-r--r-- drivers/pci/of.c | 101
-rw-r--r-- drivers/pcmcia/electra_cf.c | 2
-rw-r--r-- drivers/power/supply/twl4030_charger.c | 5
-rw-r--r-- drivers/scsi/sd.c | 15
-rw-r--r-- drivers/scsi/sd.h | 15
-rw-r--r-- drivers/scsi/sd_zbc.c | 501
-rw-r--r-- drivers/soc/dove/pmu.c | 8
-rw-r--r-- drivers/soc/fsl/qbman/qman_ccsr.c | 2
-rw-r--r-- drivers/soc/fsl/qe/qe_tdm.c | 4
-rw-r--r-- drivers/soc/qcom/apr.c | 2
-rw-r--r-- drivers/soc/rockchip/pm_domains.c | 44
-rw-r--r-- drivers/soc/tegra/pmc.c | 12
-rw-r--r-- drivers/soc/ti/knav_dma.c | 8
-rw-r--r-- drivers/soc/ti/knav_qmss_queue.c | 8
-rw-r--r-- drivers/tc/tc.c | 8
-rw-r--r-- drivers/thermal/Kconfig | 2
-rw-r--r-- drivers/thermal/Makefile | 2
-rw-r--r-- drivers/thermal/armada_thermal.c | 4
-rw-r--r-- drivers/thermal/da9062-thermal.c | 4
-rw-r--r-- drivers/thermal/hisi_thermal.c | 249
-rw-r--r-- drivers/thermal/imx_thermal.c | 31
-rw-r--r-- drivers/thermal/of-thermal.c | 152
-rw-r--r-- drivers/thermal/qcom-spmi-temp-alarm.c | 158
-rw-r--r-- drivers/thermal/qcom/tsens-8916.c | 12
-rw-r--r-- drivers/thermal/qcom/tsens-8960.c | 41
-rw-r--r-- drivers/thermal/qcom/tsens-8974.c | 12
-rw-r--r-- drivers/thermal/qcom/tsens-common.c | 62
-rw-r--r-- drivers/thermal/qcom/tsens-v2.c | 8
-rw-r--r-- drivers/thermal/qcom/tsens.c | 19
-rw-r--r-- drivers/thermal/qcom/tsens.h | 23
-rw-r--r-- drivers/thermal/qoriq_thermal.c | 5
-rw-r--r-- drivers/thermal/rcar_gen3_thermal.c | 2
-rw-r--r-- drivers/thermal/rcar_thermal.c | 11
-rw-r--r-- drivers/thermal/st/Kconfig | 14
-rw-r--r-- drivers/thermal/st/Makefile | 1
-rw-r--r-- drivers/thermal/st/stm_thermal.c | 760
-rw-r--r-- drivers/video/fbdev/chipsfb.c | 3
-rw-r--r-- drivers/video/fbdev/controlfb.c | 5
-rw-r--r-- drivers/video/fbdev/fsl-diu-fb.c | 2
-rw-r--r-- drivers/video/fbdev/platinumfb.c | 5
-rw-r--r-- drivers/video/fbdev/valkyriefb.c | 12
153 files changed, 3509 insertions(+), 2361 deletions(-)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a9dd4ea7467d..6e594644cb1d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4553,6 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index 34e0030f0592..7685df43f1ef 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -87,7 +87,9 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask);
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors);
void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
@@ -97,10 +99,11 @@ static inline int null_zone_init(struct nullb_device *dev)
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline blk_status_t null_zone_report(struct nullb *nullb,
- struct bio *bio)
+static inline int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones,
+ unsigned int *nr_zones, gfp_t gfp_mask)
{
- return BLK_STS_NOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors)
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index e94591021682..09339203dfba 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1129,34 +1129,12 @@ static void null_restart_queue_async(struct nullb *nullb)
blk_mq_start_stopped_hw_queues(q, true);
}
-static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
-{
- struct nullb_device *dev = cmd->nq->dev;
-
- if (dev->queue_mode == NULL_Q_BIO) {
- if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
- cmd->error = null_zone_report(nullb, cmd->bio);
- return true;
- }
- } else {
- if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
- cmd->error = null_zone_report(nullb, cmd->rq->bio);
- return true;
- }
- }
-
- return false;
-}
-
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
int err = 0;
- if (cmd_report_zone(nullb, cmd))
- goto out;
-
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
@@ -1443,6 +1421,7 @@ static const struct block_device_operations null_fops = {
.owner = THIS_MODULE,
.open = null_open,
.release = null_release,
+ .report_zones = null_zone_report,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -1549,6 +1528,13 @@ static int null_gendisk_register(struct nullb *nullb)
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ if (nullb->dev->zoned) {
+ int ret = blk_revalidate_disk_zones(disk);
+
+ if (ret != 0)
+ return ret;
+ }
+
add_disk(disk);
return 0;
}
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 7c6b86d98700..c0b0e4a3fa8f 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -48,54 +48,27 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
-static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
- unsigned int zno, unsigned int nr_zones)
+int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
{
- struct blk_zone_report_hdr *hdr = NULL;
- struct bio_vec bvec;
- struct bvec_iter iter;
- void *addr;
- unsigned int zones_to_cpy;
-
- bio_for_each_segment(bvec, bio, iter) {
- addr = kmap_atomic(bvec.bv_page);
-
- zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
-
- if (!hdr) {
- hdr = (struct blk_zone_report_hdr *)addr;
- hdr->nr_zones = nr_zones;
- zones_to_cpy--;
- addr += sizeof(struct blk_zone_report_hdr);
- }
-
- zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);
-
- memcpy(addr, &dev->zones[zno],
- zones_to_cpy * sizeof(struct blk_zone));
-
- kunmap_atomic(addr);
+ struct nullb *nullb = disk->private_data;
+ struct nullb_device *dev = nullb->dev;
+ unsigned int zno, nrz = 0;
- nr_zones -= zones_to_cpy;
- zno += zones_to_cpy;
+ if (!dev->zoned)
+ /* Not a zoned null device */
+ return -EOPNOTSUPP;
- if (!nr_zones)
- break;
+ zno = null_zone_no(dev, sector);
+ if (zno < dev->nr_zones) {
+ nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
+ memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
}
-}
-blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
-{
- struct nullb_device *dev = nullb->dev;
- unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
- unsigned int nr_zones = dev->nr_zones - zno;
- unsigned int max_zones;
+ *nr_zones = nrz;
- max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
- nr_zones = min_t(unsigned int, nr_zones, max_zones);
- null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
-
- return BLK_STS_OK;
+ return 0;
}
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
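
The null_blk conversion above replaces the bio-based REQ_OP_ZONE_REPORT handling with the generic report_zones method of block_device_operations, registered in null_blk_main.c together with a blk_revalidate_disk_zones() call before add_disk(). For reference, a minimal callback of the same shape might look like the sketch below; only the signature and the in/out *nr_zones convention are taken from the diff, while "mydrv" and its fields and helpers are hypothetical.

	static int mydrv_report_zones(struct gendisk *disk, sector_t sector,
				      struct blk_zone *zones, unsigned int *nr_zones,
				      gfp_t gfp_mask)
	{
		struct mydrv *drv = disk->private_data;
		unsigned int zno = mydrv_zone_no(drv, sector);	/* zone containing @sector */
		unsigned int nrz = 0;

		if (zno < drv->nr_zones) {
			/* *nr_zones caps the copy on entry and reports it on exit */
			nrz = min_t(unsigned int, *nr_zones, drv->nr_zones - zno);
			memcpy(zones, &drv->zones[zno], nrz * sizeof(struct blk_zone));
		}
		*nr_zones = nrz;
		return 0;
	}
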
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7c5fc6942f32..2459dcc04b1c 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -3175,7 +3175,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
@@ -3364,7 +3364,7 @@ static int skd_pci_resume(struct pci_dev *pdev)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
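
Both skd hunks fix the same subtle bug: the return value of the 32-bit fallback was discarded, so when the 64-bit mask failed, rc still held the original error and probe/resume bailed out even though 32-bit DMA had been set up successfully. The corrected fallback idiom, as restored by the patch:

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		/* keep the fallback's result this time */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "DMA mask error %d\n", rc);
		goto err_out_regions;
	}
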
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9eea83ae01c6..56452cabce5b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2493,6 +2493,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
+ if (!info)
+ return 0;
+
blkif_free(info, 0);
mutex_lock(&info->mutex);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 1106c076fa4b..600430685e28 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -191,8 +191,7 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
vfree(vmalloc (size));
}
- vaddr = (unsigned long) __ioremap (paddr, size,
- _PAGE_WRITETHRU);
+ vaddr = (unsigned long)ioremap_wt(paddr, size);
#else
vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 4552b06fe601..f0404c6d1ff4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -127,6 +127,16 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static int fsl_mc_dma_configure(struct device *dev)
+{
+ struct device *dma_dev = dev;
+
+ while (dev_is_fsl_mc(dma_dev))
+ dma_dev = dma_dev->parent;
+
+ return of_dma_configure(dev, dma_dev->of_node, 0);
+}
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -148,6 +158,7 @@ struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
+ .dma_configure = fsl_mc_dma_configure,
.dev_groups = fsl_mc_dev_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
@@ -621,6 +632,7 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
mc_dev->icid = parent_mc_dev->icid;
mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+ mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
dev_set_msi_domain(&mc_dev->dev,
dev_get_msi_domain(&parent_mc_dev->dev));
}
@@ -638,10 +650,6 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
goto error_cleanup_dev;
}
- /* Objects are coherent, unless 'no shareability' flag set. */
- if (!(obj_desc->flags & FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
-
/*
* The device-specific probe callback will get invoked by device_add()
*/
@@ -698,8 +706,8 @@ static int parse_mc_ranges(struct device *dev,
*ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
if (!(*ranges_start) || !ranges_len) {
dev_warn(dev,
- "missing or empty ranges property for device tree node '%s'\n",
- mc_node->name);
+ "missing or empty ranges property for device tree node '%pOFn'\n",
+ mc_node);
return 0;
}
@@ -722,7 +730,7 @@ static int parse_mc_ranges(struct device *dev,
tuple_len = range_tuple_cell_count * sizeof(__be32);
if (ranges_len % tuple_len != 0) {
- dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
+ dev_err(dev, "malformed ranges property '%pOFn'\n", mc_node);
return -EINVAL;
}
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 70db4d5638a6..5b2a11a88951 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1229,7 +1229,7 @@ mbus_parse_ranges(struct device_node *node,
tuple_len = (*cell_count) * sizeof(__be32);
if (ranges_len % tuple_len) {
- pr_warn("malformed ranges entry '%s'\n", node->name);
+ pr_warn("malformed ranges entry '%pOFn'\n", node);
return -EINVAL;
}
return 0;
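
The fsl-mc and mvebu-mbus hunks above belong to a tree-wide conversion to the %pOFn printk format, which prints a device-tree node's name from the struct device_node pointer itself rather than dereferencing the ->name member:

	/* old: dereference the node name directly */
	pr_warn("malformed ranges entry '%s'\n", node->name);

	/* new: %pOFn extracts the name from the node pointer */
	pr_warn("malformed ranges entry '%pOFn'\n", node);
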
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 757e85b81879..a5b8afe3609c 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -327,15 +327,15 @@ static int get_entry_track(int track)
static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
struct cdrom_multisession *ms_info)
{
- int fentry, lentry, track, data, tocuse, err;
+ int fentry, lentry, track, data, err;
+
if (!gd.toc)
return -ENOMEM;
- tocuse = 1;
+
/* Check if GD-ROM */
err = gdrom_readtoc_cmd(gd.toc, 1);
/* Not a GD-ROM so check if standard CD-ROM */
if (err) {
- tocuse = 0;
err = gdrom_readtoc_cmd(gd.toc, 0);
if (err) {
pr_info("Could not get CD table of contents\n");
@@ -794,7 +794,7 @@ static int probe_gdrom(struct platform_device *devptr)
gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (IS_ERR(gd.gdrom_rq)) {
- rc = PTR_ERR(gd.gdrom_rq);
+ err = PTR_ERR(gd.gdrom_rq);
gd.gdrom_rq = NULL;
goto probe_fail_requestq;
}
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 072aa38374ce..3045067448fb 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -183,7 +183,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
__func__);
- for_each_node_by_type(dn, "cpu")
+ for_each_of_cpu_node(dn)
ncpus++;
cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
@@ -194,7 +194,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
if (WARN_ON(!clks))
goto clks_out;
- for_each_node_by_type(dn, "cpu") {
+ for_each_of_cpu_node(dn) {
struct clk_init_data init;
struct clk *clk;
char *clk_name = kzalloc(5, GFP_KERNEL);
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 2c98e020df05..3c0881ac9880 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -593,8 +593,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
/******************** CPU err device********************************/
static u32 cpc925_cpu_mask_disabled(void)
{
- struct device_node *cpus;
- struct device_node *cpunode = NULL;
+ struct device_node *cpunode;
static u32 mask = 0;
/* use cached value if available */
@@ -603,20 +602,8 @@ static u32 cpc925_cpu_mask_disabled(void)
mask = APIMASK_ADI0 | APIMASK_ADI1;
- cpus = of_find_node_by_path("/cpus");
- if (cpus == NULL) {
- cpc925_printk(KERN_DEBUG, "No /cpus node !\n");
- return 0;
- }
-
- while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) {
+ for_each_of_cpu_node(cpunode) {
const u32 *reg = of_get_property(cpunode, "reg", NULL);
-
- if (strcmp(cpunode->type, "cpu")) {
- cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name);
- continue;
- }
-
if (reg == NULL || *reg > 2) {
cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
continue;
@@ -633,9 +620,6 @@ static u32 cpc925_cpu_mask_disabled(void)
"Assuming PI id is equal to CPU MPIC id!\n");
}
- of_node_put(cpunode);
- of_node_put(cpus);
-
return mask;
}
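
Here and in clk-cpu.c above (and fsl_pamu.c further down), open-coded scans of /cpus are replaced by the for_each_of_cpu_node() iterator, which matches CPU nodes whether they are identified by device_type or by node name and handles the of_node_get()/of_node_put() reference counting between iterations. A typical counting loop, assuming nothing beyond what the hunks show:

	struct device_node *cpu;
	unsigned int ncpus = 0;

	for_each_of_cpu_node(cpu)
		ncpus++;	/* the iterator drops the previous node's reference */
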
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
index f395dec27113..390aa13391e4 100644
--- a/drivers/firmware/scpi_pm_domain.c
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -121,7 +121,7 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
scpi_pd->domain = i;
scpi_pd->ops = scpi_ops;
- sprintf(scpi_pd->name, "%s.%d", np->name, i);
+ sprintf(scpi_pd->name, "%pOFn.%d", np, i);
scpi_pd->genpd.name = scpi_pd->name;
scpi_pd->genpd.power_off = scpi_pd_power_off;
scpi_pd->genpd.power_on = scpi_pd_power_on;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 2d45d1dd9554..643f5edd68fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1446,8 +1446,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
}
/* The CEC module handles HDMI hotplug detection */
- cec_np = of_find_compatible_node(np->parent, NULL,
- "mediatek,mt8173-cec");
+ cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
if (!cec_np) {
dev_err(dev, "Failed to find CEC node\n");
return -EINVAL;
@@ -1457,8 +1456,10 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
if (!cec_pdev) {
dev_err(hdmi->dev, "Waiting for CEC device %pOF\n",
cec_np);
+ of_node_put(cec_np);
return -EPROBE_DEFER;
}
+ of_node_put(cec_np);
hdmi->cec_dev = &cec_pdev->dev;
/*
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index da1363a0c54d..93d70f4a2154 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -633,8 +633,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
struct device_node *child, *node;
int ret;
- node = of_find_compatible_node(dev->of_node, NULL,
- "qcom,gpu-pwrlevels");
+ node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
dev_err(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
@@ -655,6 +654,8 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
dev_pm_opp_add(dev, val, 0);
}
+ of_node_put(node);
+
return 0;
}
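
These two hunks swap of_find_compatible_node(), which searches the whole tree starting after the given node, for of_get_compatible_child(), which matches only direct children of the parent; they also add the of_node_put() calls that were missing on the exit paths. The usual pattern, sketched with a hypothetical compatible string:

	struct device_node *child;

	child = of_get_compatible_child(dev->of_node, "vendor,example");
	if (!child)
		return -ENODEV;

	/* ... use child ... */

	of_node_put(child);	/* drop the reference taken by the lookup */
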
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 83e6d993fca5..d9a25715650e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -186,6 +186,19 @@ config INTEL_IOMMU
and include PCI device scope covered by these DMA
remapping devices.
+config INTEL_IOMMU_DEBUGFS
+ bool "Export Intel IOMMU internals in Debugfs"
+ depends on INTEL_IOMMU && IOMMU_DEBUGFS
+ help
+ !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
+
+ Expose Intel IOMMU internals in Debugfs.
+
+ This option is -NOT- intended for production environments, and should
+ only be enabled for debugging Intel IOMMU.
+
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index ab5eba6edf82..a158a68c8ea8 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
+obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bee0dfb7b93b..1167ff0416cf 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3083,6 +3083,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
+ default:
+ break;
}
return false;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3931c7de7c69..bb2cd29e1658 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1719,7 +1719,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5059d09f3202..6947ccf26512 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IOMMU API for ARM architected SMMUv3 implementations.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Copyright (C) 2015 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
@@ -567,7 +556,8 @@ struct arm_smmu_device {
int gerr_irq;
int combined_irq;
- atomic_t sync_nr;
+ u32 sync_nr;
+ u8 prev_cmd_opcode;
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
@@ -611,6 +601,7 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
+ bool non_strict;
enum arm_smmu_domain_stage stage;
union {
@@ -708,7 +699,7 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
}
/*
- * Wait for the SMMU to consume items. If drain is true, wait until the queue
+ * Wait for the SMMU to consume items. If sync is true, wait until the queue
* is empty. Otherwise, wait until there is at least one free slot.
*/
static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
@@ -901,6 +892,8 @@ static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
struct arm_smmu_queue *q = &smmu->cmdq.q;
bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]);
+
while (queue_insert_raw(q, cmd) == -ENOSPC) {
if (queue_poll_cons(q, false, wfe))
dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
@@ -948,15 +941,21 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
struct arm_smmu_cmdq_ent ent = {
.opcode = CMDQ_OP_CMD_SYNC,
.sync = {
- .msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
.msiaddr = virt_to_phys(&smmu->sync_count),
},
};
- arm_smmu_cmdq_build_cmd(cmd, &ent);
-
spin_lock_irqsave(&smmu->cmdq.lock, flags);
- arm_smmu_cmdq_insert_cmd(smmu, cmd);
+
+ /* Piggy-back on the previous command if it's a SYNC */
+ if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
+ ent.sync.msidata = smmu->sync_nr;
+ } else {
+ ent.sync.msidata = ++smmu->sync_nr;
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ }
+
spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
@@ -1372,15 +1371,11 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
}
/* IO_PGTABLE API */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
-{
- arm_smmu_cmdq_issue_sync(smmu);
-}
-
static void arm_smmu_tlb_sync(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- __arm_smmu_tlb_sync(smmu_domain->smmu);
+
+ arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
@@ -1398,8 +1393,14 @@ static void arm_smmu_tlb_inv_context(void *cookie)
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
+ /*
+ * NOTE: when io-pgtable is in non-strict mode, we may get here with
+ * PTEs previously cleared by unmaps on the current CPU not yet visible
+ * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
+ * to guarantee those are observed before the TLBI. Do be careful, 007.
+ */
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- __arm_smmu_tlb_sync(smmu);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
@@ -1624,6 +1625,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+ if (smmu_domain->non_strict)
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops)
return -ENOMEM;
@@ -1772,12 +1776,20 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->smmu)
+ arm_smmu_tlb_inv_context(smmu_domain);
+}
+
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
if (smmu)
- __arm_smmu_tlb_sync(smmu);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static phys_addr_t
@@ -1917,15 +1929,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = smmu_domain->non_strict;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
default:
- return -ENODEV;
+ return -EINVAL;
}
}
@@ -1935,26 +1959,37 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
mutex_lock(&smmu_domain->init_mutex);
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch(attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ smmu_domain->non_strict = *(int *)data;
+ break;
+ default:
+ ret = -ENODEV;
}
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
break;
default:
- ret = -ENODEV;
+ ret = -EINVAL;
}
out_unlock:
@@ -1999,7 +2034,7 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
@@ -2180,7 +2215,6 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
- atomic_set(&smmu->sync_nr, 0);
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
@@ -2353,8 +2387,8 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
irq = smmu->combined_irq;
if (irq) {
/*
- * Cavium ThunderX2 implementation doesn't not support unique
- * irq lines. Use single irq line for all the SMMUv3 interrupts.
+ * Cavium ThunderX2 implementation doesn't support unique irq
+ * lines. Use a single irq line for all the SMMUv3 interrupts.
*/
ret = devm_request_threaded_irq(smmu->dev, irq,
arm_smmu_combined_irq_handler,
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fd1b80ef9490..5a28ae892504 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -52,6 +52,7 @@
#include <linux/spinlock.h>
#include <linux/amba/bus.h>
+#include <linux/fsl/mc.h>
#include "io-pgtable.h"
#include "arm-smmu-regs.h"
@@ -246,6 +247,7 @@ struct arm_smmu_domain {
const struct iommu_gather_ops *tlb_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
+ bool non_strict;
struct mutex init_mutex; /* Protects smmu pointer */
spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
struct iommu_domain domain;
@@ -447,7 +449,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
- writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+ /*
+ * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
+ * cleared by the current CPU are visible to the SMMU before the TLBI.
+ */
+ writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
arm_smmu_tlb_sync_context(cookie);
}
@@ -457,7 +463,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *base = ARM_SMMU_GR0(smmu);
- writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+ /* NOTE: see above */
+ writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
arm_smmu_tlb_sync_global(smmu);
}
@@ -469,6 +476,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
if (stage1) {
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
@@ -510,6 +520,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
struct arm_smmu_domain *smmu_domain = cookie;
void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}
@@ -863,6 +876,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+ if (smmu_domain->non_strict)
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
smmu_domain->smmu = smmu;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops) {
@@ -1252,6 +1268,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->tlb_ops)
+ smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+}
+
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1459,6 +1483,8 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
if (dev_is_pci(dev))
group = pci_device_group(dev);
+ else if (dev_is_fsl_mc(dev))
+ group = fsl_mc_device_group(dev);
else
group = generic_device_group(dev);
@@ -1470,15 +1496,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
+ switch(domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = smmu_domain->non_strict;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
default:
- return -ENODEV;
+ return -EINVAL;
}
}
@@ -1488,28 +1526,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
mutex_lock(&smmu_domain->init_mutex);
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
+ switch(domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ smmu_domain->non_strict = *(int *)data;
+ break;
+ default:
+ ret = -ENODEV;
}
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
break;
default:
- ret = -ENODEV;
+ ret = -EINVAL;
}
-
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1562,7 +1610,7 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
@@ -2036,6 +2084,10 @@ static void arm_smmu_bus_init(void)
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
#endif
+#ifdef CONFIG_FSL_MC_BUS
+ if (!iommu_present(&fsl_mc_bus_type))
+ bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+#endif
}
static int arm_smmu_device_probe(struct platform_device *pdev)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 511ff9a1d6d9..d1b04753b204 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -55,6 +55,9 @@ struct iommu_dma_cookie {
};
struct list_head msi_page_list;
spinlock_t msi_lock;
+
+ /* Domain for flush queue callback; NULL if flush queue not in use */
+ struct iommu_domain *fq_domain;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}
+static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+{
+ struct iommu_dma_cookie *cookie;
+ struct iommu_domain *domain;
+
+ cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+ domain = cookie->fq_domain;
+ /*
+ * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
+ * implies that ops->flush_iotlb_all must be non-NULL.
+ */
+ domain->ops->flush_iotlb_all(domain);
+}
+
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
+ int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
init_iova_domain(iovad, 1UL << order, base_pfn);
+
+ if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+ cookie->fq_domain = domain;
+ init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+ }
+
if (!dev)
return 0;
@@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
+ else if (cookie->fq_domain) /* non-strict mode */
+ queue_iova(iovad, iova_pfn(iovad, iova),
+ size >> iova_shift(iovad), 0);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
@@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
- WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+ WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
+ if (!cookie->fq_domain)
+ iommu_tlb_sync(domain);
iommu_dma_free_iova(cookie, dma_addr, size);
}
@@ -491,7 +521,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
*handle = IOMMU_MAPPING_ERROR;
}
@@ -518,7 +548,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct page **pages;
@@ -606,9 +636,8 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot)
+ size_t size, int prot, struct iommu_domain *domain)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
size_t iova_off = 0;
dma_addr_t iova;
@@ -632,13 +661,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, int prot)
{
- return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+ return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
+ iommu_get_dma_domain(dev));
}
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
/*
@@ -726,7 +756,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int prot)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
@@ -811,20 +841,21 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
sg = tmp;
}
end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+ iommu_get_dma_domain(dev));
}
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -850,7 +881,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot);
+ iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
if (iommu_dma_mapping_error(dev, iova))
goto out_free_page;
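
The dma-iommu changes above wire up "non-strict" (lazy) TLB invalidation: when the driver advertises DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, freed IOVAs are queued via queue_iova() and flushed in batches through iommu_dma_flush_iotlb_all(), instead of every unmap paying for a synchronous invalidation. A hedged sketch of opting a domain into this mode through the generic attribute interface:

	int attr = 1;

	/*
	 * Request lazy invalidation; a supporting driver (e.g. arm-smmu)
	 * records this and sets IO_PGTABLE_QUIRK_NON_STRICT when the
	 * io-pgtable is allocated.
	 */
	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr))
		pr_warn("non-strict mode not supported, staying strict\n");
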
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 8540625796a1..1b955aea44dd 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -543,7 +543,7 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
return ~(u32)0;
}
- for_each_node_by_type(node, "cpu") {
+ for_each_of_cpu_node(node) {
prop = of_get_property(node, "reg", &len);
for (i = 0; i < len / sizeof(u32); i++) {
if (be32_to_cpup(&prop[i]) == vcpu) {
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index f089136e9c3f..9b528cfcc6c3 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -814,6 +814,55 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
return 0;
}
+static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
+{
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
+ if (dma_domain->enabled) {
+ pr_debug("Can't set geometry attributes as domain is active\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EBUSY;
+ }
+
+ /* Ensure that the geometry has been set for the domain */
+ if (!dma_domain->geom_size) {
+ pr_debug("Please configure geometry before setting the number of windows\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure we have valid window count i.e. it should be less than
+ * maximum permissible limit and should be a power of two.
+ */
+ if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
+ pr_debug("Invalid window count\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
+ w_count > 1 ? w_count : 0);
+ if (!ret) {
+ kfree(dma_domain->win_arr);
+ dma_domain->win_arr = kcalloc(w_count,
+ sizeof(*dma_domain->win_arr),
+ GFP_ATOMIC);
+ if (!dma_domain->win_arr) {
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -ENOMEM;
+ }
+ dma_domain->win_cnt = w_count;
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return ret;
+}
+
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
@@ -830,6 +879,9 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
ret = configure_domain_dma_state(dma_domain, *(int *)data);
break;
+ case DOMAIN_ATTR_WINDOWS:
+ ret = fsl_pamu_set_windows(domain, *(u32 *)data);
+ break;
default:
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
@@ -856,6 +908,9 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_FSL_PAMUV1:
*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
break;
+ case DOMAIN_ATTR_WINDOWS:
+ *(u32 *)data = dma_domain->win_cnt;
+ break;
default:
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
@@ -916,13 +971,13 @@ static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
struct pci_controller *pci_ctl;
- bool pci_endpt_partioning;
+ bool pci_endpt_partitioning;
struct iommu_group *group = NULL;
pci_ctl = pci_bus_to_host(pdev->bus);
- pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
+ pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
/* We can partition PCIe devices so assign device group to the device */
- if (pci_endpt_partioning) {
+ if (pci_endpt_partitioning) {
group = pci_device_group(&pdev->dev);
/*
@@ -994,62 +1049,6 @@ static void fsl_pamu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
-static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
- if (dma_domain->enabled) {
- pr_debug("Can't set geometry attributes as domain is active\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- /* Ensure that the geometry has been set for the domain */
- if (!dma_domain->geom_size) {
- pr_debug("Please configure geometry before setting the number of windows\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- /*
- * Ensure we have valid window count i.e. it should be less than
- * maximum permissible limit and should be a power of two.
- */
- if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
- pr_debug("Invalid window count\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
- w_count > 1 ? w_count : 0);
- if (!ret) {
- kfree(dma_domain->win_arr);
- dma_domain->win_arr = kcalloc(w_count,
- sizeof(*dma_domain->win_arr),
- GFP_ATOMIC);
- if (!dma_domain->win_arr) {
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENOMEM;
- }
- dma_domain->win_cnt = w_count;
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
-static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
-
- return dma_domain->win_cnt;
-}
-
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
@@ -1058,8 +1057,6 @@ static const struct iommu_ops fsl_pamu_ops = {
.detach_dev = fsl_pamu_detach_device,
.domain_window_enable = fsl_pamu_window_enable,
.domain_window_disable = fsl_pamu_window_disable,
- .domain_get_windows = fsl_pamu_get_windows,
- .domain_set_windows = fsl_pamu_set_windows,
.iova_to_phys = fsl_pamu_iova_to_phys,
.domain_set_attr = fsl_pamu_set_domain_attr,
.domain_get_attr = fsl_pamu_get_domain_attr,
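
With this fsl_pamu change, the window count moves from the dedicated domain_set_windows/domain_get_windows ops to the generic attribute interface, so a caller would now configure it roughly as follows (the count of 8 is a hypothetical example; it must be a power of two within the PAMU limit):

	u32 w_count = 8;

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &w_count);
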
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
new file mode 100644
index 000000000000..7fabf9b1c2dc
--- /dev/null
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Intel Corporation.
+ *
+ * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
+ * Sohil Mehta <sohil.mehta@intel.com>
+ * Jacob Pan <jacob.jun.pan@linux.intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dmar.h>
+#include <linux/intel-iommu.h>
+#include <linux/pci.h>
+
+#include <asm/irq_remapping.h>
+
+struct iommu_regset {
+ int offset;
+ const char *regs;
+};
+
+#define IOMMU_REGSET_ENTRY(_reg_) \
+ { DMAR_##_reg_##_REG, __stringify(_reg_) }
+static const struct iommu_regset iommu_regs[] = {
+ IOMMU_REGSET_ENTRY(VER),
+ IOMMU_REGSET_ENTRY(CAP),
+ IOMMU_REGSET_ENTRY(ECAP),
+ IOMMU_REGSET_ENTRY(GCMD),
+ IOMMU_REGSET_ENTRY(GSTS),
+ IOMMU_REGSET_ENTRY(RTADDR),
+ IOMMU_REGSET_ENTRY(CCMD),
+ IOMMU_REGSET_ENTRY(FSTS),
+ IOMMU_REGSET_ENTRY(FECTL),
+ IOMMU_REGSET_ENTRY(FEDATA),
+ IOMMU_REGSET_ENTRY(FEADDR),
+ IOMMU_REGSET_ENTRY(FEUADDR),
+ IOMMU_REGSET_ENTRY(AFLOG),
+ IOMMU_REGSET_ENTRY(PMEN),
+ IOMMU_REGSET_ENTRY(PLMBASE),
+ IOMMU_REGSET_ENTRY(PLMLIMIT),
+ IOMMU_REGSET_ENTRY(PHMBASE),
+ IOMMU_REGSET_ENTRY(PHMLIMIT),
+ IOMMU_REGSET_ENTRY(IQH),
+ IOMMU_REGSET_ENTRY(IQT),
+ IOMMU_REGSET_ENTRY(IQA),
+ IOMMU_REGSET_ENTRY(ICS),
+ IOMMU_REGSET_ENTRY(IRTA),
+ IOMMU_REGSET_ENTRY(PQH),
+ IOMMU_REGSET_ENTRY(PQT),
+ IOMMU_REGSET_ENTRY(PQA),
+ IOMMU_REGSET_ENTRY(PRS),
+ IOMMU_REGSET_ENTRY(PECTL),
+ IOMMU_REGSET_ENTRY(PEDATA),
+ IOMMU_REGSET_ENTRY(PEADDR),
+ IOMMU_REGSET_ENTRY(PEUADDR),
+ IOMMU_REGSET_ENTRY(MTRRCAP),
+ IOMMU_REGSET_ENTRY(MTRRDEF),
+ IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
+ IOMMU_REGSET_ENTRY(VCCAP),
+ IOMMU_REGSET_ENTRY(VCMD),
+ IOMMU_REGSET_ENTRY(VCRSP),
+};
+
+static int iommu_regset_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ unsigned long flag;
+ int i, ret = 0;
+ u64 value;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!drhd->reg_base_addr) {
+ seq_puts(m, "IOMMU: Invalid base address\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
+ iommu->name, drhd->reg_base_addr);
+ seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
+ /*
+ * Publish the contents of the 64-bit hardware registers
+ * by adding the offset to the pointer (virtual address).
+ */
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
+ value = dmar_readq(iommu->reg + iommu_regs[i].offset);
+ seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
+ iommu_regs[i].regs, iommu_regs[i].offset,
+ value);
+ }
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ seq_putc(m, '\n');
+ }
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_regset);
+
+static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
+ int bus)
+{
+ struct context_entry *context;
+ int devfn;
+
+ seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
+ seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n");
+
+ for (devfn = 0; devfn < 256; devfn++) {
+ context = iommu_context_addr(iommu, bus, devfn, 0);
+ if (!context)
+ return;
+
+ if (!context_present(context))
+ continue;
+
+ seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ context[0].hi, context[0].lo);
+ }
+}
+
+static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
+{
+ unsigned long flags;
+ int bus;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
+ (u64)virt_to_phys(iommu->root_entry));
+ seq_puts(m, "Root Table Entries:\n");
+
+ for (bus = 0; bus < 256; bus++) {
+ if (!(iommu->root_entry[bus].lo & 1))
+ continue;
+
+ seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
+ iommu->root_entry[bus].hi,
+ iommu->root_entry[bus].lo);
+
+ ctx_tbl_entry_show(m, iommu, bus);
+ seq_putc(m, '\n');
+ }
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static int dmar_translation_struct_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ root_tbl_entry_show(m, iommu);
+ seq_putc(m, '\n');
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
+
+#ifdef CONFIG_IRQ_REMAP
+static void ir_tbl_remap_entry_show(struct seq_file *m,
+ struct intel_iommu *iommu)
+{
+ struct irte *ri_entry;
+ unsigned long flags;
+ int idx;
+
+ seq_puts(m, " Entry SrcID DstID Vct IRTE_high\t\tIRTE_low\n");
+
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
+ ri_entry = &iommu->ir_table->base[idx];
+ if (!ri_entry->present || ri_entry->p_pst)
+ continue;
+
+ seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x %016llx\t%016llx\n",
+ idx, PCI_BUS_NUM(ri_entry->sid),
+ PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
+ ri_entry->dest_id, ri_entry->vector,
+ ri_entry->high, ri_entry->low);
+ }
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+}
+
+static void ir_tbl_posted_entry_show(struct seq_file *m,
+ struct intel_iommu *iommu)
+{
+ struct irte *pi_entry;
+ unsigned long flags;
+ int idx;
+
+ seq_puts(m, " Entry SrcID PDA_high PDA_low Vct IRTE_high\t\tIRTE_low\n");
+
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
+ pi_entry = &iommu->ir_table->base[idx];
+ if (!pi_entry->present || !pi_entry->p_pst)
+ continue;
+
+ seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x %016llx\t%016llx\n",
+ idx, PCI_BUS_NUM(pi_entry->sid),
+ PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
+ pi_entry->pda_h, pi_entry->pda_l << 6,
+ pi_entry->vector, pi_entry->high,
+ pi_entry->low);
+ }
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+}
+
+/*
+ * For active IOMMUs go through the Interrupt remapping
+ * table and print valid entries in a table format for
+ * Remapped and Posted Interrupts.
+ */
+static int ir_translation_struct_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ u64 irta;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!ecap_ir_support(iommu->ecap))
+ continue;
+
+ seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
+ iommu->name);
+
+ if (iommu->ir_table) {
+ irta = virt_to_phys(iommu->ir_table->base);
+ seq_printf(m, " IR table address:%llx\n", irta);
+ ir_tbl_remap_entry_show(m, iommu);
+ } else {
+ seq_puts(m, "Interrupt Remapping is not enabled\n");
+ }
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, "****\n\n");
+
+ for_each_active_iommu(iommu, drhd) {
+ if (!cap_pi_support(iommu->cap))
+ continue;
+
+ seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
+ iommu->name);
+
+ if (iommu->ir_table) {
+ irta = virt_to_phys(iommu->ir_table->base);
+ seq_printf(m, " IR table address:%llx\n", irta);
+ ir_tbl_posted_entry_show(m, iommu);
+ } else {
+ seq_puts(m, "Interrupt Remapping is not enabled\n");
+ }
+ seq_putc(m, '\n');
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
+#endif
+
+void __init intel_iommu_debugfs_init(void)
+{
+ struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
+ iommu_debugfs_dir);
+
+ debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
+ &iommu_regset_fops);
+ debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
+ NULL, &dmar_translation_struct_fops);
+#ifdef CONFIG_IRQ_REMAP
+ debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
+ NULL, &ir_translation_struct_fops);
+#endif
+}
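
The three files created above land under the shared iommu debugfs root, typically /sys/kernel/debug/iommu/intel/ once debugfs is mounted at the conventional default. A minimal userspace reader, as a sketch (the mount point and root privileges are assumptions, not guaranteed by the patch):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/iommu/intel/dmar_translation_struct";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* needs root; debugfs must be mounted */
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* root/context tables, as printed above */
	fclose(f);
	return EXIT_SUCCESS;
}
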
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 76f0a5d16ed3..f3ccf025108b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -185,16 +185,6 @@ static int rwbf_quirk;
static int force_on = 0;
int intel_iommu_tboot_noforce;
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
- u64 lo;
- u64 hi;
-};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
/*
@@ -220,21 +210,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
- u64 lo;
- u64 hi;
-};
static inline void context_clear_pasid_enable(struct context_entry *context)
{
@@ -261,7 +236,7 @@ static inline bool __context_present(struct context_entry *context)
return (context->lo & 1);
}
-static inline bool context_present(struct context_entry *context)
+bool context_present(struct context_entry *context)
{
return context_pasid_enabled(context) ?
__context_present(context) :
@@ -788,8 +763,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
-static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
- u8 bus, u8 devfn, int alloc)
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+ u8 devfn, int alloc)
{
struct root_entry *root = &iommu->root_entry[bus];
struct context_entry *context;
@@ -4860,6 +4835,7 @@ int __init intel_iommu_init(void)
cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
intel_iommu_cpu_dead);
intel_iommu_enabled = 1;
+ intel_iommu_debugfs_init();
return 0;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 967450bd421a..c2d6c11431de 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -76,7 +76,7 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS];
* in a single-threaded environment with interrupts disabled, so no need to take
* the dmar_global_lock.
*/
-static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;
static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index b5948ba6b3b3..445c3bde0480 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -587,6 +587,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
}
io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+ io_pgtable_tlb_sync(&data->iop);
return size;
}
@@ -642,6 +643,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte[i], lvl);
__arm_v7s_free_table(ptep, lvl + 1, data);
+ } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+ /*
+ * Order the PTE update against queueing the IOVA, to
+ * guarantee that a flush callback from a different CPU
+ * has observed it before the TLBIALL can be issued.
+ */
+ smp_wmb();
} else {
io_pgtable_tlb_add_flush(iop, iova, blk_size,
blk_size, true);
@@ -712,7 +720,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_4GB |
- IO_PGTABLE_QUIRK_NO_DMA))
+ IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
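
The smp_wmb() added here (and in the LPAE version below) pairs with the flush-queue drain on another CPU: the PTE clear must be visible before the IOVA is queued, otherwise the eventual TLBIALL could run against a stale entry and the IOVA could be recycled too early. A condensed sketch of the intended ordering (the step names are illustrative, not the real helpers):

/*
 *   CPU 0 (unmap, non-strict)        CPU 1 (flush-queue drain)
 *   -------------------------        -------------------------
 *   clear PTE
 *   smp_wmb()
 *   add IOVA to flush queue  ---->   observe the queued IOVA
 *                                    issue TLBIALL
 *                                    free the IOVA for reuse
 *
 * Without the barrier, CPU 1 could issue the TLBIALL and recycle the
 * IOVA before CPU 0's PTE clear is visible to the page-table walker.
 */
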
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 88641b4560bc..237cacd4a62b 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -574,13 +574,13 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return 0;
tablep = iopte_deref(pte, data);
+ } else if (unmap_idx >= 0) {
+ io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+ io_pgtable_tlb_sync(&data->iop);
+ return size;
}
- if (unmap_idx < 0)
- return __arm_lpae_unmap(data, iova, size, lvl, tablep);
-
- io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
- return size;
+ return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
@@ -610,6 +610,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
+ } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+ /*
+ * Order the PTE update against queueing the IOVA, to
+ * guarantee that a flush callback from a different CPU
+ * has observed it before the TLBIALL can be issued.
+ */
+ smp_wmb();
} else {
io_pgtable_tlb_add_flush(iop, iova, size, size, true);
}
@@ -772,7 +779,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
u64 reg;
struct arm_lpae_io_pgtable *data;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -864,7 +872,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
/* The NS quirk doesn't apply at stage 2 */
- if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 2df79093cad9..47d5ae559329 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -71,12 +71,17 @@ struct io_pgtable_cfg {
* be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
* software-emulated IOMMU), such that pagetable updates need not
* be treated as explicit DMA data.
+ *
+ * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
+ * on unmap, for DMA domains using the flush queue mechanism for
+ * delayed invalidation.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
#define IO_PGTABLE_QUIRK_NO_DMA BIT(4)
+ #define IO_PGTABLE_QUIRK_NON_STRICT BIT(5)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
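
Drivers opt in per page-table instance through io_pgtable_cfg.quirks, exactly like the existing quirks above. A sketch of the driver side, assuming an invented my_domain with a non_strict flag and elided TLB callbacks (none of these names are a real driver):

#include <linux/sizes.h>
#include "io-pgtable.h"

static const struct iommu_gather_ops my_tlb_ops;	/* flush callbacks (elided) */

struct my_domain {
	struct io_pgtable_ops *pgtbl_ops;
	bool non_strict;
};

static int my_domain_init_pgtable(struct my_domain *dom, struct device *dev)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &my_tlb_ops,
		.iommu_dev	= dev,
	};

	/* e.g. set when DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE was requested */
	if (dom->non_strict)
		cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
	return dom->pgtbl_ops ? 0 : -ENOMEM;
}
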
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8c15c5980299..edbdf5d6962c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -32,6 +32,7 @@
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
+#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -41,6 +42,7 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
+static bool iommu_dma_strict __read_mostly = true;
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -131,6 +133,12 @@ static int __init iommu_set_def_domain_type(char *str)
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
+static int __init iommu_dma_setup(char *str)
+{
+ return kstrtobool(str, &iommu_dma_strict);
+}
+early_param("iommu.strict", iommu_dma_setup);
+
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -1024,6 +1032,18 @@ struct iommu_group *pci_device_group(struct device *dev)
return iommu_group_alloc();
}
+/* Get the IOMMU group for a device on the fsl-mc bus */
+struct iommu_group *fsl_mc_device_group(struct device *dev)
+{
+ struct device *cont_dev = fsl_mc_cont_dev(dev);
+ struct iommu_group *group;
+
+ group = iommu_group_get(cont_dev);
+ if (!group)
+ group = iommu_group_alloc();
+ return group;
+}
+
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
* @dev: target device
@@ -1072,6 +1092,13 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
group->default_domain = dom;
if (!group->domain)
group->domain = dom;
+
+ if (dom && !iommu_dma_strict) {
+ int attr = 1;
+ iommu_domain_set_attr(dom,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+ &attr);
+ }
}
ret = iommu_group_add_device(group, dev);
@@ -1416,7 +1443,16 @@ struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
/*
- * IOMMU groups are really the natrual working unit of the IOMMU, but
+ * Return a device's default domain. Intended for IOMMU_DOMAIN_DMA
+ * implementations which already provide their own guarantees that the
+ * group and its default domain are valid and correct.
+ */
+struct iommu_domain *iommu_get_dma_domain(struct device *dev)
+{
+ return dev->iommu_group->default_domain;
+}
+
+/*
+ * IOMMU groups are really the natural working unit of the IOMMU, but
* the IOMMU API works on domains and devices. Bridge that gap by
* iterating over the devices in a group. Ideally we'd have a single
* device which represents the requestor ID of the group, but we also
@@ -1796,7 +1832,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
struct iommu_domain_geometry *geometry;
bool *paging;
int ret = 0;
- u32 *count;
switch (attr) {
case DOMAIN_ATTR_GEOMETRY:
@@ -1808,15 +1843,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
paging = data;
*paging = (domain->pgsize_bitmap != 0UL);
break;
- case DOMAIN_ATTR_WINDOWS:
- count = data;
-
- if (domain->ops->domain_get_windows != NULL)
- *count = domain->ops->domain_get_windows(domain);
- else
- ret = -ENODEV;
-
- break;
default:
if (!domain->ops->domain_get_attr)
return -EINVAL;
@@ -1832,18 +1858,8 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
int ret = 0;
- u32 *count;
switch (attr) {
- case DOMAIN_ATTR_WINDOWS:
- count = data;
-
- if (domain->ops->domain_set_windows != NULL)
- ret = domain->ops->domain_set_windows(domain, *count);
- else
- ret = -ENODEV;
-
- break;
default:
if (domain->ops->domain_set_attr == NULL)
return -EINVAL;
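
With the iommu.strict parameter wired up above, booting with iommu.strict=0 makes iommu_group_get_for_dev() request flush-queue behaviour on each new default DMA domain. Since the attribute falls through to the default case in iommu_domain_set_attr(), a driver accepts it in its domain_set_attr hook, roughly as in this sketch (my_domain and its fields are illustrative):

#include <linux/iommu.h>

struct my_domain {
	struct iommu_domain domain;
	struct io_pgtable_ops *pgtbl_ops;
	bool non_strict;
};

#define to_my_domain(d) container_of(d, struct my_domain, domain)

static int my_domain_set_attr(struct iommu_domain *domain,
			      enum iommu_attr attr, void *data)
{
	struct my_domain *dom = to_my_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		if (dom->pgtbl_ops)	/* too late once the table exists */
			return -EPERM;
		dom->non_strict = *(int *)data;
		return 0;
	default:
		return -ENODEV;
	}
}
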
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 83fe2621effe..f8d3ba247523 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -56,6 +56,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
iovad->flush_cb = NULL;
iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -139,8 +140,10 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
if (free->pfn_hi < iovad->dma_32bit_pfn &&
- free->pfn_lo >= cached_iova->pfn_lo)
+ free->pfn_lo >= cached_iova->pfn_lo) {
iovad->cached32_node = rb_next(&free->node);
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+ }
cached_iova = rb_entry(iovad->cached_node, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -190,6 +193,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ if (limit_pfn <= iovad->dma_32bit_pfn &&
+ size >= iovad->max32_alloc_size)
+ goto iova32_full;
+
curr = __get_cached_rbnode(iovad, limit_pfn);
curr_iova = rb_entry(curr, struct iova, node);
do {
@@ -200,10 +207,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr_iova = rb_entry(curr, struct iova, node);
} while (curr && new_pfn <= curr_iova->pfn_hi);
- if (limit_pfn < size || new_pfn < iovad->start_pfn) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
- }
+ if (limit_pfn < size || new_pfn < iovad->start_pfn)
+ goto iova32_full;
/* pfn_lo will point to size aligned address if size_aligned is set */
new->pfn_lo = new_pfn;
@@ -214,9 +219,12 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
__cached_rbnode_insert_update(iovad, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
return 0;
+
+iova32_full:
+ iovad->max32_alloc_size = size;
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
}
static struct kmem_cache *iova_cache;
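
The effect of max32_alloc_size is to turn a storm of failing sub-32-bit allocations from full rbtree walks into an O(1) early exit: the first failure records the size that could not be satisfied, every request at least that large then fails immediately, and a free below the 32-bit boundary (handled in __cached_rbnode_delete_update() above) resets the estimate. A rough timeline, with sizes in IOVA pages:

/*
 *   alloc(8)  -> walks the tree, fails -> max32_alloc_size = 8
 *   alloc(16) -> 16 >= 8               -> fails without walking
 *   alloc(4)  -> 4 < 8                 -> walks the tree as before
 *   free(IOVA below 4 GiB)             -> max32_alloc_size reset
 *   alloc(16) -> walks the tree again
 */
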
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 22b94f8a9a04..b98a03189580 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IPMMU VMSA
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/bitmap.h>
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index f7787e757244..c5dd63072529 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -24,6 +24,7 @@
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
+#include <linux/fsl/mc.h>
#define NO_IOMMU 1
@@ -132,9 +133,8 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
struct of_phandle_args iommu_spec = { .args_count = 1 };
int err;
- err = of_pci_map_rid(info->np, alias, "iommu-map",
- "iommu-map-mask", &iommu_spec.np,
- iommu_spec.args);
+ err = of_map_rid(info->np, alias, "iommu-map", "iommu-map-mask",
+ &iommu_spec.np, iommu_spec.args);
if (err)
return err == -ENODEV ? NO_IOMMU : err;
@@ -143,6 +143,23 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
return err;
}
+static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev,
+ struct device_node *master_np)
+{
+ struct of_phandle_args iommu_spec = { .args_count = 1 };
+ int err;
+
+ err = of_map_rid(master_np, mc_dev->icid, "iommu-map",
+ "iommu-map-mask", &iommu_spec.np,
+ iommu_spec.args);
+ if (err)
+ return err == -ENODEV ? NO_IOMMU : err;
+
+ err = of_iommu_xlate(&mc_dev->dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
+ return err;
+}
+
const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
@@ -174,6 +191,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
err = pci_for_each_dma_alias(to_pci_dev(dev),
of_pci_iommu_init, &info);
+ } else if (dev_is_fsl_mc(dev)) {
+ err = of_fsl_mc_iommu_init(to_fsl_mc_device(dev), master_np);
} else {
struct of_phandle_args iommu_spec;
int idx = 0;
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index ca623e6446e4..fca31640e3ef 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -20,13 +20,13 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
-#include <asm/macintosh.h>
-#include <asm/macints.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
#include <asm/mac_iop.h>
#include <asm/mac_oss.h>
#include <asm/adb_iop.h>
-#include <linux/adb.h>
+#include <linux/adb.h>
/*#define DEBUG_ADB_IOP*/
@@ -38,9 +38,9 @@ static unsigned char *reply_ptr;
#endif
static enum adb_iop_state {
- idle,
- sending,
- awaiting_reply
+ idle,
+ sending,
+ awaiting_reply
} adb_iop_state;
static void adb_iop_start(void);
@@ -66,7 +66,8 @@ static void adb_iop_end_req(struct adb_request *req, int state)
{
req->complete = 1;
current_req = req->next;
- if (req->done) (*req->done)(req);
+ if (req->done)
+ (*req->done)(req);
adb_iop_state = state;
}
@@ -100,7 +101,7 @@ static void adb_iop_complete(struct iop_msg *msg)
static void adb_iop_listen(struct iop_msg *msg)
{
- struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
+ struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message;
struct adb_request *req;
unsigned long flags;
#ifdef DEBUG_ADB_IOP
@@ -113,9 +114,9 @@ static void adb_iop_listen(struct iop_msg *msg)
#ifdef DEBUG_ADB_IOP
printk("adb_iop_listen %p: rcvd packet, %d bytes: %02X %02X", req,
- (uint) amsg->count + 2, (uint) amsg->flags, (uint) amsg->cmd);
+ (uint)amsg->count + 2, (uint)amsg->flags, (uint)amsg->cmd);
for (i = 0; i < amsg->count; i++)
- printk(" %02X", (uint) amsg->data[i]);
+ printk(" %02X", (uint)amsg->data[i]);
printk("\n");
#endif
@@ -168,14 +169,15 @@ static void adb_iop_start(void)
/* get the packet to send */
req = current_req;
- if (!req) return;
+ if (!req)
+ return;
local_irq_save(flags);
#ifdef DEBUG_ADB_IOP
printk("adb_iop_start %p: sending packet, %d bytes:", req, req->nbytes);
- for (i = 0 ; i < req->nbytes ; i++)
- printk(" %02X", (uint) req->data[i]);
+ for (i = 0; i < req->nbytes; i++)
+ printk(" %02X", (uint)req->data[i]);
printk("\n");
#endif
@@ -196,19 +198,20 @@ static void adb_iop_start(void)
/* Now send it. The IOP manager will call adb_iop_complete */
/* when the packet has been sent. */
- iop_send_message(ADB_IOP, ADB_CHAN, req,
- sizeof(amsg), (__u8 *) &amsg, adb_iop_complete);
+ iop_send_message(ADB_IOP, ADB_CHAN, req, sizeof(amsg), (__u8 *)&amsg,
+ adb_iop_complete);
}
int adb_iop_probe(void)
{
- if (!iop_ism_present) return -ENODEV;
+ if (!iop_ism_present)
+ return -ENODEV;
return 0;
}
int adb_iop_init(void)
{
- printk("adb: IOP ISM driver v0.4 for Unified ADB.\n");
+ pr_info("adb: IOP ISM driver v0.4 for Unified ADB\n");
iop_listen(ADB_IOP, ADB_CHAN, adb_iop_listen, "ADB");
return 0;
}
@@ -218,10 +221,12 @@ int adb_iop_send_request(struct adb_request *req, int sync)
int err;
err = adb_iop_write(req);
- if (err) return err;
+ if (err)
+ return err;
if (sync) {
- while (!req->complete) adb_iop_poll();
+ while (!req->complete)
+ adb_iop_poll();
}
return 0;
}
@@ -251,7 +256,9 @@ static int adb_iop_write(struct adb_request *req)
}
local_irq_restore(flags);
- if (adb_iop_state == idle) adb_iop_start();
+
+ if (adb_iop_state == idle)
+ adb_iop_start();
return 0;
}
@@ -263,7 +270,8 @@ int adb_iop_autopoll(int devs)
void adb_iop_poll(void)
{
- if (adb_iop_state == idle) adb_iop_start();
+ if (adb_iop_state == idle)
+ adb_iop_start();
iop_ism_irq_poll(ADB_IOP);
}
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 76e98f0f7a3e..e49d1f287a17 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -203,15 +203,15 @@ static int adb_scan_bus(void)
}
/* Now fill in the handler_id field of the adb_handler entries. */
- pr_debug("adb devices:\n");
for (i = 1; i < 16; i++) {
if (adb_handler[i].original_address == 0)
continue;
adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
(i << 4) | 0xf);
adb_handler[i].handler_id = req.reply[2];
- pr_debug(" [%d]: %d %x\n", i, adb_handler[i].original_address,
- adb_handler[i].handler_id);
+ printk(KERN_DEBUG "adb device [%d]: %d 0x%X\n", i,
+ adb_handler[i].original_address,
+ adb_handler[i].handler_id);
devmask |= 1 << i;
}
return devmask;
@@ -579,6 +579,8 @@ adb_try_handler_change(int address, int new_id)
mutex_lock(&adb_handler_mutex);
ret = try_handler_change(address, new_id);
mutex_unlock(&adb_handler_mutex);
+ if (ret)
+ pr_debug("adb handler change: [%d] 0x%X\n", address, new_id);
return ret;
}
EXPORT_SYMBOL(adb_try_handler_change);
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index a261892c03b3..75482eeab2c4 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -757,6 +757,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
struct input_dev *input_dev;
int err;
int i;
+ char *keyboard_type;
if (adbhid[id]) {
pr_err("Trying to reregister ADB HID on ID %d\n", id);
@@ -798,24 +799,23 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
memcpy(hid->keycode, adb_to_linux_keycodes, sizeof(adb_to_linux_keycodes));
- pr_info("Detected ADB keyboard, type ");
switch (original_handler_id) {
default:
- pr_cont("<unknown>.\n");
+ keyboard_type = "<unknown>";
input_dev->id.version = ADB_KEYBOARD_UNKNOWN;
break;
case 0x01: case 0x02: case 0x03: case 0x06: case 0x08:
case 0x0C: case 0x10: case 0x18: case 0x1B: case 0x1C:
case 0xC0: case 0xC3: case 0xC6:
- pr_cont("ANSI.\n");
+ keyboard_type = "ANSI";
input_dev->id.version = ADB_KEYBOARD_ANSI;
break;
case 0x04: case 0x05: case 0x07: case 0x09: case 0x0D:
case 0x11: case 0x14: case 0x19: case 0x1D: case 0xC1:
case 0xC4: case 0xC7:
- pr_cont("ISO, swapping keys.\n");
+ keyboard_type = "ISO, swapping keys";
input_dev->id.version = ADB_KEYBOARD_ISO;
i = hid->keycode[10];
hid->keycode[10] = hid->keycode[50];
@@ -824,10 +824,11 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
case 0x12: case 0x15: case 0x16: case 0x17: case 0x1A:
case 0x1E: case 0xC2: case 0xC5: case 0xC8: case 0xC9:
- pr_cont("JIS.\n");
+ keyboard_type = "JIS";
input_dev->id.version = ADB_KEYBOARD_JIS;
break;
}
+ pr_info("Detected ADB keyboard, type %s.\n", keyboard_type);
for (i = 0; i < 128; i++)
if (hid->keycode[i])
@@ -972,16 +973,13 @@ adbhid_probe(void)
->get it to send separate codes for left and right shift,
control, option keys */
#if 0 /* handler 5 doesn't send separate codes for R modifiers */
- if (adb_try_handler_change(id, 5))
- printk("ADB keyboard at %d, handler set to 5\n", id);
- else
+ if (!adb_try_handler_change(id, 5))
#endif
- if (adb_try_handler_change(id, 3))
- printk("ADB keyboard at %d, handler set to 3\n", id);
- else
- printk("ADB keyboard at %d, handler 1\n", id);
+ adb_try_handler_change(id, 3);
adb_get_infos(id, &default_id, &cur_handler_id);
+ printk(KERN_DEBUG "ADB keyboard at %d has handler 0x%X\n",
+ id, cur_handler_id);
reg |= adbhid_input_reregister(id, default_id, org_handler_id,
cur_handler_id, 0);
}
@@ -999,48 +997,44 @@ adbhid_probe(void)
for (i = 0; i < mouse_ids.nids; i++) {
int id = mouse_ids.id[i];
int mouse_kind;
+ char *desc = "standard";
adb_get_infos(id, &default_id, &org_handler_id);
if (adb_try_handler_change(id, 4)) {
- printk("ADB mouse at %d, handler set to 4", id);
mouse_kind = ADBMOUSE_EXTENDED;
}
else if (adb_try_handler_change(id, 0x2F)) {
- printk("ADB mouse at %d, handler set to 0x2F", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 0x42)) {
- printk("ADB mouse at %d, handler set to 0x42", id);
mouse_kind = ADBMOUSE_TRACKBALLPRO;
}
else if (adb_try_handler_change(id, 0x66)) {
- printk("ADB mouse at %d, handler set to 0x66", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 0x5F)) {
- printk("ADB mouse at %d, handler set to 0x5F", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 3)) {
- printk("ADB mouse at %d, handler set to 3", id);
mouse_kind = ADBMOUSE_MS_A3;
}
else if (adb_try_handler_change(id, 2)) {
- printk("ADB mouse at %d, handler set to 2", id);
mouse_kind = ADBMOUSE_STANDARD_200;
}
else {
- printk("ADB mouse at %d, handler 1", id);
mouse_kind = ADBMOUSE_STANDARD_100;
}
if ((mouse_kind == ADBMOUSE_TRACKBALLPRO)
|| (mouse_kind == ADBMOUSE_MICROSPEED)) {
+ desc = "Microspeed/MacPoint or compatible";
init_microspeed(id);
} else if (mouse_kind == ADBMOUSE_MS_A3) {
+ desc = "Mouse Systems A3 Mouse or compatible";
init_ms_a3(id);
} else if (mouse_kind == ADBMOUSE_EXTENDED) {
+ desc = "extended";
/*
* Register 1 is usually used for device
* identification. Here, we try to identify
@@ -1054,32 +1048,36 @@ adbhid_probe(void)
(req.reply[1] == 0x9a) && ((req.reply[2] == 0x21)
|| (req.reply[2] == 0x20))) {
mouse_kind = ADBMOUSE_TRACKBALL;
+ desc = "trackman/mouseman";
init_trackball(id);
}
else if ((req.reply_len >= 4) &&
(req.reply[1] == 0x74) && (req.reply[2] == 0x70) &&
(req.reply[3] == 0x61) && (req.reply[4] == 0x64)) {
mouse_kind = ADBMOUSE_TRACKPAD;
+ desc = "trackpad";
init_trackpad(id);
}
else if ((req.reply_len >= 4) &&
(req.reply[1] == 0x4b) && (req.reply[2] == 0x4d) &&
(req.reply[3] == 0x4c) && (req.reply[4] == 0x31)) {
mouse_kind = ADBMOUSE_TURBOMOUSE5;
+ desc = "TurboMouse 5";
init_turbomouse(id);
}
else if ((req.reply_len == 9) &&
(req.reply[1] == 0x4b) && (req.reply[2] == 0x4f) &&
(req.reply[3] == 0x49) && (req.reply[4] == 0x54)) {
if (adb_try_handler_change(id, 0x42)) {
- pr_cont("\nADB MacAlly 2-button mouse at %d, handler set to 0x42", id);
mouse_kind = ADBMOUSE_MACALLY2;
+ desc = "MacAlly 2-button";
}
}
}
- pr_cont("\n");
adb_get_infos(id, &default_id, &cur_handler_id);
+ printk(KERN_DEBUG "ADB mouse (%s) at %d has handler 0x%X\n",
+ desc, id, cur_handler_id);
reg |= adbhid_input_reregister(id, default_id, org_handler_id,
cur_handler_id, mouse_kind);
}
@@ -1092,12 +1090,10 @@ init_trackpad(int id)
struct adb_request req;
unsigned char r1_buffer[8];
- pr_cont(" (trackpad)");
-
adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
ADB_READREG(id,1));
if (req.reply_len < 8)
- pr_cont("bad length for reg. 1\n");
+ pr_err("%s: bad length for reg. 1\n", __func__);
else
{
memcpy(r1_buffer, &req.reply[1], 8);
@@ -1145,8 +1141,6 @@ init_trackball(int id)
{
struct adb_request req;
- pr_cont(" (trackman/mouseman)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 00,0x81);
@@ -1177,8 +1171,6 @@ init_turbomouse(int id)
{
struct adb_request req;
- pr_cont(" (TurboMouse 5)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3));
@@ -1213,8 +1205,6 @@ init_microspeed(int id)
{
struct adb_request req;
- pr_cont(" (Microspeed/MacPoint or compatible)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
/* This will initialize mice using the Microspeed, MacPoint and
@@ -1253,7 +1243,6 @@ init_ms_a3(int id)
{
struct adb_request req;
- pr_cont(" (Mouse Systems A3 Mouse, or compatible)");
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id, 0x2),
0x00,
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 07074820a167..17d3bc917562 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -360,9 +360,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
struct macio_dev *in_bay,
struct resource *parent_res)
{
+ char name[MAX_NODE_NAME_SIZE + 1];
struct macio_dev *dev;
const u32 *reg;
-
+
if (np == NULL)
return NULL;
@@ -402,6 +403,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
#endif
/* MacIO itself has a different reg, we use its PCI base */
+ snprintf(name, sizeof(name), "%pOFn", np);
if (np == chip->of_node) {
dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
chip->lbus.index,
@@ -410,12 +412,12 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
#else
0, /* NuBus may want to do something better here */
#endif
- MAX_NODE_NAME_SIZE, np->name);
+ MAX_NODE_NAME_SIZE, name);
} else {
reg = of_get_property(np, "reg", NULL);
dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
chip->lbus.index,
- reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
+ reg ? *reg : 0, MAX_NODE_NAME_SIZE, name);
}
/* Setup interrupts & resources */
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index ca4fcffe454b..d2451e58acb9 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -58,7 +58,13 @@ static ssize_t devspec_show(struct device *dev,
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RO(devspec);
-macio_config_of_attr (name, "%s\n");
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%pOFn\n", dev->of_node);
+}
+static DEVICE_ATTR_RO(name);
+
macio_config_of_attr (type, "%s\n");
static struct attribute *macio_dev_attrs[] = {
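
The new name_show() relies on the %pOFn vsprintf extension, which prints a device_node's name and saves callers from dereferencing np->name directly; the windfarm and macio hunks elsewhere in this patch use the same conversion. A minimal sketch:

#include <linux/of.h>
#include <linux/printk.h>

static void show_node_name(const struct device_node *np)
{
	pr_info("node %pOFn\n", np);	/* prints what np->name used to hold */
}
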
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 98dd702eb867..bbec6ac0a966 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -766,3 +766,38 @@ cuda_input(unsigned char *buf, int nb)
buf, nb, false);
}
}
+
+/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+#define RTC_OFFSET 2082844800
+
+time64_t cuda_get_time(void)
+{
+ struct adb_request req;
+ u32 now;
+
+ if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
+ return 0;
+ while (!req.complete)
+ cuda_poll();
+ if (req.reply_len != 7)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ now = (req.reply[3] << 24) + (req.reply[4] << 16) +
+ (req.reply[5] << 8) + req.reply[6];
+ return (time64_t)now - RTC_OFFSET;
+}
+
+int cuda_set_rtc_time(struct rtc_time *tm)
+{
+ u32 now;
+ struct adb_request req;
+
+ now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
+ if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+ now >> 24, now >> 16, now >> 8, now) < 0)
+ return -ENXIO;
+ while (!req.complete)
+ cuda_poll();
+ if ((req.reply_len != 3) && (req.reply_len != 7))
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ return 0;
+}
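
As a quick sanity check on the constant: between 1904-01-01 and 1970-01-01 lie 66 years, 17 of them leap years (1904, 1908, ..., 1968), giving (66 * 365 + 17) * 86400 = 2082844800 seconds. In C11 this can even be verified at compile time:

/* Mac epoch (1904) to Unix epoch (1970): 66 years incl. 17 leap days */
_Static_assert((66 * 365 + 17) * 86400ULL == 2082844800ULL,
	       "RTC_OFFSET mismatch");
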
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index cf6f7d52d6be..ac824d7b2dcf 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -12,7 +12,7 @@
*
* 1999-08-02 (jmt) - Initial rewrite for Unified ADB.
* 2000-03-29 Tony Mantler <tonym@mac.linux-m68k.org>
- * - Big overhaul, should actually work now.
+ * - Big overhaul, should actually work now.
* 2006-12-31 Finn Thain - Another overhaul.
*
* Suggested reading:
@@ -23,7 +23,7 @@
* Apple's "ADB Analyzer" bus sniffer is invaluable:
* ftp://ftp.apple.com/developer/Tool_Chest/Devices_-_Hardware/Apple_Desktop_Bus/
*/
-
+
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -77,7 +77,7 @@ static volatile unsigned char *via;
#define ST_ODD 0x20 /* ADB state: odd data byte */
#define ST_IDLE 0x30 /* ADB state: idle, nothing to send */
-static int macii_init_via(void);
+static int macii_init_via(void);
static void macii_start(void);
static irqreturn_t macii_interrupt(int irq, void *arg);
static void macii_queue_poll(void);
@@ -120,31 +120,15 @@ static int srq_asserted; /* have to poll for the device that asserted it */
static int command_byte; /* the most recent command byte transmitted */
static int autopoll_devs; /* bits set are device addresses to be polled */
-/* Sanity check for request queue. Doesn't check for cycles. */
-static int request_is_queued(struct adb_request *req) {
- struct adb_request *cur;
- unsigned long flags;
- local_irq_save(flags);
- cur = current_req;
- while (cur) {
- if (cur == req) {
- local_irq_restore(flags);
- return 1;
- }
- cur = cur->next;
- }
- local_irq_restore(flags);
- return 0;
-}
-
/* Check for MacII style ADB */
static int macii_probe(void)
{
- if (macintosh_config->adb_type != MAC_ADB_II) return -ENODEV;
+ if (macintosh_config->adb_type != MAC_ADB_II)
+ return -ENODEV;
via = via1;
- printk("adb: Mac II ADB Driver v1.0 for Unified ADB\n");
+ pr_info("adb: Mac II ADB Driver v1.0 for Unified ADB\n");
return 0;
}
@@ -153,15 +137,17 @@ int macii_init(void)
{
unsigned long flags;
int err;
-
+
local_irq_save(flags);
-
+
err = macii_init_via();
- if (err) goto out;
+ if (err)
+ goto out;
err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
macii_interrupt);
- if (err) goto out;
+ if (err)
+ goto out;
macii_state = idle;
out:
@@ -169,7 +155,7 @@ out:
return err;
}
-/* initialize the hardware */
+/* initialize the hardware */
static int macii_init_via(void)
{
unsigned char x;
@@ -179,7 +165,7 @@ static int macii_init_via(void)
/* Set up state: idle */
via[B] |= ST_IDLE;
- last_status = via[B] & (ST_MASK|CTLR_IRQ);
+ last_status = via[B] & (ST_MASK | CTLR_IRQ);
/* Shift register on input */
via[ACR] = (via[ACR] & ~SR_CTRL) | SR_EXT;
@@ -205,7 +191,8 @@ static void macii_queue_poll(void)
int next_device;
static struct adb_request req;
- if (!autopoll_devs) return;
+ if (!autopoll_devs)
+ return;
device_mask = (1 << (((command_byte & 0xF0) >> 4) + 1)) - 1;
if (autopoll_devs & ~device_mask)
@@ -213,10 +200,7 @@ static void macii_queue_poll(void)
else
next_device = ffs(autopoll_devs) - 1;
- BUG_ON(request_is_queued(&req));
-
- adb_request(&req, NULL, ADBREQ_NOSEND, 1,
- ADB_READREG(next_device, 0));
+ adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_READREG(next_device, 0));
req.sent = 0;
req.complete = 0;
@@ -235,45 +219,47 @@ static void macii_queue_poll(void)
static int macii_send_request(struct adb_request *req, int sync)
{
int err;
- unsigned long flags;
- BUG_ON(request_is_queued(req));
-
- local_irq_save(flags);
err = macii_write(req);
- local_irq_restore(flags);
+ if (err)
+ return err;
- if (!err && sync) {
- while (!req->complete) {
+ if (sync)
+ while (!req->complete)
macii_poll();
- }
- BUG_ON(request_is_queued(req));
- }
- return err;
+ return 0;
}
/* Send an ADB request (append to request queue) */
static int macii_write(struct adb_request *req)
{
+ unsigned long flags;
+
if (req->nbytes < 2 || req->data[0] != ADB_PACKET || req->nbytes > 15) {
req->complete = 1;
return -EINVAL;
}
-
+
req->next = NULL;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
+ local_irq_save(flags);
+
if (current_req != NULL) {
last_req->next = req;
last_req = req;
} else {
current_req = req;
last_req = req;
- if (macii_state == idle) macii_start();
+ if (macii_state == idle)
+ macii_start();
}
+
+ local_irq_restore(flags);
+
return 0;
}
@@ -287,7 +273,8 @@ static int macii_autopoll(int devs)
/* bit 1 == device 1, and so on. */
autopoll_devs = devs & 0xFFFE;
- if (!autopoll_devs) return 0;
+ if (!autopoll_devs)
+ return 0;
local_irq_save(flags);
@@ -304,7 +291,8 @@ static int macii_autopoll(int devs)
return err;
}
-static inline int need_autopoll(void) {
+static inline int need_autopoll(void)
+{
/* Was the last command Talk Reg 0
* and is the target on the autopoll list?
*/
@@ -317,21 +305,17 @@ static inline int need_autopoll(void) {
/* Prod the chip without interrupts */
static void macii_poll(void)
{
- disable_irq(IRQ_MAC_ADB);
macii_interrupt(0, NULL);
- enable_irq(IRQ_MAC_ADB);
}
/* Reset the bus */
static int macii_reset_bus(void)
{
static struct adb_request req;
-
- if (request_is_queued(&req))
- return 0;
/* Command = 0, Address = ignored */
- adb_request(&req, NULL, 0, 1, ADB_BUSRESET);
+ adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_BUSRESET);
+ macii_send_request(&req, 1);
/* Don't want any more requests during the Global Reset low time. */
udelay(3000);
@@ -346,10 +330,6 @@ static void macii_start(void)
req = current_req;
- BUG_ON(req == NULL);
-
- BUG_ON(macii_state != idle);
-
/* Now send it. Be careful though, that first byte of the request
* is actually ADB_PACKET; the real data begins at index 1!
* And req->nbytes is the number of bytes of real data plus one.
@@ -375,7 +355,7 @@ static void macii_start(void)
* to be activity on the ADB bus. The chip will poll to achieve this.
*
* The basic ADB state machine was left unchanged from the original MacII code
- * by Alan Cox, which was based on the CUDA driver for PowerMac.
+ * by Alan Cox, which was based on the CUDA driver for PowerMac.
* The syntax of the ADB status lines is totally different on MacII,
* though. MacII uses the states Command -> Even -> Odd -> Even ->...-> Idle
* for sending and Idle -> Even -> Odd -> Even ->...-> Idle for receiving.
@@ -387,164 +367,166 @@ static void macii_start(void)
static irqreturn_t macii_interrupt(int irq, void *arg)
{
int x;
- static int entered;
struct adb_request *req;
+ unsigned long flags;
+
+ local_irq_save(flags);
if (!arg) {
/* Clear the SR IRQ flag when polling. */
if (via[IFR] & SR_INT)
via[IFR] = SR_INT;
- else
+ else {
+ local_irq_restore(flags);
return IRQ_NONE;
+ }
}
- BUG_ON(entered++);
-
last_status = status;
- status = via[B] & (ST_MASK|CTLR_IRQ);
+ status = via[B] & (ST_MASK | CTLR_IRQ);
switch (macii_state) {
- case idle:
- if (reading_reply) {
- reply_ptr = current_req->reply;
- } else {
- BUG_ON(current_req != NULL);
- reply_ptr = reply_buf;
- }
+ case idle:
+ if (reading_reply) {
+ reply_ptr = current_req->reply;
+ } else {
+ WARN_ON(current_req);
+ reply_ptr = reply_buf;
+ }
- x = via[SR];
+ x = via[SR];
- if ((status & CTLR_IRQ) && (x == 0xFF)) {
- /* Bus timeout without SRQ sequence:
- * data is "FF" while CTLR_IRQ is "H"
- */
- reply_len = 0;
- srq_asserted = 0;
- macii_state = read_done;
- } else {
- macii_state = reading;
- *reply_ptr = x;
- reply_len = 1;
- }
+ if ((status & CTLR_IRQ) && (x == 0xFF)) {
+ /* Bus timeout without SRQ sequence:
+ * data is "FF" while CTLR_IRQ is "H"
+ */
+ reply_len = 0;
+ srq_asserted = 0;
+ macii_state = read_done;
+ } else {
+ macii_state = reading;
+ *reply_ptr = x;
+ reply_len = 1;
+ }
- /* set ADB state = even for first data byte */
- via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
- break;
+ /* set ADB state = even for first data byte */
+ via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
+ break;
- case sending:
- req = current_req;
- if (data_index >= req->nbytes) {
- req->sent = 1;
- macii_state = idle;
-
- if (req->reply_expected) {
- reading_reply = 1;
- } else {
- req->complete = 1;
- current_req = req->next;
- if (req->done) (*req->done)(req);
-
- if (current_req)
- macii_start();
- else
- if (need_autopoll())
- macii_autopoll(autopoll_devs);
- }
+ case sending:
+ req = current_req;
+ if (data_index >= req->nbytes) {
+ req->sent = 1;
+ macii_state = idle;
- if (macii_state == idle) {
- /* reset to shift in */
- via[ACR] &= ~SR_OUT;
- x = via[SR];
- /* set ADB state idle - might get SRQ */
- via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
- }
+ if (req->reply_expected) {
+ reading_reply = 1;
} else {
- via[SR] = req->data[data_index++];
-
- if ( (via[B] & ST_MASK) == ST_CMD ) {
- /* just sent the command byte, set to EVEN */
- via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
- } else {
- /* invert state bits, toggle ODD/EVEN */
- via[B] ^= ST_MASK;
- }
- }
- break;
-
- case reading:
- x = via[SR];
- BUG_ON((status & ST_MASK) == ST_CMD ||
- (status & ST_MASK) == ST_IDLE);
-
- /* Bus timeout with SRQ sequence:
- * data is "XX FF" while CTLR_IRQ is "L L"
- * End of packet without SRQ sequence:
- * data is "XX...YY 00" while CTLR_IRQ is "L...H L"
- * End of packet SRQ sequence:
- * data is "XX...YY 00" while CTLR_IRQ is "L...L L"
- * (where XX is the first response byte and
- * YY is the last byte of valid response data.)
- */
+ req->complete = 1;
+ current_req = req->next;
+ if (req->done)
+ (*req->done)(req);
- srq_asserted = 0;
- if (!(status & CTLR_IRQ)) {
- if (x == 0xFF) {
- if (!(last_status & CTLR_IRQ)) {
- macii_state = read_done;
- reply_len = 0;
- srq_asserted = 1;
- }
- } else if (x == 0x00) {
- macii_state = read_done;
- if (!(last_status & CTLR_IRQ))
- srq_asserted = 1;
- }
+ if (current_req)
+ macii_start();
+ else if (need_autopoll())
+ macii_autopoll(autopoll_devs);
}
- if (macii_state == reading) {
- BUG_ON(reply_len > 15);
- reply_ptr++;
- *reply_ptr = x;
- reply_len++;
+ if (macii_state == idle) {
+ /* reset to shift in */
+ via[ACR] &= ~SR_OUT;
+ x = via[SR];
+ /* set ADB state idle - might get SRQ */
+ via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
}
+ } else {
+ via[SR] = req->data[data_index++];
- /* invert state bits, toggle ODD/EVEN */
- via[B] ^= ST_MASK;
- break;
+ if ((via[B] & ST_MASK) == ST_CMD) {
+ /* just sent the command byte, set to EVEN */
+ via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
+ } else {
+ /* invert state bits, toggle ODD/EVEN */
+ via[B] ^= ST_MASK;
+ }
+ }
+ break;
- case read_done:
- x = via[SR];
+ case reading:
+ x = via[SR];
+ WARN_ON((status & ST_MASK) == ST_CMD ||
+ (status & ST_MASK) == ST_IDLE);
+
+ /* Bus timeout with SRQ sequence:
+ * data is "XX FF" while CTLR_IRQ is "L L"
+ * End of packet without SRQ sequence:
+ * data is "XX...YY 00" while CTLR_IRQ is "L...H L"
+ * End of packet SRQ sequence:
+ * data is "XX...YY 00" while CTLR_IRQ is "L...L L"
+ * (where XX is the first response byte and
+ * YY is the last byte of valid response data.)
+ */
- if (reading_reply) {
- reading_reply = 0;
- req = current_req;
- req->reply_len = reply_len;
- req->complete = 1;
- current_req = req->next;
- if (req->done) (*req->done)(req);
- } else if (reply_len && autopoll_devs)
- adb_input(reply_buf, reply_len, 0);
+ srq_asserted = 0;
+ if (!(status & CTLR_IRQ)) {
+ if (x == 0xFF) {
+ if (!(last_status & CTLR_IRQ)) {
+ macii_state = read_done;
+ reply_len = 0;
+ srq_asserted = 1;
+ }
+ } else if (x == 0x00) {
+ macii_state = read_done;
+ if (!(last_status & CTLR_IRQ))
+ srq_asserted = 1;
+ }
+ }
- macii_state = idle;
+ if (macii_state == reading &&
+ reply_len < ARRAY_SIZE(reply_buf)) {
+ reply_ptr++;
+ *reply_ptr = x;
+ reply_len++;
+ }
- /* SRQ seen before, initiate poll now */
- if (srq_asserted)
- macii_queue_poll();
+ /* invert state bits, toggle ODD/EVEN */
+ via[B] ^= ST_MASK;
+ break;
- if (current_req)
- macii_start();
- else
- if (need_autopoll())
- macii_autopoll(autopoll_devs);
+ case read_done:
+ x = via[SR];
- if (macii_state == idle)
- via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
- break;
+ if (reading_reply) {
+ reading_reply = 0;
+ req = current_req;
+ req->reply_len = reply_len;
+ req->complete = 1;
+ current_req = req->next;
+ if (req->done)
+ (*req->done)(req);
+ } else if (reply_len && autopoll_devs)
+ adb_input(reply_buf, reply_len, 0);
+
+ macii_state = idle;
+
+ /* SRQ seen before, initiate poll now */
+ if (srq_asserted)
+ macii_queue_poll();
+
+ if (current_req)
+ macii_start();
+ else if (need_autopoll())
+ macii_autopoll(autopoll_devs);
+
+ if (macii_state == idle)
+ via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
+ break;
- default:
+ default:
break;
}
- entered--;
+ local_irq_restore(flags);
return IRQ_HANDLED;
}
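
For orientation, the four driver states used by the rewritten handler move roughly as follows (a simplified summary; SRQ detection and autopolling are folded into the read path):

/*
 *   idle      --(macii_start: request queued)--------> sending
 *   sending   --(last byte out, reply expected)------> idle, reading_reply set
 *   sending   --(last byte out, no reply)------------> idle
 *   idle      --(SR interrupt: first data byte)------> reading
 *               (or straight to read_done on a bus timeout)
 *   reading   --(end-of-packet 0x00 or timeout FF)---> read_done
 *   read_done --(reply delivered; SRQ may queue a
 *                poll)----------------------------> idle, or sending if
 *                                                   more requests are queued
 */
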
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index d72c450aebe5..60f57e2abf21 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1737,6 +1737,39 @@ pmu_enable_irled(int on)
pmu_wait_complete(&req);
}
+/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+#define RTC_OFFSET 2082844800
+
+time64_t pmu_get_time(void)
+{
+ struct adb_request req;
+ u32 now;
+
+ if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
+ return 0;
+ pmu_wait_complete(&req);
+ if (req.reply_len != 4)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ now = (req.reply[0] << 24) + (req.reply[1] << 16) +
+ (req.reply[2] << 8) + req.reply[3];
+ return (time64_t)now - RTC_OFFSET;
+}
+
+int pmu_set_rtc_time(struct rtc_time *tm)
+{
+ u32 now;
+ struct adb_request req;
+
+ now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
+ if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
+ now >> 24, now >> 16, now >> 8, now) < 0)
+ return -ENXIO;
+ pmu_wait_complete(&req);
+ if (req.reply_len != 0)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ return 0;
+}
+
void
pmu_restart(void)
{
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index d174c7437337..86d65462a61c 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -277,7 +277,7 @@ static int __init smu_controls_init(void)
fct = smu_fan_create(fan, 0);
if (fct == NULL) {
printk(KERN_WARNING "windfarm: Failed to create SMU "
- "RPM fan %s\n", fan->name);
+ "RPM fan %pOFn\n", fan);
continue;
}
list_add(&fct->link, &smu_fans);
@@ -296,7 +296,7 @@ static int __init smu_controls_init(void)
fct = smu_fan_create(fan, 1);
if (fct == NULL) {
printk(KERN_WARNING "windfarm: Failed to create SMU "
- "PWM fan %s\n", fan->name);
+ "PWM fan %pOFn\n", fan);
continue;
}
list_add(&fct->link, &smu_fans);
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index da7f4fc1a51d..a0f61eb853c5 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -22,14 +22,6 @@
#define VERSION "1.0"
-#define DEBUG
-
-#ifdef DEBUG
-#define DBG(args...) printk(args)
-#else
-#define DBG(args...) do { } while(0)
-#endif
-
/* If the cache is older than 800ms we'll refetch it */
#define MAX_AGE msecs_to_jiffies(800)
@@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
buf[i+2] = data[3];
buf[i+3] = data[2];
}
-#ifdef DEBUG
- DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id);
- for (i = 0; i < len; ++i)
- DBG(" %x", buf[i]);
- DBG("\n");
-#endif
+ printk(KERN_DEBUG "sat %d partition %x:", sat_id, id);
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
if (size)
*size = len;
return (struct smu_sdbp_header *) buf;
@@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat)
if (err < 0)
return err;
sat->last_read = jiffies;
+
#ifdef LOTSA_DEBUG
{
int i;
- DBG(KERN_DEBUG "wf_sat_get: data is");
- for (i = 0; i < 16; ++i)
- DBG(" %.2x", sat->cache[i]);
- DBG("\n");
+ printk(KERN_DEBUG "wf_sat_get: data is");
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+ 16, 1, sat->cache, 16, false);
}
#endif
return 0;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8b8c123cae66..3db222509e44 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -215,17 +215,6 @@ config BLK_DEV_DM
If unsure, say N.
-config DM_MQ_DEFAULT
- bool "request-based DM: use blk-mq I/O path by default"
- depends on BLK_DEV_DM
- ---help---
- This option enables the blk-mq based I/O path for request-based
- DM devices by default. With the option the dm_mod.use_blk_mq
- module/boot option defaults to Y, without it to N, but it can
- still be overriden either way.
-
- If unsure say N.
-
config DM_DEBUG
bool "Device mapper debugging support"
depends on BLK_DEV_DM
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 1b5b9ad9e492..b61aac00ff40 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1200,7 +1200,7 @@ static void queue_demotion(struct smq_policy *mq)
struct policy_work work;
struct entry *e;
- if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
+ if (WARN_ON_ONCE(!mq->migrations_allowed))
return;
e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7d480c930eaf..224d44503a06 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -112,18 +112,8 @@ struct mapped_device {
struct dm_stats stats;
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
-
- /* for request-based merge heuristic in dm_request_fn() */
- unsigned seq_rq_merge_deadline_usecs;
- int last_rq_rw;
- sector_t last_rq_pos;
- ktime_t last_rq_start_time;
-
/* for blk-mq request-based DM support */
struct blk_mq_tag_set *tag_set;
- bool use_blk_mq:1;
bool init_tio_pdu:1;
struct srcu_struct io_barrier;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0481223b1deb..b8eec515a003 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2661,6 +2661,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
+ const char *devname = dm_table_device_name(ti->table);
int key_size;
unsigned int align_mask;
unsigned long long tmpll;
@@ -2806,18 +2807,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s",
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ 1, devname);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ 1, devname);
else
- cc->crypt_queue = alloc_workqueue("kcryptd",
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
- num_online_cpus());
+ num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
@@ -2826,7 +2831,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&cc->write_thread_lock);
cc->write_tree = RB_ROOT;
- cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
+ cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
if (IS_ERR(cc->write_thread)) {
ret = PTR_ERR(cc->write_thread);
cc->write_thread = NULL;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 32aabe27b37c..3cb97fa4c11d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -315,10 +315,6 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
if (bio_op(bio) == REQ_OP_ZONE_RESET)
goto map_bio;
- /* We need to remap reported zones, so remember the BIO iter */
- if (bio_op(bio) == REQ_OP_ZONE_REPORT)
- goto map_bio;
-
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
@@ -380,11 +376,6 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
if (bio_op(bio) == REQ_OP_ZONE_RESET)
return DM_ENDIO_DONE;
- if (bio_op(bio) == REQ_OP_ZONE_REPORT) {
- dm_remap_zone_report(ti, bio, fc->start);
- return DM_ENDIO_DONE;
- }
-
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
@@ -457,6 +448,26 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int flakey_report_zones(struct dm_target *ti, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+ struct flakey_c *fc = ti->private;
+ int ret;
+
+ /* Get the zone report from the backing device and remap it */
+ ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
+ zones, nr_zones, gfp_mask);
+ if (ret != 0)
+ return ret;
+
+ if (*nr_zones)
+ dm_remap_zone_report(ti, fc->start, zones, nr_zones);
+ return 0;
+}
+#endif
+
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
struct flakey_c *fc = ti->private;
@@ -469,6 +480,7 @@ static struct target_type flakey_target = {
.version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
+ .report_zones = flakey_report_zones,
#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index b810ea77e6b1..f666778ad237 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1720,8 +1720,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
}
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
- int ioctl_flags,
- struct dm_ioctl **param, int *param_flags)
+ int ioctl_flags, struct dm_ioctl **param, int *param_flags)
{
struct dm_ioctl *dmi;
int secure_data;
@@ -1762,18 +1761,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
*param_flags |= DM_PARAMS_MALLOC;
- if (copy_from_user(dmi, user, param_kernel->data_size))
- goto bad;
+ /* Copy from param_kernel (which was already copied from user) */
+ memcpy(dmi, param_kernel, minimum_data_size);
-data_copied:
- /*
- * Abort if something changed the ioctl data while it was being copied.
- */
- if (dmi->data_size != param_kernel->data_size) {
- DMERR("rejecting ioctl: data size modified while processing parameters");
+ if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
+ param_kernel->data_size - minimum_data_size))
goto bad;
- }
-
+data_copied:
/* Wipe the user buffer so we do not return it to userspace */
if (secure_data && clear_user(user, param_kernel->data_size))
goto bad;
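
The rework closes the classic double-fetch window: the header is copied from userspace and validated once, reused verbatim, and only the tail beyond it is fetched again, so a concurrent writer can no longer change data_size between validation and use. The general shape of the pattern, as a sketch (struct hdr and MAX_SIZE are illustrative, not the dm-ioctl definitions):

#include <linux/mm.h>
#include <linux/uaccess.h>

#define MAX_SIZE (1 << 20)	/* illustrative upper bound */

struct hdr {
	u32 size;	/* total payload size, header included */
};

static int fetch_payload(struct hdr __user *u, void **out)
{
	struct hdr kh;
	void *buf;

	if (copy_from_user(&kh, u, sizeof(kh)))
		return -EFAULT;
	if (kh.size < sizeof(kh) || kh.size > MAX_SIZE)
		return -EINVAL;

	buf = kvmalloc(kh.size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, &kh, sizeof(kh));	/* reuse the validated header */
	if (copy_from_user((char *)buf + sizeof(kh),
			   (char __user *)u + sizeof(kh),
			   kh.size - sizeof(kh))) {
		kvfree(buf);
		return -EFAULT;
	}
	*out = buf;
	return 0;
}
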
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 2f7c44a006c4..8d7ddee6ac4d 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -102,19 +102,6 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-#ifdef CONFIG_BLK_DEV_ZONED
-static int linear_end_io(struct dm_target *ti, struct bio *bio,
- blk_status_t *error)
-{
- struct linear_c *lc = ti->private;
-
- if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT)
- dm_remap_zone_report(ti, bio, lc->start);
-
- return DM_ENDIO_DONE;
-}
-#endif
-
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
@@ -148,6 +135,26 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int linear_report_zones(struct dm_target *ti, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+ struct linear_c *lc = (struct linear_c *) ti->private;
+ int ret;
+
+ /* Get the zone report from the backing device and remap it */
+ ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
+ zones, nr_zones, gfp_mask);
+ if (ret != 0)
+ return ret;
+
+ if (*nr_zones)
+ dm_remap_zone_report(ti, lc->start, zones, nr_zones);
+ return 0;
+}
+#endif
+
static int linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -211,8 +218,8 @@ static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
- .end_io = linear_end_io,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+ .report_zones = linear_report_zones,
#else
.features = DM_TARGET_PASSES_INTEGRITY,
#endif
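
Both targets now answer zone reports through the new report_zones method instead of intercepting REQ_OP_ZONE_REPORT BIOs: they fetch the report from the backing device with blkdev_report_zones() and let dm_remap_zone_report() fix up the zone coordinates. For reference, a caller-side sketch of the same API iterating a whole device (the buffer size and capacity handling are illustrative):

#include <linux/blkdev.h>
#include <linux/kernel.h>

static int walk_zones(struct block_device *bdev, sector_t capacity)
{
	struct blk_zone zones[16];
	sector_t sector = 0;

	while (sector < capacity) {
		unsigned int nr = ARRAY_SIZE(zones);
		int ret = blkdev_report_zones(bdev, sector, zones, &nr,
					      GFP_KERNEL);

		if (ret)
			return ret;
		if (!nr)
			break;		/* past the last zone */
		/* ... inspect zones[0 .. nr - 1] here ... */
		sector = zones[nr - 1].start + zones[nr - 1].len;
	}
	return 0;
}
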
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 419362c2d8ac..d6a66921daf4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -203,14 +203,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
if (m->queue_mode == DM_TYPE_NONE) {
- /*
- * Default to request-based.
- */
- if (dm_use_blk_mq(dm_table_get_md(ti->table)))
- m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
- else
- m->queue_mode = DM_TYPE_REQUEST_BASED;
-
+ m->queue_mode = DM_TYPE_REQUEST_BASED;
} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
/*
@@ -537,10 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
* get the queue busy feedback (via BLK_STS_RESOURCE),
* otherwise I/O merging can suffer.
*/
- if (q->mq_ops)
- return DM_MAPIO_REQUEUE;
- else
- return DM_MAPIO_DELAY_REQUEUE;
+ return DM_MAPIO_REQUEUE;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
@@ -668,7 +658,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
static void process_queued_io_list(struct multipath *m)
{
- if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+ if (m->queue_mode == DM_TYPE_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
else if (m->queue_mode == DM_TYPE_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
@@ -1089,10 +1079,9 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
- else if (!strcasecmp(queue_mode_name, "rq"))
+ else if (!strcasecmp(queue_mode_name, "rq") ||
+ !strcasecmp(queue_mode_name, "mq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
- else if (!strcasecmp(queue_mode_name, "mq"))
- m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else {
ti->error = "Unknown 'queue_mode' requested";
r = -EINVAL;
@@ -1726,9 +1715,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
- case DM_TYPE_MQ_REQUEST_BASED:
- DMEMIT("queue_mode mq ");
- break;
default:
WARN_ON_ONCE(true);
break;
@@ -1972,7 +1958,7 @@ static int multipath_busy(struct dm_target *ti)
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
- return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
+ return (m->queue_mode != DM_TYPE_REQUEST_BASED);
/* Guess which priority_group will be used at next mapping time */
pg = READ_ONCE(m->current_pg);
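With DM_TYPE_MQ_REQUEST_BASED gone, "mq" survives only as a compatibility alias for "rq" in parse_features(). A hedged sketch of that alias pattern in isolation (parse_queue_mode() is an illustrative name):

	static int parse_queue_mode(const char *name, enum dm_queue_mode *mode)
	{
		if (!strcasecmp(name, "bio"))
			*mode = DM_TYPE_BIO_BASED;
		else if (!strcasecmp(name, "rq") || !strcasecmp(name, "mq"))
			*mode = DM_TYPE_REQUEST_BASED;	/* "mq" kept so old tables still load */
		else
			return -EINVAL;
		return 0;
	}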
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c44925e4e481..e1dd1622a290 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2475,7 +2475,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
}
/* Enable bitmap creation for RAID levels != 0 */
- mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
+ mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8dd298..7cd36e4d1310 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,19 +23,6 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
-
-bool dm_use_blk_mq_default(void)
-{
- return use_blk_mq;
-}
-
-bool dm_use_blk_mq(struct mapped_device *md)
-{
- return md->use_blk_mq;
-}
-EXPORT_SYMBOL_GPL(dm_use_blk_mq);
-
unsigned dm_get_reserved_rq_based_ios(void)
{
return __dm_get_module_param(&reserved_rq_based_ios,
@@ -59,41 +46,13 @@ int dm_request_based(struct mapped_device *md)
return queue_is_rq_based(md->queue);
}
-static void dm_old_start_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_mq_start_queue(struct request_queue *q)
+void dm_start_queue(struct request_queue *q)
{
blk_mq_unquiesce_queue(q);
blk_mq_kick_requeue_list(q);
}
-void dm_start_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_start_queue(q);
- else
- dm_mq_start_queue(q);
-}
-
-static void dm_old_stop_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (!blk_queue_stopped(q))
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_mq_stop_queue(struct request_queue *q)
+void dm_stop_queue(struct request_queue *q)
{
if (blk_mq_queue_stopped(q))
return;
@@ -101,14 +60,6 @@ static void dm_mq_stop_queue(struct request_queue *q)
blk_mq_quiesce_queue(q);
}
-void dm_stop_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_stop_queue(q);
- else
- dm_mq_stop_queue(q);
-}
-
/*
* Partial completion handling for request-based dm
*/
@@ -179,9 +130,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
- struct request_queue *q = md->queue;
- unsigned long flags;
-
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
@@ -189,18 +137,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
wake_up(&md->wait);
/*
- * Run this off this callpath, as drivers could invoke end_io while
- * inside their request_fn (and holding the queue lock). Calling
- * back into ->request_fn() could deadlock attempting to grab the
- * queue lock again.
- */
- if (!q->mq_ops && run_queue) {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_run_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
-
- /*
* dm_put() must be at the end of this function. See the comment above
*/
dm_put(md);
@@ -222,27 +158,10 @@ static void dm_end_request(struct request *clone, blk_status_t error)
tio->ti->type->release_clone_rq(clone);
rq_end_stats(md, rq);
- if (!rq->q->mq_ops)
- blk_end_request_all(rq, error);
- else
- blk_mq_end_request(rq, error);
+ blk_mq_end_request(rq, error);
rq_completed(md, rw, true);
}
-/*
- * Requeue the original request of a clone.
- */
-static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_requeue_request(q, rq);
- blk_delay_queue(q, delay_ms);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
blk_mq_delay_kick_requeue_list(q, msecs);
@@ -273,11 +192,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
tio->ti->type->release_clone_rq(tio->clone);
}
- if (!rq->q->mq_ops)
- dm_old_requeue_request(rq, delay_ms);
- else
- dm_mq_delay_requeue_request(rq, delay_ms);
-
+ dm_mq_delay_requeue_request(rq, delay_ms);
rq_completed(md, rw, false);
}
@@ -340,10 +255,7 @@ static void dm_softirq_done(struct request *rq)
rq_end_stats(md, rq);
rw = rq_data_dir(rq);
- if (!rq->q->mq_ops)
- blk_end_request_all(rq, tio->error);
- else
- blk_mq_end_request(rq, tio->error);
+ blk_mq_end_request(rq, tio->error);
rq_completed(md, rw, false);
return;
}
@@ -363,17 +275,14 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- if (!rq->q->mq_ops)
- blk_complete_request(rq);
- else
- blk_mq_complete_request(rq);
+ blk_mq_complete_request(rq);
}
/*
* Complete the not-mapped clone and the original request with the error status
* through softirq context.
* Target's rq_end_io() function isn't called.
- * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
+ * This may be used when the target's clone_and_map_rq() function fails.
*/
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
@@ -381,21 +290,10 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
dm_complete_request(rq, error);
}
-/*
- * Called with the clone's queue lock held (in the case of .request_fn)
- */
static void end_clone_request(struct request *clone, blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
- /*
- * Actual request completion is done in a softirq context which doesn't
- * hold the clone's queue lock. Otherwise, deadlock could occur because:
- * - another request may be submitted by the upper level driver
- * of the stacking during the completion
- * - the submission which requires queue lock may be done
- * against this clone's queue
- */
dm_complete_request(tio->orig, error);
}
@@ -446,8 +344,6 @@ static int setup_clone(struct request *clone, struct request *rq,
return 0;
}
-static void map_tio_request(struct kthread_work *work);
-
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
struct mapped_device *md)
{
@@ -464,8 +360,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
*/
if (!md->init_tio_pdu)
memset(&tio->info, 0, sizeof(tio->info));
- if (md->kworker_task)
- kthread_init_work(&tio->work, map_tio_request);
}
/*
@@ -504,10 +398,7 @@ check_again:
blk_rq_unprep_clone(clone);
tio->ti->type->release_clone_rq(clone);
tio->clone = NULL;
- if (!rq->q->mq_ops)
- r = DM_MAPIO_DELAY_REQUEUE;
- else
- r = DM_MAPIO_REQUEUE;
+ r = DM_MAPIO_REQUEUE;
goto check_again;
}
break;
@@ -530,20 +421,23 @@ check_again:
return r;
}
+/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+ return sprintf(buf, "%u\n", 0);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count)
+{
+ return count;
+}
+
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
- if (!orig->q->mq_ops)
- blk_start_request(orig);
- else
- blk_mq_start_request(orig);
+ blk_mq_start_request(orig);
atomic_inc(&md->pending[rq_data_dir(orig)]);
- if (md->seq_rq_merge_deadline_usecs) {
- md->last_rq_pos = rq_end_sector(orig);
- md->last_rq_rw = rq_data_dir(orig);
- md->last_rq_start_time = ktime_get();
- }
-
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
tio->duration_jiffies = jiffies;
@@ -563,8 +457,10 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
dm_get(md);
}
-static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
+ struct mapped_device *md = set->driver_data;
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
/*
@@ -581,163 +477,6 @@ static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
return 0;
}
-static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
-{
- return __dm_rq_init_rq(q->rq_alloc_data, rq);
-}
-
-static void map_tio_request(struct kthread_work *work)
-{
- struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
-
- if (map_request(tio) == DM_MAPIO_REQUEUE)
- dm_requeue_original_request(tio, false);
-}
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
-{
- return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
-}
-
-#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
- const char *buf, size_t count)
-{
- unsigned deadline;
-
- if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
- return count;
-
- if (kstrtouint(buf, 10, &deadline))
- return -EINVAL;
-
- if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
- deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
-
- md->seq_rq_merge_deadline_usecs = deadline;
-
- return count;
-}
-
-static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
-{
- ktime_t kt_deadline;
-
- if (!md->seq_rq_merge_deadline_usecs)
- return false;
-
- kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
- kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
-
- return !ktime_after(ktime_get(), kt_deadline);
-}
-
-/*
- * q->request_fn for old request-based dm.
- * Called with the queue lock held.
- */
-static void dm_old_request_fn(struct request_queue *q)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_target *ti = md->immutable_target;
- struct request *rq;
- struct dm_rq_target_io *tio;
- sector_t pos = 0;
-
- if (unlikely(!ti)) {
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-
- if (unlikely(!map)) {
- dm_put_live_table(md, srcu_idx);
- return;
- }
- ti = dm_table_find_target(map, pos);
- dm_put_live_table(md, srcu_idx);
- }
-
- /*
- * For suspend, check blk_queue_stopped() and increment
- * ->pending within a single queue_lock not to increment the
- * number of in-flight I/Os after the queue is stopped in
- * dm_suspend().
- */
- while (!blk_queue_stopped(q)) {
- rq = blk_peek_request(q);
- if (!rq)
- return;
-
- /* always use block 0 to find the target for flushes for now */
- pos = 0;
- if (req_op(rq) != REQ_OP_FLUSH)
- pos = blk_rq_pos(rq);
-
- if ((dm_old_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
- md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
- (ti->type->busy && ti->type->busy(ti))) {
- blk_delay_queue(q, 10);
- return;
- }
-
- dm_start_request(md, rq);
-
- tio = tio_from_request(rq);
- init_tio(tio, rq, md);
- /* Establish tio->ti before queuing work (map_tio_request) */
- tio->ti = ti;
- kthread_queue_work(&md->kworker, &tio->work);
- BUG_ON(!irqs_disabled());
- }
-}
-
-/*
- * Fully initialize a .request_fn request-based queue.
- */
-int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
-{
- struct dm_target *immutable_tgt;
-
- /* Fully initialize the queue */
- md->queue->cmd_size = sizeof(struct dm_rq_target_io);
- md->queue->rq_alloc_data = md;
- md->queue->request_fn = dm_old_request_fn;
- md->queue->init_rq_fn = dm_rq_init_rq;
-
- immutable_tgt = dm_table_get_immutable_target(t);
- if (immutable_tgt && immutable_tgt->per_io_data_size) {
- /* any target-specific per-io data is immediately after the tio */
- md->queue->cmd_size += immutable_tgt->per_io_data_size;
- md->init_tio_pdu = true;
- }
- if (blk_init_allocated_queue(md->queue) < 0)
- return -EINVAL;
-
- /* disable dm_old_request_fn's merge heuristic by default */
- md->seq_rq_merge_deadline_usecs = 0;
-
- blk_queue_softirq_done(md->queue, dm_softirq_done);
-
- /* Initialize the request-based DM worker thread */
- kthread_init_worker(&md->kworker);
- md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
- "kdmwork-%s", dm_device_name(md));
- if (IS_ERR(md->kworker_task)) {
- int error = PTR_ERR(md->kworker_task);
- md->kworker_task = NULL;
- return error;
- }
-
- return 0;
-}
-
-static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- return __dm_rq_init_rq(set->driver_data, rq);
-}
-
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -790,11 +529,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
struct dm_target *immutable_tgt;
int err;
- if (!dm_table_all_blk_mq_devices(t)) {
- DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
- return -EINVAL;
- }
-
md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
if (!md->tag_set)
return -ENOMEM;
@@ -845,6 +579,8 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+/* Unused, but preserved for userspace compatibility */
+static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
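With the legacy .request_fn path removed, suspending and resuming a request-based device reduces to the blk-mq quiesce primitives, as in the dm_start_queue()/dm_stop_queue() hunks above. The same pattern in isolation (my_stop_queue/my_start_queue are illustrative names):

	static void my_stop_queue(struct request_queue *q)
	{
		if (blk_mq_queue_stopped(q))
			return;
		blk_mq_quiesce_queue(q);	/* drains in-flight .queue_rq calls */
	}

	static void my_start_queue(struct request_queue *q)
	{
		blk_mq_unquiesce_queue(q);
		blk_mq_kick_requeue_list(q);	/* re-dispatch requests parked for requeue */
	}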
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index f43c45460aac..b39245545229 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -46,10 +46,6 @@ struct dm_rq_clone_bio_info {
struct bio clone;
};
-bool dm_use_blk_mq_default(void);
-bool dm_use_blk_mq(struct mapped_device *md);
-
-int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index c209b8a19b84..a05fcd50e1b9 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -92,7 +92,8 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
{
- sprintf(buf, "%d\n", dm_use_blk_mq(md));
+ /* Purely for userspace compatibility */
+ sprintf(buf, "%d\n", true);
return strlen(buf);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3d0e2c198f06..9038c302d5c2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -47,7 +47,6 @@ struct dm_table {
bool integrity_supported:1;
bool singleton:1;
- bool all_blk_mq:1;
unsigned integrity_added:1;
/*
@@ -872,8 +871,7 @@ static bool __table_type_bio_based(enum dm_queue_mode table_type)
static bool __table_type_request_based(enum dm_queue_mode table_type)
{
- return (table_type == DM_TYPE_REQUEST_BASED ||
- table_type == DM_TYPE_MQ_REQUEST_BASED);
+ return table_type == DM_TYPE_REQUEST_BASED;
}
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
@@ -999,10 +997,6 @@ verify_bio_based:
BUG_ON(!request_based); /* No targets in this table */
- /*
- * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
- * having a compatible target use dm_table_set_type.
- */
t->type = DM_TYPE_REQUEST_BASED;
verify_rq_based:
@@ -1022,11 +1016,9 @@ verify_rq_based:
int srcu_idx;
struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
- /* inherit live table's type and all_blk_mq */
- if (live_table) {
+ /* inherit live table's type */
+ if (live_table)
t->type = live_table->type;
- t->all_blk_mq = live_table->all_blk_mq;
- }
dm_put_live_table(t->md, srcu_idx);
return 0;
}
@@ -1046,17 +1038,10 @@ verify_rq_based:
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
- if (v.sq_count && v.mq_count) {
+ if (v.sq_count > 0) {
DMERR("table load rejected: not all devices are blk-mq request-stackable");
return -EINVAL;
}
- t->all_blk_mq = v.mq_count > 0;
-
- if (!t->all_blk_mq &&
- (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
- DMERR("table load rejected: all devices are not blk-mq request-stackable");
- return -EINVAL;
- }
return 0;
}
@@ -1105,11 +1090,6 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
-bool dm_table_all_blk_mq_devices(struct dm_table *t)
-{
- return t->all_blk_mq;
-}
-
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
@@ -1937,6 +1917,16 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+
+ /*
+ * For a zoned target, the number of zones should be updated for the
+ * correct value to be exposed in sysfs queue/nr_zones. For a BIO based
+ * target, this is all that is needed. For a request based target, the
+ * queue zone bitmaps must also be updated.
+ * Use blk_revalidate_disk_zones() to handle this.
+ */
+ if (blk_queue_is_zoned(q))
+ blk_revalidate_disk_zones(t->md->disk);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -2079,26 +2069,24 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
}
EXPORT_SYMBOL(dm_table_get_md);
+const char *dm_table_device_name(struct dm_table *t)
+{
+ return dm_device_name(t->md);
+}
+EXPORT_SYMBOL_GPL(dm_table_device_name);
+
void dm_table_run_md_queue_async(struct dm_table *t)
{
struct mapped_device *md;
struct request_queue *queue;
- unsigned long flags;
if (!dm_table_request_based(t))
return;
md = dm_table_get_md(t);
queue = dm_get_md_queue(md);
- if (queue) {
- if (queue->mq_ops)
- blk_mq_run_hw_queues(queue, true);
- else {
- spin_lock_irqsave(queue->queue_lock, flags);
- blk_run_queue_async(queue);
- spin_unlock_irqrestore(queue->queue_lock, flags);
- }
- }
+ if (queue)
+ blk_mq_run_hw_queues(queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);
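The new dm_table_device_name() export spares callers the dm_table_get_md() + dm_device_name() two-step when all they need is a name for logging. A hedged usage sketch (the wrapper and message text are illustrative):

	static void complain(struct dm_table *t)
	{
		DMERR("%s: table is not request based",
		      dm_table_device_name(t));
	}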
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index aaf1ad481ee8..0bd8d498b3b9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -325,7 +325,7 @@ struct thin_c {
* Ensures the thin is not destroyed until the worker has finished
* iterating the active_thins list.
*/
- atomic_t refcount;
+ refcount_t refcount;
struct completion can_destroy;
};
@@ -4044,12 +4044,12 @@ static struct target_type pool_target = {
*--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
- atomic_inc(&tc->refcount);
+ refcount_inc(&tc->refcount);
}
static void thin_put(struct thin_c *tc)
{
- if (atomic_dec_and_test(&tc->refcount))
+ if (refcount_dec_and_test(&tc->refcount))
complete(&tc->can_destroy);
}
@@ -4193,7 +4193,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto bad;
}
- atomic_set(&tc->refcount, 1);
+ refcount_set(&tc->refcount, 1);
init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
spin_unlock_irqrestore(&tc->pool->lock, flags);
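The dm-thin hunks above convert a pure lifetime counter from atomic_t to refcount_t, which traps increment-from-zero and overflow. The generic shape of such a conversion, sketched with illustrative names:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct obj {
		refcount_t ref;
	};

	static void obj_get(struct obj *o)
	{
		refcount_inc(&o->ref);		/* WARNs if the count was already 0 */
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->ref))
			kfree(o);		/* last reference dropped */
	}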
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5f1f80d424dd..2d50eec94cd7 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -350,10 +350,7 @@ static struct wc_memory_superblock *sb(struct dm_writecache *wc)
static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
- if (is_power_of_2(sizeof(struct wc_entry)) && 0)
- return &sb(wc)->entries[e - wc->entries];
- else
- return &sb(wc)->entries[e->index];
+ return &sb(wc)->entries[e->index];
}
static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 969954915566..fa68336560c3 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -99,7 +99,7 @@ struct dmz_mblock {
struct rb_node node;
struct list_head link;
sector_t no;
- atomic_t ref;
+ unsigned int ref;
unsigned long state;
struct page *page;
void *data;
@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
RB_CLEAR_NODE(&mblk->node);
INIT_LIST_HEAD(&mblk->link);
- atomic_set(&mblk->ref, 0);
+ mblk->ref = 0;
mblk->state = 0;
mblk->no = mblk_no;
mblk->data = page_address(mblk->page);
@@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
}
/*
- * Lookup a metadata block in the rbtree.
+ * Look up a metadata block in the rbtree. If the block is found, increment
+ * its reference count.
*/
-static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
- sector_t mblk_no)
+static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
+ sector_t mblk_no)
{
struct rb_root *root = &zmd->mblk_rbtree;
struct rb_node *node = root->rb_node;
@@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
while (node) {
mblk = container_of(node, struct dmz_mblock, node);
- if (mblk->no == mblk_no)
+ if (mblk->no == mblk_no) {
+ /*
+ * If this is the first reference to the block,
+ * remove it from the LRU list.
+ */
+ mblk->ref++;
+ if (mblk->ref == 1 &&
+ !test_bit(DMZ_META_DIRTY, &mblk->state))
+ list_del_init(&mblk->link);
return mblk;
+ }
node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
}
@@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct bio *bio)
}
/*
- * Read a metadata block from disk.
+ * Read an uncached metadata block from disk and add it to the cache.
*/
-static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
- sector_t mblk_no)
+static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
+ sector_t mblk_no)
{
- struct dmz_mblock *mblk;
+ struct dmz_mblock *mblk, *m;
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
struct bio *bio;
- /* Get block and insert it */
+ /* Get a new block and a BIO to read it */
mblk = dmz_alloc_mblock(zmd, mblk_no);
if (!mblk)
return NULL;
- spin_lock(&zmd->mblk_lock);
- atomic_inc(&mblk->ref);
- set_bit(DMZ_META_READING, &mblk->state);
- dmz_insert_mblock(zmd, mblk);
- spin_unlock(&zmd->mblk_lock);
-
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
dmz_free_mblock(zmd, mblk);
return NULL;
}
+ spin_lock(&zmd->mblk_lock);
+
+ /*
+ * Make sure that another context did not start reading
+ * the block already.
+ */
+ m = dmz_get_mblock_fast(zmd, mblk_no);
+ if (m) {
+ spin_unlock(&zmd->mblk_lock);
+ dmz_free_mblock(zmd, mblk);
+ bio_put(bio);
+ return m;
+ }
+
+ mblk->ref++;
+ set_bit(DMZ_META_READING, &mblk->state);
+ dmz_insert_mblock(zmd, mblk);
+
+ spin_unlock(&zmd->mblk_lock);
+
+ /* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk;
@@ -484,7 +509,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
spin_lock(&zmd->mblk_lock);
- if (atomic_dec_and_test(&mblk->ref)) {
+ mblk->ref--;
+ if (mblk->ref == 0) {
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk);
@@ -508,18 +534,12 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
/* Check rbtree */
spin_lock(&zmd->mblk_lock);
- mblk = dmz_lookup_mblock(zmd, mblk_no);
- if (mblk) {
- /* Cache hit: remove block from LRU list */
- if (atomic_inc_return(&mblk->ref) == 1 &&
- !test_bit(DMZ_META_DIRTY, &mblk->state))
- list_del_init(&mblk->link);
- }
+ mblk = dmz_get_mblock_fast(zmd, mblk_no);
spin_unlock(&zmd->mblk_lock);
if (!mblk) {
/* Cache miss: read the block from disk */
- mblk = dmz_fetch_mblock(zmd, mblk_no);
+ mblk = dmz_get_mblock_slow(zmd, mblk_no);
if (!mblk)
return ERR_PTR(-ENOMEM);
}
@@ -753,7 +773,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
spin_lock(&zmd->mblk_lock);
clear_bit(DMZ_META_DIRTY, &mblk->state);
- if (atomic_read(&mblk->ref) == 0)
+ if (mblk->ref == 0)
list_add_tail(&mblk->link, &zmd->mblk_lru_list);
spin_unlock(&zmd->mblk_lock);
}
@@ -2308,7 +2328,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
mblk = list_first_entry(&zmd->mblk_dirty_list,
struct dmz_mblock, link);
dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
- (u64)mblk->no, atomic_read(&mblk->ref));
+ (u64)mblk->no, mblk->ref);
list_del_init(&mblk->link);
rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk);
@@ -2326,8 +2346,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
root = &zmd->mblk_rbtree;
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
- (u64)mblk->no, atomic_read(&mblk->ref));
- atomic_set(&mblk->ref, 0);
+ (u64)mblk->no, mblk->ref);
+ mblk->ref = 0;
dmz_free_mblock(zmd, mblk);
}
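The metadata block refcount above drops atomic_t for a plain unsigned int because every access already happens under zmd->mblk_lock, while the re-check in dmz_get_mblock_slow() keeps a racing reader from instantiating the block twice. The locking rule in isolation, with illustrative types:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct cache {
		spinlock_t lock;
	};

	struct entry {
		unsigned int ref;	/* protected by cache->lock, no atomic needed */
	};

	static void entry_get(struct cache *c, struct entry *e)
	{
		spin_lock(&c->lock);
		e->ref++;
		spin_unlock(&c->lock);
	}

	static bool entry_put(struct cache *c, struct entry *e)
	{
		bool last;

		spin_lock(&c->lock);
		last = (--e->ref == 0);
		spin_unlock(&c->lock);
		return last;		/* caller frees the entry when true */
	}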
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index a44183ff4be0..981154e59461 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -19,7 +19,7 @@ struct dmz_bioctx {
struct dmz_target *target;
struct dm_zone *zone;
struct bio *bio;
- atomic_t ref;
+ refcount_t ref;
blk_status_t status;
};
@@ -28,7 +28,7 @@ struct dmz_bioctx {
*/
struct dm_chunk_work {
struct work_struct work;
- atomic_t refcount;
+ refcount_t refcount;
struct dmz_target *target;
unsigned int chunk;
struct bio_list bio_list;
@@ -115,7 +115,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks == dmz_bio_blocks(bio)) {
/* Setup and submit the BIO */
bio->bi_iter.bi_sector = sector;
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(bio);
return 0;
}
@@ -134,7 +134,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
bio_advance(bio, clone->bi_iter.bi_size);
/* Submit the clone */
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(clone);
return 0;
@@ -240,7 +240,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
/* Setup and submit the BIO */
bio_set_dev(bio, dmz->dev->bdev);
bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(bio);
if (dmz_is_seq(zone))
@@ -456,7 +456,7 @@ out:
*/
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
- atomic_inc(&cw->refcount);
+ refcount_inc(&cw->refcount);
}
/*
@@ -465,7 +465,7 @@ static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
*/
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
- if (atomic_dec_and_test(&cw->refcount)) {
+ if (refcount_dec_and_test(&cw->refcount)) {
WARN_ON(!bio_list_empty(&cw->bio_list));
radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
kfree(cw);
@@ -546,7 +546,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
goto out;
INIT_WORK(&cw->work, dmz_chunk_work);
- atomic_set(&cw->refcount, 0);
+ refcount_set(&cw->refcount, 0);
cw->target = dmz;
cw->chunk = chunk;
bio_list_init(&cw->bio_list);
@@ -599,7 +599,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->target = dmz;
bioctx->zone = NULL;
bioctx->bio = bio;
- atomic_set(&bioctx->ref, 1);
+ refcount_set(&bioctx->ref, 1);
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
@@ -633,7 +633,7 @@ static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error
if (bioctx->status == BLK_STS_OK && *error)
bioctx->status = *error;
- if (!atomic_dec_and_test(&bioctx->ref))
+ if (!refcount_dec_and_test(&bioctx->ref))
return DM_ENDIO_INCOMPLETE;
/* Done */
@@ -702,8 +702,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
- dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
- >> dev->zone_nr_sectors_shift;
+ dev->nr_zones = blkdev_nr_zones(dev->bdev);
dmz->dev = dev;
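dmz_get_zoned_device() now takes the zone count from blkdev_nr_zones() instead of open-coding (capacity + zone_nr_sectors - 1) >> zone_nr_sectors_shift. Both count a trailing runt zone: for example, with an illustrative capacity of 10,500,000 sectors and 524,288-sector (2^19) zones, (10500000 + 524287) >> 19 = 21 zones, the last one only partially used. Delegating to the block layer keeps dm-zoned and the queue's own zone accounting from drifting apart.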
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 45abb54037fc..c510179a7f84 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,6 +458,57 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
+static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct mapped_device *md = disk->private_data;
+ struct dm_target *tgt;
+ struct dm_table *map;
+ int srcu_idx, ret;
+
+ if (dm_suspended_md(md))
+ return -EAGAIN;
+
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!map)
+ return -EIO;
+
+ tgt = dm_table_find_target(map, sector);
+ if (!dm_target_is_valid(tgt)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * If we are executing this, we already know that the block device
+ * is a zoned device and so each target should have support for that
+ * type of drive. A missing report_zones method means that the target
+ * driver has a problem.
+ */
+ if (WARN_ON(!tgt->type->report_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * blkdev_report_zones() will loop and call this again to cover all the
+ * zones of the target, eventually moving on to the next target.
+ * So there is no need to loop here trying to fill the entire array
+ * of zones.
+ */
+ ret = tgt->type->report_zones(tgt, sector, zones,
+ nr_zones, gfp_mask);
+
+out:
+ dm_put_live_table(md, srcu_idx);
+ return ret;
+#else
+ return -ENOTSUPP;
+#endif
+}
+
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
__acquires(md->io_barrier)
@@ -1155,93 +1206,49 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
- * The zone descriptors obtained with a zone report indicate zone positions
- * within the target backing device, regardless of that device is a partition
- * and regardless of the target mapping start sector on the device or partition.
- * The zone descriptors start sector and write pointer position must be adjusted
- * to match their relative position within the dm device.
- * A target may call dm_remap_zone_report() after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
- * backing device.
+ * The zone descriptors obtained with a zone report indicate
+ * zone positions within the underlying device of the target. The zone
+ * descriptors must be remapped to match their position within the dm device.
+ * The calling target should obtain the zone information using
+ * blkdev_report_zones() so that remapping for the partition offset is
+ * already handled.
*/
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+ struct blk_zone *zones, unsigned int *nr_zones)
{
#ifdef CONFIG_BLK_DEV_ZONED
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct bio *report_bio = tio->io->orig_bio;
- struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
- unsigned int nr_rep = 0;
- unsigned int ofst;
- sector_t part_offset;
- struct bio_vec bvec;
- struct bvec_iter iter;
- void *addr;
-
- if (bio->bi_status)
- return;
-
- /*
- * bio sector was incremented by the request size on completion. Taking
- * into account the original request sector, the target start offset on
- * the backing device and the target mapping offset (ti->begin), the
- * start sector of the backing device. The partition offset is always 0
- * if the target uses a whole device.
- */
- part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+ unsigned int nrz = *nr_zones;
+ int i;
/*
- * Remap the start sector of the reported zones. For sequential zones,
- * also remap the write pointer position.
+ * Remap the start sector and write pointer position of the zones in
+ * the array. Since the underlying device may report more zones than
+ * the target covers, also adjust the number of zones.
*/
- bio_for_each_segment(bvec, report_bio, iter) {
- addr = kmap_atomic(bvec.bv_page);
-
- /* Remember the report header in the first page */
- if (!hdr) {
- hdr = addr;
- ofst = sizeof(struct blk_zone_report_hdr);
- } else
- ofst = 0;
-
- /* Set zones start sector */
- while (hdr->nr_zones && ofst < bvec.bv_len) {
- zone = addr + ofst;
- zone->start -= part_offset;
- if (zone->start >= start + ti->len) {
- hdr->nr_zones = 0;
- break;
- }
- zone->start = zone->start + ti->begin - start;
- if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
- if (zone->cond == BLK_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
- else if (zone->cond == BLK_ZONE_COND_EMPTY)
- zone->wp = zone->start;
- else
- zone->wp = zone->wp + ti->begin - start - part_offset;
- }
- ofst += sizeof(struct blk_zone);
- hdr->nr_zones--;
- nr_rep++;
+ for (i = 0; i < nrz; i++) {
+ zone = zones + i;
+ if (zone->start >= start + ti->len) {
+ memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
+ break;
}
- if (addr != hdr)
- kunmap_atomic(addr);
-
- if (!hdr->nr_zones)
- break;
- }
+ zone->start = zone->start + ti->begin - start;
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ continue;
- if (hdr) {
- hdr->nr_zones = nr_rep;
- kunmap_atomic(hdr);
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+ zone->wp = zone->wp + ti->begin - start;
}
- bio_advance(report_bio, report_bio->bi_iter.bi_size);
-
+ *nr_zones = i;
#else /* !CONFIG_BLK_DEV_ZONED */
- bio->bi_status = BLK_STS_NOTSUPP;
+ *nr_zones = 0;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
@@ -1327,8 +1334,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return r;
}
- if (bio_op(bio) != REQ_OP_ZONE_REPORT)
- bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+ bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
if (unlikely(bio_integrity(bio) != NULL))
@@ -1541,7 +1547,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
- struct bio *bio = ci->bio;
struct dm_target *ti;
unsigned len;
int r;
@@ -1553,11 +1558,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (unlikely(__process_abnormal_io(ci, ti, &r)))
return r;
- if (bio_op(bio) == REQ_OP_ZONE_REPORT)
- len = ci->sector_count;
- else
- len = min_t(sector_t, max_io_len(ci->sector, ti),
- ci->sector_count);
+ len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
if (r < 0)
@@ -1616,9 +1617,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* We take a clone of the original to store in
* ci.io->orig_bio to be used by end_io_acct() and
* for dec_pending to use for completion handling.
- * As this path is not used for REQ_OP_ZONE_REPORT,
- * the usage of io->orig_bio in dm_remap_zone_report()
- * won't be affected by this reassignment.
*/
struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
GFP_NOIO, &md->queue->bio_split);
@@ -1666,7 +1664,7 @@ static blk_qc_t __process_bio(struct mapped_device *md,
* Defend against IO still getting in during teardown
* - as was seen for a time with nvme-fcloop
*/
- if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
+ if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) {
error = -EIO;
goto out;
}
@@ -1808,8 +1806,6 @@ static void dm_wq_work(struct work_struct *work);
static void dm_init_normal_md_queue(struct mapped_device *md)
{
- md->use_blk_mq = false;
-
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
@@ -1820,8 +1816,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
destroy_workqueue(md->wq);
- if (md->kworker_task)
- kthread_stop(md->kworker_task);
bioset_exit(&md->bs);
bioset_exit(&md->io_bs);
@@ -1888,7 +1882,6 @@ static struct mapped_device *alloc_dev(int minor)
goto bad_io_barrier;
md->numa_node_id = numa_node_id;
- md->use_blk_mq = dm_use_blk_mq_default();
md->init_tio_pdu = false;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
@@ -1919,7 +1912,6 @@ static struct mapped_device *alloc_dev(int minor)
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
init_completion(&md->kobj_holder.completion);
- md->kworker_task = NULL;
md->disk->major = _major;
md->disk->first_minor = minor;
@@ -2219,14 +2211,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) {
case DM_TYPE_REQUEST_BASED:
- dm_init_normal_md_queue(md);
- r = dm_old_init_request_queue(md, t);
- if (r) {
- DMERR("Cannot initialize queue for request-based mapped device");
- return r;
- }
- break;
- case DM_TYPE_MQ_REQUEST_BASED:
r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
@@ -2331,9 +2315,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
blk_set_queue_dying(md->queue);
- if (dm_request_based(md) && md->kworker_task)
- kthread_flush_worker(&md->kworker);
-
/*
* Take suspend_lock so that presuspend and postsuspend methods
* do not race with internal suspend.
@@ -2586,11 +2567,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md)) {
+ if (dm_request_based(md))
dm_stop_queue(md->queue);
- if (md->kworker_task)
- kthread_flush_worker(&md->kworker);
- }
flush_workqueue(md->wq);
@@ -2965,7 +2943,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
goto out;
break;
case DM_TYPE_REQUEST_BASED:
- case DM_TYPE_MQ_REQUEST_BASED:
pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
@@ -3167,6 +3144,7 @@ static const struct block_device_operations dm_blk_dops = {
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
+ .report_zones = dm_blk_report_zones,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
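The rewritten dm_remap_zone_report() shifts every descriptor by (ti->begin - start). With illustrative values ti->begin = 4096, start = 262144 and ti->len = 524288: a zone reported at sector 262144 of the backing device becomes 262144 + 4096 - 262144 = 4096 in the dm device, its write pointer moves by the same delta, and any zone starting at or past start + ti->len = 786432 is zeroed out, with *nr_zones truncated to match. Conventional zones skip the write pointer fix-up, since wp is meaningless for them.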
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 114a81b27c37..2d539b82ec08 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -70,7 +70,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
-bool dm_table_all_blk_mq_devices(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 2fc8c113977f..1cd4f991792c 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2288,9 +2288,9 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
goto out;
}
if (mddev->pers) {
- mddev->pers->quiesce(mddev, 1);
+ mddev_suspend(mddev);
md_bitmap_destroy(mddev);
- mddev->pers->quiesce(mddev, 0);
+ mddev_resume(mddev);
}
mddev->bitmap_info.offset = 0;
if (mddev->bitmap_info.file) {
@@ -2327,8 +2327,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
struct bitmap *bitmap;
- mddev->pers->quiesce(mddev, 1);
bitmap = md_bitmap_create(mddev, -1);
+ mddev_suspend(mddev);
if (IS_ERR(bitmap))
rv = PTR_ERR(bitmap);
else {
@@ -2337,11 +2337,12 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
if (rv)
mddev->bitmap_info.offset = 0;
}
- mddev->pers->quiesce(mddev, 0);
if (rv) {
md_bitmap_destroy(mddev);
+ mddev_resume(mddev);
goto out;
}
+ mddev_resume(mddev);
}
}
}
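The md-bitmap hunks replace the personality's quiesce callback with a full mddev_suspend()/mddev_resume() bracket, and create the new bitmap before suspending since md_bitmap_create() may block on I/O. A hedged sketch of the resulting shape (swap_in_bitmap() is an illustrative wrapper; error handling elided):

	static int swap_in_bitmap(struct mddev *mddev)
	{
		struct bitmap *bitmap;

		bitmap = md_bitmap_create(mddev, -1);	/* may sleep, so done first */
		if (IS_ERR(bitmap))
			return PTR_ERR(bitmap);

		mddev_suspend(mddev);			/* no I/O while we switch */
		mddev->bitmap = bitmap;
		md_bitmap_load(mddev);
		mddev_resume(mddev);
		return 0;
	}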
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 0b2af6e74fc3..8dff19d5502e 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -33,13 +33,6 @@ struct dlm_lock_resource {
int mode;
};
-struct suspend_info {
- int slot;
- sector_t lo;
- sector_t hi;
- struct list_head list;
-};
-
struct resync_info {
__le64 lo;
__le64 hi;
@@ -80,7 +73,13 @@ struct md_cluster_info {
struct dlm_lock_resource **other_bitmap_lockres;
struct dlm_lock_resource *resync_lockres;
struct list_head suspend_list;
+
spinlock_t suspend_lock;
+ /* record the region in which writes should be suspended */
+ sector_t suspend_lo;
+ sector_t suspend_hi;
+ int suspend_from; /* the slot which broadcast suspend_lo/hi */
+
struct md_thread *recovery_thread;
unsigned long recovery_map;
/* communication loc resources */
@@ -105,6 +104,7 @@ enum msg_type {
RE_ADD,
BITMAP_NEEDS_SYNC,
CHANGE_CAPACITY,
+ BITMAP_RESIZE,
};
struct cluster_msg {
@@ -270,25 +270,22 @@ static void add_resync_info(struct dlm_lock_resource *lockres,
ri->hi = cpu_to_le64(hi);
}
-static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres)
+static int read_resync_info(struct mddev *mddev,
+ struct dlm_lock_resource *lockres)
{
struct resync_info ri;
- struct suspend_info *s = NULL;
- sector_t hi = 0;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret = 0;
dlm_lock_sync(lockres, DLM_LOCK_CR);
memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
- hi = le64_to_cpu(ri.hi);
- if (hi > 0) {
- s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
- if (!s)
- goto out;
- s->hi = hi;
- s->lo = le64_to_cpu(ri.lo);
+ if (le64_to_cpu(ri.hi) > 0) {
+ cinfo->suspend_hi = le64_to_cpu(ri.hi);
+ cinfo->suspend_lo = le64_to_cpu(ri.lo);
+ ret = 1;
}
dlm_unlock_sync(lockres);
-out:
- return s;
+ return ret;
}
static void recover_bitmaps(struct md_thread *thread)
@@ -298,7 +295,6 @@ static void recover_bitmaps(struct md_thread *thread)
struct dlm_lock_resource *bm_lockres;
char str[64];
int slot, ret;
- struct suspend_info *s, *tmp;
sector_t lo, hi;
while (cinfo->recovery_map) {
@@ -325,13 +321,17 @@ static void recover_bitmaps(struct md_thread *thread)
/* Clear suspend_area associated with the bitmap */
spin_lock_irq(&cinfo->suspend_lock);
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- }
+ cinfo->suspend_hi = 0;
+ cinfo->suspend_lo = 0;
+ cinfo->suspend_from = -1;
spin_unlock_irq(&cinfo->suspend_lock);
+ /* Kick off a reshape if needed */
+ if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ mddev->reshape_position != MaxSector)
+ md_wakeup_thread(mddev->sync_thread);
+
if (hi > 0) {
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;
@@ -434,34 +434,23 @@ static void ack_bast(void *arg, int mode)
}
}
-static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
-{
- struct suspend_info *s, *tmp;
-
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- break;
- }
-}
-
static void remove_suspend_info(struct mddev *mddev, int slot)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
- __remove_suspend_info(cinfo, slot);
+ cinfo->suspend_hi = 0;
+ cinfo->suspend_lo = 0;
spin_unlock_irq(&cinfo->suspend_lock);
mddev->pers->quiesce(mddev, 0);
}
-
static void process_suspend_info(struct mddev *mddev,
int slot, sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
- struct suspend_info *s;
+ struct mdp_superblock_1 *sb = NULL;
+ struct md_rdev *rdev;
if (!hi) {
/*
@@ -475,6 +464,12 @@ static void process_suspend_info(struct mddev *mddev,
return;
}
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
+ sb = page_address(rdev->sb_page);
+ break;
+ }
+
/*
* The bitmaps are not same for different nodes
* if RESYNCING is happening in one node, then
@@ -487,26 +482,26 @@ static void process_suspend_info(struct mddev *mddev,
* sync_low/hi is used to record the region which
* arrived in the previous RESYNCING message,
*
- * Call bitmap_sync_with_cluster to clear
- * NEEDED_MASK and set RESYNC_MASK since
- * resync thread is running in another node,
- * so we don't need to do the resync again
- * with the same section */
- md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi, lo, hi);
+ * Call md_bitmap_sync_with_cluster to clear NEEDED_MASK
+ * and set RESYNC_MASK since resync thread is running
+ * in another node, so we don't need to do the resync
+ * again with the same section.
+ *
+ * Skip md_bitmap_sync_with_cluster in case a reshape is
+ * happening, because the reshaping region is small and
+ * we don't want to trigger lots of WARNs.
+ */
+ if (sb && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE))
+ md_bitmap_sync_with_cluster(mddev, cinfo->sync_low,
+ cinfo->sync_hi, lo, hi);
cinfo->sync_low = lo;
cinfo->sync_hi = hi;
- s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
- if (!s)
- return;
- s->slot = slot;
- s->lo = lo;
- s->hi = hi;
mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
- /* Remove existing entry (if exists) before adding */
- __remove_suspend_info(cinfo, slot);
- list_add(&s->list, &cinfo->suspend_list);
+ cinfo->suspend_from = slot;
+ cinfo->suspend_lo = lo;
+ cinfo->suspend_hi = hi;
spin_unlock_irq(&cinfo->suspend_lock);
mddev->pers->quiesce(mddev, 0);
}
@@ -612,6 +607,11 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
case BITMAP_NEEDS_SYNC:
__recover_slot(mddev, le32_to_cpu(msg->slot));
break;
+ case BITMAP_RESIZE:
+ if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
+ ret = md_bitmap_resize(mddev->bitmap,
+ le64_to_cpu(msg->high), 0, 0);
+ break;
default:
ret = -1;
pr_warn("%s:%d Received unknown message from %d\n",
@@ -800,7 +800,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
struct md_cluster_info *cinfo = mddev->cluster_info;
int i, ret = 0;
struct dlm_lock_resource *bm_lockres;
- struct suspend_info *s;
char str[64];
sector_t lo, hi;
@@ -819,16 +818,13 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
bm_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (ret == -EAGAIN) {
- s = read_resync_info(mddev, bm_lockres);
- if (s) {
+ if (read_resync_info(mddev, bm_lockres)) {
pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
__func__, __LINE__,
- (unsigned long long) s->lo,
- (unsigned long long) s->hi, i);
- spin_lock_irq(&cinfo->suspend_lock);
- s->slot = i;
- list_add(&s->list, &cinfo->suspend_list);
- spin_unlock_irq(&cinfo->suspend_lock);
+ (unsigned long long) cinfo->suspend_lo,
+ (unsigned long long) cinfo->suspend_hi,
+ i);
+ cinfo->suspend_from = i;
}
ret = 0;
lockres_free(bm_lockres);
@@ -1001,10 +997,17 @@ static int leave(struct mddev *mddev)
if (!cinfo)
return 0;
- /* BITMAP_NEEDS_SYNC message should be sent when node
+ /*
+ * BITMAP_NEEDS_SYNC message should be sent when node
* is leaving the cluster with dirty bitmap, also we
- * can only deliver it when dlm connection is available */
- if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector)
+ * can only deliver it when dlm connection is available.
+ *
+ * Also, we should send BITMAP_NEEDS_SYNC message in
+ * case reshaping is interrupted.
+ */
+ if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
+ (mddev->reshape_position != MaxSector &&
+ test_bit(MD_CLOSING, &mddev->flags)))
resync_bitmap(mddev);
set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
@@ -1102,6 +1105,80 @@ static void metadata_update_cancel(struct mddev *mddev)
unlock_comm(cinfo);
}
+static int update_bitmap_size(struct mddev *mddev, sector_t size)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg = {0};
+ int ret;
+
+ cmsg.type = cpu_to_le32(BITMAP_RESIZE);
+ cmsg.high = cpu_to_le64(size);
+ ret = sendmsg(cinfo, &cmsg, 0);
+ if (ret)
+ pr_err("%s:%d: failed to send BITMAP_RESIZE message (%d)\n",
+ __func__, __LINE__, ret);
+ return ret;
+}
+
+static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize)
+{
+ struct bitmap_counts *counts;
+ char str[64];
+ struct dlm_lock_resource *bm_lockres;
+ struct bitmap *bitmap = mddev->bitmap;
+ unsigned long my_pages = bitmap->counts.pages;
+ int i, rv;
+
+ /*
+ * We need to ensure all the nodes can grow to a larger
+ * bitmap size before the reshape starts.
+ */
+ rv = update_bitmap_size(mddev, newsize);
+ if (rv)
+ return rv;
+
+ for (i = 0; i < mddev->bitmap_info.nodes; i++) {
+ if (i == md_cluster_ops->slot_number(mddev))
+ continue;
+
+ bitmap = get_bitmap_from_slot(mddev, i);
+ if (IS_ERR(bitmap)) {
+ pr_err("can't get bitmap from slot %d\n", i);
+ goto out;
+ }
+ counts = &bitmap->counts;
+
+ /*
+ * If we can take the bitmap lock of a node, its slot is not
+ * occupied, so update the pages.
+ */
+ snprintf(str, 64, "bitmap%04d", i);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres) {
+ pr_err("Cannot initialize %s lock\n", str);
+ goto out;
+ }
+ bm_lockres->flags |= DLM_LKF_NOQUEUE;
+ rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (!rv)
+ counts->pages = my_pages;
+ lockres_free(bm_lockres);
+
+ if (my_pages != counts->pages)
+ /*
+ * Let's revert the bitmap size if one node
+ * can't resize its bitmap
+ */
+ goto out;
+ }
+
+ return 0;
+out:
+ md_bitmap_free(bitmap);
+ update_bitmap_size(mddev, oldsize);
+ return -1;
+}
+
/*
* return 0 if all the bitmaps have the same sync_size
*/
@@ -1243,6 +1320,16 @@ static int resync_start(struct mddev *mddev)
return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev);
}
+static void resync_info_get(struct mddev *mddev, sector_t *lo, sector_t *hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ spin_lock_irq(&cinfo->suspend_lock);
+ *lo = cinfo->suspend_lo;
+ *hi = cinfo->suspend_hi;
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
@@ -1295,21 +1382,14 @@ static int area_resyncing(struct mddev *mddev, int direction,
{
struct md_cluster_info *cinfo = mddev->cluster_info;
int ret = 0;
- struct suspend_info *s;
if ((direction == READ) &&
test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
return 1;
spin_lock_irq(&cinfo->suspend_lock);
- if (list_empty(&cinfo->suspend_list))
- goto out;
- list_for_each_entry(s, &cinfo->suspend_list, list)
- if (hi > s->lo && lo < s->hi) {
- ret = 1;
- break;
- }
-out:
+ if (hi > cinfo->suspend_lo && lo < cinfo->suspend_hi)
+ ret = 1;
spin_unlock_irq(&cinfo->suspend_lock);
return ret;
}
@@ -1482,6 +1562,7 @@ static struct md_cluster_operations cluster_ops = {
.resync_start = resync_start,
.resync_finish = resync_finish,
.resync_info_update = resync_info_update,
+ .resync_info_get = resync_info_get,
.metadata_update_start = metadata_update_start,
.metadata_update_finish = metadata_update_finish,
.metadata_update_cancel = metadata_update_cancel,
@@ -1492,6 +1573,7 @@ static struct md_cluster_operations cluster_ops = {
.remove_disk = remove_disk,
.load_bitmaps = load_bitmaps,
.gather_bitmaps = gather_bitmaps,
+ .resize_bitmaps = resize_bitmaps,
.lock_all_bitmaps = lock_all_bitmaps,
.unlock_all_bitmaps = unlock_all_bitmaps,
.update_size = update_size,
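Collapsing the suspend_list to a single suspend_lo/suspend_hi pair turns area_resyncing() into one interval-overlap test. The test in isolation, with illustrative numbers: for suspend_lo = 1000 and suspend_hi = 2000, a write covering [1500, 1600) overlaps and must wait, while [2000, 2100) does not, since the ranges are half-open.

	/* Half-open interval overlap, as used by area_resyncing() above */
	static bool overlaps(sector_t lo, sector_t hi,
			     sector_t suspend_lo, sector_t suspend_hi)
	{
		return hi > suspend_lo && lo < suspend_hi;
	}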
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index c0240708f443..a78e3021775d 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -14,6 +14,7 @@ struct md_cluster_operations {
int (*leave)(struct mddev *mddev);
int (*slot_number)(struct mddev *mddev);
int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
+ void (*resync_info_get)(struct mddev *mddev, sector_t *lo, sector_t *hi);
int (*metadata_update_start)(struct mddev *mddev);
int (*metadata_update_finish)(struct mddev *mddev);
void (*metadata_update_cancel)(struct mddev *mddev);
@@ -26,6 +27,7 @@ struct md_cluster_operations {
int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
void (*load_bitmaps)(struct mddev *mddev, int total_slots);
int (*gather_bitmaps)(struct md_rdev *rdev);
+ int (*resize_bitmaps)(struct mddev *mddev, sector_t newsize, sector_t oldsize);
int (*lock_all_bitmaps)(struct mddev *mddev);
void (*unlock_all_bitmaps)(struct mddev *mddev);
void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 63ceabb4e020..fc488cb30a94 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,10 +452,11 @@ static void md_end_flush(struct bio *fbio)
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0)
+ if (bio->bi_iter.bi_size == 0) {
/* an empty barrier - all done */
bio_endio(bio);
- else {
+ mempool_free(fi, mddev->flush_pool);
+ } else {
INIT_WORK(&fi->flush_work, submit_flushes);
queue_work(md_wq, &fi->flush_work);
}
@@ -509,10 +510,11 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
rcu_read_unlock();
if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0)
+ if (bio->bi_iter.bi_size == 0) {
/* an empty barrier - all done */
bio_endio(bio);
- else {
+ mempool_free(fi, mddev->flush_pool);
+ } else {
INIT_WORK(&fi->flush_work, submit_flushes);
queue_work(md_wq, &fi->flush_work);
}
@@ -5904,14 +5906,6 @@ static void __md_stop(struct mddev *mddev)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-}
-
-void md_stop(struct mddev *mddev)
-{
- /* stop the array and free an attached data structures.
- * This is called from dm-raid
- */
- __md_stop(mddev);
if (mddev->flush_bio_pool) {
mempool_destroy(mddev->flush_bio_pool);
mddev->flush_bio_pool = NULL;
@@ -5920,6 +5914,14 @@ void md_stop(struct mddev *mddev)
mempool_destroy(mddev->flush_pool);
mddev->flush_pool = NULL;
}
+}
+
+void md_stop(struct mddev *mddev)
+{
+ /* stop the array and free any attached data structures.
+ * This is called from dm-raid.
+ */
+ __md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
}
@@ -8370,9 +8372,17 @@ void md_do_sync(struct md_thread *thread)
else if (!mddev->bitmap)
j = mddev->recovery_cp;
- } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
max_sectors = mddev->resync_max_sectors;
- else {
+ /*
+ * If the original node aborts reshaping then we continue the
+ * reshaping, so set j again to avoid restarting the reshape
+ * from the beginning.
+ */
+ if (mddev_is_clustered(mddev) &&
+ mddev->reshape_position != MaxSector)
+ j = mddev->reshape_position;
+ } else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
@@ -8623,8 +8633,10 @@ void md_do_sync(struct md_thread *thread)
mddev_lock_nointr(mddev);
md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
mddev_unlock(mddev);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ if (!mddev_is_clustered(mddev)) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
}
spin_lock(&mddev->lock);
@@ -8790,6 +8802,18 @@ static void md_start_sync(struct work_struct *ws)
*/
void md_check_recovery(struct mddev *mddev)
{
+ if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+ /* Write superblock - thread that called mddev_suspend()
+ * holds reconfig_mutex for us.
+ */
+ set_bit(MD_UPDATING_SB, &mddev->flags);
+ smp_mb__after_atomic();
+ if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+ md_update_sb(mddev, 0);
+ clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+ wake_up(&mddev->sb_wait);
+ }
+
if (mddev->suspended)
return;
@@ -8949,16 +8973,6 @@ void md_check_recovery(struct mddev *mddev)
unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
- } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
- /* Write superblock - thread that called mddev_suspend()
- * holds reconfig_mutex for us.
- */
- set_bit(MD_UPDATING_SB, &mddev->flags);
- smp_mb__after_atomic();
- if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
- md_update_sb(mddev, 0);
- clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
- wake_up(&mddev->sb_wait);
}
}
EXPORT_SYMBOL(md_check_recovery);
@@ -8966,6 +8980,8 @@ EXPORT_SYMBOL(md_check_recovery);
void md_reap_sync_thread(struct mddev *mddev)
{
struct md_rdev *rdev;
+ sector_t old_dev_sectors = mddev->dev_sectors;
+ bool is_reshaped = false;
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
@@ -8980,8 +8996,11 @@ void md_reap_sync_thread(struct mddev *mddev)
}
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- mddev->pers->finish_reshape)
+ mddev->pers->finish_reshape) {
mddev->pers->finish_reshape(mddev);
+ if (mddev_is_clustered(mddev))
+ is_reshaped = true;
+ }
/* If array is no-longer degraded, then any saved_raid_disk
* information must be scrapped.
@@ -9002,6 +9021,14 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ /*
+ * We call md_cluster_ops->update_size here because sync_size could
+ * have been changed by md_update_sb, and MD_RECOVERY_RESHAPE is
+ * cleared, so it is time to update the size across the cluster.
+ */
+ if (mddev_is_clustered(mddev) && is_reshaped
+ && !test_bit(MD_CLOSING, &mddev->flags))
+ md_cluster_ops->update_size(mddev, old_dev_sectors);
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -9201,8 +9228,12 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
}
if (role != rdev2->raid_disk) {
- /* got activated */
- if (rdev2->raid_disk == -1 && role != 0xffff) {
+ /*
+ * got activated, unless a reshape is happening.
+ */
+ if (rdev2->raid_disk == -1 && role != 0xffff &&
+ !(le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RESHAPE_ACTIVE)) {
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %s\n",
@@ -9228,6 +9259,30 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+ /*
+ * Since mddev->delta_disks has already been updated in
+ * update_raid_disks, it is time to check for a reshape.
+ */
+ if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+ /*
+ * A reshape is happening on the remote node; we need to
+ * update reshape_position and call start_reshape.
+ */
+ mddev->reshape_position = sb->reshape_position;
+ if (mddev->pers->update_reshape_pos)
+ mddev->pers->update_reshape_pos(mddev);
+ if (mddev->pers->start_reshape)
+ mddev->pers->start_reshape(mddev);
+ } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ mddev->reshape_position != MaxSector &&
+ !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+ /* The reshape has just finished on another node. */
+ mddev->reshape_position = MaxSector;
+ if (mddev->pers->update_reshape_pos)
+ mddev->pers->update_reshape_pos(mddev);
+ }
+
/* Finally set the event to be up to date */
mddev->events = le64_to_cpu(sb->events);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8afd6bfdbfb9..c52afb52c776 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -557,6 +557,7 @@ struct md_personality
int (*check_reshape) (struct mddev *mddev);
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
+ void (*update_reshape_pos) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4e990246225e..1d54109071cc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1734,6 +1734,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->raid_disks &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
first = last = rdev->saved_raid_disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d6f7978b4449..b98e746e7fc4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
@@ -1808,6 +1809,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
first = last = rdev->raid_disk;
if (rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->geo.raid_disks &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
else
@@ -3079,6 +3081,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
+ int need_recover = 0;
+ int need_replace = 0;
struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace;
@@ -3086,11 +3090,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
mrdev = rcu_dereference(mirror->rdev);
mreplace = rcu_dereference(mirror->replacement);
- if ((mrdev == NULL ||
- test_bit(Faulty, &mrdev->flags) ||
- test_bit(In_sync, &mrdev->flags)) &&
- (mreplace == NULL ||
- test_bit(Faulty, &mreplace->flags))) {
+ if (mrdev != NULL &&
+ !test_bit(Faulty, &mrdev->flags) &&
+ !test_bit(In_sync, &mrdev->flags))
+ need_recover = 1;
+ if (mreplace != NULL &&
+ !test_bit(Faulty, &mreplace->flags))
+ need_replace = 1;
+
+ if (!need_recover && !need_replace) {
rcu_read_unlock();
continue;
}
@@ -3213,7 +3221,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
- if (!test_bit(In_sync, &mrdev->flags)) {
+ if (need_recover) {
bio = r10_bio->devs[1].bio;
bio->bi_next = biolist;
biolist = bio;
@@ -3230,16 +3238,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[1].repl_bio;
if (bio)
bio->bi_end_io = NULL;
- /* Note: if mreplace != NULL, then bio
+ /* Note: if need_replace, then bio
* cannot be NULL as r10buf_pool_alloc will
* have allocated it.
- * So the second test here is pointless.
- * But it keeps semantic-checkers happy, and
- * this comment keeps human reviewers
- * happy.
*/
- if (mreplace == NULL || bio == NULL ||
- test_bit(Faulty, &mreplace->flags))
+ if (!need_replace)
break;
bio->bi_next = biolist;
biolist = bio;
@@ -4286,12 +4289,46 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);
if (mddev->delta_disks && mddev->bitmap) {
- ret = md_bitmap_resize(mddev->bitmap,
- raid10_size(mddev, 0, conf->geo.raid_disks),
- 0, 0);
+ struct mdp_superblock_1 *sb = NULL;
+ sector_t oldsize, newsize;
+
+ oldsize = raid10_size(mddev, 0, 0);
+ newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
+
+ if (!mddev_is_clustered(mddev)) {
+ ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
+ if (ret)
+ goto abort;
+ else
+ goto out;
+ }
+
+ rdev_for_each(rdev, mddev) {
+ if (rdev->raid_disk > -1 &&
+ !test_bit(Faulty, &rdev->flags))
+ sb = page_address(rdev->sb_page);
+ }
+
+ /*
+ * Some node is already performing a reshape, so there is no need
+ * to call md_bitmap_resize again since it will be called when the
+ * BITMAP_RESIZE msg is received
+ */
+ if ((sb && (le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
+ goto out;
+
+ ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
if (ret)
goto abort;
+
+ ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
+ if (ret) {
+ md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
+ goto abort;
+ }
}
+out:
if (mddev->delta_disks > 0) {
rdev_for_each(rdev, mddev)
if (rdev->raid_disk < 0 &&
@@ -4568,6 +4605,32 @@ read_more:
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
+ /*
+ * Broadcast the RESYNC message to the other nodes so that they do
+ * not write to the region, avoiding conflicts.
+ */
+ if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
+ struct mdp_superblock_1 *sb = NULL;
+ int sb_reshape_pos = 0;
+
+ conf->cluster_sync_low = sector_nr;
+ conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
+ sb = page_address(rdev->sb_page);
+ if (sb) {
+ sb_reshape_pos = le64_to_cpu(sb->reshape_position);
+ /*
+ * Set cluster_sync_low again if the next address for the array
+ * reshape is less than cluster_sync_low, since we can't update
+ * cluster_sync_low until the reshape has finished.
+ */
+ if (sb_reshape_pos < conf->cluster_sync_low)
+ conf->cluster_sync_low = sb_reshape_pos;
+ }
+
+ md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+
/* Now find the locations in the new layout */
__raid10_find_phys(&conf->geo, r10_bio);
@@ -4719,6 +4782,19 @@ static void end_reshape(struct r10conf *conf)
conf->fullsync = 0;
}
+static void raid10_update_reshape_pos(struct mddev *mddev)
+{
+ struct r10conf *conf = mddev->private;
+ sector_t lo, hi;
+
+ md_cluster_ops->resync_info_get(mddev, &lo, &hi);
+ if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
+ || mddev->reshape_position == MaxSector)
+ conf->reshape_progress = mddev->reshape_position;
+ else
+ WARN_ON_ONCE(1);
+}
+
static int handle_reshape_read_error(struct mddev *mddev,
struct r10bio *r10_bio)
{
@@ -4887,6 +4963,7 @@ static struct md_personality raid10_personality =
.check_reshape = raid10_check_reshape,
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
+ .update_reshape_pos = raid10_update_reshape_pos,
.congested = raid10_congested,
};
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e6e925add700..ec3a5ef7fee0 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -3151,8 +3151,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
- rcu_assign_pointer(conf->log, NULL);
- md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
mempool_exit(&log->meta_pool);
out_mempool:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e4e98f47865d..4990f0319f6c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2681,6 +2681,18 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
pr_debug("raid456: error called\n");
spin_lock_irqsave(&conf->device_lock, flags);
+
+ if (test_bit(In_sync, &rdev->flags) &&
+ mddev->degraded == conf->max_degraded) {
+ /*
+ * Don't allow to achieve failed state
+ * Don't try to recover this device
+ */
+ conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ return;
+ }
+
set_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
mddev->degraded = raid5_calc_degraded(conf);
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 2e30de9c694a..57a6bb1fd3c9 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -280,7 +280,9 @@ int ocxl_config_check_afu_index(struct pci_dev *dev,
u32 val;
int rc, templ_major, templ_minor, len;
- pci_write_config_word(dev, fn->dvsec_afu_info_pos, afu_idx);
+ pci_write_config_byte(dev,
+ fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
+ afu_idx);
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index d96a84a62d78..0cc911f928b1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -119,7 +119,6 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
-#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5a727d4729da..686899d7e555 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -27,7 +27,6 @@
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
-#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 35564a8a48f9..a6cbaca37e94 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -341,7 +341,7 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
if (!compat)
return NULL;
- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ priv->mdio_dn = of_get_compatible_child(dn, compat);
kfree(compat);
if (!priv->mdio_dn) {
dev_err(kdev, "unable to find MDIO bus node\n");
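The hunk above swaps of_find_compatible_node(), which walks the whole tree starting from the given node and drops a reference on it, for of_get_compatible_child(), which only matches direct children of the parent and leaves the parent's refcount alone. A minimal sketch of the resulting lookup pattern, with a hypothetical caller and compatible string:

#include <linux/of.h>

static int example_find_mdio(struct device_node *parent)
{
        struct device_node *child;

        /* only direct children of @parent are considered */
        child = of_get_compatible_child(parent, "example,internal-mdio");
        if (!child)
                return -ENODEV;

        /* ... use @child ... */

        of_node_put(child);     /* drop the reference returned above */
        return 0;
}

The caller is still responsible for of_node_put() on the returned child, which is why the dwmac-sun8i and twl4030 hunks later in this series add the missing put calls.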
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8f5bf9166c11..1d86b4d5645a 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1684,7 +1684,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
padlen = 0;
/* No room for FCS, need to reallocate skb. */
else
- padlen = ETH_FCS_LEN - tailroom;
+ padlen = ETH_FCS_LEN;
} else {
/* Add room for FCS. */
padlen += ETH_FCS_LEN;
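A quick worked check of the hunk above: with ETH_FCS_LEN = 4 and, say, one byte of tailroom in the no-room branch, the old code asked the subsequent skb reallocation for only 4 - 1 = 3 bytes of new tailroom, one byte short of the 4-byte FCS that still has to be appended; requesting the full ETH_FCS_LEN is safe regardless of the old buffer's tailroom. (This reading is inferred from the surrounding code, not stated in the hunk.)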
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index f7e363b90fe0..dca6f2326c26 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -859,10 +859,12 @@ static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
reset_level = HNAE3_FUNC_RESET;
}
- err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
- if (err_sts) {
- hclge_log_error(dev, hw_err_lst3, err_sts);
- reset_level = HNAE3_FUNC_RESET;
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst3, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
}
/* clear PPP INT */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 740ea58ba938..aef3c89ee79c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>
-#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index edc349f49748..fb9bfad96daf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>
-#include <net/busy_poll.h>
#include "iavf.h"
#include "iavf_trace.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index c52f450f2c0d..8cd6a2401fd9 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -43,6 +43,23 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_dev_onetime_setup - Temporary HW/FW workarounds
+ * @hw: pointer to the HW structure
+ *
+ * This function provides temporary workarounds for certain issues
+ * that are expected to be fixed in the HW/FW.
+ */
+void ice_dev_onetime_setup(struct ice_hw *hw)
+{
+ /* configure Rx - set non-PXE mode */
+ wr32(hw, GLLAN_RCTL_0, 0x1);
+
+#define MBX_PF_VT_PFALLOC 0x00231E80
+ /* set VFs per PF */
+ wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -218,7 +235,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+static enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -740,6 +757,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
+ ice_dev_onetime_setup(hw);
+
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
@@ -1531,9 +1550,7 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
if (!status)
ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
- *cap_count =
- DIV_ROUND_UP(le16_to_cpu(desc.datalen),
- sizeof(struct ice_aqc_list_caps_elem));
+ *cap_count = le32_to_cpu(cmd->count);
return status;
}
@@ -1988,33 +2005,6 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
- * ice_aq_set_event_mask
- * @hw: pointer to the hw struct
- * @port_num: port number of the physical function
- * @mask: event mask to be set
- * @cd: pointer to command details structure or NULL
- *
- * Set event mask (0x0613)
- */
-enum ice_status
-ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_set_event_mask *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.set_event_mask;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
-
- cmd->lport_num = port_num;
-
- cmd->event_mask = cpu_to_le16(mask);
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
-/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 1900681289a4..cf760c24a6aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -34,6 +34,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
+
+void ice_dev_onetime_setup(struct ice_hw *hw);
+
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
@@ -83,12 +86,6 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
-ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
- struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
- struct ice_sq_cd *cd);
-enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cmd_details);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 437f832fd7c4..0038a4109c99 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -19,11 +19,10 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
- *
*/
#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x00
-#define EXP_FW_API_VER_MINOR 0x01
+#define EXP_FW_API_VER_MAJOR 0x01
+#define EXP_FW_API_VER_MINOR 0x03
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index a6f0a5c0c305..f8d5c661d0ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -6,10 +6,10 @@
/* Device IDs */
/* Intel(R) Ethernet Controller E810-C for backplane */
-#define ICE_DEV_ID_C810_BACKPLANE 0x1591
+#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
-#define ICE_DEV_ID_C810_QSFP 0x1592
+#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
-#define ICE_DEV_ID_C810_SFP 0x1593
+#define ICE_DEV_ID_E810C_SFP 0x1593
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index a6679a9bfd3a..5fdea6ec7675 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -157,6 +157,13 @@
#define VPINT_ALLOC_LAST_S 12
#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_VALID_M BIT(31)
+#define VPINT_ALLOC_PCI(_VF) (0x0009D000 + ((_VF) * 4))
+#define VPINT_ALLOC_PCI_FIRST_S 0
+#define VPINT_ALLOC_PCI_FIRST_M ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_PCI_LAST_S 12
+#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_PCI_VALID_M BIT(31)
+#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
#define QRX_CTRL_MAX_INDEX 2047
@@ -320,6 +327,7 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 49f1940772ed..5bacad01f0c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -433,7 +433,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
* @irq: interrupt number
* @data: pointer to a q_vector
*/
-irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
+static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
@@ -2529,6 +2529,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->hw_base_vector = 0;
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi, false);
+ ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
/* Initialize VSI struct elements and create VSI in FW */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 677db40338f5..3831b4f0960a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -73,5 +73,4 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
-irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8f61b375e768..05993451147a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -457,35 +457,6 @@ static void ice_reset_subtask(struct ice_pf *pf)
}
/**
- * ice_watchdog_subtask - periodic tasks not using event driven scheduling
- * @pf: board private structure
- */
-static void ice_watchdog_subtask(struct ice_pf *pf)
-{
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__ICE_DOWN, pf->state) ||
- test_bit(__ICE_CFG_BUSY, pf->state))
- return;
-
- /* make sure we don't do these things too often */
- if (time_before(jiffies,
- pf->serv_tmr_prev + pf->serv_tmr_period))
- return;
-
- pf->serv_tmr_prev = jiffies;
-
- /* Update the stats for active netdevs so the network stack
- * can look at updated numbers whenever it cares to
- */
- ice_update_pf_stats(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->netdev)
- ice_update_vsi_stats(pf->vsi[i]);
-}
-
-/**
* ice_print_link_msg - print link up or down message
* @vsi: the VSI whose link status is being queried
* @isup: boolean for if the link is now up or down
@@ -555,36 +526,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
/**
- * ice_init_link_events - enable/initialize link events
- * @pi: pointer to the port_info instance
- *
- * Returns -EIO on failure, 0 on success
- */
-static int ice_init_link_events(struct ice_port_info *pi)
-{
- u16 mask;
-
- mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
- ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
-
- if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to set link event mask for port %d\n",
- pi->lport);
- return -EIO;
- }
-
- if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to enable link events for port %d\n",
- pi->lport);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
* ice_vsi_link_event - update the vsi's netdev
* @vsi: the vsi on which the link event occurred
* @link_up: whether or not the vsi needs to be set up or down
@@ -671,27 +612,35 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
- * ice_handle_link_event - handle link event via ARQ
- * @pf: pf that the link event is associated with
- *
- * Return -EINVAL if port_info is null
- * Return status on succes
+ * ice_watchdog_subtask - periodic tasks not using event driven scheduling
+ * @pf: board private structure
*/
-static int ice_handle_link_event(struct ice_pf *pf)
+static void ice_watchdog_subtask(struct ice_pf *pf)
{
- struct ice_port_info *port_info;
- int status;
+ int i;
- port_info = pf->hw.port_info;
- if (!port_info)
- return -EINVAL;
+ /* if interface is down do nothing */
+ if (test_bit(__ICE_DOWN, pf->state) ||
+ test_bit(__ICE_CFG_BUSY, pf->state))
+ return;
- status = ice_link_event(pf, port_info);
- if (status)
- dev_dbg(&pf->pdev->dev,
- "Could not process link event, error %d\n", status);
+ /* make sure we don't do these things too often */
+ if (time_before(jiffies,
+ pf->serv_tmr_prev + pf->serv_tmr_period))
+ return;
- return status;
+ pf->serv_tmr_prev = jiffies;
+
+ if (ice_link_event(pf, pf->hw.port_info))
+ dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
+
+ /* Update the stats for active netdevs so the network stack
+ * can look at updated numbers whenever it cares to
+ */
+ ice_update_pf_stats(pf);
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->netdev)
+ ice_update_vsi_stats(pf->vsi[i]);
}
/**
@@ -797,11 +746,6 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
- case ice_aqc_opc_get_link_status:
- if (ice_handle_link_event(pf))
- dev_err(&pf->pdev->dev,
- "Could not handle link event\n");
- break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
@@ -2207,12 +2151,6 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
- err = ice_init_link_events(pf->hw.port_info);
- if (err) {
- dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
- goto err_alloc_sw_unroll;
- }
-
return 0;
err_alloc_sw_unroll:
@@ -2271,9 +2209,9 @@ static void ice_remove(struct pci_dev *pdev)
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id ice_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
/* required last entry */
{ 0, }
};
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index c25e486706f3..45f10f8f01dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -173,6 +173,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
vsi = pf->vsi[vf->lan_vsi_idx];
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
first = vf->first_vector_idx;
last = first + pf->num_vf_msix - 1;
@@ -519,6 +520,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+ reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
+ ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
+ VPINT_ALLOC_PCI_VALID_M);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
for (v = first; v <= last; v++) {
reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
@@ -528,10 +533,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
/* VF Tx queues allocation */
if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id),
- VPLAN_TXQ_MAPENA_TX_ENA_M);
/* set the VF PF Tx queue range
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
@@ -546,10 +552,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
"Scattered mode for VF Tx queues is not yet implemented\n");
}
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
+
/* VF Rx queues allocation */
if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id),
- VPLAN_RXQ_MAPENA_RX_ENA_M);
/* set the VF PF Rx queue range
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7a7679e7be84..ec1b87cc4410 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -30,7 +30,6 @@
#include "ixgbe_ipsec.h"
#include <net/xdp.h>
-#include <net/busy_poll.h>
/* common prefix used by pr_<> macros */
#undef pr_fmt
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 8890c95831ca..a5ab7eff2301 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -573,7 +573,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
sizeof(struct nix_cq_ctx_s));
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(&rsp->rss, ctx,
- sizeof(struct nix_cq_ctx_s));
+ sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
sizeof(struct nix_rx_mce_s));
@@ -1294,7 +1294,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
/* Add a new one to the list, at the tail */
- mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+ mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
if (!mce)
return -ENOMEM;
mce->idx = idx;
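The GFP_KERNEL to GFP_ATOMIC switch above indicates nix_update_mce_list() can run in a context that must not sleep, typically under a spinlock. A generic sketch of that pattern, with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_mce {
        struct list_head node;
        u16 idx;
};

static int example_add_mce(spinlock_t *lock, struct list_head *list, u16 idx)
{
        struct example_mce *mce;

        spin_lock(lock);
        /* sleeping allocations (GFP_KERNEL) are forbidden here */
        mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
        if (!mce) {
                spin_unlock(lock);
                return -ENOMEM;
        }
        mce->idx = idx;
        list_add_tail(&mce->node, list);
        spin_unlock(lock);
        return 0;
}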
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fe49384eba48..b744cd49a785 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -39,7 +39,6 @@
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
-#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a1aeeb8094c3..5a6d0919533d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -31,7 +31,6 @@
*
*/
-#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 2f7fb8de6967..94224c22ecc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,7 +34,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 16bd3f44dbe8..cf1d49149cc8 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -5,7 +5,6 @@
config NET_VENDOR_MICROCHIP
bool "Microchip devices"
default y
- depends on SPI
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index b2d2ec8c11e2..5f384f73007d 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -70,7 +70,6 @@
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
-#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 006b0aa8cec3..1fd01688d37b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4161,10 +4161,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
- if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp))
+ struct phy_device *phydev;
+
+ if (!__rtl8169_get_wol(tp))
return false;
- phy_speed_down(tp->dev->phydev, false);
+ /* phydev may not be attached to the netdevice */
+ phydev = mdiobus_get_phy(tp->mii_bus, 0);
+
+ phy_speed_down(phydev, false);
rtl_wol_suspend_quirk(tp);
return true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index f9a61f90cfbc..0f660af01a4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -714,8 +714,9 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
return -ENODEV;
}
- mdio_internal = of_find_compatible_node(mdio_mux, NULL,
+ mdio_internal = of_get_compatible_child(mdio_mux,
"allwinner,sun8i-h3-mdio-internal");
+ of_node_put(mdio_mux);
if (!mdio_internal) {
dev_err(priv->device, "Cannot get internal_mdio node\n");
return -ENODEV;
@@ -729,13 +730,20 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL);
if (IS_ERR(gmac->rst_ephy)) {
ret = PTR_ERR(gmac->rst_ephy);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
return ret;
+ }
continue;
}
dev_info(priv->device, "Found internal PHY node\n");
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
return 0;
}
+
+ of_node_put(mdio_internal);
return -ENODEV;
}
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index e1225545362d..d7636ff03bc7 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -329,7 +329,7 @@ struct phy_driver genphy_10g_driver = {
.name = "Generic 10G PHY",
.soft_reset = gen10g_no_soft_reset,
.config_init = gen10g_config_init,
- .features = 0,
+ .features = PHY_10GBIT_FEATURES,
.config_aneg = gen10g_config_aneg,
.read_status = gen10g_read_status,
.suspend = gen10g_suspend,
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 91162f8e0366..9a22056e8d9e 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
struct device_node *matched_node;
int ret;
- matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
+ matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
if (!matched_node) {
- matched_node = of_find_compatible_node(node, NULL,
- "mrvl,nfc-uart");
+ matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
if (!matched_node)
return -ENODEV;
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 74eaedd5b860..13ebb16be64e 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -67,6 +67,7 @@ bool of_node_name_eq(const struct device_node *np, const char *name)
return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
+EXPORT_SYMBOL(of_node_name_eq);
bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
@@ -75,6 +76,7 @@ bool of_node_name_prefix(const struct device_node *np, const char *prefix)
return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
+EXPORT_SYMBOL(of_node_name_prefix);
int of_n_addr_cells(struct device_node *np)
{
@@ -330,6 +332,8 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, prop_name, &prop_len);
+ if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
+ return true;
if (!cell || !ac)
return false;
prop_len /= sizeof(*cell) * ac;
@@ -390,7 +394,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
struct device_node *cpun;
- for_each_node_by_type(cpun, "cpu") {
+ for_each_of_cpu_node(cpun) {
if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
return cpun;
}
@@ -745,6 +749,45 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_available_child);
/**
+ * of_get_next_cpu_node - Iterate on cpu nodes
+ * @prev: previous child of the /cpus node, or NULL to get first
+ *
+ * Returns a cpu node pointer with refcount incremented, use of_node_put()
+ * on it when done. Returns NULL when prev is the last child. Decrements
+ * the refcount of prev.
+ */
+struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+ struct device_node *next = NULL;
+ unsigned long flags;
+ struct device_node *node;
+
+ if (!prev)
+ node = of_find_node_by_path("/cpus");
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ if (prev)
+ next = prev->sibling;
+ else if (node) {
+ next = node->child;
+ of_node_put(node);
+ }
+ for (; next; next = next->sibling) {
+ if (!(of_node_name_eq(next, "cpu") ||
+ (next->type && !of_node_cmp(next->type, "cpu"))))
+ continue;
+ if (!__of_device_is_available(next))
+ continue;
+ if (of_node_get(next))
+ break;
+ }
+ of_node_put(prev);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return next;
+}
+EXPORT_SYMBOL(of_get_next_cpu_node);
+
+/**
* of_get_compatible_child - Find compatible child node
* @parent: parent node
* @compatible: compatible string
@@ -2013,7 +2056,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
/* OF on pmac has nodes instead of properties named "l2-cache"
* beneath CPU nodes.
*/
- if (!strcmp(np->type, "cpu"))
+ if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu"))
for_each_child_of_node(np, child)
if (!strcmp(child->type, "cache"))
return child;
@@ -2045,3 +2088,105 @@ int of_find_last_cache_level(unsigned int cpu)
return cache_level;
}
+
+/**
+ * of_map_rid - Translate a requester ID through a downstream mapping.
+ * @np: root complex device node.
+ * @rid: device requester ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a device requester ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out)
+{
+ u32 map_mask, masked_rid;
+ int map_len;
+ const __be32 *map = NULL;
+
+ if (!np || !map_name || (!target && !id_out))
+ return -EINVAL;
+
+ map = of_get_property(np, map_name, &map_len);
+ if (!map) {
+ if (target)
+ return -ENODEV;
+ /* Otherwise, no map implies no translation */
+ *id_out = rid;
+ return 0;
+ }
+
+ if (!map_len || map_len % (4 * sizeof(*map))) {
+ pr_err("%pOF: Error: Bad %s length: %d\n", np,
+ map_name, map_len);
+ return -EINVAL;
+ }
+
+ /* The default is to select all bits. */
+ map_mask = 0xffffffff;
+
+ /*
+ * Can be overridden by "{iommu,msi}-map-mask" property.
+ * If of_property_read_u32() fails, the default is used.
+ */
+ if (map_mask_name)
+ of_property_read_u32(np, map_mask_name, &map_mask);
+
+ masked_rid = map_mask & rid;
+ for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+ struct device_node *phandle_node;
+ u32 rid_base = be32_to_cpup(map + 0);
+ u32 phandle = be32_to_cpup(map + 1);
+ u32 out_base = be32_to_cpup(map + 2);
+ u32 rid_len = be32_to_cpup(map + 3);
+
+ if (rid_base & ~map_mask) {
+ pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+ np, map_name, map_name,
+ map_mask, rid_base);
+ return -EFAULT;
+ }
+
+ if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+ continue;
+
+ phandle_node = of_find_node_by_phandle(phandle);
+ if (!phandle_node)
+ return -ENODEV;
+
+ if (target) {
+ if (*target)
+ of_node_put(phandle_node);
+ else
+ *target = phandle_node;
+
+ if (*target != phandle_node)
+ continue;
+ }
+
+ if (id_out)
+ *id_out = masked_rid - rid_base + out_base;
+
+ pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
+ np, map_name, map_mask, rid_base, out_base,
+ rid_len, rid, masked_rid - rid_base + out_base);
+ return 0;
+ }
+
+ pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
+ np, map_name, rid, target && *target ? *target : NULL);
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(of_map_rid);
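of_map_rid() as documented above resolves a requester ID through an "msi-map"- or "iommu-map"-style property. A minimal sketch of a caller resolving an MSI specifier, with a hypothetical root-complex node and RID value:

#include <linux/of.h>

static int example_map_msi_rid(struct device_node *rc_np)
{
        struct device_node *msi_np = NULL;
        u32 msi_spec;
        int ret;

        /* translate RID 0x0010 through the node's "msi-map" */
        ret = of_map_rid(rc_np, 0x0010, "msi-map", "msi-map-mask",
                         &msi_np, &msi_spec);
        if (ret)
                return ret;

        /* ... program msi_spec into the controller at msi_np ... */

        of_node_put(msi_np);    /* of_map_rid() returned it with a reference */
        return 0;
}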
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c7fa5a9697c9..0f27fad9fe94 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -207,7 +207,8 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len
return -ENODEV;
/* Name & Type */
- csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
+ /* %p eats all alphanum characters, so %c must be used here */
+ csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
dev->of_node->type);
tsize = csize;
len -= csize;
@@ -286,7 +287,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
if ((!dev) || (!dev->of_node))
return;
- add_uevent_var(env, "OF_NAME=%s", dev->of_node->name);
+ add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
if (dev->of_node->type && strcmp("<NULL>", dev->of_node->type) != 0)
add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 02ad93a304a4..e1f6f392a4c0 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_pci.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -588,8 +587,8 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
* "msi-map" property.
*/
for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
- if (!of_pci_map_rid(parent_dev->of_node, rid_in, "msi-map",
- "msi-map-mask", np, &rid_out))
+ if (!of_map_rid(parent_dev->of_node, rid_in, "msi-map",
+ "msi-map-mask", np, &rid_out))
break;
return rid_out;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index e92391d6d1bd..5ad1342f5682 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -97,8 +97,8 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
+ dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
+ child, addr);
return 0;
}
@@ -127,8 +127,8 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
- child->name, addr);
+ dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n",
+ child, addr);
return 0;
}
@@ -263,8 +263,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
continue;
/* be noisy to encourage people to set reg property */
- dev_info(&mdio->dev, "scan phy %s at address %i\n",
- child->name, addr);
+ dev_info(&mdio->dev, "scan phy %pOFn at address %i\n",
+ child, addr);
if (of_mdiobus_child_is_phy(child)) {
rc = of_mdiobus_register_phy(mdio, child, addr);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 27d9b4bba535..35c64a4295e0 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -24,18 +24,9 @@ static void __init of_numa_parse_cpu_nodes(void)
{
u32 nid;
int r;
- struct device_node *cpus;
- struct device_node *np = NULL;
-
- cpus = of_find_node_by_path("/cpus");
- if (!cpus)
- return;
-
- for_each_child_of_node(cpus, np) {
- /* Skip things that are not CPUs */
- if (of_node_cmp(np->type, "cpu") != 0)
- continue;
+ struct device_node *np;
+ for_each_of_cpu_node(np) {
r = of_property_read_u32(np, "numa-node-id", &nid);
if (r)
continue;
@@ -46,8 +37,6 @@ static void __init of_numa_parse_cpu_nodes(void)
else
node_set(nid, numa_nodes_parsed);
}
-
- of_node_put(cpus);
}
static int __init of_numa_parse_memory_nodes(void)
@@ -163,8 +152,8 @@ int of_node_to_nid(struct device_node *device)
np = of_get_next_parent(np);
}
if (np && r)
- pr_warn("Invalid \"numa-node-id\" property in node %s\n",
- np->name);
+ pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n",
+ np);
of_node_put(np);
/*
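The rewritten loop above uses for_each_of_cpu_node(), the iterator built on the of_get_next_cpu_node() helper added in the of/base.c hunk earlier. A sketch of the pattern:

#include <linux/of.h>

/*
 * Count available CPU nodes. The iterator manages node references
 * itself; only an early break needs an explicit of_node_put().
 */
static unsigned int example_count_cpu_nodes(void)
{
        struct device_node *np;
        unsigned int n = 0;

        for_each_of_cpu_node(np)
                n++;

        return n;
}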
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 216175d11d3d..5d1567025358 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -27,6 +27,14 @@ struct alias_prop {
char stem[0];
};
+#if defined(CONFIG_SPARC)
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2
+#else
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
+#endif
+
+#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
+
extern struct mutex of_mutex;
extern struct list_head aliases_lookup;
extern struct kset *of_kset;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index eda57ef12fd0..42b1f73ac5f6 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -425,8 +425,8 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs,
for_each_child_of_node(overlay_node, child) {
ret = add_changeset_node(ovcs, target_node, child);
if (ret) {
- pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
- target_node, child->name, ret);
+ pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
+ target_node, child, ret);
of_node_put(child);
return ret;
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 6c59673933e9..04ad312fd85b 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -91,8 +91,8 @@ static void of_device_make_bus_id(struct device *dev)
*/
reg = of_get_property(node, "reg", NULL);
if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
- dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s",
- (unsigned long long)addr, node->name,
+ dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
+ (unsigned long long)addr, node,
dev_name(dev));
return;
}
@@ -142,8 +142,8 @@ struct platform_device *of_device_alloc(struct device_node *np,
WARN_ON(rc);
}
if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
- pr_debug("not all legacy IRQ resources mapped for %s\n",
- np->name);
+ pr_debug("not all legacy IRQ resources mapped for %pOFn\n",
+ np);
}
dev->dev.of_node = of_node_get(np);
diff --git a/drivers/of/unittest-data/overlay_15.dts b/drivers/of/unittest-data/overlay_15.dts
index b98f2514df4b..5728490474f6 100644
--- a/drivers/of/unittest-data/overlay_15.dts
+++ b/drivers/of/unittest-data/overlay_15.dts
@@ -20,8 +20,8 @@
#size-cells = <0>;
reg = <0>;
- test-mux-dev {
- reg = <32>;
+ test-mux-dev@20 {
+ reg = <0x20>;
compatible = "unittest-i2c-dev";
status = "okay";
};
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 25cf397b8f6b..4ea024d908ee 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -103,8 +103,8 @@
#size-cells = <0>;
reg = <0>;
- test-mux-dev {
- reg = <32>;
+ test-mux-dev@20 {
+ reg = <0x20>;
compatible = "unittest-i2c-dev";
status = "okay";
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 41b49716ac75..a3a6866765f2 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -212,8 +212,8 @@ static int __init of_unittest_check_node_linkage(struct device_node *np)
for_each_child_of_node(np, child) {
if (child->parent != np) {
- pr_err("Child node %s links to wrong parent %s\n",
- child->name, np->name);
+ pr_err("Child node %pOFn links to wrong parent %pOFn\n",
+ child, np);
rc = -EINVAL;
goto put_child;
}
@@ -299,6 +299,10 @@ static void __init of_unittest_printf(void)
of_unittest_printf_one(np, "%pOF", full_name);
of_unittest_printf_one(np, "%pOFf", full_name);
+ of_unittest_printf_one(np, "%pOFn", "dev");
+ of_unittest_printf_one(np, "%2pOFn", "dev");
+ of_unittest_printf_one(np, "%5pOFn", " dev");
+ of_unittest_printf_one(np, "%pOFnc", "dev:test-sub-device");
of_unittest_printf_one(np, "%pOFp", phandle_str);
of_unittest_printf_one(np, "%pOFP", "dev@100");
of_unittest_printf_one(np, "ABC %pOFP ABC", "ABC dev@100 ABC");
@@ -1046,16 +1050,16 @@ static void __init of_unittest_platform_populate(void)
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(of_find_device_by_node(grandchild),
- "Could not create device for node '%s'\n",
- grandchild->name);
+ "Could not create device for node '%pOFn'\n",
+ grandchild);
}
of_platform_depopulate(&test_bus->dev);
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(!of_find_device_by_node(grandchild),
- "device didn't get destroyed '%s'\n",
- grandchild->name);
+ "device didn't get destroyed '%pOFn'\n",
+ grandchild);
}
platform_device_unregister(test_bus);
@@ -2357,11 +2361,14 @@ static __init void of_unittest_overlay_high_level(void)
}
}
- for (np = overlay_base_root->child; np; np = np->sibling) {
- if (of_get_child_by_name(of_root, np->name)) {
- unittest(0, "illegal node name in overlay_base %s",
- np->name);
- return;
+ for_each_child_of_node(overlay_base_root, np) {
+ struct device_node *base_child;
+ for_each_child_of_node(of_root, base_child) {
+ if (!strcmp(np->full_name, base_child->full_name)) {
+ unittest(0, "illegal node name in overlay_base %pOFn",
+ np);
+ return;
+ }
}
}
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index ee54f5bacad1..6758fd7c382e 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -746,7 +746,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
pe = edev ? edev->pe : NULL;
if (pe) {
eeh_serialize_lock(&flags);
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(pe);
eeh_serialize_unlock(flags);
eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
}
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 1836b8ddf292..4c4217d0c3f1 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -355,107 +355,6 @@ failed:
EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
-/**
- * of_pci_map_rid - Translate a requester ID through a downstream mapping.
- * @np: root complex device node.
- * @rid: PCI requester ID to map.
- * @map_name: property name of the map to use.
- * @map_mask_name: optional property name of the mask to use.
- * @target: optional pointer to a target device node.
- * @id_out: optional pointer to receive the translated ID.
- *
- * Given a PCI requester ID, look up the appropriate implementation-defined
- * platform ID and/or the target device which receives transactions on that
- * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
- * @id_out may be NULL if only the other is required. If @target points to
- * a non-NULL device node pointer, only entries targeting that node will be
- * matched; if it points to a NULL value, it will receive the device node of
- * the first matching target phandle, with a reference held.
- *
- * Return: 0 on success or a standard error code on failure.
- */
-int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out)
-{
- u32 map_mask, masked_rid;
- int map_len;
- const __be32 *map = NULL;
-
- if (!np || !map_name || (!target && !id_out))
- return -EINVAL;
-
- map = of_get_property(np, map_name, &map_len);
- if (!map) {
- if (target)
- return -ENODEV;
- /* Otherwise, no map implies no translation */
- *id_out = rid;
- return 0;
- }
-
- if (!map_len || map_len % (4 * sizeof(*map))) {
- pr_err("%pOF: Error: Bad %s length: %d\n", np,
- map_name, map_len);
- return -EINVAL;
- }
-
- /* The default is to select all bits. */
- map_mask = 0xffffffff;
-
- /*
- * Can be overridden by "{iommu,msi}-map-mask" property.
- * If of_property_read_u32() fails, the default is used.
- */
- if (map_mask_name)
- of_property_read_u32(np, map_mask_name, &map_mask);
-
- masked_rid = map_mask & rid;
- for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
- struct device_node *phandle_node;
- u32 rid_base = be32_to_cpup(map + 0);
- u32 phandle = be32_to_cpup(map + 1);
- u32 out_base = be32_to_cpup(map + 2);
- u32 rid_len = be32_to_cpup(map + 3);
-
- if (rid_base & ~map_mask) {
- pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
- np, map_name, map_name,
- map_mask, rid_base);
- return -EFAULT;
- }
-
- if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
- continue;
-
- phandle_node = of_find_node_by_phandle(phandle);
- if (!phandle_node)
- return -ENODEV;
-
- if (target) {
- if (*target)
- of_node_put(phandle_node);
- else
- *target = phandle_node;
-
- if (*target != phandle_node)
- continue;
- }
-
- if (id_out)
- *id_out = masked_rid - rid_base + out_base;
-
- pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
- np, map_name, map_mask, rid_base, out_base,
- rid_len, rid, masked_rid - rid_base + out_base);
- return 0;
- }
-
- pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
- np, map_name, rid, target && *target ? *target : NULL);
- return -EFAULT;
-}
-
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
* of_irq_parse_pci - Resolve the interrupt for a PCI device
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 9671ded549f0..b31abe35ed2c 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -230,7 +230,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
(__ioremap_at(io.start, cf->io_virt, cf->io_size,
- pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)) {
+ pgprot_noncached(PAGE_KERNEL)) == NULL)) {
dev_err(device, "can't ioremap ranges\n");
status = -ENOMEM;
goto fail1;
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 80582c8f98fa..0e202d4273fb 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -1022,12 +1022,13 @@ static int twl4030_bci_probe(struct platform_device *pdev)
if (bci->dev->of_node) {
struct device_node *phynode;
- phynode = of_find_compatible_node(bci->dev->of_node->parent,
- NULL, "ti,twl4030-usb");
+ phynode = of_get_compatible_child(bci->dev->of_node->parent,
+ "ti,twl4030-usb");
if (phynode) {
bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
bci->transceiver = devm_usb_get_phy_by_node(
bci->dev, phynode, &bci->usb_nb);
+ of_node_put(phynode);
if (IS_ERR(bci->transceiver)) {
ret = PTR_ERR(bci->transceiver);
if (ret == -EPROBE_DEFER)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b762d0fd773c..3bb2b3351e35 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1272,8 +1272,6 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_READ:
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
- case REQ_OP_ZONE_REPORT:
- return sd_zbc_setup_report_cmnd(cmd);
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_reset_cmnd(cmd);
default:
@@ -1802,6 +1800,7 @@ static const struct block_device_operations sd_fops = {
.check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
+ .report_zones = sd_zbc_report_zones,
.pr_ops = &sd_pr_ops,
};
@@ -1953,16 +1952,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
break;
- case REQ_OP_ZONE_REPORT:
- if (!result) {
- good_bytes = scsi_bufflen(SCpnt)
- - scsi_get_resid(SCpnt);
- scsi_set_resid(SCpnt, 0);
- } else {
- good_bytes = 0;
- scsi_set_resid(SCpnt, blk_rq_bytes(req));
- }
- break;
default:
/*
* In case of bogus fw or device, we could end up having
@@ -3425,8 +3414,6 @@ static int sd_remove(struct device *dev)
del_gendisk(sdkp->disk);
sd_shutdown(dev);
- sd_zbc_remove(sdkp);
-
free_opal_dev(sdkp->opal_dev);
blk_register_region(devt, SD_MINORS, NULL,
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index a7d4f50b67d4..1d63f3a23ffb 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -76,7 +76,6 @@ struct scsi_disk {
#ifdef CONFIG_BLK_DEV_ZONED
u32 nr_zones;
u32 zone_blocks;
- u32 zone_shift;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
@@ -271,12 +270,13 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
-extern void sd_zbc_remove(struct scsi_disk *sdkp);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
+extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask);
#else /* CONFIG_BLK_DEV_ZONED */
@@ -286,15 +286,8 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
return 0;
}
-static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
-
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
-{
- return BLKPREP_INVALID;
-}
-
static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
return BLKPREP_INVALID;
@@ -304,6 +297,8 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
unsigned int good_bytes,
struct scsi_sense_hdr *sshdr) {}
+#define sd_zbc_report_zones NULL
+
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* _SCSI_DISK_H */
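The "#define sd_zbc_report_zones NULL" stub above lets the sd_fops
initializer in sd.c stay unconditional: when zoned support is compiled
out, the method pointer is simply NULL and callers can test for support
at runtime. A sketch of the idiom, assuming a kernel context; the
CONFIG_FOO_ZONED symbol and foo_* names are illustrative:

#include <linux/blkdev.h>

#ifdef CONFIG_FOO_ZONED
int foo_report_zones(struct gendisk *disk, sector_t sector,
		     struct blk_zone *zones, unsigned int *nr_zones,
		     gfp_t gfp_mask);
#else
/* Compiled out: the fops initializer still builds, callers see NULL. */
#define foo_report_zones NULL
#endif

static const struct block_device_operations foo_fops = {
	.report_zones	= foo_report_zones, /* NULL when !CONFIG_FOO_ZONED */
};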
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 412c1787dcd9..e06c48c866e4 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -62,16 +62,22 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
}
/**
- * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
+ * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
* @sdkp: The target disk
* @buf: Buffer to use for the reply
* @buflen: the buffer size
* @lba: Start LBA of the report
+ * @partial: Do partial report
*
* For internal use during device validation.
+ * Using partial=true can significantly speed up execution of a report zones
+ * command because the disk does not have to count all possible matching
+ * zones and only reports as many zones as fit in the command reply buffer.
*/
-static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
- unsigned int buflen, sector_t lba)
+static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ unsigned int buflen, sector_t lba,
+ bool partial)
{
struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout;
@@ -85,6 +91,8 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
cmd[1] = ZI_REPORT_ZONES;
put_unaligned_be64(lba, &cmd[2]);
put_unaligned_be32(buflen, &cmd[10]);
+ if (partial)
+ cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
memset(buf, 0, buflen);
result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
@@ -110,108 +118,56 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
/**
- * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
- * @cmd: The command to setup
+ * sd_zbc_report_zones - Disk report zones operation.
+ * @disk: The target disk
+ * @sector: Start 512B sector of the report
+ * @zones: Array of zone descriptors
+ * @nr_zones: Number of descriptors in the array
+ * @gfp_mask: Memory allocation mask
*
- * Call in sd_init_command() for a REQ_OP_ZONE_REPORT request.
+ * Execute a report zones command on the target disk.
*/
-int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
{
- struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- sector_t lba, sector = blk_rq_pos(rq);
- unsigned int nr_bytes = blk_rq_bytes(rq);
- int ret;
-
- WARN_ON(nr_bytes == 0);
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ unsigned int i, buflen, nrz = *nr_zones;
+ unsigned char *buf;
+ size_t offset = 0;
+ int ret = 0;
if (!sd_is_zoned(sdkp))
/* Not a zoned device */
- return BLKPREP_KILL;
-
- ret = scsi_init_io(cmd);
- if (ret != BLKPREP_OK)
- return ret;
-
- cmd->cmd_len = 16;
- memset(cmd->cmnd, 0, cmd->cmd_len);
- cmd->cmnd[0] = ZBC_IN;
- cmd->cmnd[1] = ZI_REPORT_ZONES;
- lba = sectors_to_logical(sdkp->device, sector);
- put_unaligned_be64(lba, &cmd->cmnd[2]);
- put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
- /* Do partial report for speeding things up */
- cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
-
- cmd->sc_data_direction = DMA_FROM_DEVICE;
- cmd->sdb.length = nr_bytes;
- cmd->transfersize = sdkp->device->sector_size;
- cmd->allowed = 0;
-
- return BLKPREP_OK;
-}
-
-/**
- * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
- * @scmd: The completed report zones command
- * @good_bytes: reply size in bytes
- *
- * Convert all reported zone descriptors to struct blk_zone. The conversion
- * is done in-place, directly in the request specified sg buffer.
- */
-static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
- unsigned int good_bytes)
-{
- struct request *rq = scmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- struct sg_mapping_iter miter;
- struct blk_zone_report_hdr hdr;
- struct blk_zone zone;
- unsigned int offset, bytes = 0;
- unsigned long flags;
- u8 *buf;
-
- if (good_bytes < 64)
- return;
+ return -EOPNOTSUPP;
- memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
-
- sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
-
- local_irq_save(flags);
- while (sg_miter_next(&miter) && bytes < good_bytes) {
+ /*
+ * Get a reply buffer for the number of requested zones plus a header.
+ * For ATA, buffers must be aligned to 512B.
+ */
+ buflen = roundup((nrz + 1) * 64, 512);
+ buf = kmalloc(buflen, gfp_mask);
+ if (!buf)
+ return -ENOMEM;
- buf = miter.addr;
- offset = 0;
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+ sectors_to_logical(sdkp->device, sector), true);
+ if (ret)
+ goto out_free_buf;
- if (bytes == 0) {
- /* Set the report header */
- hdr.nr_zones = min_t(unsigned int,
- (good_bytes - 64) / 64,
- get_unaligned_be32(&buf[0]) / 64);
- memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
- offset += 64;
- bytes += 64;
- }
+ nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
+ for (i = 0; i < nrz; i++) {
+ offset += 64;
+ sd_zbc_parse_report(sdkp, buf + offset, zones);
+ zones++;
+ }
- /* Parse zone descriptors */
- while (offset < miter.length && hdr.nr_zones) {
- WARN_ON(offset > miter.length);
- buf = miter.addr + offset;
- sd_zbc_parse_report(sdkp, buf, &zone);
- memcpy(buf, &zone, sizeof(struct blk_zone));
- offset += 64;
- bytes += 64;
- hdr.nr_zones--;
- }
+ *nr_zones = nrz;
- if (!hdr.nr_zones)
- break;
+out_free_buf:
+ kfree(buf);
- }
- sg_miter_stop(&miter);
- local_irq_restore(flags);
+ return ret;
}
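Each REPORT ZONES reply carries a 64-byte header followed by 64-byte
zone descriptors, and for ATA the buffer size must be a multiple of
512 bytes, hence the roundup((nrz + 1) * 64, 512) sizing above. A
small, runnable userspace sketch of that computation (illustrative
only, not part of the patch):

#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int nrz;

	for (nrz = 1; nrz <= 64; nrz *= 4)
		printf("%3u zones -> %5u byte buffer\n",
		       nrz, ROUNDUP((nrz + 1) * 64, 512));
	/* 1 -> 512, 4 -> 512, 16 -> 1536, 64 -> 4608 */
	return 0;
}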
/**
@@ -294,30 +250,23 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
break;
-
- case REQ_OP_ZONE_REPORT:
-
- if (!result)
- sd_zbc_report_zones_complete(cmd, good_bytes);
- break;
-
}
}
/**
- * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
+ * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
* @sdkp: Target disk
* @buf: Buffer where to store the VPD page data
*
- * Read VPD page B6.
+ * Read VPD page B6, get information and check that reads are unconstrained.
*/
-static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
- unsigned char *buf)
+static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
+ unsigned char *buf)
{
if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
sd_printk(KERN_NOTICE, sdkp,
- "Unconstrained-read check failed\n");
+ "Read zoned characteristics VPD page failed\n");
return -ENODEV;
}
@@ -335,43 +284,17 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
}
- return 0;
-}
-
-/**
- * sd_zbc_check_capacity - Check reported capacity.
- * @sdkp: Target disk
- * @buf: Buffer to use for commands
- *
- * ZBC drive may report only the capacity of the first conventional zones at
- * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
- * Check this here. If the disk reported only its conventional zones capacity,
- * get the total capacity by doing a report zones.
- */
-static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
-{
- sector_t lba;
- int ret;
-
- if (sdkp->rc_basis != 0)
- return 0;
-
- /* Do a report zone to get the maximum LBA to check capacity */
- ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
- if (ret)
- return ret;
-
- /* The max_lba field is the capacity of this device */
- lba = get_unaligned_be64(&buf[8]);
- if (lba + 1 == sdkp->capacity)
- return 0;
-
- if (sdkp->first_scan)
- sd_printk(KERN_WARNING, sdkp,
- "Changing capacity from %llu to max LBA+1 %llu\n",
- (unsigned long long)sdkp->capacity,
- (unsigned long long)lba + 1);
- sdkp->capacity = lba + 1;
+ /*
+ * Check for unconstrained reads: host-managed devices with
+ * constrained reads (drives that fail reads past the write
+ * pointer) are not supported.
+ */
+ if (!sdkp->urswrz) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "constrained-read devices are not supported\n");
+ return -ENODEV;
+ }
return 0;
}
@@ -379,24 +302,27 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
#define SD_ZBC_BUF_SIZE 131072U
/**
- * sd_zbc_check_zone_size - Check the device zone sizes
+ * sd_zbc_check_zones - Check the device capacity and zone sizes
* @sdkp: Target disk
*
- * Check that all zones of the device are equal. The last zone can however
- * be smaller. The zone size must also be a power of two number of LBAs.
+ * Check that the device capacity as reported by READ CAPACITY matches the
+ * max_lba value (plus one) of the report zones command reply. Also check that
+ * all zones of the device have an equal size, only allowing the last zone of
+ * the disk to have a smaller size (runt zone). The zone size must also be a
+ * power of two.
*
* Returns the zone size in number of blocks upon success or an error code
* upon failure.
*/
-static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
{
u64 zone_blocks = 0;
- sector_t block = 0;
+ sector_t max_lba, block = 0;
unsigned char *buf;
unsigned char *rec;
unsigned int buf_len;
unsigned int list_length;
- s64 ret;
+ int ret;
u8 same;
/* Get a buffer */
@@ -404,11 +330,28 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
if (!buf)
return -ENOMEM;
- /* Do a report zone to get the same field */
- ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
+ /* Do a report zone to get max_lba and the same field */
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0, false);
if (ret)
goto out_free;
+ if (sdkp->rc_basis == 0) {
+ /* The max_lba field is the capacity of this device */
+ max_lba = get_unaligned_be64(&buf[8]);
+ if (sdkp->capacity != max_lba + 1) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_WARNING, sdkp,
+ "Changing capacity from %llu to max LBA+1 %llu\n",
+ (unsigned long long)sdkp->capacity,
+ (unsigned long long)max_lba + 1);
+ sdkp->capacity = max_lba + 1;
+ }
+ }
+
+ /*
+ * Check same field: for any value other than 0, we know that all zones
+ * have the same size.
+ */
same = buf[4] & 0x0f;
if (same > 0) {
rec = &buf[64];
@@ -445,8 +388,8 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
}
if (block < sdkp->capacity) {
- ret = sd_zbc_report_zones(sdkp, buf,
- SD_ZBC_BUF_SIZE, block);
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
+ block, true);
if (ret)
goto out_free;
}
@@ -470,9 +413,10 @@ out:
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Zone size too large\n");
- ret = -ENODEV;
+ ret = -EFBIG;
} else {
- ret = zone_blocks;
+ *zblocks = zone_blocks;
+ ret = 0;
}
out_free:
@@ -481,191 +425,11 @@ out_free:
return ret;
}
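sd_zbc_check_zones above only accepts a constant, power-of-two zone
size (oversized zones now fail with -EFBIG rather than -ENODEV), so
that the LBA-to-zone conversion reduces to a shift. A runnable
userspace sketch of the size check (illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool zone_size_ok(uint64_t zone_blocks)
{
	/* Power of two -> zone index is (lba >> ilog2(zone_blocks)). */
	return zone_blocks && !(zone_blocks & (zone_blocks - 1));
}

int main(void)
{
	printf("%d %d\n", zone_size_ok(65536), zone_size_ok(65537)); /* 1 0 */
	return 0;
}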
-/**
- * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
- * @nr_zones: Number of zones to allocate space for.
- * @numa_node: NUMA node to allocate the memory from.
- */
-static inline unsigned long *
-sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
-{
- return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
- GFP_KERNEL, numa_node);
-}
-
-/**
- * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
- * @sdkp: disk used
- * @buf: report reply buffer
- * @buflen: length of @buf
- * @zone_shift: logarithm base 2 of the number of blocks in a zone
- * @seq_zones_bitmap: bitmap of sequential zones to set
- *
- * Parse reported zone descriptors in @buf to identify sequential zones and
- * set the reported zone bit in @seq_zones_bitmap accordingly.
- * Since read-only and offline zones cannot be written, do not
- * mark them as sequential in the bitmap.
- * Return the LBA after the last zone reported.
- */
-static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
- unsigned int buflen, u32 zone_shift,
- unsigned long *seq_zones_bitmap)
-{
- sector_t lba, next_lba = sdkp->capacity;
- unsigned int buf_len, list_length;
- unsigned char *rec;
- u8 type, cond;
-
- list_length = get_unaligned_be32(&buf[0]) + 64;
- buf_len = min(list_length, buflen);
- rec = buf + 64;
-
- while (rec < buf + buf_len) {
- type = rec[0] & 0x0f;
- cond = (rec[1] >> 4) & 0xf;
- lba = get_unaligned_be64(&rec[16]);
- if (type != ZBC_ZONE_TYPE_CONV &&
- cond != ZBC_ZONE_COND_READONLY &&
- cond != ZBC_ZONE_COND_OFFLINE)
- set_bit(lba >> zone_shift, seq_zones_bitmap);
- next_lba = lba + get_unaligned_be64(&rec[8]);
- rec += 64;
- }
-
- return next_lba;
-}
-
-/**
- * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
- * @sdkp: target disk
- * @zone_shift: logarithm base 2 of the number of blocks in a zone
- * @nr_zones: number of zones to set up a seq zone bitmap for
- *
- * Allocate a zone bitmap and initialize it by identifying sequential zones.
- */
-static unsigned long *
-sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
- u32 nr_zones)
-{
- struct request_queue *q = sdkp->disk->queue;
- unsigned long *seq_zones_bitmap;
- sector_t lba = 0;
- unsigned char *buf;
- int ret = -ENOMEM;
-
- seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
- if (!seq_zones_bitmap)
- return ERR_PTR(-ENOMEM);
-
- buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- goto out;
-
- while (lba < sdkp->capacity) {
- ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
- if (ret)
- goto out;
- lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
- zone_shift, seq_zones_bitmap);
- }
-
- if (lba != sdkp->capacity) {
- /* Something went wrong */
- ret = -EIO;
- }
-
-out:
- kfree(buf);
- if (ret) {
- kfree(seq_zones_bitmap);
- return ERR_PTR(ret);
- }
- return seq_zones_bitmap;
-}
-
-static void sd_zbc_cleanup(struct scsi_disk *sdkp)
-{
- struct request_queue *q = sdkp->disk->queue;
-
- kfree(q->seq_zones_bitmap);
- q->seq_zones_bitmap = NULL;
-
- kfree(q->seq_zones_wlock);
- q->seq_zones_wlock = NULL;
-
- q->nr_zones = 0;
-}
-
-static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
-{
- struct request_queue *q = sdkp->disk->queue;
- u32 zone_shift = ilog2(zone_blocks);
- u32 nr_zones;
- int ret;
-
- /* chunk_sectors indicates the zone size */
- blk_queue_chunk_sectors(q,
- logical_to_sectors(sdkp->device, zone_blocks));
- nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
-
- /*
- * Initialize the device request queue information if the number
- * of zones changed.
- */
- if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
- unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
- size_t zone_bitmap_size;
-
- if (nr_zones) {
- seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
- q->node);
- if (!seq_zones_wlock) {
- ret = -ENOMEM;
- goto err;
- }
-
- seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
- zone_shift, nr_zones);
- if (IS_ERR(seq_zones_bitmap)) {
- ret = PTR_ERR(seq_zones_bitmap);
- kfree(seq_zones_wlock);
- goto err;
- }
- }
- zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
- sizeof(unsigned long);
- blk_mq_freeze_queue(q);
- if (q->nr_zones != nr_zones) {
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
-
- sdkp->zone_blocks = zone_blocks;
- sdkp->zone_shift = zone_shift;
- sdkp->nr_zones = nr_zones;
- q->nr_zones = nr_zones;
- swap(q->seq_zones_wlock, seq_zones_wlock);
- swap(q->seq_zones_bitmap, seq_zones_bitmap);
- } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
- zone_bitmap_size) != 0) {
- memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
- zone_bitmap_size);
- }
- blk_mq_unfreeze_queue(q);
- kfree(seq_zones_wlock);
- kfree(seq_zones_bitmap);
- }
-
- return 0;
-
-err:
- sd_zbc_cleanup(sdkp);
- return ret;
-}
-
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
- int64_t zone_blocks;
+ struct gendisk *disk = sdkp->disk;
+ unsigned int nr_zones;
+ u32 zone_blocks;
int ret;
if (!sd_is_zoned(sdkp))
@@ -675,26 +439,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
*/
return 0;
- /* Get zoned block device characteristics */
- ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
- if (ret)
- goto err;
-
- /*
- * Check for unconstrained reads: host-managed devices with
- * constrained reads (drives failing read after write pointer)
- * are not supported.
- */
- if (!sdkp->urswrz) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "constrained reads devices are not supported\n");
- ret = -ENODEV;
- goto err;
- }
-
- /* Check capacity */
- ret = sd_zbc_check_capacity(sdkp, buf);
+ /* Check zoned block device characteristics (unconstrained reads) */
+ ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
if (ret)
goto err;
@@ -702,33 +448,44 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
* Check zone size: only devices with a constant zone size (except
* an eventual last runt zone) that is a power of 2 are supported.
*/
- zone_blocks = sd_zbc_check_zone_size(sdkp);
- ret = -EFBIG;
- if (zone_blocks != (u32)zone_blocks)
- goto err;
- ret = zone_blocks;
- if (ret < 0)
+ ret = sd_zbc_check_zones(sdkp, &zone_blocks);
+ if (ret != 0)
goto err;
/* The drive satisfies the kernel restrictions: set it up */
- ret = sd_zbc_setup(sdkp, zone_blocks);
- if (ret)
- goto err;
+ blk_queue_chunk_sectors(sdkp->disk->queue,
+ logical_to_sectors(sdkp->device, zone_blocks));
+ nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
+
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ /*
+ * If something changed, revalidate the disk zone bitmaps once we have
+ * the capacity, that is, on the second revalidate execution during disk
+ * scan and always during normal revalidate.
+ */
+ if (sdkp->first_scan)
+ return 0;
+ if (sdkp->zone_blocks != zone_blocks ||
+ sdkp->nr_zones != nr_zones ||
+ disk->queue->nr_zones != nr_zones) {
+ ret = blk_revalidate_disk_zones(disk);
+ if (ret != 0)
+ goto err;
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->nr_zones = nr_zones;
+ }
return 0;
err:
sdkp->capacity = 0;
- sd_zbc_cleanup(sdkp);
return ret;
}
-void sd_zbc_remove(struct scsi_disk *sdkp)
-{
- sd_zbc_cleanup(sdkp);
-}
-
void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
if (!sd_is_zoned(sdkp) || !sdkp->capacity)
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index 5abb08ffb74d..ffc5311c0ed8 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -383,7 +383,7 @@ int __init dove_init_pmu(void)
domains_node = of_get_child_by_name(np_pmu, "domains");
if (!domains_node) {
- pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
+ pr_err("%pOFn: failed to find domains sub-node\n", np_pmu);
return 0;
}
@@ -396,7 +396,7 @@ int __init dove_init_pmu(void)
pmu->pmc_base = of_iomap(pmu->of_node, 0);
pmu->pmu_base = of_iomap(pmu->of_node, 1);
if (!pmu->pmc_base || !pmu->pmu_base) {
- pr_err("%s: failed to map PMU\n", np_pmu->name);
+ pr_err("%pOFn: failed to map PMU\n", np_pmu);
iounmap(pmu->pmu_base);
iounmap(pmu->pmc_base);
kfree(pmu);
@@ -414,7 +414,7 @@ int __init dove_init_pmu(void)
break;
domain->pmu = pmu;
- domain->base.name = kstrdup(np->name, GFP_KERNEL);
+ domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
if (!domain->base.name) {
kfree(domain);
break;
@@ -444,7 +444,7 @@ int __init dove_init_pmu(void)
/* Loss of the interrupt controller is not a fatal error. */
parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
if (!parent_irq) {
- pr_err("%s: no interrupt specified\n", np_pmu->name);
+ pr_err("%pOFn: no interrupt specified\n", np_pmu);
} else {
ret = dove_init_pmu_irq(pmu, parent_irq);
if (ret)
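This hunk and the following SoC and thermal hunks replace direct
np->name dereferences with the %pOFn printf extension, which derives
the node name from the device_node pointer itself. A short sketch of
the two related specifiers, assuming a kernel context:

#include <linux/of.h>
#include <linux/printk.h>

static void report_node(struct device_node *np)
{
	pr_info("probing %pOFn\n", np);	/* node name only */
	pr_info("probing %pOF\n", np);	/* full node path */
}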
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 6fd5fef5f39b..109b38de3176 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -419,7 +419,7 @@ static size_t fqd_sz, pfdr_sz;
static int zero_priv_mem(phys_addr_t addr, size_t sz)
{
/* map as cacheable, non-guarded */
- void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+ void __iomem *tmpp = ioremap_cache(addr, sz);
if (!tmpp)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index f744c214f680..f78c34647ca2 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -131,7 +131,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
pdev = of_find_device_by_node(np2);
if (!pdev) {
- pr_err("%s: failed to lookup pdev\n", np2->name);
+ pr_err("%pOFn: failed to lookup pdev\n", np2);
of_node_put(np2);
return -EINVAL;
}
@@ -153,7 +153,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
pdev = of_find_device_by_node(np2);
if (!pdev) {
ret = -EINVAL;
- pr_err("%s: failed to lookup pdev\n", np2->name);
+ pr_err("%pOFn: failed to lookup pdev\n", np2);
of_node_put(np2);
goto err_miss_siram_property;
}
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 57af8a537332..4bda793ba6ae 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -219,7 +219,7 @@ static int apr_add_device(struct device *dev, struct device_node *np,
adev->domain_id = id->domain_id;
adev->version = id->svc_version;
if (np)
- strncpy(adev->name, np->name, APR_NAME_SIZE);
+ snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np);
else
strncpy(adev->name, id->name, APR_NAME_SIZE);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 6dff8682155f..6f86a726bb45 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -392,21 +392,21 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
error = of_property_read_u32(node, "reg", &id);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- node->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ node, error);
return -EINVAL;
}
if (id >= pmu->info->num_domains) {
- dev_err(pmu->dev, "%s: invalid domain id %d\n",
- node->name, id);
+ dev_err(pmu->dev, "%pOFn: invalid domain id %d\n",
+ node, id);
return -EINVAL;
}
pd_info = &pmu->info->domain_info[id];
if (!pd_info) {
- dev_err(pmu->dev, "%s: undefined domain id %d\n",
- node->name, id);
+ dev_err(pmu->dev, "%pOFn: undefined domain id %d\n",
+ node, id);
return -EINVAL;
}
@@ -424,8 +424,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
if (!pd->clks)
return -ENOMEM;
} else {
- dev_dbg(pmu->dev, "%s: doesn't have clocks: %d\n",
- node->name, pd->num_clks);
+ dev_dbg(pmu->dev, "%pOFn: doesn't have clocks: %d\n",
+ node, pd->num_clks);
pd->num_clks = 0;
}
@@ -434,8 +434,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
if (IS_ERR(pd->clks[i].clk)) {
error = PTR_ERR(pd->clks[i].clk);
dev_err(pmu->dev,
- "%s: failed to get clk at index %d: %d\n",
- node->name, i, error);
+ "%pOFn: failed to get clk at index %d: %d\n",
+ node, i, error);
return error;
}
}
@@ -486,8 +486,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
error = rockchip_pd_power(pd, true);
if (error) {
dev_err(pmu->dev,
- "failed to power on domain '%s': %d\n",
- node->name, error);
+ "failed to power on domain '%pOFn': %d\n",
+ node, error);
goto err_unprepare_clocks;
}
@@ -575,24 +575,24 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
error = of_property_read_u32(parent, "reg", &idx);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- parent->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ parent, error);
goto err_out;
}
parent_domain = pmu->genpd_data.domains[idx];
error = rockchip_pm_add_one_domain(pmu, np);
if (error) {
- dev_err(pmu->dev, "failed to handle node %s: %d\n",
- np->name, error);
+ dev_err(pmu->dev, "failed to handle node %pOFn: %d\n",
+ np, error);
goto err_out;
}
error = of_property_read_u32(np, "reg", &idx);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- np->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ np, error);
goto err_out;
}
child_domain = pmu->genpd_data.domains[idx];
@@ -683,16 +683,16 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
for_each_available_child_of_node(np, node) {
error = rockchip_pm_add_one_domain(pmu, node);
if (error) {
- dev_err(dev, "failed to handle node %s: %d\n",
- node->name, error);
+ dev_err(dev, "failed to handle node %pOFn: %d\n",
+ node, error);
of_node_put(node);
goto err_out;
}
error = rockchip_pm_add_subdomain(pmu, node);
if (error < 0) {
- dev_err(dev, "failed to handle subdomain node %s: %d\n",
- node->name, error);
+ dev_err(dev, "failed to handle subdomain node %pOFn: %d\n",
+ node, error);
of_node_put(node);
goto err_out;
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 2d6f3fcf3211..acbe63e925d5 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -796,7 +796,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- pr_err("powergate lookup failed for %s: %d\n", np->name, id);
+ pr_err("powergate lookup failed for %pOFn: %d\n", np, id);
goto free_mem;
}
@@ -816,13 +816,13 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- pr_err("failed to get clocks for %s: %d\n", np->name, err);
+ pr_err("failed to get clocks for %pOFn: %d\n", np, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- pr_err("failed to get resets for %s: %d\n", np->name, err);
+ pr_err("failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
@@ -851,15 +851,15 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = pm_genpd_init(&pg->genpd, NULL, off);
if (err < 0) {
- pr_err("failed to initialise PM domain %s: %d\n", np->name,
+ pr_err("failed to initialise PM domain %pOFn: %d\n", np,
err);
goto remove_resets;
}
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- pr_err("failed to add PM domain provider for %s: %d\n",
- np->name, err);
+ pr_err("failed to add PM domain provider for %pOFn: %d\n",
+ np, err);
goto remove_genpd;
}
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 224d7ddeeb76..bbd4e5bc8707 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -544,15 +544,15 @@ static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
ret = of_address_to_resource(node, index, &res);
if (ret) {
- dev_err(dev, "Can't translate of node(%s) address for index(%d)\n",
- node->name, index);
+ dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
return ERR_PTR(ret);
}
regs = devm_ioremap_resource(kdev->dev, &res);
if (IS_ERR(regs))
- dev_err(dev, "Failed to map register base for index(%d) node(%s)\n",
- index, node->name);
+ dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
if (_size)
*_size = resource_size(&res);
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 6755f2af5619..b5d5673c255c 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1382,15 +1382,15 @@ static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
ret = of_address_to_resource(node, index, &res);
if (ret) {
- dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
- node->name, index);
+ dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
return ERR_PTR(ret);
}
regs = devm_ioremap_resource(kdev->dev, &res);
if (IS_ERR(regs))
- dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
- index, node->name);
+ dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
return regs;
}
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index 3be9519654e5..cf3fad2cb871 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -2,7 +2,7 @@
* TURBOchannel bus services.
*
* Copyright (c) Harald Koerfgen, 1998
- * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
+ * Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki
* Copyright (c) 2005 James Simmons
*
* This file is subject to the terms and conditions of the GNU
@@ -10,6 +10,7 @@
* directory of this archive for more details.
*/
#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
tdev->dev.bus = &tc_bus_type;
tdev->slot = slot;
+ /* TURBOchannel has 34-bit DMA addressing (16GiB space). */
+ tdev->dma_mask = DMA_BIT_MASK(34);
+ tdev->dev.dma_mask = &tdev->dma_mask;
+ tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
+
for (i = 0; i < 8; i++) {
tdev->firmware[i] =
readb(module + offset + TC_FIRM_VER + 4 * i);
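DMA_BIT_MASK(34) set above is ((1ULL << 34) - 1), a mask spanning the
16 GiB TURBOchannel DMA window. A runnable userspace sketch of the
computation, with the kernel macro reproduced for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("mask = 0x%" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(34));
	/* 0x3ffffffff -> addresses 0 .. 16 GiB - 1 */
	return 0;
}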
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 0e69edc77d18..5422523c03f8 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -432,7 +432,7 @@ source "drivers/thermal/samsung/Kconfig"
endmenu
menu "STMicroelectronics thermal drivers"
-depends on ARCH_STI && OF
+depends on (ARCH_STI || ARCH_STM32) && OF
source "drivers/thermal/st/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 610344eb3e03..82bb50dc6423 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -53,7 +53,7 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
-obj-$(CONFIG_ST_THERMAL) += st/
+obj-y += st/
obj-$(CONFIG_QCOM_TSENS) += qcom/
obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 2c2f6d93034e..92f67d40f2e9 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -526,8 +526,8 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev,
/* First memory region points towards the status register */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (!res)
+ return -EIO;
/*
* Edit the resource start address and length to map over all the
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index dd8dd947b7f0..01b0cb994457 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -106,7 +106,7 @@ static void da9062_thermal_poll_on(struct work_struct *work)
THERMAL_EVENT_UNSPECIFIED);
delay = msecs_to_jiffies(thermal->zone->passive_delay);
- schedule_delayed_work(&thermal->work, delay);
+ queue_delayed_work(system_freezable_wq, &thermal->work, delay);
return;
}
@@ -125,7 +125,7 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data)
struct da9062_thermal *thermal = data;
disable_irq_nosync(thermal->irq);
- schedule_delayed_work(&thermal->work, 0);
+ queue_delayed_work(system_freezable_wq, &thermal->work, 0);
return IRQ_HANDLED;
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 761d0559c268..c4111a98f1a7 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -55,25 +55,39 @@
#define HI3660_TEMP_STEP (205)
#define HI3660_TEMP_LAG (4000)
-#define HI6220_DEFAULT_SENSOR 2
-#define HI3660_DEFAULT_SENSOR 1
+#define HI6220_CLUSTER0_SENSOR 2
+#define HI6220_CLUSTER1_SENSOR 1
+
+#define HI3660_LITTLE_SENSOR 0
+#define HI3660_BIG_SENSOR 1
+#define HI3660_G3D_SENSOR 2
+#define HI3660_MODEM_SENSOR 3
+
+struct hisi_thermal_data;
struct hisi_thermal_sensor {
+ struct hisi_thermal_data *data;
struct thermal_zone_device *tzd;
+ const char *irq_name;
uint32_t id;
uint32_t thres_temp;
};
+struct hisi_thermal_ops {
+ int (*get_temp)(struct hisi_thermal_sensor *sensor);
+ int (*enable_sensor)(struct hisi_thermal_sensor *sensor);
+ int (*disable_sensor)(struct hisi_thermal_sensor *sensor);
+ int (*irq_handler)(struct hisi_thermal_sensor *sensor);
+ int (*probe)(struct hisi_thermal_data *data);
+};
+
struct hisi_thermal_data {
- int (*get_temp)(struct hisi_thermal_data *data);
- int (*enable_sensor)(struct hisi_thermal_data *data);
- int (*disable_sensor)(struct hisi_thermal_data *data);
- int (*irq_handler)(struct hisi_thermal_data *data);
+ const struct hisi_thermal_ops *ops;
+ struct hisi_thermal_sensor *sensor;
struct platform_device *pdev;
struct clk *clk;
- struct hisi_thermal_sensor sensor;
void __iomem *regs;
- int irq;
+ int nr_sensors;
};
/*
@@ -266,30 +280,40 @@ static inline void hi6220_thermal_hdak_set(void __iomem *addr, int value)
(value << 4), addr + HI6220_TEMP0_CFG);
}
-static int hi6220_thermal_irq_handler(struct hisi_thermal_data *data)
+static int hi6220_thermal_irq_handler(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
hi6220_thermal_alarm_clear(data->regs, 1);
return 0;
}
-static int hi3660_thermal_irq_handler(struct hisi_thermal_data *data)
+static int hi3660_thermal_irq_handler(struct hisi_thermal_sensor *sensor)
{
- hi3660_thermal_alarm_clear(data->regs, data->sensor.id, 1);
+ struct hisi_thermal_data *data = sensor->data;
+
+ hi3660_thermal_alarm_clear(data->regs, sensor->id, 1);
return 0;
}
-static int hi6220_thermal_get_temp(struct hisi_thermal_data *data)
+static int hi6220_thermal_get_temp(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
return hi6220_thermal_get_temperature(data->regs);
}
-static int hi3660_thermal_get_temp(struct hisi_thermal_data *data)
+static int hi3660_thermal_get_temp(struct hisi_thermal_sensor *sensor)
{
- return hi3660_thermal_get_temperature(data->regs, data->sensor.id);
+ struct hisi_thermal_data *data = sensor->data;
+
+ return hi3660_thermal_get_temperature(data->regs, sensor->id);
}
-static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data)
+static int hi6220_thermal_disable_sensor(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
/* disable sensor module */
hi6220_thermal_enable(data->regs, 0);
hi6220_thermal_alarm_enable(data->regs, 0);
@@ -300,16 +324,18 @@ static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data)
return 0;
}
-static int hi3660_thermal_disable_sensor(struct hisi_thermal_data *data)
+static int hi3660_thermal_disable_sensor(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
/* disable sensor module */
- hi3660_thermal_alarm_enable(data->regs, data->sensor.id, 0);
+ hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
return 0;
}
-static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data)
+static int hi6220_thermal_enable_sensor(struct hisi_thermal_sensor *sensor)
{
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_data *data = sensor->data;
int ret;
/* enable clock for tsensor */
@@ -345,10 +371,10 @@ static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data)
return 0;
}
-static int hi3660_thermal_enable_sensor(struct hisi_thermal_data *data)
+static int hi3660_thermal_enable_sensor(struct hisi_thermal_sensor *sensor)
{
unsigned int value;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_data *data = sensor->data;
/* disable interrupt */
hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
@@ -371,21 +397,8 @@ static int hi6220_thermal_probe(struct hisi_thermal_data *data)
{
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
- data->get_temp = hi6220_thermal_get_temp;
- data->enable_sensor = hi6220_thermal_enable_sensor;
- data->disable_sensor = hi6220_thermal_disable_sensor;
- data->irq_handler = hi6220_thermal_irq_handler;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(dev, "failed to get io address\n");
- return PTR_ERR(data->regs);
- }
-
data->clk = devm_clk_get(dev, "thermal_clk");
if (IS_ERR(data->clk)) {
ret = PTR_ERR(data->clk);
@@ -394,11 +407,14 @@ static int hi6220_thermal_probe(struct hisi_thermal_data *data)
return ret;
}
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
+ data->sensor = devm_kzalloc(dev, sizeof(*data->sensor), GFP_KERNEL);
+ if (!data->sensor)
+ return -ENOMEM;
- data->sensor.id = HI6220_DEFAULT_SENSOR;
+ data->sensor[0].id = HI6220_CLUSTER0_SENSOR;
+ data->sensor[0].irq_name = "tsensor_intr";
+ data->sensor[0].data = data;
+ data->nr_sensors = 1;
return 0;
}
@@ -407,38 +423,34 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
{
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- struct resource *res;
- data->get_temp = hi3660_thermal_get_temp;
- data->enable_sensor = hi3660_thermal_enable_sensor;
- data->disable_sensor = hi3660_thermal_disable_sensor;
- data->irq_handler = hi3660_thermal_irq_handler;
+ data->nr_sensors = 2;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(dev, "failed to get io address\n");
- return PTR_ERR(data->regs);
- }
+ data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
+ data->nr_sensors, GFP_KERNEL);
+ if (!data->sensor)
+ return -ENOMEM;
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
+ data->sensor[0].id = HI3660_BIG_SENSOR;
+ data->sensor[0].irq_name = "tsensor_a73";
+ data->sensor[0].data = data;
- data->sensor.id = HI3660_DEFAULT_SENSOR;
+ data->sensor[1].id = HI3660_LITTLE_SENSOR;
+ data->sensor[1].irq_name = "tsensor_a53";
+ data->sensor[1].data = data;
return 0;
}
static int hisi_thermal_get_temp(void *__data, int *temp)
{
- struct hisi_thermal_data *data = __data;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_sensor *sensor = __data;
+ struct hisi_thermal_data *data = sensor->data;
- *temp = data->get_temp(data);
+ *temp = data->ops->get_temp(sensor);
- dev_dbg(&data->pdev->dev, "id=%d, temp=%d, thres=%d\n",
- sensor->id, *temp, sensor->thres_temp);
+ dev_dbg(&data->pdev->dev, "tzd=%p, id=%d, temp=%d, thres=%d\n",
+ sensor->tzd, sensor->id, *temp, sensor->thres_temp);
return 0;
}
@@ -449,38 +461,39 @@ static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
{
- struct hisi_thermal_data *data = dev;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_sensor *sensor = dev;
+ struct hisi_thermal_data *data = sensor->data;
int temp = 0;
- data->irq_handler(data);
+ data->ops->irq_handler(sensor);
- hisi_thermal_get_temp(data, &temp);
+ hisi_thermal_get_temp(sensor, &temp);
if (temp >= sensor->thres_temp) {
- dev_crit(&data->pdev->dev, "THERMAL ALARM: %d > %d\n",
- temp, sensor->thres_temp);
+ dev_crit(&data->pdev->dev,
+ "sensor <%d> THERMAL ALARM: %d > %d\n",
+ sensor->id, temp, sensor->thres_temp);
- thermal_zone_device_update(data->sensor.tzd,
+ thermal_zone_device_update(sensor->tzd,
THERMAL_EVENT_UNSPECIFIED);
} else {
- dev_crit(&data->pdev->dev, "THERMAL ALARM stopped: %d < %d\n",
- temp, sensor->thres_temp);
+ dev_crit(&data->pdev->dev,
+ "sensor <%d> THERMAL ALARM stopped: %d < %d\n",
+ sensor->id, temp, sensor->thres_temp);
}
return IRQ_HANDLED;
}
static int hisi_thermal_register_sensor(struct platform_device *pdev,
- struct hisi_thermal_data *data,
struct hisi_thermal_sensor *sensor)
{
int ret, i;
const struct thermal_trip *trip;
sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, data,
+ sensor->id, sensor,
&hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
@@ -502,14 +515,30 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
return 0;
}
+static const struct hisi_thermal_ops hi6220_ops = {
+ .get_temp = hi6220_thermal_get_temp,
+ .enable_sensor = hi6220_thermal_enable_sensor,
+ .disable_sensor = hi6220_thermal_disable_sensor,
+ .irq_handler = hi6220_thermal_irq_handler,
+ .probe = hi6220_thermal_probe,
+};
+
+static const struct hisi_thermal_ops hi3660_ops = {
+ .get_temp = hi3660_thermal_get_temp,
+ .enable_sensor = hi3660_thermal_enable_sensor,
+ .disable_sensor = hi3660_thermal_disable_sensor,
+ .irq_handler = hi3660_thermal_irq_handler,
+ .probe = hi3660_thermal_probe,
+};
+
static const struct of_device_id of_hisi_thermal_match[] = {
{
.compatible = "hisilicon,tsensor",
- .data = hi6220_thermal_probe
+ .data = &hi6220_ops,
},
{
.compatible = "hisilicon,hi3660-tsensor",
- .data = hi3660_thermal_probe
+ .data = &hi3660_ops,
},
{ /* end */ }
};
@@ -527,9 +556,9 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
static int hisi_thermal_probe(struct platform_device *pdev)
{
struct hisi_thermal_data *data;
- int (*platform_probe)(struct hisi_thermal_data *);
struct device *dev = &pdev->dev;
- int ret;
+ struct resource *res;
+ int i, ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -537,41 +566,50 @@ static int hisi_thermal_probe(struct platform_device *pdev)
data->pdev = pdev;
platform_set_drvdata(pdev, data);
+ data->ops = of_device_get_match_data(dev);
- platform_probe = of_device_get_match_data(dev);
- if (!platform_probe) {
- dev_err(dev, "failed to get probe func\n");
- return -EINVAL;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(dev, "failed to get io address\n");
+ return PTR_ERR(data->regs);
}
- ret = platform_probe(data);
+ ret = data->ops->probe(data);
if (ret)
return ret;
- ret = hisi_thermal_register_sensor(pdev, data,
- &data->sensor);
- if (ret) {
- dev_err(dev, "failed to register thermal sensor: %d\n", ret);
- return ret;
- }
+ for (i = 0; i < data->nr_sensors; i++) {
+ struct hisi_thermal_sensor *sensor = &data->sensor[i];
- ret = data->enable_sensor(data);
- if (ret) {
- dev_err(dev, "Failed to setup the sensor: %d\n", ret);
- return ret;
- }
+ ret = hisi_thermal_register_sensor(pdev, sensor);
+ if (ret) {
+ dev_err(dev, "failed to register thermal sensor: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = platform_get_irq_byname(pdev, sensor->irq_name);
+ if (ret < 0)
+ return ret;
- if (data->irq) {
- ret = devm_request_threaded_irq(dev, data->irq, NULL,
- hisi_thermal_alarm_irq_thread,
- IRQF_ONESHOT, "hisi_thermal", data);
+ ret = devm_request_threaded_irq(dev, ret, NULL,
+ hisi_thermal_alarm_irq_thread,
+ IRQF_ONESHOT, sensor->irq_name,
+ sensor);
if (ret < 0) {
- dev_err(dev, "failed to request alarm irq: %d\n", ret);
+ dev_err(dev, "Failed to request alarm irq: %d\n", ret);
return ret;
}
- }
- hisi_thermal_toggle_sensor(&data->sensor, true);
+ ret = data->ops->enable_sensor(sensor);
+ if (ret) {
+ dev_err(dev, "Failed to setup the sensor: %d\n", ret);
+ return ret;
+ }
+
+ hisi_thermal_toggle_sensor(sensor, true);
+ }
return 0;
}
@@ -579,11 +617,14 @@ static int hisi_thermal_probe(struct platform_device *pdev)
static int hisi_thermal_remove(struct platform_device *pdev)
{
struct hisi_thermal_data *data = platform_get_drvdata(pdev);
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ int i;
- hisi_thermal_toggle_sensor(sensor, false);
+ for (i = 0; i < data->nr_sensors; i++) {
+ struct hisi_thermal_sensor *sensor = &data->sensor[i];
- data->disable_sensor(data);
+ hisi_thermal_toggle_sensor(sensor, false);
+ data->ops->disable_sensor(sensor);
+ }
return 0;
}
@@ -592,8 +633,10 @@ static int hisi_thermal_remove(struct platform_device *pdev)
static int hisi_thermal_suspend(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
+ int i;
- data->disable_sensor(data);
+ for (i = 0; i < data->nr_sensors; i++)
+ data->ops->disable_sensor(&data->sensor[i]);
return 0;
}
@@ -601,8 +644,12 @@ static int hisi_thermal_suspend(struct device *dev)
static int hisi_thermal_resume(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
+ int i, ret = 0;
+
+ for (i = 0; i < data->nr_sensors; i++)
+ ret |= data->ops->enable_sensor(&data->sensor[i]);
- return data->enable_sensor(data);
+ return ret;
}
#endif
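The hisi_thermal rework above replaces per-instance function pointers
with a shared const ops table selected through of_device_get_match_data(),
so each compatible string picks one immutable vtable. A sketch of the
pattern, assuming a kernel context; all foo_* names are illustrative:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_sensor;

struct foo_ops {
	int (*get_temp)(struct foo_sensor *sensor);
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_ops *ops;

	/* The ops table comes from the matching of_device_id .data. */
	ops = of_device_get_match_data(&pdev->dev);
	if (!ops)
		return -EINVAL;
	/* ... store ops, allocate and register per-sensor data ... */
	return 0;
}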
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index aa452acb60b6..15661549eb67 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -725,7 +725,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
} else {
ret = imx_init_from_tempmon_data(pdev);
if (ret) {
- dev_err(&pdev->dev, "failed to init from from fsl,tempmon-data\n");
+ dev_err(&pdev->dev, "failed to init from fsl,tempmon-data\n");
return ret;
}
}
@@ -762,9 +762,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"failed to get thermal clk: %d\n", ret);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto cpufreq_put;
}
/*
@@ -777,9 +775,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
ret = clk_prepare_enable(data->thermal_clk);
if (ret) {
dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto cpufreq_put;
}
data->tz = thermal_zone_device_register("imx_thermal_zone",
@@ -792,10 +788,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
ret = PTR_ERR(data->tz);
dev_err(&pdev->dev,
"failed to register thermal zone device %d\n", ret);
- clk_disable_unprepare(data->thermal_clk);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto clk_disable;
}
dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
@@ -827,14 +820,20 @@ static int imx_thermal_probe(struct platform_device *pdev)
0, "imx_thermal", data);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
- clk_disable_unprepare(data->thermal_clk);
- thermal_zone_device_unregister(data->tz);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto thermal_zone_unregister;
}
return 0;
+
+thermal_zone_unregister:
+ thermal_zone_device_unregister(data->tz);
+clk_disable:
+ clk_disable_unprepare(data->thermal_clk);
+cpufreq_put:
+ cpufreq_cooling_unregister(data->cdev);
+ cpufreq_cpu_put(data->policy);
+
+ return ret;
}
static int imx_thermal_remove(struct platform_device *pdev)
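The imx_thermal hunks above consolidate four duplicated error paths
into a single unwinding ladder: resources acquired earlier are released
in reverse order on any later failure. A runnable userspace sketch of
the goto-based idiom (names illustrative, not the driver's code):

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe(void)
{
	int ret;

	if ((ret = acquire("clk")))
		return ret;
	if ((ret = acquire("zone")))
		goto put_clk;
	if ((ret = acquire("irq")))
		goto unregister_zone;
	return 0;

unregister_zone:
	release("zone");	/* unwind in reverse acquisition order */
put_clk:
	release("clk");
	return ret;
}

int main(void) { return probe(); }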
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4f2816559205..4bfdb4a1e47d 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -19,23 +19,34 @@
/*** Private data structures to represent thermal device tree data ***/
/**
- * struct __thermal_bind_param - a match between trip and cooling device
+ * struct __thermal_cooling_bind_param - a cooling device for a trip point
* @cooling_device: a pointer to identify the referred cooling device
- * @trip_id: the trip point index
- * @usage: the percentage (from 0 to 100) of cooling contribution
* @min: minimum cooling state used at this trip point
* @max: maximum cooling state used at this trip point
*/
-struct __thermal_bind_params {
+struct __thermal_cooling_bind_param {
struct device_node *cooling_device;
- unsigned int trip_id;
- unsigned int usage;
unsigned long min;
unsigned long max;
};
/**
+ * struct __thermal_bind_param - a match between trip and cooling device
+ * @tcbp: a pointer to an array of cooling devices
+ * @count: number of elements in array
+ * @trip_id: the trip point index
+ * @usage: the percentage (from 0 to 100) of cooling contribution
+ */
+
+struct __thermal_bind_params {
+ struct __thermal_cooling_bind_param *tcbp;
+ unsigned int count;
+ unsigned int trip_id;
+ unsigned int usage;
+};
+
+/**
* struct __thermal_zone - internal representation of a thermal zone
* @mode: current thermal zone device mode (enabled/disabled)
* @passive_delay: polling interval while passive cooling is activated
@@ -192,25 +203,31 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
{
struct __thermal_zone *data = thermal->devdata;
- int i;
+ struct __thermal_bind_params *tbp;
+ struct __thermal_cooling_bind_param *tcbp;
+ int i, j;
if (!data || IS_ERR(data))
return -ENODEV;
/* find where to bind */
for (i = 0; i < data->num_tbps; i++) {
- struct __thermal_bind_params *tbp = data->tbps + i;
+ tbp = data->tbps + i;
- if (tbp->cooling_device == cdev->np) {
- int ret;
+ for (j = 0; j < tbp->count; j++) {
+ tcbp = tbp->tcbp + j;
- ret = thermal_zone_bind_cooling_device(thermal,
+ if (tcbp->cooling_device == cdev->np) {
+ int ret;
+
+ ret = thermal_zone_bind_cooling_device(thermal,
tbp->trip_id, cdev,
- tbp->max,
- tbp->min,
+ tcbp->max,
+ tcbp->min,
tbp->usage);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
}
}
@@ -221,22 +238,28 @@ static int of_thermal_unbind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
{
struct __thermal_zone *data = thermal->devdata;
- int i;
+ struct __thermal_bind_params *tbp;
+ struct __thermal_cooling_bind_param *tcbp;
+ int i, j;
if (!data || IS_ERR(data))
return -ENODEV;
/* find where to unbind */
for (i = 0; i < data->num_tbps; i++) {
- struct __thermal_bind_params *tbp = data->tbps + i;
+ tbp = data->tbps + i;
+
+ for (j = 0; j < tbp->count; j++) {
+ tcbp = tbp->tcbp + j;
- if (tbp->cooling_device == cdev->np) {
- int ret;
+ if (tcbp->cooling_device == cdev->np) {
+ int ret;
- ret = thermal_zone_unbind_cooling_device(thermal,
- tbp->trip_id, cdev);
- if (ret)
- return ret;
+ ret = thermal_zone_unbind_cooling_device(thermal,
+ tbp->trip_id, cdev);
+ if (ret)
+ return ret;
+ }
}
}
@@ -486,8 +509,8 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
if (sensor_specs.args_count >= 1) {
id = sensor_specs.args[0];
WARN(sensor_specs.args_count > 1,
- "%s: too many cells in sensor specifier %d\n",
- sensor_specs.np->name, sensor_specs.args_count);
+ "%pOFn: too many cells in sensor specifier %d\n",
+ sensor_specs.np, sensor_specs.args_count);
} else {
id = 0;
}
@@ -655,8 +678,9 @@ static int thermal_of_populate_bind_params(struct device_node *np,
int ntrips)
{
struct of_phandle_args cooling_spec;
+ struct __thermal_cooling_bind_param *__tcbp;
struct device_node *trip;
- int ret, i;
+ int ret, i, count;
u32 prop;
/* Default weight. Usage is optional */
@@ -683,20 +707,44 @@ static int thermal_of_populate_bind_params(struct device_node *np,
goto end;
}
- ret = of_parse_phandle_with_args(np, "cooling-device", "#cooling-cells",
- 0, &cooling_spec);
- if (ret < 0) {
- pr_err("missing cooling_device property\n");
+ count = of_count_phandle_with_args(np, "cooling-device",
+ "#cooling-cells");
+ if (!count) {
+ pr_err("Add a cooling_device property with at least one device\n");
goto end;
}
- __tbp->cooling_device = cooling_spec.np;
- if (cooling_spec.args_count >= 2) { /* at least min and max */
- __tbp->min = cooling_spec.args[0];
- __tbp->max = cooling_spec.args[1];
- } else {
- pr_err("wrong reference to cooling device, missing limits\n");
+
+ __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
+ if (!__tcbp)
+ goto end;
+
+ for (i = 0; i < count; i++) {
+ ret = of_parse_phandle_with_args(np, "cooling-device",
+ "#cooling-cells", i, &cooling_spec);
+ if (ret < 0) {
+ pr_err("Invalid cooling-device entry\n");
+ goto free_tcbp;
+ }
+
+ __tcbp[i].cooling_device = cooling_spec.np;
+
+ if (cooling_spec.args_count >= 2) { /* at least min and max */
+ __tcbp[i].min = cooling_spec.args[0];
+ __tcbp[i].max = cooling_spec.args[1];
+ } else {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ }
}
+ __tbp->tcbp = __tcbp;
+ __tbp->count = count;
+
+ goto end;
+
+free_tcbp:
+ for (i = i - 1; i >= 0; i--)
+ of_node_put(__tcbp[i].cooling_device);
+ kfree(__tcbp);
end:
of_node_put(trip);
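The hunk above generalizes the single cooling-device phandle to a
variable-length list: of_count_phandle_with_args() sizes the array,
then of_parse_phandle_with_args() resolves each entry. A sketch of
that walk, assuming a kernel context; the property and cell names
match the cooling-device binding used here:

#include <linux/errno.h>
#include <linux/of.h>

static int walk_cooling_devices(struct device_node *np)
{
	struct of_phandle_args spec;
	int i, count;

	count = of_count_phandle_with_args(np, "cooling-device",
					   "#cooling-cells");
	if (count <= 0)
		return -ENOENT;

	for (i = 0; i < count; i++) {
		if (of_parse_phandle_with_args(np, "cooling-device",
					       "#cooling-cells", i, &spec))
			return -EINVAL;
		/* spec.np is the cooling device; args[0]/args[1] min/max */
		of_node_put(spec.np);
	}
	return count;
}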
@@ -903,8 +951,16 @@ finish:
return tz;
free_tbps:
- for (i = i - 1; i >= 0; i--)
- of_node_put(tz->tbps[i].cooling_device);
+ for (i = i - 1; i >= 0; i--) {
+ struct __thermal_bind_params *tbp = tz->tbps + i;
+ int j;
+
+ for (j = 0; j < tbp->count; j++)
+ of_node_put(tbp->tcbp[j].cooling_device);
+
+ kfree(tbp->tcbp);
+ }
+
kfree(tz->tbps);
free_trips:
for (i = 0; i < tz->ntrips; i++)
@@ -920,10 +976,18 @@ free_tz:
static inline void of_thermal_free_zone(struct __thermal_zone *tz)
{
- int i;
+ struct __thermal_bind_params *tbp;
+ int i, j;
+
+ for (i = 0; i < tz->num_tbps; i++) {
+ tbp = tz->tbps + i;
+
+ for (j = 0; j < tbp->count; j++)
+ of_node_put(tbp->tcbp[j].cooling_device);
+
+ kfree(tbp->tcbp);
+ }
- for (i = 0; i < tz->num_tbps; i++)
- of_node_put(tz->tbps[i].cooling_device);
kfree(tz->tbps);
for (i = 0; i < tz->ntrips; i++)
of_node_put(tz->trips[i].np);
@@ -963,8 +1027,8 @@ int __init of_parse_thermal_zones(void)
tz = thermal_of_build_thermal_zone(child);
if (IS_ERR(tz)) {
- pr_err("failed to build thermal zone %s: %ld\n",
- child->name,
+ pr_err("failed to build thermal zone %pOFn: %ld\n",
+ child,
PTR_ERR(tz));
continue;
}
@@ -998,7 +1062,7 @@ int __init of_parse_thermal_zones(void)
tz->passive_delay,
tz->polling_delay);
if (IS_ERR(zone)) {
- pr_err("Failed to build %s zone %ld\n", child->name,
+ pr_err("Failed to build %pOFn zone %ld\n", child,
PTR_ERR(zone));
kfree(tzp);
kfree(ops);
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index ad4f3a8d6560..b2d5d5bf4a9b 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -23,6 +23,8 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
+#include "thermal_core.h"
+
#define QPNP_TM_REG_TYPE 0x04
#define QPNP_TM_REG_SUBTYPE 0x05
#define QPNP_TM_REG_STATUS 0x08
@@ -37,9 +39,11 @@
#define STATUS_GEN2_STATE_MASK GENMASK(6, 4)
#define STATUS_GEN2_STATE_SHIFT 4
-#define SHUTDOWN_CTRL1_OVERRIDE_MASK GENMASK(7, 6)
+#define SHUTDOWN_CTRL1_OVERRIDE_S2 BIT(6)
#define SHUTDOWN_CTRL1_THRESHOLD_MASK GENMASK(1, 0)
+#define SHUTDOWN_CTRL1_RATE_25HZ BIT(3)
+
#define ALARM_CTRL_FORCE_ENABLE BIT(7)
/*
@@ -56,12 +60,19 @@
#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
#define THRESH_MIN 0
+#define THRESH_MAX 3
+
+/* Stage 2 Threshold Min: 125 C */
+#define STAGE2_THRESHOLD_MIN 125000
+/* Stage 2 Threshold Max: 140 C */
+#define STAGE2_THRESHOLD_MAX 140000
/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
#define DEFAULT_TEMP 37000
struct qpnp_tm_chip {
struct regmap *map;
+ struct device *dev;
struct thermal_zone_device *tz_dev;
unsigned int subtype;
long temp;
@@ -69,6 +80,10 @@ struct qpnp_tm_chip {
unsigned int stage;
unsigned int prev_stage;
unsigned int base;
+ /* protects .thresh, .stage and chip registers */
+ struct mutex lock;
+ bool initialized;
+
struct iio_channel *adc;
};
@@ -125,6 +140,8 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
unsigned int stage, stage_new, stage_old;
int ret;
+ WARN_ON(!mutex_is_locked(&chip->lock));
+
ret = qpnp_tm_get_temp_stage(chip);
if (ret < 0)
return ret;
@@ -163,8 +180,15 @@ static int qpnp_tm_get_temp(void *data, int *temp)
if (!temp)
return -EINVAL;
+ if (!chip->initialized) {
+ *temp = DEFAULT_TEMP;
+ return 0;
+ }
+
if (!chip->adc) {
+ mutex_lock(&chip->lock);
ret = qpnp_tm_update_temp_no_adc(chip);
+ mutex_unlock(&chip->lock);
if (ret < 0)
return ret;
} else {
@@ -180,8 +204,72 @@ static int qpnp_tm_get_temp(void *data, int *temp)
return 0;
}
+static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
+ int temp)
+{
+ u8 reg;
+ bool disable_s2_shutdown = false;
+
+ WARN_ON(!mutex_is_locked(&chip->lock));
+
+ /*
+ * Default: S2 and S3 shutdown enabled, thresholds at
+ * 105C/125C/145C, monitoring at 25Hz
+ */
+ reg = SHUTDOWN_CTRL1_RATE_25HZ;
+
+ if (temp == THERMAL_TEMP_INVALID ||
+ temp < STAGE2_THRESHOLD_MIN) {
+ chip->thresh = THRESH_MIN;
+ goto skip;
+ }
+
+ if (temp <= STAGE2_THRESHOLD_MAX) {
+ chip->thresh = THRESH_MAX -
+ ((STAGE2_THRESHOLD_MAX - temp) /
+ TEMP_THRESH_STEP);
+ disable_s2_shutdown = true;
+ } else {
+ chip->thresh = THRESH_MAX;
+
+ if (chip->adc)
+ disable_s2_shutdown = true;
+ else
+ dev_warn(chip->dev,
+ "No ADC is configured and critical temperature is above the maximum stage 2 threshold of 140 C! Configuring stage 2 shutdown at 140 C.\n");
+ }
+
+skip:
+ reg |= chip->thresh;
+ if (disable_s2_shutdown)
+ reg |= SHUTDOWN_CTRL1_OVERRIDE_S2;
+
+ return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
+}
+
+static int qpnp_tm_set_trip_temp(void *data, int trip, int temp)
+{
+ struct qpnp_tm_chip *chip = data;
+ const struct thermal_trip *trip_points;
+ int ret;
+
+ trip_points = of_thermal_get_trip_points(chip->tz_dev);
+ if (!trip_points)
+ return -EINVAL;
+
+ if (trip_points[trip].type != THERMAL_TRIP_CRITICAL)
+ return 0;
+
+ mutex_lock(&chip->lock);
+ ret = qpnp_tm_update_critical_trip_temp(chip, temp);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = {
.get_temp = qpnp_tm_get_temp,
+ .set_trip_temp = qpnp_tm_set_trip_temp,
};
static irqreturn_t qpnp_tm_isr(int irq, void *data)
@@ -193,6 +281,29 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static int qpnp_tm_get_critical_trip_temp(struct qpnp_tm_chip *chip)
+{
+ int ntrips;
+ const struct thermal_trip *trips;
+ int i;
+
+ ntrips = of_thermal_get_ntrips(chip->tz_dev);
+ if (ntrips <= 0)
+ return THERMAL_TEMP_INVALID;
+
+ trips = of_thermal_get_trip_points(chip->tz_dev);
+ if (!trips)
+ return THERMAL_TEMP_INVALID;
+
+ for (i = 0; i < ntrips; i++) {
+ if (of_thermal_is_trip_valid(chip->tz_dev, i) &&
+ trips[i].type == THERMAL_TRIP_CRITICAL)
+ return trips[i].temperature;
+ }
+
+ return THERMAL_TEMP_INVALID;
+}
+
/*
* This function initializes the internal temp value based on only the
* current thermal stage and threshold. Setup threshold control and
@@ -203,17 +314,20 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
unsigned int stage;
int ret;
u8 reg = 0;
+ int crit_temp;
+
+ mutex_lock(&chip->lock);
ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
if (ret < 0)
- return ret;
+ goto out;
chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
chip->temp = DEFAULT_TEMP;
ret = qpnp_tm_get_temp_stage(chip);
if (ret < 0)
- return ret;
+ goto out;
chip->stage = ret;
stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
@@ -224,21 +338,19 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
(stage - 1) * TEMP_STAGE_STEP +
TEMP_THRESH_MIN;
- /*
- * Set threshold and disable software override of stage 2 and 3
- * shutdowns.
- */
- chip->thresh = THRESH_MIN;
- reg &= ~(SHUTDOWN_CTRL1_OVERRIDE_MASK | SHUTDOWN_CTRL1_THRESHOLD_MASK);
- reg |= chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
- ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
+ crit_temp = qpnp_tm_get_critical_trip_temp(chip);
+ ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
if (ret < 0)
- return ret;
+ goto out;
/* Enable the thermal alarm PMIC module in always-on mode. */
reg = ALARM_CTRL_FORCE_ENABLE;
ret = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, reg);
+ chip->initialized = true;
+
+out:
+ mutex_unlock(&chip->lock);
return ret;
}
@@ -257,6 +369,9 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, chip);
+ chip->dev = &pdev->dev;
+
+ mutex_init(&chip->lock);
chip->map = dev_get_regmap(pdev->dev.parent, NULL);
if (!chip->map)
@@ -302,6 +417,18 @@ static int qpnp_tm_probe(struct platform_device *pdev)
chip->subtype = subtype;
+ /*
+ * Register the sensor before initializing the hardware to be able to
+ * read the trip points. get_temp() returns the default temperature
+ * before the hardware initialization is completed.
+ */
+ chip->tz_dev = devm_thermal_zone_of_sensor_register(
+ &pdev->dev, 0, chip, &qpnp_tm_sensor_ops);
+ if (IS_ERR(chip->tz_dev)) {
+ dev_err(&pdev->dev, "failed to register sensor\n");
+ return PTR_ERR(chip->tz_dev);
+ }
+
ret = qpnp_tm_init(chip);
if (ret < 0) {
dev_err(&pdev->dev, "init failed\n");
@@ -313,12 +440,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
- &qpnp_tm_sensor_ops);
- if (IS_ERR(chip->tz_dev)) {
- dev_err(&pdev->dev, "failed to register sensor\n");
- return PTR_ERR(chip->tz_dev);
- }
+ thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);
return 0;
}
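
The stage 2 threshold written by qpnp_tm_update_critical_trip_temp() is quantized in 5 C steps below the 140 C maximum. A worked example of that arithmetic, as a standalone userspace program with made-up trip values (the constants mirror the driver's defines):

/* Worked example of the stage 2 threshold quantization above. */
#include <stdio.h>

#define TEMP_THRESH_STEP	5000	/* 5 C per threshold step */
#define THRESH_MAX		3
#define STAGE2_THRESHOLD_MAX	140000	/* 140 C */

int main(void)
{
	int crit = 130000;	/* critical trip from DT, in mC */
	int thresh = THRESH_MAX -
		     (STAGE2_THRESHOLD_MAX - crit) / TEMP_THRESH_STEP;

	/* prints "thresh=1 -> stage 2 shutdown at 130 C" */
	printf("thresh=%d -> stage 2 shutdown at %d C\n",
	       thresh, (STAGE2_THRESHOLD_MAX -
			(THRESH_MAX - thresh) * TEMP_THRESH_STEP) / 1000);
	return 0;
}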
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
index fdf561b8b81d..c6dd620ac029 100644
--- a/drivers/thermal/qcom/tsens-8916.c
+++ b/drivers/thermal/qcom/tsens-8916.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -109,5 +100,6 @@ static const struct tsens_ops ops_8916 = {
const struct tsens_data data_8916 = {
.num_sensors = 5,
.ops = &ops_8916,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
.hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 0451277d3a8f..0f0adb302a7b 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -69,7 +60,7 @@ static int suspend_8960(struct tsens_device *tmdev)
{
int ret;
unsigned int mask;
- struct regmap *map = tmdev->map;
+ struct regmap *map = tmdev->tm_map;
ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
if (ret)
@@ -94,7 +85,7 @@ static int suspend_8960(struct tsens_device *tmdev)
static int resume_8960(struct tsens_device *tmdev)
{
int ret;
- struct regmap *map = tmdev->map;
+ struct regmap *map = tmdev->tm_map;
ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
if (ret)
@@ -126,12 +117,12 @@ static int enable_8960(struct tsens_device *tmdev, int id)
int ret;
u32 reg, mask;
- ret = regmap_read(tmdev->map, CNTL_ADDR, &reg);
+ ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg);
if (ret)
return ret;
mask = BIT(id + SENSOR0_SHIFT);
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg | SW_RST);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
@@ -140,7 +131,7 @@ static int enable_8960(struct tsens_device *tmdev, int id)
else
reg |= mask | SLP_CLK_ENA_8660 | EN;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg);
if (ret)
return ret;
@@ -157,7 +148,7 @@ static void disable_8960(struct tsens_device *tmdev)
mask <<= SENSOR0_SHIFT;
mask |= EN;
- ret = regmap_read(tmdev->map, CNTL_ADDR, &reg_cntl);
+ ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg_cntl);
if (ret)
return;
@@ -168,7 +159,7 @@ static void disable_8960(struct tsens_device *tmdev)
else
reg_cntl &= ~SLP_CLK_ENA_8660;
- regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
}
static int init_8960(struct tsens_device *tmdev)
@@ -176,8 +167,8 @@ static int init_8960(struct tsens_device *tmdev)
int ret, i;
u32 reg_cntl;
- tmdev->map = dev_get_regmap(tmdev->dev, NULL);
- if (!tmdev->map)
+ tmdev->tm_map = dev_get_regmap(tmdev->dev, NULL);
+ if (!tmdev->tm_map)
return -ENODEV;
/*
@@ -193,14 +184,14 @@ static int init_8960(struct tsens_device *tmdev)
}
reg_cntl = SW_RST;
- ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl);
+ ret = regmap_update_bits(tmdev->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
if (ret)
return ret;
if (tmdev->num_sensors > 1) {
reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
reg_cntl &= ~SW_RST;
- ret = regmap_update_bits(tmdev->map, CONFIG_ADDR,
+ ret = regmap_update_bits(tmdev->tm_map, CONFIG_ADDR,
CONFIG_MASK, CONFIG);
} else {
reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
@@ -209,12 +200,12 @@ static int init_8960(struct tsens_device *tmdev)
}
reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
reg_cntl |= EN;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
@@ -261,12 +252,12 @@ static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
do {
- ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+ ret = regmap_read(tmdev->tm_map, INT_STATUS_ADDR, &trdy);
if (ret)
return ret;
if (!(trdy & TRDY_MASK))
continue;
- ret = regmap_read(tmdev->map, s->status, &code);
+ ret = regmap_read(tmdev->tm_map, s->status, &code);
if (ret)
return ret;
*temp = code_to_mdegC(code, s);
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-8974.c
index 9baf77e8cbe3..3d3fda3d731b 100644
--- a/drivers/thermal/qcom/tsens-8974.c
+++ b/drivers/thermal/qcom/tsens-8974.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -241,4 +232,5 @@ static const struct tsens_ops ops_8974 = {
const struct tsens_data data_8974 = {
.num_sensors = 11,
.ops = &ops_8974,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index 6207d8d92351..3be4be2e0465 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/err.h>
@@ -21,7 +12,11 @@
#include <linux/regmap.h>
#include "tsens.h"
-#define S0_ST_ADDR 0x1030
+/* SROT */
+#define TSENS_EN BIT(0)
+
+/* TM */
+#define STATUS_OFFSET 0x30
#define SN_ADDR_OFFSET 0x4
#define SN_ST_TEMP_MASK 0x3ff
#define CAL_DEGC_PT1 30
@@ -107,8 +102,8 @@ int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
unsigned int status_reg;
int last_temp = 0, ret;
- status_reg = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
- ret = regmap_read(tmdev->map, status_reg, &code);
+ status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * SN_ADDR_OFFSET;
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
last_temp = code & SN_ST_TEMP_MASK;
@@ -126,29 +121,52 @@ static const struct regmap_config tsens_config = {
int __init init_common(struct tsens_device *tmdev)
{
- void __iomem *base;
+ void __iomem *tm_base, *srot_base;
struct resource *res;
+ u32 code;
+ int ret;
struct platform_device *op = of_find_device_by_node(tmdev->dev->of_node);
+ u16 ctrl_offset = tmdev->reg_offsets[SROT_CTRL_OFFSET];
if (!op)
return -EINVAL;
- /* The driver only uses the TM register address space for now */
if (op->num_resources > 1) {
+ /* DT with separate SROT and TM address space */
tmdev->tm_offset = 0;
+ res = platform_get_resource(op, IORESOURCE_MEM, 1);
+ srot_base = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(srot_base))
+ return PTR_ERR(srot_base);
+
+ tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev,
+ srot_base, &tsens_config);
+ if (IS_ERR(tmdev->srot_map))
+ return PTR_ERR(tmdev->srot_map);
+
} else {
/* old DTs where SROT and TM were in a contiguous 2K block */
tmdev->tm_offset = 0x1000;
}
res = platform_get_resource(op, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&op->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
- if (IS_ERR(tmdev->map))
- return PTR_ERR(tmdev->map);
+ tm_base = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(tm_base))
+ return PTR_ERR(tm_base);
+
+ tmdev->tm_map = devm_regmap_init_mmio(tmdev->dev, tm_base, &tsens_config);
+ if (IS_ERR(tmdev->tm_map))
+ return PTR_ERR(tmdev->tm_map);
+
+ if (tmdev->srot_map) {
+ ret = regmap_read(tmdev->srot_map, ctrl_offset, &code);
+ if (ret)
+ return ret;
+ if (!(code & TSENS_EN)) {
+ dev_err(tmdev->dev, "tsens device is not enabled\n");
+ return -ENODEV;
+ }
+ }
return 0;
}
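
After this change the TM status address is formed from a per-layout base offset rather than the old hardcoded S0_ST_ADDR. A small illustrative sketch of the addressing, with the two tm_offset cases spelled out:

/*
 * Sketch of TM status addressing after the SROT/TM split. With
 * separate address spaces tm_offset is 0; with the legacy contiguous
 * 2K block it is 0x1000.
 */
#define STATUS_OFFSET	0x30
#define SN_ADDR_OFFSET	0x4

static unsigned int tsens_status_reg(unsigned int tm_offset,
				     unsigned int hw_id)
{
	return tm_offset + STATUS_OFFSET + hw_id * SN_ADDR_OFFSET;
}

/* old DT, sensor 0: 0x1000 + 0x30 = 0x1030 (the removed S0_ST_ADDR) */
/* new DT, sensor 2: 0x0 + 0x30 + 0x8 = 0x38 */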
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 44da02f594ac..381a212872bf 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -21,7 +21,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
int ret;
status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4;
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
last_temp = code & LAST_TEMP_MASK;
@@ -29,7 +29,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
goto done;
/* Try a second time */
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
if (code & STATUS_VALID_BIT) {
@@ -40,7 +40,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
}
/* Try a third/last time */
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
if (code & STATUS_VALID_BIT) {
@@ -68,10 +68,12 @@ static const struct tsens_ops ops_generic_v2 = {
const struct tsens_data data_tsens_v2 = {
.ops = &ops_generic_v2,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
};
/* Kept around for backward compatibility with old msm8996.dtsi */
const struct tsens_data data_8996 = {
.num_sensors = 13,
.ops = &ops_generic_v2,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
};
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index a2c9bfae3d86..f1ec9bbe4717 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/err.h>
@@ -89,11 +80,6 @@ static int tsens_register(struct tsens_device *tmdev)
{
int i;
struct thermal_zone_device *tzd;
- u32 *hw_id, n = tmdev->num_sensors;
-
- hw_id = devm_kcalloc(tmdev->dev, n, sizeof(u32), GFP_KERNEL);
- if (!hw_id)
- return -ENOMEM;
for (i = 0; i < tmdev->num_sensors; i++) {
tmdev->sensor[i].tmdev = tmdev;
@@ -158,6 +144,9 @@ static int tsens_probe(struct platform_device *pdev)
else
tmdev->sensor[i].hw_id = i;
}
+ for (i = 0; i < REG_ARRAY_SIZE; i++) {
+ tmdev->reg_offsets[i] = data->reg_offsets[i];
+ }
if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
return -EINVAL;
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 14331eb45a86..7b7feee5dc46 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -1,15 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+
#ifndef __QCOM_TSENS_H__
#define __QCOM_TSENS_H__
@@ -55,15 +48,23 @@ struct tsens_ops {
int (*get_trend)(struct tsens_device *, int, enum thermal_trend *);
};
+enum reg_list {
+ SROT_CTRL_OFFSET,
+
+ REG_ARRAY_SIZE,
+};
+
/**
* struct tsens_data - tsens instance specific data
* @num_sensors: Max number of sensors supported by platform
* @ops: operations the tsens instance supports
* @hw_ids: Subset of sensors ids supported by platform, if not the first n
+ * @reg_offsets: Register offsets for commonly used registers
*/
struct tsens_data {
const u32 num_sensors;
const struct tsens_ops *ops;
+ const u16 reg_offsets[REG_ARRAY_SIZE];
unsigned int *hw_ids;
};
@@ -76,8 +77,10 @@ struct tsens_context {
struct tsens_device {
struct device *dev;
u32 num_sensors;
- struct regmap *map;
+ struct regmap *tm_map;
+ struct regmap *srot_map;
u32 tm_offset;
+ u16 reg_offsets[REG_ARRAY_SIZE];
struct tsens_context ctx;
const struct tsens_ops *ops;
struct tsens_sensor sensor[0];
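
The reg_offsets indirection added to tsens.h lets shared code look up SoC-specific register locations by name instead of hardcoding addresses. A minimal sketch of a consumer, assuming the fields introduced by this patch (the helper itself is illustrative, not part of the series):

/* Sketch: check the SROT enable bit via the per-SoC offset table. */
static int tsens_check_enabled(struct tsens_device *tmdev)
{
	u16 ctrl = tmdev->reg_offsets[SROT_CTRL_OFFSET];	/* 0x0 or 0x4 */
	u32 code;
	int ret;

	ret = regmap_read(tmdev->srot_map, ctrl, &code);
	if (ret)
		return ret;

	return (code & BIT(0)) ? 0 : -ENODEV;	/* TSENS_EN is bit 0 */
}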
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 450ed66edf58..18c711b19514 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -119,8 +119,8 @@ static int qoriq_tmu_get_sensor_id(void)
if (sensor_specs.args_count >= 1) {
id = sensor_specs.args[0];
WARN(sensor_specs.args_count > 1,
- "%s: too many cells in sensor specifier %d\n",
- sensor_specs.np->name, sensor_specs.args_count);
+ "%pOFn: too many cells in sensor specifier %d\n",
+ sensor_specs.np, sensor_specs.args_count);
} else {
id = 0;
}
@@ -294,6 +294,7 @@ static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
static const struct of_device_id qoriq_tmu_match[] = {
{ .compatible = "fsl,qoriq-tmu", },
+ { .compatible = "fsl,imx8mq-tmu", },
{},
};
MODULE_DEVICE_TABLE(of, qoriq_tmu_match);
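
The %pOFn conversions in this series print a device node's name directly from the struct device_node pointer, replacing direct use of the node's name field. A minimal sketch of the usage, with an illustrative function name:

/* Sketch: %pOFn takes the struct device_node pointer itself. */
static void report_sensor(struct device_node *np, int cells)
{
	if (cells > 1)
		pr_warn("%pOFn: too many cells in sensor specifier %d\n",
			np, cells);
}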
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 7aed5337bdd3..75786cc8e2f9 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -318,9 +318,11 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
}
static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
+ { .compatible = "renesas,r8a774a1-thermal", },
{ .compatible = "renesas,r8a7795-thermal", },
{ .compatible = "renesas,r8a7796-thermal", },
{ .compatible = "renesas,r8a77965-thermal", },
+ { .compatible = "renesas,r8a77980-thermal", },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 78f932822d38..8014a207d8d9 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -113,6 +113,10 @@ static const struct of_device_id rcar_thermal_dt_ids[] = {
.data = &rcar_gen2_thermal,
},
{
+ .compatible = "renesas,thermal-r8a77970",
+ .data = &rcar_gen3_thermal,
+ },
+ {
.compatible = "renesas,thermal-r8a77995",
.data = &rcar_gen3_thermal,
},
@@ -434,8 +438,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
rcar_thermal_for_each_priv(priv, common) {
if (rcar_thermal_had_changed(priv, status)) {
rcar_thermal_irq_disable(priv);
- schedule_delayed_work(&priv->work,
- msecs_to_jiffies(300));
+ queue_delayed_work(system_freezable_wq, &priv->work,
+ msecs_to_jiffies(300));
}
}
@@ -453,6 +457,7 @@ static int rcar_thermal_remove(struct platform_device *pdev)
rcar_thermal_for_each_priv(priv, common) {
rcar_thermal_irq_disable(priv);
+ cancel_delayed_work_sync(&priv->work);
if (priv->chip->use_of_thermal)
thermal_remove_hwmon_sysfs(priv->zone);
else
@@ -492,7 +497,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
pm_runtime_get_sync(dev);
for (i = 0; i < chip->nirqs; i++) {
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (!irq)
continue;
if (!common->base) {
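
The rcar_thermal probe fix above replaces a hardcoded resource index 0 with the loop counter, so each of the chip's nirqs interrupts is actually fetched and requested. A minimal sketch of the corrected pattern, as an illustrative skeleton rather than the driver's exact code:

/* Sketch: request one IRQ per resource entry, indexed by the loop. */
static int request_all_irqs(struct platform_device *pdev, int nirqs,
			    irq_handler_t handler, void *data)
{
	struct resource *irq;
	int i, ret;

	for (i = 0; i < nirqs; i++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!irq)
			continue;

		ret = devm_request_irq(&pdev->dev, irq->start, handler,
				       IRQF_SHARED, dev_name(&pdev->dev),
				       data);
		if (ret)
			return ret;
	}
	return 0;
}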
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
index 490fdbe22eea..b80f9a9e4f8f 100644
--- a/drivers/thermal/st/Kconfig
+++ b/drivers/thermal/st/Kconfig
@@ -1,3 +1,7 @@
+#
+# STMicroelectronics thermal drivers configuration
+#
+
config ST_THERMAL
tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
help
@@ -10,3 +14,13 @@ config ST_THERMAL_SYSCFG
config ST_THERMAL_MEMMAP
select ST_THERMAL
tristate "STi series memory mapped access based thermal sensors"
+
+config STM32_THERMAL
+ tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
+ depends on MACH_STM32MP157
+ default y
+ help
+ Support for the thermal framework on STMicroelectronics STM32 series
+ of SoCs. This thermal driver provides access to general thermal
+ framework functionality and to the SoC sensor functionality. This
+ configuration is fully dependent on MACH_STM32MP157.
diff --git a/drivers/thermal/st/Makefile b/drivers/thermal/st/Makefile
index b38878977bd8..b2b9e9b96296 100644
--- a/drivers/thermal/st/Makefile
+++ b/drivers/thermal/st/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_ST_THERMAL) := st_thermal.o
obj-$(CONFIG_ST_THERMAL_SYSCFG) += st_thermal_syscfg.o
obj-$(CONFIG_ST_THERMAL_MEMMAP) += st_thermal_memmap.o
+obj-$(CONFIG_STM32_THERMAL) := stm_thermal.o \ No newline at end of file
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
new file mode 100644
index 000000000000..47623da0f91b
--- /dev/null
+++ b/drivers/thermal/st/stm_thermal.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: David Hernandez Sanchez <david.hernandezsanchez@st.com> for
+ * STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#include "../thermal_core.h"
+#include "../thermal_hwmon.h"
+
+/* DTS register offsets */
+#define DTS_CFGR1_OFFSET 0x0
+#define DTS_T0VALR1_OFFSET 0x8
+#define DTS_RAMPVALR_OFFSET 0X10
+#define DTS_ITR1_OFFSET 0x14
+#define DTS_DR_OFFSET 0x1C
+#define DTS_SR_OFFSET 0x20
+#define DTS_ITENR_OFFSET 0x24
+#define DTS_CIFR_OFFSET 0x28
+
+/* DTS_CFGR1 register mask definitions */
+#define HSREF_CLK_DIV_MASK GENMASK(30, 24)
+#define TS1_SMP_TIME_MASK GENMASK(19, 16)
+#define TS1_INTRIG_SEL_MASK GENMASK(11, 8)
+
+/* DTS_T0VALR1 register mask definitions */
+#define TS1_T0_MASK GENMASK(17, 16)
+#define TS1_FMT0_MASK GENMASK(15, 0)
+
+/* DTS_RAMPVALR register mask definitions */
+#define TS1_RAMP_COEFF_MASK GENMASK(15, 0)
+
+/* DTS_ITR1 register mask definitions */
+#define TS1_HITTHD_MASK GENMASK(31, 16)
+#define TS1_LITTHD_MASK GENMASK(15, 0)
+
+/* DTS_DR register mask definitions */
+#define TS1_MFREQ_MASK GENMASK(15, 0)
+
+/* Least significant bit position definitions */
+#define TS1_T0_POS 16
+#define TS1_SMP_TIME_POS 16
+#define TS1_HITTHD_POS 16
+#define HSREF_CLK_DIV_POS 24
+
+/* DTS_CFGR1 bit definitions */
+#define TS1_EN BIT(0)
+#define TS1_START BIT(4)
+#define REFCLK_SEL BIT(20)
+#define REFCLK_LSE REFCLK_SEL
+#define Q_MEAS_OPT BIT(21)
+#define CALIBRATION_CONTROL Q_MEAS_OPT
+
+/* DTS_SR bit definitions */
+#define TS_RDY BIT(15)
+/* Bit definitions below are common for DTS_SR, DTS_ITENR and DTS_CIFR */
+#define HIGH_THRESHOLD BIT(2)
+#define LOW_THRESHOLD BIT(1)
+
+/* Constants */
+#define ADJUST 100
+#define ONE_MHZ 1000000
+#define POLL_TIMEOUT 5000
+#define STARTUP_TIME 40
+#define TS1_T0_VAL0 30
+#define TS1_T0_VAL1 130
+#define NO_HW_TRIG 0
+
+/* The Thermal Framework expects millidegrees */
+#define mcelsius(temp) ((temp) * 1000)
+
+/* The sensor expects degrees Celsius */
+#define celsius(temp) ((temp) / 1000)
+
+struct stm_thermal_sensor {
+ struct device *dev;
+ struct thermal_zone_device *th_dev;
+ enum thermal_device_mode mode;
+ struct clk *clk;
+ int high_temp;
+ int low_temp;
+ int temp_critical;
+ int temp_passive;
+ unsigned int low_temp_enabled;
+ int num_trips;
+ int irq;
+ unsigned int irq_enabled;
+ void __iomem *base;
+ int t0, fmt0, ramp_coeff;
+};
+
+static irqreturn_t stm_thermal_alarm_irq(int irq, void *sdata)
+{
+ struct stm_thermal_sensor *sensor = sdata;
+
+ disable_irq_nosync(irq);
+ sensor->irq_enabled = false;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t stm_thermal_alarm_irq_thread(int irq, void *sdata)
+{
+ u32 value;
+ struct stm_thermal_sensor *sensor = sdata;
+
+ /* read IT reason in SR and clear flags */
+ value = readl_relaxed(sensor->base + DTS_SR_OFFSET);
+
+ if ((value & LOW_THRESHOLD) == LOW_THRESHOLD)
+ writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ if ((value & HIGH_THRESHOLD) == HIGH_THRESHOLD)
+ writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
+
+ return IRQ_HANDLED;
+}
+
+static int stm_sensor_power_on(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+ u32 value;
+
+ /* Enable sensor */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value |= TS1_EN;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /*
+ * The DTS block can be enabled by setting TSx_EN bit in
+ * DTS_CFGRx register. It requires a startup time of
+ * 40μs. Use 5 ms as arbitrary timeout.
+ */
+ ret = readl_poll_timeout(sensor->base + DTS_SR_OFFSET,
+ value, (value & TS_RDY),
+ STARTUP_TIME, POLL_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Start continuous measuring */
+ value = readl_relaxed(sensor->base +
+ DTS_CFGR1_OFFSET);
+ value |= TS1_START;
+ writel_relaxed(value, sensor->base +
+ DTS_CFGR1_OFFSET);
+
+ return 0;
+}
+
+static int stm_sensor_power_off(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+ /* Stop measuring */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value &= ~TS1_START;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Ensure stop is taken into account */
+ usleep_range(STARTUP_TIME, POLL_TIMEOUT);
+
+ /* Disable sensor */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value &= ~TS1_EN;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Ensure disable is taken into account */
+ return readl_poll_timeout(sensor->base + DTS_SR_OFFSET, value,
+ !(value & TS_RDY),
+ STARTUP_TIME, POLL_TIMEOUT);
+}
+
+static int stm_thermal_calibration(struct stm_thermal_sensor *sensor)
+{
+ u32 value, clk_freq;
+ u32 prescaler;
+
+ /* Figure out prescaler value for PCLK during calibration */
+ clk_freq = clk_get_rate(sensor->clk);
+ if (!clk_freq)
+ return -EINVAL;
+
+ prescaler = 0;
+ clk_freq /= ONE_MHZ;
+ if (clk_freq) {
+ while (prescaler <= clk_freq)
+ prescaler++;
+ }
+
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Clear prescaler */
+ value &= ~HSREF_CLK_DIV_MASK;
+
+ /* Set prescaler. pclk_freq/prescaler < 1MHz */
+ value |= (prescaler << HSREF_CLK_DIV_POS);
+
+ /* Select PCLK as reference clock */
+ value &= ~REFCLK_SEL;
+
+ /* Set maximal sampling time for better precision */
+ value |= TS1_SMP_TIME_MASK;
+
+ /* Measure with calibration */
+ value &= ~CALIBRATION_CONTROL;
+
+ /* select trigger */
+ value &= ~TS1_INTRIG_SEL_MASK;
+ value |= NO_HW_TRIG;
+
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ return 0;
+}
+
+/* Fill in DTS structure with factory sensor values */
+static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
+{
+ /* Retrieve engineering calibration temperature */
+ sensor->t0 = readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET) &
+ TS1_T0_MASK;
+ if (!sensor->t0)
+ sensor->t0 = TS1_T0_VAL0;
+ else
+ sensor->t0 = TS1_T0_VAL1;
+
+ /* Retrieve fmt0 and convert it to Hz */
+ sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET)
+ & TS1_FMT0_MASK);
+
+ /* Retrieve ramp coefficient */
+ sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
+ TS1_RAMP_COEFF_MASK;
+
+ if (!sensor->fmt0 || !sensor->ramp_coeff) {
+ dev_err(sensor->dev, "%s: wrong setting\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(sensor->dev, "%s: T0 = %doC, FMT0 = %dHz, RAMP_COEFF = %dHz/oC",
+ __func__, sensor->t0, sensor->fmt0, sensor->ramp_coeff);
+
+ return 0;
+}
+
+static int stm_thermal_calculate_threshold(struct stm_thermal_sensor *sensor,
+ int temp, u32 *th)
+{
+ int freqM;
+ u32 sampling_time;
+
+ /* Retrieve the number of periods to sample */
+ sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
+ TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
+
+ /* Figure out the CLK_PTAT frequency for a given temperature */
+ freqM = ((temp - sensor->t0) * sensor->ramp_coeff)
+ + sensor->fmt0;
+
+ dev_dbg(sensor->dev, "%s: freqM for threshold = %d Hz",
+ __func__, freqM);
+
+ /* Figure out the threshold sample number */
+ *th = clk_get_rate(sensor->clk);
+ if (!*th)
+ return -EINVAL;
+
+ *th = *th / freqM;
+
+ *th *= sampling_time;
+
+ return 0;
+}
+
+static int stm_thermal_set_threshold(struct stm_thermal_sensor *sensor)
+{
+ u32 value, th;
+ int ret;
+
+ value = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
+
+ /* Erase threshold content */
+ value &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
+
+ /* Retrieve the sample threshold number th for a given temperature */
+ ret = stm_thermal_calculate_threshold(sensor, sensor->high_temp, &th);
+ if (ret)
+ return ret;
+
+ value |= th & TS1_LITTHD_MASK;
+
+ if (sensor->low_temp_enabled) {
+ /* Retrieve the sample threshold */
+ ret = stm_thermal_calculate_threshold(sensor, sensor->low_temp,
+ &th);
+ if (ret)
+ return ret;
+
+ value |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
+ }
+
+ /* Write value on the Low interrupt threshold */
+ writel_relaxed(value, sensor->base + DTS_ITR1_OFFSET);
+
+ return 0;
+}
+
+/* Disable temperature interrupt */
+static int stm_disable_irq(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+ /* Disable IT generation for low and high thresholds */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ writel_relaxed(value & ~(LOW_THRESHOLD | HIGH_THRESHOLD),
+ sensor->base + DTS_ITENR_OFFSET);
+
+ dev_dbg(sensor->dev, "%s: IT disabled on sensor side", __func__);
+
+ return 0;
+}
+
+/* Enable temperature interrupt */
+static int stm_enable_irq(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+ /*
+ * The code below programs the high temperature threshold through the
+ * low threshold register: the sample count decreases as temperature
+ * rises, so a high temperature corresponds to a low sampling value.
+ */
+
+ /* Make sure LOW_THRESHOLD IT is clear before enabling */
+ writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ /* Enable IT generation for low threshold */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ value |= LOW_THRESHOLD;
+
+ /* Enable the low temperature threshold if needed */
+ if (sensor->low_temp_enabled) {
+ /* Make sure HIGH_THRESHOLD IT is clear before enabling */
+ writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ /* Enable IT generation for high threshold */
+ value |= HIGH_THRESHOLD;
+ }
+
+ /* Enable thresholds */
+ writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
+
+ dev_dbg(sensor->dev, "%s: IT enabled on sensor side", __func__);
+
+ return 0;
+}
+
+static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
+ ret = stm_sensor_power_off(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_disable_irq(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_thermal_set_threshold(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_enable_irq(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+}
+
+/* Callback to get temperature from HW */
+static int stm_thermal_get_temp(void *data, int *temp)
+{
+ struct stm_thermal_sensor *sensor = data;
+ u32 sampling_time;
+ int freqM, ret;
+
+ if (sensor->mode != THERMAL_DEVICE_ENABLED)
+ return -EAGAIN;
+
+ /* Retrieve the number of samples */
+ ret = readl_poll_timeout(sensor->base + DTS_DR_OFFSET, freqM,
+ (freqM & TS1_MFREQ_MASK), STARTUP_TIME,
+ POLL_TIMEOUT);
+
+ if (ret)
+ return ret;
+
+ if (!freqM)
+ return -ENODATA;
+
+ /* Retrieve the number of periods sampled */
+ sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
+ TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
+
+ /* Figure out the number of samples per period */
+ freqM /= sampling_time;
+
+ /* Figure out the CLK_PTAT frequency */
+ freqM = clk_get_rate(sensor->clk) / freqM;
+ if (!freqM)
+ return -EINVAL;
+
+ dev_dbg(sensor->dev, "%s: freqM=%d\n", __func__, freqM);
+
+ /* Figure out the temperature in millidegrees Celsius */
+ *temp = mcelsius(sensor->t0 + ((freqM - sensor->fmt0) /
+ sensor->ramp_coeff));
+
+ dev_dbg(sensor->dev, "%s: temperature = %d millicelsius",
+ __func__, *temp);
+
+ /* Update thresholds */
+ if (sensor->num_trips > 1) {
+ /* Update alarm threshold value to next higher trip point */
+ if (sensor->high_temp == sensor->temp_passive &&
+ celsius(*temp) >= sensor->temp_passive) {
+ sensor->high_temp = sensor->temp_critical;
+ sensor->low_temp = sensor->temp_passive;
+ sensor->low_temp_enabled = true;
+ ret = stm_thermal_update_threshold(sensor);
+ if (ret)
+ return ret;
+ }
+
+ if (sensor->high_temp == sensor->temp_critical &&
+ celsius(*temp) < sensor->temp_passive) {
+ sensor->high_temp = sensor->temp_passive;
+ sensor->low_temp_enabled = false;
+ ret = stm_thermal_update_threshold(sensor);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Re-enable alarm IRQ if temperature below critical
+ * temperature
+ */
+ if (!sensor->irq_enabled &&
+ (celsius(*temp) < sensor->temp_critical)) {
+ sensor->irq_enabled = true;
+ enable_irq(sensor->irq);
+ }
+ }
+
+ return 0;
+}
+
+/* Registers DTS irq to be visible by GIC */
+static int stm_register_irq(struct stm_thermal_sensor *sensor)
+{
+ struct device *dev = sensor->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ sensor->irq = platform_get_irq(pdev, 0);
+ if (sensor->irq < 0) {
+ dev_err(dev, "%s: Unable to find IRQ\n", __func__);
+ return sensor->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, sensor->irq,
+ stm_thermal_alarm_irq,
+ stm_thermal_alarm_irq_thread,
+ IRQF_ONESHOT,
+ dev->driver->name, sensor);
+ if (ret) {
+ dev_err(dev, "%s: Failed to register IRQ %d\n", __func__,
+ sensor->irq);
+ return ret;
+ }
+
+ sensor->irq_enabled = true;
+
+ dev_dbg(dev, "%s: thermal IRQ registered", __func__);
+
+ return 0;
+}
+
+static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+
+ ret = stm_sensor_power_off(sensor);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(sensor->clk);
+
+ return 0;
+}
+
+static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+ struct device *dev = sensor->dev;
+
+ ret = clk_prepare_enable(sensor->clk);
+ if (ret)
+ return ret;
+
+ ret = stm_thermal_calibration(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ /* Set threshold(s) for IRQ */
+ ret = stm_thermal_set_threshold(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ ret = stm_enable_irq(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret) {
+ dev_err(dev, "%s: failed to power on sensor\n", __func__);
+ goto irq_disable;
+ }
+
+ return 0;
+
+irq_disable:
+ stm_disable_irq(sensor);
+
+thermal_unprepare:
+ clk_disable_unprepare(sensor->clk);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm_thermal_suspend(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ ret = stm_thermal_sensor_off(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
+ return 0;
+}
+
+static int stm_thermal_resume(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ ret = stm_thermal_prepare(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
+
+static const struct thermal_zone_of_device_ops stm_tz_ops = {
+ .get_temp = stm_thermal_get_temp,
+};
+
+static const struct of_device_id stm_thermal_of_match[] = {
+ { .compatible = "st,stm32-thermal"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stm_thermal_of_match);
+
+static int stm_thermal_probe(struct platform_device *pdev)
+{
+ struct stm_thermal_sensor *sensor;
+ struct resource *res;
+ const struct thermal_trip *trip;
+ void __iomem *base;
+ int ret, i;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "%s: device tree node not found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sensor);
+
+ sensor->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* Populate sensor */
+ sensor->base = base;
+
+ ret = stm_thermal_read_factory_settings(sensor);
+ if (ret)
+ return ret;
+
+ sensor->clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(sensor->clk)) {
+ dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
+ __func__);
+ return PTR_ERR(sensor->clk);
+ }
+
+ /* Register IRQ into GIC */
+ ret = stm_register_irq(sensor);
+ if (ret)
+ return ret;
+
+ sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
+ sensor,
+ &stm_tz_ops);
+
+ if (IS_ERR(sensor->th_dev)) {
+ dev_err(&pdev->dev, "%s: thermal zone sensor registering KO\n",
+ __func__);
+ ret = PTR_ERR(sensor->th_dev);
+ return ret;
+ }
+
+ if (!sensor->th_dev->ops->get_crit_temp) {
+ /* Critical point must be provided */
+ ret = -EINVAL;
+ goto err_tz;
+ }
+
+ ret = sensor->th_dev->ops->get_crit_temp(sensor->th_dev,
+ &sensor->temp_critical);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Not able to read critical_temp: %d\n", ret);
+ goto err_tz;
+ }
+
+ sensor->temp_critical = celsius(sensor->temp_critical);
+
+ /* Set thresholds for IRQ */
+ sensor->high_temp = sensor->temp_critical;
+
+ trip = of_thermal_get_trip_points(sensor->th_dev);
+ sensor->num_trips = of_thermal_get_ntrips(sensor->th_dev);
+
+ /* Find out passive temperature if it exists */
+ for (i = (sensor->num_trips - 1); i >= 0; i--) {
+ if (trip[i].type == THERMAL_TRIP_PASSIVE) {
+ sensor->temp_passive = celsius(trip[i].temperature);
+ /* Update high temperature threshold */
+ sensor->high_temp = sensor->temp_passive;
+ }
+ }
+
+ /*
+ * Ensure the low_temp_enabled flag is cleared. With it cleared, the
+ * low threshold interrupt is neither configured nor enabled, as it
+ * is not needed: the high threshold is set on the lowest temperature
+ * trip point after probe.
+ */
+ sensor->low_temp_enabled = false;
+
+ /* Configure and enable HW sensor */
+ ret = stm_thermal_prepare(sensor);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Not able to enable sensor: %d\n", ret);
+ goto err_tz;
+ }
+
+ /*
+ * The thermal zone does not enable hwmon by default,
+ * so enable it here.
+ */
+ sensor->th_dev->tzp->no_hwmon = false;
+ ret = thermal_add_hwmon_sysfs(sensor->th_dev);
+ if (ret)
+ goto err_tz;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ dev_info(&pdev->dev, "%s: Driver initialized successfully\n",
+ __func__);
+
+ return 0;
+
+err_tz:
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
+ return ret;
+}
+
+static int stm_thermal_remove(struct platform_device *pdev)
+{
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ stm_thermal_sensor_off(sensor);
+ thermal_remove_hwmon_sysfs(sensor->th_dev);
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
+
+ return 0;
+}
+
+static struct platform_driver stm_thermal_driver = {
+ .driver = {
+ .name = "stm_thermal",
+ .pm = &stm_thermal_pm_ops,
+ .of_match_table = stm_thermal_of_match,
+ },
+ .probe = stm_thermal_probe,
+ .remove = stm_thermal_remove,
+};
+module_platform_driver(stm_thermal_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics STM32 Thermal Sensor Driver");
+MODULE_AUTHOR("David Hernandez Sanchez <david.hernandezsanchez@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm_thermal");
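
The STM32 DTS measures temperature indirectly: the PTAT clock frequency rises with temperature, so stm_thermal_get_temp() recovers temp = t0 + (freqM - fmt0) / ramp_coeff, and thresholds are programmed as sample counts, which fall as temperature rises. A worked example of the conversion as a standalone userspace program; the factory values below are made up for illustration (real parts provide t0, fmt0 and ramp_coeff in fuses):

/* Worked example of the DTS frequency-to-temperature conversion. */
#include <stdio.h>

int main(void)
{
	int t0 = 30;		/* calibration temperature, C */
	int fmt0 = 100000;	/* CLK_PTAT frequency at t0, Hz */
	int ramp_coeff = 1000;	/* frequency slope, Hz/C */
	int freqM = 200000;	/* measured CLK_PTAT frequency, Hz */

	/* temp = t0 + (freqM - fmt0) / ramp_coeff, reported in mC */
	int temp = (t0 + (freqM - fmt0) / ramp_coeff) * 1000;

	printf("temp = %d mC\n", temp);	/* prints temp = 130000 mC */
	return 0;
}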
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index f103665cad43..40182ed85648 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/console.h>
-#include <asm/io.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
@@ -401,7 +400,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
#endif /* CONFIG_PMAC_BACKLIGHT */
#ifdef CONFIG_PPC
- p->screen_base = __ioremap(addr, 0x200000, _PAGE_NO_CACHE);
+ p->screen_base = ioremap_wc(addr, 0x200000);
#else
p->screen_base = ioremap(addr, 0x200000);
#endif
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 8d14b29aafea..9cb0ef7ac29e 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -48,9 +48,7 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
-#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/btext.h>
#include "macmodes.h"
@@ -715,8 +713,7 @@ static int __init control_of_init(struct device_node *dp)
goto error_out;
}
/* map at most 8MB for the frame buffer */
- p->frame_buffer = __ioremap(p->frame_buffer_phys, 0x800000,
- _PAGE_WRITETHRU);
+ p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
if (!p->control_regs_phys ||
!request_mem_region(p->control_regs_phys, p->control_regs_size,
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index bc9eb8afc313..332a56b6811f 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1925,7 +1925,7 @@ static int __init fsl_diu_init(void)
pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
#ifdef CONFIG_NOT_COHERENT_CACHE
- np = of_find_node_by_type(NULL, "cpu");
+ np = of_get_cpu_node(0, NULL);
if (!np) {
pr_err("fsl-diu-fb: can't find 'cpu' device node\n");
return -ENODEV;
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 377d3399a3ad..bf6b7fb83cf4 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -32,9 +32,7 @@
#include <linux/nvram.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include "macmodes.h"
#include "platinumfb.h"
@@ -577,8 +575,7 @@ static int platinumfb_probe(struct platform_device* odev)
/* frame buffer - map only 4MB */
pinfo->frame_buffer_phys = pinfo->rsrc_fb.start;
- pinfo->frame_buffer = __ioremap(pinfo->rsrc_fb.start, 0x400000,
- _PAGE_WRITETHRU);
+ pinfo->frame_buffer = ioremap_wt(pinfo->rsrc_fb.start, 0x400000);
pinfo->base_frame_buffer = pinfo->frame_buffer;
/* registers */
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 275fb98236d3..d51c3a8009cb 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -54,13 +54,11 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
-#include <asm/io.h>
#ifdef CONFIG_MAC
#include <asm/macintosh.h>
#else
#include <asm/prom.h>
#endif
-#include <asm/pgtable.h>
#include "macmodes.h"
#include "valkyriefb.h"
@@ -318,7 +316,7 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
int __init valkyriefb_init(void)
{
struct fb_info_valkyrie *p;
- unsigned long frame_buffer_phys, cmap_regs_phys, flags;
+ unsigned long frame_buffer_phys, cmap_regs_phys;
int err;
char *option = NULL;
@@ -337,7 +335,6 @@ int __init valkyriefb_init(void)
/* Hardcoded addresses... welcome to 68k Macintosh country :-) */
frame_buffer_phys = 0xf9000000;
cmap_regs_phys = 0x50f24000;
- flags = IOMAP_NOCACHE_SER; /* IOMAP_WRITETHROUGH?? */
#else /* ppc (!CONFIG_MAC) */
{
struct device_node *dp;
@@ -354,7 +351,6 @@ int __init valkyriefb_init(void)
frame_buffer_phys = r.start;
cmap_regs_phys = r.start + 0x304000;
- flags = _PAGE_WRITETHRU;
}
#endif /* ppc (!CONFIG_MAC) */
@@ -369,7 +365,11 @@ int __init valkyriefb_init(void)
}
p->total_vram = 0x100000;
p->frame_buffer_phys = frame_buffer_phys;
- p->frame_buffer = __ioremap(frame_buffer_phys, p->total_vram, flags);
+#ifdef CONFIG_MAC
+ p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram);
+#else
+ p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram);
+#endif
p->cmap_regs_phys = cmap_regs_phys;
p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
p->valkyrie_regs_phys = cmap_regs_phys+0x6000;
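
The fbdev conversions in this series replace the ppc-only __ioremap(addr, size, flags) calls with portable helpers that encode the caching attribute in the function name: _PAGE_NO_CACHE becomes ioremap_wc() and _PAGE_WRITETHRU becomes ioremap_wt(). A minimal sketch of the choice, illustrative only:

/* Sketch: pick a framebuffer mapping attribute via the named helpers. */
void __iomem *map_framebuffer(phys_addr_t addr, size_t size, bool prefer_wc)
{
	if (prefer_wc)
		return ioremap_wc(addr, size);	/* write-combining */

	return ioremap_wt(addr, size);		/* write-through */
}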