Diffstat (limited to 'drivers')
337 files changed, 12013 insertions, 4836 deletions
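A pattern worth noting before the per-file hunks: several conversions below (devtmpfs, rbd) move to the reworked mount-API parser, in which the struct fs_parameter_description wrapper is gone, .parameters points at a bare fs_parameter_spec array, and callers use __fs_parse() with a struct p_log carrying the log prefix, reporting bad values via inval_plog() instead of invalf(NULL, "rbd: ..."). Below is a minimal sketch of that shape, assuming the v5.6-era <linux/fs_parser.h> and <linux/fs_context.h> interfaces; the demo_* names are hypothetical, not from the patch.

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_demo_size, Opt_demo_exclusive };

/* Bare spec array; no fs_parameter_description wrapper anymore. */
static const struct fs_parameter_spec demo_parameters[] = {
	fsparam_u32	("size",	Opt_demo_size),
	fsparam_flag	("exclusive",	Opt_demo_exclusive),
	{}
};

static int demo_parse_param(struct fs_parameter *param,
			    struct fs_parse_result *result)
{
	struct p_log log = { .prefix = "demo" };	/* prefix replaces "demo: " in format strings */
	int token;

	token = __fs_parse(&log, demo_parameters, param, result);
	if (token < 0) {
		if (token == -ENOPARAM)
			return inval_plog(&log, "Unknown parameter '%s'",
					  param->key);
		return token;
	}

	/* result->uint_32 (fsparam_u32) or result->boolean is now valid. */
	return 0;
}

rbd_parse_param() in the hunk below follows exactly this flow, trying ceph_parse_param() first and falling through to __fs_parse() only on -ENOPARAM.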
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 71511ae2dfcd..ba2612e9a0eb 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -161,6 +161,11 @@ static const struct apd_device_desc hip08_i2c_desc = { .fixed_clk_rate = 250000000, }; +static const struct apd_device_desc hip08_lite_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 125000000, +}; + static const struct apd_device_desc thunderx2_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 125000000, @@ -243,6 +248,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "CAV9007", APD_ADDR(thunderx2_i2c_desc) }, { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, + { "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) }, { "HISI0173", APD_ADDR(hip08_spi_desc) }, { "NXP0001", APD_ADDR(nxp_i2c_desc) }, #endif diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 5995c437cbdf..c9017e0584c0 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -67,10 +67,10 @@ static struct file_system_type internal_fs_type = { .name = "devtmpfs", #ifdef CONFIG_TMPFS .init_fs_context = shmem_init_fs_context, - .parameters = &shmem_fs_parameters, + .parameters = shmem_fs_parameters, #else .init_fs_context = ramfs_init_fs_context, - .parameters = &ramfs_fs_parameters, + .parameters = ramfs_fs_parameters, #endif .kill_sb = kill_litter_super, }; diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 8e5725b11ee8..959d6d5eb000 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2303,6 +2303,44 @@ out: EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); /** + * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. + * @parent_spec: OF phandle args to use for parent PM domain look-up + * @subdomain_spec: OF phandle args to use for subdomain look-up + * + * Looks-up a parent PM domain and subdomain based upon phandle args + * provided and removes the subdomain from the parent PM domain. Returns a + * negative error code on failure. + */ +int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, + struct of_phandle_args *subdomain_spec) +{ + struct generic_pm_domain *parent, *subdomain; + int ret; + + mutex_lock(&gpd_list_lock); + + parent = genpd_get_from_provider(parent_spec); + if (IS_ERR(parent)) { + ret = PTR_ERR(parent); + goto out; + } + + subdomain = genpd_get_from_provider(subdomain_spec); + if (IS_ERR(subdomain)) { + ret = PTR_ERR(subdomain); + goto out; + } + + ret = pm_genpd_remove_subdomain(parent, subdomain); + +out: + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain); + +/** * of_genpd_remove_last - Remove the last PM domain registered for a provider * @provider: Pointer to device structure associated with provider * diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 1bb8ec575352..025b1b77b11a 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -432,16 +432,6 @@ config VIRTIO_BLK This is the virtual block driver for virtio. It can be used with QEMU based VMMs (like KVM or Xen). Say Y or M. -config VIRTIO_BLK_SCSI - bool "SCSI passthrough request for the Virtio block driver" - depends on VIRTIO_BLK - select BLK_SCSI_REQUEST - ---help--- - Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on - virtio-blk devices. This is only supported for the legacy - virtio protocol and not enabled by default by any hypervisor. - You probably want to use virtio-scsi instead. 
- config BLK_DEV_RBD tristate "Rados block device (RBD)" depends on INET && BLOCK diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 405b66e09040..6343402c09e6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -848,7 +848,7 @@ enum { Opt_notrim, }; -static const struct fs_parameter_spec rbd_param_specs[] = { +static const struct fs_parameter_spec rbd_parameters[] = { fsparam_u32 ("alloc_size", Opt_alloc_size), fsparam_flag ("exclusive", Opt_exclusive), fsparam_flag ("lock_on_read", Opt_lock_on_read), @@ -863,11 +863,6 @@ static const struct fs_parameter_spec rbd_param_specs[] = { {} }; -static const struct fs_parameter_description rbd_parameters = { - .name = "rbd", - .specs = rbd_param_specs, -}; - struct rbd_options { int queue_depth; int alloc_size; @@ -6353,19 +6348,19 @@ static int rbd_parse_param(struct fs_parameter *param, { struct rbd_options *opt = pctx->opts; struct fs_parse_result result; + struct p_log log = {.prefix = "rbd"}; int token, ret; ret = ceph_parse_param(param, pctx->copts, NULL); if (ret != -ENOPARAM) return ret; - token = fs_parse(NULL, &rbd_parameters, param, &result); + token = __fs_parse(&log, rbd_parameters, param, &result); dout("%s fs_parse '%s' token %d\n", __func__, param->key, token); if (token < 0) { - if (token == -ENOPARAM) { - return invalf(NULL, "rbd: Unknown parameter '%s'", - param->key); - } + if (token == -ENOPARAM) + return inval_plog(&log, "Unknown parameter '%s'", + param->key); return token; } @@ -6378,9 +6373,8 @@ static int rbd_parse_param(struct fs_parameter *param, case Opt_alloc_size: if (result.uint_32 < SECTOR_SIZE) goto out_of_range; - if (!is_power_of_2(result.uint_32)) { - return invalf(NULL, "rbd: alloc_size must be a power of 2"); - } + if (!is_power_of_2(result.uint_32)) + return inval_plog(&log, "alloc_size must be a power of 2"); opt->alloc_size = result.uint_32; break; case Opt_lock_timeout: @@ -6416,7 +6410,7 @@ static int rbd_parse_param(struct fs_parameter *param, return 0; out_of_range: - return invalf(NULL, "rbd: %s out of range", param->key); + return inval_plog(&log, "%s out of range", param->key); } /* @@ -6433,7 +6427,7 @@ static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx) if (*key) { struct fs_parameter param = { .key = key, - .type = fs_value_is_string, + .type = fs_value_is_flag, }; char *value = strchr(key, '='); size_t v_len = 0; @@ -6443,14 +6437,11 @@ static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx) continue; *value++ = 0; v_len = strlen(value); - } - - - if (v_len > 0) { param.string = kmemdup_nul(value, v_len, GFP_KERNEL); if (!param.string) return -ENOMEM; + param.type = fs_value_is_string; } param.size = v_len; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index fbbf18ac1d5d..54158766334b 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -11,7 +11,6 @@ #include <linux/virtio_blk.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> -#include <scsi/scsi_cmnd.h> #include <linux/idr.h> #include <linux/blk-mq.h> #include <linux/blk-mq-virtio.h> @@ -56,11 +55,6 @@ struct virtio_blk { }; struct virtblk_req { -#ifdef CONFIG_VIRTIO_BLK_SCSI - struct scsi_request sreq; /* for SCSI passthrough, must be first */ - u8 sense[SCSI_SENSE_BUFFERSIZE]; - struct virtio_scsi_inhdr in_hdr; -#endif struct virtio_blk_outhdr out_hdr; u8 status; struct scatterlist sg[]; @@ -78,80 +72,6 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr) } } -/* - * If this is a packet command 
we need a couple of additional headers. Behind - * the normal outhdr we put a segment with the scsi command block, and before - * the normal inhdr we put the sense data and the inhdr with additional status - * information. - */ -#ifdef CONFIG_VIRTIO_BLK_SCSI -static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr, - struct scatterlist *data_sg, bool have_data) -{ - struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; - unsigned int num_out = 0, num_in = 0; - - sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); - sgs[num_out++] = &hdr; - sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len); - sgs[num_out++] = &cmd; - - if (have_data) { - if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) - sgs[num_out++] = data_sg; - else - sgs[num_out + num_in++] = data_sg; - } - - sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE); - sgs[num_out + num_in++] = &sense; - sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); - sgs[num_out + num_in++] = &inhdr; - sg_init_one(&status, &vbr->status, sizeof(vbr->status)); - sgs[num_out + num_in++] = &status; - - return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); -} - -static inline void virtblk_scsi_request_done(struct request *req) -{ - struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); - struct virtio_blk *vblk = req->q->queuedata; - struct scsi_request *sreq = &vbr->sreq; - - sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual); - sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len); - sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors); -} - -static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long data) -{ - struct gendisk *disk = bdev->bd_disk; - struct virtio_blk *vblk = disk->private_data; - - /* - * Only allow the generic SCSI ioctls if the host can support it. 
- */ - if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) - return -ENOTTY; - - return scsi_cmd_blk_ioctl(bdev, mode, cmd, - (void __user *)data); -} -#else -static inline int virtblk_add_req_scsi(struct virtqueue *vq, - struct virtblk_req *vbr, struct scatterlist *data_sg, - bool have_data) -{ - return -EIO; -} -static inline void virtblk_scsi_request_done(struct request *req) -{ -} -#define virtblk_ioctl NULL -#endif /* CONFIG_VIRTIO_BLK_SCSI */ - static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr, struct scatterlist *data_sg, bool have_data) { @@ -216,13 +136,6 @@ static inline void virtblk_request_done(struct request *req) req->special_vec.bv_offset); } - switch (req_op(req)) { - case REQ_OP_SCSI_IN: - case REQ_OP_SCSI_OUT: - virtblk_scsi_request_done(req); - break; - } - blk_mq_end_request(req, virtblk_result(vbr)); } @@ -299,10 +212,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, type = VIRTIO_BLK_T_WRITE_ZEROES; unmap = !(req->cmd_flags & REQ_NOUNMAP); break; - case REQ_OP_SCSI_IN: - case REQ_OP_SCSI_OUT: - type = VIRTIO_BLK_T_SCSI_CMD; - break; case REQ_OP_DRV_IN: type = VIRTIO_BLK_T_GET_ID; break; @@ -333,10 +242,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, } spin_lock_irqsave(&vblk->vqs[qid].lock, flags); - if (blk_rq_is_scsi(req)) - err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num); - else - err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); + err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vqs[qid].vq); blk_mq_stop_hw_queue(hctx); @@ -404,10 +310,6 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) } static const struct block_device_operations virtblk_fops = { - .ioctl = virtblk_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = blkdev_compat_ptr_ioctl, -#endif .owner = THIS_MODULE, .getgeo = virtblk_getgeo, }; @@ -686,9 +588,6 @@ static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq, struct virtio_blk *vblk = set->driver_data; struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); -#ifdef CONFIG_VIRTIO_BLK_SCSI - vbr->sreq.sense = vbr->sense; -#endif sg_init_table(vbr->sg, vblk->sg_elems); return 0; } @@ -701,23 +600,11 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set) vblk->vdev, 0); } -#ifdef CONFIG_VIRTIO_BLK_SCSI -static void virtblk_initialize_rq(struct request *req) -{ - struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); - - scsi_req_init(&vbr->sreq); -} -#endif - static const struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .commit_rqs = virtio_commit_rqs, .complete = virtblk_request_done, .init_request = virtblk_init_request, -#ifdef CONFIG_VIRTIO_BLK_SCSI - .initialize_rq_fn = virtblk_initialize_rq, -#endif .map_queues = virtblk_map_queues, }; @@ -994,9 +881,6 @@ static const struct virtio_device_id id_table[] = { static unsigned int features_legacy[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, -#ifdef CONFIG_VIRTIO_BLK_SCSI - VIRTIO_BLK_F_SCSI, -#endif VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, } diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 50200d1c06ea..6095b6df8a81 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -139,7 +139,6 @@ config TEGRA_ACONNECT tristate "Tegra ACONNECT Bus Driver" depends on ARCH_TEGRA_210_SOC depends on OF && PM - select PM_CLK help Driver for the 
Tegra ACONNECT bus which is used to interface with the devices inside the Audio Processing Engine (APE) for Tegra210. diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index 36cf13eee6b8..15fa293819a0 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -102,12 +102,11 @@ static int moxtet_match(struct device *dev, struct device_driver *drv) return 0; } -struct bus_type moxtet_bus_type = { +static struct bus_type moxtet_bus_type = { .name = "moxtet", .dev_groups = moxtet_dev_groups, .match = moxtet_match, }; -EXPORT_SYMBOL_GPL(moxtet_bus_type); int __moxtet_register_driver(struct module *owner, struct moxtet_driver *mdrv) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index ccb44fe790a7..f702c85c81b6 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -479,7 +479,7 @@ static void sysc_clkdm_deny_idle(struct sysc *ddata) { struct ti_sysc_platform_data *pdata; - if (ddata->legacy_mode) + if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO)) return; pdata = dev_get_platdata(ddata->dev); @@ -491,7 +491,7 @@ static void sysc_clkdm_allow_idle(struct sysc *ddata) { struct ti_sysc_platform_data *pdata; - if (ddata->legacy_mode) + if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO)) return; pdata = dev_get_platdata(ddata->dev); @@ -509,10 +509,8 @@ static int sysc_init_resets(struct sysc *ddata) { ddata->rsts = devm_reset_control_get_optional_shared(ddata->dev, "rstctrl"); - if (IS_ERR(ddata->rsts)) - return PTR_ERR(ddata->rsts); - return 0; + return PTR_ERR_OR_ZERO(ddata->rsts); } /** @@ -1216,10 +1214,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { /* These drivers need to be fixed to not use pm_runtime_irq_safe() */ SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff, SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET), - SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000020, 0xffffffff, - SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000030, 0xffffffff, - SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff, @@ -1251,6 +1245,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { /* Quirks that need to be set based on detected module */ SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff, SYSC_MODULE_QUIRK_AESS), + SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff, + SYSC_QUIRK_CLKDM_NOAUTO), + SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK_CLKDM_NOAUTO), + SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK_CLKDM_NOAUTO), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, SYSC_MODULE_QUIRK_HDQ1W), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, diff --git a/drivers/clk/clk-plldig.c b/drivers/clk/clk-plldig.c index 312b8312d503..25020164b89e 100644 --- a/drivers/clk/clk-plldig.c +++ b/drivers/clk/clk-plldig.c @@ -187,7 +187,7 @@ static int plldig_init(struct clk_hw *hw) { struct clk_plldig *data = to_clk_plldig(hw); struct clk_hw *parent = clk_hw_get_parent(hw); - unsigned long parent_rate = clk_hw_get_rate(parent); + unsigned long parent_rate; unsigned long val; unsigned long long lltmp; unsigned int mfd, fracdiv = 0; @@ -195,6 +195,8 @@ static int plldig_init(struct clk_hw *hw) if (!parent) return -EINVAL; + parent_rate = clk_hw_get_rate(parent); + if (data->vco_freq) { mfd = data->vco_freq / parent_rate; 
lltmp = data->vco_freq % parent_rate; diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 886f7c5df51a..c491f5de0f3f 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -176,7 +176,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) } static const struct scmi_device_id scmi_id_table[] = { - { SCMI_PROTOCOL_CLOCK }, + { SCMI_PROTOCOL_CLOCK, "clocks" }, { }, }; MODULE_DEVICE_TABLE(scmi, scmi_id_table); diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index b4a95cbbda98..6e71591e63a0 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -53,6 +53,8 @@ #define APMU_DISP1 0x110 #define APMU_CCIC0 0x50 #define APMU_CCIC1 0xf4 +#define APMU_USBHSIC0 0xf8 +#define APMU_USBHSIC1 0xfc #define MPMU_UART_PLL 0x14 struct mmp2_clk_unit { @@ -194,6 +196,8 @@ static struct mmp_clk_mix_config sdh_mix_config = { }; static DEFINE_SPINLOCK(usb_lock); +static DEFINE_SPINLOCK(usbhsic0_lock); +static DEFINE_SPINLOCK(usbhsic1_lock); static DEFINE_SPINLOCK(disp0_lock); static DEFINE_SPINLOCK(disp1_lock); @@ -224,6 +228,8 @@ static struct mmp_param_div_clk apmu_div_clks[] = { static struct mmp_param_gate_clk apmu_gate_clks[] = { {MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock}, + {MMP2_CLK_USBHSIC0, "usbhsic0_clk", "usb_pll", 0, APMU_USBHSIC0, 0x1b, 0x1b, 0x0, 0, &usbhsic0_lock}, + {MMP2_CLK_USBHSIC1, "usbhsic1_clk", "usb_pll", 0, APMU_USBHSIC1, 0x1b, 0x1b, 0x0, 0, &usbhsic1_lock}, /* The gate clocks has mux parent. */ {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index da045b200def..357159fe85b5 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -218,6 +218,9 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, clk_flags = clk_hw_get_flags(hw); p = clk_hw_get_parent_by_index(hw, index); + if (!p) + return -EINVAL; + if (clk_flags & CLK_SET_RATE_PARENT) { rate = f->freq; if (f->pre_div) { @@ -953,7 +956,7 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, struct clk_rcg2 *rcg = to_clk_rcg2(hw); struct clk_hw *p; unsigned long prate = 0; - u32 val, mask, cfg, mode; + u32 val, mask, cfg, mode, src; int i, num_parents; regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg); @@ -963,12 +966,12 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, if (cfg & mask) f->pre_div = cfg & mask; - cfg &= CFG_SRC_SEL_MASK; - cfg >>= CFG_SRC_SEL_SHIFT; + src = cfg & CFG_SRC_SEL_MASK; + src >>= CFG_SRC_SEL_SHIFT; num_parents = clk_hw_get_num_parents(hw); for (i = 0; i < num_parents; i++) { - if (cfg == rcg->parent_map[i].cfg) { + if (src == rcg->parent_map[i].cfg) { f->src = rcg->parent_map[i].src; p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i); prate = clk_hw_get_rate(p); diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c index 30c1e25d3edb..dd7af41e47eb 100644 --- a/drivers/clk/qcom/dispcc-sc7180.c +++ b/drivers/clk/qcom/dispcc-sc7180.c @@ -76,40 +76,32 @@ static struct clk_alpha_pll_postdiv disp_cc_pll0_out_even = { static const struct parent_map disp_cc_parent_map_0[] = { { P_BI_TCXO, 0 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const struct clk_parent_data disp_cc_parent_data_0[] = { { .fw_name = "bi_tcxo" }, - { 
.fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" }, }; static const struct parent_map disp_cc_parent_map_1[] = { { P_BI_TCXO, 0 }, { P_DP_PHY_PLL_LINK_CLK, 1 }, { P_DP_PHY_PLL_VCO_DIV_CLK, 2 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const struct clk_parent_data disp_cc_parent_data_1[] = { { .fw_name = "bi_tcxo" }, - { .fw_name = "dp_phy_pll_link_clk", .name = "dp_phy_pll_link_clk" }, - { .fw_name = "dp_phy_pll_vco_div_clk", - .name = "dp_phy_pll_vco_div_clk"}, - { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" }, + { .fw_name = "dp_phy_pll_link_clk" }, + { .fw_name = "dp_phy_pll_vco_div_clk" }, }; static const struct parent_map disp_cc_parent_map_2[] = { { P_BI_TCXO, 0 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const struct clk_parent_data disp_cc_parent_data_2[] = { { .fw_name = "bi_tcxo" }, - { .fw_name = "dsi0_phy_pll_out_byteclk", - .name = "dsi0_phy_pll_out_byteclk" }, - { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" }, + { .fw_name = "dsi0_phy_pll_out_byteclk" }, }; static const struct parent_map disp_cc_parent_map_3[] = { @@ -117,7 +109,6 @@ static const struct parent_map disp_cc_parent_map_3[] = { { P_DISP_CC_PLL0_OUT_MAIN, 1 }, { P_GPLL0_OUT_MAIN, 4 }, { P_DISP_CC_PLL0_OUT_EVEN, 5 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const struct clk_parent_data disp_cc_parent_data_3[] = { @@ -125,32 +116,26 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = { { .hw = &disp_cc_pll0.clkr.hw }, { .fw_name = "gcc_disp_gpll0_clk_src" }, { .hw = &disp_cc_pll0_out_even.clkr.hw }, - { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" }, }; static const struct parent_map disp_cc_parent_map_4[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 4 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const struct clk_parent_data disp_cc_parent_data_4[] = { { .fw_name = "bi_tcxo" }, { .fw_name = "gcc_disp_gpll0_clk_src" }, - { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" }, }; static const struct parent_map disp_cc_parent_map_5[] = { { P_BI_TCXO, 0 }, { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const struct clk_parent_data disp_cc_parent_data_5[] = { { .fw_name = "bi_tcxo" }, - { .fw_name = "dsi0_phy_pll_out_dsiclk", - .name = "dsi0_phy_pll_out_dsiclk" }, - { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" }, + { .fw_name = "dsi0_phy_pll_out_dsiclk" }, }; static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = { @@ -169,7 +154,7 @@ static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_ahb_clk_src", .parent_data = disp_cc_parent_data_4, - .num_parents = 3, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, @@ -183,7 +168,7 @@ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_byte0_clk_src", .parent_data = disp_cc_parent_data_2, - .num_parents = 3, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, @@ -203,7 +188,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_dp_aux_clk_src", .parent_data = disp_cc_parent_data_0, - .num_parents = 2, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .ops = &clk_rcg2_ops, }, }; @@ -216,7 +201,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = { .clkr.hw.init = &(struct 
clk_init_data){ .name = "disp_cc_mdss_dp_crypto_clk_src", .parent_data = disp_cc_parent_data_1, - .num_parents = 4, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, @@ -230,7 +215,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_dp_link_clk_src", .parent_data = disp_cc_parent_data_1, - .num_parents = 4, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_byte2_ops, }, @@ -244,7 +229,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_dp_pixel_clk_src", .parent_data = disp_cc_parent_data_1, - .num_parents = 4, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_dp_ops, }, @@ -259,7 +244,7 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_esc0_clk_src", .parent_data = disp_cc_parent_data_2, - .num_parents = 3, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), .ops = &clk_rcg2_ops, }, }; @@ -282,7 +267,7 @@ static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_mdp_clk_src", .parent_data = disp_cc_parent_data_3, - .num_parents = 5, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .ops = &clk_rcg2_shared_ops, }, }; @@ -295,7 +280,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_pclk0_clk_src", .parent_data = disp_cc_parent_data_5, - .num_parents = 3, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, .ops = &clk_pixel_ops, }, @@ -310,7 +295,7 @@ static struct clk_rcg2 disp_cc_mdss_rot_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_rot_clk_src", .parent_data = disp_cc_parent_data_3, - .num_parents = 5, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .ops = &clk_rcg2_shared_ops, }, }; @@ -324,7 +309,7 @@ static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_vsync_clk_src", .parent_data = disp_cc_parent_data_0, - .num_parents = 2, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .ops = &clk_rcg2_shared_ops, }, }; diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c index ec61194cceaf..a96c0b945de2 100644 --- a/drivers/clk/qcom/gpucc-sc7180.c +++ b/drivers/clk/qcom/gpucc-sc7180.c @@ -60,7 +60,6 @@ static const struct parent_map gpu_cc_parent_map_0[] = { { P_GPU_CC_PLL1_OUT_MAIN, 3 }, { P_GPLL0_OUT_MAIN, 5 }, { P_GPLL0_OUT_MAIN_DIV, 6 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const struct clk_parent_data gpu_cc_parent_data_0[] = { @@ -68,7 +67,6 @@ static const struct clk_parent_data gpu_cc_parent_data_0[] = { { .hw = &gpu_cc_pll1.clkr.hw }, { .fw_name = "gcc_gpu_gpll0_clk_src" }, { .fw_name = "gcc_gpu_gpll0_div_clk_src" }, - { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" }, }; static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = { @@ -86,7 +84,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "gpu_cc_gmu_clk_src", .parent_data = gpu_cc_parent_data_0, - .num_parents = 5, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c index 
76add30024aa..c363c3cc544e 100644 --- a/drivers/clk/qcom/videocc-sc7180.c +++ b/drivers/clk/qcom/videocc-sc7180.c @@ -50,13 +50,11 @@ static struct clk_alpha_pll video_pll0 = { static const struct parent_map video_cc_parent_map_1[] = { { P_BI_TCXO, 0 }, { P_VIDEO_PLL0_OUT_MAIN, 1 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const struct clk_parent_data video_cc_parent_data_1[] = { { .fw_name = "bi_tcxo" }, { .hw = &video_pll0.clkr.hw }, - { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" }, }; static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = { @@ -78,7 +76,7 @@ static struct clk_rcg2 video_cc_venus_clk_src = { .clkr.hw.init = &(struct clk_init_data){ .name = "video_cc_venus_clk_src", .parent_data = video_cc_parent_data_1, - .num_parents = 3, + .num_parents = ARRAY_SIZE(video_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_shared_ops, }, diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c index 2b4dab632318..312a20f8ec0e 100644 --- a/drivers/clk/ti/clk-44xx.c +++ b/drivers/clk/ti/clk-44xx.c @@ -604,6 +604,18 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons { 0 }, }; +static const struct +omap_clkctrl_reg_data omap4_l4_secure_clkctrl_regs[] __initconst = { + { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "" }, + { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "" }, + { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "" }, + { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "" }, + { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, + { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "" }, + { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, + { 0 }, +}; + static const struct omap_clkctrl_bit_data omap4_gpio1_bit_data[] __initconst = { { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL }, { 0 }, @@ -691,6 +703,7 @@ const struct omap_clkctrl_data omap4_clkctrl_data[] __initconst = { { 0x4a009220, omap4_l3_gfx_clkctrl_regs }, { 0x4a009320, omap4_l3_init_clkctrl_regs }, { 0x4a009420, omap4_l4_per_clkctrl_regs }, + { 0x4a0095a0, omap4_l4_secure_clkctrl_regs }, { 0x4a307820, omap4_l4_wkup_clkctrl_regs }, { 0x4a307a20, omap4_emu_sys_clkctrl_regs }, { 0 }, diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c index 14d98a890c02..92bf2dda95b9 100644 --- a/drivers/clk/ti/clk-54xx.c +++ b/drivers/clk/ti/clk-54xx.c @@ -301,6 +301,18 @@ static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst { 0 }, }; +static const struct +omap_clkctrl_reg_data omap5_l4_secure_clkctrl_regs[] __initconst = { + { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "" }, + { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "" }, + { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "" }, + { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "" }, + { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, + { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "" }, + { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, + { 0 }, +}; + static const struct omap_clkctrl_reg_data omap5_iva_clkctrl_regs[] __initconst = { { OMAP5_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" }, { OMAP5_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" }, @@ -523,6 +535,7 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = { { 0x4a008d20, omap5_l4cfg_clkctrl_regs }, { 0x4a008e20, omap5_l3instr_clkctrl_regs }, { 0x4a009020, omap5_l4per_clkctrl_regs }, + { 0x4a0091a0, omap5_l4_secure_clkctrl_regs }, { 0x4a009220, omap5_iva_clkctrl_regs }, { 0x4a009420, omap5_dss_clkctrl_regs }, { 0x4a009520, omap5_gpu_clkctrl_regs }, diff 
--git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c index 62745c962049..e421946a91c5 100644 --- a/drivers/clocksource/timer-davinci.c +++ b/drivers/clocksource/timer-davinci.c @@ -302,10 +302,6 @@ int __init davinci_timer_register(struct clk *clk, return rv; } - clockevents_config_and_register(&clockevent->dev, tick_rate, - DAVINCI_TIMER_MIN_DELTA, - DAVINCI_TIMER_MAX_DELTA); - davinci_clocksource.dev.rating = 300; davinci_clocksource.dev.read = davinci_clocksource_read; davinci_clocksource.dev.mask = @@ -323,6 +319,10 @@ int __init davinci_timer_register(struct clk *clk, davinci_clocksource_init_tim34(base); } + clockevents_config_and_register(&clockevent->dev, tick_rate, + DAVINCI_TIMER_MIN_DELTA, + DAVINCI_TIMER_MAX_DELTA); + rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate); if (rv) { pr_err("Unable to register clocksource"); diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index e6182c89df79..61623e2ff149 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -261,7 +261,7 @@ static void scmi_cpufreq_remove(struct scmi_device *sdev) } static const struct scmi_device_id scmi_id_table[] = { - { SCMI_PROTOCOL_PERF }, + { SCMI_PROTOCOL_PERF, "cpufreq" }, { }, }; MODULE_DEVICE_TABLE(scmi, scmi_id_table); diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index ee70d5cc5b99..cc8c769d7fa9 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -21,7 +21,9 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o -obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o +obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o +cpuidle_psci-y := cpuidle-psci.o +cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o ############################################################################### # MIPS drivers diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c new file mode 100644 index 000000000000..423f03bbeb74 --- /dev/null +++ b/drivers/cpuidle/cpuidle-psci-domain.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PM domains for CPUs via genpd - managed by cpuidle-psci. + * + * Copyright (C) 2019 Linaro Ltd. + * Author: Ulf Hansson <ulf.hansson@linaro.org> + * + */ + +#define pr_fmt(fmt) "CPUidle PSCI: " fmt + +#include <linux/cpu.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/psci.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include "cpuidle-psci.h" + +struct psci_pd_provider { + struct list_head link; + struct device_node *node; +}; + +static LIST_HEAD(psci_pd_providers); +static bool osi_mode_enabled __initdata; + +static int psci_pd_power_off(struct generic_pm_domain *pd) +{ + struct genpd_power_state *state = &pd->states[pd->state_idx]; + u32 *pd_state; + + if (!state->data) + return 0; + + /* OSI mode is enabled, set the corresponding domain state. 
*/ + pd_state = state->data; + psci_set_domain_state(*pd_state); + + return 0; +} + +static int __init psci_pd_parse_state_nodes(struct genpd_power_state *states, + int state_count) +{ + int i, ret; + u32 psci_state, *psci_state_buf; + + for (i = 0; i < state_count; i++) { + ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode), + &psci_state); + if (ret) + goto free_state; + + psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL); + if (!psci_state_buf) { + ret = -ENOMEM; + goto free_state; + } + *psci_state_buf = psci_state; + states[i].data = psci_state_buf; + } + + return 0; + +free_state: + i--; + for (; i >= 0; i--) + kfree(states[i].data); + return ret; +} + +static int __init psci_pd_parse_states(struct device_node *np, + struct genpd_power_state **states, int *state_count) +{ + int ret; + + /* Parse the domain idle states. */ + ret = of_genpd_parse_idle_states(np, states, state_count); + if (ret) + return ret; + + /* Fill out the PSCI specifics for each found state. */ + ret = psci_pd_parse_state_nodes(*states, *state_count); + if (ret) + kfree(*states); + + return ret; +} + +static void psci_pd_free_states(struct genpd_power_state *states, + unsigned int state_count) +{ + int i; + + for (i = 0; i < state_count; i++) + kfree(states[i].data); + kfree(states); +} + +static int __init psci_pd_init(struct device_node *np) +{ + struct generic_pm_domain *pd; + struct psci_pd_provider *pd_provider; + struct dev_power_governor *pd_gov; + struct genpd_power_state *states = NULL; + int ret = -ENOMEM, state_count = 0; + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + goto out; + + pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL); + if (!pd_provider) + goto free_pd; + + pd->name = kasprintf(GFP_KERNEL, "%pOF", np); + if (!pd->name) + goto free_pd_prov; + + /* + * Parse the domain idle states and let genpd manage the state selection + * for those being compatible with "domain-idle-state". + */ + ret = psci_pd_parse_states(np, &states, &state_count); + if (ret) + goto free_name; + + pd->free_states = psci_pd_free_states; + pd->name = kbasename(pd->name); + pd->power_off = psci_pd_power_off; + pd->states = states; + pd->state_count = state_count; + pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN; + + /* Use governor for CPU PM domains if it has some states to manage. */ + pd_gov = state_count > 0 ? 
&pm_domain_cpu_gov : NULL; + + ret = pm_genpd_init(pd, pd_gov, false); + if (ret) { + psci_pd_free_states(states, state_count); + goto free_name; + } + + ret = of_genpd_add_provider_simple(np, pd); + if (ret) + goto remove_pd; + + pd_provider->node = of_node_get(np); + list_add(&pd_provider->link, &psci_pd_providers); + + pr_debug("init PM domain %s\n", pd->name); + return 0; + +remove_pd: + pm_genpd_remove(pd); +free_name: + kfree(pd->name); +free_pd_prov: + kfree(pd_provider); +free_pd: + kfree(pd); +out: + pr_err("failed to init PM domain ret=%d %pOF\n", ret, np); + return ret; +} + +static void __init psci_pd_remove(void) +{ + struct psci_pd_provider *pd_provider, *it; + struct generic_pm_domain *genpd; + + list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) { + of_genpd_del_provider(pd_provider->node); + + genpd = of_genpd_remove_last(pd_provider->node); + if (!IS_ERR(genpd)) + kfree(genpd); + + of_node_put(pd_provider->node); + list_del(&pd_provider->link); + kfree(pd_provider); + } +} + +static int __init psci_pd_init_topology(struct device_node *np, bool add) +{ + struct device_node *node; + struct of_phandle_args child, parent; + int ret; + + for_each_child_of_node(np, node) { + if (of_parse_phandle_with_args(node, "power-domains", + "#power-domain-cells", 0, &parent)) + continue; + + child.np = node; + child.args_count = 0; + + ret = add ? of_genpd_add_subdomain(&parent, &child) : + of_genpd_remove_subdomain(&parent, &child); + of_node_put(parent.np); + if (ret) { + of_node_put(node); + return ret; + } + } + + return 0; +} + +static int __init psci_pd_add_topology(struct device_node *np) +{ + return psci_pd_init_topology(np, true); +} + +static void __init psci_pd_remove_topology(struct device_node *np) +{ + psci_pd_init_topology(np, false); +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci-1.0" }, + {} +}; + +static int __init psci_idle_init_domains(void) +{ + struct device_node *np = of_find_matching_node(NULL, psci_of_match); + struct device_node *node; + int ret = 0, pd_count = 0; + + if (!np) + return -ENODEV; + + /* Currently limit the hierarchical topology to be used in OSI mode. */ + if (!psci_has_osi_support()) + goto out; + + /* + * Parse child nodes for the "#power-domain-cells" property and + * initialize a genpd/genpd-of-provider pair when it's found. + */ + for_each_child_of_node(np, node) { + if (!of_find_property(node, "#power-domain-cells", NULL)) + continue; + + ret = psci_pd_init(node); + if (ret) + goto put_node; + + pd_count++; + } + + /* Bail out if not using the hierarchical CPU topology. */ + if (!pd_count) + goto out; + + /* Link genpd masters/subdomains to model the CPU topology. */ + ret = psci_pd_add_topology(np); + if (ret) + goto remove_pd; + + /* Try to enable OSI mode. 
*/ + ret = psci_set_osi_mode(); + if (ret) { + pr_warn("failed to enable OSI mode: %d\n", ret); + psci_pd_remove_topology(np); + goto remove_pd; + } + + osi_mode_enabled = true; + of_node_put(np); + pr_info("Initialized CPU PM domain topology\n"); + return pd_count; + +put_node: + of_node_put(node); +remove_pd: + if (pd_count) + psci_pd_remove(); + pr_err("failed to create CPU PM domains ret=%d\n", ret); +out: + of_node_put(np); + return ret; +} +subsys_initcall(psci_idle_init_domains); + +struct device __init *psci_dt_attach_cpu(int cpu) +{ + struct device *dev; + + if (!osi_mode_enabled) + return NULL; + + dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci"); + if (IS_ERR_OR_NULL(dev)) + return dev; + + pm_runtime_irq_safe(dev); + if (cpu_online(cpu)) + pm_runtime_get_sync(dev); + + return dev; +} diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index f3c1a2396f98..edd7a54ef0d3 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -8,6 +8,7 @@ #define pr_fmt(fmt) "CPUidle PSCI: " fmt +#include <linux/cpuhotplug.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/cpu_pm.h> @@ -16,21 +17,107 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/psci.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> #include <asm/cpuidle.h> +#include "cpuidle-psci.h" #include "dt_idle_states.h" -static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); +struct psci_cpuidle_data { + u32 *psci_states; + struct device *dev; +}; + +static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data); +static DEFINE_PER_CPU(u32, domain_state); +static bool psci_cpuidle_use_cpuhp __initdata; + +void psci_set_domain_state(u32 state) +{ + __this_cpu_write(domain_state, state); +} + +static inline u32 psci_get_domain_state(void) +{ + return __this_cpu_read(domain_state); +} + +static inline int psci_enter_state(int idx, u32 state) +{ + return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state); +} + +static int psci_enter_domain_idle_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) +{ + struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data); + u32 *states = data->psci_states; + struct device *pd_dev = data->dev; + u32 state; + int ret; + + /* Do runtime PM to manage a hierarchical CPU toplogy. */ + pm_runtime_put_sync_suspend(pd_dev); + + state = psci_get_domain_state(); + if (!state) + state = states[idx]; + + ret = psci_enter_state(idx, state); + + pm_runtime_get_sync(pd_dev); + + /* Clear the domain state to start fresh when back from idle. */ + psci_set_domain_state(0); + return ret; +} + +static int psci_idle_cpuhp_up(unsigned int cpu) +{ + struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); + + if (pd_dev) + pm_runtime_get_sync(pd_dev); + + return 0; +} + +static int psci_idle_cpuhp_down(unsigned int cpu) +{ + struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); + + if (pd_dev) { + pm_runtime_put_sync(pd_dev); + /* Clear domain state to start fresh at next online. 
*/ + psci_set_domain_state(0); + } + + return 0; +} + +static void __init psci_idle_init_cpuhp(void) +{ + int err; + + if (!psci_cpuidle_use_cpuhp) + return; + + err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, + "cpuidle/psci:online", + psci_idle_cpuhp_up, + psci_idle_cpuhp_down); + if (err) + pr_warn("Failed %d while setup cpuhp state\n", err); +} static int psci_enter_idle_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { - u32 *state = __this_cpu_read(psci_power_state); + u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states); - return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, - idx, state[idx - 1]); + return psci_enter_state(idx, state[idx]); } static struct cpuidle_driver psci_idle_driver __initdata = { @@ -56,7 +143,7 @@ static const struct of_device_id psci_idle_state_match[] __initconst = { { }, }; -static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) +int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) { int err = of_property_read_u32(np, "arm,psci-suspend-param", state); @@ -73,28 +160,25 @@ static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) return 0; } -static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) +static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv, + struct device_node *cpu_node, + unsigned int state_count, int cpu) { - int i, ret = 0, count = 0; + int i, ret = 0; u32 *psci_states; struct device_node *state_node; + struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu); - /* Count idle states */ - while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", - count))) { - count++; - of_node_put(state_node); - } - - if (!count) - return -ENODEV; - - psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + state_count++; /* Add WFI state too */ + psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL); if (!psci_states) return -ENOMEM; - for (i = 0; i < count; i++) { - state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + for (i = 1; i < state_count; i++) { + state_node = of_get_cpu_state_node(cpu_node, i - 1); + if (!state_node) + break; + ret = psci_dt_parse_state_node(state_node, &psci_states[i]); of_node_put(state_node); @@ -104,8 +188,33 @@ static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) pr_debug("psci-power-state %#x index %d\n", psci_states[i], i); } - /* Idle states parsed correctly, initialize per-cpu pointer */ - per_cpu(psci_power_state, cpu) = psci_states; + if (i != state_count) { + ret = -ENODEV; + goto free_mem; + } + + /* Currently limit the hierarchical topology to be used in OSI mode. */ + if (psci_has_osi_support()) { + data->dev = psci_dt_attach_cpu(cpu); + if (IS_ERR(data->dev)) { + ret = PTR_ERR(data->dev); + goto free_mem; + } + + /* + * Using the deepest state for the CPU to trigger a potential + * selection of a shared state for the domain, assumes the + * domain states are all deeper states. + */ + if (data->dev) { + drv->states[state_count - 1].enter = + psci_enter_domain_idle_state; + psci_cpuidle_use_cpuhp = true; + } + } + + /* Idle states parsed correctly, store them in the per-cpu struct. 
*/ + data->psci_states = psci_states; return 0; free_mem: @@ -113,7 +222,8 @@ free_mem: return ret; } -static __init int psci_cpu_init_idle(unsigned int cpu) +static __init int psci_cpu_init_idle(struct cpuidle_driver *drv, + unsigned int cpu, unsigned int state_count) { struct device_node *cpu_node; int ret; @@ -129,7 +239,7 @@ static __init int psci_cpu_init_idle(unsigned int cpu) if (!cpu_node) return -ENODEV; - ret = psci_dt_cpu_init_idle(cpu_node, cpu); + ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu); of_node_put(cpu_node); @@ -185,7 +295,7 @@ static int __init psci_idle_init_cpu(int cpu) /* * Initialize PSCI idle states. */ - ret = psci_cpu_init_idle(cpu); + ret = psci_cpu_init_idle(drv, cpu, ret); if (ret) { pr_err("CPU %d failed to PSCI idle\n", cpu); goto out_kfree_drv; @@ -221,6 +331,7 @@ static int __init psci_idle_init(void) goto out_fail; } + psci_idle_init_cpuhp(); return 0; out_fail: diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h new file mode 100644 index 000000000000..7299a04dd467 --- /dev/null +++ b/drivers/cpuidle/cpuidle-psci.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __CPUIDLE_PSCI_H +#define __CPUIDLE_PSCI_H + +struct device_node; + +void psci_set_domain_state(u32 state); +int __init psci_dt_parse_state_node(struct device_node *np, u32 *state); + +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF +struct device __init *psci_dt_attach_cpu(int cpu); +#else +static inline struct device __init *psci_dt_attach_cpu(int cpu) { return NULL; } +#endif + +#endif /* __CPUIDLE_PSCI_H */ diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index d06d21a9525d..252f2a9686a6 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -111,8 +111,7 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx, for (cpu = cpumask_next(cpumask_first(cpumask), cpumask); cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { cpu_node = of_cpu_device_node_get(cpu); - curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states", - idx); + curr_state_node = of_get_cpu_state_node(cpu_node, idx); if (state_node != curr_state_node) valid = false; @@ -170,7 +169,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, cpu_node = of_cpu_device_node_get(cpumask_first(cpumask)); for (i = 0; ; i++) { - state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + state_node = of_get_cpu_state_node(cpu_node, i); if (!state_node) break; diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index 6b6ba238b81a..a014ab96e673 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -2,6 +2,7 @@ /* * OMAP DMAengine support */ +#include <linux/cpu_pm.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> @@ -23,12 +24,33 @@ #define OMAP_SDMA_REQUESTS 127 #define OMAP_SDMA_CHANNELS 32 +struct omap_dma_config { + int lch_end; + unsigned int rw_priority:1; + unsigned int needs_busy_check:1; + unsigned int may_lose_context:1; + unsigned int needs_lch_clear:1; +}; + +struct omap_dma_context { + u32 irqenable_l0; + u32 irqenable_l1; + u32 ocp_sysconfig; + u32 gcr; +}; + struct omap_dmadev { struct dma_device ddev; spinlock_t lock; void __iomem *base; const struct omap_dma_reg *reg_map; struct omap_system_dma_plat_info *plat; + const struct omap_dma_config *cfg; + struct notifier_block nb; + struct omap_dma_context context; + int lch_count; + DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS); + struct mutex lch_lock; /* 
for assigning logical channels */ bool legacy; bool ll123_supported; struct dma_pool *desc_pool; @@ -376,6 +398,19 @@ static unsigned omap_dma_get_csr(struct omap_chan *c) return val; } +static void omap_dma_clear_lch(struct omap_dmadev *od, int lch) +{ + struct omap_chan *c; + int i; + + c = od->lch_map[lch]; + if (!c) + return; + + for (i = CSDP; i <= od->cfg->lch_end; i++) + omap_dma_chan_write(c, i, 0); +} + static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c, unsigned lch) { @@ -633,6 +668,37 @@ static irqreturn_t omap_dma_irq(int irq, void *devid) return IRQ_HANDLED; } +static int omap_dma_get_lch(struct omap_dmadev *od, int *lch) +{ + int channel; + + mutex_lock(&od->lch_lock); + channel = find_first_zero_bit(od->lch_bitmap, od->lch_count); + if (channel >= od->lch_count) + goto out_busy; + set_bit(channel, od->lch_bitmap); + mutex_unlock(&od->lch_lock); + + omap_dma_clear_lch(od, channel); + *lch = channel; + + return 0; + +out_busy: + mutex_unlock(&od->lch_lock); + *lch = -EINVAL; + + return -EBUSY; +} + +static void omap_dma_put_lch(struct omap_dmadev *od, int lch) +{ + omap_dma_clear_lch(od, lch); + mutex_lock(&od->lch_lock); + clear_bit(lch, od->lch_bitmap); + mutex_unlock(&od->lch_lock); +} + static int omap_dma_alloc_chan_resources(struct dma_chan *chan) { struct omap_dmadev *od = to_omap_dma_dev(chan->device); @@ -644,8 +710,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan) ret = omap_request_dma(c->dma_sig, "DMA engine", omap_dma_callback, c, &c->dma_ch); } else { - ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL, - &c->dma_ch); + ret = omap_dma_get_lch(od, &c->dma_ch); } dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig); @@ -702,7 +767,11 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan) c->channel_base = NULL; od->lch_map[c->dma_ch] = NULL; vchan_free_chan_resources(&c->vc); - omap_free_dma(c->dma_ch); + + if (od->legacy) + omap_free_dma(c->dma_ch); + else + omap_dma_put_lch(od, c->dma_ch); dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch, c->dma_sig); @@ -1453,16 +1522,128 @@ static void omap_dma_free(struct omap_dmadev *od) } } +/* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */ +static int omap_dma_busy_notifier(struct notifier_block *nb, + unsigned long cmd, void *v) +{ + struct omap_dmadev *od; + struct omap_chan *c; + int lch = -1; + + od = container_of(nb, struct omap_dmadev, nb); + + switch (cmd) { + case CPU_CLUSTER_PM_ENTER: + while (1) { + lch = find_next_bit(od->lch_bitmap, od->lch_count, + lch + 1); + if (lch >= od->lch_count) + break; + c = od->lch_map[lch]; + if (!c) + continue; + if (omap_dma_chan_read(c, CCR) & CCR_ENABLE) + return NOTIFY_BAD; + } + break; + case CPU_CLUSTER_PM_ENTER_FAILED: + case CPU_CLUSTER_PM_EXIT: + break; + } + + return NOTIFY_OK; +} + +/* + * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0. + * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for + * now. Context save seems to be only currently needed on omap3. 
+ */ +static void omap_dma_context_save(struct omap_dmadev *od) +{ + od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0); + od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1); + od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG); + od->context.gcr = omap_dma_glbl_read(od, GCR); +} + +static void omap_dma_context_restore(struct omap_dmadev *od) +{ + int i; + + omap_dma_glbl_write(od, GCR, od->context.gcr); + omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig); + omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0); + omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1); + + /* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */ + if (od->plat->errata & DMA_ROMCODE_BUG) + omap_dma_glbl_write(od, IRQSTATUS_L0, 0); + + /* Clear dma channels */ + for (i = 0; i < od->lch_count; i++) + omap_dma_clear_lch(od, i); +} + +/* Currently only used for omap3 */ +static int omap_dma_context_notifier(struct notifier_block *nb, + unsigned long cmd, void *v) +{ + struct omap_dmadev *od; + + od = container_of(nb, struct omap_dmadev, nb); + + switch (cmd) { + case CPU_CLUSTER_PM_ENTER: + omap_dma_context_save(od); + break; + case CPU_CLUSTER_PM_ENTER_FAILED: + case CPU_CLUSTER_PM_EXIT: + omap_dma_context_restore(od); + break; + } + + return NOTIFY_OK; +} + +static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate, + int max_fifo_depth, int tparams) +{ + u32 val; + + /* Set only for omap2430 and later */ + if (!od->cfg->rw_priority) + return; + + if (max_fifo_depth == 0) + max_fifo_depth = 1; + if (arb_rate == 0) + arb_rate = 1; + + val = 0xff & max_fifo_depth; + val |= (0x3 & tparams) << 12; + val |= (arb_rate & 0xff) << 16; + + omap_dma_glbl_write(od, GCR, val); +} + #define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) +/* + * No flags currently set for default configuration as omap1 is still + * using platform data. 
+ */ +static const struct omap_dma_config default_cfg; + static int omap_dma_probe(struct platform_device *pdev) { + const struct omap_dma_config *conf; struct omap_dmadev *od; struct resource *res; int rc, i, irq; - u32 lch_count; + u32 val; od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); if (!od) @@ -1473,9 +1654,21 @@ static int omap_dma_probe(struct platform_device *pdev) if (IS_ERR(od->base)) return PTR_ERR(od->base); - od->plat = omap_get_plat_info(); - if (!od->plat) - return -EPROBE_DEFER; + conf = of_device_get_match_data(&pdev->dev); + if (conf) { + od->cfg = conf; + od->plat = dev_get_platdata(&pdev->dev); + if (!od->plat) { + dev_err(&pdev->dev, "omap_system_dma_plat_info is missing"); + return -ENODEV; + } + } else { + od->cfg = &default_cfg; + + od->plat = omap_get_plat_info(); + if (!od->plat) + return -EPROBE_DEFER; + } od->reg_map = od->plat->reg_map; @@ -1507,6 +1700,7 @@ static int omap_dma_probe(struct platform_device *pdev) od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); + mutex_init(&od->lch_lock); spin_lock_init(&od->lock); spin_lock_init(&od->irq_lock); @@ -1522,18 +1716,30 @@ static int omap_dma_probe(struct platform_device *pdev) /* Number of available logical channels */ if (!pdev->dev.of_node) { - lch_count = od->plat->dma_attr->lch_count; - if (unlikely(!lch_count)) - lch_count = OMAP_SDMA_CHANNELS; + od->lch_count = od->plat->dma_attr->lch_count; + if (unlikely(!od->lch_count)) + od->lch_count = OMAP_SDMA_CHANNELS; } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels", - &lch_count)) { + &od->lch_count)) { dev_info(&pdev->dev, "Missing dma-channels property, using %u.\n", OMAP_SDMA_CHANNELS); - lch_count = OMAP_SDMA_CHANNELS; + od->lch_count = OMAP_SDMA_CHANNELS; + } + + /* Mask of allowed logical channels */ + if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node, + "dma-channel-mask", + &val)) { + /* Tag channels not in mask as reserved */ + val = ~val; + bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count); } + if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED) + bitmap_set(od->lch_bitmap, 0, 2); - od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map), + od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count, + sizeof(*od->lch_map), GFP_KERNEL); if (!od->lch_map) return -ENOMEM; @@ -1605,6 +1811,16 @@ static int omap_dma_probe(struct platform_device *pdev) } } + omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0); + + if (od->cfg->needs_busy_check) { + od->nb.notifier_call = omap_dma_busy_notifier; + cpu_pm_register_notifier(&od->nb); + } else if (od->cfg->may_lose_context) { + od->nb.notifier_call = omap_dma_context_notifier; + cpu_pm_register_notifier(&od->nb); + } + dev_info(&pdev->dev, "OMAP DMA engine driver%s\n", od->ll123_supported ? 
" (LinkedList1/2/3 supported)" : ""); @@ -1616,6 +1832,9 @@ static int omap_dma_remove(struct platform_device *pdev) struct omap_dmadev *od = platform_get_drvdata(pdev); int irq; + if (od->cfg->may_lose_context) + cpu_pm_unregister_notifier(&od->nb); + if (pdev->dev.of_node) of_dma_controller_free(pdev->dev.of_node); @@ -1637,12 +1856,45 @@ static int omap_dma_remove(struct platform_device *pdev) return 0; } +static const struct omap_dma_config omap2420_data = { + .lch_end = CCFN, + .rw_priority = true, + .needs_lch_clear = true, + .needs_busy_check = true, +}; + +static const struct omap_dma_config omap2430_data = { + .lch_end = CCFN, + .rw_priority = true, + .needs_lch_clear = true, +}; + +static const struct omap_dma_config omap3430_data = { + .lch_end = CCFN, + .rw_priority = true, + .needs_lch_clear = true, + .may_lose_context = true, +}; + +static const struct omap_dma_config omap3630_data = { + .lch_end = CCDN, + .rw_priority = true, + .needs_lch_clear = true, + .may_lose_context = true, +}; + +static const struct omap_dma_config omap4_data = { + .lch_end = CCDN, + .rw_priority = true, + .needs_lch_clear = true, +}; + static const struct of_device_id omap_dma_match[] = { - { .compatible = "ti,omap2420-sdma", }, - { .compatible = "ti,omap2430-sdma", }, - { .compatible = "ti,omap3430-sdma", }, - { .compatible = "ti,omap3630-sdma", }, - { .compatible = "ti,omap4430-sdma", }, + { .compatible = "ti,omap2420-sdma", .data = &omap2420_data, }, + { .compatible = "ti,omap2430-sdma", .data = &omap2430_data, }, + { .compatible = "ti,omap3430-sdma", .data = &omap3430_data, }, + { .compatible = "ti,omap3630-sdma", .data = &omap3630_data, }, + { .compatible = "ti,omap4430-sdma", .data = &omap4_data, }, {}, }; MODULE_DEVICE_TABLE(of, omap_dma_match); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index e40a77bfe821..ea869addc89b 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -239,14 +239,6 @@ config QCOM_SCM depends on ARM || ARM64 select RESET_CONTROLLER -config QCOM_SCM_32 - def_bool y - depends on QCOM_SCM && ARM - -config QCOM_SCM_64 - def_bool y - depends on QCOM_SCM && ARM64 - config QCOM_SCM_DOWNLOAD_MODE_DEFAULT bool "Qualcomm download mode enabled by default" depends on QCOM_SCM diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 3fcb91975bdc..e9fb838af4df 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -17,10 +17,7 @@ obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o -obj-$(CONFIG_QCOM_SCM) += qcom_scm.o -obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o -obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o -CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a +obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 7a30952b463d..db55c43a2cbd 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -28,8 +28,12 @@ scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv) return NULL; for (; id->protocol_id; id++) - if (id->protocol_id == scmi_dev->protocol_id) - return id; + if (id->protocol_id == scmi_dev->protocol_id) { + if (!id->name) + 
return id; + else if (!strcmp(id->name, scmi_dev->name)) + return id; + } return NULL; } @@ -56,6 +60,11 @@ static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle) return fn(handle); } +static int scmi_protocol_dummy_init(struct scmi_handle *handle) +{ + return 0; +} + static int scmi_dev_probe(struct device *dev) { struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); @@ -74,6 +83,10 @@ static int scmi_dev_probe(struct device *dev) if (ret) return ret; + /* Skip protocol initialisation for additional devices */ + idr_replace(&scmi_protocols, &scmi_protocol_dummy_init, + scmi_dev->protocol_id); + return scmi_drv->probe(scmi_dev); } @@ -125,7 +138,8 @@ static void scmi_device_release(struct device *dev) } struct scmi_device * -scmi_device_create(struct device_node *np, struct device *parent, int protocol) +scmi_device_create(struct device_node *np, struct device *parent, int protocol, + const char *name) { int id, retval; struct scmi_device *scmi_dev; @@ -134,8 +148,15 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) if (!scmi_dev) return NULL; + scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL); + if (!scmi_dev->name) { + kfree(scmi_dev); + return NULL; + } + id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); if (id < 0) { + kfree_const(scmi_dev->name); kfree(scmi_dev); return NULL; } @@ -154,6 +175,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) return scmi_dev; put_dev: + kfree_const(scmi_dev->name); put_device(&scmi_dev->dev); ida_simple_remove(&scmi_bus_id, id); return NULL; @@ -161,6 +183,7 @@ put_dev: void scmi_device_destroy(struct scmi_device *scmi_dev) { + kfree_const(scmi_dev->name); scmi_handle_put(scmi_dev->handle); ida_simple_remove(&scmi_bus_id, scmi_dev->id); device_unregister(&scmi_dev->dev); diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 32526a793f3a..4c2227662b26 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -65,6 +65,7 @@ struct scmi_clock_set_rate { }; struct clock_info { + u32 version; int num_clocks; int max_async_req; atomic_t cur_async_req; @@ -340,6 +341,7 @@ static int scmi_clock_protocol_init(struct scmi_handle *handle) scmi_clock_describe_rates_get(handle, clkid, clk); } + cinfo->version = version; handle->clk_ops = &clk_ops; handle->clk_priv = cinfo; diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 5237c2ff79fe..df35358ff324 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -81,6 +81,7 @@ struct scmi_msg { /** * struct scmi_xfer - Structure representing a message flow * + * @transfer_id: Unique ID for debug & profiling purposes * @hdr: Transmit message header * @tx: Transmit message * @rx: Receive message, the buffer should be pre-allocated to store * @@ -90,6 +91,7 @@ struct scmi_msg { * @async: pointer to delayed response message received event completion */ struct scmi_xfer { + int transfer_id; struct scmi_msg_hdr hdr; struct scmi_msg tx; struct scmi_msg rx; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 3eb0382491ce..2c96f6b5a7d8 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -29,6 +29,9 @@ #include "common.h" +#define CREATE_TRACE_POINTS +#include <trace/events/scmi.h> + #define MSG_ID_MASK GENMASK(7, 0) #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) #define MSG_TYPE_MASK GENMASK(9, 8)
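The driver.c hunk above instantiates the new SCMI tracepoints (CREATE_TRACE_POINTS ahead of trace/events/scmi.h), and common.h gives every scmi_xfer a transfer_id so the begin/end/rx events can be correlated. The trace header itself is not part of this diff; as a rough sketch only, assuming the five arguments used by the trace_scmi_xfer_begin() call sites in the next hunks, such an event would be declared roughly as follows:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM scmi

#if !defined(_TRACE_SCMI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCMI_H

#include <linux/tracepoint.h>

/* Sketch of one event; the real header would define xfer_end and rx_done
 * along the same lines. */
TRACE_EVENT(scmi_xfer_begin,
	TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
		 bool poll),
	TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll),

	TP_STRUCT__entry(
		__field(int, transfer_id)
		__field(u8, msg_id)
		__field(u8, protocol_id)
		__field(u16, seq)
		__field(bool, poll)
	),

	TP_fast_assign(
		__entry->transfer_id = transfer_id;
		__entry->msg_id = msg_id;
		__entry->protocol_id = protocol_id;
		__entry->seq = seq;
		__entry->poll = poll;
	),

	TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
		  __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
		  __entry->seq, __entry->poll)
);

#endif /* _TRACE_SCMI_H */

/* This part must be outside the include guard */
#include <trace/define_trace.h>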
@@ -61,6 +64,8 @@ enum scmi_error_codes { static LIST_HEAD(scmi_list); /* Protection for the entire list */ static DEFINE_MUTEX(scmi_list_mutex); +/* Track the unique id for the transfers for debug & profiling purposes */ +static atomic_t transfer_last_id; /** * struct scmi_xfers_info - Structure to manage transfer information @@ -304,6 +309,7 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, xfer = &minfo->xfer_block[xfer_id]; xfer->hdr.seq = xfer_id; reinit_completion(&xfer->done); + xfer->transfer_id = atomic_inc_return(&transfer_last_id); return xfer; } @@ -374,6 +380,10 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m) scmi_fetch_response(xfer, mem); + trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + msg_type); + if (msg_type == MSG_TYPE_DELAYED_RESP) complete(xfer->async_done); else @@ -439,6 +449,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) if (unlikely(!cinfo)) return -EINVAL; + trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + xfer->hdr.poll_completion); + ret = mbox_send_message(cinfo->chan, xfer); if (ret < 0) { dev_dbg(dev, "mbox send fail %d\n", ret); @@ -478,6 +492,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) */ mbox_client_txdone(cinfo->chan, ret); + trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + xfer->hdr.status); + return ret; } @@ -735,6 +753,11 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, idx = tx ? 0 : 1; idr = tx ? &info->tx_idr : &info->rx_idr; + /* check if already allocated, used for multiple devices per protocol */ + cinfo = idr_find(idr, prot_id); + if (cinfo) + return 0; + if (scmi_mailbox_check(np, idx)) { cinfo = idr_find(idr, SCMI_PROTOCOL_BASE); if (unlikely(!cinfo)) /* Possible only if platform has no Rx */ @@ -803,11 +826,11 @@ scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) static inline void scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, - int prot_id) + int prot_id, const char *name) { struct scmi_device *sdev; - sdev = scmi_device_create(np, info->dev, prot_id); + sdev = scmi_device_create(np, info->dev, prot_id, name); if (!sdev) { dev_err(info->dev, "failed to create %d protocol device\n", prot_id); @@ -824,6 +847,40 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, scmi_set_handle(sdev); } +#define MAX_SCMI_DEV_PER_PROTOCOL 2 +struct scmi_prot_devnames { + int protocol_id; + char *names[MAX_SCMI_DEV_PER_PROTOCOL]; +}; + +static struct scmi_prot_devnames devnames[] = { + { SCMI_PROTOCOL_POWER, { "genpd" },}, + { SCMI_PROTOCOL_PERF, { "cpufreq" },}, + { SCMI_PROTOCOL_CLOCK, { "clocks" },}, + { SCMI_PROTOCOL_SENSOR, { "hwmon" },}, + { SCMI_PROTOCOL_RESET, { "reset" },}, +}; + +static inline void +scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info, + int prot_id) +{ + int loop, cnt; + + for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) { + if (devnames[loop].protocol_id != prot_id) + continue; + + for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) { + const char *name = devnames[loop].names[cnt]; + + if (name) + scmi_create_protocol_device(np, info, prot_id, + name); + } + } +} + static int scmi_probe(struct platform_device *pdev) { int ret; @@ -892,7 +949,7 @@ static int scmi_probe(struct platform_device *pdev) continue; } -
scmi_create_protocol_device(child, info, prot_id); + scmi_create_protocol_devices(child, info, prot_id); } return 0; @@ -940,6 +997,52 @@ static int scmi_remove(struct platform_device *pdev) return ret; } +static ssize_t protocol_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%u.%u\n", info->version.major_ver, + info->version.minor_ver); +} +static DEVICE_ATTR_RO(protocol_version); + +static ssize_t firmware_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "0x%x\n", info->version.impl_ver); +} +static DEVICE_ATTR_RO(firmware_version); + +static ssize_t vendor_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", info->version.vendor_id); +} +static DEVICE_ATTR_RO(vendor_id); + +static ssize_t sub_vendor_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", info->version.sub_vendor_id); +} +static DEVICE_ATTR_RO(sub_vendor_id); + +static struct attribute *versions_attrs[] = { + &dev_attr_firmware_version.attr, + &dev_attr_protocol_version.attr, + &dev_attr_vendor_id.attr, + &dev_attr_sub_vendor_id.attr, + NULL, +}; +ATTRIBUTE_GROUPS(versions); + static const struct scmi_desc scmi_generic_desc = { .max_rx_timeout_ms = 30, /* We may increase this if required */ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ @@ -958,6 +1061,7 @@ static struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", .of_match_table = scmi_of_match, + .dev_groups = versions_groups, }, .probe = scmi_probe, .remove = scmi_remove, diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 601af4edad5e..ec81e6f7e7a4 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -145,6 +145,7 @@ struct perf_dom_info { }; struct scmi_perf_info { + u32 version; int num_domains; bool power_scale_mw; u64 stats_addr; @@ -736,6 +737,7 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle) scmi_perf_domain_init_fc(handle, domain, &dom->fc_info); } + pinfo->version = version; handle->perf_ops = &perf_ops; handle->perf_priv = pinfo; diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index 5abef7079c0a..214886ce84f1 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -50,6 +50,7 @@ struct power_dom_info { }; struct scmi_power_info { + u32 version; int num_domains; u64 stats_addr; u32 stats_size; @@ -207,6 +208,7 @@ static int scmi_power_protocol_init(struct scmi_handle *handle) scmi_power_domain_attributes_get(handle, domain, dom); } + pinfo->version = version; handle->power_ops = &power_ops; handle->power_priv = pinfo; diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index ab42c21c5517..de73054554f3 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -48,6 +48,7 @@ struct reset_dom_info { }; struct scmi_reset_info { + u32 version; int num_domains; struct reset_dom_info *dom_info; }; @@ -217,6 +218,7 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle) scmi_reset_domain_attributes_get(handle, domain, dom); } + pinfo->version = version; handle->reset_ops = &reset_ops; handle->reset_priv = pinfo; 
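The sysfs additions above are the stock read-only attribute pattern: each DEVICE_ATTR_RO(name) pairs a name_show() callback with a dev_attr_name descriptor, ATTRIBUTE_GROUPS(versions) turns versions_attrs into the versions_groups array that .dev_groups consumes, and the driver core then creates and removes the files around probe and remove without any explicit sysfs_create_group() calls. A minimal self-contained sketch of the same pattern; the foo names and the backing field are invented for illustration:

#include <linux/device.h>
#include <linux/sysfs.h>

struct foo_info {
	u32 impl_ver;
};

static ssize_t impl_ver_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct foo_info *info = dev_get_drvdata(dev);

	/* One value plus newline, as with the attributes above */
	return sprintf(buf, "0x%x\n", info->impl_ver);
}
static DEVICE_ATTR_RO(impl_ver);	/* declares dev_attr_impl_ver */

static struct attribute *foo_attrs[] = {
	&dev_attr_impl_ver.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);			/* declares foo_groups */

/*
 * Pointing a driver's .dev_groups at foo_groups makes the core expose
 * the file for every bound device, e.g. under
 * /sys/devices/platform/<device>/impl_ver; the exact path depends on
 * where the device sits in the hierarchy.
 */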
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 87f737e01473..bafbfe358f97 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -112,7 +112,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) } static const struct scmi_device_id scmi_id_table[] = { - { SCMI_PROTOCOL_POWER }, + { SCMI_PROTOCOL_POWER, "genpd" }, { }, }; MODULE_DEVICE_TABLE(scmi, scmi_id_table); diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index a400ea805fc2..eba61b9c1f53 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -68,6 +68,7 @@ struct scmi_msg_sensor_reading_get { }; struct sensors_info { + u32 version; int num_sensors; int max_requests; u64 reg_addr; @@ -294,6 +295,7 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle) scmi_sensor_description_get(handle, sinfo); + sinfo->version = version; handle->sensor_ops = &sensor_ops; handle->sensor_priv = sinfo; diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 0dbee32da4c6..1d2e5b85d7ca 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config IMX_DSP - bool "IMX DSP Protocol driver" + tristate "IMX DSP Protocol driver" depends on IMX_MBOX help This enables DSP IPC protocol between host AP (Linux) diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index b3b6c15e7b36..2937d44b5df4 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -97,7 +97,7 @@ static inline bool psci_has_ext_power_state(void) PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK; } -static inline bool psci_has_osi_support(void) +bool psci_has_osi_support(void) { return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED; } @@ -162,6 +162,15 @@ static u32 psci_get_version(void) return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); } +int psci_set_osi_mode(void) +{ + int err; + + err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, + PSCI_1_0_SUSPEND_MODE_OSI, 0, 0); + return psci_to_linux_errno(err); +} + static int psci_cpu_suspend(u32 state, unsigned long entry_point) { int err; @@ -544,9 +553,14 @@ static int __init psci_1_0_init(struct device_node *np) if (err) return err; - if (psci_has_osi_support()) + if (psci_has_osi_support()) { pr_info("OSI mode supported.\n"); + /* Default to PC mode. */ + invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, + PSCI_1_0_SUSPEND_MODE_PC, 0, 0); + } + return 0; } diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c deleted file mode 100644 index 48e2ef794ea3..000000000000 --- a/drivers/firmware/qcom_scm-32.c +++ /dev/null @@ -1,671 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. - * Copyright (C) 2015 Linaro Ltd. 
- */ - -#include <linux/slab.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/qcom_scm.h> -#include <linux/dma-mapping.h> - -#include "qcom_scm.h" - -#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 -#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 -#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 -#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 - -#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 -#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 -#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 -#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 - -struct qcom_scm_entry { - int flag; - void *entry; -}; - -static struct qcom_scm_entry qcom_scm_wb[] = { - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, -}; - -static DEFINE_MUTEX(qcom_scm_lock); - -/** - * struct qcom_scm_command - one SCM command buffer - * @len: total available memory for command and response - * @buf_offset: start of command buffer - * @resp_hdr_offset: start of response buffer - * @id: command to be executed - * @buf: buffer returned from qcom_scm_get_command_buffer() - * - * An SCM command is laid out in memory as follows: - * - * ------------------- <--- struct qcom_scm_command - * | command header | - * ------------------- <--- qcom_scm_get_command_buffer() - * | command buffer | - * ------------------- <--- struct qcom_scm_response and - * | response header | qcom_scm_command_to_response() - * ------------------- <--- qcom_scm_get_response_buffer() - * | response buffer | - * ------------------- - * - * There can be arbitrary padding between the headers and buffers so - * you should always use the appropriate qcom_scm_get_*_buffer() routines - * to access the buffers in a safe manner. - */ -struct qcom_scm_command { - __le32 len; - __le32 buf_offset; - __le32 resp_hdr_offset; - __le32 id; - __le32 buf[0]; -}; - -/** - * struct qcom_scm_response - one SCM response buffer - * @len: total available memory for response - * @buf_offset: start of response data relative to start of qcom_scm_response - * @is_complete: indicates if the command has finished processing - */ -struct qcom_scm_response { - __le32 len; - __le32 buf_offset; - __le32 is_complete; -}; - -/** - * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response - * @cmd: command - * - * Returns a pointer to a response for a command. - */ -static inline struct qcom_scm_response *qcom_scm_command_to_response( - const struct qcom_scm_command *cmd) -{ - return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); -} - -/** - * qcom_scm_get_command_buffer() - Get a pointer to a command buffer - * @cmd: command - * - * Returns a pointer to the command buffer of a command. - */ -static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd) -{ - return (void *)cmd->buf; -} - -/** - * qcom_scm_get_response_buffer() - Get a pointer to a response buffer - * @rsp: response - * - * Returns a pointer to a response buffer of a response. 
- */ -static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp) -{ - return (void *)rsp + le32_to_cpu(rsp->buf_offset); -} - -static u32 smc(u32 cmd_addr) -{ - int context_id; - register u32 r0 asm("r0") = 1; - register u32 r1 asm("r1") = (u32)&context_id; - register u32 r2 asm("r2") = cmd_addr; - do { - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r0") - __asmeq("%2", "r1") - __asmeq("%3", "r2") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0) - : "r" (r0), "r" (r1), "r" (r2) - : "r3", "r12"); - } while (r0 == QCOM_SCM_INTERRUPTED); - - return r0; -} - -/** - * qcom_scm_call() - Send an SCM command - * @dev: struct device - * @svc_id: service identifier - * @cmd_id: command identifier - * @cmd_buf: command buffer - * @cmd_len: length of the command buffer - * @resp_buf: response buffer - * @resp_len: length of the response buffer - * - * Sends a command to the SCM and waits for the command to finish processing. - * - * A note on cache maintenance: - * Note that any buffers that are expected to be accessed by the secure world - * must be flushed before invoking qcom_scm_call and invalidated in the cache - * immediately after qcom_scm_call returns. Cache maintenance on the command - * and response buffers is taken care of by qcom_scm_call; however, callers are - * responsible for any other cached buffers passed over to the secure world. - */ -static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, - const void *cmd_buf, size_t cmd_len, void *resp_buf, - size_t resp_len) -{ - int ret; - struct qcom_scm_command *cmd; - struct qcom_scm_response *rsp; - size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len; - dma_addr_t cmd_phys; - - cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->len = cpu_to_le32(alloc_len); - cmd->buf_offset = cpu_to_le32(sizeof(*cmd)); - cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len); - - cmd->id = cpu_to_le32((svc_id << 10) | cmd_id); - if (cmd_buf) - memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len); - - rsp = qcom_scm_command_to_response(cmd); - - cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, cmd_phys)) { - kfree(cmd); - return -ENOMEM; - } - - mutex_lock(&qcom_scm_lock); - ret = smc(cmd_phys); - if (ret < 0) - ret = qcom_scm_remap_error(ret); - mutex_unlock(&qcom_scm_lock); - if (ret) - goto out; - - do { - dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len, - sizeof(*rsp), DMA_FROM_DEVICE); - } while (!rsp->is_complete); - - if (resp_buf) { - dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + - le32_to_cpu(rsp->buf_offset), - resp_len, DMA_FROM_DEVICE); - memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), - resp_len); - } -out: - dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); - kfree(cmd); - return ret; -} - -#define SCM_CLASS_REGISTER (0x2 << 8) -#define SCM_MASK_IRQS BIT(5) -#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \ - SCM_CLASS_REGISTER | \ - SCM_MASK_IRQS | \ - (n & 0xf)) - -/** - * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument - * @svc_id: service identifier - * @cmd_id: command identifier - * @arg1: first argument - * - * This shall only be used with commands that are guaranteed to be - * uninterruptable, atomic and SMP safe. 
- */ -static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) -{ - int context_id; - - register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1); - register u32 r1 asm("r1") = (u32)&context_id; - register u32 r2 asm("r2") = arg1; - - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r0") - __asmeq("%2", "r1") - __asmeq("%3", "r2") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0) - : "r" (r0), "r" (r1), "r" (r2) - : "r3", "r12"); - return r0; -} - -/** - * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments - * @svc_id: service identifier - * @cmd_id: command identifier - * @arg1: first argument - * @arg2: second argument - * - * This shall only be used with commands that are guaranteed to be - * uninterruptable, atomic and SMP safe. - */ -static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2) -{ - int context_id; - - register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2); - register u32 r1 asm("r1") = (u32)&context_id; - register u32 r2 asm("r2") = arg1; - register u32 r3 asm("r3") = arg2; - - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r0") - __asmeq("%2", "r1") - __asmeq("%3", "r2") - __asmeq("%4", "r3") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0) - : "r" (r0), "r" (r1), "r" (r2), "r" (r3) - : "r12"); - return r0; -} - -u32 qcom_scm_get_version(void) -{ - int context_id; - static u32 version = -1; - register u32 r0 asm("r0"); - register u32 r1 asm("r1"); - - if (version != -1) - return version; - - mutex_lock(&qcom_scm_lock); - - r0 = 0x1 << 8; - r1 = (u32)&context_id; - do { - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r1") - __asmeq("%2", "r0") - __asmeq("%3", "r1") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0), "=r" (r1) - : "r" (r0), "r" (r1) - : "r2", "r3", "r12"); - } while (r0 == QCOM_SCM_INTERRUPTED); - - version = r1; - mutex_unlock(&qcom_scm_lock); - - return version; -} -EXPORT_SYMBOL(qcom_scm_get_version); - -/** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the cold boot address of the cpus. Any cpu outside the supported - * range would be removed from the cpu present mask. - */ -int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) -{ - int flags = 0; - int cpu; - int scm_cb_flags[] = { - QCOM_SCM_FLAG_COLDBOOT_CPU0, - QCOM_SCM_FLAG_COLDBOOT_CPU1, - QCOM_SCM_FLAG_COLDBOOT_CPU2, - QCOM_SCM_FLAG_COLDBOOT_CPU3, - }; - - if (!cpus || (cpus && cpumask_empty(cpus))) - return -EINVAL; - - for_each_cpu(cpu, cpus) { - if (cpu < ARRAY_SIZE(scm_cb_flags)) - flags |= scm_cb_flags[cpu]; - else - set_cpu_present(cpu, false); - } - - return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, - flags, virt_to_phys(entry)); -} - -/** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the Linux entry point for the SCM to transfer control to when coming - * out of a power down. CPU power down may be executed on cpuidle or hotplug. 
- */ -int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry, - const cpumask_t *cpus) -{ - int ret; - int flags = 0; - int cpu; - struct { - __le32 flags; - __le32 addr; - } cmd; - - /* - * Reassign only if we are switching from hotplug entry point - * to cpuidle entry point or vice versa. - */ - for_each_cpu(cpu, cpus) { - if (entry == qcom_scm_wb[cpu].entry) - continue; - flags |= qcom_scm_wb[cpu].flag; - } - - /* No change in entry function */ - if (!flags) - return 0; - - cmd.addr = cpu_to_le32(virt_to_phys(entry)); - cmd.flags = cpu_to_le32(flags); - ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, - &cmd, sizeof(cmd), NULL, 0); - if (!ret) { - for_each_cpu(cpu, cpus) - qcom_scm_wb[cpu].entry = entry; - } - - return ret; -} - -/** - * qcom_scm_cpu_power_down() - Power down the cpu - * @flags - Flags to flush cache - * - * This is an end point to power down cpu. If there was a pending interrupt, - * the control would return from this function, otherwise, the cpu jumps to the - * warm boot entry point set for this cpu upon reset. - */ -void __qcom_scm_cpu_power_down(u32 flags) -{ - qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC, - flags & QCOM_SCM_FLUSH_FLAG_MASK); -} - -int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id) -{ - int ret; - __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id); - __le32 ret_val = 0; - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, - &svc_cmd, sizeof(svc_cmd), &ret_val, - sizeof(ret_val)); - if (ret) - return ret; - - return le32_to_cpu(ret_val); -} - -int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req, - u32 req_cnt, u32 *resp) -{ - if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) - return -ERANGE; - - return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, - req, req_cnt * sizeof(*req), resp, sizeof(*resp)); -} - -int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size, - u32 mode) -{ - struct ocmem_tz_lock { - __le32 id; - __le32 offset; - __le32 size; - __le32 mode; - } request; - - request.id = cpu_to_le32(id); - request.offset = cpu_to_le32(offset); - request.size = cpu_to_le32(size); - request.mode = cpu_to_le32(mode); - - return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD, - &request, sizeof(request), NULL, 0); -} - -int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size) -{ - struct ocmem_tz_unlock { - __le32 id; - __le32 offset; - __le32 size; - } request; - - request.id = cpu_to_le32(id); - request.offset = cpu_to_le32(offset); - request.size = cpu_to_le32(size); - - return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD, - &request, sizeof(request), NULL, 0); -} - -void __qcom_scm_init(void) -{ -} - -bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral) -{ - __le32 out; - __le32 in; - int ret; - - in = cpu_to_le32(peripheral); - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_IS_SUPPORTED_CMD, - &in, sizeof(in), - &out, sizeof(out)); - - return ret ? false : !!out; -} - -int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, - dma_addr_t metadata_phys) -{ - __le32 scm_ret; - int ret; - struct { - __le32 proc; - __le32 image_addr; - } request; - - request.proc = cpu_to_le32(peripheral); - request.image_addr = cpu_to_le32(metadata_phys); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_INIT_IMAGE_CMD, - &request, sizeof(request), - &scm_ret, sizeof(scm_ret)); - - return ret ? 
: le32_to_cpu(scm_ret); -} - -int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral, - phys_addr_t addr, phys_addr_t size) -{ - __le32 scm_ret; - int ret; - struct { - __le32 proc; - __le32 addr; - __le32 len; - } request; - - request.proc = cpu_to_le32(peripheral); - request.addr = cpu_to_le32(addr); - request.len = cpu_to_le32(size); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_MEM_SETUP_CMD, - &request, sizeof(request), - &scm_ret, sizeof(scm_ret)); - - return ret ? : le32_to_cpu(scm_ret); -} - -int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral) -{ - __le32 out; - __le32 in; - int ret; - - in = cpu_to_le32(peripheral); - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_AUTH_AND_RESET_CMD, - &in, sizeof(in), - &out, sizeof(out)); - - return ret ? : le32_to_cpu(out); -} - -int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral) -{ - __le32 out; - __le32 in; - int ret; - - in = cpu_to_le32(peripheral); - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_SHUTDOWN_CMD, - &in, sizeof(in), - &out, sizeof(out)); - - return ret ? : le32_to_cpu(out); -} - -int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) -{ - __le32 out; - __le32 in = cpu_to_le32(reset); - int ret; - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, - &in, sizeof(in), - &out, sizeof(out)); - - return ret ? : le32_to_cpu(out); -} - -int __qcom_scm_set_dload_mode(struct device *dev, bool enable) -{ - return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE, - enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0); -} - -int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) -{ - struct { - __le32 state; - __le32 id; - } req; - __le32 scm_ret = 0; - int ret; - - req.state = cpu_to_le32(state); - req.id = cpu_to_le32(id); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE, - &req, sizeof(req), &scm_ret, sizeof(scm_ret)); - - return ret ? : le32_to_cpu(scm_ret); -} - -int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, - size_t mem_sz, phys_addr_t src, size_t src_sz, - phys_addr_t dest, size_t dest_sz) -{ - return -ENODEV; -} - -int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, - u32 spare) -{ - struct msm_scm_sec_cfg { - __le32 id; - __le32 ctx_bank_num; - } cfg; - int ret, scm_ret = 0; - - cfg.id = cpu_to_le32(device_id); - cfg.ctx_bank_num = cpu_to_le32(spare); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG, - &cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret)); - - if (ret || scm_ret) - return ret ? ret : -EINVAL; - - return 0; -} - -int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, - size_t *size) -{ - return -ENODEV; -} - -int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, - u32 spare) -{ - return -ENODEV; -} - -int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, - unsigned int *val) -{ - int ret; - - ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr); - if (ret >= 0) - *val = ret; - - return ret < 0 ? 
ret : 0; -} - -int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) -{ - return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, - addr, val); -} - -int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable) -{ - return -ENODEV; -} diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c deleted file mode 100644 index 3c5850350974..000000000000 --- a/drivers/firmware/qcom_scm-64.c +++ /dev/null @@ -1,579 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. - */ - -#include <linux/io.h> -#include <linux/errno.h> -#include <linux/delay.h> -#include <linux/mutex.h> -#include <linux/slab.h> -#include <linux/types.h> -#include <linux/qcom_scm.h> -#include <linux/arm-smccc.h> -#include <linux/dma-mapping.h> - -#include "qcom_scm.h" - -#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF)) - -#define MAX_QCOM_SCM_ARGS 10 -#define MAX_QCOM_SCM_RETS 3 - -enum qcom_scm_arg_types { - QCOM_SCM_VAL, - QCOM_SCM_RO, - QCOM_SCM_RW, - QCOM_SCM_BUFVAL, -}; - -#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\ - (((a) & 0x3) << 4) | \ - (((b) & 0x3) << 6) | \ - (((c) & 0x3) << 8) | \ - (((d) & 0x3) << 10) | \ - (((e) & 0x3) << 12) | \ - (((f) & 0x3) << 14) | \ - (((g) & 0x3) << 16) | \ - (((h) & 0x3) << 18) | \ - (((i) & 0x3) << 20) | \ - (((j) & 0x3) << 22) | \ - ((num) & 0xf)) - -#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) - -/** - * struct qcom_scm_desc - * @arginfo: Metadata describing the arguments in args[] - * @args: The array of arguments for the secure syscall - * @res: The values returned by the secure syscall - */ -struct qcom_scm_desc { - u32 arginfo; - u64 args[MAX_QCOM_SCM_ARGS]; -}; - -static u64 qcom_smccc_convention = -1; -static DEFINE_MUTEX(qcom_scm_lock); - -#define QCOM_SCM_EBUSY_WAIT_MS 30 -#define QCOM_SCM_EBUSY_MAX_RETRY 20 - -#define N_EXT_QCOM_SCM_ARGS 7 -#define FIRST_EXT_ARG_IDX 3 -#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1) - -static void __qcom_scm_call_do(const struct qcom_scm_desc *desc, - struct arm_smccc_res *res, u32 fn_id, - u64 x5, u32 type) -{ - u64 cmd; - struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 }; - - cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention, - ARM_SMCCC_OWNER_SIP, fn_id); - - quirk.state.a6 = 0; - - do { - arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0], - desc->args[1], desc->args[2], x5, - quirk.state.a6, 0, res, &quirk); - - if (res->a0 == QCOM_SCM_INTERRUPTED) - cmd = res->a0; - - } while (res->a0 == QCOM_SCM_INTERRUPTED); -} - -static void qcom_scm_call_do(const struct qcom_scm_desc *desc, - struct arm_smccc_res *res, u32 fn_id, - u64 x5, bool atomic) -{ - int retry_count = 0; - - if (atomic) { - __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL); - return; - } - - do { - mutex_lock(&qcom_scm_lock); - - __qcom_scm_call_do(desc, res, fn_id, x5, - ARM_SMCCC_STD_CALL); - - mutex_unlock(&qcom_scm_lock); - - if (res->a0 == QCOM_SCM_V2_EBUSY) { - if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) - break; - msleep(QCOM_SCM_EBUSY_WAIT_MS); - } - } while (res->a0 == QCOM_SCM_V2_EBUSY); -} - -static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, - const struct qcom_scm_desc *desc, - struct arm_smccc_res *res, bool atomic) -{ - int arglen = desc->arginfo & 0xf; - int i; - u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id); - u64 x5 = desc->args[FIRST_EXT_ARG_IDX]; - dma_addr_t args_phys 
= 0; - void *args_virt = NULL; - size_t alloc_len; - gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; - - if (unlikely(arglen > N_REGISTER_ARGS)) { - alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64); - args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); - - if (!args_virt) - return -ENOMEM; - - if (qcom_smccc_convention == ARM_SMCCC_SMC_32) { - __le32 *args = args_virt; - - for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++) - args[i] = cpu_to_le32(desc->args[i + - FIRST_EXT_ARG_IDX]); - } else { - __le64 *args = args_virt; - - for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++) - args[i] = cpu_to_le64(desc->args[i + - FIRST_EXT_ARG_IDX]); - } - - args_phys = dma_map_single(dev, args_virt, alloc_len, - DMA_TO_DEVICE); - - if (dma_mapping_error(dev, args_phys)) { - kfree(args_virt); - return -ENOMEM; - } - - x5 = args_phys; - } - - qcom_scm_call_do(desc, res, fn_id, x5, atomic); - - if (args_virt) { - dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); - kfree(args_virt); - } - - if ((long)res->a0 < 0) - return qcom_scm_remap_error(res->a0); - - return 0; -} - -/** - * qcom_scm_call() - Invoke a syscall in the secure world - * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier - * @desc: Descriptor structure containing arguments and return values - * - * Sends a command to the SCM and waits for the command to finish processing. - * This should *only* be called in pre-emptible context. - */ -static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, - const struct qcom_scm_desc *desc, - struct arm_smccc_res *res) -{ - might_sleep(); - return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false); -} - -/** - * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() - * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier - * @desc: Descriptor structure containing arguments and return values - * @res: Structure containing results from SMC/HVC call - * - * Sends a command to the SCM and waits for the command to finish processing. - * This can be called in atomic context. - */ -static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id, - const struct qcom_scm_desc *desc, - struct arm_smccc_res *res) -{ - return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true); -} - -/** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the cold boot address of the cpus. Any cpu outside the supported - * range would be removed from the cpu present mask. - */ -int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) -{ - return -ENOTSUPP; -} - -/** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus - * @dev: Device pointer - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the Linux entry point for the SCM to transfer control to when coming - * out of a power down. CPU power down may be executed on cpuidle or hotplug. - */ -int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry, - const cpumask_t *cpus) -{ - return -ENOTSUPP; -} - -/** - * qcom_scm_cpu_power_down() - Power down the cpu - * @flags - Flags to flush cache - * - * This is an end point to power down cpu. If there was a pending interrupt, - * the control would return from this function, otherwise, the cpu jumps to the - * warm boot entry point set for this cpu upon reset. 
- */ -void __qcom_scm_cpu_power_down(u32 flags) -{ -} - -int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.arginfo = QCOM_SCM_ARGS(1); - desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) | - (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req, - u32 req_cnt, u32 *resp) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) - return -ERANGE; - - desc.args[0] = req[0].addr; - desc.args[1] = req[0].val; - desc.args[2] = req[1].addr; - desc.args[3] = req[1].val; - desc.args[4] = req[2].addr; - desc.args[5] = req[2].val; - desc.args[6] = req[3].addr; - desc.args[7] = req[3].val; - desc.args[8] = req[4].addr; - desc.args[9] = req[4].val; - desc.arginfo = QCOM_SCM_ARGS(10); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc, - &res); - *resp = res.a1; - - return ret; -} - -int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset, - uint32_t size, uint32_t mode) -{ - return -ENOTSUPP; -} - -int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset, - uint32_t size) -{ - return -ENOTSUPP; -} - -void __qcom_scm_init(void) -{ - u64 cmd; - struct arm_smccc_res res; - u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD); - - /* First try a SMC64 call */ - cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, - ARM_SMCCC_OWNER_SIP, function); - - arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)), - 0, 0, 0, 0, 0, &res); - - if (!res.a0 && res.a1) - qcom_smccc_convention = ARM_SMCCC_SMC_64; - else - qcom_smccc_convention = ARM_SMCCC_SMC_32; -} - -bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_IS_SUPPORTED_CMD, - &desc, &res); - - return ret ? false : !!res.a1; -} - -int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, - dma_addr_t metadata_phys) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.args[1] = metadata_phys; - desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral, - phys_addr_t addr, phys_addr_t size) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.args[1] = addr; - desc.args[2] = size; - desc.arginfo = QCOM_SCM_ARGS(3); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_AUTH_AND_RESET_CMD, - &desc, &res); - - return ret ? 
: res.a1; -} - -int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = reset; - desc.args[1] = 0; - desc.arginfo = QCOM_SCM_ARGS(2); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc, - &res); - - return ret ? : res.a1; -} - -int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = state; - desc.args[1] = id; - desc.arginfo = QCOM_SCM_ARGS(2); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, - size_t mem_sz, phys_addr_t src, size_t src_sz, - phys_addr_t dest, size_t dest_sz) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = mem_region; - desc.args[1] = mem_sz; - desc.args[2] = src; - desc.args[3] = src_sz; - desc.args[4] = dest; - desc.args[5] = dest_sz; - desc.args[6] = 0; - - desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, - QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, - QCOM_SCM_VAL, QCOM_SCM_VAL); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, - QCOM_MEM_PROT_ASSIGN_ID, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = device_id; - desc.args[1] = spare; - desc.arginfo = QCOM_SCM_ARGS(2); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, - size_t *size) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = spare; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, - QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res); - - if (size) - *size = res.a1; - - return ret ? : res.a2; -} - -int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, - u32 spare) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = addr; - desc.args[1] = size; - desc.args[2] = spare; - desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, - QCOM_SCM_VAL); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, - QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res); - - /* the pg table has been initialized already, ignore the error */ - if (ret == -EPERM) - ret = 0; - - return ret; -} - -int __qcom_scm_set_dload_mode(struct device *dev, bool enable) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = QCOM_SCM_SET_DLOAD_MODE; - desc.args[1] = enable ? 
QCOM_SCM_SET_DLOAD_MODE : 0; - desc.arginfo = QCOM_SCM_ARGS(2); - - return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE, - &desc, &res); -} - -int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, - unsigned int *val) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = addr; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, - &desc, &res); - if (ret >= 0) - *val = res.a1; - - return ret < 0 ? ret : 0; -} - -int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = addr; - desc.args[1] = val; - desc.arginfo = QCOM_SCM_ARGS(2); - - return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, - &desc, &res); -} - -int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL; - desc.args[1] = en; - desc.arginfo = QCOM_SCM_ARGS(2); - - return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM, - QCOM_SCM_CONFIG_ERRATA1, &desc, &res); -} diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c new file mode 100644 index 000000000000..8532e7c78ef7 --- /dev/null +++ b/drivers/firmware/qcom_scm-legacy.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. + * Copyright (C) 2015 Linaro Ltd. + */ + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/qcom_scm.h> +#include <linux/arm-smccc.h> +#include <linux/dma-mapping.h> + +#include "qcom_scm.h" + +static DEFINE_MUTEX(qcom_scm_lock); + + +/** + * struct arm_smccc_args + * @args: The array of values used in registers in smc instruction + */ +struct arm_smccc_args { + unsigned long args[8]; +}; + + +/** + * struct scm_legacy_command - one SCM command buffer + * @len: total available memory for command and response + * @buf_offset: start of command buffer + * @resp_hdr_offset: start of response buffer + * @id: command to be executed + * @buf: buffer returned from scm_legacy_get_command_buffer() + * + * An SCM command is laid out in memory as follows: + * + * ------------------- <--- struct scm_legacy_command + * | command header | + * ------------------- <--- scm_legacy_get_command_buffer() + * | command buffer | + * ------------------- <--- struct scm_legacy_response and + * | response header | scm_legacy_command_to_response() + * ------------------- <--- scm_legacy_get_response_buffer() + * | response buffer | + * ------------------- + * + * There can be arbitrary padding between the headers and buffers so + * you should always use the appropriate scm_legacy_get_*_buffer() routines + * to access the buffers in a safe manner. 
+ */ +struct scm_legacy_command { + __le32 len; + __le32 buf_offset; + __le32 resp_hdr_offset; + __le32 id; + __le32 buf[0]; +}; + +/** + * struct scm_legacy_response - one SCM response buffer + * @len: total available memory for response + * @buf_offset: start of response data relative to start of scm_legacy_response + * @is_complete: indicates if the command has finished processing + */ +struct scm_legacy_response { + __le32 len; + __le32 buf_offset; + __le32 is_complete; +}; + +/** + * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response + * @cmd: command + * + * Returns a pointer to a response for a command. + */ +static inline struct scm_legacy_response *scm_legacy_command_to_response( + const struct scm_legacy_command *cmd) +{ + return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); +} + +/** + * scm_legacy_get_command_buffer() - Get a pointer to a command buffer + * @cmd: command + * + * Returns a pointer to the command buffer of a command. + */ +static inline void *scm_legacy_get_command_buffer( + const struct scm_legacy_command *cmd) +{ + return (void *)cmd->buf; +} + +/** + * scm_legacy_get_response_buffer() - Get a pointer to a response buffer + * @rsp: response + * + * Returns a pointer to a response buffer of a response. + */ +static inline void *scm_legacy_get_response_buffer( + const struct scm_legacy_response *rsp) +{ + return (void *)rsp + le32_to_cpu(rsp->buf_offset); +} + +static void __scm_legacy_do(const struct arm_smccc_args *smc, + struct arm_smccc_res *res) +{ + do { + arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2], + smc->args[3], smc->args[4], smc->args[5], + smc->args[6], smc->args[7], res); + } while (res->a0 == QCOM_SCM_INTERRUPTED); +} + +/** + * scm_legacy_call() - Sends a command to the SCM and waits for the command to + * finish processing. + * + * A note on cache maintenance: + * Note that any buffers that are expected to be accessed by the secure world + * must be flushed before invoking scm_legacy_call and invalidated in the cache + * immediately after scm_legacy_call returns. Cache maintenance on the command + * and response buffers is taken care of by scm_legacy_call; however, callers are + * responsible for any other cached buffers passed over to the secure world.
+ */ +int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) +{ + u8 arglen = desc->arginfo & 0xf; + int ret = 0, context_id; + unsigned int i; + struct scm_legacy_command *cmd; + struct scm_legacy_response *rsp; + struct arm_smccc_args smc = {0}; + struct arm_smccc_res smc_res; + const size_t cmd_len = arglen * sizeof(__le32); + const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32); + size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len; + dma_addr_t cmd_phys; + __le32 *arg_buf; + const __le32 *res_buf; + + cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->len = cpu_to_le32(alloc_len); + cmd->buf_offset = cpu_to_le32(sizeof(*cmd)); + cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len); + cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd)); + + arg_buf = scm_legacy_get_command_buffer(cmd); + for (i = 0; i < arglen; i++) + arg_buf[i] = cpu_to_le32(desc->args[i]); + + rsp = scm_legacy_command_to_response(cmd); + + cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, cmd_phys)) { + kfree(cmd); + return -ENOMEM; + } + + smc.args[0] = 1; + smc.args[1] = (unsigned long)&context_id; + smc.args[2] = cmd_phys; + + mutex_lock(&qcom_scm_lock); + __scm_legacy_do(&smc, &smc_res); + if (smc_res.a0) + ret = qcom_scm_remap_error(smc_res.a0); + mutex_unlock(&qcom_scm_lock); + if (ret) + goto out; + + do { + dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len, + sizeof(*rsp), DMA_FROM_DEVICE); + } while (!rsp->is_complete); + + dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + + le32_to_cpu(rsp->buf_offset), + resp_len, DMA_FROM_DEVICE); + + if (res) { + res_buf = scm_legacy_get_response_buffer(rsp); + for (i = 0; i < MAX_QCOM_SCM_RETS; i++) + res->result[i] = le32_to_cpu(res_buf[i]); + } +out: + dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); + kfree(cmd); + return ret; +} + +#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5 +#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2 +#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8) +#define SCM_LEGACY_MASK_IRQS BIT(5) +#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \ + ((SCM_LEGACY_FNID(svc, cmd) << 12) | \ + SCM_LEGACY_CLASS_REGISTER | \ + SCM_LEGACY_MASK_IRQS | \ + (n & 0xf)) + +/** + * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments + * and 3 return values + * @desc: SCM call descriptor containing arguments + * @res: SCM call return values + * + * This shall only be used with commands that are guaranteed to be + * uninterruptible, atomic and SMP safe. + */ +int scm_legacy_call_atomic(struct device *unused, + const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) +{ + int context_id; + struct arm_smccc_res smc_res; + size_t arglen = desc->arginfo & 0xf; + + BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS); + + arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen), + (unsigned long)&context_id, + desc->args[0], desc->args[1], desc->args[2], + desc->args[3], desc->args[4], 0, &smc_res); + + if (res) { + res->result[0] = smc_res.a1; + res->result[1] = smc_res.a2; + res->result[2] = smc_res.a3; + } + + return smc_res.a0; +} diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c new file mode 100644 index 000000000000..497c13ba98d6 --- /dev/null +++ b/drivers/firmware/qcom_scm-smc.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
+ */ + +#include <linux/io.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/qcom_scm.h> +#include <linux/arm-smccc.h> +#include <linux/dma-mapping.h> + +#include "qcom_scm.h" + +/** + * struct arm_smccc_args + * @args: The array of values used in registers in smc instruction + */ +struct arm_smccc_args { + unsigned long args[8]; +}; + +static DEFINE_MUTEX(qcom_scm_lock); + +#define QCOM_SCM_EBUSY_WAIT_MS 30 +#define QCOM_SCM_EBUSY_MAX_RETRY 20 + +#define SCM_SMC_N_REG_ARGS 4 +#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1) +#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1) +#define SCM_SMC_FIRST_REG_IDX 2 +#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1) + +static void __scm_smc_do_quirk(const struct arm_smccc_args *smc, + struct arm_smccc_res *res) +{ + unsigned long a0 = smc->args[0]; + struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 }; + + quirk.state.a6 = 0; + + do { + arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2], + smc->args[3], smc->args[4], smc->args[5], + quirk.state.a6, smc->args[7], res, &quirk); + + if (res->a0 == QCOM_SCM_INTERRUPTED) + a0 = res->a0; + + } while (res->a0 == QCOM_SCM_INTERRUPTED); +} + +static void __scm_smc_do(const struct arm_smccc_args *smc, + struct arm_smccc_res *res, bool atomic) +{ + int retry_count = 0; + + if (atomic) { + __scm_smc_do_quirk(smc, res); + return; + } + + do { + mutex_lock(&qcom_scm_lock); + + __scm_smc_do_quirk(smc, res); + + mutex_unlock(&qcom_scm_lock); + + if (res->a0 == QCOM_SCM_V2_EBUSY) { + if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) + break; + msleep(QCOM_SCM_EBUSY_WAIT_MS); + } + } while (res->a0 == QCOM_SCM_V2_EBUSY); +} + +int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res, bool atomic) +{ + int arglen = desc->arginfo & 0xf; + int i; + dma_addr_t args_phys = 0; + void *args_virt = NULL; + size_t alloc_len; + gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; + u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL; + u32 qcom_smccc_convention = + (qcom_scm_convention == SMC_CONVENTION_ARM_32) ? 
+ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64; + struct arm_smccc_res smc_res; + struct arm_smccc_args smc = {0}; + + smc.args[0] = ARM_SMCCC_CALL_VAL( + smccc_call_type, + qcom_smccc_convention, + desc->owner, + SCM_SMC_FNID(desc->svc, desc->cmd)); + smc.args[1] = desc->arginfo; + for (i = 0; i < SCM_SMC_N_REG_ARGS; i++) + smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; + + if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { + alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); + args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); + + if (!args_virt) + return -ENOMEM; + + if (qcom_smccc_convention == ARM_SMCCC_SMC_32) { + __le32 *args = args_virt; + + for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++) + args[i] = cpu_to_le32(desc->args[i + + SCM_SMC_FIRST_EXT_IDX]); + } else { + __le64 *args = args_virt; + + for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++) + args[i] = cpu_to_le64(desc->args[i + + SCM_SMC_FIRST_EXT_IDX]); + } + + args_phys = dma_map_single(dev, args_virt, alloc_len, + DMA_TO_DEVICE); + + if (dma_mapping_error(dev, args_phys)) { + kfree(args_virt); + return -ENOMEM; + } + + smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; + } + + __scm_smc_do(&smc, &smc_res, atomic); + + if (args_virt) { + dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); + kfree(args_virt); + } + + if (res) { + res->result[0] = smc_res.a1; + res->result[1] = smc_res.a2; + res->result[2] = smc_res.a3; + } + + return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0; +} diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 1ba0df4b97ab..059bb0fbae9e 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -1,8 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * Qualcomm SCM driver - * - * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. */ #include <linux/platform_device.h> @@ -19,6 +16,7 @@ #include <linux/of_platform.h> #include <linux/clk.h> #include <linux/reset-controller.h> +#include <linux/arm-smccc.h> #include "qcom_scm.h" @@ -52,6 +50,35 @@ struct qcom_scm_mem_map_info { __le64 mem_size; }; +#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 +#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 +#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 +#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 + +#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 +#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 +#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 +#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 + +struct qcom_scm_wb_entry { + int flag; + void *entry; +}; + +static struct qcom_scm_wb_entry qcom_scm_wb[] = { + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, +}; + +static const char *qcom_scm_convention_names[] = { + [SMC_CONVENTION_UNKNOWN] = "unknown", + [SMC_CONVENTION_ARM_32] = "smc arm 32", + [SMC_CONVENTION_ARM_64] = "smc arm 64", + [SMC_CONVENTION_LEGACY] = "smc legacy", +}; + static struct qcom_scm *__scm; static int qcom_scm_clk_enable(void) @@ -87,149 +114,308 @@ static void qcom_scm_clk_disable(void) clk_disable_unprepare(__scm->bus_clk); } -/** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the cold boot address of the cpus. Any cpu outside the supported - * range would be removed from the cpu present mask. 
- */
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+					u32 cmd_id);
+
+enum qcom_scm_convention qcom_scm_convention;
+static bool has_queried __read_mostly;
+static DEFINE_SPINLOCK(query_lock);
+
+static void __query_convention(void)
 {
-	return __qcom_scm_set_cold_boot_addr(entry, cpus);
+	unsigned long flags;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_INFO,
+		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
+					QCOM_SCM_INFO_IS_CALL_AVAIL) |
+			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
+		.arginfo = QCOM_SCM_ARGS(1),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	spin_lock_irqsave(&query_lock, flags);
+	if (has_queried)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_ARM_64;
+	/*
+	 * A device isn't required here: the probe takes a single value
+	 * argument, so there is no buffer to dma_map_single() into the
+	 * secure world.
+	 */
+	ret = scm_smc_call(NULL, &desc, &res, true);
+	if (!ret && res.result[0] == 1)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_ARM_32;
+	ret = scm_smc_call(NULL, &desc, &res, true);
+	if (!ret && res.result[0] == 1)
+		goto out;
+
+	qcom_scm_convention = SMC_CONVENTION_LEGACY;
+out:
+	has_queried = true;
+	spin_unlock_irqrestore(&query_lock, flags);
+	pr_info("qcom_scm: convention: %s\n",
+		qcom_scm_convention_names[qcom_scm_convention]);
 }
-EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
 
-/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+static inline enum qcom_scm_convention __get_convention(void)
 {
-	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
+	if (unlikely(!has_queried))
+		__query_convention();
+	return qcom_scm_convention;
 }
-EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
 
 /**
- * qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev:	device
+ * @desc:	Descriptor structure containing arguments and return values
+ * @res:	Structure containing results from SMC/HVC call
  *
- * This is an end point to power down cpu. If there was a pending interrupt,
- * the control would return from this function, otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in preemptible context.
  */
-void qcom_scm_cpu_power_down(u32 flags)
+static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
+			 struct qcom_scm_res *res)
 {
-	__qcom_scm_cpu_power_down(flags);
+	might_sleep();
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		return scm_smc_call(dev, desc, res, false);
+	case SMC_CONVENTION_LEGACY:
+		return scm_legacy_call(dev, desc, res);
+	default:
+		pr_err("Unknown current SCM calling convention.\n");
+		return -EINVAL;
+	}
 }
-EXPORT_SYMBOL(qcom_scm_cpu_power_down);
 
 /**
- * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev:	device
+ * @desc:	Descriptor structure containing arguments and return values
+ * @res:	Structure containing results from SMC/HVC call
  *
- * Return true if HDCP is supported, false if not.
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
  */
-bool qcom_scm_hdcp_available(void)
+static int qcom_scm_call_atomic(struct device *dev,
+				const struct qcom_scm_desc *desc,
+				struct qcom_scm_res *res)
 {
-	int ret = qcom_scm_clk_enable();
-
-	if (ret)
-		return ret;
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		return scm_smc_call(dev, desc, res, true);
+	case SMC_CONVENTION_LEGACY:
+		return scm_legacy_call_atomic(dev, desc, res);
+	default:
+		pr_err("Unknown current SCM calling convention.\n");
+		return -EINVAL;
+	}
+}
 
-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
-						QCOM_SCM_CMD_HDCP);
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+					u32 cmd_id)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_INFO,
+		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	desc.arginfo = QCOM_SCM_ARGS(1);
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
+				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+		break;
+	case SMC_CONVENTION_LEGACY:
+		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
+		break;
+	default:
+		pr_err("Unknown SMC convention being used\n");
+		return -EINVAL;
+	}
 
-	qcom_scm_clk_disable();
+	ret = qcom_scm_call(dev, &desc, &res);
 
-	return ret > 0 ? true : false;
+	return ret ? : res.result[0];
 }
-EXPORT_SYMBOL(qcom_scm_hdcp_available);
 
 /**
- * qcom_scm_hdcp_req() - Send HDCP request.
- * @req: HDCP request array
- * @req_cnt: HDCP request array count
- * @resp: response buffer passed to SCM
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
  *
- * Write HDCP register(s) through SCM.
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
  */
-int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 {
-	int ret = qcom_scm_clk_enable();
+	int ret;
+	int flags = 0;
+	int cpu;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_ADDR,
+		.arginfo = QCOM_SCM_ARGS(2),
+	};
 
-	if (ret)
-		return ret;
+	/*
+	 * Reassign only if we are switching from hotplug entry point
+	 * to cpuidle entry point or vice versa.
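+	 * A CPU whose recorded entry point already matches @entry
+	 * contributes no flag bit, so an all-zero mask below means the
+	 * secure call can be skipped entirely.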
+	 */
+	for_each_cpu(cpu, cpus) {
+		if (entry == qcom_scm_wb[cpu].entry)
+			continue;
+		flags |= qcom_scm_wb[cpu].flag;
+	}
+
+	/* No change in entry function */
+	if (!flags)
+		return 0;
+
+	desc.args[0] = flags;
+	desc.args[1] = virt_to_phys(entry);
+
+	ret = qcom_scm_call(__scm->dev, &desc, NULL);
+	if (!ret) {
+		for_each_cpu(cpu, cpus)
+			qcom_scm_wb[cpu].entry = entry;
+	}
 
-	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
-	qcom_scm_clk_disable();
 	return ret;
 }
-EXPORT_SYMBOL(qcom_scm_hdcp_req);
+EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
 
 /**
- * qcom_scm_pas_supported() - Check if the peripheral authentication service is
- *			      available for the given peripherial
- * @peripheral:	peripheral id
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
  *
- * Returns true if PAS is supported for this peripheral, otherwise false.
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
  */
-bool qcom_scm_pas_supported(u32 peripheral)
+int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
 {
-	int ret;
+	int flags = 0;
+	int cpu;
+	int scm_cb_flags[] = {
+		QCOM_SCM_FLAG_COLDBOOT_CPU0,
+		QCOM_SCM_FLAG_COLDBOOT_CPU1,
+		QCOM_SCM_FLAG_COLDBOOT_CPU2,
+		QCOM_SCM_FLAG_COLDBOOT_CPU3,
+	};
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_ADDR,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	if (!cpus || cpumask_empty(cpus))
+		return -EINVAL;
 
-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
-					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
-	if (ret <= 0)
-		return false;
+	for_each_cpu(cpu, cpus) {
+		if (cpu < ARRAY_SIZE(scm_cb_flags))
+			flags |= scm_cb_flags[cpu];
+		else
+			set_cpu_present(cpu, false);
+	}
+
+	desc.args[0] = flags;
+	desc.args[1] = virt_to_phys(entry);
 
-	return __qcom_scm_pas_supported(__scm->dev, peripheral);
+	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 }
-EXPORT_SYMBOL(qcom_scm_pas_supported);
+EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
 
 /**
- * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags:	Flags to flush cache
+ *
+ * This is an end point to power down the cpu. If there was a pending
+ * interrupt, the control would return from this function, otherwise, the cpu
+ * jumps to the warm boot entry point set for this cpu upon reset.
  */
-bool qcom_scm_ocmem_lock_available(void)
+void qcom_scm_cpu_power_down(u32 flags)
 {
-	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC,
-					    QCOM_SCM_OCMEM_LOCK_CMD);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
+		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	qcom_scm_call_atomic(__scm ?
__scm->dev : NULL, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_ocmem_lock_available); +EXPORT_SYMBOL(qcom_scm_cpu_power_down); -/** - * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM - * region to the specified initiator - * - * @id: tz initiator id - * @offset: OCMEM offset - * @size: OCMEM size - * @mode: access mode (WIDE/NARROW) - */ -int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, - u32 mode) +int qcom_scm_set_remote_state(u32 state, u32 id) { - return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode); + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = state, + .args[1] = id, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_ocmem_lock); +EXPORT_SYMBOL(qcom_scm_set_remote_state); -/** - * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM - * region from the specified initiator - * - * @id: tz initiator id - * @offset: OCMEM offset - * @size: OCMEM size - */ -int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) +static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) { - return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size); + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} + +static void qcom_scm_set_download_mode(bool enable) +{ + bool avail; + int ret = 0; + + avail = __qcom_scm_is_call_available(__scm->dev, + QCOM_SCM_SVC_BOOT, + QCOM_SCM_BOOT_SET_DLOAD_MODE); + if (avail) { + ret = __qcom_scm_set_dload_mode(__scm->dev, enable); + } else if (__scm->dload_mode_addr) { + ret = qcom_scm_io_writel(__scm->dload_mode_addr, + enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0); + } else { + dev_err(__scm->dev, + "No available mechanism for setting download mode\n"); + } + + if (ret) + dev_err(__scm->dev, "failed to set download mode: %d\n", ret); } -EXPORT_SYMBOL(qcom_scm_ocmem_unlock); /** * qcom_scm_pas_init_image() - Initialize peripheral authentication service @@ -248,6 +434,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) dma_addr_t mdata_phys; void *mdata_buf; int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE, + .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; /* * During the scm call memory protection will be enabled for the meta @@ -266,14 +460,16 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) if (ret) goto free_metadata; - ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys); + desc.args[1] = mdata_phys; + + ret = qcom_scm_call(__scm->dev, &desc, &res); qcom_scm_clk_disable(); free_metadata: dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); - return ret; + return ret ? 
: res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_init_image);
@@ -289,15 +485,25 @@ EXPORT_SYMBOL(qcom_scm_pas_init_image);
 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
+		.arginfo = QCOM_SCM_ARGS(3),
+		.args[0] = peripheral,
+		.args[1] = addr,
+		.args[2] = size,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
@@ -311,15 +517,23 @@ EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
 int qcom_scm_pas_auth_and_reset(u32 peripheral)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
@@ -332,18 +546,75 @@ EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
 int qcom_scm_pas_shutdown(u32 peripheral)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
 
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *			      available for the given peripheral
+ * @peripheral:	peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+					   QCOM_SCM_PIL_PAS_IS_SUPPORTED);
+	if (ret <= 0)
+		return false;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ? false : !!res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = reset,
+		.args[1] = 0,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ?
: res.result[0]; +} + static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev, unsigned long idx) { @@ -367,6 +638,43 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = { .deassert = qcom_scm_pas_reset_deassert, }; +int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_IO, + .cmd = QCOM_SCM_IO_READ, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = addr, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + + ret = qcom_scm_call(__scm->dev, &desc, &res); + if (ret >= 0) + *val = res.result[0]; + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL(qcom_scm_io_readl); + +int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_IO, + .cmd = QCOM_SCM_IO_WRITE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = addr, + .args[1] = val, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_io_writel); + /** * qcom_scm_restore_sec_cfg_available() - Check if secure environment * supports restore security config interface. @@ -376,108 +684,106 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = { bool qcom_scm_restore_sec_cfg_available(void) { return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, - QCOM_SCM_RESTORE_SEC_CFG); + QCOM_SCM_MP_RESTORE_SEC_CFG); } EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available); int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { - return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare); -} -EXPORT_SYMBOL(qcom_scm_restore_sec_cfg); - -int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) -{ - return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size); -} -EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size); - -int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) -{ - return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare); -} -EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = device_id, + .args[1] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; -int qcom_scm_qsmmu500_wait_safe_toggle(bool en) -{ - return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en); -} -EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle); + ret = qcom_scm_call(__scm->dev, &desc, &res); -int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) -{ - return __qcom_scm_io_readl(__scm->dev, addr, val); + return ret ? 
: res.result[0];
 }
-EXPORT_SYMBOL(qcom_scm_io_readl);
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
 
-int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
 {
-	return __qcom_scm_io_writel(__scm->dev, addr, val);
-}
-EXPORT_SYMBOL(qcom_scm_io_writel);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = spare,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
 
-static void qcom_scm_set_download_mode(bool enable)
-{
-	bool avail;
-	int ret = 0;
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 
-	avail = __qcom_scm_is_call_available(__scm->dev,
-					     QCOM_SCM_SVC_BOOT,
-					     QCOM_SCM_SET_DLOAD_MODE);
-	if (avail) {
-		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
-	} else if (__scm->dload_mode_addr) {
-		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
-					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
-	} else {
-		dev_err(__scm->dev,
-			"No available mechanism for setting download mode\n");
-	}
+	if (size)
+		*size = res.result[0];
 
-	if (ret)
-		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+	return ret ? : res.result[1];
 }
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
 
-static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 {
-	struct device_node *tcsr;
-	struct device_node *np = dev->of_node;
-	struct resource res;
-	u32 offset;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
+		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
+					 QCOM_SCM_VAL),
+		.args[0] = addr,
+		.args[1] = size,
+		.args[2] = spare,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
 	int ret;
 
-	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
-	if (!tcsr)
-		return 0;
-
-	ret = of_address_to_resource(tcsr, 0, &res);
-	of_node_put(tcsr);
-	if (ret)
-		return ret;
-
-	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
-	if (ret < 0)
-		return ret;
-
-	*addr = res.start + offset;
+	ret = qcom_scm_call(__scm->dev, &desc, NULL);
 
-	return 0;
-}
+	/* the pg table has been initialized already, ignore the error */
+	if (ret == -EPERM)
+		ret = 0;
 
-/**
- * qcom_scm_is_available() - Checks if SCM is available
- */
-bool qcom_scm_is_available(void)
-{
-	return !!__scm;
+	return ret;
 }
-EXPORT_SYMBOL(qcom_scm_is_available);
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
 
-int qcom_scm_set_remote_state(u32 state, u32 id)
+static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+				 size_t mem_sz, phys_addr_t src, size_t src_sz,
+				 phys_addr_t dest, size_t dest_sz)
 {
-	return __qcom_scm_set_remote_state(__scm->dev, state, id);
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_ASSIGN,
+		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+					 QCOM_SCM_VAL, QCOM_SCM_VAL),
+		.args[0] = mem_region,
+		.args[1] = mem_sz,
+		.args[2] = src,
+		.args[3] = src_sz,
+		.args[4] = dest,
+		.args[5] = dest_sz,
+		.args[6] = 0,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	ret = qcom_scm_call(dev, &desc, &res);
+
+	return ret ?
: res.result[0];
 }
-EXPORT_SYMBOL(qcom_scm_set_remote_state);
 
 /**
  * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
@@ -561,6 +867,184 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 }
 EXPORT_SYMBOL(qcom_scm_assign_mem);
 
+/**
+ * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ */
+bool qcom_scm_ocmem_lock_available(void)
+{
+	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
+					    QCOM_SCM_OCMEM_LOCK_CMD);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+
+/**
+ * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
+ * region to the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ * @mode: access mode (WIDE/NARROW)
+ */
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+			u32 mode)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_OCMEM,
+		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
+		.args[0] = id,
+		.args[1] = offset,
+		.args[2] = size,
+		.args[3] = mode,
+		.arginfo = QCOM_SCM_ARGS(4),
+	};
+
+	return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+
+/**
+ * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
+ * region from the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ */
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_OCMEM,
+		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
+		.args[0] = id,
+		.args[1] = offset,
+		.args[2] = size,
+		.arginfo = QCOM_SCM_ARGS(3),
+	};
+
+	return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
+
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+	int ret = qcom_scm_clk_enable();
+
+	if (ret)
+		return false;
+
+	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+					   QCOM_SCM_HDCP_INVOKE);
+
+	qcom_scm_clk_disable();
+
+	return ret > 0;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
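+ *
+ * A minimal usage sketch; the register address below is an illustrative
+ * placeholder, not a value taken from this driver:
+ *
+ *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
+ *		{ .addr = 0x1000, .val = 0x1 },	// remaining entries zeroed
+ *	};
+ *	u32 resp;
+ *	int ret;
+ *
+ *	if (qcom_scm_hdcp_available())
+ *		ret = qcom_scm_hdcp_req(req, ARRAY_SIZE(req), &resp);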
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_HDCP,
+		.cmd = QCOM_SCM_HDCP_INVOKE,
+		.arginfo = QCOM_SCM_ARGS(10),
+		.args = {
+			req[0].addr,
+			req[0].val,
+			req[1].addr,
+			req[1].val,
+			req[2].addr,
+			req[2].val,
+			req[3].addr,
+			req[3].val,
+			req[4].addr,
+			req[4].val
+		},
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+		return -ERANGE;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+	if (!ret)
+		*resp = res.result[0];
+
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
+		.args[1] = en,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+	struct device_node *tcsr;
+	struct device_node *np = dev->of_node;
+	struct resource res;
+	u32 offset;
+	int ret;
+
+	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+	if (!tcsr)
+		return 0;
+
+	ret = of_address_to_resource(tcsr, 0, &res);
+	of_node_put(tcsr);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+	if (ret < 0)
+		return ret;
+
+	*addr = res.start + offset;
+
+	return 0;
+}
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+	return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
 static int qcom_scm_probe(struct platform_device *pdev)
 {
 	struct qcom_scm *scm;
@@ -631,7 +1115,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
 	__scm = scm;
 	__scm->dev = &pdev->dev;
 
-	__qcom_scm_init();
+	__query_convention();
 
 	/*
 	 * If requested enable "download mode", from this point on warmboot
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 81dcf5f1138e..d9ed670da222 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -1,71 +1,116 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
*/ #ifndef __QCOM_SCM_INT_H #define __QCOM_SCM_INT_H -#define QCOM_SCM_SVC_BOOT 0x1 -#define QCOM_SCM_BOOT_ADDR 0x1 -#define QCOM_SCM_SET_DLOAD_MODE 0x10 -#define QCOM_SCM_BOOT_ADDR_MC 0x11 -#define QCOM_SCM_SET_REMOTE_STATE 0xa -extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id); -extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable); - -#define QCOM_SCM_FLAG_HLOS 0x01 -#define QCOM_SCM_FLAG_COLDBOOT_MC 0x02 -#define QCOM_SCM_FLAG_WARMBOOT_MC 0x04 -extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry, - const cpumask_t *cpus); -extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); - -#define QCOM_SCM_CMD_TERMINATE_PC 0x2 +enum qcom_scm_convention { + SMC_CONVENTION_UNKNOWN, + SMC_CONVENTION_LEGACY, + SMC_CONVENTION_ARM_32, + SMC_CONVENTION_ARM_64, +}; + +extern enum qcom_scm_convention qcom_scm_convention; + +#define MAX_QCOM_SCM_ARGS 10 +#define MAX_QCOM_SCM_RETS 3 + +enum qcom_scm_arg_types { + QCOM_SCM_VAL, + QCOM_SCM_RO, + QCOM_SCM_RW, + QCOM_SCM_BUFVAL, +}; + +#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\ + (((a) & 0x3) << 4) | \ + (((b) & 0x3) << 6) | \ + (((c) & 0x3) << 8) | \ + (((d) & 0x3) << 10) | \ + (((e) & 0x3) << 12) | \ + (((f) & 0x3) << 14) | \ + (((g) & 0x3) << 16) | \ + (((h) & 0x3) << 18) | \ + (((i) & 0x3) << 20) | \ + (((j) & 0x3) << 22) | \ + ((num) & 0xf)) + +#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + + +/** + * struct qcom_scm_desc + * @arginfo: Metadata describing the arguments in args[] + * @args: The array of arguments for the secure syscall + */ +struct qcom_scm_desc { + u32 svc; + u32 cmd; + u32 arginfo; + u64 args[MAX_QCOM_SCM_ARGS]; + u32 owner; +}; + +/** + * struct qcom_scm_res + * @result: The values returned by the secure syscall + */ +struct qcom_scm_res { + u64 result[MAX_QCOM_SCM_RETS]; +}; + +#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF)) +extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res, bool atomic); + +#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff)) +extern int scm_legacy_call_atomic(struct device *dev, + const struct qcom_scm_desc *desc, + struct qcom_scm_res *res); +extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res); + +#define QCOM_SCM_SVC_BOOT 0x01 +#define QCOM_SCM_BOOT_SET_ADDR 0x01 +#define QCOM_SCM_BOOT_TERMINATE_PC 0x02 +#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10 +#define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a #define QCOM_SCM_FLUSH_FLAG_MASK 0x3 -#define QCOM_SCM_CMD_CORE_HOTPLUGGED 0x10 -extern void __qcom_scm_cpu_power_down(u32 flags); -#define QCOM_SCM_SVC_IO 0x5 -#define QCOM_SCM_IO_READ 0x1 -#define QCOM_SCM_IO_WRITE 0x2 -extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val); -extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val); +#define QCOM_SCM_SVC_PIL 0x02 +#define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01 +#define QCOM_SCM_PIL_PAS_MEM_SETUP 0x02 +#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET 0x05 +#define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06 +#define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07 +#define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a + +#define QCOM_SCM_SVC_IO 0x05 +#define QCOM_SCM_IO_READ 0x01 +#define QCOM_SCM_IO_WRITE 0x02 + +#define QCOM_SCM_SVC_INFO 0x06 +#define QCOM_SCM_INFO_IS_CALL_AVAIL 0x01 -#define QCOM_SCM_SVC_INFO 0x6 -#define QCOM_IS_CALL_AVAIL_CMD 0x1 -extern int 
__qcom_scm_is_call_available(struct device *dev, u32 svc_id, - u32 cmd_id); +#define QCOM_SCM_SVC_MP 0x0c +#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02 +#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03 +#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04 +#define QCOM_SCM_MP_ASSIGN 0x16 + +#define QCOM_SCM_SVC_OCMEM 0x0f +#define QCOM_SCM_OCMEM_LOCK_CMD 0x01 +#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x02 #define QCOM_SCM_SVC_HDCP 0x11 -#define QCOM_SCM_CMD_HDCP 0x01 -extern int __qcom_scm_hdcp_req(struct device *dev, - struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); +#define QCOM_SCM_HDCP_INVOKE 0x01 -extern void __qcom_scm_init(void); +#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15 +#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03 +#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02 -#define QCOM_SCM_OCMEM_SVC 0xf -#define QCOM_SCM_OCMEM_LOCK_CMD 0x1 -#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x2 - -extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, - u32 size, u32 mode); -extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, - u32 size); - -#define QCOM_SCM_SVC_PIL 0x2 -#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1 -#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2 -#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD 0x5 -#define QCOM_SCM_PAS_SHUTDOWN_CMD 0x6 -#define QCOM_SCM_PAS_IS_SUPPORTED_CMD 0x7 -#define QCOM_SCM_PAS_MSS_RESET 0xa -extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral); -extern int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, - dma_addr_t metadata_phys); -extern int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral, - phys_addr_t addr, phys_addr_t size); -extern int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral); -extern int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral); -extern int __qcom_scm_pas_mss_reset(struct device *dev, bool reset); +extern void __qcom_scm_init(void); /* common error codes */ #define QCOM_SCM_V2_EBUSY -12 @@ -94,25 +139,4 @@ static inline int qcom_scm_remap_error(int err) return -EINVAL; } -#define QCOM_SCM_SVC_MP 0xc -#define QCOM_SCM_RESTORE_SEC_CFG 2 -extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, - u32 spare); -#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3 -#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4 -#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15 -#define QCOM_SCM_CONFIG_ERRATA1 0x3 -#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2 -extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, - size_t *size); -extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, - u32 size, u32 spare); -extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, - bool enable); -#define QCOM_MEM_PROT_ASSIGN_ID 0x16 -extern int __qcom_scm_assign_mem(struct device *dev, - phys_addr_t mem_region, size_t mem_sz, - phys_addr_t src, size_t src_sz, - phys_addr_t dest, size_t dest_sz); - #endif diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 72be58960e54..e27f68437b56 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -197,7 +197,7 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) rwtm->serial_number = reply->status[1]; rwtm->serial_number <<= 32; rwtm->serial_number |= reply->status[0]; - rwtm->board_version = reply->status[2]; + rwtm->board_version = reply->status[2]; rwtm->ram_size = reply->status[3]; reply_to_mac_addr(rwtm->mac_address1, reply->status[4], reply->status[5]); diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c 
index 74d9f13d72c4..ecc339d846de 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -26,6 +26,9 @@
 
 static const struct zynqmp_eemi_ops *eemi_ops_tbl;
 
+static bool feature_check_enabled;
+static u32 zynqmp_pm_features[PM_API_MAX];
+
 static const struct mfd_cell firmware_devs[] = {
 	{
 		.name = "zynqmp_power_controller",
@@ -44,6 +47,8 @@ static int zynqmp_pm_ret_code(u32 ret_status)
 	case XST_PM_SUCCESS:
 	case XST_PM_DOUBLE_REQ:
 		return 0;
+	case XST_PM_NO_FEATURE:
+		return -ENOTSUPP;
 	case XST_PM_NO_ACCESS:
 		return -EACCES;
 	case XST_PM_ABORT_SUSPEND:
@@ -129,6 +134,39 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
 }
 
 /**
+ * zynqmp_pm_feature() - Check whether given feature is supported or not
+ * @api_id:		API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_feature(u32 api_id)
+{
+	int ret;
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	u64 smc_arg[2];
+
+	if (!feature_check_enabled)
+		return 0;
+
+	/* Return value if feature is already checked */
+	if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
+		return zynqmp_pm_features[api_id];
+
+	smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+	smc_arg[1] = api_id;
+
+	ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+	if (ret) {
+		zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
+		return PM_FEATURE_INVALID;
+	}
+
+	zynqmp_pm_features[api_id] = ret_payload[1];
+
+	return zynqmp_pm_features[api_id];
+}
+
+/**
  * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
  *			   caller function depending on the configuration
  * @pm_api_id:		Requested PM-API call
@@ -162,6 +200,9 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
 	 */
 	u64 smc_arg[4];
 
+	if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
+		return -ENOTSUPP;
+
 	smc_arg[0] = PM_SIP_SVC | pm_api_id;
 	smc_arg[1] = ((u64)arg1 << 32) | arg0;
 	smc_arg[2] = ((u64)arg3 << 32) | arg2;
@@ -717,6 +758,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 		np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
 		if (!np)
 			return 0;
+
+		feature_check_enabled = true;
 	}
 	of_node_put(np);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 7ae3b22c5628..c2bbcdd9c875 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -120,6 +120,7 @@ amdgpu-y += \
 	amdgpu_rlc.o \
 	gfx_v8_0.o \
 	gfx_v9_0.o \
+	gfx_v9_4.o \
 	gfx_v10_0.o
 
 # add async DMA block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b1bb10625cd9..da3bcff61b97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1009,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 
 #define AMDGPU_REGS_IDX       (1<<0)
 #define AMDGPU_REGS_NO_KIQ    (1<<1)
+#define AMDGPU_REGS_KIQ       (1<<2)
 
 #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
+#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
+#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+
 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 82155ac3288a..12247a32f9ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -527,7 +527,7 @@ static int
acp_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = state == AMD_PG_STATE_GATE ? true : false; + bool enable = (state == AMD_PG_STATE_GATE); if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index b2487f4f271b..fa8ac9d19a7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2129,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem return -ENOMEM; mutex_init(&(*mem)->lock); + INIT_LIST_HEAD(&(*mem)->bo_va_list); (*mem)->bo = amdgpu_bo_ref(gws_bo); (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; (*mem)->process_info = process_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 64e2babbc36e..94a6c42f29ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_VCN_JPEG] = 1, }; -static int amdgpu_ctx_total_num_entities(void) -{ - unsigned i, num_entities = 0; - - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) - num_entities += amdgpu_ctx_num_entities[i]; - - return num_entities; -} - static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) { + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) + return -EINVAL; + /* NORMAL and below are accessible by everyone */ if (priority <= DRM_SCHED_PRIORITY_NORMAL) return 0; @@ -68,64 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, return -EACCES; } -static int amdgpu_ctx_init(struct amdgpu_device *adev, - enum drm_sched_priority priority, - struct drm_file *filp, - struct amdgpu_ctx *ctx) +static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); - unsigned i, j; + struct amdgpu_device *adev = ctx->adev; + struct amdgpu_ctx_entity *entity; + struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; + unsigned num_scheds = 0; + enum drm_sched_priority priority; int r; - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) - return -EINVAL; - - r = amdgpu_ctx_priority_permit(filp, priority); - if (r) - return r; + entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]), + GFP_KERNEL); + if (!entity) + return -ENOMEM; - memset(ctx, 0, sizeof(*ctx)); - ctx->adev = adev; - - ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, - sizeof(struct dma_fence*), GFP_KERNEL); - if (!ctx->fences) - return -ENOMEM; - - ctx->entities[0] = kcalloc(num_entities, - sizeof(struct amdgpu_ctx_entity), - GFP_KERNEL); - if (!ctx->entities[0]) { - r = -ENOMEM; - goto error_free_fences; - } - - for (i = 0; i < num_entities; ++i) { - struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; - - entity->sequence = 1; - entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; - } - for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) - ctx->entities[i] = ctx->entities[i - 1] + - amdgpu_ctx_num_entities[i - 1]; - - kref_init(&ctx->refcount); - spin_lock_init(&ctx->ring_lock); - mutex_init(&ctx->lock); - - ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); - ctx->reset_counter_query = ctx->reset_counter; - ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); - 
ctx->init_priority = priority; - ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; - - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { - struct drm_gpu_scheduler **scheds; - struct drm_gpu_scheduler *sched; - unsigned num_scheds = 0; - - switch (i) { + entity->sequence = 1; + priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? + ctx->init_priority : ctx->override_priority; + switch (hw_ip) { case AMDGPU_HW_IP_GFX: sched = &adev->gfx.gfx_ring[0].sched; scheds = &sched; @@ -166,53 +119,90 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, scheds = adev->jpeg.jpeg_sched; num_scheds = adev->jpeg.num_jpeg_sched; break; - } - - for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) - r = drm_sched_entity_init(&ctx->entities[i][j].entity, - priority, scheds, - num_scheds, &ctx->guilty); - if (r) - goto error_cleanup_entities; } + r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, + &ctx->guilty); + if (r) + goto error_free_entity; + + ctx->entities[hw_ip][ring] = entity; return 0; -error_cleanup_entities: - for (i = 0; i < num_entities; ++i) - drm_sched_entity_destroy(&ctx->entities[0][i].entity); - kfree(ctx->entities[0]); +error_free_entity: + kfree(entity); -error_free_fences: - kfree(ctx->fences); - ctx->fences = NULL; return r; } +static int amdgpu_ctx_init(struct amdgpu_device *adev, + enum drm_sched_priority priority, + struct drm_file *filp, + struct amdgpu_ctx *ctx) +{ + int r; + + r = amdgpu_ctx_priority_permit(filp, priority); + if (r) + return r; + + memset(ctx, 0, sizeof(*ctx)); + + ctx->adev = adev; + + kref_init(&ctx->refcount); + spin_lock_init(&ctx->ring_lock); + mutex_init(&ctx->lock); + + ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); + ctx->reset_counter_query = ctx->reset_counter; + ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); + ctx->init_priority = priority; + ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; + + return 0; + +} + +static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) +{ + + int i; + + if (!entity) + return; + + for (i = 0; i < amdgpu_sched_jobs; ++i) + dma_fence_put(entity->fences[i]); + + kfree(entity); +} + static void amdgpu_ctx_fini(struct kref *ref) { struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_device *adev = ctx->adev; unsigned i, j; if (!adev) return; - for (i = 0; i < num_entities; ++i) - for (j = 0; j < amdgpu_sched_jobs; ++j) - dma_fence_put(ctx->entities[0][i].fences[j]); - kfree(ctx->fences); - kfree(ctx->entities[0]); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) { + amdgpu_ctx_fini_entity(ctx->entities[i][j]); + ctx->entities[i][j] = NULL; + } + } mutex_destroy(&ctx->lock); - kfree(ctx); } int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, u32 ring, struct drm_sched_entity **entity) { + int r; + if (hw_ip >= AMDGPU_HW_IP_NUM) { DRM_ERROR("unknown HW IP type: %d\n", hw_ip); return -EINVAL; @@ -229,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, return -EINVAL; } - *entity = &ctx->entities[hw_ip][ring].entity; + if (ctx->entities[hw_ip][ring] == NULL) { + r = amdgpu_ctx_init_entity(ctx, hw_ip, ring); + if (r) + return r; + } + + *entity = &ctx->entities[hw_ip][ring]->entity; return 0; } @@ -269,14 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; - unsigned num_entities; 
- u32 i; + u32 i, j; ctx = container_of(ref, struct amdgpu_ctx, refcount); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + if (!ctx->entities[i][j]) + continue; - num_entities = amdgpu_ctx_total_num_entities(); - for (i = 0; i < num_entities; i++) - drm_sched_entity_destroy(&ctx->entities[0][i].entity); + drm_sched_entity_destroy(&ctx->entities[i][j]->entity); + } + } amdgpu_ctx_fini(ref); } @@ -506,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); enum drm_sched_priority ctx_prio; - unsigned i; + unsigned i, j; ctx->override_priority = priority; ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? ctx->init_priority : ctx->override_priority; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; - for (i = 0; i < num_entities; i++) { - struct drm_sched_entity *entity = &ctx->entities[0][i].entity; + if (!ctx->entities[i][j]) + continue; - drm_sched_entity_set_priority(entity, ctx_prio); + entity = &ctx->entities[i][j]->entity; + drm_sched_entity_set_priority(entity, ctx_prio); + } } } @@ -554,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; - uint32_t id, i; + uint32_t id, i, j; idp = &mgr->ctx_handles; mutex_lock(&mgr->lock); idr_for_each_entry(idp, ctx, id) { - for (i = 0; i < num_entities; i++) { - struct drm_sched_entity *entity; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; + + if (!ctx->entities[i][j]) + continue; - entity = &ctx->entities[0][i].entity; - timeout = drm_sched_entity_flush(entity, timeout); + entity = &ctx->entities[i][j]->entity; + timeout = drm_sched_entity_flush(entity, timeout); + } } } mutex_unlock(&mgr->lock); @@ -576,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; - uint32_t id, i; + uint32_t id, i, j; idp = &mgr->ctx_handles; @@ -589,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) continue; } - for (i = 0; i < num_entities; i++) - drm_sched_entity_fini(&ctx->entities[0][i].entity); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; + + if (!ctx->entities[i][j]) + continue; + + entity = &ctx->entities[i][j]->entity; + drm_sched_entity_fini(entity); + } + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 4ad90a44dc3c..de490f183af2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -29,10 +29,12 @@ struct drm_device; struct drm_file; struct amdgpu_fpriv; +#define AMDGPU_MAX_ENTITY_NUM 4 + struct amdgpu_ctx_entity { uint64_t sequence; - struct dma_fence **fences; struct drm_sched_entity entity; + struct dma_fence *fences[]; }; struct amdgpu_ctx { @@ -42,8 +44,7 @@ struct amdgpu_ctx { unsigned reset_counter_query; uint32_t vram_lost_counter; spinlock_t ring_lock; - 
struct dma_fence **fences; - struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM]; + struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM]; bool preamble_presented; enum drm_sched_priority init_priority; enum drm_sched_priority override_priority; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 53d882000101..39cd545976b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -216,8 +216,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, { uint32_t ret; - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_rreg(adev, reg); + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + return amdgpu_kiq_rreg(adev, reg); if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); @@ -294,8 +294,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, adev->last_mm_index = v; } - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_wreg(adev, reg, v); + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + return amdgpu_kiq_wreg(adev, reg, v); if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); @@ -985,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) { struct sysinfo si; - bool is_os_64 = (sizeof(void *) == 8) ? true : false; + bool is_os_64 = (sizeof(void *) == 8); uint64_t total_memory; uint64_t dram_size_seven_GB = 0x1B8000000; uint64_t dram_size_three_GB = 0xB8000000; @@ -3760,6 +3760,10 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_RAVEN: case CHIP_ARCTURUS: + case CHIP_RENOIR: + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: break; default: goto disabled; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h index 61a26c15c8dd..057f6ea645d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h @@ -52,6 +52,9 @@ struct amdgpu_df_funcs { uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, uint32_t ficadl_val, uint32_t ficadh_val); + uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev, + uint32_t df_inst); + uint32_t (*get_df_inst_id)(struct amdgpu_device *adev); }; struct amdgpu_df { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index b88b8b82bb64..0f960b498792 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, spin_lock_init(&kiq->ring_lock); - r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs); + r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs); if (r) return r; @@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) { - amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); + amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs); amdgpu_ring_fini(ring); } @@ -658,3 +658,95 @@ int 
amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 	return 0;
 }
+
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_rreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_rreg(ring, reg);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* Don't wait anymore for the gpu reset case because this way may
+	 * block the gpu_recover() routine forever, e.g. this kiq register
+	 * read is triggered in TTM and ttm_bo_lock_delayed_workqueue()
+	 * will never return if we keep waiting here, which causes
+	 * gpu_recover() to hang.
+	 *
+	 * Also don't wait anymore for IRQ context.
+	 */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_read;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_read;
+
+	return adev->wb.wb[kiq->reg_val_offs];
+
+failed_kiq_read:
+	pr_err("failed to read reg:%x\n", reg);
+	return ~0;
+}
+
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_wreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_wreg(ring, reg, v);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* Don't wait anymore for the gpu reset case because this way may
+	 * block the gpu_recover() routine forever, e.g. this kiq register
+	 * write is triggered in TTM and ttm_bo_lock_delayed_workqueue()
+	 * will never return if we keep waiting here, which causes
+	 * gpu_recover() to hang.
+	 *
+	 * Also don't wait anymore for IRQ context.
+	 */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_write;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_write;
+
+	return;
+
+failed_kiq_write:
+	pr_err("failed to write reg:%x\n", reg);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index af4bd279f42f..ca17ffb01301 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -94,6 +94,7 @@ struct amdgpu_kiq {
 	struct amdgpu_ring	ring;
 	struct amdgpu_irq_src	irq;
 	const struct kiq_pm4_funcs *pmf;
+	uint32_t			reg_val_offs;
 };
 
 /*
@@ -375,4 +376,6 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
 				  struct amdgpu_irq_src *source,
 				  struct amdgpu_iv_entry *entry);
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 86267baca07c..d3c27a3c43f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -60,11 +60,6 @@
  */
 #define AMDGPU_GMC_FAULT_TIMEOUT	5000ULL
 
-/*
- * Default stolen memory size, 1024 * 768 * 4
- */
-#define AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE	0x300000ULL
-
 struct firmware;
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 3265487b859f..611021514c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -172,6 +172,8 @@ struct psp_dtm_context {
 #define MEM_TRAIN_SYSTEM_SIGNATURE		0x54534942
 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES	0x1000
 #define GDDR6_MEM_TRAINING_OFFSET		0x8000
+/* Define the VRAM size that will be encroached by BIST training. */
+#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE	0x2000000
 
 enum psp_memory_training_init_flag {
 	PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 766be7f18282..cef94e2169fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -742,6 +742,20 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 	return 0;
 }
 
+uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
+{
+	uint32_t df_inst_id;
+
+	if ((!adev->df.funcs)                 ||
+	    (!adev->df.funcs->get_df_inst_id) ||
+	    (!adev->df.funcs->get_dram_base_addr))
+		return addr;
+
+	df_inst_id = adev->df.funcs->get_df_inst_id(adev);
+
+	return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
+}
+
 /* wrapper of psp_ras_trigger_error */
 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 		struct ras_inject_if *info)
@@ -759,6 +773,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 	if (!obj)
 		return -EINVAL;
 
+	/* Calculate XGMI relative offset */
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		block_info.address = get_xgmi_relative_phy_addr(adev,
+								block_info.address);
+	}
+
 	switch (info->head.block) {
 	case AMDGPU_RAS_BLOCK__GFX:
 		if (adev->gfx.funcs->ras_error_inject)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index ceb0dbf685f1..59ddba137946 100644
---
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index ceb0dbf685f1..59ddba137946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -652,7 +652,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 	if ((addr + (uint64_t)size) > (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
-		DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
+		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
 			  addr, lo, hi);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c4984c5fb2db..d6deb0eb1e15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -65,33 +65,33 @@
 /* 1 second timeout */
 #define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
 
-#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
-	({	WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) \
+	({	WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
 			UVD_DPG_LMA_CTL__MASK_EN_MASK | \
-			((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
 			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
 			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
-		RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \
+		RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \
 	})
 
-#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) \
 	do { \
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
-		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
 			UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
-			((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
 			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
 			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
 	} while (0)
 
-#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) \
+#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) \
 	({ \
 		uint32_t internal_reg_offset, addr; \
 		bool video_range, aon_range; \
 		\
-		addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
+		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
 		addr <<= 2; \
 		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \
 				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \
@@ -111,7 +111,7 @@
 
 #define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \
 	({ \
-		WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL, \
+		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
 			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
 			 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
 			 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 103033f96f13..adc813cde8e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -45,98 +45,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 	adev->pg_flags = 0;
 }
 
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
-{
-	signed long r, cnt = 0;
-	unsigned long flags;
-	uint32_t seq;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-	struct amdgpu_ring *ring = &kiq->ring;
-
-
BUG_ON(!ring->funcs->emit_rreg); - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_rreg(ring, reg); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. - * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) - goto failed_kiq_read; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq_read; - - return adev->wb.wb[adev->virt.reg_val_offs]; - -failed_kiq_read: - pr_err("failed to read reg:%x\n", reg); - return ~0; -} - -void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) -{ - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *ring = &kiq->ring; - - BUG_ON(!ring->funcs->emit_wreg); - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_wreg(ring, reg, v); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. 
- * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) - goto failed_kiq_write; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq_write; - - return; - -failed_kiq_write: - pr_err("failed to write reg:%x\n", reg); -} - void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 4d1ac7612967..daaf909d009a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -287,8 +287,6 @@ static inline bool is_virtual_machine(void) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); -uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); -void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t rreg1, uint32_t ref, uint32_t mask); diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c index d9cc746af5e6..847ca9b3ce4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c @@ -74,9 +74,9 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_VEGA20: case CHIP_RAVEN: athub_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); athub_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index ceb9aa4df0e7..921a69abda55 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -77,9 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev, case CHIP_NAVI14: case CHIP_NAVI12: athub_v2_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); athub_v2_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false);
+						       state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index f51326598a8c..5a1bd8ed1a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -27,6 +27,9 @@
 #include "df/df_3_6_offset.h"
 #include "df/df_3_6_sh_mask.h"
 
+#define DF_3_6_SMN_REG_INST_DIST	0x8
+#define DF_3_6_INST_CNT			8
+
 static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
 				       16, 32, 0, 0, 0, 2, 4, 8};
 
@@ -683,6 +686,58 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 	}
 }
 
+static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
+					   uint32_t df_inst)
+{
+	uint32_t base_addr_reg_val	= 0;
+	uint64_t base_addr		= 0;
+
+	base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
+					df_inst * DF_3_6_SMN_REG_INST_DIST);
+
+	if (REG_GET_FIELD(base_addr_reg_val,
+			  DF_CS_UMC_AON0_DramBaseAddress0,
+			  AddrRngVal) == 0) {
+		DRM_WARN("address range not valid");
+		return 0;
+	}
+
+	base_addr = REG_GET_FIELD(base_addr_reg_val,
+				  DF_CS_UMC_AON0_DramBaseAddress0,
+				  DramBaseAddr);
+
+	return base_addr << 28;
+}
+
+static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
+{
+	uint32_t xgmi_node_id	= 0;
+	uint32_t df_inst_id	= 0;
+
+	/* Walk through DF dst nodes to find current XGMI node */
+	for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
+
+		xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
+					   df_inst_id * DF_3_6_SMN_REG_INST_DIST);
+		xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
+					     DF_CS_UMC_AON0_DramLimitAddress0,
+					     DstFabricID);
+
+		/* TODO: establish reason dest fabric id is offset by 7 */
+		xgmi_node_id = xgmi_node_id >> 7;
+
+		if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
+			break;
+	}
+
+	if (df_inst_id == DF_3_6_INST_CNT) {
+		DRM_WARN("can't match df dst id with gpu node");
+		return 0;
+	}
+
+	return df_inst_id;
+}
+
 const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.sw_init = df_v3_6_sw_init,
 	.sw_fini = df_v3_6_sw_fini,
@@ -696,5 +751,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.pmc_stop = df_v3_6_pmc_stop,
 	.pmc_get_count = df_v3_6_pmc_get_count,
 	.get_fica = df_v3_6_get_fica,
-	.set_fica = df_v3_6_set_fica
+	.set_fica = df_v3_6_set_fica,
+	.get_dram_base_addr = df_v3_6_get_dram_base_addr,
+	.get_df_inst_id = df_v3_6_get_df_inst_id
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 874f641de281..1785fdad6ecb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -368,7 +368,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
 	.map_queues_size = 7,
 	.unmap_queues_size = 6,
 	.query_status_size = 7,
-	.invalidate_tlbs_size = 12,
+	.invalidate_tlbs_size = 2,
 };
 
 static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -4229,7 +4229,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 				    enum amd_powergating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_PG_STATE_GATE);
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
@@ -4255,7 +4255,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		gfx_v10_0_update_gfx_clock_gating(adev,
-						  state == AMD_CG_STATE_GATE ? true : false);
+						  state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
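
The df_v3_6 helpers added above index per-instance copies of an SMN register at a fixed stride (DF_3_6_SMN_REG_INST_DIST) and compare the DstFabricID field, shifted right by 7 as the in-code TODO notes, against the GPU's physical node id. A self-contained sketch of that walk, with a fabricated fake_rreg() standing in for RREG32_PCIE plus the field decode:

#include <stdint.h>
#include <stdio.h>

#define SMN_REG_INST_DIST	0x8	/* cf. DF_3_6_SMN_REG_INST_DIST */
#define INST_CNT		8	/* cf. DF_3_6_INST_CNT */

/* pretend instance i reports DstFabricID == i << 7 */
static uint32_t fake_rreg(uint32_t smn_addr)
{
	return ((smn_addr / SMN_REG_INST_DIST) & 0x7) << 7;
}

static uint32_t find_df_inst(uint32_t base, uint32_t physical_node_id)
{
	uint32_t i;

	for (i = 0; i < INST_CNT; i++) {
		uint32_t id = fake_rreg(base + i * SMN_REG_INST_DIST) >> 7;

		if (id == physical_node_id)
			return i;
	}
	return 0;	/* the driver warns and falls back to instance 0 */
}

int main(void)
{
	printf("df instance for node 3: %u\n", find_df_inst(0, 3));
	return 0;
}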
@@ -4737,6 +4737,7 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
 	amdgpu_ring_write(ring, 0 |	/* src: register*/
@@ -4745,9 +4746,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 }
 
 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 46f0533ba43f..fa245973de12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6449,6 +6449,7 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
 static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
 	amdgpu_ring_write(ring, 0 |	/* src: register*/
@@ -6457,9 +6458,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
-				adev->virt.reg_val_offs * 4));
+				kiq->reg_val_offs * 4));
 }
 
 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 46ab46757b25..90f64b8bc358 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -48,6 +48,8 @@
 
 #include "amdgpu_ras.h"
 
+#include "gfx_v9_4.h"
+
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -736,6 +738,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 					  void *ras_error_status);
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
 				     void *inject_if);
 
@@ -859,7 +862,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
 	.map_queues_size = 7,
 	.unmap_queues_size = 6,
 	.query_status_size = 7,
-	.invalidate_tlbs_size = 12,
+	.invalidate_tlbs_size = 2,
 };
 
 static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -1159,18 +1162,54 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	}
 }
 
+struct amdgpu_gfxoff_quirk {
+	u16 chip_vendor;
+	u16 chip_device;
+	u16 subsys_vendor;
+	u16 subsys_device;
+	u8 revision;
+};
+
+static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
+	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+	{ 0, 0, 0, 0, 0 },
+};
+
+static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
+{
+	const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
+
+	while (p && p->chip_device != 0) {
+		if (pdev->vendor == p->chip_vendor &&
+		    pdev->device == p->chip_device &&
+		    pdev->subsystem_vendor == p->subsys_vendor &&
+		    pdev->subsystem_device == p->subsys_device &&
+		    pdev->revision == p->revision) {
+			return true;
+		}
+		++p;
+	}
+	return false;
+}
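
gfx_v9_0_should_disable_gfxoff() above walks a sentinel-terminated quirk table, comparing vendor, device, subsystem and revision IDs against the offending board. The same shape as a stand-alone sketch; the one real entry is the Raven board from the patch, while the struct and function names here are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct quirk {
	uint16_t vid, did, svid, sdid;
	uint8_t rev;
};

static const struct quirk quirks[] = {
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },	/* all-zero sentinel ends the table */
};

static bool should_disable(const struct quirk *dev)
{
	const struct quirk *p;

	for (p = quirks; p->did != 0; p++)	/* stop at the sentinel */
		if (p->vid == dev->vid && p->did == dev->did &&
		    p->svid == dev->svid && p->sdid == dev->sdid &&
		    p->rev == dev->rev)
			return true;
	return false;
}

int main(void)
{
	struct quirk raven = { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 };

	printf("disable gfxoff: %d\n", should_disable(&raven));
	return 0;
}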
+
 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 {
+	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		if (!(adev->rev_id >= 0x8 ||
-		      adev->pdev->device == 0x15d8) &&
-		    (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
-		     !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
+		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+		    ((adev->gfx.rlc_fw_version != 106 &&
+		      adev->gfx.rlc_fw_version < 531) ||
+		     (adev->gfx.rlc_fw_version == 53815) ||
+		     (adev->gfx.rlc_feature_version < 1) ||
+		     !adev->gfx.rlc.is_rlc_v2_1))
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
@@ -1949,6 +1988,17 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
 	.query_ras_error_count = &gfx_v9_0_query_ras_error_count
 };
 
+static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
+	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+	.select_se_sh = &gfx_v9_0_select_se_sh,
+	.read_wave_data = &gfx_v9_0_read_wave_data,
+	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+	.ras_error_inject = &gfx_v9_4_ras_error_inject,
+	.query_ras_error_count = &gfx_v9_4_query_ras_error_count
+};
+
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
@@ -2000,6 +2050,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_ARCTURUS:
+		adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2390,6 +2441,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
 	}
 }
 
+static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	switch (adev->asic_type) {
+	case CHIP_ARCTURUS:
+		tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
+		tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
+				    DISABLE_BARRIER_WAITCNT, 1);
+		WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 {
 	u32 tmp;
@@ -2435,6 +2502,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 
 	gfx_v9_0_init_compute_vmid(adev);
 	gfx_v9_0_init_gds_vmid(adev);
+	gfx_v9_0_init_sq_config(adev);
 }
 
 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -4029,7 +4097,7 @@ static const struct soc15_reg_entry sgpr2_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
 };
 
-static const struct soc15_reg_entry sec_ded_counter_registers[] = {
+static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
@@ -4118,7 +4186,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib; struct dma_fence *f = NULL; - int r, i, j, k; + int r, i; unsigned total_size, vgpr_offset, sgpr_offset; u64 gpu_addr; @@ -4264,18 +4332,17 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) goto fail; } - /* read back registers to clear the counters */ - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { - gfx_v9_0_select_se_sh(adev, j, 0x0, k); - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); - } - } + switch (adev->asic_type) + { + case CHIP_VEGA20: + gfx_v9_0_clear_ras_edc_counter(adev); + break; + case CHIP_ARCTURUS: + gfx_v9_4_clear_ras_edc_counter(adev); + break; + default: + break; } - WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); - mutex_unlock(&adev->grbm_idx_mutex); fail: amdgpu_ib_free(adev, &ib, NULL); @@ -4638,7 +4705,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_PG_STATE_GATE) ? true : false; + bool enable = (state == AMD_PG_STATE_GATE); switch (adev->asic_type) { case CHIP_RAVEN: @@ -4700,7 +4767,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, case CHIP_ARCTURUS: case CHIP_RENOIR: gfx_v9_0_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -4717,12 +4784,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) *flags = 0; /* AMD_CG_SUPPORT_GFX_MGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE)); if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) *flags |= AMD_CG_SUPPORT_GFX_MGCG; /* AMD_CG_SUPPORT_GFX_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)); if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CGCG; @@ -4731,18 +4798,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) *flags |= AMD_CG_SUPPORT_GFX_CGLS; /* AMD_CG_SUPPORT_GFX_RLC_LS */ - data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL)); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; /* AMD_CG_SUPPORT_GFX_CP_LS */ - data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL)); if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; if (adev->asic_type != CHIP_ARCTURUS) { /* AMD_CG_SUPPORT_GFX_3D_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; @@ -5213,6 +5280,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -5221,9 +5289,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, 
reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); } static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, @@ -5545,7 +5613,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, } -static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = { +static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = { { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) @@ -5993,7 +6061,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, int ret; struct ta_ras_trigger_error_input block_info = { 0 }; - if (adev->asic_type != CHIP_VEGA20) + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return -EINVAL; if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks)) @@ -6118,7 +6186,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); - for (i = 0; i < 16; i++) { + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); @@ -6137,7 +6205,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 7; i++) { + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); @@ -6158,7 +6226,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 4; i++) { + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); @@ -6170,7 +6238,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 32; i++) { + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); @@ -6197,36 +6265,36 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, return 0; } -static int __get_ras_error_count(const struct soc15_reg_entry *reg, +static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, uint32_t se_id, uint32_t inst_id, uint32_t value, uint32_t *sec_count, uint32_t *ded_count) { uint32_t i; uint32_t sec_cnt, ded_cnt; - for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) { - if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset || - gc_ras_fields_vg20[i].seg != reg->seg || - gc_ras_fields_vg20[i].inst != reg->inst) + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) { + if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_0_ras_fields[i].seg != reg->seg || + gfx_v9_0_ras_fields[i].inst != reg->inst) continue; sec_cnt = (value & - gc_ras_fields_vg20[i].sec_count_mask) >> - gc_ras_fields_vg20[i].sec_count_shift; + gfx_v9_0_ras_fields[i].sec_count_mask) >> + gfx_v9_0_ras_fields[i].sec_count_shift; if (sec_cnt) { DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", - gc_ras_fields_vg20[i].name, + gfx_v9_0_ras_fields[i].name, se_id, inst_id, sec_cnt); *sec_count += sec_cnt; } ded_cnt = (value & - gc_ras_fields_vg20[i].ded_count_mask) >> - 
gc_ras_fields_vg20[i].ded_count_shift;
+			gfx_v9_0_ras_fields[i].ded_count_mask) >>
+			gfx_v9_0_ras_fields[i].ded_count_shift;
 		if (ded_cnt) {
 			DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
-				gc_ras_fields_vg20[i].name,
+				gfx_v9_0_ras_fields[i].name,
 				se_id, inst_id,
 				ded_cnt);
 			*ded_count += ded_cnt;
@@ -6236,6 +6304,58 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg,
 	return 0;
 }
 
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+	int i, j, k;
+
+	/* read back registers to clear the counters */
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
+				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
+			}
+		}
+	}
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+	}
+
+	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+}
+
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 					  void *ras_error_status)
 {
@@ -6244,7 +6364,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 	uint32_t i, j, k;
 	uint32_t reg_value;
 
-	if (adev->asic_type != CHIP_VEGA20)
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return -EINVAL;
 
 	err_data->ue_count = 0;
@@ -6252,14 +6372,14 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 
 	mutex_lock(&adev->grbm_idx_mutex);
 
-	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
-		for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
-			for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
 				gfx_v9_0_select_se_sh(adev, j, 0, k);
 				reg_value =
-					RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
 				if (reg_value)
-					__get_ras_error_count(&sec_ded_counter_registers[i],
+					gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
 							j, k, reg_value,
 							&sec_count, &ded_count);
 			}
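
gfx_v9_0_ras_error_count() (the renamed __get_ras_error_count() above) extracts SEC and DED counts from a counter register by masking and shifting per field, which is what the SOC15_REG_FIELD() pairs in the tables encode. A tiny sketch with a fabricated field layout and register value:

#include <stdint.h>
#include <stdio.h>

/* made-up layout: SEC count in bits 7:0, DED count in bits 15:8 */
struct ras_field {
	uint32_t sec_mask, sec_shift;
	uint32_t ded_mask, ded_shift;
};

int main(void)
{
	struct ras_field f = { 0x000000ff, 0, 0x0000ff00, 8 };
	uint32_t value = 0x0302;	/* encodes 3 DED, 2 SEC */
	uint32_t sec = (value & f.sec_mask) >> f.sec_shift;
	uint32_t ded = (value & f.ded_mask) >> f.ded_shift;

	printf("SEC %u, DED %u\n", sec, ded);
	return 0;
}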
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
new file mode 100644
index 000000000000..f099f13d7f1e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -0,0 +1,978 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
+
+#include "gc/gc_9_4_1_offset.h"
+#include "gc/gc_9_4_1_sh_mask.h"
+#include "soc15_common.h"
+
+#include "gfx_v9_4.h"
+#include "amdgpu_ras.h"
+
+static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
+	/* CPC */
+	{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 },
+	/* DC */
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 },
+	/* CPF */
+	{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 },
+	/* GDS */
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
+	{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
+	/* SPI */
+	{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
+	/* SQ */
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+	/* SQC */
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 },
+	/* TA */
+	{ SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 },
+	/* TCA */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 },
+	/* TCC */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 },
+	/* TCI */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 },
+	/* TCP */
+	{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0,
mmTCP_EDC_CNT), 0, 4, 16 }, + /* TD */ + { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 }, + /* GCEA */ + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 }, + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 }, + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 }, + /* RLC */ + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 }, +}; + +static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance) +{ + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, + instance); + + if (se_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + + WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data); +} + +static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = { + /* CPC */ + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) }, + { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) }, + { "CPC_DC_RESTORE_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) }, + { "CPC_DC_RESTORE_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) }, + + /* CPF */ + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) }, + { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) }, + + /* GDS */ + { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) }, + { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + 
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) }, + { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE0_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE1_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE2_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE3_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, + + /* SPI */ + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) }, + { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) }, + { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) }, + { "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) }, + { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) }, + + /* SQ */ + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) }, + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + + /* SQC */ + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, + { 
"SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_HIT_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + DATA_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, 
INST_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_HIT_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + DATA_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, + + /* TA */ + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) }, + + /* TCA */ + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) }, + + /* TCC */ + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT), + 
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) }, + + /* TCI */ + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) }, + + /* TCP */ + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, + + /* TD */ + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) }, + + /* EA */ + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + 
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, + { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) }, + { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) }, + { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) }, + { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) }, + { 
"EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) }, + { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) }, + { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) }, + { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) }, + { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) }, + + /* RLC */ + { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) }, + { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) }, + { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) }, + { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) }, + { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) }, + { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) }, + { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) }, + { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, 
RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) }, +}; + +static const char * const vml2_mems[] = { + "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_0_4K_MEM0", + "UTC_VML2_BANK_CACHE_0_4K_MEM1", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_1_4K_MEM0", + "UTC_VML2_BANK_CACHE_1_4K_MEM1", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_2_4K_MEM0", + "UTC_VML2_BANK_CACHE_2_4K_MEM1", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_3_4K_MEM0", + "UTC_VML2_BANK_CACHE_3_4K_MEM1", + "UTC_VML2_IFIFO_GROUP0", + "UTC_VML2_IFIFO_GROUP1", + "UTC_VML2_IFIFO_GROUP2", + "UTC_VML2_IFIFO_GROUP3", + "UTC_VML2_IFIFO_GROUP4", + "UTC_VML2_IFIFO_GROUP5", + "UTC_VML2_IFIFO_GROUP6", + "UTC_VML2_IFIFO_GROUP7", + "UTC_VML2_IFIFO_GROUP8", + "UTC_VML2_IFIFO_GROUP9", + "UTC_VML2_IFIFO_GROUP10", + "UTC_VML2_IFIFO_GROUP11", + "UTC_VML2_IFIFO_GROUP12", + "UTC_VML2_IFIFO_GROUP13", + "UTC_VML2_IFIFO_GROUP14", + "UTC_VML2_IFIFO_GROUP15", + "UTC_VML2_IFIFO_GROUP16", + "UTC_VML2_IFIFO_GROUP17", + "UTC_VML2_IFIFO_GROUP18", + "UTC_VML2_IFIFO_GROUP19", + "UTC_VML2_IFIFO_GROUP20", + "UTC_VML2_IFIFO_GROUP21", + "UTC_VML2_IFIFO_GROUP22", + "UTC_VML2_IFIFO_GROUP23", + "UTC_VML2_IFIFO_GROUP24", +}; + +static const char * const vml2_walker_mems[] = { + "UTC_VML2_CACHE_PDE0_MEM0", + "UTC_VML2_CACHE_PDE0_MEM1", + "UTC_VML2_CACHE_PDE1_MEM0", + "UTC_VML2_CACHE_PDE1_MEM1", + "UTC_VML2_CACHE_PDE2_MEM0", + "UTC_VML2_CACHE_PDE2_MEM1", + "UTC_VML2_RDIF_ARADDRS", + "UTC_VML2_RDIF_LOG_FIFO", + "UTC_VML2_QUEUE_REQ", + "UTC_VML2_QUEUE_RET", +}; + +static const char * const utcl2_router_mems[] = { + "UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0", + "UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1", + "UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2", + "UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3", + "UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4", + "UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5", + "UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6", + "UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7", + "UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8", + "UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9", + "UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10", + "UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11", + "UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12", + "UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13", + "UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14", + "UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15", + "UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16", + "UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17", + "UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18", + "UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19", + "UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20", + "UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21", + "UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22", + "UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23", + "UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24", +}; + +static const char * const atc_l2_cache_2m_mems[] = { + 
"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM", +}; + +static const char * const atc_l2_cache_4k_mems[] = { + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7", +}; + +static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, + struct ras_err_data *err_data) +{ + uint32_t i, data; + uint32_t sec_count, ded_count; + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); + + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_walker_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_walker_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); + + sec_count = 
REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + utcl2_router_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + utcl2_router_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); + + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_2m_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_2m_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); + + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_4k_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_4k_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + + return 0; +} + +static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t se_id, uint32_t inst_id, + uint32_t value, uint32_t *sec_count, + uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) { + if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_4_ras_fields[i].seg != reg->seg || + gfx_v9_4_ras_fields[i].inst != reg->inst) + continue; + + sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >> + gfx_v9_4_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + gfx_v9_4_ras_fields[i].name, se_id, inst_id, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >> + gfx_v9_4_ras_fields[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", + gfx_v9_4_ras_fields[i].name, se_id, inst_id, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i, j, k; + uint32_t reg_value; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < 
gfx_v9_4_edc_counter_regs[i].instance; + k++) { + gfx_v9_4_select_se_sh(adev, j, 0, k); + reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_edc_counter_regs[i])); + if (reg_value) + gfx_v9_4_ras_error_count( + &gfx_v9_4_edc_counter_regs[i], + j, k, reg_value, &sec_count, + &ded_count); + } + } + } + + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; + + gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + gfx_v9_4_query_utc_edc_status(adev, err_data); + + return 0; +} + +void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev) +{ + int i, j, k; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance; + k++) { + gfx_v9_4_select_se_sh(adev, j, 0x0, k); + RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_edc_counter_regs[i])); + } + } + } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); + mutex_unlock(&adev->grbm_idx_mutex); + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); + + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); + } + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); +} + +int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if) +{ + struct ras_inject_if *info = (struct ras_inject_if *)inject_if; + int ret; + struct ta_ras_trigger_error_input block_info = { 0 }; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); + block_info.sub_block_index = info->head.sub_block_index; + block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); + block_info.address = info->address; + block_info.value = info->value; + + mutex_lock(&adev->grbm_idx_mutex); + ret = psp_ras_trigger_error(&adev->psp, &block_info); + mutex_unlock(&adev->grbm_idx_mutex); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h new file mode 100644 index 
000000000000..2e3f6f755ad4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h @@ -0,0 +1,35 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GFX_V9_4_H__ +#define __GFX_V9_4_H__ + +void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev); + +int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); + +int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, + void *inject_if); + +#endif /* __GFX_V9_4_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index bbede09983e1..9775eca6fe43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -262,7 +262,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 tmp; /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; @@ -289,7 +290,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -418,7 +419,8 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (amdgpu_emu_mode == 0 && ring->sched.ready) { spin_lock(&adev->gfx.kiq.ring_lock); - amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size); + /* 2 dwords flush + 8 dwords fence */ + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); amdgpu_fence_emit_polling(ring, &seq); @@ -441,10 +443,10 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (all_hub) { for (i = 0; i < adev->num_vmhubs; i++) gmc_v10_0_flush_gpu_tlb(adev, vmid, - i, 0); + i, flush_type); } else { gmc_v10_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); + AMDGPU_GFXHUB_0, flush_type); } break; } @@ -640,12 +642,7 @@ static int gmc_v10_0_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - /* - * Can't free the stolen VGA memory when it might be used for memory - * training again. 
- */ - if (!adev->fw_vram_usage.mem_train_support) - amdgpu_bo_late_init(adev); + amdgpu_bo_late_init(adev); r = amdgpu_gmc_allocate_vm_inv_eng(adev); if (r) @@ -829,19 +826,6 @@ static int gmc_v10_0_sw_init(void *handle) adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev); - /* - * In dual GPUs scenario, stolen_size is assigned to zero on the - * secondary GPU, since there is no pre-OS console using that memory. - * Then the bottom region of VRAM was allocated as GTT, unfortunately a - * small region of bottom VRAM was encroached by UMC firmware during - * GDDR6 BIST training, this cause page fault. - * The page fault can be fixed by forcing stolen_size to 3MB, then the - * bottom region of VRAM was allocated as stolen memory, GTT corruption - * avoid. - */ - adev->gmc.stolen_size = max(adev->gmc.stolen_size, - AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE); - /* Memory manager */ r = amdgpu_bo_init(adev); if (r) @@ -881,13 +865,6 @@ static void gmc_v10_0_gart_fini(struct amdgpu_device *adev) static int gmc_v10_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - void *stolen_vga_buf; - - /* - * Free the stolen memory if it wasn't already freed in late_init - * because of memory training. - */ - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); amdgpu_vm_manager_fini(adev); gmc_v10_0_gart_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 19d5b133e1d7..9da9596a3638 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if (adev->flags & AMD_IS_APU && + adev->gmc.real_vram_size > adev->gmc.aper_size) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 40a496804356..90216abf14a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -476,13 +476,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); const unsigned eng = 17; - u32 j, tmp; + u32 j, inv_req, tmp; struct amdgpu_vmhub *hub; BUG_ON(vmhub >= adev->num_vmhubs); hub = &adev->vmhub[vmhub]; - tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal @@ -493,7 +493,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t req = hub->vm_inv_eng0_req + eng; uint32_t ack = hub->vm_inv_eng0_ack + eng; - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 1 << vmid); return; } @@ -521,7 +521,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -578,7 +578,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (ring->sched.ready) { spin_lock(&adev->gfx.kiq.ring_lock); - 
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size); + /* 2 dwords flush + 8 dwords fence */ + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); amdgpu_fence_emit_polling(ring, &seq); @@ -601,10 +602,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (all_hub) { for (i = 0; i < adev->num_vmhubs; i++) gmc_v9_0_flush_gpu_tlb(adev, vmid, - i, 0); + i, flush_type); } else { gmc_v9_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); + AMDGPU_GFXHUB_0, flush_type); } break; } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index a78292d84854..ff2e6e1ccde7 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -690,7 +690,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { if (jpeg_v2_0_is_idle(handle)) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 2c58939e6ad0..c6d046df4b70 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -469,7 +469,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index adfd8a6171eb..49a3a56ec017 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -523,9 +523,9 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_RAVEN: case CHIP_RENOIR: mmhub_v1_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v1_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index a7cb185d639a..bde189680521 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -427,9 +427,9 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, case CHIP_NAVI14: case CHIP_NAVI12: mmhub_v2_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v2_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 5c42387c9274..a5281df8d84f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -625,9 +625,9 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_ARCTURUS: mmhub_v9_4_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v9_4_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -663,6 +663,7 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) } static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { + /* MMHUB Range 0 */ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), @@ -751,6 +752,24 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { 0, 0, SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), }, + { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 1 */ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), @@ -838,16 +857,686 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHAB Range 2*/ + { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 3 */ + { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 4 */ + { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_CMDMEM", 
SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 5 */ + { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, 
GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 6 */ + { "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + 
SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 7*/ + { "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, 
RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT), + 
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT), } }; static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = { - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 }, }; static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg, diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index f737ce459c28..cf557a428298 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -426,7 +426,7 @@ static int navi10_ih_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; navi10_ih_update_clockgating_state(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 2e0f8933410e..2d1bebdf1603 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -950,13 +950,13 @@ static int nv_common_set_clockgating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); nv_update_hdp_mem_power_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); nv_update_hdp_clock_gating(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 685dd9754c67..0829188c1a5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -22,6 +22,7 @@ #include <linux/firmware.h> #include <linux/module.h> +#include <linux/vmalloc.h> #include "amdgpu.h" #include "amdgpu_psp.h" @@ -971,10 +972,13 @@ Err_out: */ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) { - int ret; - uint32_t p2c_header[4]; struct psp_memory_training_context *ctx = &psp->mem_train_ctx; uint32_t *pcache = (uint32_t*)ctx->sys_cache; + struct amdgpu_device *adev = psp->adev; + uint32_t p2c_header[4]; + uint32_t sz; + void *buf; + int ret; if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { DRM_DEBUG("Memory training is not supported.\n"); @@ -989,7 +993,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) return 0; } - amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); + amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", pcache[0], pcache[1], pcache[2], pcache[3], p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); @@ -1026,11 +1030,38 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) DRM_DEBUG("Memory training ops:%x.\n", ops); if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + /* + * Long traing will encroach certain mount of bottom VRAM, + * saving the content of this bottom VRAM to system memory + * before training, and restoring it after training to avoid + * VRAM corruption. + */ + sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE; + + if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) { + DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n", + adev->gmc.visible_vram_size, + adev->mman.aper_base_kaddr); + return -EINVAL; + } + + buf = vmalloc(sz); + if (!buf) { + DRM_ERROR("failed to allocate system memory.\n"); + return -ENOMEM; + } + + memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); if (ret) { DRM_ERROR("Send long training msg failed.\n"); + vfree(buf); return ret; } + + memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); + adev->nbio.funcs->hdp_flush(adev, NULL); + vfree(buf); } if (ops & PSP_MEM_TRAIN_SAVE) { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 27c7001be1ee..e55884d204bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2176,9 +2176,9 @@ static int sdma_v4_0_set_clockgating_state(void *handle, case CHIP_ARCTURUS: case CHIP_RENOIR: sdma_v4_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); sdma_v4_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 4c6bf1f8a528..67b9830b7c7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1525,9 +1525,9 @@ static int sdma_v5_0_set_clockgating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: sdma_v5_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); sdma_v5_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 9aac9f9c50bb..42d5601b6bf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -648,7 +648,7 @@ static int si_dma_set_clockgating_state(void *handle, bool enable; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - enable = (state == AMD_CG_STATE_GATE) ? true : false; + enable = (state == AMD_CG_STATE_GATE); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { for (i = 0; i < adev->sdma.num_instances; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 317803f6a561..15f3424a1ff7 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -537,6 +537,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { + /* original raven doesn't have full asic reset */ + if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8) + return 0; + switch (soc15_asic_reset_method(adev)) { case AMD_RESET_METHOD_BACO: if (!adev->in_suspend) @@ -1467,38 +1471,38 @@ static int soc15_common_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_rom_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->df.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; case CHIP_RAVEN: case CHIP_RENOIR: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_rom_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; case CHIP_ARCTURUS: soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 01e62fb8e6e0..0fa8aae2d78e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -763,7 +763,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 217084d56ab8..e0aadcaf6c8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1421,7 +1421,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 475ae68f38f5..217db187207c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -739,7 +739,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 683701cf7270..3fd102efb7af 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -887,7 +887,7 @@ static int vce_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; if ((adev->asic_type == CHIP_POLARIS10) || diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index e654938f6cca..1a24fadd30e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1346,7 +1346,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index f4db8af6536b..4f7216788f11 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1213,7 +1213,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ @@ -1624,7 +1624,7 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) +int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; uint32_t tmp = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h index ef749b02ded9..6c9de1882428 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h @@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring); extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring); extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index c8b63d57a541..70fae7977f8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -435,88 +435,88 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { if (!indirect) { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } offset = 0; } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); offset = size; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); } if (!indirect) WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + UVD, 0, 
mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); else WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); /* cache window 1: stack */ if (!indirect) { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); /* cache window 2: context */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); /* non-cache window */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } /** @@ -670,19 +670,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev, UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__MMSCH_MODE_MASK); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); + UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); /* turn off clock gating */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect); + UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); /* turn on SUVD clock gating */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); + UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); /* turn on sw mode in UVD_SUVD_CGC_CTRL */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); + UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); } /** @@ -772,11 +772,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); + UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); /* disable master interupt */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect); + UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect); /* setup mmUVD_LMI_CTRL */ tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | @@ -788,28 +788,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 0x00100000L); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect); + UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MPC_CNTL), + UVD, 0, mmUVD_MPC_CNTL), 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MPC_SET_MUXA0), + UVD, 0, mmUVD_MPC_SET_MUXA0), ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MPC_SET_MUXB0), + UVD, 0, mmUVD_MPC_SET_MUXB0), ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MPC_SET_MUX), + UVD, 0, mmUVD_MPC_SET_MUX), ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); @@ -817,26 +817,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect); + UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, 
indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); + UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); /* enable LMI MC and UMC channels */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect); + UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect); /* unblock VCPU register access */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect); + UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect); tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); + UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); /* enable master interrupt */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MASTINT_EN), + UVD, 0, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) @@ -891,8 +891,10 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) - return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + continue; + } /* disable register anti-hang mechanism */ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0, @@ -903,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp); } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return 0; + /*SW clock gating */ vcn_v2_5_disable_clock_gating(adev); @@ -1294,10 +1299,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { r = vcn_v2_5_stop_dpg_mode(adev, i); - goto power_off; + continue; } /* wait for vcn idle */ @@ -1349,7 +1353,6 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } -power_off: if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, false); @@ -1488,7 +1491,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { .emit_ib = vcn_v2_0_dec_ring_emit_ib, .emit_fence = vcn_v2_0_dec_ring_emit_fence, .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_dec_ring_test_ring, + .test_ring = vcn_v2_0_dec_ring_test_ring, .test_ib = amdgpu_vcn_dec_ring_test_ib, .insert_nop = vcn_v2_0_dec_ring_insert_nop, .insert_start = vcn_v2_0_dec_ring_insert_start, @@ -1663,7 +1666,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (amdgpu_sriov_vf(adev)) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index d9e331084ea0..407c6093c2ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -717,7 +717,7 @@ static int vega10_ih_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; vega10_ih_update_clockgating_state(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2870553a2ce0..80d22bf702e8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1237,16 +1237,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, list_add(&q->list, &qpd->queues_list); qpd->queue_count++; + + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + dqm->sdma_queue_count++; + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) + dqm->xgmi_sdma_queue_count++; + if (q->properties.is_active) { dqm->queue_count++; retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); } - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) - dqm->sdma_queue_count++; - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) - dqm->xgmi_sdma_queue_count++; /* * Unconditionally increment this counter, regardless of the queue's * type or whether the queue is active. diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9402374d2466..279541517a99 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -940,14 +940,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } - dc_hardware_init(adev->dm.dc); - r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); goto error; } + dc_hardware_init(adev->dm.dc); + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -7759,24 +7759,27 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, struct drm_crtc_state *new_crtc_state, *old_crtc_state; struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state; struct dc_stream_status *status = NULL; - - struct dc_surface_update *updates; enum surface_update_type update_type = UPDATE_TYPE_FAST; + struct surface_info_bundle { + struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_plane_info plane_infos[MAX_SURFACES]; + struct dc_scaling_info scaling_infos[MAX_SURFACES]; + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; + struct dc_stream_update stream_update; + } *bundle; - updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); - if (!updates) { - DRM_ERROR("Failed to allocate plane updates\n"); + if (!bundle) { + DRM_ERROR("Failed to allocate update bundle\n"); /* Set type to FULL to avoid crashing in DC*/ update_type = UPDATE_TYPE_FULL; goto cleanup; } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct dc_scaling_info scaling_info; - struct dc_stream_update stream_update; - memset(&stream_update, 0, sizeof(stream_update)); + memset(bundle, 0, sizeof(struct surface_info_bundle)); new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -7793,8 +7796,9 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { const struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(new_plane_state->fb); - struct dc_plane_info plane_info; - struct dc_flip_addrs flip_addr; + struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane]; + struct dc_flip_addrs *flip_addr = 
&bundle->flip_addrs[num_plane]; + struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane]; uint64_t tiling_flags; new_plane_crtc = new_plane_state->crtc; @@ -7812,49 +7816,48 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, if (crtc != new_plane_crtc) continue; - updates[num_plane].surface = new_dm_plane_state->dc_state; + bundle->surface_updates[num_plane].surface = + new_dm_plane_state->dc_state; if (new_crtc_state->mode_changed) { - stream_update.dst = new_dm_crtc_state->stream->dst; - stream_update.src = new_dm_crtc_state->stream->src; + bundle->stream_update.dst = new_dm_crtc_state->stream->dst; + bundle->stream_update.src = new_dm_crtc_state->stream->src; } if (new_crtc_state->color_mgmt_changed) { - updates[num_plane].gamma = + bundle->surface_updates[num_plane].gamma = new_dm_plane_state->dc_state->gamma_correction; - updates[num_plane].in_transfer_func = + bundle->surface_updates[num_plane].in_transfer_func = new_dm_plane_state->dc_state->in_transfer_func; - stream_update.gamut_remap = + bundle->stream_update.gamut_remap = &new_dm_crtc_state->stream->gamut_remap_matrix; - stream_update.output_csc_transform = + bundle->stream_update.output_csc_transform = &new_dm_crtc_state->stream->csc_color_matrix; - stream_update.out_transfer_func = + bundle->stream_update.out_transfer_func = new_dm_crtc_state->stream->out_transfer_func; } ret = fill_dc_scaling_info(new_plane_state, - &scaling_info); + scaling_info); if (ret) goto cleanup; - updates[num_plane].scaling_info = &scaling_info; + bundle->surface_updates[num_plane].scaling_info = scaling_info; if (amdgpu_fb) { ret = get_fb_info(amdgpu_fb, &tiling_flags); if (ret) goto cleanup; - memset(&flip_addr, 0, sizeof(flip_addr)); - ret = fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, - &plane_info, - &flip_addr.address); + plane_info, + &flip_addr->address); if (ret) goto cleanup; - updates[num_plane].plane_info = &plane_info; - updates[num_plane].flip_addr = &flip_addr; + bundle->surface_updates[num_plane].plane_info = plane_info; + bundle->surface_updates[num_plane].flip_addr = flip_addr; } num_plane++; @@ -7875,14 +7878,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); - stream_update.stream = new_dm_crtc_state->stream; + bundle->stream_update.stream = new_dm_crtc_state->stream; /* * TODO: DC modifies the surface during this call so we need * to lock here - find a way to do this without locking. */ mutex_lock(&dm->dc_lock); - update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, - &stream_update, status); + update_type = dc_check_update_surfaces_for_stream( + dc, bundle->surface_updates, num_plane, + &bundle->stream_update, status); mutex_unlock(&dm->dc_lock); if (update_type > UPDATE_TYPE_MED) { @@ -7892,7 +7896,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, } cleanup: - kfree(updates); + kfree(bundle); *out_type = update_type; return ret; @@ -8163,6 +8167,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; #endif + /* + * Perform validation of MST topology in the state: + * We need to perform MST atomic check before calling + * dc_validate_global_state(), or there is a chance + * to get stuck in an infinite loop and hang eventually. 
+ */ + ret = drm_dp_mst_atomic_check(state); + if (ret) + goto fail; + if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) { ret = -EINVAL; goto fail; @@ -8191,10 +8205,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, dc_retain_state(old_dm_state->context); } } - /* Perform validation of MST topology in the state*/ - ret = drm_dp_mst_atomic_check(state); - if (ret) - goto fail; /* Store the overall update type for use later in atomic check. */ for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index ae329335dfcc..0acd3409dd6c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -135,6 +135,20 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, mutex_unlock(&hdcp_w->mutex); } +static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector = aconnector; + + mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + + process_output(hdcp_w); + mutex_unlock(&hdcp_w->mutex); +} void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -303,6 +317,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) memset(link, 0, sizeof(*link)); display->index = aconnector->base.index; + + if (config->dpms_off) { + hdcp_remove_display(hdcp_work, link_index, aconnector); + return; + } display->state = MOD_HDCP_DISPLAY_ACTIVE; if (aconnector->dc_sink != NULL) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 069b7a6f5597..318b474ff20e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -216,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); } - ret = drm_dp_update_payload_part1(mst_mgr); + /* It's OK for this to fail */ + drm_dp_update_payload_part1(mst_mgr); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. 
The sequence is slot 1-63 allocated sequence for each @@ -225,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( get_payload_table(aconnector, proposed_table); - if (ret) - return false; - return true; } @@ -285,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation( struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; - int ret; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; @@ -299,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation( if (!mst_mgr->mst_state) return false; - ret = drm_dp_update_payload_part2(mst_mgr); - - if (ret) - return false; + /* It's OK for this to fail */ + drm_dp_update_payload_part2(mst_mgr); if (!enable) drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 96b391e4b3e7..5672f7765919 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -632,7 +632,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, if (drm_dp_atomic_find_vcpi_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn,\ + vars[next_index].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return; if (!drm_dp_mst_atomic_check(state)) { diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 2cb7a4288cb7..629a07a2719b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -89,6 +89,10 @@ static enum bp_result encoder_control_digx_v1_5( struct bios_parser *bp, struct bp_encoder_control *cntl); +static enum bp_result encoder_control_fallback( + struct bios_parser *bp, + struct bp_encoder_control *cntl); + static void init_dig_encoder_control(struct bios_parser *bp) { uint32_t version = @@ -100,7 +104,7 @@ static void init_dig_encoder_control(struct bios_parser *bp) break; default: dm_output_to_console("Don't have dig_encoder_control for v%d\n", version); - bp->cmd_tbl.dig_encoder_control = NULL; + bp->cmd_tbl.dig_encoder_control = encoder_control_fallback; break; } } @@ -184,6 +188,18 @@ static enum bp_result encoder_control_digx_v1_5( return result; } +static enum bp_result encoder_control_fallback( + struct bios_parser *bp, + struct bp_encoder_control *cntl) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return encoder_control_digx_v1_5(bp, cntl); + } + + return BP_RESULT_FAILURE; +} + /***************************************************************************** ****************************************************************************** ** @@ -196,6 +212,10 @@ static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl); +static enum bp_result transmitter_control_fallback( + struct bios_parser *bp, + struct bp_transmitter_control *cntl); + static void init_transmitter_control(struct bios_parser *bp) { uint8_t frev; @@ -209,7 +229,7 @@ static void init_transmitter_control(struct bios_parser *bp) break; default: dm_output_to_console("Don't have transmitter_control for v%d\n", crev); - bp->cmd_tbl.transmitter_control = NULL; + bp->cmd_tbl.transmitter_control = transmitter_control_fallback; break; } } @@ -273,6 +293,18 @@ static enum bp_result transmitter_control_v1_6( return result; } +static 
enum bp_result transmitter_control_fallback( + struct bios_parser *bp, + struct bp_transmitter_control *cntl) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return transmitter_control_v1_6(bp, cntl); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ****************************************************************************** ** @@ -285,6 +317,10 @@ static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); +static enum bp_result set_pixel_clock_fallback( + struct bios_parser *bp, + struct bp_pixel_clock_parameters *bp_params); + static void init_set_pixel_clock(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) { @@ -294,7 +330,7 @@ static void init_set_pixel_clock(struct bios_parser *bp) default: dm_output_to_console("Don't have set_pixel_clock for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)); - bp->cmd_tbl.set_pixel_clock = NULL; + bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback; break; } } @@ -400,6 +436,18 @@ static enum bp_result set_pixel_clock_v7( return result; } +static enum bp_result set_pixel_clock_fallback( + struct bios_parser *bp, + struct bp_pixel_clock_parameters *bp_params) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return set_pixel_clock_v7(bp, bp_params); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ****************************************************************************** ** @@ -632,6 +680,11 @@ static enum bp_result enable_disp_power_gating_v2_1( enum controller_id crtc_id, enum bp_pipe_control_action action); +static enum bp_result enable_disp_power_gating_fallback( + struct bios_parser *bp, + enum controller_id crtc_id, + enum bp_pipe_control_action action); + static void init_enable_disp_power_gating( struct bios_parser *bp) { @@ -643,7 +696,7 @@ static void init_enable_disp_power_gating( default: dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)); - bp->cmd_tbl.enable_disp_power_gating = NULL; + bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback; break; } } @@ -658,6 +711,10 @@ static void enable_disp_power_gating_dmcub( power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; power_gating.power_gating.pwr = *pwr; + /* ATOM_ENABLE is old API in DMUB */ + if (power_gating.power_gating.pwr.enable == ATOM_ENABLE) + power_gating.power_gating.pwr.enable = ATOM_INIT; + dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); @@ -695,6 +752,19 @@ static enum bp_result enable_disp_power_gating_v2_1( return result; } +static enum bp_result enable_disp_power_gating_fallback( + struct bios_parser *bp, + enum controller_id crtc_id, + enum bp_pipe_control_action action) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return enable_disp_power_gating_v2_1(bp, crtc_id, action); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ******************************************************************************* ** diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index a27d84ca15a5..1a37550731de 100644 
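/*
 * Editor's sketch (not part of the patch): the fallback scheme
 * command_table2.c adopts above, reduced to its shape. Instead of
 * leaving a command-table function pointer NULL when the VBIOS reports
 * an unknown table revision, init installs a fallback that reuses the
 * newest native handler whenever the DMUB command table can back it,
 * and fails cleanly otherwise. "some_cmd" and its parameter block are
 * hypothetical placeholders.
 */
static enum bp_result some_cmd_fallback(struct bios_parser *bp,
                struct bp_some_cmd_parameters *params)
{
        /* Route through the newest implementation only if DMUB backs it. */
        if (bp->base.ctx->dc->ctx->dmub_srv &&
            bp->base.ctx->dc->debug.dmub_command_table)
                return some_cmd_v1_6(bp, params);

        return BP_RESULT_FAILURE;
}

static void init_some_cmd(struct bios_parser *bp)
{
        switch (BIOS_CMD_TABLE_PARA_REVISION(somecmd)) {
        case 6:
                bp->cmd_tbl.some_cmd = some_cmd_v1_6;
                break;
        default:
                /* Never leave a NULL pointer for callers to trip over. */
                bp->cmd_tbl.some_cmd = some_cmd_fallback;
                break;
        }
}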
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1435,6 +1435,7 @@ void dcn_bw_update_from_pplib(struct dc *dc) struct dc_context *ctx = dc->ctx; struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; bool res; + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ res = dm_pp_get_clock_levels_by_type_with_voltage( @@ -1446,17 +1447,28 @@ void dcn_bw_update_from_pplib(struct dc *dc) res = verify_clock_values(&fclks); if (res) { - ASSERT(fclks.num_levels >= 3); - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; + ASSERT(fclks.num_levels); + + vmin0p65_idx = 0; + vmid0p72_idx = fclks.num_levels - + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1); + vmax0p9_idx = fclks.num_levels - 1; + + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = + dc->dcn_soc->number_of_channels * + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; } else BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6c797fac189d..04441dbcba76 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2462,12 +2462,7 @@ void dc_set_power_state( enum dc_acpi_cm_power_state power_state) { struct kref refcount; - struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib), - GFP_KERNEL); - - ASSERT(dml); - if (!dml) - return; + struct display_mode_lib *dml; switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: @@ -2490,6 +2485,12 @@ void dc_set_power_state( * clean state, and dc hw programming optimizations will not * cause any trouble. 
*/ + dml = kzalloc(sizeof(struct display_mode_lib), + GFP_KERNEL); + + ASSERT(dml); + if (!dml) + return; /* Preserve refcount */ refcount = dc->current_state->refcount; @@ -2503,10 +2504,10 @@ dc->current_state->refcount = refcount; dc->current_state->bw_ctx.dml = *dml; + kfree(dml); + break; } - - kfree(dml); } void dc_resume(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 260c0b62d37d..a09119c10d7c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -851,18 +851,12 @@ static bool dc_link_detect_helper(struct dc_link *link, if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) same_dpcd = false; } - /* Active dongle plug in without display or downstream unplug*/ + /* Active dongle downstream unplug*/ if (link->type == dc_connection_active_dongle && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { - if (prev_sink != NULL) { + if (prev_sink != NULL) /* Downstream unplug */ dc_sink_release(prev_sink); - } else { - /* Empty dongle plug in */ - dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); - } return true; } @@ -969,8 +963,7 @@ static bool dc_link_detect_helper(struct dc_link *link, same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX && - reason != DETECT_REASON_HPDRX) { + sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { /* * TODO debug why Dell 2413 doesn't like * two link trainings @@ -2882,7 +2875,16 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link) // Clear all of MST payload then reallocate for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && + + /* The driver enables split pipe for external monitors, + * so we have to check whether pipe_ctx is a split pipe. + * If it is, the driver uses the top pipe to + * reallocate. + */ + if (!pipe_ctx || pipe_ctx->top_pipe) + continue; + + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->stream->dpms_off == false && pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { deallocate_mst_payload(pipe_ctx); @@ -2891,7 +2893,11 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link) for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx || pipe_ctx->top_pipe) + continue; + + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->stream->dpms_off == false && pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { /* enable/disable PHY will clear connection between BE and FE diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 6ab298c65247..cb731c1d30b1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -983,7 +983,7 @@ static enum link_training_result perform_clock_recovery_sequence( offset); /* 2. 
update DPCD of the receiver*/ - if (!retries_cr) + if (!retry_count) /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration.*/ dpcd_set_lt_pattern_and_lane_settings( @@ -3680,7 +3680,7 @@ static void set_crtc_test_pattern(struct dc_link *link, struct pipe_ctx *odm_pipe; enum controller_dp_color_space controller_color_space; int opp_cnt = 1; - uint8_t count = 0; + int count; switch (test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -3725,11 +3725,11 @@ static void set_crtc_test_pattern(struct dc_link *link, width, height); /* wait for dpg to blank pixel data with test pattern */ - for (count = 0; count < 1000; count++) + for (count = 0; count < 1000; count++) { if (opp->funcs->dpg_is_blanked(opp)) break; - else - udelay(100); + udelay(100); + } } } break; @@ -3925,8 +3925,38 @@ bool dc_link_dp_set_test_pattern( sizeof(training_pattern)); } } else { - /* CRTC Patterns */ + enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + color_space = COLOR_SPACE_SRGB; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_SRGB_LIMITED; + break; + + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + color_space = COLOR_SPACE_YCBCR601; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + color_space = COLOR_SPACE_YCBCR709; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + default: + break; + } + /* update MSA to requested color space */ + pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, + &pipe_ctx->stream->timing, + color_space, + pipe_ctx->stream->use_vsc_sdp_for_colorimetry, + link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + + /* CRTC Patterns */ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); + /* Set Test Pattern state */ link->test_pattern_enabled = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3fa85a54360f..8ff25b5dd2f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.68" +#define DC_VER "3.2.69" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -425,6 +425,7 @@ struct dc_debug_options { bool validate_dml_output; bool enable_dmcub_surface_flip; bool usbc_combo_phy_reset_wa; + bool disable_dsc; }; struct dc_debug_data { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 1cd4d8fc361f..066188ba7949 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -100,20 +100,6 @@ static uint32_t get_hw_buffer_available_size( dce_i2c_hw->buffer_used_bytes; } -static uint32_t get_speed( - const struct dce_i2c_hw *dce_i2c_hw) -{ - uint32_t pre_scale = 0; - - REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); - - /* [anaumov] it seems following is unnecessary */ - /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ - return pre_scale ? 
- dce_i2c_hw->reference_frequency / pre_scale : - dce_i2c_hw->default_speed; -} - static void process_channel_reply( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *reply) @@ -278,16 +264,25 @@ static void set_speed( struct dce_i2c_hw *dce_i2c_hw, uint32_t speed) { + uint32_t xtal_ref_div = 0; + uint32_t prescale = 0; + + REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); + + if (xtal_ref_div == 0) + xtal_ref_div = 2; + + prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed; if (speed) { if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) REG_UPDATE_N(SPEED, 3, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1); else REG_UPDATE_N(SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } } @@ -344,9 +339,7 @@ static void release_engine( bool safe_to_reset; /* Restore original HW engine speed */ - - set_speed(dce_i2c_hw, dce_i2c_hw->original_speed); - + set_speed(dce_i2c_hw, dce_i2c_hw->default_speed); /* Reset HW engine */ { @@ -378,7 +371,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( { uint32_t counter = 0; enum gpio_result result; - uint32_t current_speed; struct dce_i2c_hw *dce_i2c_hw = NULL; if (!ddc) @@ -416,11 +408,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( dce_i2c_hw->ddc = ddc; - current_speed = get_speed(dce_i2c_hw); - - if (current_speed) - dce_i2c_hw->original_speed = current_speed; - if (!setup_engine(dce_i2c_hw)) { release_engine(dce_i2c_hw); return NULL; @@ -478,13 +465,9 @@ static void submit_channel_request_hw( static uint32_t get_transaction_timeout_hw( const struct dce_i2c_hw *dce_i2c_hw, - uint32_t length) + uint32_t length, + uint32_t speed) { - - uint32_t speed = get_speed(dce_i2c_hw); - - - uint32_t period_timeout; uint32_t num_of_clock_stretches; @@ -504,7 +487,8 @@ static uint32_t get_transaction_timeout_hw( bool dce_i2c_hw_engine_submit_payload( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *payload, - bool middle_of_transaction) + bool middle_of_transaction, + uint32_t speed) { struct i2c_request_transaction_data request; @@ -542,7 +526,7 @@ bool dce_i2c_hw_engine_submit_payload( /* obtain timeout value before submitting request */ transaction_timeout = get_transaction_timeout_hw( - dce_i2c_hw, payload->length + 1); + dce_i2c_hw, payload->length + 1, speed); submit_channel_request_hw( dce_i2c_hw, &request); @@ -588,13 +572,11 @@ bool dce_i2c_submit_command_hw( struct i2c_payload *payload = cmd->payloads + index_of_payload; if (!dce_i2c_hw_engine_submit_payload( - dce_i2c_hw, payload, mot)) { + dce_i2c_hw, payload, mot, cmd->speed)) { result = false; break; } - - ++index_of_payload; } @@ -625,7 +607,6 @@ void dce_i2c_hw_construct( dce_i2c_hw->buffer_used_bytes = 0; dce_i2c_hw->transaction_count = 0; dce_i2c_hw->engine_keep_power_up_count = 1; - dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->send_reset_length = 0; dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; @@ -640,9 +621,6 @@ void dce100_i2c_hw_construct( const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { - - uint32_t xtal_ref_div = 0; - dce_i2c_hw_construct(dce_i2c_hw, ctx, engine_id, @@ -650,21 +628,6 @@ 
void dce100_i2c_hw_construct( shifts, masks); dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100; - - REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); - - if (xtal_ref_div == 0) - xtal_ref_div = 2; - - /*Calculating Reference Clock by divding original frequency by - * XTAL_REF_DIV. - * At upper level, uint32_t reference_frequency = - * dal_dce_i2c_get_reference_clock(as) >> 1 - * which already divided by 2. So we need x2 to get original - * reference clock from ppll_info - */ - dce_i2c_hw->reference_frequency = - (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div; } void dce112_i2c_hw_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index d4b2037f7d74..fb055e6883c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -256,7 +256,6 @@ struct i2c_request_transaction_data { struct dce_i2c_hw { struct ddc *ddc; - uint32_t original_speed; uint32_t engine_keep_power_up_count; uint32_t transaction_count; uint32_t buffer_used_bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index f2127afb37b2..1008ac8a0f2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2911,6 +2911,33 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) hubbub->funcs->update_dchub(hubbub, dh_data); } +static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *test_pipe; + const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2; + int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; + + /** + * Disable the cursor if there's another pipe above this with a + * plane that contains this pipe's viewport to prevent double cursor + * and incorrect scaling artifacts. 
+ */ + for (test_pipe = pipe_ctx->top_pipe; test_pipe; + test_pipe = test_pipe->top_pipe) { + if (!test_pipe->plane_state->visible) + continue; + + r2 = &test_pipe->plane_res.scl_data.recout; + r2_r = r2->x + r2->width; + r2_b = r2->y + r2->height; + + if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b) + return true; + } + + return false; +} + void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; @@ -2956,6 +2983,9 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) pos_cpy.enable = false; + if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx)) + pos_cpy.enable = false; + // Swap axis and mirror horizontally if (param.rotation == ROTATION_ANGLE_90) { uint32_t temp_x = pos_cpy.x; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index da63fc53cc4a..cf09b9335728 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -261,28 +261,28 @@ static void hubp21_apply_PLAT_54186_wa( address->video_progressive.luma_addr.high_part == 0xf4) return; - if ((rotation_angle == 0 || rotation_angle == 180) + if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180) && viewport_c_height <= 512) return; - if ((rotation_angle == 90 || rotation_angle == 270) + if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270) && viewport_c_width <= 512) return; switch (rotation_angle) { - case 0: /* 0 degree rotation */ + case ROTATION_ANGLE_0: /* 0 degree rotation */ row_height = 128; patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1; patched_viewport_width = viewport_c_width; hubp21->PLAT_54186_wa_chroma_addr_offset = 0; break; - case 2: /* 180 degree rotation */ + case ROTATION_ANGLE_180: /* 180 degree rotation */ row_height = 128; patched_viewport_height = viewport_c_height + row_height; patched_viewport_width = viewport_c_width; hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe; break; - case 1: /* 90 degree rotation */ + case ROTATION_ANGLE_90: /* 90 degree rotation */ row_height = 256; if (h_mirror_en) { patched_viewport_height = viewport_c_height; @@ -294,7 +294,7 @@ static void hubp21_apply_PLAT_54186_wa( hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; } break; - case 3: /* 270 degree rotation */ + case ROTATION_ANGLE_270: /* 270 degree rotation */ row_height = 256; if (h_mirror_en) { patched_viewport_height = viewport_c_height; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 1d741bca2211..0d506d30d6b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -830,7 +830,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 3840, + .max_downscale_src_width = 4096, .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index e7a8ac7a1f22..45f028986a8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -38,6 +38,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -3894,13 +3895,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 22f3b5a4b3b9..485a9c62ec58 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -39,6 +39,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN20_MAX_DSC_IMAGE_WIDTH 5184 +#define DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -3935,15 +3936,22 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > 
MaxMaxDispclkRoundedDown) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index af35b3bea909..e6617c958bb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -65,6 +65,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN21_MAX_DSC_IMAGE_WIDTH 5184 +#define DCN21_MAX_420_IMAGE_WIDTH 4096 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -3971,15 +3972,22 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] && locals->ODMCombineEnablePerState[i][k] == 
dm_odm_combine_mode_disabled) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index e7a44df676ca..2875efd85467 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -763,8 +763,8 @@ struct vba_vars_st { double SwathWidthC[DC__NUM_DPP__MAX]; unsigned int BytePerPixelY[DC__NUM_DPP__MAX]; unsigned int BytePerPixelC[DC__NUM_DPP__MAX]; - long dummyinteger1; - long dummyinteger2; + unsigned int dummyinteger1; + unsigned int dummyinteger2; double FinalDRAMClockChangeLatency; double Tdmdl_vm[DC__NUM_DPP__MAX]; double Tdmdl[DC__NUM_DPP__MAX]; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 8b78fcbfe746..87d682d25278 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -224,7 +224,8 @@ static void get_dsc_enc_caps( memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); if (dsc) { - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + if (!dsc->ctx->dc->debug.disable_dsc) + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); if (dsc->ctx->dc->debug.native422_support) dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; } diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index 8e23a7017588..f8917594036a 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -231,6 +231,8 @@ struct dmub_srv_base_funcs { struct dmub_srv_hw_funcs { /* private: internal use only */ + void (*init)(struct dmub_srv *dmub); + void (*reset)(struct dmub_srv *dmub); void (*reset_release)(struct dmub_srv *dmub); @@ -417,6 +419,21 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params); /** + * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized + * @dmub: the dmub service + * + * Before destroying the DMUB service or releasing the backing framebuffer + * memory we'll need to put the DMCUB into reset first. + * + * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub); + +/** * dmub_srv_cmd_queue() - queues a command to the DMUB * @dmub: the dmub service * @cmd: the command to queue diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index cd51c6138894..b2ca8e0dbac9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -54,6 +54,19 @@ const struct dmub_srv_common_regs dmub_srv_dcn20_regs = { /* Shared functions. 
*/ +static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub, + uint64_t *fb_base, + uint64_t *fb_offset) +{ + uint32_t tmp; + + REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp); + *fb_base = (uint64_t)tmp << 24; + + REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp); + *fb_offset = (uint64_t)tmp << 24; +} + static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in, uint64_t fb_base, uint64_t fb_offset, @@ -67,6 +80,8 @@ void dmub_dcn20_reset(struct dmub_srv *dmub) REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); + REG_WRITE(DMCUB_INBOX1_WPTR, 0); } void dmub_dcn20_reset_release(struct dmub_srv *dmub) @@ -82,7 +97,9 @@ void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, const struct dmub_window *cw1) { union dmub_addr offset; - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + uint64_t fb_base, fb_offset; + + dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, @@ -118,7 +135,9 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw6) { union dmub_addr offset; - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + uint64_t fb_base, fb_offset; + + dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); @@ -173,8 +192,6 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000); REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); - REG_WRITE(DMCUB_INBOX1_RPTR, 0); - REG_WRITE(DMCUB_INBOX1_WPTR, 0); } uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index 53bfd4da69ad..04b0fa13153d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -92,7 +92,9 @@ struct dmub_srv; DMUB_SR(DMCUB_SCRATCH14) \ DMUB_SR(DMCUB_SCRATCH15) \ DMUB_SR(CC_DC_PIPE_DIS) \ - DMUB_SR(MMHUBBUB_SOFT_RESET) + DMUB_SR(MMHUBBUB_SOFT_RESET) \ + DMUB_SR(DCN_VM_FB_LOCATION_BASE) \ + DMUB_SR(DCN_VM_FB_OFFSET) #define DMUB_COMMON_FIELDS() \ DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ @@ -121,7 +123,9 @@ struct dmub_srv; DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ - DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) + DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ + DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ + DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) struct dmub_srv_common_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index dee676335d73..85a518bf8a76 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -312,6 +312,9 @@ enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init) if (!dmub->sw_init) return DMUB_STATUS_INVALID; + if (!dmub->hw_init) + return DMUB_STATUS_OK; + if (dmub->hw_funcs.is_hw_init) *is_hw_init = dmub->hw_funcs.is_hw_init(dmub); @@ -415,6 +418,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_OK; } +enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) +{ + if 
(!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_init == false) + return DMUB_STATUS_OK; + + if (dmub->hw_funcs.reset) + dmub->hw_funcs.reset(dmub); + + dmub->hw_init = false; + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, const struct dmub_cmd_header *cmd) { diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 1b278c42809a..cac09d500fda 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1673,129 +1673,6 @@ static bool map_regamma_hw_to_x_user( #define _EXTRA_POINTS 3 -bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, - const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, - const struct freesync_hdr_tf_params *fs_params) -{ - struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; - struct dividers dividers; - - struct pwl_float_data *rgb_user = NULL; - struct pwl_float_data_ex *rgb_regamma = NULL; - struct gamma_pixel *axis_x = NULL; - struct pixel_gamma_point *coeff = NULL; - enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; - bool ret = false; - - if (output_tf->type == TF_TYPE_BYPASS) - return false; - - /* we can use hardcoded curve for plain SRGB TF */ - if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && - output_tf->tf == TRANSFER_FUNCTION_SRGB) { - if (ramp == NULL) - return true; - if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || - (!mapUserRamp && ramp->type == GAMMA_RGB_256)) - return true; - } - - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - if (ramp && ramp->type != GAMMA_CS_TFM_1D && - (mapUserRamp || ramp->type != GAMMA_RGB_256)) { - rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, - sizeof(*rgb_user), - GFP_KERNEL); - if (!rgb_user) - goto rgb_user_alloc_fail; - - axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), - GFP_KERNEL); - if (!axis_x) - goto axis_x_alloc_fail; - - dividers.divider1 = dc_fixpt_from_fraction(3, 2); - dividers.divider2 = dc_fixpt_from_int(2); - dividers.divider3 = dc_fixpt_from_fraction(5, 2); - - build_evenly_distributed_points( - axis_x, - ramp->num_entries, - dividers); - - if (ramp->type == GAMMA_RGB_256 && mapUserRamp) - scale_gamma(rgb_user, ramp, dividers); - else if (ramp->type == GAMMA_RGB_FLOAT_1024) - scale_gamma_dx(rgb_user, ramp, dividers); - } - - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - - coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), - GFP_KERNEL); - if (!coeff) - goto coeff_alloc_fail; - - tf = output_tf->tf; - if (tf == TRANSFER_FUNCTION_PQ) { - tf_pts->end_exponent = 7; - tf_pts->x_point_at_y1_red = 125; - tf_pts->x_point_at_y1_green = 125; - tf_pts->x_point_at_y1_blue = 125; - - build_pq(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - output_tf->sdr_ref_white_level); - } else if (tf == TRANSFER_FUNCTION_GAMMA22 && - fs_params != NULL && fs_params->skip_tm == 0) { - build_freesync_hdr(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - fs_params); - } else if (tf == TRANSFER_FUNCTION_HLG) { - build_freesync_hdr(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - fs_params); - - } else { - tf_pts->end_exponent = 0; - tf_pts->x_point_at_y1_red = 1; - tf_pts->x_point_at_y1_green = 1; - tf_pts->x_point_at_y1_blue = 1; - - build_regamma(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, tf); - } 
- map_regamma_hw_to_x_user(ramp, coeff, rgb_user, - coordinates_x, axis_x, rgb_regamma, - MAX_HW_POINTS, tf_pts, - (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && - (ramp && ramp->type != GAMMA_CS_TFM_1D)); - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); - - ret = true; - - kvfree(coeff); -coeff_alloc_fail: - kvfree(rgb_regamma); -rgb_regamma_alloc_fail: - kvfree(axis_x); -axis_x_alloc_fail: - kvfree(rgb_user); -rgb_user_alloc_fail: - return ret; -} - bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, const struct regamma_lut *regamma) { @@ -2043,14 +1920,14 @@ rgb_user_alloc_fail: return ret; } - -bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, +static bool calculate_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points, + struct pwl_float_data_ex *rgb_regamma, + const struct freesync_hdr_tf_params *fs_params, uint32_t sdr_ref_white_level) { uint32_t i; bool ret = false; - struct pwl_float_data_ex *rgb_regamma = NULL; if (trans == TRANSFER_FUNCTION_UNITY || trans == TRANSFER_FUNCTION_LINEAR) { @@ -2060,68 +1937,33 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, points->x_point_at_y1_blue = 1; for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = coordinates_x[i].x; - points->green[i] = coordinates_x[i].x; - points->blue[i] = coordinates_x[i].x; + rgb_regamma[i].r = coordinates_x[i].x; + rgb_regamma[i].g = coordinates_x[i].x; + rgb_regamma[i].b = coordinates_x[i].x; } + ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; points->end_exponent = 7; points->x_point_at_y1_red = 125; points->x_point_at_y1_green = 125; points->x_point_at_y1_blue = 125; - build_pq(rgb_regamma, MAX_HW_POINTS, coordinates_x, sdr_ref_white_level); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } - ret = true; - - kvfree(rgb_regamma); - } else if (trans == TRANSFER_FUNCTION_SRGB || - trans == TRANSFER_FUNCTION_BT709 || - trans == TRANSFER_FUNCTION_GAMMA22 || - trans == TRANSFER_FUNCTION_GAMMA24 || - trans == TRANSFER_FUNCTION_GAMMA26) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - points->end_exponent = 0; - points->x_point_at_y1_red = 1; - points->x_point_at_y1_green = 1; - points->x_point_at_y1_blue = 1; - build_regamma(rgb_regamma, + ret = true; + } else if (trans == TRANSFER_FUNCTION_GAMMA22 && + fs_params != NULL && fs_params->skip_tm == 0) { + build_freesync_hdr(rgb_regamma, MAX_HW_POINTS, coordinates_x, - trans); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } - ret = true; + fs_params); - kvfree(rgb_regamma); + ret = true; } else if (trans == TRANSFER_FUNCTION_HLG) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; points->end_exponent = 4; points->x_point_at_y1_red = 12; points->x_point_at_y1_green = 12; @@ -2131,18 +1973,127 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, MAX_HW_POINTS, coordinates_x, 80, 1000); - for (i = 0; i <= MAX_HW_POINTS ; i++) { 
- points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } + + ret = true; + } else { + // trans == TRANSFER_FUNCTION_SRGB + // trans == TRANSFER_FUNCTION_BT709 + // trans == TRANSFER_FUNCTION_GAMMA22 + // trans == TRANSFER_FUNCTION_GAMMA24 + // trans == TRANSFER_FUNCTION_GAMMA26 + points->end_exponent = 0; + points->x_point_at_y1_red = 1; + points->x_point_at_y1_green = 1; + points->x_point_at_y1_blue = 1; + + build_regamma(rgb_regamma, + MAX_HW_POINTS, + coordinates_x, + trans); + ret = true; - kvfree(rgb_regamma); } -rgb_regamma_alloc_fail: + return ret; } +bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, + const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, + const struct freesync_hdr_tf_params *fs_params) +{ + struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; + struct dividers dividers; + + struct pwl_float_data *rgb_user = NULL; + struct pwl_float_data_ex *rgb_regamma = NULL; + struct gamma_pixel *axis_x = NULL; + struct pixel_gamma_point *coeff = NULL; + enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; + bool ret = false; + + if (output_tf->type == TF_TYPE_BYPASS) + return false; + + /* we can use hardcoded curve for plain SRGB TF */ + if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && + output_tf->tf == TRANSFER_FUNCTION_SRGB) { + if (ramp == NULL) + return true; + if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || + (!mapUserRamp && ramp->type == GAMMA_RGB_256)) + return true; + } + + output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + + if (ramp && ramp->type != GAMMA_CS_TFM_1D && + (mapUserRamp || ramp->type != GAMMA_RGB_256)) { + rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, + sizeof(*rgb_user), + GFP_KERNEL); + if (!rgb_user) + goto rgb_user_alloc_fail; + + axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), + GFP_KERNEL); + if (!axis_x) + goto axis_x_alloc_fail; + + dividers.divider1 = dc_fixpt_from_fraction(3, 2); + dividers.divider2 = dc_fixpt_from_int(2); + dividers.divider3 = dc_fixpt_from_fraction(5, 2); + + build_evenly_distributed_points( + axis_x, + ramp->num_entries, + dividers); + + if (ramp->type == GAMMA_RGB_256 && mapUserRamp) + scale_gamma(rgb_user, ramp, dividers); + else if (ramp->type == GAMMA_RGB_FLOAT_1024) + scale_gamma_dx(rgb_user, ramp, dividers); + } + + rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_regamma), + GFP_KERNEL); + if (!rgb_regamma) + goto rgb_regamma_alloc_fail; + + coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), + GFP_KERNEL); + if (!coeff) + goto coeff_alloc_fail; + + tf = output_tf->tf; + + ret = calculate_curve(tf, + tf_pts, + rgb_regamma, + fs_params, + output_tf->sdr_ref_white_level); + + if (ret) { + map_regamma_hw_to_x_user(ramp, coeff, rgb_user, + coordinates_x, axis_x, rgb_regamma, + MAX_HW_POINTS, tf_pts, + (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && + (ramp && ramp->type != GAMMA_CS_TFM_1D)); + + if (ramp && ramp->type == GAMMA_CS_TFM_1D) + apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); + } + + kvfree(coeff); +coeff_alloc_fail: + kvfree(rgb_regamma); +rgb_regamma_alloc_fail: + kvfree(axis_x); +axis_x_alloc_fail: + kvfree(rgb_user); +rgb_user_alloc_fail: + return ret; +} bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 44ddea58523a..9994817a9a03 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -103,10 +103,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); -bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, - struct dc_transfer_func_distributed_points *points, - uint32_t sdr_ref_white_level); - bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points); diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 6e5ecefe7d9d..b9992ebf77a6 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -381,7 +381,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync, bool update = false; unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; - //Compute the exit refresh rate and exit frame duration + /* Compute the exit refresh rate and exit frame duration */ unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us) + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ)); unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index f98d3d9ecb6d..af78e4f1be68 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -63,7 +63,7 @@ struct mod_hdcp_transition_input_hdcp1 { uint8_t hdcp_capable_dp; uint8_t binfo_read_dp; uint8_t r0p_available_dp; - uint8_t link_integiry_check; + uint8_t link_integrity_check; uint8_t reauth_request_check; uint8_t stream_encryption_dp; }; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 04845e43df15..37670db64855 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -283,8 +283,8 @@ static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp, hdcp, "bstatus_read")) goto out; if (!mod_hdcp_execute_and_set(check_link_integrity_dp, - &input->link_integiry_check, &status, - hdcp, "link_integiry_check")) + &input->link_integrity_check, &status, + hdcp, "link_integrity_check")) goto out; if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, &input->reauth_request_check, &status, @@ -431,8 +431,8 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, hdcp, "bstatus_read")) goto out; if (!mod_hdcp_execute_and_set(check_link_integrity_dp, - &input->link_integiry_check, &status, - hdcp, "link_integiry_check")) + &input->link_integrity_check, &status, + hdcp, "link_integrity_check")) goto out; if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, &input->reauth_request_check, &status, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c index 21ebc62bb9d9..76edcbe51f71 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -241,7 +241,7 
@@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, } break; case D1_A4_AUTHENTICATED: - if (input->link_integiry_check != PASS || + if (input->link_integrity_check != PASS || input->reauth_request_check != PASS) { /* 1A-07: restart hdcp on a link integrity failure */ fail_and_restart_in_ms(0, &status, output); @@ -249,7 +249,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, } break; case D1_A6_WAIT_FOR_READY: - if (input->link_integiry_check == FAIL || + if (input->link_integrity_check == FAIL || input->reauth_request_check == FAIL) { fail_and_restart_in_ms(0, &status, output); break; diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h index 87c84691b5be..bb2c9c7a18df 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h @@ -71,4 +71,7 @@ #define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL #define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3 0x1d09cUL +#define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL +#define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h index 65e9f756e86e..7afa87c7ff54 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h @@ -53,4 +53,12 @@ #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L +//DF_CS_UMC_AON0_DramLimitAddress0 +#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID__SHIFT 0x0 +#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO__SHIFT 0xa +#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr__SHIFT 0xc +#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID_MASK 0x000003FFL +#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L +#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h index c9e3f6d849a8..ea316d8dcb37 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h @@ -2060,7 +2060,8 @@ // addressBlock: gc_sqdec //SQ_CONFIG -#define SQ_CONFIG__UNUSED__SHIFT 0x0 +#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT 0x0 +#define SQ_CONFIG__UNUSED__SHIFT 0x1 #define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7 #define SQ_CONFIG__DEBUG_EN__SHIFT 0x8 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9 @@ -2079,7 +2080,8 @@ #define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d #define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e #define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f -#define SQ_CONFIG__UNUSED_MASK 0x0000007FL +#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK 0x00000001L +#define SQ_CONFIG__UNUSED_MASK 0x0000007EL #define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L #define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L #define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x00000200L diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h new file mode 100644 index 000000000000..f41556abfbbc --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h @@ -0,0 +1,264 @@ +/* + * Copyright (C) 2020 
Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _gc_9_4_1_OFFSET_HEADER +#define _gc_9_4_1_OFFSET_HEADER + +// addressBlock: gc_grbmdec +// base address: 0x8000 +#define mmGRBM_CNTL 0x0000 +#define mmGRBM_CNTL_BASE_IDX 0 +#define mmGRBM_SKEW_CNTL 0x0001 +#define mmGRBM_SKEW_CNTL_BASE_IDX 0 +#define mmGRBM_STATUS2 0x0002 +#define mmGRBM_STATUS2_BASE_IDX 0 +#define mmGRBM_PWR_CNTL 0x0003 +#define mmGRBM_PWR_CNTL_BASE_IDX 0 +#define mmGRBM_STATUS 0x0004 +#define mmGRBM_STATUS_BASE_IDX 0 +#define mmGRBM_STATUS_SE0 0x0005 +#define mmGRBM_STATUS_SE0_BASE_IDX 0 +#define mmGRBM_STATUS_SE1 0x0006 +#define mmGRBM_STATUS_SE1_BASE_IDX 0 +#define mmGRBM_SOFT_RESET 0x0008 +#define mmGRBM_SOFT_RESET_BASE_IDX 0 +#define mmGRBM_GFX_CLKEN_CNTL 0x000c +#define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX 0 +#define mmGRBM_WAIT_IDLE_CLOCKS 0x000d +#define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0 +#define mmGRBM_STATUS_SE2 0x000e +#define mmGRBM_STATUS_SE2_BASE_IDX 0 +#define mmGRBM_STATUS_SE3 0x000f +#define mmGRBM_STATUS_SE3_BASE_IDX 0 +#define mmGRBM_READ_ERROR 0x0016 +#define mmGRBM_READ_ERROR_BASE_IDX 0 +#define mmGRBM_READ_ERROR2 0x0017 +#define mmGRBM_READ_ERROR2_BASE_IDX 0 +#define mmGRBM_INT_CNTL 0x0018 +#define mmGRBM_INT_CNTL_BASE_IDX 0 +#define mmGRBM_TRAP_OP 0x0019 +#define mmGRBM_TRAP_OP_BASE_IDX 0 +#define mmGRBM_TRAP_ADDR 0x001a +#define mmGRBM_TRAP_ADDR_BASE_IDX 0 +#define mmGRBM_TRAP_ADDR_MSK 0x001b +#define mmGRBM_TRAP_ADDR_MSK_BASE_IDX 0 +#define mmGRBM_TRAP_WD 0x001c +#define mmGRBM_TRAP_WD_BASE_IDX 0 +#define mmGRBM_TRAP_WD_MSK 0x001d +#define mmGRBM_TRAP_WD_MSK_BASE_IDX 0 +#define mmGRBM_DSM_BYPASS 0x001e +#define mmGRBM_DSM_BYPASS_BASE_IDX 0 +#define mmGRBM_WRITE_ERROR 0x001f +#define mmGRBM_WRITE_ERROR_BASE_IDX 0 +#define mmGRBM_IOV_ERROR 0x0020 +#define mmGRBM_IOV_ERROR_BASE_IDX 0 +#define mmGRBM_CHIP_REVISION 0x0021 +#define mmGRBM_CHIP_REVISION_BASE_IDX 0 +#define mmGRBM_GFX_CNTL 0x0022 +#define mmGRBM_GFX_CNTL_BASE_IDX 0 +#define mmGRBM_RSMU_CFG 0x0023 +#define mmGRBM_RSMU_CFG_BASE_IDX 0 +#define mmGRBM_IH_CREDIT 0x0024 +#define mmGRBM_IH_CREDIT_BASE_IDX 0 +#define mmGRBM_PWR_CNTL2 0x0025 +#define mmGRBM_PWR_CNTL2_BASE_IDX 0 +#define mmGRBM_UTCL2_INVAL_RANGE_START 0x0026 +#define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0 +#define mmGRBM_UTCL2_INVAL_RANGE_END 0x0027 +#define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0 +#define mmGRBM_RSMU_READ_ERROR 0x0028 +#define mmGRBM_RSMU_READ_ERROR_BASE_IDX 0 +#define mmGRBM_CHICKEN_BITS 0x0029 +#define 
mmGRBM_CHICKEN_BITS_BASE_IDX 0 +#define mmGRBM_FENCE_RANGE0 0x002a +#define mmGRBM_FENCE_RANGE0_BASE_IDX 0 +#define mmGRBM_FENCE_RANGE1 0x002b +#define mmGRBM_FENCE_RANGE1_BASE_IDX 0 +#define mmGRBM_NOWHERE 0x003f +#define mmGRBM_NOWHERE_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG0 0x0040 +#define mmGRBM_SCRATCH_REG0_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG1 0x0041 +#define mmGRBM_SCRATCH_REG1_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG2 0x0042 +#define mmGRBM_SCRATCH_REG2_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG3 0x0043 +#define mmGRBM_SCRATCH_REG3_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG4 0x0044 +#define mmGRBM_SCRATCH_REG4_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG5 0x0045 +#define mmGRBM_SCRATCH_REG5_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG6 0x0046 +#define mmGRBM_SCRATCH_REG6_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG7 0x0047 +#define mmGRBM_SCRATCH_REG7_BASE_IDX 0 + +// addressBlock: gc_cppdec2 +// base address: 0xc600 +#define mmCPF_EDC_TAG_CNT 0x1189 +#define mmCPF_EDC_TAG_CNT_BASE_IDX 0 +#define mmCPF_EDC_ROQ_CNT 0x118a +#define mmCPF_EDC_ROQ_CNT_BASE_IDX 0 +#define mmCPG_EDC_TAG_CNT 0x118b +#define mmCPG_EDC_TAG_CNT_BASE_IDX 0 +#define mmCPG_EDC_DMA_CNT 0x118d +#define mmCPG_EDC_DMA_CNT_BASE_IDX 0 +#define mmCPC_EDC_SCRATCH_CNT 0x118e +#define mmCPC_EDC_SCRATCH_CNT_BASE_IDX 0 +#define mmCPC_EDC_UCODE_CNT 0x118f +#define mmCPC_EDC_UCODE_CNT_BASE_IDX 0 +#define mmDC_EDC_STATE_CNT 0x1191 +#define mmDC_EDC_STATE_CNT_BASE_IDX 0 +#define mmDC_EDC_CSINVOC_CNT 0x1192 +#define mmDC_EDC_CSINVOC_CNT_BASE_IDX 0 +#define mmDC_EDC_RESTORE_CNT 0x1193 +#define mmDC_EDC_RESTORE_CNT_BASE_IDX 0 + +// addressBlock: gc_gdsdec +// base address: 0x9700 +#define mmGDS_EDC_CNT 0x05c5 +#define mmGDS_EDC_CNT_BASE_IDX 0 +#define mmGDS_EDC_GRBM_CNT 0x05c6 +#define mmGDS_EDC_GRBM_CNT_BASE_IDX 0 +#define mmGDS_EDC_OA_DED 0x05c7 +#define mmGDS_EDC_OA_DED_BASE_IDX 0 +#define mmGDS_EDC_OA_PHY_CNT 0x05cb +#define mmGDS_EDC_OA_PHY_CNT_BASE_IDX 0 +#define mmGDS_EDC_OA_PIPE_CNT 0x05cc +#define mmGDS_EDC_OA_PIPE_CNT_BASE_IDX 0 + +// addressBlock: gc_shsdec +// base address: 0x9000 +#define mmSPI_EDC_CNT 0x0445 +#define mmSPI_EDC_CNT_BASE_IDX 0 + +// addressBlock: gc_sqdec +// base address: 0x8c00 +#define mmSQC_EDC_CNT2 0x032c +#define mmSQC_EDC_CNT2_BASE_IDX 0 +#define mmSQC_EDC_CNT3 0x032d +#define mmSQC_EDC_CNT3_BASE_IDX 0 +#define mmSQC_EDC_PARITY_CNT3 0x032e +#define mmSQC_EDC_PARITY_CNT3_BASE_IDX 0 +#define mmSQC_EDC_CNT 0x03a2 +#define mmSQC_EDC_CNT_BASE_IDX 0 +#define mmSQ_EDC_SEC_CNT 0x03a3 +#define mmSQ_EDC_SEC_CNT_BASE_IDX 0 +#define mmSQ_EDC_DED_CNT 0x03a4 +#define mmSQ_EDC_DED_CNT_BASE_IDX 0 +#define mmSQ_EDC_INFO 0x03a5 +#define mmSQ_EDC_INFO_BASE_IDX 0 +#define mmSQ_EDC_CNT 0x03a6 +#define mmSQ_EDC_CNT_BASE_IDX 0 + +// addressBlock: gc_tpdec +// base address: 0x9400 +#define mmTA_EDC_CNT 0x0586 +#define mmTA_EDC_CNT_BASE_IDX 0 + +// addressBlock: gc_tcdec +// base address: 0xac00 +#define mmTCP_EDC_CNT 0x0b17 +#define mmTCP_EDC_CNT_BASE_IDX 0 +#define mmTCP_EDC_CNT_NEW 0x0b18 +#define mmTCP_EDC_CNT_NEW_BASE_IDX 0 +#define mmTCP_ATC_EDC_GATCL1_CNT 0x12b1 +#define mmTCP_ATC_EDC_GATCL1_CNT_BASE_IDX 0 +#define mmTCI_EDC_CNT 0x0b60 +#define mmTCI_EDC_CNT_BASE_IDX 0 +#define mmTCC_EDC_CNT 0x0b82 +#define mmTCC_EDC_CNT_BASE_IDX 0 +#define mmTCC_EDC_CNT2 0x0b83 +#define mmTCC_EDC_CNT2_BASE_IDX 0 +#define mmTCA_EDC_CNT 0x0bc5 +#define mmTCA_EDC_CNT_BASE_IDX 0 + +// addressBlock: gc_tpdec +// base address: 0x9400 +#define mmTD_EDC_CNT 0x052e +#define mmTD_EDC_CNT_BASE_IDX 0 +#define mmTA_EDC_CNT 0x0586 +#define mmTA_EDC_CNT_BASE_IDX 0 + 
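The mm<REG>/mm<REG>_BASE_IDX pairs in this new header follow the usual amdgpu convention: the register value is a dword offset within an addressBlock, and BASE_IDX selects which segment base that offset is added to. Below is a minimal standalone sketch of that resolution, assuming a SOC15-style segment table; the segment base values and the gc_reg_offset() helper are illustrative assumptions, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* offsets copied from the gc_9_4_1_offset.h hunk above */
#define mmGDS_EDC_CNT           0x05c5
#define mmGDS_EDC_CNT_BASE_IDX  0
#define mmTA_EDC_CNT            0x0586
#define mmTA_EDC_CNT_BASE_IDX   0

/* hypothetical per-segment dword bases for the GC block; the real values
 * come from the per-ASIC address tables at runtime, not from this header */
static const uint32_t gc_segment_base[] = { 0x00002000, 0x0000C000 };

/* resolve a mm<REG>/<REG>_BASE_IDX pair to an absolute dword offset */
static uint32_t gc_reg_offset(uint32_t reg, uint32_t base_idx)
{
	return gc_segment_base[base_idx] + reg;
}

int main(void)
{
	printf("GDS_EDC_CNT -> dword 0x%05x\n",
	       gc_reg_offset(mmGDS_EDC_CNT, mmGDS_EDC_CNT_BASE_IDX));
	printf("TA_EDC_CNT  -> dword 0x%05x\n",
	       gc_reg_offset(mmTA_EDC_CNT, mmTA_EDC_CNT_BASE_IDX));
	return 0;
}

In-tree, the equivalent lookup is done by the SOC15_REG_OFFSET() macro against adev's per-IP offset tables rather than a hard-coded array.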
+// addressBlock: gc_ea_gceadec2 +// base address: 0x9c00 +#define mmGCEA_EDC_CNT 0x0706 +#define mmGCEA_EDC_CNT_BASE_IDX 0 +#define mmGCEA_EDC_CNT2 0x0707 +#define mmGCEA_EDC_CNT2_BASE_IDX 0 +#define mmGCEA_EDC_CNT3 0x071b +#define mmGCEA_EDC_CNT3_BASE_IDX 0 + +// addressBlock: gc_gfxudec +// base address: 0x30000 +#define mmSCRATCH_REG0 0x2040 +#define mmSCRATCH_REG0_BASE_IDX 1 +#define mmSCRATCH_REG1 0x2041 +#define mmSCRATCH_REG1_BASE_IDX 1 +#define mmSCRATCH_REG2 0x2042 +#define mmSCRATCH_REG2_BASE_IDX 1 +#define mmSCRATCH_REG3 0x2043 +#define mmSCRATCH_REG3_BASE_IDX 1 +#define mmSCRATCH_REG4 0x2044 +#define mmSCRATCH_REG4_BASE_IDX 1 +#define mmSCRATCH_REG5 0x2045 +#define mmSCRATCH_REG5_BASE_IDX 1 +#define mmSCRATCH_REG6 0x2046 +#define mmSCRATCH_REG6_BASE_IDX 1 +#define mmSCRATCH_REG7 0x2047 +#define mmSCRATCH_REG7_BASE_IDX 1 +#define mmGRBM_GFX_INDEX 0x2200 +#define mmGRBM_GFX_INDEX_BASE_IDX 1 + +// addressBlock: gc_utcl2_atcl2dec +// base address: 0xa000 +#define mmATC_L2_CACHE_4K_DSM_INDEX 0x080e +#define mmATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_DSM_INDEX 0x080f +#define mmATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_4K_DSM_CNTL 0x0810 +#define mmATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_DSM_CNTL 0x0811 +#define mmATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 0 + +// addressBlock: gc_utcl2_vml2pfdec +// base address: 0xa100 +#define mmVML2_MEM_ECC_INDEX 0x0860 +#define mmVML2_MEM_ECC_INDEX_BASE_IDX 0 +#define mmVML2_WALKER_MEM_ECC_INDEX 0x0861 +#define mmVML2_WALKER_MEM_ECC_INDEX_BASE_IDX 0 +#define mmUTCL2_MEM_ECC_INDEX 0x0862 +#define mmUTCL2_MEM_ECC_INDEX_BASE_IDX 0 + +#define mmVML2_MEM_ECC_CNTL 0x0863 +#define mmVML2_MEM_ECC_CNTL_BASE_IDX 0 +#define mmVML2_WALKER_MEM_ECC_CNTL 0x0864 +#define mmVML2_WALKER_MEM_ECC_CNTL_BASE_IDX 0 +#define mmUTCL2_MEM_ECC_CNTL 0x0865 +#define mmUTCL2_MEM_ECC_CNTL_BASE_IDX 0 + +// addressBlock: gc_rlcpdec +// base address: 0x3b000 +#define mmRLC_EDC_CNT 0x4d40 +#define mmRLC_EDC_CNT_BASE_IDX 1 +#define mmRLC_EDC_CNT2 0x4d41 +#define mmRLC_EDC_CNT2_BASE_IDX 1 + +#endif
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h new file mode 100644 index 000000000000..f26246a600c6 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h @@ -0,0 +1,748 @@ +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _gc_9_4_1_SH_MASK_HEADER +#define _gc_9_4_1_SH_MASK_HEADER + +// addressBlock: gc_cppdec2 +//CPF_EDC_TAG_CNT +#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0 +#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2 +#define CPF_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L +#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL +//CPF_EDC_ROQ_CNT +#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1__SHIFT 0x0 +#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1__SHIFT 0x2 +#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2__SHIFT 0x4 +#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2__SHIFT 0x6 +#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1_MASK 0x00000003L +#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1_MASK 0x0000000CL +#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2_MASK 0x00000030L +#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2_MASK 0x000000C0L +//CPG_EDC_TAG_CNT +#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0 +#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2 +#define CPG_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L +#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL +//CPG_EDC_DMA_CNT +#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT__SHIFT 0x0 +#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT__SHIFT 0x2 +#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT 0x4 +#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT 0x6 +#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT_MASK 0x00000003L +#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT_MASK 0x0000000CL +#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK 0x00000030L +#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK 0x000000C0L +//CPC_EDC_SCRATCH_CNT +#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT 0x0 +#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT 0x2 +#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK 0x00000003L +#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK 0x0000000CL +//CPC_EDC_UCODE_CNT +#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT 0x0 +#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT 0x2 +#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK 0x00000003L +#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK 0x0000000CL +//DC_EDC_STATE_CNT +#define DC_EDC_STATE_CNT__DED_COUNT_ME1__SHIFT 0x0 +#define DC_EDC_STATE_CNT__SEC_COUNT_ME1__SHIFT 0x2 +#define DC_EDC_STATE_CNT__DED_COUNT_ME1_MASK 0x00000003L +#define 
DC_EDC_STATE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL +//DC_EDC_CSINVOC_CNT +#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1__SHIFT 0x0 +#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1__SHIFT 0x2 +#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1__SHIFT 0x4 +#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1__SHIFT 0x6 +#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1_MASK 0x00000003L +#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1_MASK 0x0000000CL +#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1_MASK 0x00000030L +#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L +//DC_EDC_RESTORE_CNT +#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1__SHIFT 0x0 +#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1__SHIFT 0x2 +#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1__SHIFT 0x4 +#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1__SHIFT 0x6 +#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1_MASK 0x00000003L +#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL +#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1_MASK 0x00000030L +#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L + +// addressBlock: gc_gdsdec +//GDS_EDC_CNT +#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0 +#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4 +#define GDS_EDC_CNT__UNUSED__SHIFT 0x6 +#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L +#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L +#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L +//GDS_EDC_GRBM_CNT +#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0 +#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2 +#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4 +#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L +#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL +#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L +//GDS_EDC_OA_DED +#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0 +#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1 +#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2 +#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3 +#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4 +#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5 +#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6 +#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7 +#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8 +#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9 +#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa +#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb +#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc +#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L +#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L +#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L +#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L +#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L +#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L +#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L +#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L +#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L +#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L +#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L +#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L +#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L +//GDS_EDC_OA_PHY_CNT +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0 +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2 +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4 +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6 +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT 0x8 +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT 0xa +#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xc +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L +#define 
GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK 0x00000300L +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK 0x00000C00L +#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFF000L +//GDS_EDC_OA_PIPE_CNT +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe +#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L +#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L + +// addressBlock: gc_shsdec +//SPI_EDC_CNT +#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT 0x0 +#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT 0x2 +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT 0x4 +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT 0x6 +#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT 0x8 +#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT 0xa +#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT__SHIFT 0xc +#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT__SHIFT 0xe +#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT 0x10 +#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT 0x12 +#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK 0x00000003L +#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK 0x0000000CL +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK 0x00000030L +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK 0x000000C0L +#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK 0x00000300L +#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK 0x00000C00L +#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT_MASK 0x00003000L +#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT_MASK 0x0000C000L +#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L +#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_sqdec +//SQC_EDC_CNT2 +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x12 +#define 
SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x000C0000L +//SQC_EDC_CNT3 +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L +//SQC_EDC_PARITY_CNT3 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT 0xa +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0xe +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x12 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x14 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x16 +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT 0x18 +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT 0x1a +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x1c +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x1e +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00003000L +#define 
SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x000C0000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x00300000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK 0x00C00000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK 0x03000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK 0x0C000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x30000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK 0xC0000000L +//SQC_EDC_CNT +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12 +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14 +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16 +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18 +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L +//SQ_EDC_SEC_CNT +#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0 +#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8 +#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10 +#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL +#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L +#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L +//SQ_EDC_DED_CNT +#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0 +#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8 +#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10 +#define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL +#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L +#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L +//SQ_EDC_INFO +#define 
SQ_EDC_INFO__WAVE_ID__SHIFT 0x0 +#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4 +#define SQ_EDC_INFO__SOURCE__SHIFT 0x6 +#define SQ_EDC_INFO__VM_ID__SHIFT 0x9 +#define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL +#define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L +#define SQ_EDC_INFO__SOURCE_MASK 0x000001C0L +#define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L +//SQ_EDC_CNT +#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0 +#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2 +#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4 +#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6 +#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8 +#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa +#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc +#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe +#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10 +#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12 +#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14 +#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16 +#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18 +#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a +#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L +#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL +#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L +#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L +#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L +#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L +#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L +#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L +#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L +#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L +#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L +#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L +#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L +#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L + +// addressBlock: gc_tpdec +//TA_EDC_CNT +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4 +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6 +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8 +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10 +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12 +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_tcdec +//TCP_EDC_CNT +#define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0 +#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8 +#define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10 +#define TCP_EDC_CNT__SEC_COUNT_MASK 0x000000FFL +#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0x0000FF00L +#define TCP_EDC_CNT__DED_COUNT_MASK 0x00FF0000L +//TCP_EDC_CNT_NEW +#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT 0x0 +#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT 0x2 +#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT 0x4 +#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT 0x6 +#define 
TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT__SHIFT 0x8 +#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT__SHIFT 0xa +#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT 0xc +#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT 0xe +#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT 0x10 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT 0x12 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT 0x14 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT 0x16 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT 0x18 +#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK 0x00000003L +#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK 0x0000000CL +#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK 0x00000030L +#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK 0x000000C0L +#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT_MASK 0x00000300L +#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT_MASK 0x00000C00L +#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK 0x00003000L +#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK 0x0000C000L +#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK 0x00030000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK 0x000C0000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK 0x00300000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK 0x00C00000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK 0x03000000L +//TCP_ATC_EDC_GATCL1_CNT +#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0 +#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL +//TCI_EDC_CNT +#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT__SHIFT 0x0 +#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT__SHIFT 0x2 +#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT_MASK 0x00000003L +#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT_MASK 0x0000000CL +//TCA_EDC_CNT +#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT__SHIFT 0x0 +#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT__SHIFT 0x2 +#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT__SHIFT 0x4 +#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT__SHIFT 0x6 +#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT_MASK 0x00000003L +#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT_MASK 0x0000000CL +#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT_MASK 0x00000030L +#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT_MASK 0x000000C0L +//TCC_EDC_CNT +#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT 0x0 +#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT 0x2 +#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT 0x4 +#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT 0x6 +#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT 0x8 +#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT 0xa +#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT 0xc +#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT 0xe +#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT 0x10 +#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT 0x12 +#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT__SHIFT 0x14 +#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT__SHIFT 0x16 +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT__SHIFT 0x18 +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT__SHIFT 0x1a +#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK 0x00000003L +#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK 0x0000000CL +#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK 0x00000030L +#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK 0x000000C0L +#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK 0x00000300L +#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK 0x00000C00L +#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK 0x00003000L +#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK 0x0000C000L +#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK 0x00030000L +#define 
TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK 0x000C0000L +#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT_MASK 0x00300000L +#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT_MASK 0x00C00000L +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT_MASK 0x03000000L +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT_MASK 0x0C000000L +//TCC_EDC_CNT2 +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT__SHIFT 0x0 +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT__SHIFT 0x2 +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT__SHIFT 0x4 +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT__SHIFT 0x6 +#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT__SHIFT 0x8 +#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT__SHIFT 0xa +#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT__SHIFT 0xc +#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT__SHIFT 0xe +#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT__SHIFT 0x10 +#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT__SHIFT 0x12 +#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT__SHIFT 0x14 +#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT__SHIFT 0x16 +#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT__SHIFT 0x18 +#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT__SHIFT 0x1a +#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT__SHIFT 0x1c +#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT__SHIFT 0x1e +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT_MASK 0x00000003L +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT_MASK 0x0000000CL +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT_MASK 0x00000030L +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT_MASK 0x000000C0L +#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT_MASK 0x00000300L +#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT_MASK 0x00000C00L +#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT_MASK 0x00003000L +#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT_MASK 0x0000C000L +#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT_MASK 0x00030000L +#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT_MASK 0x000C0000L +#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT_MASK 0x00300000L +#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT_MASK 0x00C00000L +#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT_MASK 0x03000000L +#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT_MASK 0x0C000000L +#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT_MASK 0x30000000L +#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT_MASK 0xC0000000L + +// addressBlock: gc_tpdec +//TD_EDC_CNT +#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT 0x0 +#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT 0x2 +#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT 0x4 +#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT 0x6 +#define TD_EDC_CNT__CS_FIFO_SEC_COUNT__SHIFT 0x8 +#define TD_EDC_CNT__CS_FIFO_DED_COUNT__SHIFT 0xa +#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK 0x00000003L +#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK 0x0000000CL +#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK 0x00000030L +#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK 0x000000C0L +#define TD_EDC_CNT__CS_FIFO_SEC_COUNT_MASK 0x00000300L +#define TD_EDC_CNT__CS_FIFO_DED_COUNT_MASK 0x00000C00L +//TA_EDC_CNT +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4 +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6 +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8 +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10 +#define 
TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12 +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_ea_gceadec2 +//GCEA_EDC_CNT +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT_MASK 0xC0000000L +//GCEA_EDC_CNT2 +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define 
GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L +//GCEA_EDC_CNT3 +#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x1c +#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L + +// addressBlock: gc_gfxudec +//GRBM_GFX_INDEX +#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0 +#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8 +#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10 +#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d +#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e +#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f +#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL +#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L +#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L +#define 
GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L +#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L +#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L + +// addressBlock: gc_utcl2_atcl2dec +//ATC_L2_CNTL +//ATC_L2_CACHE_4K_DSM_INDEX +#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATC_L2_CACHE_2M_DSM_INDEX +#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATC_L2_CACHE_4K_DSM_CNTL +#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L +//ATC_L2_CACHE_2M_DSM_CNTL +#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L + +// addressBlock: gc_utcl2_vml2pfdec +//VML2_MEM_ECC_INDEX +#define VML2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VML2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VML2_WALKER_MEM_ECC_INDEX +#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//UTCL2_MEM_ECC_INDEX +#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define UTCL2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VML2_MEM_ECC_CNTL +#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L +//VML2_WALKER_MEM_ECC_CNTL +#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L +//UTCL2_MEM_ECC_CNTL +#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L + +// addressBlock: gc_rlcpdec +//RLC_EDC_CNT +#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT__SHIFT 0x0 +#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT__SHIFT 0x2 +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4 +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT__SHIFT 0x6 +#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT__SHIFT 0x8 +#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT__SHIFT 0xa +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT__SHIFT 0xe +#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT__SHIFT 0x10 +#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT__SHIFT 0x12 +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14 +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT__SHIFT 0x16 +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT__SHIFT 0x18 +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT__SHIFT 0x1a +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT__SHIFT 0x1c +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT__SHIFT 0x1e +#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT_MASK 0x00000003L +#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT_MASK 0x0000000CL +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L +#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT_MASK 0x00000300L +#define 
RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT_MASK 0x00000C00L +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L +#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT_MASK 0x00030000L +#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT_MASK 0x000C0000L +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT_MASK 0x03000000L +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT_MASK 0x0C000000L +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT_MASK 0x30000000L +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT_MASK 0xC0000000L +//RLC_EDC_CNT2 +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT__SHIFT 0x0 +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT__SHIFT 0x2 +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4 +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT__SHIFT 0x6 +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT__SHIFT 0x8 +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT__SHIFT 0xa +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT__SHIFT 0xe +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT__SHIFT 0x10 +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT__SHIFT 0x12 +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14 +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT__SHIFT 0x16 +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT__SHIFT 0x18 +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT__SHIFT 0x1a +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT__SHIFT 0x1c +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT__SHIFT 0x1e +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT_MASK 0x00000003L +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT_MASK 0x0000000CL +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT_MASK 0x00000300L +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT_MASK 0x00000C00L +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT_MASK 0x00030000L +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT_MASK 0x000C0000L +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT_MASK 0x03000000L +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT_MASK 0x0C000000L +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT_MASK 0x30000000L +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT_MASK 0xC0000000L + +#endif
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h index 40dfbf16bd34..111a71b434e2 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h @@ -11185,6 +11185,14 @@ #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -11193,6 +11201,14 @@ #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA0_DSM_CNTL #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -14197,6 +14213,14 @@ #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -14205,6 +14229,14 @@ #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define 
MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA1_DSM_CNTL #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -17209,6 +17241,14 @@ #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -17217,6 +17257,14 @@ #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA2_DSM_CNTL #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -20221,6 +20269,14 @@ #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -20229,6 +20285,14 @@ #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 
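The MAM_DxMEM fields added above follow the packing convention used throughout these EDC registers: each memory instance gets a 2-bit saturating error counter, recovered by masking and shifting the raw register value. A minimal sketch of the decode, assuming a raw MMEA3_EDC_CNT2 value already read back through the driver's register accessor (the helper name and its raw parameter are illustrative, not part of the patch):

static inline u32 mmea3_mam_d0mem_sed_count(u32 raw)
{
	/* Bits 17:16 hold the 2-bit MAM_D0MEM single-error-detect count. */
	return (raw & MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK) >>
	       MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT;
}

Each of the other SED/DED fields decodes the same way with its own mask/shift pair.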
//MMEA3_DSM_CNTL #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -23233,6 +23297,14 @@ #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -23241,6 +23313,14 @@ #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA4_DSM_CNTL #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -34952,6 +35032,14 @@ #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -34960,6 +35048,14 @@ #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA5_DSM_CNTL #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define 
MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -37964,6 +38060,14 @@ #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -37972,6 +38076,14 @@ #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA6_DSM_CNTL #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -40976,6 +41088,14 @@ #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -40984,6 +41104,14 @@ #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA7_DSM_CNTL #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 diff --git 
a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 99469479e277..99ad4ddbe12f 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -21,6 +21,7 @@ */ #include <linux/firmware.h> +#include <linux/pci.h> #include "pp_debug.h" #include "amdgpu.h" @@ -1137,6 +1138,23 @@ static int smu_smc_table_hw_init(struct smu_context *smu, ret = smu_system_features_control(smu, true); if (ret) return ret; + + if (adev->asic_type == CHIP_NAVI10) { + if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 || + adev->pdev->revision == 0xc3 || + adev->pdev->revision == 0xca || + adev->pdev->revision == 0xcb)) || + (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 || + adev->pdev->revision == 0xf4 || + adev->pdev->revision == 0xf5 || + adev->pdev->revision == 0xf6))) { + ret = smu_disable_umc_cdr_12gbps_workaround(smu); + if (ret) { + pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n"); + return ret; + } + } + } } if (adev->asic_type != CHIP_ARCTURUS) { ret = smu_notify_display_change(smu); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 4e8ab139bb3b..689072a312a7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; - clocks->data[i].latency_in_us = latency_required ? - smu10_get_mem_latency(hwmgr, - pclk_vol_table->entries[i].clk) : - 0; - clocks->num_levels++; + if (pclk_vol_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = + pclk_vol_table->entries[i].clk * 10; + clocks->data[clocks->num_levels].latency_in_us = latency_required ? 
+ smu10_get_mem_latency(hwmgr, + pclk_vol_table->entries[i].clk) : + 0; + clocks->num_levels++; + } } return 0; @@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; - clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol; - clocks->num_levels++; + if (pclk_vol_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; + clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol; + clocks->num_levels++; + } } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index d70abada66bf..bf04cfefb283 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -720,7 +720,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; /* param1 is for corresponding std voltage */ - data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + data->dpm_table.vddc_table.dpm_levels[i].enabled = true; } data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; @@ -730,7 +730,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) /* Initialize Vddci DPM table based on allow Mclk values */ for (i = 0; i < allowed_vdd_mclk_table->count; i++) { data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.vddci_table.dpm_levels[i].enabled = 1; + data->dpm_table.vddci_table.dpm_levels[i].enabled = true; } data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count; } @@ -744,7 +744,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) */ for (i = 0; i < allowed_vdd_mclk_table->count; i++) { data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = true; } data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index b0591a8dda41..97b6714e83e6 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -273,6 +273,7 @@ struct smu_table_context uint8_t thermal_controller_type; void *overdrive_table; + void *boot_overdrive_table; }; struct smu_dpm_context { @@ -565,6 +566,7 @@ struct pptable_funcs { int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int (*override_pcie_parameters)(struct smu_context *smu); uint32_t (*get_pptable_power_limit)(struct smu_context *smu); + int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu); }; int smu_load_microcode(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index d8c9b7f91fcc..a5b4df146713 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -170,6 +170,8 @@ __SMU_DUMMY_MAP(SetSoftMinJpeg), \ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ __SMU_DUMMY_MAP(DFCstateControl), \ + 
__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \ + __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \ #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h index 373861ddccd0..406bfd187ce8 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h @@ -120,7 +120,10 @@ #define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45 #define PPSMC_MSG_BacoAudioD3PME 0x48 -#define PPSMC_Message_Count 0x49 +#define PPSMC_MSG_DALDisableDummyPstateChange 0x49 +#define PPSMC_MSG_DALEnableDummyPstateChange 0x4A + +#define PPSMC_Message_Count 0x4B typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 93c66c69ca28..19a9846b730e 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -119,6 +119,10 @@ static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg), MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3), + MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange), + MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange), + MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm), + MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive), }; static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = { @@ -737,6 +741,15 @@ static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_tabl return od_table->cap[feature]; } +static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table, + enum SMU_11_0_ODSETTING_ID setting, + uint32_t *min, uint32_t *max) +{ + if (min) + *min = od_table->min[setting]; + if (max) + *max = od_table->max[setting]; +} static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) @@ -755,6 +768,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; struct smu_11_0_overdrive_table *od_settings = smu->od_settings; + uint32_t min_value, max_value; switch (clk_type) { case SMU_GFXCLK: @@ -843,7 +857,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) break; size += sprintf(buf + size, "OD_MCLK:\n"); - size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax); + size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax); break; case SMU_OD_VDDC_CURVE: if (!smu->od_enabled || !od_table || !od_settings) @@ -868,6 +882,55 @@ static int navi10_print_clk_levels(struct smu_context *smu, size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE); } break; + case SMU_OD_RANGE: + if (!smu->od_enabled || !od_table || !od_settings) + break; + size = sprintf(buf, "%s:\n", "OD_RANGE"); + + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, + &min_value, NULL); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX, + NULL, &max_value); + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if 
(navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, + &min_value, &max_value); + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", + min_value, max_value); + } + + break; default: break; } @@ -949,6 +1012,8 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu, case SMU_GFXCLK: case SMU_DCEFCLK: case SMU_SOCCLK: + case SMU_MCLK: + case SMU_UCLK: ret = smu_get_dpm_level_count(smu, clk_type, &level_count); if (ret) return ret; @@ -1871,6 +1936,28 @@ static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_tab return 0; } +static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, + uint16_t *voltage, + uint32_t freq) +{ + uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16); + uint32_t value = 0; + int ret; + + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_GetVoltageByDpm, + param); + if (ret) { + pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); + return ret; + } + + smu_read_smc_arg(smu, &value); + *voltage = (uint16_t)value; + + return 0; +} + static int navi10_setup_od_limits(struct smu_context *smu) { struct smu_11_0_overdrive_table *overdrive_table = NULL; struct smu_11_0_powerplay_table *powerplay_table = NULL; @@ -1890,23 +1977,54 @@ static int navi10_setup_od_limits(struct smu_context *smu) { } static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) { - OverDriveTable_t *od_table; + OverDriveTable_t *od_table, *boot_od_table; int ret = 0; ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); if (ret) return ret; + od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; + boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table; if (initialize) { ret = navi10_setup_od_limits(smu); if (ret) { pr_err("Failed to retrieve board OD limits\n"); return ret; } + if (od_table) { + if (!od_table->GfxclkVolt1) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt1, + od_table->GfxclkFreq1); + if (ret) + od_table->GfxclkVolt1 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1; + } + 
if (!od_table->GfxclkVolt2) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt2, + od_table->GfxclkFreq2); + if (ret) + od_table->GfxclkVolt2 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2; + } + + if (!od_table->GfxclkVolt3) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt3, + od_table->GfxclkFreq3); + if (ret) + od_table->GfxclkVolt3 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3; + } + } + } - od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; if (od_table) { navi10_dump_od_table(od_table); } @@ -2002,6 +2120,13 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL return ret; od_table->UclkFmax = input[1]; break; + case PP_OD_RESTORE_DEFAULT_TABLE: + if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) { + pr_err("Overdrive table was not initialized!\n"); + return -EINVAL; + } + memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t)); + break; case PP_OD_COMMIT_DPM_TABLE: navi10_dump_od_table(od_table); ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true); @@ -2091,6 +2216,61 @@ static int navi10_run_btc(struct smu_context *smu) return ret; } +static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable) +{ + int result = 0; + + if (!enable) + result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE); + else + result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE); + + return result; +} + +static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu) +{ + uint32_t uclk_count, uclk_min, uclk_max; + uint32_t smu_version; + int ret = 0; + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) + return ret; + + /* This workaround is available only for 42.50 or later SMC firmware */ + if (smu_version < 0x2A3200) + return 0; + + ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count); + if (ret) + return ret; + + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min); + if (ret) + return ret; + + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max); + if (ret) + return ret; + + /* Force UCLK out of the highest DPM */ + ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min); + if (ret) + return ret; + + /* Revert the UCLK Hardmax */ + ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max); + if (ret) + return ret; + + /* + * In this case, SMU already disabled dummy pstate during enablement + * of UCLK DPM, we have to re-enable it.
+ */ + return navi10_dummy_pstate_control(smu, true); +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -2185,6 +2365,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .od_edit_dpm_table = navi10_od_edit_dpm_table, .get_pptable_power_limit = navi10_get_pptable_power_limit, .run_btc = navi10_run_btc, + .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 783319ec8bf9..7bd200ffcda8 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -207,4 +207,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0) +#define smu_disable_umc_cdr_12gbps_workaround(smu) \ + ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 02f8c9cb89d9..0dc49479a7eb 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1882,6 +1882,12 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, pr_err("Failed to export overdrive table!\n"); return ret; } + if (!table_context->boot_overdrive_table) { + table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL); + if (!table_context->boot_overdrive_table) { + return -ENOMEM; + } + } } ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true); if (ret) { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index a3915bfcce81..275dbf65f1a0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -128,20 +128,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr, if (enable) { PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", return -EINVAL); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", return -EINVAL); } else { PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", return -EINVAL); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", return -EINVAL); } @@
-158,13 +158,13 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", return -EINVAL); smc_features_low = smu9_get_argument(hwmgr); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", return -EINVAL); smc_features_high = smu9_get_argument(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index 0db57fb83d30..49e5ef3e3876 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -316,20 +316,20 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr, if (enable) { PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", return ret); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", return ret); } else { PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", return ret); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", return ret); } @@ -347,12 +347,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", return ret); smc_features_low = vega20_get_argument(hwmgr); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", return ret); smc_features_high = vega20_get_argument(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 38febd5ca4da..4ad8d6c14ee5 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -1706,22 +1706,11 @@ static int vega20_set_default_od_settings(struct smu_context *smu, struct smu_table_context *table_context = &smu->smu_table; int ret; - if (initialize) { - if (table_context->overdrive_table) - return -EINVAL; - - table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL); - - if (!table_context->overdrive_table) - return -ENOMEM; - - ret = smu_update_table(smu, 
SMU_TABLE_OVERDRIVE, 0, - table_context->overdrive_table, false); - if (ret) { - pr_err("Failed to export over drive table!\n"); - return ret; - } + ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); + if (ret) + return ret; + if (initialize) { ret = vega20_set_default_od8_setttings(smu); if (ret) return ret; @@ -2778,12 +2767,11 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, break; case PP_OD_RESTORE_DEFAULT_TABLE: - ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false); - if (ret) { - pr_err("Failed to export over drive table!\n"); - return ret; + if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) { + pr_err("Overdrive table was not initialized!\n"); + return -EINVAL; } - + memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t)); break; case PP_OD_COMMIT_DPM_TABLE: diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h index df8336b593f7..ff94f3f6f264 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core.h +++ b/drivers/gpu/drm/nouveau/dispnv50/core.h @@ -6,6 +6,7 @@ struct nv50_core { const struct nv50_core_func *func; struct nv50_dmac chan; + bool assign_windows; }; int nv50_core_new(struct nouveau_drm *, struct nv50_core **); @@ -18,6 +19,10 @@ struct nv50_core_func { struct nvif_device *); void (*update)(struct nv50_core *, u32 *interlock, bool ntfy); + struct { + void (*owner)(struct nv50_core *); + } wndw; + const struct nv50_head_func *head; const struct nv50_outp_func { void (*ctrl)(struct nv50_core *, int or, u32 ctrl, @@ -48,6 +53,7 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **); int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **); int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *); void corec37d_update(struct nv50_core *, u32 *, bool); +void corec37d_wndw_owner(struct nv50_core *); extern const struct nv50_outp_func sorc37d; int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **); diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c index 40d9b654ab8c..3b36dc8d36b2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c @@ -25,6 +25,20 @@ #include <nouveau_bo.h> void +corec37d_wndw_owner(struct nv50_core *core) +{ + const u32 windows = 8; /*XXX*/ + u32 *push, i; + if ((push = evo_wait(&core->chan, 2 * windows))) { + for (i = 0; i < windows; i++) { + evo_mthd(push, 0x1000 + (i * 0x080), 1); + evo_data(push, i >> 1); + } + evo_kick(push, &core->chan); + } +} + +void corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy) { u32 *push; @@ -76,20 +90,18 @@ corec37d_init(struct nv50_core *core) { const u32 windows = 8; /*XXX*/ u32 *push, i; - if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) { + if ((push = evo_wait(&core->chan, 2 + 5 * windows))) { evo_mthd(push, 0x0208, 1); evo_data(push, core->chan.sync.handle); for (i = 0; i < windows; i++) { - evo_mthd(push, 0x1000 + (i * 0x080), 3); - evo_data(push, i >> 1); + evo_mthd(push, 0x1004 + (i * 0x080), 2); evo_data(push, 0x0000001f); evo_data(push, 0x00000000); evo_mthd(push, 0x1010 + (i * 0x080), 1); evo_data(push, 0x00127fff); } - evo_mthd(push, 0x0200, 1); - evo_data(push, 0x00000001); evo_kick(push, &core->chan); + core->assign_windows = true; } } @@ -99,6 +111,7 @@ corec37d = { .ntfy_init = corec37d_ntfy_init, .ntfy_wait_done = 
corec37d_ntfy_wait_done, .update = corec37d_update, + .wndw.owner = corec37d_wndw_owner, .head = &headc37d, .sor = &sorc37d, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c index b606d68cda10..147adcd60937 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c @@ -27,20 +27,18 @@ corec57d_init(struct nv50_core *core) { const u32 windows = 8; /*XXX*/ u32 *push, i; - if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) { + if ((push = evo_wait(&core->chan, 2 + 5 * windows))) { evo_mthd(push, 0x0208, 1); evo_data(push, core->chan.sync.handle); for (i = 0; i < windows; i++) { - evo_mthd(push, 0x1000 + (i * 0x080), 3); - evo_data(push, i >> 1); + evo_mthd(push, 0x1004 + (i * 0x080), 2); evo_data(push, 0x0000000f); evo_data(push, 0x00000000); evo_mthd(push, 0x1010 + (i * 0x080), 1); evo_data(push, 0x00117fff); } - evo_mthd(push, 0x0200, 1); - evo_data(push, 0x00000001); evo_kick(push, &core->chan); + core->assign_windows = true; } } @@ -50,6 +48,7 @@ corec57d = { .ntfy_init = corec37d_ntfy_init, .ntfy_wait_done = corec37d_ntfy_wait_done, .update = corec37d_update, + .wndw.owner = corec37d_wndw_owner, .head = &headc57d, .sor = &sorc37d, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 2f123082c85d..a3dc2ba19fb2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1933,6 +1933,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) struct nouveau_drm *drm = nouveau_drm(dev); struct nv50_disp *disp = nv50_disp(dev); struct nv50_atom *atom = nv50_atom(state); + struct nv50_core *core = disp->core; struct nv50_outp_atom *outp, *outt; u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {}; int i; @@ -2051,6 +2052,21 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } } + /* Update window->head assignment. + * + * This has to happen in an update that's not interlocked with + * any window channels to avoid hitting HW error checks. + * + *TODO: Proper handling of window ownership (Turing apparently + * supports non-fixed mappings). + */ + if (core->assign_windows) { + core->func->wndw.owner(core); + core->func->update(core, interlock, false); + core->assign_windows = false; + interlock[NV50_DISP_INTERLOCK_CORE] = 0; + } + /* Update plane(s). 
*/ for_each_new_plane_in_state(state, plane, new_plane_state, i) { struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c index 3aa2cc3af1e2..c1032527f791 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c @@ -155,6 +155,12 @@ gv100_disp_intr_ctrl_disp(struct nv50_disp *disp) if (stat & 0x00000008) stat &= ~0x00000008; + if (stat & 0x00000080) { + u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000); + nvkm_warn(subdev, "error %08x\n", error); + stat &= ~0x00000080; + } + if (stat & 0x00000100) { unsigned long wndws = nvkm_rd32(device, 0x611858); unsigned long other = nvkm_rd32(device, 0x61185c); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 856526cb2caf..d07c7db0c815 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -127,6 +127,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc) DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); + msleep(10); + WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); @@ -672,7 +674,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index) { struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc; - int i; radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); if (radeon_crtc == NULL) @@ -701,12 +702,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index) radeon_crtc->mode_set.num_connectors = 0; #endif - for (i = 0; i < 256; i++) { - radeon_crtc->lut_r[i] = i << 2; - radeon_crtc->lut_g[i] = i << 2; - radeon_crtc->lut_b[i] = i << 2; - } - if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)) radeon_atombios_init_crtc(dev, radeon_crtc); else diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index fd470d6bf3f4..96565171d13e 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -327,7 +327,6 @@ enum radeon_flip_status { struct radeon_crtc { struct drm_crtc base; int crtc_id; - u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; bool cursor_out_of_bounds; diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index ec79e8e5ad3c..63bccd201b97 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -45,7 +45,7 @@ * @guilty: atomic_t set to 1 when a job on this queue * is found to be guilty causing a timeout * - * Note: the sched_list should have atleast one element to schedule + * Note: the sched_list should have at least one element to schedule * the entity * * Returns 0 on success or a negative error code on failure. 
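A few hunks up, the nv50 commit-tail change defers window-ownership programming to the first atomic commit: corec37d_init()/corec57d_init() no longer push the owner methods themselves, they only set core->assign_windows, and nv50_disp_atomic_commit_tail() later flushes the assignment with a core-only update that no window channel is interlocked with. The mapping itself is simple; the stand-alone C sketch below is illustrative only — the method stride 0x1000 + i * 0x80 and the "i >> 1" pairing are read off the patch, everything else is assumption — and just prints the default owner of each of the eight windows:

#include <stdio.h>

int main(void)
{
	/* eight windows, as hard-coded ("XXX") in corec37d_init() */
	const unsigned int windows = 8;

	/*
	 * corec37d_wndw_owner() pushes one method plus one data word per
	 * window, hence the evo_wait() reservation of 2 * windows.  The
	 * data word "i >> 1" assigns windows in pairs: 0,1 -> head 0,
	 * 2,3 -> head 1, and so on.
	 */
	for (unsigned int i = 0; i < windows; i++)
		printf("window %u: mthd 0x%04x -> head %u\n",
		       i, 0x1000 + i * 0x80, i >> 1);

	return 0;
}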
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index aa9e49f04988..bd268028fb3d 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1037,23 +1037,9 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, free_pages((unsigned long)virt, get_order(size)); } -static int host1x_drm_probe(struct host1x_device *dev) +static bool host1x_drm_wants_iommu(struct host1x_device *dev) { - struct drm_driver *driver = &tegra_drm_driver; struct iommu_domain *domain; - struct tegra_drm *tegra; - struct drm_device *drm; - int err; - - drm = drm_dev_alloc(driver, &dev->dev); - if (IS_ERR(drm)) - return PTR_ERR(drm); - - tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); - if (!tegra) { - err = -ENOMEM; - goto put; - } /* * If the Tegra DRM clients are backed by an IOMMU, push buffers are @@ -1082,9 +1068,38 @@ static int host1x_drm_probe(struct host1x_device *dev) * up the device tree appropriately. This is considered a problem * of integration, so care must be taken for the DT to be consistent. */ - domain = iommu_get_domain_for_dev(drm->dev->parent); + domain = iommu_get_domain_for_dev(dev->dev.parent); + + /* + * Tegra20 and Tegra30 don't support addressing memory beyond the + * 32-bit boundary, so the regular GATHER opcodes will always be + * sufficient and whether or not the host1x is attached to an IOMMU + * doesn't matter. + */ + if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32)) + return true; + + return domain != NULL; +} + +static int host1x_drm_probe(struct host1x_device *dev) +{ + struct drm_driver *driver = &tegra_drm_driver; + struct tegra_drm *tegra; + struct drm_device *drm; + int err; + + drm = drm_dev_alloc(driver, &dev->dev); + if (IS_ERR(drm)) + return PTR_ERR(drm); + + tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); + if (!tegra) { + err = -ENOMEM; + goto put; + } - if (domain && iommu_present(&platform_bus_type)) { + if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) { tegra->domain = iommu_domain_alloc(&platform_bus_type); if (!tegra->domain) { err = -ENOMEM; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 1237df157e05..623768100c6a 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -60,8 +60,16 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, /* * If we've manually mapped the buffer object through the IOMMU, make * sure to return the IOVA address of our mapping. + * + * Similarly, for buffers that have been allocated by the DMA API the + * physical address can be used for devices that are not attached to + * an IOMMU. For these devices, callers must pass a valid pointer via + * the @phys argument. + * + * Imported buffers were also already mapped at import time, so the + * existing mapping can be reused. */ - if (phys && obj->mm) { + if (phys) { *phys = obj->iova; return NULL; } diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index cadcdd9ea427..9ccfb56e9b01 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -3,6 +3,8 @@ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
*/ +#include <linux/iommu.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> @@ -107,21 +109,27 @@ const struct drm_plane_funcs tegra_plane_funcs = { static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) { + struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev); unsigned int i; int err; for (i = 0; i < state->base.fb->format->num_planes; i++) { struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + dma_addr_t phys_addr, *phys; + struct sg_table *sgt; - if (!dc->client.group) { - struct sg_table *sgt; + if (!domain || dc->client.group) + phys = &phys_addr; + else + phys = NULL; - sgt = host1x_bo_pin(dc->dev, &bo->base, NULL); - if (IS_ERR(sgt)) { - err = PTR_ERR(sgt); - goto unpin; - } + sgt = host1x_bo_pin(dc->dev, &bo->base, phys); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } + if (sgt) { err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); if (err == 0) { @@ -143,7 +151,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) state->iova[i] = sg_dma_address(sgt->sgl); state->sgt[i] = sgt; } else { - state->iova[i] = bo->iova; + state->iova[i] = phys_addr; } } @@ -156,9 +164,11 @@ unpin: struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); struct sg_table *sgt = state->sgt[i]; - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); - host1x_bo_unpin(dc->dev, &bo->base, sgt); + if (sgt) + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; state->sgt[i] = NULL; } @@ -172,17 +182,13 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) for (i = 0; i < state->base.fb->format->num_planes; i++) { struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + struct sg_table *sgt = state->sgt[i]; - if (!dc->client.group) { - struct sg_table *sgt = state->sgt[i]; - - if (sgt) { - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); - host1x_bo_unpin(dc->dev, &bo->base, sgt); - } - } + if (sgt) + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; state->sgt[i] = NULL; } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 41d24949478e..81226a4953c1 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -3915,6 +3915,17 @@ static int tegra_sor_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sor); pm_runtime_enable(&pdev->dev); + INIT_LIST_HEAD(&sor->client.list); + sor->client.ops = &sor_client_ops; + sor->client.dev = &pdev->dev; + + err = host1x_client_register(&sor->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to register host1x client: %d\n", + err); + goto rpm_disable; + } + /* * On Tegra210 and earlier, provide our own implementation for the * pad output clock. 
@@ -3922,16 +3933,17 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->clk_pad) { char *name; + name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout", + sor->index); + if (!name) { + err = -ENOMEM; + goto unregister; + } + err = host1x_client_resume(&sor->client); if (err < 0) { dev_err(sor->dev, "failed to resume: %d\n", err); - goto remove; - } - - name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index); - if (!name) { - err = -ENOMEM; - goto remove; + goto unregister; } sor->clk_pad = tegra_clk_sor_pad_register(sor, name); @@ -3940,24 +3952,17 @@ static int tegra_sor_probe(struct platform_device *pdev) if (IS_ERR(sor->clk_pad)) { err = PTR_ERR(sor->clk_pad); - dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n", - err); - goto remove; - } - - INIT_LIST_HEAD(&sor->client.list); - sor->client.ops = &sor_client_ops; - sor->client.dev = &pdev->dev; - - err = host1x_client_register(&sor->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to register host1x client: %d\n", + dev_err(sor->dev, "failed to register SOR pad clock: %d\n", err); - goto remove; + goto unregister; } return 0; +unregister: + host1x_client_unregister(&sor->client); +rpm_disable: + pm_runtime_disable(&pdev->dev); remove: if (sor->ops && sor->ops->remove) sor->ops->remove(sor); @@ -3971,8 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev) struct tegra_sor *sor = platform_get_drvdata(pdev); int err; - pm_runtime_disable(&pdev->dev); - err = host1x_client_unregister(&sor->client); if (err < 0) { dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", @@ -3980,6 +3983,8 @@ static int tegra_sor_remove(struct platform_device *pdev) return err; } + pm_runtime_disable(&pdev->dev); + if (sor->ops && sor->ops->remove) { err = sor->ops->remove(sor); if (err < 0) diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 60b2fedd0061..a10643aa89aa 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -8,6 +8,7 @@ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/host1x.h> +#include <linux/iommu.h> #include <linux/kref.h> #include <linux/module.h> #include <linux/scatterlist.h> @@ -101,9 +102,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) { struct host1x_client *client = job->client; struct device *dev = client->dev; + struct iommu_domain *domain; unsigned int i; int err; + domain = iommu_get_domain_for_dev(dev); job->num_unpins = 0; for (i = 0; i < job->num_relocs; i++) { @@ -117,7 +120,19 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - if (client->group) + /* + * If the client device is not attached to an IOMMU, the + * physical address of the buffer object can be used. + * + * Similarly, when an IOMMU domain is shared between all + * host1x clients, the IOVA is already available, so no + * need to map the buffer object again. + * + * XXX Note that this isn't always safe to do because it + * relies on an assumption that no cache maintenance is + * needed on the buffer objects. 
+ */ + if (!domain || client->group) phys = &phys_addr; else phys = NULL; @@ -176,6 +191,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) dma_addr_t phys_addr; unsigned long shift; struct iova *alloc; + dma_addr_t *phys; unsigned int j; g->bo = host1x_bo_get(g->bo); @@ -184,7 +200,17 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - sgt = host1x_bo_pin(host->dev, g->bo, NULL); + /** + * If the host1x is not attached to an IOMMU, there is no need + * to map the buffer object for the host1x, since the physical + * address can simply be used. + */ + if (!iommu_get_domain_for_dev(host->dev)) + phys = &phys_addr; + else + phys = NULL; + + sgt = host1x_bo_pin(host->dev, g->bo, phys); if (IS_ERR(sgt)) { err = PTR_ERR(sgt); goto unpin; @@ -214,7 +240,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) job->unpins[job->num_unpins].size = gather_size; phys_addr = iova_dma_addr(&host->iova, alloc); - } else { + } else if (sgt) { err = dma_map_sg(host->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); if (!err) { @@ -222,6 +248,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } + job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; job->unpins[job->num_unpins].dev = host->dev; phys_addr = sg_dma_address(sgt->sgl); } @@ -229,7 +256,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) job->addr_phys[job->num_unpins] = phys_addr; job->gather_addr_phys[i] = phys_addr; - job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; job->unpins[job->num_unpins].bo = g->bo; job->unpins[job->num_unpins].sgt = sgt; job->num_unpins++; diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 8a7732c0bef3..286d3cfda7de 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c @@ -259,7 +259,7 @@ static int scmi_hwmon_probe(struct scmi_device *sdev) } static const struct scmi_device_id scmi_id_table[] = { - { SCMI_PROTOCOL_SENSOR }, + { SCMI_PROTOCOL_SENSOR, "hwmon" }, { }, }; MODULE_DEVICE_TABLE(scmi, scmi_id_table); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 6a0aa76859f3..2ddca08f8a76 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -367,7 +367,8 @@ comment "I2C system bus drivers (mostly embedded / system-on-chip)" config I2C_ALTERA tristate "Altera Soft IP I2C" - depends on (ARCH_SOCFPGA || NIOS2) && OF + depends on ARCH_SOCFPGA || NIOS2 || COMPILE_TEST + depends on OF help If you say yes to this option, support will be included for the Altera Soft IP I2C interfaces on SoCFPGA and Nios2 architectures. @@ -387,7 +388,7 @@ config I2C_ASPEED config I2C_AT91 tristate "Atmel AT91 I2C Two-Wire interface (TWI)" - depends on ARCH_AT91 + depends on ARCH_AT91 || COMPILE_TEST help This supports the use of the I2C interface on Atmel AT91 processors. @@ -440,7 +441,8 @@ config I2C_AXXIA config I2C_BCM2835 tristate "Broadcom BCM2835 I2C controller" - depends on ARCH_BCM2835 || ARCH_BRCMSTB + depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST + depends on COMMON_CLK help If you say yes to this option, support will be included for the BCM2835 I2C controller. @@ -463,8 +465,8 @@ config I2C_BCM_IPROC config I2C_BCM_KONA tristate "BCM Kona I2C adapter" - depends on ARCH_BCM_MOBILE - default y + depends on ARCH_BCM_MOBILE || COMPILE_TEST + default y if ARCH_BCM_MOBILE help If you say yes to this option, support will be included for the I2C interface on the Broadcom Kona family of processors. 
@@ -511,7 +513,7 @@ config I2C_CPM config I2C_DAVINCI tristate "DaVinci I2C driver" - depends on ARCH_DAVINCI || ARCH_KEYSTONE + depends on ARCH_DAVINCI || ARCH_KEYSTONE || COMPILE_TEST help Support for TI DaVinci I2C controller driver. @@ -572,7 +574,7 @@ config I2C_DESIGNWARE_BAYTRAIL config I2C_DIGICOLOR tristate "Conexant Digicolor I2C driver" - depends on ARCH_DIGICOLOR + depends on ARCH_DIGICOLOR || COMPILE_TEST help Support for Conexant Digicolor SoCs (CX92755) I2C controller driver. @@ -610,11 +612,12 @@ config I2C_EMEV2 I2C interface on the Renesas Electronics EM/EV family of processors. config I2C_EXYNOS5 - tristate "Exynos5 high-speed I2C driver" - depends on ARCH_EXYNOS && OF - default y + tristate "Exynos high-speed I2C driver" + depends on OF + depends on ARCH_EXYNOS || COMPILE_TEST + default y if ARCH_EXYNOS help - High-speed I2C controller on Exynos5 based Samsung SoCs. + High-speed I2C controller on Exynos5 and newer Samsung SoCs. config I2C_GPIO tristate "GPIO-based bitbanging I2C" @@ -634,7 +637,7 @@ config I2C_GPIO_FAULT_INJECTOR config I2C_HIGHLANDER tristate "Highlander FPGA SMBus interface" - depends on SH_HIGHLANDER + depends on SH_HIGHLANDER || COMPILE_TEST help If you say yes to this option, support will be included for the SMBus interface located in the FPGA on various Highlander @@ -686,7 +689,7 @@ config I2C_IMX_LPI2C config I2C_IOP3XX tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface" - depends on ARCH_IOP32X || ARCH_IXP4XX + depends on ARCH_IOP32X || ARCH_IXP4XX || COMPILE_TEST help Say Y here if you want to use the IIC bus controller on the Intel IOPx3xx I/O Processors or IXP4xx Network Processors. @@ -726,6 +729,7 @@ config I2C_LPC2K config I2C_MESON tristate "Amlogic Meson I2C controller" depends on ARCH_MESON || COMPILE_TEST + depends on COMMON_CLK help If you say yes to this option, support will be included for the I2C interface on the Amlogic Meson family of SoCs. @@ -759,7 +763,7 @@ config I2C_MT7621 config I2C_MV64XXX tristate "Marvell mv64xxx I2C Controller" - depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU + depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU || COMPILE_TEST help If you say yes to this option, support will be included for the built-in I2C interface on the Marvell 64xxx line of host bridges. @@ -770,7 +774,7 @@ config I2C_MV64XXX config I2C_MXS tristate "Freescale i.MX28 I2C interface" - depends on SOC_IMX28 + depends on SOC_IMX28 || COMPILE_TEST select STMP_DEVICE help Say Y here if you want to use the I2C bus controller on @@ -799,7 +803,7 @@ config I2C_OCORES config I2C_OMAP tristate "OMAP I2C adapter" - depends on ARCH_OMAP || ARCH_K3 + depends on ARCH_OMAP || ARCH_K3 || COMPILE_TEST default y if MACH_OMAP_H3 || MACH_OMAP_OSK help If you say yes to this option, support will be included for the @@ -833,7 +837,7 @@ config I2C_PCA_PLATFORM config I2C_PMCMSP tristate "PMC MSP I2C TWI Controller" - depends on PMC_MSP + depends on PMC_MSP || COMPILE_TEST help This driver supports the PMC TWI controller on MSP devices. 
@@ -842,7 +846,7 @@ config I2C_PMCMSP config I2C_PNX tristate "I2C bus support for Philips PNX and NXP LPC targets" - depends on ARCH_LPC32XX + depends on ARCH_LPC32XX || COMPILE_TEST help This driver supports the Philips IP3204 I2C IP block master and/or slave controller @@ -863,7 +867,7 @@ config I2C_PUV3 config I2C_PXA tristate "Intel PXA2XX I2C adapter" - depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF) + depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF) || COMPILE_TEST help If you have devices in the PXA I2C bus, say yes to this option. This driver can also be built as a module. If so, the module @@ -932,11 +936,11 @@ config HAVE_S3C2410_I2C respective Kconfig file. config I2C_S3C2410 - tristate "S3C2410 I2C Driver" - depends on HAVE_S3C2410_I2C + tristate "S3C/Exynos I2C Driver" + depends on HAVE_S3C2410_I2C || COMPILE_TEST help Say Y here to include support for I2C controller in the - Samsung SoCs. + Samsung SoCs (S3C, S5Pv210, Exynos). config I2C_SH7760 tristate "Renesas SH7760 I2C Controller" @@ -971,7 +975,7 @@ config I2C_SIMTEC config I2C_SIRF tristate "CSR SiRFprimaII I2C interface" - depends on ARCH_SIRF + depends on ARCH_SIRF || COMPILE_TEST help If you say yes to this option, support will be included for the CSR SiRFprimaII I2C interface. @@ -981,14 +985,14 @@ config I2C_SIRF config I2C_SPRD tristate "Spreadtrum I2C interface" - depends on I2C=y && ARCH_SPRD + depends on I2C=y && (ARCH_SPRD || COMPILE_TEST) help If you say yes to this option, support will be included for the Spreadtrum I2C interface. config I2C_ST tristate "STMicroelectronics SSC I2C support" - depends on ARCH_STI + depends on ARCH_STI || COMPILE_TEST help Enable this option to add support for STMicroelectronics SoCs hardware SSC (Synchronous Serial Controller) as an I2C controller. @@ -1019,7 +1023,7 @@ config I2C_STM32F7 config I2C_STU300 tristate "ST Microelectronics DDC I2C interface" - depends on MACH_U300 + depends on MACH_U300 || COMPILE_TEST default y if MACH_U300 help If you say yes to this option, support will be included for the @@ -1055,15 +1059,16 @@ config I2C_SYNQUACER config I2C_TEGRA tristate "NVIDIA Tegra internal I2C controller" - depends on ARCH_TEGRA + depends on ARCH_TEGRA || (COMPILE_TEST && (ARC || ARM || ARM64 || M68K || RISCV || SUPERH || SPARC)) + # COMPILE_TEST needs architectures with readsX()/writesX() primitives help If you say yes to this option, support will be included for the I2C controller embedded in NVIDIA Tegra SOCs config I2C_TEGRA_BPMP tristate "NVIDIA Tegra BPMP I2C controller" - depends on TEGRA_BPMP - default y + depends on TEGRA_BPMP || COMPILE_TEST + default y if TEGRA_BPMP help If you say yes to this option, support will be included for the I2C controller embedded in NVIDIA Tegra SoCs accessed via the BPMP. @@ -1101,7 +1106,7 @@ config I2C_VERSATILE config I2C_WMT tristate "Wondermedia WM8xxx SoC I2C bus support" - depends on ARCH_VT8500 + depends on ARCH_VT8500 || COMPILE_TEST help Say yes if you want to support the I2C bus on Wondermedia 8xxx-series SoCs. @@ -1142,7 +1147,7 @@ config I2C_XILINX config I2C_XLR tristate "Netlogic XLR and Sigma Designs I2C support" - depends on CPU_XLR || ARCH_TANGO + depends on CPU_XLR || ARCH_TANGO || COMPILE_TEST help This driver enables support for the on-chip I2C interface of the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs. 
@@ -1202,46 +1207,12 @@ config I2C_PARPORT This supports parallel port I2C adapters such as the ones made by Philips or Velleman, Analog Devices evaluation boards, and more. Basically any adapter using the parallel port as an I2C bus with - no extra chipset is supported by this driver, or could be. - - This driver is a replacement for (and was inspired by) an older - driver named i2c-philips-par. The new driver supports more devices, - and makes it easier to add support for new devices. - - An adapter type parameter is now mandatory. Please read the file - Documentation/i2c/busses/i2c-parport.rst for details. - - Another driver exists, named i2c-parport-light, which doesn't depend - on the parport driver. This is meant for embedded systems. Don't say - Y here if you intend to say Y or M there. + no extra chipset is supported by this driver, or could be. Please + read the file Documentation/i2c/busses/i2c-parport.rst for details. This support is also available as a module. If so, the module will be called i2c-parport. -config I2C_PARPORT_LIGHT - tristate "Parallel port adapter (light)" - select I2C_ALGOBIT - select I2C_SMBUS - help - This supports parallel port I2C adapters such as the ones made by - Philips or Velleman, Analog Devices evaluation boards, and more. - Basically any adapter using the parallel port as an I2C bus with - no extra chipset is supported by this driver, or could be. - - This driver is a light version of i2c-parport. It doesn't depend - on the parport driver, and uses direct I/O access instead. This - might be preferred on embedded systems where wasting memory for - the clean but heavy parport handling is not an option. The - drawback is a reduced portability and the impossibility to - daisy-chain other parallel port devices. - - Don't say Y here if you said Y or M to i2c-parport. Saying M to - both is possible but both modules should not be loaded at the same - time. - - This support is also available as a module. If so, the module - will be called i2c-parport-light. - config I2C_ROBOTFUZZ_OSIF tristate "RobotFuzz Open Source InterFace USB adapter" depends on USB @@ -1328,7 +1299,7 @@ config I2C_ICY config I2C_MLXCPLD tristate "Mellanox I2C driver" - depends on X86_64 + depends on X86_64 || COMPILE_TEST help This exposes the Mellanox platform I2C busses to the linux I2C layer for X86 based systems. 
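Nearly all of the Kconfig edits above apply one recurring recipe: widen the platform gate with COMPILE_TEST, spell out build prerequisites the platform selection used to imply (OF, COMMON_CLK), and requalify any bare "default y" so compile-test configurations do not switch the driver on. A hedged template follows — the symbol and architecture are made up, only the idiom comes from the hunks above:

config I2C_FOO
	tristate "Foo SoC I2C adapter"
	# real platforms first; COMPILE_TEST merely widens build coverage
	depends on ARCH_FOO || COMPILE_TEST
	# prerequisites the platform used to guarantee must now be
	# explicit, or COMPILE_TEST builds fail to link
	depends on COMMON_CLK
	# default-on only where the hardware can actually exist
	default y if ARCH_FOO
	help
	  If you say yes to this option, support will be included for the
	  I2C controller on Foo SoCs.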
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 3ab8aebc39c9..25d60889713c 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -128,7 +128,6 @@ obj-$(CONFIG_I2C_ZX2967) += i2c-zx2967.o obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o -obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index 5137e6297022..3da1a8acecb5 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -66,55 +66,26 @@ static struct at91_twi_pdata at91rm9200_config = { .clk_max_div = 5, .clk_offset = 3, .has_unre_flag = true, - .has_alt_cmd = false, - .has_hold_field = false, - .has_dig_filtr = false, - .has_adv_dig_filtr = false, - .has_ana_filtr = false, }; static struct at91_twi_pdata at91sam9261_config = { .clk_max_div = 5, .clk_offset = 4, - .has_unre_flag = false, - .has_alt_cmd = false, - .has_hold_field = false, - .has_dig_filtr = false, - .has_adv_dig_filtr = false, - .has_ana_filtr = false, }; static struct at91_twi_pdata at91sam9260_config = { .clk_max_div = 7, .clk_offset = 4, - .has_unre_flag = false, - .has_alt_cmd = false, - .has_hold_field = false, - .has_dig_filtr = false, - .has_adv_dig_filtr = false, - .has_ana_filtr = false, }; static struct at91_twi_pdata at91sam9g20_config = { .clk_max_div = 7, .clk_offset = 4, - .has_unre_flag = false, - .has_alt_cmd = false, - .has_hold_field = false, - .has_dig_filtr = false, - .has_adv_dig_filtr = false, - .has_ana_filtr = false, }; static struct at91_twi_pdata at91sam9g10_config = { .clk_max_div = 7, .clk_offset = 4, - .has_unre_flag = false, - .has_alt_cmd = false, - .has_hold_field = false, - .has_dig_filtr = false, - .has_adv_dig_filtr = false, - .has_ana_filtr = false, }; static const struct platform_device_id at91_twi_devtypes[] = { @@ -142,23 +113,13 @@ static const struct platform_device_id at91_twi_devtypes[] = { static struct at91_twi_pdata at91sam9x5_config = { .clk_max_div = 7, .clk_offset = 4, - .has_unre_flag = false, - .has_alt_cmd = false, - .has_hold_field = false, - .has_dig_filtr = false, - .has_adv_dig_filtr = false, - .has_ana_filtr = false, }; static struct at91_twi_pdata sama5d4_config = { .clk_max_div = 7, .clk_offset = 4, - .has_unre_flag = false, - .has_alt_cmd = false, .has_hold_field = true, .has_dig_filtr = true, - .has_adv_dig_filtr = false, - .has_ana_filtr = false, }; static struct at91_twi_pdata sama5d2_config = { diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 9d71ce15db05..1105aee6634a 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -208,6 +208,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); + id->err_status = 0; /* Handling nack and arbitration lost interrupt */ if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) { @@ -241,10 +242,17 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) !id->bus_hold_flag) cdns_i2c_clear_bus_hold(id); - *(id->p_recv_buf)++ = - cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); - id->recv_count--; - id->curr_recv_count--; + if (id->recv_count > 0) { + *(id->p_recv_buf)++ = + 
cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); + id->recv_count--; + id->curr_recv_count--; + } else { + dev_err(id->adap.dev.parent, + "xfer_size reg rollover. xfer aborted!\n"); + id->err_status |= CDNS_I2C_IXR_TO; + break; + } if (cdns_is_holdquirk(id, hold_quirk)) break; @@ -342,7 +350,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) } /* Update the status for errors */ - id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK; + id->err_status |= isr_status & CDNS_I2C_IXR_ERR_INTR_MASK; if (id->err_status) status = IRQ_HANDLED; @@ -500,7 +508,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap) cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET); /* Update the transfercount register to zero */ cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET); - /* Clear the interupt status register */ + /* Clear the interrupt status register */ regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET); /* Clear the status register */ @@ -921,17 +929,18 @@ static int cdns_i2c_probe(struct platform_device *pdev) id->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(id->clk)) { - dev_err(&pdev->dev, "input clock not found.\n"); + if (PTR_ERR(id->clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "input clock not found.\n"); return PTR_ERR(id->clk); } ret = clk_prepare_enable(id->clk); if (ret) dev_err(&pdev->dev, "Unable to enable clock.\n"); - pm_runtime_enable(id->dev); pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(id->dev); pm_runtime_set_active(id->dev); + pm_runtime_enable(id->dev); id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb; if (clk_notifier_register(id->clk, &id->clk_rate_change_nb)) @@ -980,8 +989,8 @@ static int cdns_i2c_probe(struct platform_device *pdev) err_clk_dis: clk_disable_unprepare(id->clk); - pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); return ret; } @@ -997,10 +1006,13 @@ static int cdns_i2c_remove(struct platform_device *pdev) { struct cdns_i2c *id = platform_get_drvdata(pdev); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + i2c_del_adapter(&id->adap); clk_notifier_unregister(id->clk, &id->clk_rate_change_nb); clk_disable_unprepare(id->clk); - pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index b8fde61bb5d8..35e55feda763 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -388,9 +388,9 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev) */ if (acpi_dev_present("INT33FE", NULL, -1)) { board_info.irq = adap->client_irq; - adap->client = i2c_new_device(&adap->adapter, &board_info); - if (!adap->client) { - ret = -ENOMEM; + adap->client = i2c_new_client_device(&adap->adapter, &board_info); + if (IS_ERR(adap->client)) { + ret = PTR_ERR(adap->client); goto del_adapter; } } diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index 958161c71985..790ea3fda693 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -273,6 +273,7 @@ static int ec_i2c_probe(struct platform_device *pdev) bus->adap.dev.parent = &pdev->dev; bus->adap.dev.of_node = pdev->dev.of_node; bus->adap.retries = I2C_MAX_RETRIES; + ACPI_COMPANION_SET(&bus->adap.dev, ACPI_COMPANION(&pdev->dev)); err = i2c_add_adapter(&bus->adap); if (err) @@ -298,7 +299,7 @@ static const 
struct of_device_id cros_ec_i2c_of_match[] = { MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match); static const struct acpi_device_id cros_ec_i2c_tunnel_acpi_id[] = { - { "GOOG001A", 0 }, + { "GOOG0012", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 16dd338877d0..3b7d58c2fe85 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -130,6 +130,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "APMC0D0F", 0 }, { "HISI02A1", 0 }, { "HISI02A2", 0 }, + { "HISI02A3", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c index abfe3094c047..803dad70e2a7 100644 --- a/drivers/i2c/busses/i2c-highlander.c +++ b/drivers/i2c/busses/i2c-highlander.c @@ -322,7 +322,7 @@ static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, tmp |= (SMMR_MODE0 | SMMR_MODE1); break; default: - dev_err(dev->dev, "unsupported xfer size %d\n", dev->buf_len); + dev_err(dev->dev, "unsupported xfer size %zu\n", dev->buf_len); return -EINVAL; } diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index f5e69fe56532..ca4f096fef74 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -68,6 +68,7 @@ * Elkhart Lake (PCH) 0x4b23 32 hard yes yes yes * Tiger Lake-LP (PCH) 0xa0a3 32 hard yes yes yes * Jasper Lake (SOC) 0x4da3 32 hard yes yes yes + * Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -244,6 +245,7 @@ #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 +#define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3 struct i801_mux_config { char *gpio_chip; @@ -1074,6 +1076,7 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) }, @@ -1142,7 +1145,7 @@ static void dmi_check_onboard_device(u8 type, const char *name, memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = dmi_devices[i].i2c_addr; strlcpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE); - i2c_new_device(adap, &info); + i2c_new_client_device(adap, &info); break; } } @@ -1296,7 +1299,7 @@ static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv) memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = dell_lis3lv02d_devices[i].i2c_addr; strlcpy(info.type, "lis3lv02d", I2C_NAME_SIZE); - i2c_new_device(&priv->adapter, &info); + i2c_new_client_device(&priv->adapter, &info); } /* Register optional slaves */ @@ -1312,7 +1315,7 @@ static void i801_probe_optional_slaves(struct i801_priv *priv) memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = apanel_addr; strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE); - i2c_new_device(&priv->adapter, &info); + i2c_new_client_device(&priv->adapter, &info); } if 
(dmi_name_in_vendors("FUJITSU")) @@ -1742,6 +1745,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: case PCI_DEVICE_ID_INTEL_DNV_SMBUS: case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: + case PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS: priv->features |= FEATURE_BLOCK_PROC; priv->features |= FEATURE_I2C_BLOCK_READ; priv->features |= FEATURE_IRQ; diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 25dcd73acd63..16a67a64284a 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -4,6 +4,7 @@ * * Copyright (C) 2006 - 2009 Ingenic Semiconductor Inc. * Copyright (C) 2015 Imagination Technologies + * Copyright (C) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com> */ #include <linux/bitops.h> @@ -17,6 +18,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> @@ -55,6 +57,7 @@ #define JZ4780_I2C_ACKGC 0x98 #define JZ4780_I2C_ENSTA 0x9C #define JZ4780_I2C_SDAHD 0xD0 +#define X1000_I2C_SDAHD 0x7C #define JZ4780_I2C_CTRL_STPHLD BIT(7) #define JZ4780_I2C_CTRL_SLVDIS BIT(6) @@ -73,6 +76,8 @@ #define JZ4780_I2C_STA_TFNF BIT(1) #define JZ4780_I2C_STA_ACT BIT(0) +#define X1000_I2C_DC_STOP BIT(9) + static const char * const jz4780_i2c_abrt_src[] = { "ABRT_7B_ADDR_NOACK", "ABRT_10ADDR1_NOACK", @@ -130,18 +135,33 @@ static const char * const jz4780_i2c_abrt_src[] = { #define JZ4780_I2CFLCNT_ADJUST(n) (((n) - 1) < 8 ? 8 : ((n) - 1)) #define JZ4780_I2C_FIFO_LEN 16 -#define TX_LEVEL 3 -#define RX_LEVEL (JZ4780_I2C_FIFO_LEN - TX_LEVEL - 1) + +#define X1000_I2C_FIFO_LEN 64 #define JZ4780_I2C_TIMEOUT 300 #define BUFSIZE 200 +enum ingenic_i2c_version { + ID_JZ4780, + ID_X1000, +}; + +/* ingenic_i2c_config: SoC specific config data. 
*/ +struct ingenic_i2c_config { + enum ingenic_i2c_version version; + + int fifosize; + int tx_level; + int rx_level; +}; + struct jz4780_i2c { void __iomem *iomem; int irq; struct clk *clk; struct i2c_adapter adap; + const struct ingenic_i2c_config *cdata; /* lock to protect rbuf and wbuf between xfer_rd/wr and irq handler */ spinlock_t lock; @@ -340,11 +360,18 @@ static int jz4780_i2c_set_speed(struct jz4780_i2c *i2c) if (hold_time >= 0) { /*i2c hold time enable */ - hold_time |= JZ4780_I2C_SDAHD_HDENB; - jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, hold_time); + if (i2c->cdata->version >= ID_X1000) { + jz4780_i2c_writew(i2c, X1000_I2C_SDAHD, hold_time); + } else { + hold_time |= JZ4780_I2C_SDAHD_HDENB; + jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, hold_time); + } } else { /* disable hold time */ - jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, 0); + if (i2c->cdata->version >= ID_X1000) + jz4780_i2c_writew(i2c, X1000_I2C_SDAHD, 0); + else + jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, 0); } return 0; @@ -359,9 +386,11 @@ static int jz4780_i2c_cleanup(struct jz4780_i2c *i2c) spin_lock_irqsave(&i2c->lock, flags); /* can send stop now if need */ - tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); - tmp &= ~JZ4780_I2C_CTRL_STPHLD; - jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + if (i2c->cdata->version < ID_X1000) { + tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); + tmp &= ~JZ4780_I2C_CTRL_STPHLD; + jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + } /* disable all interrupts first */ jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0); @@ -399,11 +428,19 @@ static int jz4780_i2c_prepare(struct jz4780_i2c *i2c) return jz4780_i2c_enable(i2c); } -static void jz4780_i2c_send_rcmd(struct jz4780_i2c *i2c, int cmd_count) +static void jz4780_i2c_send_rcmd(struct jz4780_i2c *i2c, + int cmd_count, + int cmd_left) { int i; - for (i = 0; i < cmd_count; i++) + for (i = 0; i < cmd_count - 1; i++) + jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ); + + if ((cmd_left == 0) && (i2c->cdata->version >= ID_X1000)) + jz4780_i2c_writew(i2c, JZ4780_I2C_DC, + JZ4780_I2C_DC_READ | X1000_I2C_DC_STOP); + else jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ); } @@ -458,37 +495,44 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id) rd_left = i2c->rd_total_len - i2c->rd_data_xfered; - if (rd_left <= JZ4780_I2C_FIFO_LEN) + if (rd_left <= i2c->cdata->fifosize) jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, rd_left - 1); } if (intst & JZ4780_I2C_INTST_TXEMP) { if (i2c->is_write == 0) { int cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered; - int max_send = (JZ4780_I2C_FIFO_LEN - 1) + int max_send = (i2c->cdata->fifosize - 1) - (i2c->rd_cmd_xfered - i2c->rd_data_xfered); int cmd_to_send = min(cmd_left, max_send); if (i2c->rd_cmd_xfered != 0) cmd_to_send = min(cmd_to_send, - JZ4780_I2C_FIFO_LEN - - TX_LEVEL - 1); + i2c->cdata->fifosize + - i2c->cdata->tx_level - 1); if (cmd_to_send) { - jz4780_i2c_send_rcmd(i2c, cmd_to_send); i2c->rd_cmd_xfered += cmd_to_send; + cmd_left = i2c->rd_total_len - + i2c->rd_cmd_xfered; + jz4780_i2c_send_rcmd(i2c, + cmd_to_send, cmd_left); + } - cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered; if (cmd_left == 0) { intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM); intmsk &= ~JZ4780_I2C_INTM_MTXEMP; jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, intmsk); - tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); - tmp &= ~JZ4780_I2C_CTRL_STPHLD; - jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + if (i2c->cdata->version < ID_X1000) { + tmp = jz4780_i2c_readw(i2c, + JZ4780_I2C_CTRL); + tmp &= ~JZ4780_I2C_CTRL_STPHLD; + 
jz4780_i2c_writew(i2c, + JZ4780_I2C_CTRL, tmp); + } } } else { unsigned short data; @@ -497,23 +541,26 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id) i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); while ((i2c_sta & JZ4780_I2C_STA_TFNF) && - (i2c->wt_len > 0)) { + (i2c->wt_len > 0)) { i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); data = *i2c->wbuf; data &= ~JZ4780_I2C_DC_READ; - jz4780_i2c_writew(i2c, JZ4780_I2C_DC, - data); + if ((!i2c->stop_hold) && (i2c->cdata->version >= + ID_X1000)) + data |= X1000_I2C_DC_STOP; + jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data); i2c->wbuf++; i2c->wt_len--; } if (i2c->wt_len == 0) { - if (!i2c->stop_hold) { + if ((!i2c->stop_hold) && (i2c->cdata->version < + ID_X1000)) { tmp = jz4780_i2c_readw(i2c, - JZ4780_I2C_CTRL); + JZ4780_I2C_CTRL); tmp &= ~JZ4780_I2C_CTRL_STPHLD; - jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, - tmp); + jz4780_i2c_writew(i2c, + JZ4780_I2C_CTRL, tmp); } jz4780_i2c_trans_done(i2c); @@ -567,20 +614,22 @@ static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c, i2c->rd_data_xfered = 0; i2c->rd_cmd_xfered = 0; - if (len <= JZ4780_I2C_FIFO_LEN) + if (len <= i2c->cdata->fifosize) jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, len - 1); else - jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, RX_LEVEL); + jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, i2c->cdata->rx_level); - jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, TX_LEVEL); + jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, i2c->cdata->tx_level); jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, JZ4780_I2C_INTM_MRXFL | JZ4780_I2C_INTM_MTXEMP | JZ4780_I2C_INTM_MTXABT | JZ4780_I2C_INTM_MRXOF); - tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); - tmp |= JZ4780_I2C_CTRL_STPHLD; - jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + if (i2c->cdata->version < ID_X1000) { + tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); + tmp |= JZ4780_I2C_CTRL_STPHLD; + jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + } spin_unlock_irqrestore(&i2c->lock, flags); @@ -626,14 +675,16 @@ static inline int jz4780_i2c_xfer_write(struct jz4780_i2c *i2c, i2c->wbuf = buf; i2c->wt_len = len; - jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, TX_LEVEL); + jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, i2c->cdata->tx_level); jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, JZ4780_I2C_INTM_MTXEMP | JZ4780_I2C_INTM_MTXABT); - tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); - tmp |= JZ4780_I2C_CTRL_STPHLD; - jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + if (i2c->cdata->version < ID_X1000) { + tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); + tmp |= JZ4780_I2C_CTRL_STPHLD; + jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + } spin_unlock_irqrestore(&i2c->lock, flags); @@ -716,8 +767,25 @@ static const struct i2c_algorithm jz4780_i2c_algorithm = { .functionality = jz4780_i2c_functionality, }; +static const struct ingenic_i2c_config jz4780_i2c_config = { + .version = ID_JZ4780, + + .fifosize = JZ4780_I2C_FIFO_LEN, + .tx_level = JZ4780_I2C_FIFO_LEN / 2, + .rx_level = JZ4780_I2C_FIFO_LEN / 2 - 1, +}; + +static const struct ingenic_i2c_config x1000_i2c_config = { + .version = ID_X1000, + + .fifosize = X1000_I2C_FIFO_LEN, + .tx_level = X1000_I2C_FIFO_LEN / 2, + .rx_level = X1000_I2C_FIFO_LEN / 2 - 1, +}; + static const struct of_device_id jz4780_i2c_of_matches[] = { - { .compatible = "ingenic,jz4780-i2c", }, + { .compatible = "ingenic,jz4780-i2c", .data = &jz4780_i2c_config }, + { .compatible = "ingenic,x1000-i2c", .data = &x1000_i2c_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches); @@ -734,6 +802,12 @@ static int jz4780_i2c_probe(struct platform_device *pdev) if 
(!i2c) return -ENOMEM; + i2c->cdata = device_get_match_data(&pdev->dev); + if (!i2c->cdata) { + dev_err(&pdev->dev, "Error: No device match found\n"); + return -ENODEV; + } + i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &jz4780_i2c_algorithm; i2c->adap.algo_data = i2c; @@ -777,9 +851,11 @@ static int jz4780_i2c_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed); - tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); - tmp &= ~JZ4780_I2C_CTRL_STPHLD; - jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + if (i2c->cdata->version < ID_X1000) { + tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL); + tmp &= ~JZ4780_I2C_CTRL_STPHLD; + jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp); + } jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 1e2647f9a2a7..06b3bed78421 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -10,6 +10,7 @@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> @@ -213,6 +214,30 @@ static void meson_i2c_prepare_xfer(struct meson_i2c *i2c) writel(i2c->tokens[1], i2c->regs + REG_TOK_LIST1); } +static void meson_i2c_transfer_complete(struct meson_i2c *i2c, u32 ctrl) +{ + if (ctrl & REG_CTRL_ERROR) { + /* + * The bit is set when the IGNORE_NAK bit is cleared + * and the device didn't respond. In this case, the + * I2C controller automatically generates a STOP + * condition. + */ + dev_dbg(i2c->dev, "error bit set\n"); + i2c->error = -ENXIO; + i2c->state = STATE_IDLE; + } else { + if (i2c->state == STATE_READ && i2c->count) + meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos, + i2c->count); + + i2c->pos += i2c->count; + + if (i2c->pos >= i2c->msg->len) + i2c->state = STATE_IDLE; + } +} + static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) { struct meson_i2c *i2c = dev_id; @@ -232,27 +257,9 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) return IRQ_NONE; } - if (ctrl & REG_CTRL_ERROR) { - /* - * The bit is set when the IGNORE_NAK bit is cleared - * and the device didn't respond. In this case, the - * I2C controller automatically generates a STOP - * condition. - */ - dev_dbg(i2c->dev, "error bit set\n"); - i2c->error = -ENXIO; - i2c->state = STATE_IDLE; - complete(&i2c->done); - goto out; - } - - if (i2c->state == STATE_READ && i2c->count) - meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos, i2c->count); + meson_i2c_transfer_complete(i2c, ctrl); - i2c->pos += i2c->count; - - if (i2c->pos >= i2c->msg->len) { - i2c->state = STATE_IDLE; + if (i2c->state == STATE_IDLE) { complete(&i2c->done); goto out; } @@ -279,10 +286,11 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg) } static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg, - int last) + int last, bool atomic) { unsigned long time_left, flags; int ret = 0; + u32 ctrl; i2c->msg = msg; i2c->last = last; @@ -300,13 +308,24 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg, i2c->state = (msg->flags & I2C_M_RD) ? 
STATE_READ : STATE_WRITE; meson_i2c_prepare_xfer(i2c); - reinit_completion(&i2c->done); + + if (!atomic) + reinit_completion(&i2c->done); /* Start the transfer */ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, REG_CTRL_START); - time_left = msecs_to_jiffies(I2C_TIMEOUT_MS); - time_left = wait_for_completion_timeout(&i2c->done, time_left); + if (atomic) { + ret = readl_poll_timeout_atomic(i2c->regs + REG_CTRL, ctrl, + !(ctrl & REG_CTRL_STATUS), + 10, I2C_TIMEOUT_MS * 1000); + } else { + time_left = msecs_to_jiffies(I2C_TIMEOUT_MS); + time_left = wait_for_completion_timeout(&i2c->done, time_left); + + if (!time_left) + ret = -ETIMEDOUT; + } /* * Protect access to i2c struct and registers from interrupt @@ -315,13 +334,14 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg, */ spin_lock_irqsave(&i2c->lock, flags); + if (atomic && !ret) + meson_i2c_transfer_complete(i2c, ctrl); + /* Abort any active operation */ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0); - if (!time_left) { + if (ret) i2c->state = STATE_IDLE; - ret = -ETIMEDOUT; - } if (i2c->error) ret = i2c->error; @@ -331,8 +351,8 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg, return ret; } -static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, - int num) +static int meson_i2c_xfer_messages(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num, bool atomic) { struct meson_i2c *i2c = adap->algo_data; int i, ret = 0; @@ -340,7 +360,7 @@ static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, clk_enable(i2c->clk); for (i = 0; i < num; i++) { - ret = meson_i2c_xfer_msg(i2c, msgs + i, i == num - 1); + ret = meson_i2c_xfer_msg(i2c, msgs + i, i == num - 1, atomic); if (ret) break; } @@ -350,14 +370,27 @@ static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, return ret ?: i; } +static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) +{ + return meson_i2c_xfer_messages(adap, msgs, num, false); +} + +static int meson_i2c_xfer_atomic(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + return meson_i2c_xfer_messages(adap, msgs, num, true); +} + static u32 meson_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm meson_i2c_algorithm = { - .master_xfer = meson_i2c_xfer, - .functionality = meson_i2c_func, + .master_xfer = meson_i2c_xfer, + .master_xfer_atomic = meson_i2c_xfer_atomic, + .functionality = meson_i2c_func, }; static int meson_i2c_probe(struct platform_device *pdev) diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index 5a1235fd86bb..62e18b4db0ed 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -280,9 +280,9 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq) i2cd->gpu_ccgx_ucsi->addr = 0x8; i2cd->gpu_ccgx_ucsi->irq = irq; i2cd->gpu_ccgx_ucsi->properties = ccgx_props; - i2cd->ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi); - if (!i2cd->ccgx_client) - return -ENODEV; + i2cd->ccgx_client = i2c_new_client_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi); + if (IS_ERR(i2cd->ccgx_client)) + return PTR_ERR(i2cd->ccgx_client); return 0; } diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index ca8b3ecfa93d..f5fc75b65a19 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -731,7 +731,7 @@ static int ocores_i2c_probe(struct platform_device *pdev) /* 
add in known devices to the bus */ if (pdata) { for (i = 0; i < pdata->num_devices; i++) - i2c_new_device(&i2c->adap, pdata->devices + i); + i2c_new_client_device(&i2c->adap, pdata->devices + i); } return 0; diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c deleted file mode 100644 index 00f6aaf22cfc..000000000000 --- a/drivers/i2c/busses/i2c-parport-light.c +++ /dev/null @@ -1,267 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* ------------------------------------------------------------------------ * - * i2c-parport-light.c I2C bus over parallel port * - * ------------------------------------------------------------------------ * - Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de> - - Based on older i2c-velleman.c driver - Copyright (C) 1995-2000 Simon G. Vogl - With some changes from: - Frodo Looijaard <frodol@dds.nl> - Kyösti Mälkki <kmalkki@cc.hut.fi> - - * ------------------------------------------------------------------------ */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/platform_device.h> -#include <linux/ioport.h> -#include <linux/i2c.h> -#include <linux/i2c-algo-bit.h> -#include <linux/i2c-smbus.h> -#include <linux/io.h> -#include "i2c-parport.h" - -#define DEFAULT_BASE 0x378 -#define DRVNAME "i2c-parport-light" - -static struct platform_device *pdev; - -static u16 base; -module_param_hw(base, ushort, ioport, 0); -MODULE_PARM_DESC(base, "Base I/O address"); - -static int irq; -module_param_hw(irq, int, irq, 0); -MODULE_PARM_DESC(irq, "IRQ (optional)"); - -/* ----- Low-level parallel port access ----------------------------------- */ - -static inline void port_write(unsigned char p, unsigned char d) -{ - outb(d, base+p); -} - -static inline unsigned char port_read(unsigned char p) -{ - return inb(base+p); -} - -/* ----- Unified line operation functions --------------------------------- */ - -static inline void line_set(int state, const struct lineop *op) -{ - u8 oldval = port_read(op->port); - - /* Touch only the bit(s) needed */ - if ((op->inverted && !state) || (!op->inverted && state)) - port_write(op->port, oldval | op->val); - else - port_write(op->port, oldval & ~op->val); -} - -static inline int line_get(const struct lineop *op) -{ - u8 oldval = port_read(op->port); - - return ((op->inverted && (oldval & op->val) != op->val) - || (!op->inverted && (oldval & op->val) == op->val)); -} - -/* ----- I2C algorithm call-back functions and structures ----------------- */ - -static void parport_setscl(void *data, int state) -{ - line_set(state, &adapter_parm[type].setscl); -} - -static void parport_setsda(void *data, int state) -{ - line_set(state, &adapter_parm[type].setsda); -} - -static int parport_getscl(void *data) -{ - return line_get(&adapter_parm[type].getscl); -} - -static int parport_getsda(void *data) -{ - return line_get(&adapter_parm[type].getsda); -} - -/* Encapsulate the functions above in the correct structure - Note that getscl will be set to NULL by the attaching code for adapters - that cannot read SCL back */ -static struct i2c_algo_bit_data parport_algo_data = { - .setsda = parport_setsda, - .setscl = parport_setscl, - .getsda = parport_getsda, - .getscl = parport_getscl, - .udelay = 50, - .timeout = HZ, -}; - -/* ----- Driver registration ---------------------------------------------- */ - -static struct i2c_adapter parport_adapter = { - .owner = THIS_MODULE, - .class = I2C_CLASS_HWMON, - .algo_data = &parport_algo_data, - 
.name = "Parallel port adapter (light)", -}; - -/* SMBus alert support */ -static struct i2c_smbus_alert_setup alert_data = { -}; -static struct i2c_client *ara; -static struct lineop parport_ctrl_irq = { - .val = (1 << 4), - .port = PORT_CTRL, -}; - -static int i2c_parport_probe(struct platform_device *pdev) -{ - int err; - - /* Reset hardware to a sane state (SCL and SDA high) */ - parport_setsda(NULL, 1); - parport_setscl(NULL, 1); - /* Other init if needed (power on...) */ - if (adapter_parm[type].init.val) { - line_set(1, &adapter_parm[type].init); - /* Give powered devices some time to settle */ - msleep(100); - } - - parport_adapter.dev.parent = &pdev->dev; - err = i2c_bit_add_bus(&parport_adapter); - if (err) { - dev_err(&pdev->dev, "Unable to register with I2C\n"); - return err; - } - - /* Setup SMBus alert if supported */ - if (adapter_parm[type].smbus_alert && irq) { - alert_data.irq = irq; - ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data); - if (ara) - line_set(1, &parport_ctrl_irq); - else - dev_warn(&pdev->dev, "Failed to register ARA client\n"); - } - - return 0; -} - -static int i2c_parport_remove(struct platform_device *pdev) -{ - if (ara) { - line_set(0, &parport_ctrl_irq); - i2c_unregister_device(ara); - ara = NULL; - } - i2c_del_adapter(&parport_adapter); - - /* Un-init if needed (power off...) */ - if (adapter_parm[type].init.val) - line_set(0, &adapter_parm[type].init); - - return 0; -} - -static struct platform_driver i2c_parport_driver = { - .driver = { - .name = DRVNAME, - }, - .probe = i2c_parport_probe, - .remove = i2c_parport_remove, -}; - -static int __init i2c_parport_device_add(u16 address) -{ - int err; - - pdev = platform_device_alloc(DRVNAME, -1); - if (!pdev) { - err = -ENOMEM; - printk(KERN_ERR DRVNAME ": Device allocation failed\n"); - goto exit; - } - - err = platform_device_add(pdev); - if (err) { - printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", - err); - goto exit_device_put; - } - - return 0; - -exit_device_put: - platform_device_put(pdev); -exit: - return err; -} - -static int __init i2c_parport_init(void) -{ - int err; - - if (type < 0) { - printk(KERN_ERR DRVNAME ": adapter type unspecified\n"); - return -ENODEV; - } - - if (type >= ARRAY_SIZE(adapter_parm)) { - printk(KERN_ERR DRVNAME ": invalid type (%d)\n", type); - return -ENODEV; - } - - if (base == 0) { - pr_info(DRVNAME ": using default base 0x%x\n", DEFAULT_BASE); - base = DEFAULT_BASE; - } - - if (!request_region(base, 3, DRVNAME)) - return -EBUSY; - - if (irq != 0) - pr_info(DRVNAME ": using irq %d\n", irq); - - if (!adapter_parm[type].getscl.val) - parport_algo_data.getscl = NULL; - - /* Sets global pdev as a side effect */ - err = i2c_parport_device_add(base); - if (err) - goto exit_release; - - err = platform_driver_register(&i2c_parport_driver); - if (err) - goto exit_device; - - return 0; - -exit_device: - platform_device_unregister(pdev); -exit_release: - release_region(base, 3); - return err; -} - -static void __exit i2c_parport_exit(void) -{ - platform_driver_unregister(&i2c_parport_driver); - platform_device_unregister(pdev); - release_region(base, 3); -} - -MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); -MODULE_DESCRIPTION("I2C bus over parallel port (light)"); -MODULE_LICENSE("GPL"); - -module_init(i2c_parport_init); -module_exit(i2c_parport_exit); diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c index e8ed882de402..81eb441b2387 100644 --- a/drivers/i2c/busses/i2c-parport.c +++ b/drivers/i2c/busses/i2c-parport.c @@ 
-25,7 +25,90 @@ #include <linux/slab.h> #include <linux/list.h> #include <linux/mutex.h> -#include "i2c-parport.h" + +#define PORT_DATA 0 +#define PORT_STAT 1 +#define PORT_CTRL 2 + +struct lineop { + u8 val; + u8 port; + u8 inverted; +}; + +struct adapter_parm { + struct lineop setsda; + struct lineop setscl; + struct lineop getsda; + struct lineop getscl; + struct lineop init; + unsigned int smbus_alert:1; +}; + +static const struct adapter_parm adapter_parm[] = { + /* type 0: Philips adapter */ + { + .setsda = { 0x80, PORT_DATA, 1 }, + .setscl = { 0x08, PORT_CTRL, 0 }, + .getsda = { 0x80, PORT_STAT, 0 }, + .getscl = { 0x08, PORT_STAT, 0 }, + }, + /* type 1: home brew teletext adapter */ + { + .setsda = { 0x02, PORT_DATA, 0 }, + .setscl = { 0x01, PORT_DATA, 0 }, + .getsda = { 0x80, PORT_STAT, 1 }, + }, + /* type 2: Velleman K8000 adapter */ + { + .setsda = { 0x02, PORT_CTRL, 1 }, + .setscl = { 0x08, PORT_CTRL, 1 }, + .getsda = { 0x10, PORT_STAT, 0 }, + }, + /* type 3: ELV adapter */ + { + .setsda = { 0x02, PORT_DATA, 1 }, + .setscl = { 0x01, PORT_DATA, 1 }, + .getsda = { 0x40, PORT_STAT, 1 }, + .getscl = { 0x08, PORT_STAT, 1 }, + }, + /* type 4: ADM1032 evaluation board */ + { + .setsda = { 0x02, PORT_DATA, 1 }, + .setscl = { 0x01, PORT_DATA, 1 }, + .getsda = { 0x10, PORT_STAT, 1 }, + .init = { 0xf0, PORT_DATA, 0 }, + .smbus_alert = 1, + }, + /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */ + { + .setsda = { 0x02, PORT_DATA, 1 }, + .setscl = { 0x01, PORT_DATA, 1 }, + .getsda = { 0x10, PORT_STAT, 1 }, + }, + /* type 6: Barco LPT->DVI (K5800236) adapter */ + { + .setsda = { 0x02, PORT_DATA, 1 }, + .setscl = { 0x01, PORT_DATA, 1 }, + .getsda = { 0x20, PORT_STAT, 0 }, + .getscl = { 0x40, PORT_STAT, 0 }, + .init = { 0xfc, PORT_DATA, 0 }, + }, + /* type 7: One For All JP1 parallel port adapter */ + { + .setsda = { 0x01, PORT_DATA, 0 }, + .setscl = { 0x02, PORT_DATA, 0 }, + .getsda = { 0x80, PORT_STAT, 1 }, + .init = { 0x04, PORT_DATA, 1 }, + }, + /* type 8: VCT-jig */ + { + .setsda = { 0x04, PORT_DATA, 1 }, + .setscl = { 0x01, PORT_DATA, 1 }, + .getsda = { 0x40, PORT_STAT, 0 }, + .getscl = { 0x80, PORT_STAT, 1 }, + }, +}; /* ----- Device list ------------------------------------------------------ */ @@ -40,9 +123,30 @@ struct i2c_par { static LIST_HEAD(adapter_list); static DEFINE_MUTEX(adapter_list_lock); + #define MAX_DEVICE 4 static int parport[MAX_DEVICE] = {0, -1, -1, -1}; +module_param_array(parport, int, NULL, 0); +MODULE_PARM_DESC(parport, + "List of parallel ports to bind to, by index.\n" + " At most " __stringify(MAX_DEVICE) " devices are supported.\n" + " Default is one device connected to parport0.\n" +); +static int type = -1; +module_param(type, int, 0); +MODULE_PARM_DESC(type, + "Type of adapter:\n" + " 0 = Philips adapter\n" + " 1 = home brew teletext adapter\n" + " 2 = Velleman K8000 adapter\n" + " 3 = ELV adapter\n" + " 4 = ADM1032 evaluation board\n" + " 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n" + " 6 = Barco LPT->DVI (K5800236) adapter\n" + " 7 = One For All JP1 parallel port adapter\n" + " 8 = VCT-jig\n" +); /* ----- Low-level parallel port access ----------------------------------- */ @@ -311,12 +415,5 @@ MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); MODULE_DESCRIPTION("I2C bus over parallel port"); MODULE_LICENSE("GPL"); -module_param_array(parport, int, NULL, 0); -MODULE_PARM_DESC(parport, - "List of parallel ports to bind to, by index.\n" - " Atmost " __stringify(MAX_DEVICE) " devices are supported.\n" - " Default is one device connected to 
parport0.\n" -); - module_init(i2c_parport_init); module_exit(i2c_parport_exit); diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h deleted file mode 100644 index 3b32d92b1264..000000000000 --- a/drivers/i2c/busses/i2c-parport.h +++ /dev/null @@ -1,106 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* ------------------------------------------------------------------------ * - * i2c-parport.h I2C bus over parallel port * - * ------------------------------------------------------------------------ * - Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de> - - * ------------------------------------------------------------------------ */ - -#define PORT_DATA 0 -#define PORT_STAT 1 -#define PORT_CTRL 2 - -struct lineop { - u8 val; - u8 port; - u8 inverted; -}; - -struct adapter_parm { - struct lineop setsda; - struct lineop setscl; - struct lineop getsda; - struct lineop getscl; - struct lineop init; - unsigned int smbus_alert:1; -}; - -static const struct adapter_parm adapter_parm[] = { - /* type 0: Philips adapter */ - { - .setsda = { 0x80, PORT_DATA, 1 }, - .setscl = { 0x08, PORT_CTRL, 0 }, - .getsda = { 0x80, PORT_STAT, 0 }, - .getscl = { 0x08, PORT_STAT, 0 }, - }, - /* type 1: home brew teletext adapter */ - { - .setsda = { 0x02, PORT_DATA, 0 }, - .setscl = { 0x01, PORT_DATA, 0 }, - .getsda = { 0x80, PORT_STAT, 1 }, - }, - /* type 2: Velleman K8000 adapter */ - { - .setsda = { 0x02, PORT_CTRL, 1 }, - .setscl = { 0x08, PORT_CTRL, 1 }, - .getsda = { 0x10, PORT_STAT, 0 }, - }, - /* type 3: ELV adapter */ - { - .setsda = { 0x02, PORT_DATA, 1 }, - .setscl = { 0x01, PORT_DATA, 1 }, - .getsda = { 0x40, PORT_STAT, 1 }, - .getscl = { 0x08, PORT_STAT, 1 }, - }, - /* type 4: ADM1032 evaluation board */ - { - .setsda = { 0x02, PORT_DATA, 1 }, - .setscl = { 0x01, PORT_DATA, 1 }, - .getsda = { 0x10, PORT_STAT, 1 }, - .init = { 0xf0, PORT_DATA, 0 }, - .smbus_alert = 1, - }, - /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */ - { - .setsda = { 0x02, PORT_DATA, 1 }, - .setscl = { 0x01, PORT_DATA, 1 }, - .getsda = { 0x10, PORT_STAT, 1 }, - }, - /* type 6: Barco LPT->DVI (K5800236) adapter */ - { - .setsda = { 0x02, PORT_DATA, 1 }, - .setscl = { 0x01, PORT_DATA, 1 }, - .getsda = { 0x20, PORT_STAT, 0 }, - .getscl = { 0x40, PORT_STAT, 0 }, - .init = { 0xfc, PORT_DATA, 0 }, - }, - /* type 7: One For All JP1 parallel port adapter */ - { - .setsda = { 0x01, PORT_DATA, 0 }, - .setscl = { 0x02, PORT_DATA, 0 }, - .getsda = { 0x80, PORT_STAT, 1 }, - .init = { 0x04, PORT_DATA, 1 }, - }, - /* type 8: VCT-jig */ - { - .setsda = { 0x04, PORT_DATA, 1 }, - .setscl = { 0x01, PORT_DATA, 1 }, - .getsda = { 0x40, PORT_STAT, 0 }, - .getscl = { 0x80, PORT_STAT, 1 }, - }, -}; - -static int type = -1; -module_param(type, int, 0); -MODULE_PARM_DESC(type, - "Type of adapter:\n" - " 0 = Philips adapter\n" - " 1 = home brew teletext adapter\n" - " 2 = Velleman K8000 adapter\n" - " 3 = ELV adapter\n" - " 4 = ADM1032 evaluation board\n" - " 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n" - " 6 = Barco LPT->DVI (K5800236) adapter\n" - " 7 = One For All JP1 parallel port adapter\n" - " 8 = VCT-jig\n" -); diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 4fde74eb34a7..5d89c7c1b3a8 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -274,8 +274,8 @@ static int pmcmsptwi_probe(struct platform_device *pldev) if (!request_mem_region(res->start, resource_size(res), pldev->name)) { dev_err(&pldev->dev, - "Unable to get 
memory/io address region 0x%08x\n", - res->start); + "Unable to get memory/io address region %pap\n", + &res->start); rc = -EBUSY; goto ret_err; } @@ -285,7 +285,7 @@ static int pmcmsptwi_probe(struct platform_device *pldev) resource_size(res)); if (!pmcmsptwi_data.iobase) { dev_err(&pldev->dev, - "Unable to ioremap address 0x%08x\n", res->start); + "Unable to ioremap address %pap\n", &res->start); rc = -EIO; goto ret_unreserve; } diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 6e0e546ef83f..686c06f31625 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -734,8 +734,8 @@ static int i2c_pnx_probe(struct platform_device *pdev) if (ret < 0) goto out_clock; - dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n", - alg_data->adapter.name, res->start, alg_data->irq); + dev_dbg(&pdev->dev, "%s: Master at %pap, irq %d.\n", + alg_data->adapter.name, &res->start, alg_data->irq); return 0; diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 504f5bf0e625..973e5339033c 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -240,8 +240,8 @@ static void i2c_powermac_create_one(struct i2c_adapter *adap, strncpy(info.type, type, sizeof(info.type)); info.addr = addr; - newdev = i2c_new_device(adap, &info); - if (!newdev) + newdev = i2c_new_client_device(adap, &info); + if (IS_ERR(newdev)) dev_err(&adap->dev, "i2c-powermac: Failure to register missing %s\n", type); @@ -359,8 +359,8 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap, info.irq = irq_of_parse_and_map(node, 0); info.of_node = of_node_get(node); - newdev = i2c_new_device(adap, &info); - if (!newdev) { + newdev = i2c_new_client_device(adap, &info); + if (IS_ERR(newdev)) { dev_err(&adap->dev, "i2c-powermac: Failure to register" " %pOF\n", node); of_node_put(node); diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index b2634afe066d..5c3e8ac6ad92 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -169,6 +169,24 @@ #define STM32F7_AUTOSUSPEND_DELAY (HZ / 100) /** + * struct stm32f7_i2c_regs - i2c f7 registers backup + * @cr1: Control register 1 + * @cr2: Control register 2 + * @oar1: Own address 1 register + * @oar2: Own address 2 register + * @pecr: PEC register + * @tmgr: Timing register + */ +struct stm32f7_i2c_regs { + u32 cr1; + u32 cr2; + u32 oar1; + u32 oar2; + u32 pecr; + u32 tmgr; +}; + +/** * struct stm32f7_i2c_spec - private i2c specification timing * @rate: I2C bus speed (Hz) * @rate_min: 80% of I2C bus speed (Hz) @@ -276,6 +294,7 @@ struct stm32f7_i2c_msg { * @timing: I2C computed timings * @slave: list of slave devices registered on the I2C bus * @slave_running: slave device currently used + * @backup_regs: backup of i2c controller registers (for suspend/resume) * @slave_dir: transfer direction for the current slave device * @master_mode: boolean to know in which mode the I2C is running (master or * slave) @@ -298,6 +317,7 @@ struct stm32f7_i2c_dev { struct stm32f7_i2c_timings timing; struct i2c_client *slave[STM32F7_I2C_MAX_SLAVE]; struct i2c_client *slave_running; + struct stm32f7_i2c_regs backup_regs; u32 slave_dir; bool master_mode; struct stm32_i2c_dma *dma; @@ -2027,8 +2047,7 @@ static int stm32f7_i2c_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int stm32f7_i2c_runtime_suspend(struct device *dev) +static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev) { struct 
stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); @@ -2038,7 +2057,7 @@ static int stm32f7_i2c_runtime_suspend(struct device *dev) return 0; } -static int stm32f7_i2c_runtime_resume(struct device *dev) +static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev) { struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); int ret; @@ -2053,11 +2072,101 @@ static int stm32f7_i2c_runtime_resume(struct device *dev) return 0; } -#endif + +static int __maybe_unused +stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev) +{ + int ret; + struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs; + + ret = pm_runtime_get_sync(i2c_dev->dev); + if (ret < 0) + return ret; + + backup_regs->cr1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR1); + backup_regs->cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2); + backup_regs->oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1); + backup_regs->oar2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR2); + backup_regs->pecr = readl_relaxed(i2c_dev->base + STM32F7_I2C_PECR); + backup_regs->tmgr = readl_relaxed(i2c_dev->base + STM32F7_I2C_TIMINGR); + + pm_runtime_put_sync(i2c_dev->dev); + + return ret; +} + +static int __maybe_unused +stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev) +{ + u32 cr1; + int ret; + struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs; + + ret = pm_runtime_get_sync(i2c_dev->dev); + if (ret < 0) + return ret; + + cr1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR1); + if (cr1 & STM32F7_I2C_CR1_PE) + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_PE); + + writel_relaxed(backup_regs->tmgr, i2c_dev->base + STM32F7_I2C_TIMINGR); + writel_relaxed(backup_regs->cr1 & ~STM32F7_I2C_CR1_PE, + i2c_dev->base + STM32F7_I2C_CR1); + if (backup_regs->cr1 & STM32F7_I2C_CR1_PE) + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_PE); + writel_relaxed(backup_regs->cr2, i2c_dev->base + STM32F7_I2C_CR2); + writel_relaxed(backup_regs->oar1, i2c_dev->base + STM32F7_I2C_OAR1); + writel_relaxed(backup_regs->oar2, i2c_dev->base + STM32F7_I2C_OAR2); + writel_relaxed(backup_regs->pecr, i2c_dev->base + STM32F7_I2C_PECR); + + pm_runtime_put_sync(i2c_dev->dev); + + return ret; +} + +static int __maybe_unused stm32f7_i2c_suspend(struct device *dev) +{ + struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int ret; + + i2c_mark_adapter_suspended(&i2c_dev->adap); + ret = stm32f7_i2c_regs_backup(i2c_dev); + if (ret < 0) { + i2c_mark_adapter_resumed(&i2c_dev->adap); + return ret; + } + + pinctrl_pm_select_sleep_state(dev); + pm_runtime_force_suspend(dev); + + return 0; +} + +static int __maybe_unused stm32f7_i2c_resume(struct device *dev) +{ + struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret < 0) + return ret; + pinctrl_pm_select_default_state(dev); + + ret = stm32f7_i2c_regs_restore(i2c_dev); + if (ret < 0) + return ret; + i2c_mark_adapter_resumed(&i2c_dev->adap); + + return 0; +} static const struct dev_pm_ops stm32f7_i2c_pm_ops = { SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend, stm32f7_i2c_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(stm32f7_i2c_suspend, stm32f7_i2c_resume) }; static const struct of_device_id stm32f7_i2c_match[] = { diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 8c3e2d409d63..42e0a53e7fa4 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -444,7 +444,7 @@ static int stu300_wait_while_busy(struct stu300_dev *dev) "Attempt: 
%d\n", i+1); dev_err(&dev->pdev->dev, "base address = " - "0x%08x, reinit hardware\n", (u32) dev->virtbase); + "0x%p, reinit hardware\n", dev->virtbase); (void) stu300_init_hw(dev); } diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c index 0bff3f3a8779..b4050f5b6746 100644 --- a/drivers/i2c/busses/i2c-taos-evm.c +++ b/drivers/i2c/busses/i2c-taos-evm.c @@ -49,10 +49,10 @@ static struct i2c_client *taos_instantiate_device(struct i2c_adapter *adapter) if (!strncmp(adapter->name, "TAOS TSL2550 EVM", 16)) { dev_info(&adapter->dev, "Instantiating device %s at 0x%02x\n", tsl2550_info.type, tsl2550_info.addr); - return i2c_new_device(adapter, &tsl2550_info); + return i2c_new_client_device(adapter, &tsl2550_info); } - return NULL; + return ERR_PTR(-ENODEV); } static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr, diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 61339c665ebd..cbc2ad49043e 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -16,7 +16,9 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/irq.h> #include <linux/kernel.h> +#include <linux/ktime.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> @@ -129,11 +131,12 @@ #define I2C_PACKET_HEADER_SIZE 12 /* - * Upto I2C_PIO_MODE_MAX_LEN bytes, controller will use PIO mode, - * above this, controller will use DMA to fill FIFO. - * MAX PIO len is 20 bytes excluding packet header. + * I2C Controller will use PIO mode for transfers up to 32 bytes in order to + * avoid DMA overhead, otherwise external APB DMA controller will be used. + * Note that the actual MAX PIO length is 20 bytes because 32 bytes include + * I2C_PACKET_HEADER_SIZE. */ -#define I2C_PIO_MODE_MAX_LEN 32 +#define I2C_PIO_MODE_PREFERRED_LEN 32 /* * msg_end_type: The bus control which need to be send at end of transfer. 
@@ -230,7 +233,6 @@ struct tegra_i2c_hw_feature { * @base_phys: physical base address of the I2C controller * @cont_id: I2C controller ID, used for packet header * @irq: IRQ number of transfer complete interrupt - * @irq_disabled: used to track whether or not the interrupt is enabled * @is_dvc: identifies the DVC I2C controller, has a different register layout * @msg_complete: transfer completion notifier * @msg_err: error code for completed message @@ -240,7 +242,6 @@ struct tegra_i2c_hw_feature { * @bus_clk_rate: current I2C bus clock rate * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes * @is_multimaster_mode: track if I2C controller is in multi-master mode - * @xfer_lock: lock to serialize transfer submission and processing * @tx_dma_chan: DMA transmit channel * @rx_dma_chan: DMA receive channel * @dma_phys: handle to DMA resources @@ -248,6 +249,7 @@ struct tegra_i2c_hw_feature { * @dma_buf_size: DMA buffer size * @is_curr_dma_xfer: indicates active DMA transfer * @dma_complete: DMA completion notifier + * @is_curr_atomic_xfer: indicates active atomic transfer */ struct tegra_i2c_dev { struct device *dev; @@ -260,7 +262,6 @@ struct tegra_i2c_dev { phys_addr_t base_phys; int cont_id; int irq; - bool irq_disabled; int is_dvc; struct completion msg_complete; int msg_err; @@ -270,8 +271,6 @@ struct tegra_i2c_dev { u32 bus_clk_rate; u16 clk_divisor_non_hs_mode; bool is_multimaster_mode; - /* xfer_lock: lock to serialize transfer submission and processing */ - spinlock_t xfer_lock; struct dma_chan *tx_dma_chan; struct dma_chan *rx_dma_chan; dma_addr_t dma_phys; @@ -279,17 +278,18 @@ struct tegra_i2c_dev { unsigned int dma_buf_size; bool is_curr_dma_xfer; struct completion dma_complete; + bool is_curr_atomic_xfer; }; static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg) { - writel(val, i2c_dev->base + reg); + writel_relaxed(val, i2c_dev->base + reg); } static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg) { - return readl(i2c_dev->base + reg); + return readl_relaxed(i2c_dev->base + reg); } /* @@ -307,16 +307,16 @@ static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg) { - writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); + writel_relaxed(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); /* Read back register to make sure that register writes completed */ if (reg != I2C_TX_FIFO) - readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); + readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); } static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg) { - return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); + return readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); } static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data, @@ -687,13 +687,15 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev) reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_CONFIG_LOAD); addr = i2c_dev->base + reg_offset; i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD); - if (in_interrupt()) - err = readl_poll_timeout_atomic(addr, val, val == 0, - 1000, - I2C_CONFIG_LOAD_TIMEOUT); + + if (i2c_dev->is_curr_atomic_xfer) + err = readl_relaxed_poll_timeout_atomic( + addr, val, val == 0, 1000, + I2C_CONFIG_LOAD_TIMEOUT); else - err = readl_poll_timeout(addr, val, val == 0, 1000, - I2C_CONFIG_LOAD_TIMEOUT); + err = readl_relaxed_poll_timeout( + addr, val, val == 0, 
1000, + I2C_CONFIG_LOAD_TIMEOUT); if (err) { dev_warn(i2c_dev->dev, @@ -790,11 +792,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) if (err) return err; - if (i2c_dev->irq_disabled) { - i2c_dev->irq_disabled = false; - enable_irq(i2c_dev->irq); - } - return 0; } @@ -825,18 +822,12 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) status = i2c_readl(i2c_dev, I2C_INT_STATUS); - spin_lock(&i2c_dev->xfer_lock); if (status == 0) { dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n", i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS), i2c_readl(i2c_dev, I2C_STATUS), i2c_readl(i2c_dev, I2C_CNFG)); i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; - - if (!i2c_dev->irq_disabled) { - disable_irq_nosync(i2c_dev->irq); - i2c_dev->irq_disabled = true; - } goto err; } @@ -925,7 +916,6 @@ err: complete(&i2c_dev->msg_complete); done: - spin_unlock(&i2c_dev->xfer_lock); return IRQ_HANDLED; } @@ -999,6 +989,64 @@ out: i2c_writel(i2c_dev, val, reg); } +static unsigned long +tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev, + struct completion *complete, + unsigned int timeout_ms) +{ + ktime_t ktime = ktime_get(); + ktime_t ktimeout = ktime_add_ms(ktime, timeout_ms); + + do { + u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS); + + if (status) { + tegra_i2c_isr(i2c_dev->irq, i2c_dev); + + if (completion_done(complete)) { + s64 delta = ktime_ms_delta(ktimeout, ktime); + + return msecs_to_jiffies(delta) ?: 1; + } + } + + ktime = ktime_get(); + + } while (ktime_before(ktime, ktimeout)); + + return 0; +} + +static unsigned long +tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev, + struct completion *complete, + unsigned int timeout_ms) +{ + unsigned long ret; + + if (i2c_dev->is_curr_atomic_xfer) { + ret = tegra_i2c_poll_completion_timeout(i2c_dev, complete, + timeout_ms); + } else { + enable_irq(i2c_dev->irq); + ret = wait_for_completion_timeout(complete, + msecs_to_jiffies(timeout_ms)); + disable_irq(i2c_dev->irq); + + /* + * There is a chance that completion may happen after IRQ + * synchronization, which is done by disable_irq(). 
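+	 * In that case the interrupt handler has already signalled the completion, so the code below reports success instead of a spurious timeout.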
+ */ + if (ret == 0 && completion_done(complete)) { + dev_warn(i2c_dev->dev, + "completion done after timeout\n"); + ret = 1; + } + } + + return ret; +} + static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) { struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); @@ -1020,8 +1068,8 @@ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG); tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE); - time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, - msecs_to_jiffies(50)); + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->msg_complete, 50); if (time_left == 0) { dev_err(i2c_dev->dev, "timed out for bus clear\n"); return -ETIMEDOUT; @@ -1044,7 +1092,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, u32 packet_header; u32 int_mask; unsigned long time_left; - unsigned long flags; size_t xfer_size; u32 *buffer = NULL; int err = 0; @@ -1065,8 +1112,9 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, xfer_size = msg->len + I2C_PACKET_HEADER_SIZE; xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD); - i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_MAX_LEN) && - i2c_dev->dma_buf; + i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_PREFERRED_LEN) && + i2c_dev->dma_buf && + !i2c_dev->is_curr_atomic_xfer; tegra_i2c_config_fifo_trig(i2c_dev, xfer_size); dma = i2c_dev->is_curr_dma_xfer; /* @@ -1075,7 +1123,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, */ xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC, i2c_dev->bus_clk_rate); - spin_lock_irqsave(&i2c_dev->xfer_lock, flags); int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; tegra_i2c_unmask_irq(i2c_dev, int_mask); @@ -1090,7 +1137,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_err(i2c_dev->dev, "starting RX DMA failed, err %d\n", err); - goto unlock; + return err; } } else { @@ -1149,7 +1196,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_err(i2c_dev->dev, "starting TX DMA failed, err %d\n", err); - goto unlock; + return err; } } else { tegra_i2c_fill_tx_fifo(i2c_dev); @@ -1169,20 +1216,16 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n", i2c_readl(i2c_dev, I2C_INT_MASK)); -unlock: - spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags); - if (dma) { - if (err) - return err; + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->dma_complete, xfer_time); + + dmaengine_terminate_sync(i2c_dev->msg_read ? + i2c_dev->rx_dma_chan : + i2c_dev->tx_dma_chan); - time_left = wait_for_completion_timeout(&i2c_dev->dma_complete, - msecs_to_jiffies(xfer_time)); - if (time_left == 0) { + if (!time_left && !completion_done(&i2c_dev->dma_complete)) { dev_err(i2c_dev->dev, "DMA transfer timeout\n"); - dmaengine_terminate_sync(i2c_dev->msg_read ? - i2c_dev->rx_dma_chan : - i2c_dev->tx_dma_chan); tegra_i2c_init(i2c_dev, true); return -ETIMEDOUT; } @@ -1195,20 +1238,15 @@ unlock: memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, msg->len); } - - if (i2c_dev->msg_err != I2C_ERR_NONE) - dmaengine_synchronize(i2c_dev->msg_read ? 
- i2c_dev->rx_dma_chan : - i2c_dev->tx_dma_chan); } - time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, - msecs_to_jiffies(xfer_time)); + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->msg_complete, xfer_time); + tegra_i2c_mask_irq(i2c_dev, int_mask); if (time_left == 0) { dev_err(i2c_dev->dev, "i2c transfer timed out\n"); - tegra_i2c_init(i2c_dev, true); return -ETIMEDOUT; } @@ -1270,6 +1308,19 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], return ret ?: i; } +static int tegra_i2c_xfer_atomic(struct i2c_adapter *adap, + struct i2c_msg msgs[], int num) +{ + struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); + int ret; + + i2c_dev->is_curr_atomic_xfer = true; + ret = tegra_i2c_xfer(adap, msgs, num); + i2c_dev->is_curr_atomic_xfer = false; + + return ret; +} + static u32 tegra_i2c_func(struct i2c_adapter *adap) { struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); @@ -1297,8 +1348,9 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev) } static const struct i2c_algorithm tegra_i2c_algo = { - .master_xfer = tegra_i2c_xfer, - .functionality = tegra_i2c_func, + .master_xfer = tegra_i2c_xfer, + .master_xfer_atomic = tegra_i2c_xfer_atomic, + .functionality = tegra_i2c_func, }; /* payload size is only 12 bit */ @@ -1568,7 +1620,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) I2C_PACKET_HEADER_SIZE; init_completion(&i2c_dev->msg_complete); init_completion(&i2c_dev->dma_complete); - spin_lock_init(&i2c_dev->xfer_lock); if (!i2c_dev->hw->has_single_clk_source) { fast_clk = devm_clk_get(&pdev->dev, "fast-clk"); @@ -1607,6 +1658,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) goto unprepare_fast_clk; } + pm_runtime_irq_safe(&pdev->dev); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra_i2c_runtime_resume(&pdev->dev); @@ -1644,6 +1696,8 @@ static int tegra_i2c_probe(struct platform_device *pdev) goto release_dma; } + irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN); + ret = devm_request_irq(&pdev->dev, i2c_dev->irq, tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); if (ret) { diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index 43e3603489ee..7279ca0eaa2d 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -84,7 +84,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) pmsg->buf, pmsg->len) != pmsg->len) { dev_err(&adapter->dev, "failure reading data\n"); - ret = -EREMOTEIO; + ret = -EIO; goto out; } } else { @@ -94,7 +94,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) pmsg->buf, pmsg->len) != pmsg->len) { dev_err(&adapter->dev, "failure writing data\n"); - ret = -EREMOTEIO; + ret = -EIO; goto out; } } @@ -102,13 +102,13 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) /* read status */ if (usb_read(adapter, CMD_GET_STATUS, 0, 0, pstatus, 1) != 1) { dev_err(&adapter->dev, "failure reading status\n"); - ret = -EREMOTEIO; + ret = -EIO; goto out; } dev_dbg(&adapter->dev, " status = %d\n", *pstatus); if (*pstatus == STATUS_ADDRESS_NAK) { - ret = -EREMOTEIO; + ret = -ENXIO; goto out; } } diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index d8d49f1814c7..90c1c362394d 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -156,6 +156,8 @@ struct xiic_i2c { #define XIIC_RESET_MASK 0xAUL #define XIIC_PM_TIMEOUT 1000 /* ms */ +/* timeout 
waiting for the controller to respond */ +#define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000)) /* * The following constant is used for the device global interrupt enable * register, to enable all interrupts for the device, this is the only bit @@ -166,7 +168,7 @@ struct xiic_i2c { #define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos) #define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos) -static void xiic_start_xfer(struct xiic_i2c *i2c); +static int xiic_start_xfer(struct xiic_i2c *i2c); static void __xiic_start_xfer(struct xiic_i2c *i2c); /* @@ -247,17 +249,29 @@ static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask) xiic_irq_en(i2c, mask); } -static void xiic_clear_rx_fifo(struct xiic_i2c *i2c) +static int xiic_clear_rx_fifo(struct xiic_i2c *i2c) { u8 sr; + unsigned long timeout; + + timeout = jiffies + XIIC_I2C_TIMEOUT; for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET); !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK); - sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) + sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) { xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET); + if (time_after(jiffies, timeout)) { + dev_err(i2c->dev, "Failed to clear rx fifo\n"); + return -ETIMEDOUT; + } + } + + return 0; } -static void xiic_reinit(struct xiic_i2c *i2c) +static int xiic_reinit(struct xiic_i2c *i2c) { + int ret; + xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK); /* Set receive Fifo depth to maximum (zero based). */ @@ -270,12 +284,16 @@ static void xiic_reinit(struct xiic_i2c *i2c) xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK); /* make sure RX fifo is empty */ - xiic_clear_rx_fifo(i2c); + ret = xiic_clear_rx_fifo(i2c); + if (ret) + return ret; /* Enable interrupts */ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK); xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK); + + return 0; } static void xiic_deinit(struct xiic_i2c *i2c) @@ -655,12 +673,18 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c) } -static void xiic_start_xfer(struct xiic_i2c *i2c) +static int xiic_start_xfer(struct xiic_i2c *i2c) { + int ret; mutex_lock(&i2c->lock); - xiic_reinit(i2c); - __xiic_start_xfer(i2c); + + ret = xiic_reinit(i2c); + if (!ret) + __xiic_start_xfer(i2c); + mutex_unlock(&i2c->lock); + + return ret; } static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) @@ -682,7 +706,11 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) i2c->tx_msg = msgs; i2c->nmsgs = num; - xiic_start_xfer(i2c); + err = xiic_start_xfer(i2c); + if (err < 0) { + dev_err(adap->dev.parent, "Error xiic_start_xfer\n"); + goto out; + } if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) || (i2c->state == STATE_DONE), HZ)) { @@ -760,7 +788,8 @@ static int xiic_i2c_probe(struct platform_device *pdev) i2c->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) { - dev_err(&pdev->dev, "input clock not found.\n"); + if (PTR_ERR(i2c->clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "input clock not found.\n"); return PTR_ERR(i2c->clk); } ret = clk_prepare_enable(i2c->clk); @@ -769,10 +798,10 @@ static int xiic_i2c_probe(struct platform_device *pdev) return ret; } i2c->dev = &pdev->dev; - pm_runtime_enable(i2c->dev); pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT); pm_runtime_use_autosuspend(i2c->dev); pm_runtime_set_active(i2c->dev); + pm_runtime_enable(i2c->dev); ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr, xiic_process, IRQF_ONESHOT, pdev->name, i2c); @@ -794,7 +823,11 @@ static int xiic_i2c_probe(struct platform_device 
*pdev) if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK)) i2c->endianness = BIG; - xiic_reinit(i2c); + ret = xiic_reinit(i2c); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot xiic_reinit\n"); + goto err_clk_dis; + } /* add i2c adapter to i2c tree */ ret = i2c_add_adapter(&i2c->adap); @@ -806,7 +839,7 @@ static int xiic_i2c_probe(struct platform_device *pdev) if (pdata) { /* add in known devices to the bus */ for (i = 0; i < pdata->num_devices; i++) - i2c_new_device(&i2c->adap, pdata->devices + i); + i2c_new_client_device(&i2c->adap, pdata->devices + i); } return 0; @@ -826,14 +859,16 @@ static int xiic_i2c_remove(struct platform_device *pdev) /* remove adapter & data */ i2c_del_adapter(&i2c->adap); - ret = clk_prepare_enable(i2c->clk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable clock.\n"); + ret = pm_runtime_get_sync(i2c->dev); + if (ret < 0) return ret; - } + xiic_deinit(i2c); + pm_runtime_put_sync(i2c->dev); clk_disable_unprepare(i2c->clk); pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); return 0; } diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 62a1c92ab803..8f3dbc97a057 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -225,7 +225,7 @@ static void i2c_acpi_register_device(struct i2c_adapter *adapter, adev->power.flags.ignore_parent = true; acpi_device_set_enumerated(adev); - if (!i2c_new_device(adapter, info)) { + if (IS_ERR(i2c_new_client_device(adapter, info))) { adev->power.flags.ignore_parent = false; dev_err(&adapter->dev, "failed to add I2C device %s from ACPI\n", @@ -451,7 +451,8 @@ struct notifier_block i2c_acpi_notifier = { * resources, in that case this function can be used to create an i2c-client * for other I2cSerialBus resources in the Current Resource Settings table. * - * Also see i2c_new_device, which this function calls to create the i2c-client. + * Also see i2c_new_client_device, which this function calls to create the + * i2c-client. * * Returns a pointer to the new i2c-client, or error pointer in case of failure. * Specifically, -EPROBE_DEFER is returned if the adapter is not found. @@ -461,7 +462,6 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, { struct i2c_acpi_lookup lookup; struct i2c_adapter *adapter; - struct i2c_client *client; struct acpi_device *adev; LIST_HEAD(resource_list); int ret; @@ -489,11 +489,7 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, if (!adapter) return ERR_PTR(-EPROBE_DEFER); - client = i2c_new_device(adapter, info); - if (!client) - return ERR_PTR(-ENODEV); - - return client; + return i2c_new_client_device(adapter, info); } EXPORT_SYMBOL_GPL(i2c_acpi_new_device); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 35b209797d7b..cefad0881942 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -456,15 +456,15 @@ static void i2c_client_dev_release(struct device *dev) } static ssize_t -show_name(struct device *dev, struct device_attribute *attr, char *buf) +name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", dev->type == &i2c_client_type ? 
to_i2c_client(dev)->name : to_i2c_adapter(dev)->name); } -static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); +static DEVICE_ATTR_RO(name); static ssize_t -show_modalias(struct device *dev, struct device_attribute *attr, char *buf) +modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); int len; @@ -479,7 +479,7 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name); } -static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL); +static DEVICE_ATTR_RO(modalias); static struct attribute *i2c_dev_attrs[] = { &dev_attr_name.attr, @@ -831,8 +831,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device); /** - * i2c_unregister_device - reverse effect of i2c_new_device() - * @client: value returned from i2c_new_device() + * i2c_unregister_device - reverse effect of i2c_new_*_device() + * @client: value returned from i2c_new_*_device() * Context: can sleep */ void i2c_unregister_device(struct i2c_client *client) @@ -1023,8 +1023,8 @@ EXPORT_SYMBOL_GPL(i2c_adapter_depth); * the user to provide incorrect parameters. */ static ssize_t -i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +new_device_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct i2c_adapter *adap = to_i2c_adapter(dev); struct i2c_board_info info; @@ -1079,7 +1079,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device); +static DEVICE_ATTR_WO(new_device); /* * And of course let the users delete the devices they instantiated, if @@ -1091,8 +1091,8 @@ static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device); * the user to delete the wrong device. 
*/ static ssize_t -i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +delete_device_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct i2c_adapter *adap = to_i2c_adapter(dev); struct i2c_client *client, *next; @@ -1135,7 +1135,7 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr, return res; } static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL, - i2c_sysfs_delete_device); + delete_device_store); static struct attribute *i2c_adapter_attrs[] = { &dev_attr_name.attr, @@ -1178,9 +1178,8 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter) down_read(&__i2c_board_lock); list_for_each_entry(devinfo, &__i2c_board_list, list) { - if (devinfo->busnum == adapter->nr - && !i2c_new_device(adapter, - &devinfo->board_info)) + if (devinfo->busnum == adapter->nr && + IS_ERR(i2c_new_client_device(adapter, &devinfo->board_info))) dev_err(&adapter->dev, "Can't create device at 0x%02x\n", devinfo->board_info.addr); @@ -2167,8 +2166,8 @@ static int i2c_detect_address(struct i2c_client *temp_client, dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n", info.type, info.addr); - client = i2c_new_device(adapter, &info); - if (client) + client = i2c_new_client_device(adapter, &info); + if (!IS_ERR(client)) list_add_tail(&client->detected, &driver->clients); else dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n", diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c index e4d296b40baa..6787c1f71483 100644 --- a/drivers/i2c/i2c-core-of.c +++ b/drivers/i2c/i2c-core-of.c @@ -75,11 +75,10 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, if (ret) return ERR_PTR(ret); - client = i2c_new_device(adap, &info); - if (!client) { + client = i2c_new_client_device(adap, &info); + if (IS_ERR(client)) dev_err(&adap->dev, "of_i2c: Failure registering %pOF\n", node); - return ERR_PTR(-EINVAL); - } + return client; } diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c index 50e1fb4aedf5..6daec8d3d331 100644 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c @@ -16,6 +16,7 @@ * warranty of any kind, whether express or implied. 
*/ +#include <linux/bitops.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/i2c.h> @@ -42,20 +43,20 @@ #define PCA9541_CONTROL 0x01 #define PCA9541_ISTAT 0x02 -#define PCA9541_CTL_MYBUS (1 << 0) -#define PCA9541_CTL_NMYBUS (1 << 1) -#define PCA9541_CTL_BUSON (1 << 2) -#define PCA9541_CTL_NBUSON (1 << 3) -#define PCA9541_CTL_BUSINIT (1 << 4) -#define PCA9541_CTL_TESTON (1 << 6) -#define PCA9541_CTL_NTESTON (1 << 7) - -#define PCA9541_ISTAT_INTIN (1 << 0) -#define PCA9541_ISTAT_BUSINIT (1 << 1) -#define PCA9541_ISTAT_BUSOK (1 << 2) -#define PCA9541_ISTAT_BUSLOST (1 << 3) -#define PCA9541_ISTAT_MYTEST (1 << 6) -#define PCA9541_ISTAT_NMYTEST (1 << 7) +#define PCA9541_CTL_MYBUS BIT(0) +#define PCA9541_CTL_NMYBUS BIT(1) +#define PCA9541_CTL_BUSON BIT(2) +#define PCA9541_CTL_NBUSON BIT(3) +#define PCA9541_CTL_BUSINIT BIT(4) +#define PCA9541_CTL_TESTON BIT(6) +#define PCA9541_CTL_NTESTON BIT(7) + +#define PCA9541_ISTAT_INTIN BIT(0) +#define PCA9541_ISTAT_BUSINIT BIT(1) +#define PCA9541_ISTAT_BUSOK BIT(2) +#define PCA9541_ISTAT_BUSLOST BIT(3) +#define PCA9541_ISTAT_MYTEST BIT(6) +#define PCA9541_ISTAT_NMYTEST BIT(7) #define BUSON (PCA9541_CTL_BUSON | PCA9541_CTL_NBUSON) #define MYBUS (PCA9541_CTL_MYBUS | PCA9541_CTL_NMYBUS) diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 923aa3a5a3dc..a0d926ae3f86 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -86,7 +86,7 @@ struct pca954x { u8 last_chan; /* last register value */ /* MUX_IDLE_AS_IS, MUX_IDLE_DISCONNECT or >= 0 for channel */ - s8 idle_state; + s32 idle_state; struct i2c_client *client; @@ -229,20 +229,23 @@ static int pca954x_reg_write(struct i2c_adapter *adap, I2C_SMBUS_BYTE, &dummy); } +static u8 pca954x_regval(struct pca954x *data, u8 chan) +{ + /* We make switches look like muxes, not sure how to be smarter. 
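+	 * A mux register takes the channel number plus an enable bit, while a switch register takes a one-hot channel mask; only a single bit is ever set here, so a switch never has more than one channel enabled.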
*/ + if (data->chip->muxtype == pca954x_ismux) + return chan | data->chip->enable; + else + return 1 << chan; +} + static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan) { struct pca954x *data = i2c_mux_priv(muxc); struct i2c_client *client = data->client; - const struct chip_desc *chip = data->chip; u8 regval; int ret = 0; - /* we make switches look like muxes, not sure how to be smarter */ - if (chip->muxtype == pca954x_ismux) - regval = chan | chip->enable; - else - regval = 1 << chan; - + regval = pca954x_regval(data, chan); /* Only select the channel if its different from the last channel */ if (data->last_chan != regval) { ret = pca954x_reg_write(muxc->parent, client, regval); @@ -256,7 +259,7 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan) { struct pca954x *data = i2c_mux_priv(muxc); struct i2c_client *client = data->client; - s8 idle_state; + s32 idle_state; idle_state = READ_ONCE(data->idle_state); if (idle_state >= 0) @@ -402,6 +405,22 @@ static void pca954x_cleanup(struct i2c_mux_core *muxc) i2c_mux_del_adapters(muxc); } +static int pca954x_init(struct i2c_client *client, struct pca954x *data) +{ + int ret; + + if (data->idle_state >= 0) + data->last_chan = pca954x_regval(data, data->idle_state); + else + data->last_chan = 0; /* Disconnect multiplexer */ + + ret = i2c_smbus_write_byte(client, data->last_chan); + if (ret < 0) + data->last_chan = 0; + + return ret; +} + /* * I2C init/probing/exit functions */ @@ -411,7 +430,6 @@ static int pca954x_probe(struct i2c_client *client, struct i2c_adapter *adap = client->adapter; struct device *dev = &client->dev; struct device_node *np = dev->of_node; - bool idle_disconnect_dt; struct gpio_desc *gpio; struct i2c_mux_core *muxc; struct pca954x *data; @@ -462,23 +480,24 @@ static int pca954x_probe(struct i2c_client *client, } } - /* Write the mux register at addr to verify + data->idle_state = MUX_IDLE_AS_IS; + if (of_property_read_u32(np, "idle-state", &data->idle_state)) { + if (np && of_property_read_bool(np, "i2c-mux-idle-disconnect")) + data->idle_state = MUX_IDLE_DISCONNECT; + } + + /* + * Write the mux register at addr to verify * that the mux is in fact present. This also - * initializes the mux to disconnected state. + * initializes the mux to a channel + * or disconnected state. 
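+	 * Whether a channel or the disconnected state is used here follows the "idle-state" and "i2c-mux-idle-disconnect" properties parsed above.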
*/ - if (i2c_smbus_write_byte(client, 0) < 0) { + ret = pca954x_init(client, data); + if (ret < 0) { dev_warn(dev, "probe failed\n"); return -ENODEV; } - data->last_chan = 0; /* force the first selection */ - data->idle_state = MUX_IDLE_AS_IS; - - idle_disconnect_dt = np && - of_property_read_bool(np, "i2c-mux-idle-disconnect"); - if (idle_disconnect_dt) - data->idle_state = MUX_IDLE_DISCONNECT; - ret = pca954x_irq_setup(muxc); if (ret) goto fail_cleanup; @@ -530,9 +549,13 @@ static int pca954x_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct i2c_mux_core *muxc = i2c_get_clientdata(client); struct pca954x *data = i2c_mux_priv(muxc); + int ret; - data->last_chan = 0; - return i2c_smbus_write_byte(client, 0); + ret = pca954x_init(client, data); + if (ret < 0) + dev_err(&client->dev, "failed to verify mux presence\n"); + + return ret; } #endif diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 7833e650789f..d55606608ac8 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -63,6 +63,7 @@ static struct cpuidle_driver intel_idle_driver = { }; /* intel_idle.max_cstate=0 disables driver */ static int max_cstate = CPUIDLE_STATE_MAX - 1; +static unsigned int disabled_states_mask; static unsigned int mwait_substates; @@ -1131,6 +1132,10 @@ static bool no_acpi __read_mostly; module_param(no_acpi, bool, 0444); MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list"); +static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */ +module_param_named(use_acpi, force_use_acpi, bool, 0444); +MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list"); + static struct acpi_processor_power acpi_state_table __initdata; /** @@ -1230,6 +1235,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) if (cx->type > ACPI_STATE_C2) state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; + if (disabled_states_mask & BIT(cstate)) + state->flags |= CPUIDLE_FLAG_OFF; + state->enter = intel_idle; state->enter_s2idle = intel_idle_s2idle; } @@ -1258,6 +1266,8 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint) return true; } #else /* !CONFIG_ACPI_PROCESSOR_CSTATE */ +#define force_use_acpi (false) + static inline bool intel_idle_acpi_cst_extract(void) { return false; } static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } @@ -1460,8 +1470,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) /* Structure copy. 
*/ drv->states[drv->state_count] = cpuidle_state_table[cstate]; - if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) && - !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)) + if ((disabled_states_mask & BIT(drv->state_count)) || + ((icpu->use_acpi || force_use_acpi) && + intel_idle_off_by_default(mwait_hint) && + !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE))) drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF; drv->state_count++; @@ -1480,6 +1492,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv) { cpuidle_poll_state_init(drv); + + if (disabled_states_mask & BIT(0)) + drv->states[0].flags |= CPUIDLE_FLAG_OFF; + drv->state_count = 1; if (icpu) @@ -1607,7 +1623,7 @@ static int __init intel_idle_init(void) icpu = (const struct idle_cpu *)id->driver_data; if (icpu) { cpuidle_state_table = icpu->state_table; - if (icpu->use_acpi) + if (icpu->use_acpi || force_use_acpi) intel_idle_acpi_cst_extract(); } else if (!intel_idle_acpi_cst_extract()) { return -ENODEV; @@ -1660,3 +1676,11 @@ device_initcall(intel_idle_init); * is the easiest way (currently) to continue doing that. */ module_param(max_cstate, int, 0444); +/* + * The positions of the bits that are set in this number are the indices of the + * idle states to be disabled by default (as reflected by the names of the + * corresponding idle state directories in sysfs, "state0", "state1" ... + * "state<i>" ..., where <i> is the index of the given state). + */ +module_param_named(states_off, disabled_states_mask, uint, 0444); +MODULE_PARM_DESC(states_off, "Mask of disabled idle states"); diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c index 19f086716dc5..02b7b28e6969 100644 --- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c +++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c @@ -143,7 +143,6 @@ static const struct mbox_chan_ops a37xx_mbox_ops = { static int armada_37xx_mbox_probe(struct platform_device *pdev) { struct a37xx_mbox *mbox; - struct resource *regs; struct mbox_chan *chans; int ret; @@ -156,9 +155,7 @@ static int armada_37xx_mbox_probe(struct platform_device *pdev) if (!chans) return -ENOMEM; - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - mbox->base = devm_ioremap_resource(&pdev->dev, regs); + mbox->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mbox->base)) { dev_err(&pdev->dev, "ioremap failed\n"); return PTR_ERR(mbox->base); diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index e230052c2107..b952bd45bd6a 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -364,7 +364,7 @@ static int read_page(struct file *file, unsigned long index, int ret = 0; struct inode *inode = file_inode(file); struct buffer_head *bh; - sector_t block; + sector_t block, blk_cur; pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT); @@ -375,17 +375,21 @@ static int read_page(struct file *file, unsigned long index, goto out; } attach_page_buffers(page, bh); - block = index << (PAGE_SHIFT - inode->i_blkbits); + blk_cur = index << (PAGE_SHIFT - inode->i_blkbits); while (bh) { + block = blk_cur; + if (count == 0) bh->b_blocknr = 0; else { - bh->b_blocknr = bmap(inode, block); - if (bh->b_blocknr == 0) { - /* Cannot use this file! 
*/ + ret = bmap(inode, &block); + if (ret || !block) { ret = -EINVAL; + bh->b_blocknr = 0; goto out; } + + bh->b_blocknr = block; bh->b_bdev = inode->i_sb->s_bdev; if (count < (1<<inode->i_blkbits)) count = 0; @@ -399,7 +403,7 @@ static int read_page(struct file *file, unsigned long index, set_buffer_mapped(bh); submit_bh(REQ_OP_READ, 0, bh); } - block++; + blk_cur++; bh = bh->b_this_page; } page->index = index; diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c index 095f8a3b2cfc..886aea587276 100644 --- a/drivers/memory/mvebu-devbus.c +++ b/drivers/memory/mvebu-devbus.c @@ -267,7 +267,6 @@ static int mvebu_devbus_probe(struct platform_device *pdev) struct devbus_read_params r; struct devbus_write_params w; struct devbus *devbus; - struct resource *res; struct clk *clk; unsigned long rate; int err; @@ -277,8 +276,7 @@ static int mvebu_devbus_probe(struct platform_device *pdev) return -ENOMEM; devbus->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - devbus->base = devm_ioremap_resource(&pdev->dev, res); + devbus->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(devbus->base)) return PTR_ERR(devbus->base); diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig index e9c3ce92350c..20a8406ce786 100644 --- a/drivers/memory/samsung/Kconfig +++ b/drivers/memory/samsung/Kconfig @@ -8,7 +8,7 @@ config SAMSUNG_MC if SAMSUNG_MC config EXYNOS5422_DMC - tristate "EXYNOS5422 Dynamic Memory Controller driver" + tristate "Exynos5422 Dynamic Memory Controller driver" depends on ARCH_EXYNOS || (COMPILE_TEST && HAS_IOMEM) select DDR depends on DEVFREQ_GOV_SIMPLE_ONDEMAND diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c index c27c6105c66d..6510d7bab217 100644 --- a/drivers/memory/samsung/exynos-srom.c +++ b/drivers/memory/samsung/exynos-srom.c @@ -3,7 +3,7 @@ // Copyright (c) 2015 Samsung Electronics Co., Ltd. 
// http://www.samsung.com/ // -// EXYNOS - SROM Controller support +// Exynos - SROM Controller support // Author: Pankaj Dubey <pankaj.dubey@samsung.com> #include <linux/io.h> diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c index 47dbf6d1789f..81a1b1d01683 100644 --- a/drivers/memory/samsung/exynos5422-dmc.c +++ b/drivers/memory/samsung/exynos5422-dmc.c @@ -1374,7 +1374,6 @@ static int exynos5_dmc_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct exynos5_dmc *dmc; - struct resource *res; int irq[2]; dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL); @@ -1386,13 +1385,11 @@ static int exynos5_dmc_probe(struct platform_device *pdev) dmc->dev = dev; platform_set_drvdata(pdev, dmc); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dmc->base_drexi0 = devm_ioremap_resource(dev, res); + dmc->base_drexi0 = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dmc->base_drexi0)) return PTR_ERR(dmc->base_drexi0); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - dmc->base_drexi1 = devm_ioremap_resource(dev, res); + dmc->base_drexi1 = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(dmc->base_drexi1)) return PTR_ERR(dmc->base_drexi1); diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile index 3d23c4261104..529d10bc5650 100644 --- a/drivers/memory/tegra/Makefile +++ b/drivers/memory/tegra/Makefile @@ -13,4 +13,5 @@ obj-$(CONFIG_TEGRA_MC) += tegra-mc.o obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o -obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o +obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o tegra186-emc.o +obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra186-emc.o diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c index 464f0ceaee63..21f05240682b 100644 --- a/drivers/memory/tegra/tegra124-emc.c +++ b/drivers/memory/tegra/tegra124-emc.c @@ -467,12 +467,20 @@ struct tegra_emc { void __iomem *regs; + struct clk *clk; + enum emc_dram_type dram_type; unsigned int dram_num; struct emc_timing last_timing; struct emc_timing *timings; unsigned int num_timings; + + struct { + struct dentry *root; + unsigned long min_rate; + unsigned long max_rate; + } debugfs; }; /* Timing change sequence functions */ @@ -998,38 +1006,51 @@ tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code) return NULL; } -/* Debugfs entry */ +/* + * debugfs interface + * + * The memory controller driver exposes some files in debugfs that can be used + * to control the EMC frequency. The top-level directory can be found here: + * + * /sys/kernel/debug/emc + * + * It contains the following files: + * + * - available_rates: This file contains a list of valid, space-separated + * EMC frequencies. + * + * - min_rate: Writing a value to this file sets the given frequency as the + * floor of the permitted range. If this is higher than the currently + * configured EMC frequency, this will cause the frequency to be + * increased so that it stays within the valid range. + * + * - max_rate: Similarly to the min_rate file, writing a value to this file + * sets the given frequency as the ceiling of the permitted range. If + * the value is lower than the currently configured EMC frequency, this + * will cause the frequency to be decreased so that it stays within the + * valid range.
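+ * + * As an illustrative usage sketch (not part of the original patch; the + * rates shown are hypothetical and depend on the timings parsed from the + * device tree): + * + * # cat /sys/kernel/debug/emc/available_rates + * 40800000 68000000 102000000 204000000 + * # echo 204000000 > /sys/kernel/debug/emc/min_rate + * + * after which the EMC clock is raised to at least 204 MHz if it was + * previously running below that rate.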
+ */ -static int emc_debug_rate_get(void *data, u64 *rate) +static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) { - struct clk *c = data; - - *rate = clk_get_rate(c); - - return 0; -} + unsigned int i; -static int emc_debug_rate_set(void *data, u64 rate) -{ - struct clk *c = data; + for (i = 0; i < emc->num_timings; i++) + if (rate == emc->timings[i].rate) + return true; - return clk_set_rate(c, rate); + return false; } -DEFINE_SIMPLE_ATTRIBUTE(emc_debug_rate_fops, emc_debug_rate_get, - emc_debug_rate_set, "%lld\n"); - -static int emc_debug_supported_rates_show(struct seq_file *s, void *data) +static int tegra_emc_debug_available_rates_show(struct seq_file *s, + void *data) { struct tegra_emc *emc = s->private; const char *prefix = ""; unsigned int i; for (i = 0; i < emc->num_timings; i++) { - struct emc_timing *timing = &emc->timings[i]; - - seq_printf(s, "%s%lu", prefix, timing->rate); - + seq_printf(s, "%s%lu", prefix, emc->timings[i].rate); prefix = " "; } @@ -1038,46 +1059,126 @@ static int emc_debug_supported_rates_show(struct seq_file *s, void *data) return 0; } -static int emc_debug_supported_rates_open(struct inode *inode, - struct file *file) +static int tegra_emc_debug_available_rates_open(struct inode *inode, + struct file *file) { - return single_open(file, emc_debug_supported_rates_show, + return single_open(file, tegra_emc_debug_available_rates_show, inode->i_private); } -static const struct file_operations emc_debug_supported_rates_fops = { - .open = emc_debug_supported_rates_open, +static const struct file_operations tegra_emc_debug_available_rates_fops = { + .open = tegra_emc_debug_available_rates_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; +static int tegra_emc_debug_min_rate_get(void *data, u64 *rate) +{ + struct tegra_emc *emc = data; + + *rate = emc->debugfs.min_rate; + + return 0; +} + +static int tegra_emc_debug_min_rate_set(void *data, u64 rate) +{ + struct tegra_emc *emc = data; + int err; + + if (!tegra_emc_validate_rate(emc, rate)) + return -EINVAL; + + err = clk_set_min_rate(emc->clk, rate); + if (err < 0) + return err; + + emc->debugfs.min_rate = rate; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops, + tegra_emc_debug_min_rate_get, + tegra_emc_debug_min_rate_set, "%llu\n"); + +static int tegra_emc_debug_max_rate_get(void *data, u64 *rate) +{ + struct tegra_emc *emc = data; + + *rate = emc->debugfs.max_rate; + + return 0; +} + +static int tegra_emc_debug_max_rate_set(void *data, u64 rate) +{ + struct tegra_emc *emc = data; + int err; + + if (!tegra_emc_validate_rate(emc, rate)) + return -EINVAL; + + err = clk_set_max_rate(emc->clk, rate); + if (err < 0) + return err; + + emc->debugfs.max_rate = rate; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops, + tegra_emc_debug_max_rate_get, + tegra_emc_debug_max_rate_set, "%llu\n"); + static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc) { - struct dentry *root, *file; - struct clk *clk; + unsigned int i; + int err; - root = debugfs_create_dir("emc", NULL); - if (!root) { - dev_err(dev, "failed to create debugfs directory\n"); - return; + emc->clk = devm_clk_get(dev, "emc"); + if (IS_ERR(emc->clk)) { + if (PTR_ERR(emc->clk) != -ENODEV) { + dev_err(dev, "failed to get EMC clock: %ld\n", + PTR_ERR(emc->clk)); + return; + } } - clk = clk_get_sys("tegra-clk-debug", "emc"); - if (IS_ERR(clk)) { - dev_err(dev, "failed to get debug clock: %ld\n", PTR_ERR(clk)); + emc->debugfs.min_rate = ULONG_MAX; + 
emc->debugfs.max_rate = 0; + + for (i = 0; i < emc->num_timings; i++) { + if (emc->timings[i].rate < emc->debugfs.min_rate) + emc->debugfs.min_rate = emc->timings[i].rate; + + if (emc->timings[i].rate > emc->debugfs.max_rate) + emc->debugfs.max_rate = emc->timings[i].rate; + } + + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, + emc->debugfs.max_rate); + if (err < 0) { + dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n", + emc->debugfs.min_rate, emc->debugfs.max_rate, + emc->clk); return; } - file = debugfs_create_file("rate", S_IRUGO | S_IWUSR, root, clk, - &emc_debug_rate_fops); - if (!file) - dev_err(dev, "failed to create debugfs entry\n"); + emc->debugfs.root = debugfs_create_dir("emc", NULL); + if (!emc->debugfs.root) { + dev_err(dev, "failed to create debugfs directory\n"); + return; + } - file = debugfs_create_file("supported_rates", S_IRUGO, root, emc, - &emc_debug_supported_rates_fops); - if (!file) - dev_err(dev, "failed to create debugfs entry\n"); + debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, emc, + &tegra_emc_debug_available_rates_fops); + debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root, + emc, &tegra_emc_debug_min_rate_fops); + debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root, + emc, &tegra_emc_debug_max_rate_fops); } static int tegra_emc_probe(struct platform_device *pdev) diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c new file mode 100644 index 000000000000..97f26bc77ad4 --- /dev/null +++ b/drivers/memory/tegra/tegra186-emc.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2019 NVIDIA CORPORATION. All rights reserved. + */ + +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> + +#include <soc/tegra/bpmp.h> + +struct tegra186_emc_dvfs { + unsigned long latency; + unsigned long rate; +}; + +struct tegra186_emc { + struct tegra_bpmp *bpmp; + struct device *dev; + struct clk *clk; + + struct tegra186_emc_dvfs *dvfs; + unsigned int num_dvfs; + + struct { + struct dentry *root; + unsigned long min_rate; + unsigned long max_rate; + } debugfs; }; + +/* + * debugfs interface + * + * The memory controller driver exposes some files in debugfs that can be used + * to control the EMC frequency. The top-level directory can be found here: + * + * /sys/kernel/debug/emc + * + * It contains the following files: + * + * - available_rates: This file contains a list of valid, space-separated + * EMC frequencies. + * + * - min_rate: Writing a value to this file sets the given frequency as the + * floor of the permitted range. If this is higher than the currently + * configured EMC frequency, this will cause the frequency to be + * increased so that it stays within the valid range. + * + * - max_rate: Similarly to the min_rate file, writing a value to this file + * sets the given frequency as the ceiling of the permitted range. If + * the value is lower than the currently configured EMC frequency, this + * will cause the frequency to be decreased so that it stays within the + * valid range.
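+ * + * Only rates reported by the BPMP firmware in its DVFS table are accepted; + * as a hypothetical example, writing any other value is rejected with + * -EINVAL: + * + * # echo 1 > /sys/kernel/debug/emc/min_rate + * sh: write error: Invalid argument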
+ */ + +static bool tegra186_emc_validate_rate(struct tegra186_emc *emc, + unsigned long rate) +{ + unsigned int i; + + for (i = 0; i < emc->num_dvfs; i++) + if (rate == emc->dvfs[i].rate) + return true; + + return false; +} + +static int tegra186_emc_debug_available_rates_show(struct seq_file *s, + void *data) +{ + struct tegra186_emc *emc = s->private; + const char *prefix = ""; + unsigned int i; + + for (i = 0; i < emc->num_dvfs; i++) { + seq_printf(s, "%s%lu", prefix, emc->dvfs[i].rate); + prefix = " "; + } + + seq_puts(s, "\n"); + + return 0; +} + +static int tegra186_emc_debug_available_rates_open(struct inode *inode, + struct file *file) +{ + return single_open(file, tegra186_emc_debug_available_rates_show, + inode->i_private); +} + +static const struct file_operations tegra186_emc_debug_available_rates_fops = { + .open = tegra186_emc_debug_available_rates_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int tegra186_emc_debug_min_rate_get(void *data, u64 *rate) +{ + struct tegra186_emc *emc = data; + + *rate = emc->debugfs.min_rate; + + return 0; +} + +static int tegra186_emc_debug_min_rate_set(void *data, u64 rate) +{ + struct tegra186_emc *emc = data; + int err; + + if (!tegra186_emc_validate_rate(emc, rate)) + return -EINVAL; + + err = clk_set_min_rate(emc->clk, rate); + if (err < 0) + return err; + + emc->debugfs.min_rate = rate; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tegra186_emc_debug_min_rate_fops, + tegra186_emc_debug_min_rate_get, + tegra186_emc_debug_min_rate_set, "%llu\n"); + +static int tegra186_emc_debug_max_rate_get(void *data, u64 *rate) +{ + struct tegra186_emc *emc = data; + + *rate = emc->debugfs.max_rate; + + return 0; +} + +static int tegra186_emc_debug_max_rate_set(void *data, u64 rate) +{ + struct tegra186_emc *emc = data; + int err; + + if (!tegra186_emc_validate_rate(emc, rate)) + return -EINVAL; + + err = clk_set_max_rate(emc->clk, rate); + if (err < 0) + return err; + + emc->debugfs.max_rate = rate; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tegra186_emc_debug_max_rate_fops, + tegra186_emc_debug_max_rate_get, + tegra186_emc_debug_max_rate_set, "%llu\n"); + +static int tegra186_emc_probe(struct platform_device *pdev) +{ + struct mrq_emc_dvfs_latency_response response; + struct tegra_bpmp_message msg; + struct tegra186_emc *emc; + unsigned int i; + int err; + + emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL); + if (!emc) + return -ENOMEM; + + emc->bpmp = tegra_bpmp_get(&pdev->dev); + if (IS_ERR(emc->bpmp)) { + err = PTR_ERR(emc->bpmp); + + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get BPMP: %d\n", err); + + return err; + } + + emc->clk = devm_clk_get(&pdev->dev, "emc"); + if (IS_ERR(emc->clk)) { + err = PTR_ERR(emc->clk); + dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err); + return err; + } + + platform_set_drvdata(pdev, emc); + emc->dev = &pdev->dev; + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_EMC_DVFS_LATENCY; + msg.tx.data = NULL; + msg.tx.size = 0; + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + err = tegra_bpmp_transfer(emc->bpmp, &msg); + if (err < 0) { + dev_err(&pdev->dev, "failed to get EMC DVFS pairs: %d\n", err); + return err; + } + + emc->debugfs.min_rate = ULONG_MAX; + emc->debugfs.max_rate = 0; + + emc->num_dvfs = response.num_pairs; + + emc->dvfs = devm_kmalloc_array(&pdev->dev, emc->num_dvfs, + sizeof(*emc->dvfs), GFP_KERNEL); + if (!emc->dvfs) + return -ENOMEM; + + dev_dbg(&pdev->dev, "%u DVFS pairs:\n", emc->num_dvfs); + + for (i = 0; i <
emc->num_dvfs; i++) { + emc->dvfs[i].rate = response.pairs[i].freq * 1000; + emc->dvfs[i].latency = response.pairs[i].latency; + + if (emc->dvfs[i].rate < emc->debugfs.min_rate) + emc->debugfs.min_rate = emc->dvfs[i].rate; + + if (emc->dvfs[i].rate > emc->debugfs.max_rate) + emc->debugfs.max_rate = emc->dvfs[i].rate; + + dev_dbg(&pdev->dev, " %2u: %lu Hz -> %lu us\n", i, + emc->dvfs[i].rate, emc->dvfs[i].latency); + } + + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, + emc->debugfs.max_rate); + if (err < 0) { + dev_err(&pdev->dev, + "failed to set rate range [%lu-%lu] for %pC\n", + emc->debugfs.min_rate, emc->debugfs.max_rate, + emc->clk); + return err; + } + + emc->debugfs.root = debugfs_create_dir("emc", NULL); + if (!emc->debugfs.root) { + dev_err(&pdev->dev, "failed to create debugfs directory\n"); + return 0; + } + + debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, + emc, &tegra186_emc_debug_available_rates_fops); + debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root, + emc, &tegra186_emc_debug_min_rate_fops); + debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root, + emc, &tegra186_emc_debug_max_rate_fops); + + return 0; +} + +static int tegra186_emc_remove(struct platform_device *pdev) +{ + struct tegra186_emc *emc = platform_get_drvdata(pdev); + + debugfs_remove_recursive(emc->debugfs.root); + tegra_bpmp_put(emc->bpmp); + + return 0; +} + +static const struct of_device_id tegra186_emc_of_match[] = { +#if defined(CONFIG_ARCH_TEGRA_186_SOC) + { .compatible = "nvidia,tegra186-emc" }, +#endif +#if defined(CONFIG_ARCH_TEGRA_194_SOC) + { .compatible = "nvidia,tegra194-emc" }, +#endif + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, tegra186_emc_of_match); + +static struct platform_driver tegra186_emc_driver = { + .driver = { + .name = "tegra186-emc", + .of_match_table = tegra186_emc_of_match, + .suppress_bind_attrs = true, + }, + .probe = tegra186_emc_probe, + .remove = tegra186_emc_remove, +}; +module_platform_driver(tegra186_emc_driver); + +MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); +MODULE_DESCRIPTION("NVIDIA Tegra186 External Memory Controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c index 441213a35930..5d53f11ca7b6 100644 --- a/drivers/memory/tegra/tegra186.c +++ b/drivers/memory/tegra/tegra186.c @@ -6,16 +6,18 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/mod_devicetable.h> +#include <linux/of_device.h> #include <linux/platform_device.h> +#if defined(CONFIG_ARCH_TEGRA_186_SOC) #include <dt-bindings/memory/tegra186-mc.h> +#endif -struct tegra_mc { - struct device *dev; - void __iomem *regs; -}; +#if defined(CONFIG_ARCH_TEGRA_194_SOC) +#include <dt-bindings/memory/tegra194-mc.h> +#endif -struct tegra_mc_client { +struct tegra186_mc_client { const char *name; unsigned int sid; struct { @@ -24,7 +26,46 @@ struct tegra_mc_client { } regs; }; -static const struct tegra_mc_client tegra186_mc_clients[] = { +struct tegra186_mc_soc { + const struct tegra186_mc_client *clients; + unsigned int num_clients; +}; + +struct tegra186_mc { + struct device *dev; + void __iomem *regs; + + const struct tegra186_mc_soc *soc; +}; + +static void tegra186_mc_program_sid(struct tegra186_mc *mc) +{ + unsigned int i; + + for (i = 0; i < mc->soc->num_clients; i++) { + const struct tegra186_mc_client *client = &mc->soc->clients[i]; + u32 override, security; + + override = readl(mc->regs + client->regs.override); + security = readl(mc->regs +
client->regs.security); + + dev_dbg(mc->dev, "client %s: override: %x security: %x\n", + client->name, override, security); + + dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid, + client->name); + writel(client->sid, mc->regs + client->regs.override); + + override = readl(mc->regs + client->regs.override); + security = readl(mc->regs + client->regs.security); + + dev_dbg(mc->dev, "client %s: override: %x security: %x\n", + client->name, override, security); + } +} + +#if defined(CONFIG_ARCH_TEGRA_186_SOC) +static const struct tegra186_mc_client tegra186_mc_clients[] = { { .name = "ptcr", .sid = TEGRA186_SID_PASSTHROUGH, @@ -532,17 +573,966 @@ static const struct tegra_mc_client tegra186_mc_clients[] = { }, }; +static const struct tegra186_mc_soc tegra186_mc_soc = { + .num_clients = ARRAY_SIZE(tegra186_mc_clients), + .clients = tegra186_mc_clients, +}; +#endif + +#if defined(CONFIG_ARCH_TEGRA_194_SOC) +static const struct tegra186_mc_client tegra194_mc_clients[] = { + { + .name = "ptcr", + .sid = TEGRA194_SID_PASSTHROUGH, + .regs = { + .override = 0x000, + .security = 0x004, + }, + }, { + .name = "miu7r", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x008, + .security = 0x00c, + }, + }, { + .name = "miu7w", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x010, + .security = 0x014, + }, + }, { + .name = "hdar", + .sid = TEGRA194_SID_HDA, + .regs = { + .override = 0x0a8, + .security = 0x0ac, + }, + }, { + .name = "host1xdmar", + .sid = TEGRA194_SID_HOST1X, + .regs = { + .override = 0x0b0, + .security = 0x0b4, + }, + }, { + .name = "nvencsrd", + .sid = TEGRA194_SID_NVENC, + .regs = { + .override = 0x0e0, + .security = 0x0e4, + }, + }, { + .name = "satar", + .sid = TEGRA194_SID_SATA, + .regs = { + .override = 0x0f8, + .security = 0x0fc, + }, + }, { + .name = "mpcorer", + .sid = TEGRA194_SID_PASSTHROUGH, + .regs = { + .override = 0x138, + .security = 0x13c, + }, + }, { + .name = "nvencswr", + .sid = TEGRA194_SID_NVENC, + .regs = { + .override = 0x158, + .security = 0x15c, + }, + }, { + .name = "hdaw", + .sid = TEGRA194_SID_HDA, + .regs = { + .override = 0x1a8, + .security = 0x1ac, + }, + }, { + .name = "mpcorew", + .sid = TEGRA194_SID_PASSTHROUGH, + .regs = { + .override = 0x1c8, + .security = 0x1cc, + }, + }, { + .name = "sataw", + .sid = TEGRA194_SID_SATA, + .regs = { + .override = 0x1e8, + .security = 0x1ec, + }, + }, { + .name = "ispra", + .sid = TEGRA194_SID_ISP, + .regs = { + .override = 0x220, + .security = 0x224, + }, + }, { + .name = "ispfalr", + .sid = TEGRA194_SID_ISP_FALCON, + .regs = { + .override = 0x228, + .security = 0x22c, + }, + }, { + .name = "ispwa", + .sid = TEGRA194_SID_ISP, + .regs = { + .override = 0x230, + .security = 0x234, + }, + }, { + .name = "ispwb", + .sid = TEGRA194_SID_ISP, + .regs = { + .override = 0x238, + .security = 0x23c, + }, + }, { + .name = "xusb_hostr", + .sid = TEGRA194_SID_XUSB_HOST, + .regs = { + .override = 0x250, + .security = 0x254, + }, + }, { + .name = "xusb_hostw", + .sid = TEGRA194_SID_XUSB_HOST, + .regs = { + .override = 0x258, + .security = 0x25c, + }, + }, { + .name = "xusb_devr", + .sid = TEGRA194_SID_XUSB_DEV, + .regs = { + .override = 0x260, + .security = 0x264, + }, + }, { + .name = "xusb_devw", + .sid = TEGRA194_SID_XUSB_DEV, + .regs = { + .override = 0x268, + .security = 0x26c, + }, + }, { + .name = "sdmmcra", + .sid = TEGRA194_SID_SDMMC1, + .regs = { + .override = 0x300, + .security = 0x304, + }, + }, { + .name = "sdmmcr", + .sid = TEGRA194_SID_SDMMC3, + .regs = { + .override = 0x310, + .security = 0x314, + }, + 
}, { + .name = "sdmmcrab", + .sid = TEGRA194_SID_SDMMC4, + .regs = { + .override = 0x318, + .security = 0x31c, + }, + }, { + .name = "sdmmcwa", + .sid = TEGRA194_SID_SDMMC1, + .regs = { + .override = 0x320, + .security = 0x324, + }, + }, { + .name = "sdmmcw", + .sid = TEGRA194_SID_SDMMC3, + .regs = { + .override = 0x330, + .security = 0x334, + }, + }, { + .name = "sdmmcwab", + .sid = TEGRA194_SID_SDMMC4, + .regs = { + .override = 0x338, + .security = 0x33c, + }, + }, { + .name = "vicsrd", + .sid = TEGRA194_SID_VIC, + .regs = { + .override = 0x360, + .security = 0x364, + }, + }, { + .name = "vicswr", + .sid = TEGRA194_SID_VIC, + .regs = { + .override = 0x368, + .security = 0x36c, + }, + }, { + .name = "viw", + .sid = TEGRA194_SID_VI, + .regs = { + .override = 0x390, + .security = 0x394, + }, + }, { + .name = "nvdecsrd", + .sid = TEGRA194_SID_NVDEC, + .regs = { + .override = 0x3c0, + .security = 0x3c4, + }, + }, { + .name = "nvdecswr", + .sid = TEGRA194_SID_NVDEC, + .regs = { + .override = 0x3c8, + .security = 0x3cc, + }, + }, { + .name = "aper", + .sid = TEGRA194_SID_APE, + .regs = { + .override = 0x3c0, + .security = 0x3c4, + }, + }, { + .name = "apew", + .sid = TEGRA194_SID_APE, + .regs = { + .override = 0x3d0, + .security = 0x3d4, + }, + }, { + .name = "nvjpgsrd", + .sid = TEGRA194_SID_NVJPG, + .regs = { + .override = 0x3f0, + .security = 0x3f4, + }, + }, { + .name = "nvjpgswr", + .sid = TEGRA194_SID_NVJPG, + .regs = { + .override = 0x3f0, + .security = 0x3f4, + }, + }, { + .name = "axiapr", + .sid = TEGRA194_SID_PASSTHROUGH, + .regs = { + .override = 0x410, + .security = 0x414, + }, + }, { + .name = "axiapw", + .sid = TEGRA194_SID_PASSTHROUGH, + .regs = { + .override = 0x418, + .security = 0x41c, + }, + }, { + .name = "etrr", + .sid = TEGRA194_SID_ETR, + .regs = { + .override = 0x420, + .security = 0x424, + }, + }, { + .name = "etrw", + .sid = TEGRA194_SID_ETR, + .regs = { + .override = 0x428, + .security = 0x42c, + }, + }, { + .name = "axisr", + .sid = TEGRA194_SID_PASSTHROUGH, + .regs = { + .override = 0x460, + .security = 0x464, + }, + }, { + .name = "axisw", + .sid = TEGRA194_SID_PASSTHROUGH, + .regs = { + .override = 0x468, + .security = 0x46c, + }, + }, { + .name = "eqosr", + .sid = TEGRA194_SID_EQOS, + .regs = { + .override = 0x470, + .security = 0x474, + }, + }, { + .name = "eqosw", + .sid = TEGRA194_SID_EQOS, + .regs = { + .override = 0x478, + .security = 0x47c, + }, + }, { + .name = "ufshcr", + .sid = TEGRA194_SID_UFSHC, + .regs = { + .override = 0x480, + .security = 0x484, + }, + }, { + .name = "ufshcw", + .sid = TEGRA194_SID_UFSHC, + .regs = { + .override = 0x488, + .security = 0x48c, + }, + }, { + .name = "nvdisplayr", + .sid = TEGRA194_SID_NVDISPLAY, + .regs = { + .override = 0x490, + .security = 0x494, + }, + }, { + .name = "bpmpr", + .sid = TEGRA194_SID_BPMP, + .regs = { + .override = 0x498, + .security = 0x49c, + }, + }, { + .name = "bpmpw", + .sid = TEGRA194_SID_BPMP, + .regs = { + .override = 0x4a0, + .security = 0x4a4, + }, + }, { + .name = "bpmpdmar", + .sid = TEGRA194_SID_BPMP, + .regs = { + .override = 0x4a8, + .security = 0x4ac, + }, + }, { + .name = "bpmpdmaw", + .sid = TEGRA194_SID_BPMP, + .regs = { + .override = 0x4b0, + .security = 0x4b4, + }, + }, { + .name = "aonr", + .sid = TEGRA194_SID_AON, + .regs = { + .override = 0x4b8, + .security = 0x4bc, + }, + }, { + .name = "aonw", + .sid = TEGRA194_SID_AON, + .regs = { + .override = 0x4c0, + .security = 0x4c4, + }, + }, { + .name = "aondmar", + .sid = TEGRA194_SID_AON, + .regs = { + .override = 0x4c8, + .security 
= 0x4cc, + }, + }, { + .name = "aondmaw", + .sid = TEGRA194_SID_AON, + .regs = { + .override = 0x4d0, + .security = 0x4d4, + }, + }, { + .name = "scer", + .sid = TEGRA194_SID_SCE, + .regs = { + .override = 0x4d8, + .security = 0x4dc, + }, + }, { + .name = "scew", + .sid = TEGRA194_SID_SCE, + .regs = { + .override = 0x4e0, + .security = 0x4e4, + }, + }, { + .name = "scedmar", + .sid = TEGRA194_SID_SCE, + .regs = { + .override = 0x4e8, + .security = 0x4ec, + }, + }, { + .name = "scedmaw", + .sid = TEGRA194_SID_SCE, + .regs = { + .override = 0x4f0, + .security = 0x4f4, + }, + }, { + .name = "apedmar", + .sid = TEGRA194_SID_APE, + .regs = { + .override = 0x4f8, + .security = 0x4fc, + }, + }, { + .name = "apedmaw", + .sid = TEGRA194_SID_APE, + .regs = { + .override = 0x500, + .security = 0x504, + }, + }, { + .name = "nvdisplayr1", + .sid = TEGRA194_SID_NVDISPLAY, + .regs = { + .override = 0x508, + .security = 0x50c, + }, + }, { + .name = "vicsrd1", + .sid = TEGRA194_SID_VIC, + .regs = { + .override = 0x510, + .security = 0x514, + }, + }, { + .name = "nvdecsrd1", + .sid = TEGRA194_SID_NVDEC, + .regs = { + .override = 0x518, + .security = 0x51c, + }, + }, { + .name = "miu0r", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x530, + .security = 0x534, + }, + }, { + .name = "miu0w", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x538, + .security = 0x53c, + }, + }, { + .name = "miu1r", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x540, + .security = 0x544, + }, + }, { + .name = "miu1w", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x548, + .security = 0x54c, + }, + }, { + .name = "miu2r", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x570, + .security = 0x574, + }, + }, { + .name = "miu2w", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x578, + .security = 0x57c, + }, + }, { + .name = "miu3r", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x580, + .security = 0x584, + }, + }, { + .name = "miu3w", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x588, + .security = 0x58c, + }, + }, { + .name = "miu4r", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x590, + .security = 0x594, + }, + }, { + .name = "miu4w", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x598, + .security = 0x59c, + }, + }, { + .name = "dpmur", + .sid = TEGRA194_SID_PASSTHROUGH, + .regs = { + .override = 0x598, + .security = 0x59c, + }, + }, { + .name = "vifalr", + .sid = TEGRA194_SID_VI_FALCON, + .regs = { + .override = 0x5e0, + .security = 0x5e4, + }, + }, { + .name = "vifalw", + .sid = TEGRA194_SID_VI_FALCON, + .regs = { + .override = 0x5e8, + .security = 0x5ec, + }, + }, { + .name = "dla0rda", + .sid = TEGRA194_SID_NVDLA0, + .regs = { + .override = 0x5f0, + .security = 0x5f4, + }, + }, { + .name = "dla0falrdb", + .sid = TEGRA194_SID_NVDLA0, + .regs = { + .override = 0x5f8, + .security = 0x5fc, + }, + }, { + .name = "dla0wra", + .sid = TEGRA194_SID_NVDLA0, + .regs = { + .override = 0x600, + .security = 0x604, + }, + }, { + .name = "dla0falwrb", + .sid = TEGRA194_SID_NVDLA0, + .regs = { + .override = 0x608, + .security = 0x60c, + }, + }, { + .name = "dla1rda", + .sid = TEGRA194_SID_NVDLA1, + .regs = { + .override = 0x610, + .security = 0x614, + }, + }, { + .name = "dla1falrdb", + .sid = TEGRA194_SID_NVDLA1, + .regs = { + .override = 0x618, + .security = 0x61c, + }, + }, { + .name = "dla1wra", + .sid = TEGRA194_SID_NVDLA1, + .regs = { + .override = 0x620, + .security = 0x624, + }, + }, { + .name = "dla1falwrb", + .sid = TEGRA194_SID_NVDLA1, + .regs = { + .override = 
0x628, + .security = 0x62c, + }, + }, { + .name = "pva0rda", + .sid = TEGRA194_SID_PVA0, + .regs = { + .override = 0x630, + .security = 0x634, + }, + }, { + .name = "pva0rdb", + .sid = TEGRA194_SID_PVA0, + .regs = { + .override = 0x638, + .security = 0x63c, + }, + }, { + .name = "pva0rdc", + .sid = TEGRA194_SID_PVA0, + .regs = { + .override = 0x640, + .security = 0x644, + }, + }, { + .name = "pva0wra", + .sid = TEGRA194_SID_PVA0, + .regs = { + .override = 0x648, + .security = 0x64c, + }, + }, { + .name = "pva0wrb", + .sid = TEGRA194_SID_PVA0, + .regs = { + .override = 0x650, + .security = 0x654, + }, + }, { + .name = "pva0wrc", + .sid = TEGRA194_SID_PVA0, + .regs = { + .override = 0x658, + .security = 0x65c, + }, + }, { + .name = "pva1rda", + .sid = TEGRA194_SID_PVA1, + .regs = { + .override = 0x660, + .security = 0x664, + }, + }, { + .name = "pva1rdb", + .sid = TEGRA194_SID_PVA1, + .regs = { + .override = 0x668, + .security = 0x66c, + }, + }, { + .name = "pva1rdc", + .sid = TEGRA194_SID_PVA1, + .regs = { + .override = 0x670, + .security = 0x674, + }, + }, { + .name = "pva1wra", + .sid = TEGRA194_SID_PVA1, + .regs = { + .override = 0x678, + .security = 0x67c, + }, + }, { + .name = "pva1wrb", + .sid = TEGRA194_SID_PVA1, + .regs = { + .override = 0x680, + .security = 0x684, + }, + }, { + .name = "pva1wrc", + .sid = TEGRA194_SID_PVA1, + .regs = { + .override = 0x688, + .security = 0x68c, + }, + }, { + .name = "rcer", + .sid = TEGRA194_SID_RCE, + .regs = { + .override = 0x690, + .security = 0x694, + }, + }, { + .name = "rcew", + .sid = TEGRA194_SID_RCE, + .regs = { + .override = 0x698, + .security = 0x69c, + }, + }, { + .name = "rcedmar", + .sid = TEGRA194_SID_RCE, + .regs = { + .override = 0x6a0, + .security = 0x6a4, + }, + }, { + .name = "rcedmaw", + .sid = TEGRA194_SID_RCE, + .regs = { + .override = 0x6a8, + .security = 0x6ac, + }, + }, { + .name = "nvenc1srd", + .sid = TEGRA194_SID_NVENC1, + .regs = { + .override = 0x6b0, + .security = 0x6b4, + }, + }, { + .name = "nvenc1swr", + .sid = TEGRA194_SID_NVENC1, + .regs = { + .override = 0x6b8, + .security = 0x6bc, + }, + }, { + .name = "pcie0r", + .sid = TEGRA194_SID_PCIE0, + .regs = { + .override = 0x6c0, + .security = 0x6c4, + }, + }, { + .name = "pcie0w", + .sid = TEGRA194_SID_PCIE0, + .regs = { + .override = 0x6c8, + .security = 0x6cc, + }, + }, { + .name = "pcie1r", + .sid = TEGRA194_SID_PCIE1, + .regs = { + .override = 0x6d0, + .security = 0x6d4, + }, + }, { + .name = "pcie1w", + .sid = TEGRA194_SID_PCIE1, + .regs = { + .override = 0x6d8, + .security = 0x6dc, + }, + }, { + .name = "pcie2ar", + .sid = TEGRA194_SID_PCIE2, + .regs = { + .override = 0x6e0, + .security = 0x6e4, + }, + }, { + .name = "pcie2aw", + .sid = TEGRA194_SID_PCIE2, + .regs = { + .override = 0x6e8, + .security = 0x6ec, + }, + }, { + .name = "pcie3r", + .sid = TEGRA194_SID_PCIE3, + .regs = { + .override = 0x6f0, + .security = 0x6f4, + }, + }, { + .name = "pcie3w", + .sid = TEGRA194_SID_PCIE3, + .regs = { + .override = 0x6f8, + .security = 0x6fc, + }, + }, { + .name = "pcie4r", + .sid = TEGRA194_SID_PCIE4, + .regs = { + .override = 0x700, + .security = 0x704, + }, + }, { + .name = "pcie4w", + .sid = TEGRA194_SID_PCIE4, + .regs = { + .override = 0x708, + .security = 0x70c, + }, + }, { + .name = "pcie5r", + .sid = TEGRA194_SID_PCIE5, + .regs = { + .override = 0x710, + .security = 0x714, + }, + }, { + .name = "pcie5w", + .sid = TEGRA194_SID_PCIE5, + .regs = { + .override = 0x718, + .security = 0x71c, + }, + }, { + .name = "ispfalw", + .sid = TEGRA194_SID_ISP_FALCON, + .regs = 
{ + .override = 0x720, + .security = 0x724, + }, + }, { + .name = "dla0rda1", + .sid = TEGRA194_SID_NVDLA0, + .regs = { + .override = 0x748, + .security = 0x74c, + }, + }, { + .name = "dla1rda1", + .sid = TEGRA194_SID_NVDLA1, + .regs = { + .override = 0x750, + .security = 0x754, + }, + }, { + .name = "pva0rda1", + .sid = TEGRA194_SID_PVA0, + .regs = { + .override = 0x758, + .security = 0x75c, + }, + }, { + .name = "pva0rdb1", + .sid = TEGRA194_SID_PVA0, + .regs = { + .override = 0x760, + .security = 0x764, + }, + }, { + .name = "pva1rda1", + .sid = TEGRA194_SID_PVA1, + .regs = { + .override = 0x768, + .security = 0x76c, + }, + }, { + .name = "pva1rdb1", + .sid = TEGRA194_SID_PVA1, + .regs = { + .override = 0x770, + .security = 0x774, + }, + }, { + .name = "pcie5r1", + .sid = TEGRA194_SID_PCIE5, + .regs = { + .override = 0x778, + .security = 0x77c, + }, + }, { + .name = "nvencsrd1", + .sid = TEGRA194_SID_NVENC, + .regs = { + .override = 0x780, + .security = 0x784, + }, + }, { + .name = "nvenc1srd1", + .sid = TEGRA194_SID_NVENC1, + .regs = { + .override = 0x788, + .security = 0x78c, + }, + }, { + .name = "ispra1", + .sid = TEGRA194_SID_ISP, + .regs = { + .override = 0x790, + .security = 0x794, + }, + }, { + .name = "pcie0r1", + .sid = TEGRA194_SID_PCIE0, + .regs = { + .override = 0x798, + .security = 0x79c, + }, + }, { + .name = "nvdec1srd", + .sid = TEGRA194_SID_NVDEC1, + .regs = { + .override = 0x7c8, + .security = 0x7cc, + }, + }, { + .name = "nvdec1srd1", + .sid = TEGRA194_SID_NVDEC1, + .regs = { + .override = 0x7d0, + .security = 0x7d4, + }, + }, { + .name = "nvdec1swr", + .sid = TEGRA194_SID_NVDEC1, + .regs = { + .override = 0x7d8, + .security = 0x7dc, + }, + }, { + .name = "miu5r", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x7e0, + .security = 0x7e4, + }, + }, { + .name = "miu5w", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x7e8, + .security = 0x7ec, + }, + }, { + .name = "miu6r", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x7f0, + .security = 0x7f4, + }, + }, { + .name = "miu6w", + .sid = TEGRA194_SID_MIU, + .regs = { + .override = 0x7f8, + .security = 0x7fc, + }, + }, +}; + +static const struct tegra186_mc_soc tegra194_mc_soc = { + .num_clients = ARRAY_SIZE(tegra194_mc_clients), + .clients = tegra194_mc_clients, +}; +#endif + static int tegra186_mc_probe(struct platform_device *pdev) { + struct tegra186_mc *mc; struct resource *res; - struct tegra_mc *mc; - unsigned int i; - int err = 0; + int err; mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; + mc->soc = of_device_get_match_data(&pdev->dev); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mc->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mc->regs)) @@ -550,46 +1540,63 @@ static int tegra186_mc_probe(struct platform_device *pdev) mc->dev = &pdev->dev; - for (i = 0; i < ARRAY_SIZE(tegra186_mc_clients); i++) { - const struct tegra_mc_client *client = &tegra186_mc_clients[i]; - u32 override, security; - - override = readl(mc->regs + client->regs.override); - security = readl(mc->regs + client->regs.security); + err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + if (err < 0) + return err; - dev_dbg(&pdev->dev, "client %s: override: %x security: %x\n", - client->name, override, security); - - dev_dbg(&pdev->dev, "setting SID %u for %s\n", client->sid, - client->name); - writel(client->sid, mc->regs + client->regs.override); + platform_set_drvdata(pdev, mc); + tegra186_mc_program_sid(mc); - override = readl(mc->regs + 
client->regs.override); - security = readl(mc->regs + client->regs.security); + return 0; +} - dev_dbg(&pdev->dev, "client %s: override: %x security: %x\n", - client->name, override, security); - } +static int tegra186_mc_remove(struct platform_device *pdev) +{ + struct tegra186_mc *mc = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, mc); + of_platform_depopulate(mc->dev); - return err; + return 0; } static const struct of_device_id tegra186_mc_of_match[] = { - { .compatible = "nvidia,tegra186-mc", }, +#if defined(CONFIG_ARCH_TEGRA_186_SOC) + { .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc }, +#endif +#if defined(CONFIG_ARCH_TEGRA_194_SOC) + { .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc }, +#endif { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, tegra186_mc_of_match); +static int tegra186_mc_suspend(struct device *dev) +{ + return 0; +} + +static int tegra186_mc_resume(struct device *dev) +{ + struct tegra186_mc *mc = dev_get_drvdata(dev); + + tegra186_mc_program_sid(mc); + + return 0; +} + +static const struct dev_pm_ops tegra186_mc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tegra186_mc_suspend, tegra186_mc_resume) +}; + static struct platform_driver tegra186_mc_driver = { .driver = { .name = "tegra186-mc", .of_match_table = tegra186_mc_of_match, + .pm = &tegra186_mc_pm_ops, .suppress_bind_attrs = true, }, - .prevent_deferred_probe = true, .probe = tegra186_mc_probe, + .remove = tegra186_mc_remove, }; module_platform_driver(tegra186_mc_driver); diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c index 1b23b1c34476..8ae474d9bfb9 100644 --- a/drivers/memory/tegra/tegra20-emc.c +++ b/drivers/memory/tegra/tegra20-emc.c @@ -8,6 +8,7 @@ #include <linux/clk.h> #include <linux/clk/tegra.h> #include <linux/completion.h> +#include <linux/debugfs.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -150,6 +151,12 @@ struct tegra_emc { struct emc_timing *timings; unsigned int num_timings; + + struct { + struct dentry *root; + unsigned long min_rate; + unsigned long max_rate; + } debugfs; }; static irqreturn_t tegra_emc_isr(int irq, void *data) @@ -478,6 +485,171 @@ static long emc_round_rate(unsigned long rate, return timing->rate; } +/* + * debugfs interface + * + * The memory controller driver exposes some files in debugfs that can be used + * to control the EMC frequency. The top-level directory can be found here: + * + * /sys/kernel/debug/emc + * + * It contains the following files: + * + * - available_rates: This file contains a list of valid, space-separated + * EMC frequencies. + * + * - min_rate: Writing a value to this file sets the given frequency as the + * floor of the permitted range. If this is higher than the currently + * configured EMC frequency, this will cause the frequency to be + * increased so that it stays within the valid range. + * + * - max_rate: Similarly to the min_rate file, writing a value to this file + * sets the given frequency as the ceiling of the permitted range. If + * the value is lower than the currently configured EMC frequency, this + * will cause the frequency to be decreased so that it stays within the + * valid range.
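+ * + * The min_rate and max_rate handlers below forward the value to + * clk_set_min_rate() and clk_set_max_rate() respectively, roughly: + * + * if (!tegra_emc_validate_rate(emc, rate)) + * return -EINVAL; + * err = clk_set_min_rate(emc->clk, rate); + * + * so the requested floor and ceiling are combined by the common clock + * framework with the constraints of any other consumer of the EMC clock.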
+ */ + +static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) +{ + unsigned int i; + + for (i = 0; i < emc->num_timings; i++) + if (rate == emc->timings[i].rate) + return true; + + return false; +} + +static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data) +{ + struct tegra_emc *emc = s->private; + const char *prefix = ""; + unsigned int i; + + for (i = 0; i < emc->num_timings; i++) { + seq_printf(s, "%s%lu", prefix, emc->timings[i].rate); + prefix = " "; + } + + seq_puts(s, "\n"); + + return 0; +} + +static int tegra_emc_debug_available_rates_open(struct inode *inode, + struct file *file) +{ + return single_open(file, tegra_emc_debug_available_rates_show, + inode->i_private); +} + +static const struct file_operations tegra_emc_debug_available_rates_fops = { + .open = tegra_emc_debug_available_rates_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int tegra_emc_debug_min_rate_get(void *data, u64 *rate) +{ + struct tegra_emc *emc = data; + + *rate = emc->debugfs.min_rate; + + return 0; +} + +static int tegra_emc_debug_min_rate_set(void *data, u64 rate) +{ + struct tegra_emc *emc = data; + int err; + + if (!tegra_emc_validate_rate(emc, rate)) + return -EINVAL; + + err = clk_set_min_rate(emc->clk, rate); + if (err < 0) + return err; + + emc->debugfs.min_rate = rate; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops, + tegra_emc_debug_min_rate_get, + tegra_emc_debug_min_rate_set, "%llu\n"); + +static int tegra_emc_debug_max_rate_get(void *data, u64 *rate) +{ + struct tegra_emc *emc = data; + + *rate = emc->debugfs.max_rate; + + return 0; +} + +static int tegra_emc_debug_max_rate_set(void *data, u64 rate) +{ + struct tegra_emc *emc = data; + int err; + + if (!tegra_emc_validate_rate(emc, rate)) + return -EINVAL; + + err = clk_set_max_rate(emc->clk, rate); + if (err < 0) + return err; + + emc->debugfs.max_rate = rate; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops, + tegra_emc_debug_max_rate_get, + tegra_emc_debug_max_rate_set, "%llu\n"); + +static void tegra_emc_debugfs_init(struct tegra_emc *emc) +{ + struct device *dev = emc->dev; + unsigned int i; + int err; + + emc->debugfs.min_rate = ULONG_MAX; + emc->debugfs.max_rate = 0; + + for (i = 0; i < emc->num_timings; i++) { + if (emc->timings[i].rate < emc->debugfs.min_rate) + emc->debugfs.min_rate = emc->timings[i].rate; + + if (emc->timings[i].rate > emc->debugfs.max_rate) + emc->debugfs.max_rate = emc->timings[i].rate; + } + + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, + emc->debugfs.max_rate); + if (err < 0) { + dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n", + emc->debugfs.min_rate, emc->debugfs.max_rate, + emc->clk); + } + + emc->debugfs.root = debugfs_create_dir("emc", NULL); + if (!emc->debugfs.root) { + dev_err(emc->dev, "failed to create debugfs directory\n"); + return; + } + + debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, + emc, &tegra_emc_debug_available_rates_fops); + debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root, + emc, &tegra_emc_debug_min_rate_fops); + debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root, + emc, &tegra_emc_debug_max_rate_fops); +} + static int tegra_emc_probe(struct platform_device *pdev) { struct device_node *np; @@ -550,6 +722,9 @@ static int tegra_emc_probe(struct platform_device *pdev) goto unset_cb; } + platform_set_drvdata(pdev, emc); + tegra_emc_debugfs_init(emc); + return 0; 
unset_cb: diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c index b420268173fc..cc0482434c75 100644 --- a/drivers/memory/tegra/tegra210.c +++ b/drivers/memory/tegra/tegra210.c @@ -436,7 +436,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = { .reg = 0x37c, .shift = 0, .mask = 0xff, - .def = 0x39, + .def = 0x7a, }, }, { .id = 0x4b, diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c index 0b6a5e451ea3..e3efd9529506 100644 --- a/drivers/memory/tegra/tegra30-emc.c +++ b/drivers/memory/tegra/tegra30-emc.c @@ -12,6 +12,7 @@ #include <linux/clk.h> #include <linux/clk/tegra.h> #include <linux/completion.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> @@ -331,7 +332,9 @@ struct tegra_emc { struct clk *clk; void __iomem *regs; unsigned int irq; + bool bad_state; + struct emc_timing *new_timing; struct emc_timing *timings; unsigned int num_timings; @@ -345,10 +348,74 @@ struct tegra_emc { bool vref_cal_toggle : 1; bool zcal_long : 1; bool dll_on : 1; - bool prepared : 1; - bool bad_state : 1; + + struct { + struct dentry *root; + unsigned long min_rate; + unsigned long max_rate; + } debugfs; }; +static int emc_seq_update_timing(struct tegra_emc *emc) +{ + u32 val; + int err; + + writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL); + + err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val, + !(val & EMC_STATUS_TIMING_UPDATE_STALLED), + 1, 200); + if (err) { + dev_err(emc->dev, "failed to update timing: %d\n", err); + return err; + } + + return 0; +} + +static void emc_complete_clk_change(struct tegra_emc *emc) +{ + struct emc_timing *timing = emc->new_timing; + unsigned int dram_num; + bool failed = false; + int err; + + /* re-enable auto-refresh */ + dram_num = tegra_mc_get_emem_device_count(emc->mc); + writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num), + emc->regs + EMC_REFCTRL); + + /* restore auto-calibration */ + if (emc->vref_cal_toggle) + writel_relaxed(timing->emc_auto_cal_interval, + emc->regs + EMC_AUTO_CAL_INTERVAL); + + /* restore dynamic self-refresh */ + if (timing->emc_cfg_dyn_self_ref) { + emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE; + writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG); + } + + /* set number of clocks to wait after each ZQ command */ + if (emc->zcal_long) + writel_relaxed(timing->emc_zcal_cnt_long, + emc->regs + EMC_ZCAL_WAIT_CNT); + + /* wait for writes to settle */ + udelay(2); + + /* update restored timing */ + err = emc_seq_update_timing(emc); + if (err) + failed = true; + + /* restore early ACK */ + mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE); + + WRITE_ONCE(emc->bad_state, failed); +} + static irqreturn_t tegra_emc_isr(int irq, void *data) { struct tegra_emc *emc = data; @@ -359,10 +426,6 @@ static irqreturn_t tegra_emc_isr(int irq, void *data) if (!status) return IRQ_NONE; - /* notify about EMC-CAR handshake completion */ - if (status & EMC_CLKCHANGE_COMPLETE_INT) - complete(&emc->clk_handshake_complete); - /* notify about HW problem */ if (status & EMC_REFRESH_OVERFLOW_INT) dev_err_ratelimited(emc->dev, @@ -371,6 +434,18 @@ static irqreturn_t tegra_emc_isr(int irq, void *data) /* clear interrupts */ writel_relaxed(status, emc->regs + EMC_INTSTATUS); + /* notify about EMC-CAR handshake completion */ + if (status & EMC_CLKCHANGE_COMPLETE_INT) { + if (completion_done(&emc->clk_handshake_complete)) { + dev_err_ratelimited(emc->dev, + "bogus handshake interrupt\n"); + return IRQ_NONE; + } + + 
emc_complete_clk_change(emc); + complete(&emc->clk_handshake_complete); + } + return IRQ_HANDLED; } @@ -438,24 +513,6 @@ static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing, return preset; } -static int emc_seq_update_timing(struct tegra_emc *emc) -{ - u32 val; - int err; - - writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL); - - err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val, - !(val & EMC_STATUS_TIMING_UPDATE_STALLED), - 1, 200); - if (err) { - dev_err(emc->dev, "failed to update timing: %d\n", err); - return err; - } - - return 0; -} - static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate) { struct tegra_mc *mc = emc->mc; @@ -582,8 +639,7 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate) !(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300); if (err) { dev_err(emc->dev, - "failed to disable auto-cal: %d\n", - err); + "auto-cal finish timeout: %d\n", err); return err; } @@ -621,9 +677,6 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate) writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT); } - /* disable interrupt since read access is prohibited after stalling */ - disable_irq(emc->irq); - /* this read also completes the writes */ val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL); @@ -739,20 +792,18 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate) emc->regs + EMC_ZQ_CAL); } - /* re-enable auto-refresh */ - writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num), - emc->regs + EMC_REFCTRL); - /* flow control marker 3 */ writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE); - reinit_completion(&emc->clk_handshake_complete); + /* + * Read and discard an arbitrary MC register (Note: EMC registers + * can't be used) to ensure the register writes are completed. 
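+ * + * This is the usual read-back idiom for flushing posted writes; as a + * generic sketch (the names are placeholders, not this driver's + * registers): + * + * writel_relaxed(value, emc_base + SOME_EMC_REG); + * (void)readl_relaxed(mc_base + SOME_MC_REG); // flushes posted writes + * + * the read has to target the MC here because EMC register reads are + * prohibited while the EMC is stalled.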
+ */ + mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE); - /* interrupt can be re-enabled now */ - enable_irq(emc->irq); + reinit_completion(&emc->clk_handshake_complete); - emc->bad_state = false; - emc->prepared = true; + emc->new_timing = timing; return 0; } @@ -760,52 +811,25 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate) static int emc_complete_timing_change(struct tegra_emc *emc, unsigned long rate) { - struct emc_timing *timing = emc_find_timing(emc, rate); unsigned long timeout; - int ret; timeout = wait_for_completion_timeout(&emc->clk_handshake_complete, msecs_to_jiffies(100)); if (timeout == 0) { dev_err(emc->dev, "emc-car handshake failed\n"); - emc->bad_state = true; return -EIO; } - /* restore auto-calibration */ - if (emc->vref_cal_toggle) - writel_relaxed(timing->emc_auto_cal_interval, - emc->regs + EMC_AUTO_CAL_INTERVAL); - - /* restore dynamic self-refresh */ - if (timing->emc_cfg_dyn_self_ref) { - emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE; - writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG); - } - - /* set number of clocks to wait after each ZQ command */ - if (emc->zcal_long) - writel_relaxed(timing->emc_zcal_cnt_long, - emc->regs + EMC_ZCAL_WAIT_CNT); - - udelay(2); - /* update restored timing */ - ret = emc_seq_update_timing(emc); - if (ret) - emc->bad_state = true; - - /* restore early ACK */ - mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE); - - emc->prepared = false; + if (READ_ONCE(emc->bad_state)) + return -EIO; - return ret; + return 0; } static int emc_unprepare_timing_change(struct tegra_emc *emc, unsigned long rate) { - if (emc->prepared && !emc->bad_state) { + if (!emc->bad_state) { /* shouldn't ever happen in practice */ dev_err(emc->dev, "timing configuration can't be reverted\n"); emc->bad_state = true; @@ -823,7 +847,13 @@ static int emc_clk_change_notify(struct notifier_block *nb, switch (msg) { case PRE_RATE_CHANGE: + /* + * Disable interrupt since read accesses are prohibited after + * stalling. + */ + disable_irq(emc->irq); err = emc_prepare_timing_change(emc, cnd->new_rate); + enable_irq(emc->irq); break; case ABORT_RATE_CHANGE: @@ -1083,6 +1113,171 @@ static long emc_round_rate(unsigned long rate, return timing->rate; } +/* + * debugfs interface + * + * The memory controller driver exposes some files in debugfs that can be used + * to control the EMC frequency. The top-level directory can be found here: + * + * /sys/kernel/debug/emc + * + * It contains the following files: + * + * - available_rates: This file contains a list of valid, space-separated + * EMC frequencies. + * + * - min_rate: Writing a value to this file sets the given frequency as the + * floor of the permitted range. If this is higher than the currently + * configured EMC frequency, this will cause the frequency to be + * increased so that it stays within the valid range. + * + * - max_rate: Similarly to the min_rate file, writing a value to this file + * sets the given frequency as the ceiling of the permitted range. If + * the value is lower than the currently configured EMC frequency, this + * will cause the frequency to be decreased so that it stays within the + * valid range.
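+ * + * When the common clock framework's debugfs support is enabled, the + * effect of these files can be cross-checked against the current EMC + * clock state, for example (hypothetical invocation): + * + * # grep emc /sys/kernel/debug/clk/clk_summary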
+ */ + +static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) +{ + unsigned int i; + + for (i = 0; i < emc->num_timings; i++) + if (rate == emc->timings[i].rate) + return true; + + return false; +} + +static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data) +{ + struct tegra_emc *emc = s->private; + const char *prefix = ""; + unsigned int i; + + for (i = 0; i < emc->num_timings; i++) { + seq_printf(s, "%s%lu", prefix, emc->timings[i].rate); + prefix = " "; + } + + seq_puts(s, "\n"); + + return 0; +} + +static int tegra_emc_debug_available_rates_open(struct inode *inode, + struct file *file) +{ + return single_open(file, tegra_emc_debug_available_rates_show, + inode->i_private); +} + +static const struct file_operations tegra_emc_debug_available_rates_fops = { + .open = tegra_emc_debug_available_rates_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int tegra_emc_debug_min_rate_get(void *data, u64 *rate) +{ + struct tegra_emc *emc = data; + + *rate = emc->debugfs.min_rate; + + return 0; +} + +static int tegra_emc_debug_min_rate_set(void *data, u64 rate) +{ + struct tegra_emc *emc = data; + int err; + + if (!tegra_emc_validate_rate(emc, rate)) + return -EINVAL; + + err = clk_set_min_rate(emc->clk, rate); + if (err < 0) + return err; + + emc->debugfs.min_rate = rate; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops, + tegra_emc_debug_min_rate_get, + tegra_emc_debug_min_rate_set, "%llu\n"); + +static int tegra_emc_debug_max_rate_get(void *data, u64 *rate) +{ + struct tegra_emc *emc = data; + + *rate = emc->debugfs.max_rate; + + return 0; +} + +static int tegra_emc_debug_max_rate_set(void *data, u64 rate) +{ + struct tegra_emc *emc = data; + int err; + + if (!tegra_emc_validate_rate(emc, rate)) + return -EINVAL; + + err = clk_set_max_rate(emc->clk, rate); + if (err < 0) + return err; + + emc->debugfs.max_rate = rate; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops, + tegra_emc_debug_max_rate_get, + tegra_emc_debug_max_rate_set, "%llu\n"); + +static void tegra_emc_debugfs_init(struct tegra_emc *emc) +{ + struct device *dev = emc->dev; + unsigned int i; + int err; + + emc->debugfs.min_rate = ULONG_MAX; + emc->debugfs.max_rate = 0; + + for (i = 0; i < emc->num_timings; i++) { + if (emc->timings[i].rate < emc->debugfs.min_rate) + emc->debugfs.min_rate = emc->timings[i].rate; + + if (emc->timings[i].rate > emc->debugfs.max_rate) + emc->debugfs.max_rate = emc->timings[i].rate; + } + + err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, + emc->debugfs.max_rate); + if (err < 0) { + dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n", + emc->debugfs.min_rate, emc->debugfs.max_rate, + emc->clk); + } + + emc->debugfs.root = debugfs_create_dir("emc", NULL); + if (!emc->debugfs.root) { + dev_err(emc->dev, "failed to create debugfs directory\n"); + return; + } + + debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, + emc, &tegra_emc_debug_available_rates_fops); + debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root, + emc, &tegra_emc_debug_min_rate_fops); + debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root, + emc, &tegra_emc_debug_max_rate_fops); +} + static int tegra_emc_probe(struct platform_device *pdev) { struct platform_device *mc; @@ -1169,6 +1364,7 @@ static int tegra_emc_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, emc); + tegra_emc_debugfs_init(emc); return 0; @@ -1181,13 
+1377,17 @@ unset_cb: static int tegra_emc_suspend(struct device *dev) { struct tegra_emc *emc = dev_get_drvdata(dev); + int err; - /* - * Suspending in a bad state will hang machine. The "prepared" var - * shall be always false here unless it's a kernel bug that caused - * suspending in a wrong order. - */ - if (WARN_ON(emc->prepared) || emc->bad_state) + /* take exclusive control over the clock's rate */ + err = clk_rate_exclusive_get(emc->clk); + if (err) { + dev_err(emc->dev, "failed to acquire clk: %d\n", err); + return err; + } + + /* suspending in a bad state will hang machine */ + if (WARN(emc->bad_state, "hardware in a bad state\n")) return -EINVAL; emc->bad_state = true; @@ -1202,6 +1402,8 @@ static int tegra_emc_resume(struct device *dev) emc_setup_hw(emc); emc->bad_state = false; + clk_rate_exclusive_put(emc->clk); + return 0; } diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 0681d5fdd538..031eb64549af 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: GPL-2.0-or-later /* * at24.c - handle most I2C EEPROMs * @@ -6,23 +6,23 @@ * Copyright (C) 2008 Wolfram Sang, Pengutronix */ -#include <linux/kernel.h> +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/i2c.h> #include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_device.h> -#include <linux/slab.h> -#include <linux/delay.h> #include <linux/mutex.h> -#include <linux/mod_devicetable.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> -#include <linux/property.h> -#include <linux/acpi.h> -#include <linux/i2c.h> #include <linux/nvmem-provider.h> -#include <linux/regmap.h> +#include <linux/of_device.h> #include <linux/pm_runtime.h> -#include <linux/gpio/consumer.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> /* Address pointer is 16 bit. */ #define AT24_FLAG_ADDR16 BIT(7) @@ -88,8 +88,7 @@ struct at24_data { u8 flags; struct nvmem_device *nvmem; - - struct gpio_desc *wp_gpio; + struct regulator *vcc_reg; /* * Some chips tie up multiple I2C addresses; dummy devices reserve @@ -457,12 +456,10 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) * from this host, but not from other I2C masters. 
*/ mutex_lock(&at24->lock); - gpiod_set_value_cansleep(at24->wp_gpio, 0); while (count) { ret = at24_regmap_write(at24, buf, off, count); if (ret < 0) { - gpiod_set_value_cansleep(at24->wp_gpio, 1); mutex_unlock(&at24->lock); pm_runtime_put(dev); return ret; @@ -472,7 +469,6 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) count -= ret; } - gpiod_set_value_cansleep(at24->wp_gpio, 1); mutex_unlock(&at24->lock); pm_runtime_put(dev); @@ -662,9 +658,9 @@ static int at24_probe(struct i2c_client *client) at24->client[0].client = client; at24->client[0].regmap = regmap; - at24->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_HIGH); - if (IS_ERR(at24->wp_gpio)) - return PTR_ERR(at24->wp_gpio); + at24->vcc_reg = devm_regulator_get(dev, "vcc"); + if (IS_ERR(at24->vcc_reg)) + return PTR_ERR(at24->vcc_reg); writable = !(flags & AT24_FLAG_READONLY); if (writable) { @@ -701,6 +697,12 @@ static int at24_probe(struct i2c_client *client) i2c_set_clientdata(client, at24); + err = regulator_enable(at24->vcc_reg); + if (err) { + dev_err(dev, "Failed to enable vcc regulator\n"); + return err; + } + /* enable runtime pm */ pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -713,6 +715,7 @@ static int at24_probe(struct i2c_client *client) pm_runtime_idle(dev); if (err) { pm_runtime_disable(dev); + regulator_disable(at24->vcc_reg); return -ENODEV; } @@ -728,15 +731,42 @@ static int at24_probe(struct i2c_client *client) static int at24_remove(struct i2c_client *client) { + struct at24_data *at24 = i2c_get_clientdata(client); + pm_runtime_disable(&client->dev); + if (!pm_runtime_status_suspended(&client->dev)) + regulator_disable(at24->vcc_reg); pm_runtime_set_suspended(&client->dev); return 0; } +static int __maybe_unused at24_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct at24_data *at24 = i2c_get_clientdata(client); + + return regulator_disable(at24->vcc_reg); +} + +static int __maybe_unused at24_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct at24_data *at24 = i2c_get_clientdata(client); + + return regulator_enable(at24->vcc_reg); +} + +static const struct dev_pm_ops at24_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(at24_suspend, at24_resume, NULL) +}; + static struct i2c_driver at24_driver = { .driver = { .name = "at24", + .pm = &at24_pm_ops, .of_match_table = at24_of_match, .acpi_match_table = ACPI_PTR(at24_acpi_ids), }, diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 4f2e6910c623..1cc2cd894f87 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1383,26 +1383,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) bool do_tx_balance = true; u32 hash_index = 0; const u8 *hash_start = NULL; - struct ipv6hdr *ip6hdr; skb_reset_mac_header(skb); eth_data = eth_hdr(skb); switch (ntohs(skb->protocol)) { case ETH_P_IP: { - const struct iphdr *iph = ip_hdr(skb); + const struct iphdr *iph; if (is_broadcast_ether_addr(eth_data->h_dest) || - iph->daddr == ip_bcast || - iph->protocol == IPPROTO_IGMP) { + !pskb_network_may_pull(skb, sizeof(*iph))) { + do_tx_balance = false; + break; + } + iph = ip_hdr(skb); + if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) { do_tx_balance = false; break; } hash_start = (char *)&(iph->daddr); hash_size = sizeof(iph->daddr); - } break; - case ETH_P_IPV6: + } + case ETH_P_IPV6: { + const struct 
ipv6hdr *ip6hdr; + /* IPv6 doesn't really use broadcast mac address, but leave * that here just in case. */ @@ -1419,7 +1424,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } - /* Additianally, DAD probes should not be tx-balanced as that + if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) { + do_tx_balance = false; + break; + } + /* Additionally, DAD probes should not be tx-balanced as that * will lead to false positives for duplicate addresses and * prevent address configuration from working. */ @@ -1429,17 +1438,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } - hash_start = (char *)&(ipv6_hdr(skb)->daddr); - hash_size = sizeof(ipv6_hdr(skb)->daddr); + hash_start = (char *)&ip6hdr->daddr; + hash_size = sizeof(ip6hdr->daddr); break; - case ETH_P_IPX: - if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { + } + case ETH_P_IPX: { + const struct ipxhdr *ipxhdr; + + if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) { + do_tx_balance = false; + break; + } + ipxhdr = (struct ipxhdr *)skb_network_header(skb); + + if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) { /* something is wrong with this packet */ do_tx_balance = false; break; } - if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) { + if (ipxhdr->ipx_type != IPX_TYPE_NCP) { /* The only protocol worth balancing in * this family since it has an "ARP" like * mechanism @@ -1448,9 +1466,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } + eth_data = eth_hdr(skb); hash_start = (char *)eth_data->h_dest; hash_size = ETH_ALEN; break; + } case ETH_P_ARP: do_tx_balance = false; if (bond_info->rlb_enabled) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 060497512159..449a22172e07 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -693,7 +693,7 @@ int b53_configure_vlan(struct dsa_switch *ds) b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, false, ds->vlan_filtering); + b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 3e8635311d0d..d1955543acd1 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -68,7 +68,9 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); - reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G); + reg |= (MII_SW_OR | LINK_STS); + if (priv->type == BCM7278_DEVICE_ID) + reg |= GMII_SPEED_UP_2G; core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index c5f64959a184..1142768969c2 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -101,6 +101,12 @@ static struct spi_driver ksz9477_spi_driver = { module_spi_driver(ksz9477_spi_driver); +MODULE_ALIAS("spi:ksz9477"); +MODULE_ALIAS("spi:ksz9897"); +MODULE_ALIAS("spi:ksz9893"); +MODULE_ALIAS("spi:ksz9563"); +MODULE_ALIAS("spi:ksz8563"); +MODULE_ALIAS("spi:ksz9567"); MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>"); MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f07ac0e0af59..e0611cba87f9 100644 ---
a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2736,6 +2736,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) umac_reset(priv); + /* Disable the UniMAC RX/TX */ + umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); + /* We may have been suspended and never received a WOL event that * would turn off MPD detection, take care of that now */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 7a2fe63d1136..4508f0d150da 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -73,7 +73,11 @@ struct sifive_fu540_macb_mgmt { /* Max length of transmit frame must be a multiple of 8 bytes */ #define MACB_TX_LEN_ALIGN 8 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) -#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) +/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a + * false amba_error in TX path from the DMA assuming there is not enough + * space in the SRAM (16KB) even when there is. + */ +#define GEM_MAX_TX_LEN (unsigned int)(0x3FC0) #define GEM_MTU_MIN_SIZE ETH_MIN_MTU #define MACB_NETIF_LSO NETIF_F_TSO @@ -1791,16 +1795,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb, /* Validate LSO compatibility */ - /* there is only one buffer */ - if (!skb_is_nonlinear(skb)) + /* there is only one buffer or protocol is not UDP */ + if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) return features; /* length of header */ hdrlen = skb_transport_offset(skb); - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - hdrlen += tcp_hdrlen(skb); - /* For LSO: + /* For UFO only: * When software supplies two or more payload buffers all payload buffers * apart from the last must be a multiple of 8 bytes in size. 
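/* A sketch of the 8-byte rule the macb comment above describes: with GSO,
 * every payload buffer except the last must be a multiple of the TX length
 * alignment, otherwise the offload has to be refused from the driver's
 * ndo_features_check(). This helper is hypothetical, not the macb driver's
 * actual implementation.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>

#define TX_LEN_ALIGN 8

static bool payload_buffers_aligned(struct sk_buff *skb, unsigned int hdrlen)
{
	int i;

	/* payload in the linear area is the first payload buffer */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, TX_LEN_ALIGN))
		return false;

	/* every fragment but the last must keep the 8-byte multiple */
	for (i = 0; i < skb_shinfo(skb)->nr_frags - 1; i++)
		if (!IS_ALIGNED(skb_frag_size(&skb_shinfo(skb)->frags[i]),
				TX_LEN_ALIGN))
			return false;

	return true;
}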
*/ diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index c4f6ec0cd183..17a4110c2e49 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1039,7 +1039,7 @@ static int phy_interface_mode(u8 lmac_type) if (lmac_type == BGX_MODE_QSGMII) return PHY_INTERFACE_MODE_QSGMII; if (lmac_type == BGX_MODE_RGMII) - return PHY_INTERFACE_MODE_RGMII; + return PHY_INTERFACE_MODE_RGMII_RXID; return PHY_INTERFACE_MODE_SGMII; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 9d1f2f88b945..de30d61af065 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3403,6 +3403,13 @@ static int chcr_stats_show(struct seq_file *seq, void *v) atomic_read(&adap->chcr_stats.fallback)); seq_printf(seq, "IPSec PDU: %10u\n", atomic_read(&adap->chcr_stats.ipsec_cnt)); + seq_printf(seq, "TLS PDU Tx: %10u\n", + atomic_read(&adap->chcr_stats.tls_pdu_tx)); + seq_printf(seq, "TLS PDU Rx: %10u\n", + atomic_read(&adap->chcr_stats.tls_pdu_rx)); + seq_printf(seq, "TLS Keys (DDR) Count: %10u\n", + atomic_read(&adap->chcr_stats.tls_key)); + return 0; } DEFINE_SHOW_ATTRIBUTE(chcr_stats); diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index d305d1b24b0a..42b798a3fad4 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -417,7 +417,10 @@ static void de_rx (struct de_private *de) if (status & DescOwn) break; - len = ((status >> 16) & 0x7ff) - 4; + /* the length is actually a 15 bit value here according + * to Table 4-1 in the DE2104x spec so mask is 0x7fff + */ + len = ((status >> 16) & 0x7fff) - 4; mapping = de->rx_skb[rx_tail].mapping; if (unlikely(drop)) { diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 6a7e8993119f..2bd7ace0a953 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -74,7 +74,7 @@ config FSL_XGMAC_MDIO config UCC_GETH tristate "Freescale QE Gigabit Ethernet" - depends on QUICC_ENGINE + depends on QUICC_ENGINE && PPC32 select FSL_PQ_MDIO select PHYLIB ---help--- diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 09dbcd819d84..fd93d542f497 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2453,6 +2453,9 @@ static void dpaa_adjust_link(struct net_device *net_dev) mac_dev->adjust_link(mac_dev); } +/* The Aquantia PHYs are capable of performing rate adaptation */ +#define PHY_VEND_AQUANTIA 0x03a1b400 + static int dpaa_phy_init(struct net_device *net_dev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; @@ -2471,9 +2474,14 @@ static int dpaa_phy_init(struct net_device *net_dev) return -ENODEV; } - /* Remove any features not supported by the controller */ - ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support); - linkmode_and(phy_dev->supported, phy_dev->supported, mask); + /* Unless the PHY is capable of rate adaptation */ + if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII || + ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) { + /* remove any features not supported by the controller */ + ethtool_convert_legacy_u32_to_link_mode(mask, + mac_dev->if_support); + linkmode_and(phy_dev->supported, 
phy_dev->supported, mask); + } phy_support_asym_pause(phy_dev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 42058fad6a3c..0b7d29192b2c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -791,7 +791,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) struct i40e_ring *ring; if (test_bit(__I40E_CONFIG_BUSY, pf->state)) - return -ENETDOWN; + return -EAGAIN; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 037e054b01a2..98017e7d5dd0 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -401,6 +401,8 @@ struct mvneta_pcpu_stats { struct u64_stats_sync syncp; u64 rx_packets; u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; u64 tx_packets; u64 tx_bytes; }; @@ -738,6 +740,8 @@ mvneta_get_stats64(struct net_device *dev, struct mvneta_pcpu_stats *cpu_stats; u64 rx_packets; u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; u64 tx_packets; u64 tx_bytes; @@ -746,19 +750,20 @@ mvneta_get_stats64(struct net_device *dev, start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); rx_packets = cpu_stats->rx_packets; rx_bytes = cpu_stats->rx_bytes; + rx_dropped = cpu_stats->rx_dropped; + rx_errors = cpu_stats->rx_errors; tx_packets = cpu_stats->tx_packets; tx_bytes = cpu_stats->tx_bytes; } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; + stats->rx_dropped += rx_dropped; + stats->rx_errors += rx_errors; stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; } - stats->rx_errors = dev->stats.rx_errors; - stats->rx_dropped = dev->stats.rx_dropped; - stats->tx_dropped = dev->stats.tx_dropped; } @@ -1736,8 +1741,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, static void mvneta_rx_error(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); u32 status = rx_desc->status; + /* update per-cpu counter */ + u64_stats_update_begin(&stats->syncp); + stats->rx_errors++; + u64_stats_update_end(&stats->syncp); + switch (status & MVNETA_RXD_ERR_CODE_MASK) { case MVNETA_RXD_ERR_CRC: netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", @@ -2179,11 +2190,15 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE); if (unlikely(!rxq->skb)) { - netdev_err(dev, - "Can't allocate skb on queue %d\n", - rxq->id); - dev->stats.rx_dropped++; + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id); rxq->skb_alloc_err++; + + u64_stats_update_begin(&stats->syncp); + stats->rx_dropped++; + u64_stats_update_end(&stats->syncp); + return -ENOMEM; } page_pool_release_page(rxq->page_pool, page); @@ -2270,7 +2285,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* Check errors only for FIRST descriptor */ if (rx_status & MVNETA_RXD_ERR_SUMMARY) { mvneta_rx_error(pp, rx_desc); - dev->stats.rx_errors++; /* leave the descriptor untouched */ continue; } @@ -2372,7 +2386,6 @@ err_drop_frame_ret_pool: mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, rx_desc->buf_phys_addr); err_drop_frame: - dev->stats.rx_errors++; mvneta_rx_error(pp, rx_desc); /* leave the descriptor untouched */ continue; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h 
b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index d787bc0a4155..e09bc3858d57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { - if (!MLX5_CAP_GEN(mdev, tls)) + if (!MLX5_CAP_GEN(mdev, tls_tx)) return false; if (!MLX5_CAP_GEN(mdev, log_max_dek)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 71384ad1a443..ef1ed15a53b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, int datalen; u32 skb_seq; - if (MLX5_CAP_GEN(sq->channel->mdev, tls)) { + if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) { skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi); goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 9e9960146e5b..1c3ab69cbd96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) wqe_counter = be16_to_cpu(cqe->wqe_counter); - if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { - netdev_WARN_ONCE(cq->channel->netdev, - "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); - if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) - queue_work(cq->channel->priv->wq, &sq->recover_work); - break; - } do { struct mlx5e_sq_wqe_info *wi; u16 ci; @@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wi = &sq->db.ico_wqe[ci]; + if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OP in ICOSQ CQE: 0x%x\n", + get_cqe_opcode(cqe)); + if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) + queue_work(cq->channel->priv->wq, &sq->recover_work); + break; + } + if (likely(wi->opcode == MLX5_OPCODE_UMR)) { sqcc += MLX5E_UMR_WQEBBS; wi->umr.rq->mpwqe.umr_completed++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2565ba8692d9..ee60383adc5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -451,34 +451,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) i = 0; do { + struct mlx5e_tx_wqe_info *wi; u16 wqe_counter; bool last_wqe; + u16 ci; mlx5_cqwq_pop(&cq->wq); wqe_counter = be16_to_cpu(cqe->wqe_counter); - if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) { - if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, - &sq->state)) { - struct mlx5e_tx_wqe_info *wi; - u16 ci; - - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); - wi = &sq->db.wqe_info[ci]; - mlx5e_dump_error_cqe(sq, - (struct mlx5_err_cqe *)cqe); - mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); - queue_work(cq->channel->priv->wq, - &sq->recover_work); - } - stats->cqe_err++; - } - do { - struct mlx5e_tx_wqe_info *wi; struct sk_buff *skb; - u16 ci; int j; last_wqe = (sqcc == wqe_counter); @@ -516,6 +499,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) napi_consume_skb(skb, napi_budget); } while (!last_wqe); + if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) { + if 
(!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, + &sq->state)) { + mlx5e_dump_error_cqe(sq, + (struct mlx5_err_cqe *)cqe); + mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); + queue_work(cq->channel->priv->wq, + &sq->recover_work); + } + stats->cqe_err++; + } + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); stats->cqes += i; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index e4ec0e03c289..4c61d25d2e88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context) mutex_lock(&fpga_xfrm->lock); if (!--fpga_xfrm->num_rules) { mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); + kfree(fpga_xfrm->sa_ctx); fpga_xfrm->sa_ctx = NULL; } mutex_unlock(&fpga_xfrm->lock); @@ -1478,7 +1479,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) return 0; - if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { + if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c7a16ae05fa8..9dc24241dc91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1582,16 +1582,16 @@ struct match_list_head { struct match_list first; }; -static void free_match_list(struct match_list_head *head) +static void free_match_list(struct match_list_head *head, bool ft_locked) { if (!list_empty(&head->list)) { struct match_list *iter, *match_tmp; list_del(&head->first.list); - tree_put_node(&head->first.g->node, false); + tree_put_node(&head->first.g->node, ft_locked); list_for_each_entry_safe(iter, match_tmp, &head->list, list) { - tree_put_node(&iter->g->node, false); + tree_put_node(&iter->g->node, ft_locked); list_del(&iter->list); kfree(iter); } @@ -1600,7 +1600,8 @@ static void free_match_list(struct match_list_head *head) static int build_match_list(struct match_list_head *match_head, struct mlx5_flow_table *ft, - const struct mlx5_flow_spec *spec) + const struct mlx5_flow_spec *spec, + bool ft_locked) { struct rhlist_head *tmp, *list; struct mlx5_flow_group *g; @@ -1625,7 +1626,7 @@ static int build_match_list(struct match_list_head *match_head, curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); if (!curr_match) { - free_match_list(match_head); + free_match_list(match_head, ft_locked); err = -ENOMEM; goto out; } @@ -1805,7 +1806,7 @@ search_again_locked: version = atomic_read(&ft->node.version); /* Collect all fgs which has a matching match_criteria */ - err = build_match_list(&match_head, ft, spec); + err = build_match_list(&match_head, ft, spec, take_write); if (err) { if (take_write) up_write_ref_node(&ft->node, false); @@ -1819,7 +1820,7 @@ search_again_locked: rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest, dest_num, version); - free_match_list(&match_head); + free_match_list(&match_head, take_write); if (!IS_ERR(rule) || (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { if (take_write) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index d89ff1d09119..909a7f284614 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -242,7 +242,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } - if (MLX5_CAP_GEN(dev, tls)) { + if (MLX5_CAP_GEN(dev, tls_tx)) { err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 9bf8da5f6daf..3fe878d7c94c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -573,6 +573,7 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) { + enum mlxsw_reg_mgpir_device_type device_type; int index, max_index, sensor_index; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; char mtmp_pl[MLXSW_REG_MTMP_LEN]; @@ -584,8 +585,9 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) if (err) return err; - mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL); - if (!gbox_num) + mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL); + if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE || + !gbox_num) return 0; index = mlxsw_hwmon->module_sensor_max; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index c721b171bd8d..ce0a6837daa3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -895,8 +895,10 @@ static int mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, struct mlxsw_thermal *thermal) { + enum mlxsw_reg_mgpir_device_type device_type; struct mlxsw_thermal_module *gearbox_tz; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; + u8 gbox_num; int i; int err; @@ -908,11 +910,13 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, if (err) return err; - mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL, + mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL); - if (!thermal->tz_gearbox_num) + if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE || + !gbox_num) return 0; + thermal->tz_gearbox_num = gbox_num; thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num, sizeof(*thermal->tz_gearbox_arr), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 49933818c6f5..2dc0978428e6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled, start_again: err = devlink_dpipe_entry_ctx_prepare(dump_ctx); if (err) - return err; + goto err_ctx_prepare; j = 0; for (; i < rif_count; i++) { struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i); @@ -247,6 +247,7 @@ start_again: return 0; err_entry_append: err_entry_get: +err_ctx_prepare: rtnl_unlock(); devlink_dpipe_entry_clear(&entry); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index ce707723f8cf..4a77b511ead2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -4844,6 +4844,23 @@ mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, fib_node->fib_entry = NULL; } +static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry) +{ + struct 
mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; + struct mlxsw_sp_fib4_entry *fib4_replaced; + + if (!fib_node->fib_entry) + return true; + + fib4_replaced = container_of(fib_node->fib_entry, + struct mlxsw_sp_fib4_entry, common); + if (fib4_entry->tb_id == RT_TABLE_MAIN && + fib4_replaced->tb_id == RT_TABLE_LOCAL) + return false; + + return true; +} + static int mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info) @@ -4872,6 +4889,12 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp, goto err_fib4_entry_create; } + if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) { + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + return 0; + } + replaced = fib_node->fib_entry; err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common); if (err) { @@ -4908,7 +4931,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, return; fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); - if (WARN_ON(!fib4_entry)) + if (!fib4_entry) return; fib_node = fib4_entry->common.fib_node; @@ -4970,6 +4993,9 @@ static void mlxsw_sp_rt6_release(struct fib6_info *rt) static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) { + struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh; + + fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt); kfree(mlxsw_sp_rt6); } @@ -5408,6 +5434,27 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, return NULL; } +static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; + struct mlxsw_sp_fib6_entry *fib6_replaced; + struct fib6_info *rt, *rt_replaced; + + if (!fib_node->fib_entry) + return true; + + fib6_replaced = container_of(fib_node->fib_entry, + struct mlxsw_sp_fib6_entry, + common); + rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced); + if (rt->fib6_table->tb6_id == RT_TABLE_MAIN && + rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL) + return false; + + return true; +} + static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp, struct fib6_info **rt_arr, unsigned int nrt6) @@ -5442,6 +5489,12 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp, goto err_fib6_entry_create; } + if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) { + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + return 0; + } + replaced = fib_node->fib_entry; err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common); if (err) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 0dacf2c18c09..3e613058e225 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -44,8 +44,8 @@ /* Add/subtract the Adjustment_Value when making a Drift adjustment */ #define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 #define QED_TIMESTAMP_MASK BIT(16) -/* Param mask for Hardware to detect/timestamp the unicast PTP packets */ -#define QED_PTP_UCAST_PARAM_MASK 0xF +/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */ +#define QED_PTP_UCAST_PARAM_MASK 0x70F static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index aaa316be6183..a2168a14794c 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ 
b/drivers/net/ethernet/realtek/r8169_main.c @@ -2477,15 +2477,18 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_12: case RTL_GIGA_MAC_VER_17: + pcie_set_readrq(tp->pci_dev, 512); r8168b_1_hw_jumbo_enable(tp); break; case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + pcie_set_readrq(tp->pci_dev, 512); r8168c_hw_jumbo_enable(tp); break; case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: r8168dp_hw_jumbo_enable(tp); break; case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33: + pcie_set_readrq(tp->pci_dev, 512); r8168e_hw_jumbo_enable(tp); break; default: @@ -2515,6 +2518,9 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) break; } rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, 4096); } static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu) diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index e61eb891c0f7..db6b2988e632 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -823,7 +823,6 @@ static int ioc3_close(struct net_device *dev) netif_stop_queue(dev); ioc3_stop(ip); - free_irq(dev->irq, dev); ioc3_free_rx_bufs(ip); ioc3_clean_tx_ring(ip); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 7ec895407d23..e0a5fe83d8e0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -413,6 +413,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos) dll_lock = rgmii_readl(ethqos, SDC4_STATUS); if (dll_lock & SDC4_STATUS_DLL_LOCK) break; + retry--; } while (retry > 0); if (!retry) dev_err(&ethqos->pdev->dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index f0c0ea616032..dc09d2131e40 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -420,7 +420,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, value |= GMAC_PACKET_FILTER_PM; /* Set all the bits of the HASH tab */ memset(mc_filter, 0xff, sizeof(mc_filter)); - } else if (!netdev_mc_empty(dev)) { + } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { struct netdev_hw_addr *ha; /* Hash filter for multicast */ @@ -736,11 +736,14 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, __le16 perfect_match, bool is_double) { void __iomem *ioaddr = hw->pcsr; + u32 value; writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE); + value = readl(ioaddr + GMAC_VLAN_TAG); + if (hash) { - u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV; + value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV; if (is_double) { value |= GMAC_VLAN_EDVLP; value |= GMAC_VLAN_ESVL; @@ -759,8 +762,6 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG); } else { - u32 value = readl(ioaddr + GMAC_VLAN_TAG); - value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV); value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL); value &= ~GMAC_VLAN_DOVLTC; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 2af3ac5409b7..67b754a56288 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -458,7 +458,7 @@ static void dwxgmac2_set_filter(struct mac_device_info
*hw, for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++) writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i)); - } else if (!netdev_mc_empty(dev)) { + } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { struct netdev_hw_addr *ha; value |= XGMAC_FILTER_HMC; @@ -569,7 +569,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, writel(value, ioaddr + XGMAC_PACKET_FILTER); - value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV; + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV; if (is_double) { value |= XGMAC_VLAN_EDVLP; value |= XGMAC_VLAN_ESVL; @@ -584,7 +586,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, writel(value, ioaddr + XGMAC_PACKET_FILTER); - value = XGMAC_VLAN_ETV; + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value |= XGMAC_VLAN_ETV; if (is_double) { value |= XGMAC_VLAN_EDVLP; value |= XGMAC_VLAN_ESVL; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 623521052152..fe2c9fa6a71c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -95,7 +95,7 @@ static int stmmac_default_data(struct pci_dev *pdev, plat->bus_id = 1; plat->phy_addr = 0; - plat->interface = PHY_INTERFACE_MODE_GMII; + plat->phy_interface = PHY_INTERFACE_MODE_GMII; plat->dma_cfg->pbl = 32; plat->dma_cfg->pblx8 = true; @@ -217,7 +217,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev, { plat->bus_id = 1; plat->phy_addr = 0; - plat->interface = PHY_INTERFACE_MODE_SGMII; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + return ehl_common_data(pdev, plat); } @@ -230,7 +231,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev, { plat->bus_id = 1; plat->phy_addr = 0; - plat->interface = PHY_INTERFACE_MODE_RGMII; + plat->phy_interface = PHY_INTERFACE_MODE_RGMII; + return ehl_common_data(pdev, plat); } @@ -258,7 +260,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev, { plat->bus_id = 1; plat->phy_addr = 0; - plat->interface = PHY_INTERFACE_MODE_SGMII; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; return tgl_common_data(pdev, plat); } @@ -358,7 +360,7 @@ static int quark_default_data(struct pci_dev *pdev, plat->bus_id = pci_dev_id(pdev); plat->phy_addr = ret; - plat->interface = PHY_INTERFACE_MODE_RMII; + plat->phy_interface = PHY_INTERFACE_MODE_RMII; plat->dma_cfg->pbl = 16; plat->dma_cfg->pblx8 = true; @@ -415,7 +417,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev, plat->bus_id = 1; plat->phy_addr = -1; - plat->interface = PHY_INTERFACE_MODE_GMII; + plat->phy_interface = PHY_INTERFACE_MODE_GMII; plat->dma_cfg->pbl = 32; plat->dma_cfg->pblx8 = true; diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 20adfe544294..b86611041db6 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -120,7 +120,7 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog, } if (prog) - bpf_prog_add(prog, nvdev->num_chn); + bpf_prog_add(prog, nvdev->num_chn - 1); for (i = 0; i < nvdev->num_chn; i++) rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog); @@ -136,6 +136,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) { struct netdev_bpf xdp; bpf_op_t ndo_bpf; + int ret; ASSERT_RTNL(); @@ -148,10 +149,18 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) memset(&xdp, 0, sizeof(xdp)); + if (prog) + bpf_prog_inc(prog); + xdp.command = XDP_SETUP_PROG; xdp.prog = prog; - return 
ndo_bpf(vf_netdev, &xdp); + ret = ndo_bpf(vf_netdev, &xdp); + + if (ret && prog) + bpf_prog_put(prog); + + return ret; } static u32 netvsc_xdp_query(struct netvsc_device *nvdev) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 8fc71bd49894..65e12cb07f45 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1059,9 +1059,12 @@ static int netvsc_attach(struct net_device *ndev, prog = dev_info->bprog; if (prog) { + bpf_prog_inc(prog); ret = netvsc_xdp_set(ndev, prog, NULL, nvdev); - if (ret) + if (ret) { + bpf_prog_put(prog); goto err1; + } } /* In any case device is now ready */ diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 5c5427c840b6..d7706a0346f2 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -934,9 +934,7 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, int nsim_dev_init(void) { nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL); - if (IS_ERR(nsim_dev_ddir)) - return PTR_ERR(nsim_dev_ddir); - return 0; + return PTR_ERR_OR_ZERO(nsim_dev_ddir); } void nsim_dev_exit(void) diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 3998cac49d7f..9edd94679283 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -84,8 +84,8 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) int ret, i; void *bd_buffer; dma_addr_t bd_dma_addr; - u32 riptr; - u32 tiptr; + s32 riptr; + s32 tiptr; u32 gumr; ut_info = priv->ut_info; @@ -195,7 +195,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), ALIGNMENT_OF_UCC_HDLC_PRAM); - if (IS_ERR_VALUE(priv->ucc_pram_offset)) { + if (priv->ucc_pram_offset < 0) { dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); ret = -ENOMEM; goto free_tx_bd; @@ -233,18 +233,23 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) /* Alloc riptr, tiptr */ riptr = qe_muram_alloc(32, 32); - if (IS_ERR_VALUE(riptr)) { + if (riptr < 0) { dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n"); ret = -ENOMEM; goto free_tx_skbuff; } tiptr = qe_muram_alloc(32, 32); - if (IS_ERR_VALUE(tiptr)) { + if (tiptr < 0) { dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); ret = -ENOMEM; goto free_riptr; } + if (riptr != (u16)riptr || tiptr != (u16)tiptr) { + dev_err(priv->dev, "MURAM allocation out of addressable range\n"); + ret = -ENOMEM; + goto free_tiptr; + } /* Set RIPTR, TIPTR */ iowrite16be(riptr, &priv->ucc_pram->riptr); @@ -623,8 +628,8 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget) if (howmany < budget) { napi_complete_done(napi, howmany); - qe_setbits32(priv->uccf->p_uccm, - (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); + qe_setbits_be32(priv->uccf->p_uccm, + (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); } return howmany; @@ -730,8 +735,8 @@ static int uhdlc_open(struct net_device *dev) static void uhdlc_memclean(struct ucc_hdlc_private *priv) { - qe_muram_free(priv->ucc_pram->riptr); - qe_muram_free(priv->ucc_pram->tiptr); + qe_muram_free(ioread16be(&priv->ucc_pram->riptr)); + qe_muram_free(ioread16be(&priv->ucc_pram->tiptr)); if (priv->rx_bd_base) { dma_free_coherent(priv->dev, diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h index 8b3507ae1781..71d5ad0a7b98 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.h +++ b/drivers/net/wan/fsl_ucc_hdlc.h @@ -98,7 +98,7 @@ struct 
ucc_hdlc_private { unsigned short tx_ring_size; unsigned short rx_ring_size; - u32 ucc_pram_offset; + s32 ucc_pram_offset; unsigned short encoding; unsigned short parity; diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index 121d9ea0f135..3725e9cd85f4 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -263,6 +263,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, } else { node = kzalloc(sizeof(*node), GFP_KERNEL); if (unlikely(!node)) { + list_del(&newnode->peer_list); kfree(newnode); return -ENOMEM; } diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index 0fdbd1c45977..bda26405497c 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -569,10 +569,8 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info) private_key); list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { - if (wg_noise_precompute_static_static(peer)) - wg_noise_expire_current_peer_keypairs(peer); - else - wg_peer_remove(peer); + BUG_ON(!wg_noise_precompute_static_static(peer)); + wg_noise_expire_current_peer_keypairs(peer); } wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); up_write(&wg->static_identity.lock); diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index d71c8db68a8c..919d9d866446 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -46,17 +46,21 @@ void __init wg_noise_init(void) /* Must hold peer->handshake.static_identity->lock */ bool wg_noise_precompute_static_static(struct wg_peer *peer) { - bool ret = true; + bool ret; down_write(&peer->handshake.lock); - if (peer->handshake.static_identity->has_identity) + if (peer->handshake.static_identity->has_identity) { ret = curve25519( peer->handshake.precomputed_static_static, peer->handshake.static_identity->static_private, peer->handshake.remote_static); - else + } else { + u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; + + ret = curve25519(empty, empty, peer->handshake.remote_static); memset(peer->handshake.precomputed_static_static, 0, NOISE_PUBLIC_KEY_LEN); + } up_write(&peer->handshake.lock); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 8878409d2f07..22a32eb10f01 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1897,27 +1897,55 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, ieee80211_resume_disconnect(vif); } -static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) { - u32 base = mvm->trans->dbg.lmac_error_event_table[0]; struct error_table_start { /* cf. 
struct iwl_error_event_table */ u32 valid; - u32 error_id; + __le32 err_id; } err_info; - iwl_trans_read_mem_bytes(mvm->trans, base, + if (!base) + return false; + + iwl_trans_read_mem_bytes(trans, base, &err_info, sizeof(err_info)); + if (err_info.valid && err_id) + *err_id = le32_to_cpu(err_info.err_id); - if (err_info.valid && - err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { - struct cfg80211_wowlan_wakeup wakeup = { - .rfkill_release = true, - }; - ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); + return !!err_info.valid; +} + +static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 err_id; + + /* check for lmac1 error */ + if (iwl_mvm_rt_status(mvm->trans, + mvm->trans->dbg.lmac_error_event_table[0], + &err_id)) { + if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + struct cfg80211_wowlan_wakeup wakeup = { + .rfkill_release = true, + }; + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + } + return true; } - return err_info.valid; + + /* check if we have lmac2 set and check for error */ + if (iwl_mvm_rt_status(mvm->trans, + mvm->trans->dbg.lmac_error_event_table[1], NULL)) + return true; + + /* check for umac error */ + if (iwl_mvm_rt_status(mvm->trans, + mvm->trans->dbg.umac_error_event_table, NULL)) + return true; + + return false; } static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index f783d6d53b6f..6e1ea921c299 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -8,6 +8,7 @@ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 Intel Corporation * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -528,6 +530,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) if (req != mvm->ftm_initiator.req) return; + iwl_mvm_ftm_reset(mvm); + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD, LOCATION_GROUP, 0), 0, sizeof(cmd), &cmd)) @@ -641,7 +645,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) lockdep_assert_held(&mvm->mutex); if (!mvm->ftm_initiator.req) { - IWL_ERR(mvm, "Got FTM response but have no request?\n"); return; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 6717f25c46b1..02df603b6400 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -2037,7 +2035,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); - if (IS_ERR(sta)) { + if (IS_ERR_OR_NULL(sta)) { rcu_read_unlock(); WARN(1, "Can't find STA to configure HE\n"); return; @@ -3293,7 +3291,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, 900, - min_duration); + min_duration, false); else iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 7b35f416404c..64ef3f3ba23b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -3320,6 +3320,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, igtk_cmd.sta_id = cpu_to_le32(sta_id); if (remove_key) { + /* This is a valid situation for IGTK */ + if (sta_id == IWL_MVM_INVALID_STA) + return 0; + igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); } else { struct ieee80211_key_seq seq; @@ -3574,9 +3578,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); - if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 1851719e9f4b..d781777b6b96 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -205,9 +205,15 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; - mutex_lock(&mvm->mutex); /* Protect the session to hear the TDLS setup response on the channel */ - iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); + mutex_lock(&mvm->mutex); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) + iwl_mvm_schedule_session_protection(mvm, vif, duration, + duration, true); + else + iwl_mvm_protect_session(mvm, vif, duration, + duration, 100, true); mutex_unlock(&mvm->mutex); } diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 51b138673ddb..c0b420fe5e48 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1056,13 +1056,42 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } +static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_mvm_session_prot_notif *resp; + int resp_len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF || + pkt->hdr.group_id != MAC_CONF_GROUP)) + return true; + + if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { + IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n"); + return true; + } + + resp = (void *)pkt->data; + + if (!resp->status) + IWL_ERR(mvm, + "TIME_EVENT_NOTIFICATION received but not executed\n"); + + return true; +} + void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 duration, u32 min_duration) + u32 duration, u32 min_duration, + bool wait_for_notif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - + const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF, + MAC_CONF_GROUP, 0) }; + struct iwl_notification_wait wait_notif; struct iwl_mvm_session_prot_cmd cmd = { .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, @@ -1071,7 +1100,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; - int ret; lockdep_assert_held(&mvm->mutex); @@ -1092,14 +1120,35 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n", le32_to_cpu(cmd.duration_tu)); - ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD, - MAC_CONF_GROUP, 0), - 0, sizeof(cmd), &cmd); - if (ret) { + if (!wait_for_notif) { + if (iwl_mvm_send_cmd_pdu(mvm, + iwl_cmd_id(SESSION_PROTECTION_CMD, + MAC_CONF_GROUP, 0), + 0, sizeof(cmd), &cmd)) { + IWL_ERR(mvm, + "Couldn't send the SESSION_PROTECTION_CMD\n"); + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); + } + + return; + } + + iwl_init_notification_wait(&mvm->notif_wait, &wait_notif, + notif, ARRAY_SIZE(notif), + iwl_mvm_session_prot_notif, NULL); + + if (iwl_mvm_send_cmd_pdu(mvm, + iwl_cmd_id(SESSION_PROTECTION_CMD, + MAC_CONF_GROUP, 0), + 0, sizeof(cmd), &cmd)) { IWL_ERR(mvm, - "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret); - spin_lock_bh(&mvm->time_event_lock); - iwl_mvm_te_clear_data(mvm, te_data); - spin_unlock_bh(&mvm->time_event_lock); + "Couldn't send the SESSION_PROTECTION_CMD\n"); + iwl_remove_notification(&mvm->notif_wait, &wait_notif); + } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif, + TU_TO_JIFFIES(100))) { + IWL_ERR(mvm, + "Failed to protect session until session protection\n"); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h index df6832b79666..3186d7e40567 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -250,10 +250,12 @@ 
iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data) * @mvm: the mvm component * @vif: the virtual interface for which the protection issued * @duration: the duration of the protection + * @wait_for_notif: if true, will block until the start of the protection */ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 duration, u32 min_duration); + u32 duration, u32 min_duration, + bool wait_for_notif); /** * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index b5a16f00bada..418e59b7c671 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -8,7 +8,7 @@ * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -234,7 +234,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), }; struct iwl_ext_dts_measurement_cmd extcmd = { - .control_mode = cpu_to_le32(DTS_AUTOMATIC), + .control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE), }; u32 cmdid; @@ -734,7 +734,8 @@ static struct thermal_zone_device_ops tzone_ops = { static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) { int i; - char name[] = "iwlwifi"; + char name[16]; + static atomic_t counter = ATOMIC_INIT(0); if (!iwl_mvm_is_tt_in_fw(mvm)) { mvm->tz_device.tzone = NULL; @@ -744,6 +745,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); + sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF); mvm->tz_device.tzone = thermal_zone_device_register(name, IWL_MAX_DTS_TRIPS, IWL_WRITABLE_TRIPS_MSK, diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index c9401c121a14..4e3de684928b 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, rates_max = rates_eid[1]; if (rates_max > MAX_RATES) { lbs_deb_join("invalid rates"); + rcu_read_unlock(); + ret = -EINVAL; goto out; } rates = cmd.bss.rates; diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 98f942b797f7..a7968a84aaf8 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -2884,6 +2884,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, vs_param_set->header.len = cpu_to_le16((((u16) priv->vs_ie[id].ie[1]) & 0x00FF) + 2); + if (le16_to_cpu(vs_param_set->header.len) > + MWIFIEX_MAX_VSIE_LEN) { + mwifiex_dbg(priv->adapter, ERROR, + "Invalid param length!\n"); + break; + } + memcpy(vs_param_set->ie, 
priv->vs_ie[id].ie, le16_to_cpu(vs_param_set->header.len)); *buffer += le16_to_cpu(vs_param_set->header.len) + diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 6dd835f1efc2..fbfa0b15d0c8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -232,6 +232,7 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, if (country_ie_len > (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { + rcu_read_unlock(); mwifiex_dbg(priv->adapter, ERROR, "11D: country_ie_len overflow!, deauth AP\n"); return -EINVAL; diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index 41f0231376c0..132f9e8ed68c 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, "WMM Parameter Set Count: %d\n", wmm_param_ie->qos_info_bitmap & mask); + if (wmm_param_ie->vend_hdr.len + 2 > + sizeof(struct ieee_types_wmm_parameter)) + break; + memcpy((u8 *) &priv->curr_bss_params.bss_descriptor. wmm_ie, wmm_param_ie, wmm_param_ie->vend_hdr.len + 2); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index eccad4987ac8..17e277bf39e0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -92,8 +92,9 @@ static int mt7615_check_eeprom(struct mt76_dev *dev) static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev) { - u8 val, *eeprom = dev->mt76.eeprom.data; + u8 *eeprom = dev->mt76.eeprom.data; u8 tx_mask, rx_mask, max_nss; + u32 val; val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL, eeprom[MT_EE_WIFI_CONF]); diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c index af5c27e1bb07..4820dca958dd 100644 --- a/drivers/net/wireless/realtek/rtw88/wow.c +++ b/drivers/net/wireless/realtek/rtw88/wow.c @@ -281,27 +281,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev) rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE); } -static bool rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable) +static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable) { - bool ret; - /* wait 100ms for wow firmware to finish work */ msleep(100); if (wow_enable) { - if (!rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON)) - ret = 0; + if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON)) + goto wow_fail; } else { - if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) == 0 && - rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE) == 0) - ret = 0; + if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) || + rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE)) + goto wow_fail; } - if (ret) - rtw_err(rtwdev, "failed to check wow status %s\n", - wow_enable ? "enabled" : "disabled"); + return 0; - return ret; +wow_fail: + rtw_err(rtwdev, "failed to check wow status %s\n", + wow_enable ? 
"enabled" : "disabled"); + return -EBUSY; } static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw, diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 1e4a798dce6e..ef326f243f36 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/nvmem-provider.h> +#include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/slab.h> #include "nvmem.h" @@ -54,8 +55,14 @@ static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, void *val, size_t bytes) { - if (nvmem->reg_write) - return nvmem->reg_write(nvmem->priv, offset, val, bytes); + int ret; + + if (nvmem->reg_write) { + gpiod_set_value_cansleep(nvmem->wp_gpio, 0); + ret = nvmem->reg_write(nvmem->priv, offset, val, bytes); + gpiod_set_value_cansleep(nvmem->wp_gpio, 1); + return ret; + } return -EINVAL; } @@ -340,6 +347,14 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) kfree(nvmem); return ERR_PTR(rval); } + if (config->wp_gpio) + nvmem->wp_gpio = config->wp_gpio; + else + nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", + GPIOD_OUT_HIGH); + if (IS_ERR(nvmem->wp_gpio)) + return ERR_CAST(nvmem->wp_gpio); + kref_init(&nvmem->refcnt); INIT_LIST_HEAD(&nvmem->cells); diff --git a/drivers/nvmem/nvmem.h b/drivers/nvmem/nvmem.h index eb8ed7121fa3..be0d66d75c8a 100644 --- a/drivers/nvmem/nvmem.h +++ b/drivers/nvmem/nvmem.h @@ -9,6 +9,7 @@ #include <linux/list.h> #include <linux/nvmem-consumer.h> #include <linux/nvmem-provider.h> +#include <linux/gpio/consumer.h> struct nvmem_device { struct module *owner; @@ -26,6 +27,7 @@ struct nvmem_device { struct list_head cells; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; void *priv; }; diff --git a/drivers/of/base.c b/drivers/of/base.c index 8d173fb3552a..ae03b1218b06 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -416,6 +416,42 @@ int of_cpu_node_to_id(struct device_node *cpu_node) EXPORT_SYMBOL(of_cpu_node_to_id); /** + * of_get_cpu_state_node - Get CPU's idle state node at the given index + * + * @cpu_node: The device node for the CPU + * @index: The index in the list of the idle states + * + * Two generic methods can be used to describe a CPU's idle states, either via + * a flattened description through the "cpu-idle-states" binding or via the + * hierarchical layout, using the "power-domains" and the "domain-idle-states" + * bindings. This function check for both and returns the idle state node for + * the requested index. + * + * In case an idle state node is found at @index, the refcount is incremented + * for it, so call of_node_put() on it when done. Returns NULL if not found. 
+ */ +struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, + int index) +{ + struct of_phandle_args args; + int err; + + err = of_parse_phandle_with_args(cpu_node, "power-domains", + "#power-domain-cells", 0, &args); + if (!err) { + struct device_node *state_node = + of_parse_phandle(args.np, "domain-idle-states", index); + + of_node_put(args.np); + if (state_node) + return state_node; + } + + return of_parse_phandle(cpu_node, "cpu-idle-states", index); +} +EXPORT_SYMBOL(of_get_cpu_state_node); + +/** * __of_device_is_compatible() - Check if the node matches given constraints * @device: pointer to node * @compat: required compatible string, NULL or "" for any match diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig index b8fe166cd0d9..cdb4237bfd02 100644 --- a/drivers/power/avs/Kconfig +++ b/drivers/power/avs/Kconfig @@ -14,7 +14,7 @@ menuconfig POWER_AVS config QCOM_CPR tristate "QCOM Core Power Reduction (CPR) support" - depends on POWER_AVS + depends on POWER_AVS && HAS_IOMEM select PM_OPP select REGMAP help diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c index 9192fb747653..bd7c3e48b386 100644 --- a/drivers/power/avs/qcom-cpr.c +++ b/drivers/power/avs/qcom-cpr.c @@ -517,7 +517,7 @@ static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir) dev_dbg(drv->dev, "UP: -> new_uV: %d last_uV: %d perf state: %u\n", new_uV, last_uV, cpr_get_cur_perf_state(drv)); - } else if (dir == DOWN) { + } else { if (desc->clamp_timer_interval && error_steps < desc->down_threshold) { /* @@ -567,7 +567,7 @@ static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir) /* Disable auto nack down */ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; val = 0; - } else if (dir == DOWN) { + } else { /* Restore default threshold for UP */ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; @@ -1547,8 +1547,6 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain, goto unlock; } - dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners); - drv->corners = devm_kcalloc(drv->dev, drv->num_corners, sizeof(*drv->corners), GFP_KERNEL); @@ -1586,6 +1584,9 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain, acc_desc->enable_mask, acc_desc->enable_mask); + dev_info(drv->dev, "driver initialized with %u OPPs\n", + drv->num_corners); + unlock: mutex_unlock(&drv->lock); diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 398fc954419e..eece97f97ef8 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c @@ -152,18 +152,18 @@ static void px30_iodomain_init(struct rockchip_iodomain *iod) int ret; u32 val; - /* if no VCCIO0 supply we should leave things alone */ + /* if no VCCIO6 supply we should leave things alone */ if (!iod->supplies[PX30_IO_VSEL_VCCIO6_SUPPLY_NUM].reg) return; /* - * set vccio0 iodomain to also use this framework + * set vccio6 iodomain to also use this framework * instead of a special gpio. 
*/ val = PX30_IO_VSEL_VCCIO6_SRC | (PX30_IO_VSEL_VCCIO6_SRC << 16); ret = regmap_write(iod->grf, PX30_IO_VSEL, val); if (ret < 0) - dev_warn(iod->dev, "couldn't update vccio0 ctrl\n"); + dev_warn(iod->dev, "couldn't update vccio6 ctrl\n"); } static void rk3288_iodomain_init(struct rockchip_iodomain *iod) diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 0498363203e8..513efe8e7628 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -26,7 +26,7 @@ config POWER_RESET_AT91_POWEROFF config POWER_RESET_AT91_RESET tristate "Atmel AT91 reset driver" depends on ARCH_AT91 - default SOC_AT91SAM9 || SOC_SAMA5 + default SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5 help This driver supports restart for Atmel AT91SAM9 and SAMA5 SoCs @@ -34,7 +34,7 @@ config POWER_RESET_AT91_RESET config POWER_RESET_AT91_SAMA5D2_SHDWC tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver" depends on ARCH_AT91 - default SOC_SAMA5 + default SOC_SAM9X60 || SOC_SAMA5 help This driver supports the alternate shutdown controller for some Atmel SAMA5 SoCs. It is present for example on SAMA5D2 SoC. diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 3ad7817ce1f0..461b0e506a26 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -49,6 +49,13 @@ config RESET_BRCMSTB This enables the reset controller driver for Broadcom STB SoCs using a SUN_TOP_CTRL_SW_INIT style controller. +config RESET_BRCMSTB_RESCAL + bool "Broadcom STB RESCAL reset controller" + default ARCH_BRCMSTB || COMPILE_TEST + help + This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on + BCM7216. + config RESET_HSDK bool "Synopsys HSDK Reset Driver" depends on HAS_IOMEM @@ -64,6 +71,15 @@ config RESET_IMX7 help This enables the reset controller driver for i.MX7 SoCs. +config RESET_INTEL_GW + bool "Intel Reset Controller Driver" + depends on OF + select REGMAP_MMIO + help + This enables the reset controller driver for Intel Gateway SoCs. + Say Y to control the reset signals provided by the reset controller. + Otherwise, say N. + config RESET_LANTIQ bool "Lantiq XWAY Reset Driver" if COMPILE_TEST default SOC_TYPE_XWAY @@ -89,6 +105,13 @@ config RESET_MESON_AUDIO_ARB This enables the reset driver for Audio Memory Arbiter of Amlogic's A113 based SoCs +config RESET_NPCM + bool "NPCM BMC Reset Driver" if COMPILE_TEST + default ARCH_NPCM + help + This enables the reset controller driver for Nuvoton NPCM + BMC SoCs. + config RESET_OXNAS bool @@ -99,7 +122,7 @@ config RESET_PISTACHIO This enables the reset driver for ImgTec Pistachio SoCs.
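A minimal sketch of how a consumer might iterate the idle states exposed by the of_get_cpu_state_node() helper added in drivers/of/base.c above; the count_cpu_idle_states() wrapper is hypothetical (not part of this patch set) and assumes only a CPU node that uses either of the two supported bindings:

#include <linux/of.h>

static int count_cpu_idle_states(struct device_node *cpu_node)
{
	struct device_node *state_node;
	int i = 0;

	/* The helper returns NULL once no idle state exists at index i. */
	while ((state_node = of_get_cpu_state_node(cpu_node, i))) {
		/* Each successful lookup takes a reference; drop it here. */
		of_node_put(state_node);
		i++;
	}

	return i;
}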
config RESET_QCOM_AOSS - bool "Qcom AOSS Reset Driver" + tristate "Qcom AOSS Reset Driver" depends on ARCH_QCOM || COMPILE_TEST help This enables the AOSS (always on subsystem) reset driver diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index cf60ce526064..249ed357c997 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -8,12 +8,15 @@ obj-$(CONFIG_RESET_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o +obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o obj-$(CONFIG_RESET_IMX7) += reset-imx7.o +obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o obj-$(CONFIG_RESET_MESON) += reset-meson.o obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o +obj-$(CONFIG_RESET_NPCM) += reset-npcm.o obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 7597c70e04d5..01c0c7aa835c 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -150,13 +150,14 @@ int devm_reset_controller_register(struct device *dev, return -ENOMEM; ret = reset_controller_register(rcdev); - if (!ret) { - *rcdevp = rcdev; - devres_add(dev, rcdevp); - } else { + if (ret) { devres_free(rcdevp); + return ret; } + *rcdevp = rcdev; + devres_add(dev, rcdevp); + return ret; } EXPORT_SYMBOL_GPL(devm_reset_controller_register); @@ -787,13 +788,14 @@ struct reset_control *__devm_reset_control_get(struct device *dev, return ERR_PTR(-ENOMEM); rstc = __reset_control_get(dev, id, index, shared, optional, acquired); - if (!IS_ERR_OR_NULL(rstc)) { - *ptr = rstc; - devres_add(dev, ptr); - } else { + if (IS_ERR_OR_NULL(rstc)) { devres_free(ptr); + return rstc; } + *ptr = rstc; + devres_add(dev, ptr); + return rstc; } EXPORT_SYMBOL_GPL(__devm_reset_control_get); @@ -919,22 +921,21 @@ EXPORT_SYMBOL_GPL(of_reset_control_array_get); struct reset_control * devm_reset_control_array_get(struct device *dev, bool shared, bool optional) { - struct reset_control **devres; - struct reset_control *rstc; + struct reset_control **ptr, *rstc; - devres = devres_alloc(devm_reset_control_release, sizeof(*devres), - GFP_KERNEL); - if (!devres) + ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) return ERR_PTR(-ENOMEM); rstc = of_reset_control_array_get(dev->of_node, shared, optional, true); if (IS_ERR_OR_NULL(rstc)) { - devres_free(devres); + devres_free(ptr); return rstc; } - *devres = rstc; - devres_add(dev, devres); + *ptr = rstc; + devres_add(dev, ptr); return rstc; } diff --git a/drivers/reset/reset-brcmstb-rescal.c b/drivers/reset/reset-brcmstb-rescal.c new file mode 100644 index 000000000000..b6f074d6a65f --- /dev/null +++ b/drivers/reset/reset-brcmstb-rescal.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2020 Broadcom */ + +#include <linux/device.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> + +#define BRCM_RESCAL_START 0x0 +#define BRCM_RESCAL_START_BIT BIT(0) +#define BRCM_RESCAL_CTRL 0x4 +#define BRCM_RESCAL_STATUS 0x8 +#define BRCM_RESCAL_STATUS_BIT BIT(0) + +struct brcm_rescal_reset { + void __iomem *base; + struct device *dev; + struct 
reset_controller_dev rcdev; +}; + +static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct brcm_rescal_reset *data = + container_of(rcdev, struct brcm_rescal_reset, rcdev); + void __iomem *base = data->base; + u32 reg; + int ret; + + reg = readl(base + BRCM_RESCAL_START); + writel(reg | BRCM_RESCAL_START_BIT, base + BRCM_RESCAL_START); + reg = readl(base + BRCM_RESCAL_START); + if (!(reg & BRCM_RESCAL_START_BIT)) { + dev_err(data->dev, "failed to start SATA/PCIe rescal\n"); + return -EIO; + } + + ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg, + !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000); + if (ret) { + dev_err(data->dev, "time out on SATA/PCIe rescal\n"); + return ret; + } + + reg = readl(base + BRCM_RESCAL_START); + writel(reg & ~BRCM_RESCAL_START_BIT, base + BRCM_RESCAL_START); + + dev_dbg(data->dev, "SATA/PCIe rescal success\n"); + + return 0; +} + +static int brcm_rescal_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + /* This is needed if #reset-cells == 0. */ + return 0; +} + +static const struct reset_control_ops brcm_rescal_reset_ops = { + .reset = brcm_rescal_reset_set, +}; + +static int brcm_rescal_reset_probe(struct platform_device *pdev) +{ + struct brcm_rescal_reset *data; + struct resource *res; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->base)) + return PTR_ERR(data->base); + + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = 1; + data->rcdev.ops = &brcm_rescal_reset_ops; + data->rcdev.of_node = pdev->dev.of_node; + data->rcdev.of_xlate = brcm_rescal_reset_xlate; + data->dev = &pdev->dev; + + return devm_reset_controller_register(&pdev->dev, &data->rcdev); +} + +static const struct of_device_id brcm_rescal_reset_of_match[] = { + { .compatible = "brcm,bcm7216-pcie-sata-rescal" }, + { }, +}; +MODULE_DEVICE_TABLE(of, brcm_rescal_reset_of_match); + +static struct platform_driver brcm_rescal_reset_driver = { + .probe = brcm_rescal_reset_probe, + .driver = { + .name = "brcm-rescal-reset", + .of_match_table = brcm_rescal_reset_of_match, + } +}; +module_platform_driver(brcm_rescal_reset_driver); + +MODULE_AUTHOR("Broadcom"); +MODULE_DESCRIPTION("Broadcom SATA/PCIe rescal reset controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/reset/reset-intel-gw.c b/drivers/reset/reset-intel-gw.c new file mode 100644 index 000000000000..854238444616 --- /dev/null +++ b/drivers/reset/reset-intel-gw.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 Intel Corporation. 
+ * Lei Chuanhua <Chuanhua.lei@intel.com> + */ + +#include <linux/bitfield.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> + +#define RCU_RST_STAT 0x0024 +#define RCU_RST_REQ 0x0048 + +#define REG_OFFSET GENMASK(31, 16) +#define BIT_OFFSET GENMASK(15, 8) +#define STAT_BIT_OFFSET GENMASK(7, 0) + +#define to_reset_data(x) container_of(x, struct intel_reset_data, rcdev) + +struct intel_reset_soc { + bool legacy; + u32 reset_cell_count; +}; + +struct intel_reset_data { + struct reset_controller_dev rcdev; + struct notifier_block restart_nb; + const struct intel_reset_soc *soc_data; + struct regmap *regmap; + struct device *dev; + u32 reboot_id; +}; + +static const struct regmap_config intel_rcu_regmap_config = { + .name = "intel-reset", + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, +}; + +/* + * Reset status register offset relative to + * the reset control register(X) is X + 4 + */ +static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data, + unsigned long id, u32 *rst_req, + u32 *req_bit, u32 *stat_bit) +{ + *rst_req = FIELD_GET(REG_OFFSET, id); + *req_bit = FIELD_GET(BIT_OFFSET, id); + + if (data->soc_data->legacy) + *stat_bit = FIELD_GET(STAT_BIT_OFFSET, id); + else + *stat_bit = *req_bit; + + if (data->soc_data->legacy && *rst_req == RCU_RST_REQ) + return RCU_RST_STAT; + else + return *rst_req + 0x4; +} + +static int intel_set_clr_bits(struct intel_reset_data *data, unsigned long id, + bool set) +{ + u32 rst_req, req_bit, rst_stat, stat_bit, val; + int ret; + + rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req, + &req_bit, &stat_bit); + + val = set ? BIT(req_bit) : 0; + ret = regmap_update_bits(data->regmap, rst_req, BIT(req_bit), val); + if (ret) + return ret; + + return regmap_read_poll_timeout(data->regmap, rst_stat, val, + set == !!(val & BIT(stat_bit)), 20, + 200); +} + +static int intel_assert_device(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct intel_reset_data *data = to_reset_data(rcdev); + int ret; + + ret = intel_set_clr_bits(data, id, true); + if (ret) + dev_err(data->dev, "Reset assert failed %d\n", ret); + + return ret; +} + +static int intel_deassert_device(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct intel_reset_data *data = to_reset_data(rcdev); + int ret; + + ret = intel_set_clr_bits(data, id, false); + if (ret) + dev_err(data->dev, "Reset deassert failed %d\n", ret); + + return ret; +} + +static int intel_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct intel_reset_data *data = to_reset_data(rcdev); + u32 rst_req, req_bit, rst_stat, stat_bit, val; + int ret; + + rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req, + &req_bit, &stat_bit); + ret = regmap_read(data->regmap, rst_stat, &val); + if (ret) + return ret; + + return !!(val & BIT(stat_bit)); +} + +static const struct reset_control_ops intel_reset_ops = { + .assert = intel_assert_device, + .deassert = intel_deassert_device, + .status = intel_reset_status, +}; + +static int intel_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *spec) +{ + struct intel_reset_data *data = to_reset_data(rcdev); + u32 id; + + if (spec->args[1] > 31) + return -EINVAL; + + id = FIELD_PREP(REG_OFFSET, spec->args[0]); + id |= FIELD_PREP(BIT_OFFSET, spec->args[1]); + + if (data->soc_data->legacy) { + if (spec->args[2] > 31) + return -EINVAL; + + id |= 
FIELD_PREP(STAT_BIT_OFFSET, spec->args[2]); + } + + return id; +} + +static int intel_reset_restart_handler(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct intel_reset_data *reset_data; + + reset_data = container_of(nb, struct intel_reset_data, restart_nb); + intel_assert_device(&reset_data->rcdev, reset_data->reboot_id); + + return NOTIFY_DONE; +} + +static int intel_reset_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct intel_reset_data *data; + void __iomem *base; + u32 rb_id[3]; + int ret; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->soc_data = of_device_get_match_data(dev); + if (!data->soc_data) + return -ENODEV; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + data->regmap = devm_regmap_init_mmio(dev, base, + &intel_rcu_regmap_config); + if (IS_ERR(data->regmap)) { + dev_err(dev, "regmap initialization failed\n"); + return PTR_ERR(data->regmap); + } + + ret = device_property_read_u32_array(dev, "intel,global-reset", rb_id, + data->soc_data->reset_cell_count); + if (ret) { + dev_err(dev, "Failed to get global reset offset!\n"); + return ret; + } + + data->dev = dev; + data->rcdev.of_node = np; + data->rcdev.owner = dev->driver->owner; + data->rcdev.ops = &intel_reset_ops; + data->rcdev.of_xlate = intel_reset_xlate; + data->rcdev.of_reset_n_cells = data->soc_data->reset_cell_count; + ret = devm_reset_controller_register(&pdev->dev, &data->rcdev); + if (ret) + return ret; + + data->reboot_id = FIELD_PREP(REG_OFFSET, rb_id[0]); + data->reboot_id |= FIELD_PREP(BIT_OFFSET, rb_id[1]); + + if (data->soc_data->legacy) + data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET, rb_id[2]); + + data->restart_nb.notifier_call = intel_reset_restart_handler; + data->restart_nb.priority = 128; + register_restart_handler(&data->restart_nb); + + return 0; +} + +static const struct intel_reset_soc xrx200_data = { + .legacy = true, + .reset_cell_count = 3, +}; + +static const struct intel_reset_soc lgm_data = { + .legacy = false, + .reset_cell_count = 2, +}; + +static const struct of_device_id intel_reset_match[] = { + { .compatible = "intel,rcu-lgm", .data = &lgm_data }, + { .compatible = "intel,rcu-xrx200", .data = &xrx200_data }, + {} +}; + +static struct platform_driver intel_reset_driver = { + .probe = intel_reset_probe, + .driver = { + .name = "intel-reset", + .of_match_table = intel_reset_match, + }, +}; + +static int __init intel_reset_init(void) +{ + return platform_driver_register(&intel_reset_driver); +} + +/* + * The RCU is a system-core entity located in the always-on domain, so its + * clocks and resources are initialized during system core init. Most + * platform- and architecture-specific devices also rely on it to perform + * reset operations as part of their own initialization, so register this + * driver at postcore_initcall time. + */ +postcore_initcall(intel_reset_init); diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c new file mode 100644 index 000000000000..2ea4d3136e15 --- /dev/null +++ b/drivers/reset/reset-npcm.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Nuvoton Technology corporation.
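As a worked example (specifier values hypothetical) of the reset-specifier packing performed by intel_reset_xlate() above: for a legacy "intel,rcu-xrx200" consumer with a three-cell specifier <0x48 30 31>, the fields combine as follows:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Mirrors REG_OFFSET/BIT_OFFSET/STAT_BIT_OFFSET from the driver above. */
static unsigned long example_xlate_id(void)
{
	unsigned long id;

	id = FIELD_PREP(GENMASK(31, 16), 0x48);	/* 0x00480000: control reg offset */
	id |= FIELD_PREP(GENMASK(15, 8), 30);	/* 0x00001e00: request bit 30 */
	id |= FIELD_PREP(GENMASK(7, 0), 31);	/* 0x0000001f: status bit 31 */

	return id;				/* 0x00481e1f */
}

Since 0x48 is RCU_RST_REQ and the SoC data is legacy, id_to_reg_and_bit_offsets() would then map this id's status reads to RCU_RST_STAT rather than to the usual control-register-plus-4 location.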
+ +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/reset-controller.h> +#include <linux/spinlock.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/of_address.h> + +/* NPCM7xx GCR registers */ +#define NPCM_MDLR_OFFSET 0x7C +#define NPCM_MDLR_USBD0 BIT(9) +#define NPCM_MDLR_USBD1 BIT(8) +#define NPCM_MDLR_USBD2_4 BIT(21) +#define NPCM_MDLR_USBD5_9 BIT(22) + +#define NPCM_USB1PHYCTL_OFFSET 0x140 +#define NPCM_USB2PHYCTL_OFFSET 0x144 +#define NPCM_USBXPHYCTL_RS BIT(28) + +/* NPCM7xx Reset registers */ +#define NPCM_SWRSTR 0x14 +#define NPCM_SWRST BIT(2) + +#define NPCM_IPSRST1 0x20 +#define NPCM_IPSRST1_USBD1 BIT(5) +#define NPCM_IPSRST1_USBD2 BIT(8) +#define NPCM_IPSRST1_USBD3 BIT(25) +#define NPCM_IPSRST1_USBD4 BIT(22) +#define NPCM_IPSRST1_USBD5 BIT(23) +#define NPCM_IPSRST1_USBD6 BIT(24) + +#define NPCM_IPSRST2 0x24 +#define NPCM_IPSRST2_USB_HOST BIT(26) + +#define NPCM_IPSRST3 0x34 +#define NPCM_IPSRST3_USBD0 BIT(4) +#define NPCM_IPSRST3_USBD7 BIT(5) +#define NPCM_IPSRST3_USBD8 BIT(6) +#define NPCM_IPSRST3_USBD9 BIT(7) +#define NPCM_IPSRST3_USBPHY1 BIT(24) +#define NPCM_IPSRST3_USBPHY2 BIT(25) + +#define NPCM_RC_RESETS_PER_REG 32 +#define NPCM_MASK_RESETS GENMASK(4, 0) + +struct npcm_rc_data { + struct reset_controller_dev rcdev; + struct notifier_block restart_nb; + u32 sw_reset_number; + void __iomem *base; + spinlock_t lock; +}; + +#define to_rc_data(p) container_of(p, struct npcm_rc_data, rcdev) + +static int npcm_rc_restart(struct notifier_block *nb, unsigned long mode, + void *cmd) +{ + struct npcm_rc_data *rc = container_of(nb, struct npcm_rc_data, + restart_nb); + + writel(NPCM_SWRST << rc->sw_reset_number, rc->base + NPCM_SWRSTR); + mdelay(1000); + + pr_emerg("%s: unable to restart system\n", __func__); + + return NOTIFY_DONE; +} + +static int npcm_rc_setclear_reset(struct reset_controller_dev *rcdev, + unsigned long id, bool set) +{ + struct npcm_rc_data *rc = to_rc_data(rcdev); + unsigned int rst_bit = BIT(id & NPCM_MASK_RESETS); + unsigned int ctrl_offset = id >> 8; + unsigned long flags; + u32 stat; + + spin_lock_irqsave(&rc->lock, flags); + stat = readl(rc->base + ctrl_offset); + if (set) + writel(stat | rst_bit, rc->base + ctrl_offset); + else + writel(stat & ~rst_bit, rc->base + ctrl_offset); + spin_unlock_irqrestore(&rc->lock, flags); + + return 0; +} + +static int npcm_rc_assert(struct reset_controller_dev *rcdev, unsigned long id) +{ + return npcm_rc_setclear_reset(rcdev, id, true); +} + +static int npcm_rc_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return npcm_rc_setclear_reset(rcdev, id, false); +} + +static int npcm_rc_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct npcm_rc_data *rc = to_rc_data(rcdev); + unsigned int rst_bit = BIT(id & NPCM_MASK_RESETS); + unsigned int ctrl_offset = id >> 8; + + return (readl(rc->base + ctrl_offset) & rst_bit); +} + +static int npcm_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + unsigned int offset, bit; + + offset = reset_spec->args[0]; + if (offset != NPCM_IPSRST1 && offset != NPCM_IPSRST2 && + offset != NPCM_IPSRST3) { + dev_err(rcdev->dev, "Error reset register (0x%x)\n", offset); + return -EINVAL; + } + bit = reset_spec->args[1]; + if (bit >= NPCM_RC_RESETS_PER_REG) { + dev_err(rcdev->dev, "Error reset number (%d)\n", 
bit); + return -EINVAL; + } + + return (offset << 8) | bit; +} + +static const struct of_device_id npcm_rc_match[] = { + { .compatible = "nuvoton,npcm750-reset", + .data = (void *)"nuvoton,npcm750-gcr" }, + { } +}; + +/* + * The following procedure should be observed in USB PHY, USB device and + * USB host initialization at BMC boot. + */ +static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc) +{ + u32 mdlr, iprst1, iprst2, iprst3; + struct device *dev = &pdev->dev; + struct regmap *gcr_regmap; + u32 ipsrst1_bits = 0; + u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST; + u32 ipsrst3_bits = 0; + const char *gcr_dt; + + gcr_dt = (const char *) + of_match_device(dev->driver->of_match_table, dev)->data; + + gcr_regmap = syscon_regmap_lookup_by_compatible(gcr_dt); + if (IS_ERR(gcr_regmap)) { + dev_err(&pdev->dev, "Failed to find %s\n", gcr_dt); + return PTR_ERR(gcr_regmap); + } + + /* checking which USB device is enabled */ + regmap_read(gcr_regmap, NPCM_MDLR_OFFSET, &mdlr); + if (!(mdlr & NPCM_MDLR_USBD0)) + ipsrst3_bits |= NPCM_IPSRST3_USBD0; + if (!(mdlr & NPCM_MDLR_USBD1)) + ipsrst1_bits |= NPCM_IPSRST1_USBD1; + if (!(mdlr & NPCM_MDLR_USBD2_4)) + ipsrst1_bits |= (NPCM_IPSRST1_USBD2 | + NPCM_IPSRST1_USBD3 | + NPCM_IPSRST1_USBD4); + if (!(mdlr & NPCM_MDLR_USBD5_9)) { + ipsrst1_bits |= (NPCM_IPSRST1_USBD5 | + NPCM_IPSRST1_USBD6); + ipsrst3_bits |= (NPCM_IPSRST3_USBD7 | + NPCM_IPSRST3_USBD8 | + NPCM_IPSRST3_USBD9); + } + + /* assert reset USB PHY and USB devices */ + iprst1 = readl(rc->base + NPCM_IPSRST1); + iprst2 = readl(rc->base + NPCM_IPSRST2); + iprst3 = readl(rc->base + NPCM_IPSRST3); + + iprst1 |= ipsrst1_bits; + iprst2 |= ipsrst2_bits; + iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 | + NPCM_IPSRST3_USBPHY2); + + writel(iprst1, rc->base + NPCM_IPSRST1); + writel(iprst2, rc->base + NPCM_IPSRST2); + writel(iprst3, rc->base + NPCM_IPSRST3); + + /* clear USB PHY RS bit */ + regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET, + NPCM_USBXPHYCTL_RS, 0); + regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET, + NPCM_USBXPHYCTL_RS, 0); + + /* deassert reset USB PHY */ + iprst3 &= ~(NPCM_IPSRST3_USBPHY1 | NPCM_IPSRST3_USBPHY2); + writel(iprst3, rc->base + NPCM_IPSRST3); + + udelay(50); + + /* set USB PHY RS bit */ + regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET, + NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS); + regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET, + NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS); + + /* deassert reset USB devices */ + iprst1 &= ~ipsrst1_bits; + iprst2 &= ~ipsrst2_bits; + iprst3 &= ~ipsrst3_bits; + + writel(iprst1, rc->base + NPCM_IPSRST1); + writel(iprst2, rc->base + NPCM_IPSRST2); + writel(iprst3, rc->base + NPCM_IPSRST3); + + return 0; +} + +static const struct reset_control_ops npcm_rc_ops = { + .assert = npcm_rc_assert, + .deassert = npcm_rc_deassert, + .status = npcm_rc_status, +}; + +static int npcm_rc_probe(struct platform_device *pdev) +{ + struct npcm_rc_data *rc; + int ret; + + rc = devm_kzalloc(&pdev->dev, sizeof(*rc), GFP_KERNEL); + if (!rc) + return -ENOMEM; + + rc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rc->base)) + return PTR_ERR(rc->base); + + spin_lock_init(&rc->lock); + + rc->rcdev.owner = THIS_MODULE; + rc->rcdev.ops = &npcm_rc_ops; + rc->rcdev.of_node = pdev->dev.of_node; + rc->rcdev.of_reset_n_cells = 2; + rc->rcdev.of_xlate = npcm_reset_xlate; + + platform_set_drvdata(pdev, rc); + + ret = devm_reset_controller_register(&pdev->dev, &rc->rcdev); + if (ret) { + dev_err(&pdev->dev, "unable to register
device\n"); + return ret; + } + + if (npcm_usb_reset(pdev, rc)) + dev_warn(&pdev->dev, "NPCM USB reset failed, can cause issues with UDC and USB host\n"); + + if (!of_property_read_u32(pdev->dev.of_node, "nuvoton,sw-reset-number", + &rc->sw_reset_number)) { + if (rc->sw_reset_number && rc->sw_reset_number < 5) { + rc->restart_nb.priority = 192; + rc->restart_nb.notifier_call = npcm_rc_restart; + ret = register_restart_handler(&rc->restart_nb); + if (ret) + dev_warn(&pdev->dev, "failed to register restart handler\n"); + } + } + + return ret; +} + +static struct platform_driver npcm_rc_driver = { + .probe = npcm_rc_probe, + .driver = { + .name = "npcm-reset", + .of_match_table = npcm_rc_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(npcm_rc_driver); diff --git a/drivers/reset/reset-qcom-aoss.c b/drivers/reset/reset-qcom-aoss.c index 36db96750450..9333b923dda0 100644 --- a/drivers/reset/reset-qcom-aoss.c +++ b/drivers/reset/reset-qcom-aoss.c @@ -118,6 +118,7 @@ static const struct of_device_id qcom_aoss_reset_of_match[] = { { .compatible = "qcom,sdm845-aoss-cc", .data = &sdm845_aoss_desc }, {} }; +MODULE_DEVICE_TABLE(of, qcom_aoss_reset_of_match); static struct platform_driver qcom_aoss_reset_driver = { .probe = qcom_aoss_reset_probe, @@ -127,7 +128,7 @@ static struct platform_driver qcom_aoss_reset_driver = { }, }; -builtin_platform_driver(qcom_aoss_reset_driver); +module_platform_driver(qcom_aoss_reset_driver); MODULE_DESCRIPTION("Qualcomm AOSS Reset Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c index b46df80ec6c3..8d3a858e3b19 100644 --- a/drivers/reset/reset-scmi.c +++ b/drivers/reset/reset-scmi.c @@ -108,7 +108,7 @@ static int scmi_reset_probe(struct scmi_device *sdev) } static const struct scmi_device_id scmi_id_table[] = { - { SCMI_PROTOCOL_RESET }, + { SCMI_PROTOCOL_RESET, "reset" }, { }, }; MODULE_DEVICE_TABLE(scmi, scmi_id_table); diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c index 74e589f5dd6a..279e535bf5d8 100644 --- a/drivers/reset/reset-uniphier.c +++ b/drivers/reset/reset-uniphier.c @@ -193,8 +193,8 @@ static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = { #define UNIPHIER_PERI_RESET_FI2C(id, ch) \ UNIPHIER_RESETX((id), 0x114, 24 + (ch)) -#define UNIPHIER_PERI_RESET_SCSSI(id) \ - UNIPHIER_RESETX((id), 0x110, 17) +#define UNIPHIER_PERI_RESET_SCSSI(id, ch) \ + UNIPHIER_RESETX((id), 0x110, 17 + (ch)) #define UNIPHIER_PERI_RESET_MCSSI(id) \ UNIPHIER_RESETX((id), 0x114, 14) @@ -209,7 +209,7 @@ static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = { UNIPHIER_PERI_RESET_I2C(6, 2), UNIPHIER_PERI_RESET_I2C(7, 3), UNIPHIER_PERI_RESET_I2C(8, 4), - UNIPHIER_PERI_RESET_SCSSI(11), + UNIPHIER_PERI_RESET_SCSSI(11, 0), UNIPHIER_RESET_END, }; @@ -225,8 +225,11 @@ static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = { UNIPHIER_PERI_RESET_FI2C(8, 4), UNIPHIER_PERI_RESET_FI2C(9, 5), UNIPHIER_PERI_RESET_FI2C(10, 6), - UNIPHIER_PERI_RESET_SCSSI(11), - UNIPHIER_PERI_RESET_MCSSI(12), + UNIPHIER_PERI_RESET_SCSSI(11, 0), + UNIPHIER_PERI_RESET_SCSSI(12, 1), + UNIPHIER_PERI_RESET_SCSSI(13, 2), + UNIPHIER_PERI_RESET_SCSSI(14, 3), + UNIPHIER_PERI_RESET_MCSSI(15), UNIPHIER_RESET_END, }; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index acb82181f70f..fd4b5ac6ac5b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -7604,7 +7604,6 @@
megasas_resume(struct pci_dev *pdev) int rval; struct Scsi_Host *host; struct megasas_instance *instance; - int irq_flags = PCI_IRQ_LEGACY; u32 status_reg; instance = pci_get_drvdata(pdev); @@ -7673,16 +7672,15 @@ megasas_resume(struct pci_dev *pdev) atomic_set(&instance->ldio_outstanding, 0); /* Now re-enable MSI-X */ - if (instance->msix_vectors) { - irq_flags = PCI_IRQ_MSIX; - if (instance->smp_affinity_enable) - irq_flags |= PCI_IRQ_AFFINITY; - } - rval = pci_alloc_irq_vectors(instance->pdev, 1, - instance->msix_vectors ? - instance->msix_vectors : 1, irq_flags); - if (rval < 0) - goto fail_reenable_msix; + if (instance->msix_vectors) + megasas_alloc_irq_vectors(instance); + + if (!instance->msix_vectors) { + rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, + PCI_IRQ_LEGACY); + if (rval < 0) + goto fail_reenable_msix; + } megasas_setup_reply_map(instance); diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 98dcdbd146d5..d1d95f1a2c6a 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -2377,7 +2377,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->buf_valid_size = sizeof(*resp); } else PM8001_IO_DBG(pm8001_ha, - pm8001_printk("response to large\n")); + pm8001_printk("response too large\n")); } if (pm8001_dev) pm8001_dev->running_req--; diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index e5500bba06ca..88a56e8480f7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -2519,12 +2519,6 @@ qla83xx_fw_dump_failed: /* Driver Debug Functions. */ /****************************************************************************/ -static inline int -ql_mask_match(uint level) -{ - return (level & ql2xextended_error_logging) == level; -} - /* * This function is for formatting and logging debug information. * It is to be used when vha is available. 
It formats the message diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index bb01b680ce9f..433e95502808 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -374,3 +374,9 @@ extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, struct qla_hw_data *); extern int qla24xx_soft_reset(struct qla_hw_data *); + +static inline int +ql_mask_match(uint level) +{ + return (level & ql2xextended_error_logging) == level; +} diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index e7bad0bfffda..e40705d38cea 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1939,6 +1939,18 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, inbuf = (uint32_t *)&sts->nvme_ersp_data; outbuf = (uint32_t *)fd->rspaddr; iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); + if (unlikely(iocb->u.nvme.rsp_pyld_len > + sizeof(struct nvme_fc_ersp_iu))) { + if (ql_mask_match(ql_dbg_io)) { + WARN_ONCE(1, "Unexpected response payload length %u.\n", + iocb->u.nvme.rsp_pyld_len); + ql_log(ql_log_warn, fcport->vha, 0x5100, + "Unexpected response payload length %u.\n", + iocb->u.nvme.rsp_pyld_len); + } + iocb->u.nvme.rsp_pyld_len = + sizeof(struct nvme_fc_ersp_iu); + } iter = iocb->u.nvme.rsp_pyld_len >> 2; for (; iter; iter--) *outbuf++ = swab32(*inbuf++); diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index dde2eb02f76f..cfe380348bf0 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -546,7 +546,7 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8 lun) { if (!dev_info || !dev_info->max_lu_supported) { - pr_err("Max General LU supported by UFS isn't initilized\n"); + pr_err("Max General LU supported by UFS isn't initialized\n"); return false; } diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index 096a83cf0caf..55a1f57a4d8c 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c @@ -66,8 +66,9 @@ static const struct at91_soc __initconst socs[] = { AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"), AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"), AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"), - AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, - "sam9x60", "sam9x60"), +#endif +#ifdef CONFIG_SOC_SAM9X60 + AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"), #endif #ifdef CONFIG_SOC_SAMA5 AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH, diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c index d326915e0f40..61731e01f94b 100644 --- a/drivers/soc/bcm/brcmstb/biuctrl.c +++ b/drivers/soc/bcm/brcmstb/biuctrl.c @@ -63,7 +63,7 @@ static const int b15_cpubiuctrl_regs[] = { [CPU_WRITEBACK_CTRL_REG] = -1, }; -/* Odd cases, e.g: 7260 */ +/* Odd cases, e.g: 7260A0 */ static const int b53_cpubiuctrl_no_wb_regs[] = { [CPU_CREDIT_REG] = 0x0b0, [CPU_MCP_FLOW_REG] = 0x0b4, @@ -76,6 +76,12 @@ static const int b53_cpubiuctrl_regs[] = { [CPU_WRITEBACK_CTRL_REG] = 0x22c, }; +static const int a72_cpubiuctrl_regs[] = { + [CPU_CREDIT_REG] = 0x18, + [CPU_MCP_FLOW_REG] = 0x1c, + [CPU_WRITEBACK_CTRL_REG] = 0x20, +}; + #define NUM_CPU_BIUCTRL_REGS 3 static int __init mcp_write_pairing_set(void) @@ -101,25 +107,29 @@ static int __init mcp_write_pairing_set(void) return 0; } -static const u32 b53_mach_compat[] = { 
+static const u32 a72_b53_mach_compat[] = { + 0x7211, + 0x7216, + 0x7255, + 0x7260, 0x7268, 0x7271, 0x7278, }; -static void __init mcp_b53_set(void) +static void __init mcp_a72_b53_set(void) { unsigned int i; u32 reg; reg = brcmstb_get_family_id(); - for (i = 0; i < ARRAY_SIZE(b53_mach_compat); i++) { - if (BRCM_ID(reg) == b53_mach_compat[i]) + for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) { + if (BRCM_ID(reg) == a72_b53_mach_compat[i]) break; } - if (i == ARRAY_SIZE(b53_mach_compat)) + if (i == ARRAY_SIZE(a72_b53_mach_compat)) return; /* Set all 3 MCP interfaces to 8 credits */ @@ -157,6 +167,7 @@ static void __init mcp_b53_set(void) static int __init setup_hifcpubiuctrl_regs(struct device_node *np) { struct device_node *cpu_dn; + u32 family_id; int ret = 0; cpubiuctrl_base = of_iomap(np, 0); @@ -179,13 +190,16 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np) cpubiuctrl_regs = b15_cpubiuctrl_regs; else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53")) cpubiuctrl_regs = b53_cpubiuctrl_regs; + else if (of_device_is_compatible(cpu_dn, "arm,cortex-a72")) + cpubiuctrl_regs = a72_cpubiuctrl_regs; else { pr_err("unsupported CPU\n"); ret = -EINVAL; } of_node_put(cpu_dn); - if (BRCM_ID(brcmstb_get_family_id()) == 0x7260) + family_id = brcmstb_get_family_id(); + if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0) cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs; out: of_node_put(np); @@ -248,7 +262,7 @@ static int __init brcmstb_biuctrl_init(void) return ret; } - mcp_b53_set(); + mcp_a72_b53_set(); #ifdef CONFIG_PM_SLEEP register_syscore_ops(&brcmstb_cpu_credit_syscore_ops); #endif diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig index cfa4b2939992..357c5800b112 100644 --- a/drivers/soc/fsl/qe/Kconfig +++ b/drivers/soc/fsl/qe/Kconfig @@ -5,7 +5,8 @@ config QUICC_ENGINE bool "QUICC Engine (QE) framework support" - depends on FSL_SOC && PPC32 + depends on OF && HAS_IOMEM + depends on PPC || ARM || ARM64 || COMPILE_TEST select GENERIC_ALLOCATOR select CRC32 help diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c index f0c29ed8f0ff..ed75198ed254 100644 --- a/drivers/soc/fsl/qe/gpio.c +++ b/drivers/soc/fsl/qe/gpio.c @@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) container_of(mm_gc, struct qe_gpio_chip, mm_gc); struct qe_pio_regs __iomem *regs = mm_gc->regs; - qe_gc->cpdata = in_be32(®s->cpdata); + qe_gc->cpdata = qe_ioread32be(®s->cpdata); qe_gc->saved_regs.cpdata = qe_gc->cpdata; - qe_gc->saved_regs.cpdir1 = in_be32(®s->cpdir1); - qe_gc->saved_regs.cpdir2 = in_be32(®s->cpdir2); - qe_gc->saved_regs.cppar1 = in_be32(®s->cppar1); - qe_gc->saved_regs.cppar2 = in_be32(®s->cppar2); - qe_gc->saved_regs.cpodr = in_be32(®s->cpodr); + qe_gc->saved_regs.cpdir1 = qe_ioread32be(®s->cpdir1); + qe_gc->saved_regs.cpdir2 = qe_ioread32be(®s->cpdir2); + qe_gc->saved_regs.cppar1 = qe_ioread32be(®s->cppar1); + qe_gc->saved_regs.cppar2 = qe_ioread32be(®s->cppar2); + qe_gc->saved_regs.cpodr = qe_ioread32be(®s->cpodr); } static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) @@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) struct qe_pio_regs __iomem *regs = mm_gc->regs; u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); - return !!(in_be32(®s->cpdata) & pin_mask); + return !!(qe_ioread32be(®s->cpdata) & pin_mask); } static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) @@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) 
else qe_gc->cpdata &= ~pin_mask; - out_be32(®s->cpdata, qe_gc->cpdata); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); spin_unlock_irqrestore(&qe_gc->lock, flags); } @@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc, } } - out_be32(®s->cpdata, qe_gc->cpdata); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); spin_unlock_irqrestore(&qe_gc->lock, flags); } @@ -160,7 +160,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index) { struct qe_pin *qe_pin; struct gpio_chip *gc; - struct of_mm_gpio_chip *mm_gc; struct qe_gpio_chip *qe_gc; int err; unsigned long flags; @@ -186,7 +185,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index) goto err0; } - mm_gc = to_of_mm_gpio_chip(gc); qe_gc = gpiochip_get_data(gc); spin_lock_irqsave(&qe_gc->lock, flags); @@ -255,11 +253,15 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin) spin_lock_irqsave(&qe_gc->lock, flags); if (second_reg) { - clrsetbits_be32(®s->cpdir2, mask2, sregs->cpdir2 & mask2); - clrsetbits_be32(®s->cppar2, mask2, sregs->cppar2 & mask2); + qe_clrsetbits_be32(®s->cpdir2, mask2, + sregs->cpdir2 & mask2); + qe_clrsetbits_be32(®s->cppar2, mask2, + sregs->cppar2 & mask2); } else { - clrsetbits_be32(®s->cpdir1, mask2, sregs->cpdir1 & mask2); - clrsetbits_be32(®s->cppar1, mask2, sregs->cppar1 & mask2); + qe_clrsetbits_be32(®s->cpdir1, mask2, + sregs->cpdir1 & mask2); + qe_clrsetbits_be32(®s->cppar1, mask2, + sregs->cppar1 & mask2); } if (sregs->cpdata & mask1) @@ -267,8 +269,8 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin) else qe_gc->cpdata &= ~mask1; - out_be32(®s->cpdata, qe_gc->cpdata); - clrsetbits_be32(®s->cpodr, mask1, sregs->cpodr & mask1); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); + qe_clrsetbits_be32(®s->cpodr, mask1, sregs->cpodr & mask1); spin_unlock_irqrestore(&qe_gc->lock, flags); } diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 417df7e19281..96c2057d8d8e 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -22,16 +22,12 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/ioport.h> +#include <linux/iopoll.h> #include <linux/crc32.h> #include <linux/mod_devicetable.h> #include <linux/of_platform.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/pgtable.h> #include <soc/fsl/qe/immap_qe.h> #include <soc/fsl/qe/qe.h> -#include <asm/prom.h> -#include <asm/rheap.h> static void qe_snums_init(void); static int qe_sdma_init(void); @@ -108,11 +104,12 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) { unsigned long flags; u8 mcn_shift = 0, dev_shift = 0; - u32 ret; + u32 val; + int ret; spin_lock_irqsave(&qe_lock, flags); if (cmd == QE_RESET) { - out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG)); + qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr); } else { if (cmd == QE_ASSIGN_PAGE) { /* Here device is the SNUM, not sub-block */ @@ -129,20 +126,18 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) mcn_shift = QE_CR_MCN_NORMAL_SHIFT; } - out_be32(&qe_immr->cp.cecdr, cmd_input); - out_be32(&qe_immr->cp.cecr, - (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32) - mcn_protocol << mcn_shift)); + qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr); + qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift), + &qe_immr->cp.cecr); } /* wait for the QE_CR_FLG to clear */ - ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0, - 100, 0); - /* On timeout (e.g. 
failure), the expression will be false (ret == 0), - otherwise it will be true (ret == 1). */ + ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val, + (val & QE_CR_FLG) == 0, 0, 100); + /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */ spin_unlock_irqrestore(&qe_lock, flags); - return ret == 1; + return ret == 0; } EXPORT_SYMBOL(qe_issue_cmd); @@ -164,8 +159,7 @@ static unsigned int brg_clk = 0; unsigned int qe_get_brg_clk(void) { struct device_node *qe; - int size; - const u32 *prop; + u32 brg; unsigned int mod; if (brg_clk) @@ -175,9 +169,8 @@ unsigned int qe_get_brg_clk(void) if (!qe) return brg_clk; - prop = of_get_property(qe, "brg-frequency", &size); - if (prop && size == sizeof(*prop)) - brg_clk = *prop; + if (!of_property_read_u32(qe, "brg-frequency", &brg)) + brg_clk = brg; of_node_put(qe); @@ -197,6 +190,14 @@ EXPORT_SYMBOL(qe_get_brg_clk); #define PVR_VER_836x 0x8083 #define PVR_VER_832x 0x8084 +static bool qe_general4_errata(void) +{ +#ifdef CONFIG_PPC32 + return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x); +#endif + return false; +} + /* Program the BRG to the given sampling rate and multiplier * * @brg: the BRG, QE_BRG1 - QE_BRG16 @@ -223,14 +224,14 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier) /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says that the BRG divisor must be even if you're not using divide-by-16 mode. */ - if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x)) + if (qe_general4_errata()) if (!div16 && (divisor & 1) && (divisor > 3)) divisor++; tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE | div16; - out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval); + qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]); return 0; } @@ -364,22 +365,20 @@ EXPORT_SYMBOL(qe_put_snum); static int qe_sdma_init(void) { struct sdma __iomem *sdma = &qe_immr->sdma; - static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM; - - if (!sdma) - return -ENODEV; + static s32 sdma_buf_offset = -ENOMEM; /* allocate 2 internal temporary buffers (512 bytes size each) for * the SDMA */ - if (IS_ERR_VALUE(sdma_buf_offset)) { + if (sdma_buf_offset < 0) { sdma_buf_offset = qe_muram_alloc(512 * 2, 4096); - if (IS_ERR_VALUE(sdma_buf_offset)) + if (sdma_buf_offset < 0) return -ENOMEM; } - out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK); - out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | - (0x1 << QE_SDMR_CEN_SHIFT))); + qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, + &sdma->sdebcr); + qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)), + &sdma->sdmr); return 0; } @@ -417,14 +416,14 @@ static void qe_upload_microcode(const void *base, "uploading microcode '%s'\n", ucode->id); /* Use auto-increment */ - out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) | - QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR); + qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR, + &qe_immr->iram.iadd); for (i = 0; i < be32_to_cpu(ucode->count); i++) - out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i])); + qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); /* Set I-RAM Ready Register */ - out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY)); + qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready); } /* @@ -509,7 +508,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware) * If the microcode calls for it, split the I-RAM. 
*/ if (!firmware->split) - setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); + qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); if (firmware->soc.model) printk(KERN_INFO @@ -543,11 +542,13 @@ int qe_upload_firmware(const struct qe_firmware *firmware) u32 trap = be32_to_cpu(ucode->traps[j]); if (trap) - out_be32(&qe_immr->rsp[i].tibcr[j], trap); + qe_iowrite32be(trap, + &qe_immr->rsp[i].tibcr[j]); } /* Enable traps */ - out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr)); + qe_iowrite32be(be32_to_cpu(ucode->eccr), + &qe_immr->rsp[i].eccr); } qe_firmware_uploaded = 1; @@ -565,11 +566,9 @@ EXPORT_SYMBOL(qe_upload_firmware); struct qe_firmware_info *qe_get_firmware_info(void) { static int initialized; - struct property *prop; struct device_node *qe; struct device_node *fw = NULL; const char *sprop; - unsigned int i; /* * If we haven't checked yet, and a driver hasn't uploaded a firmware @@ -603,20 +602,11 @@ struct qe_firmware_info *qe_get_firmware_info(void) strlcpy(qe_firmware_info.id, sprop, sizeof(qe_firmware_info.id)); - prop = of_find_property(fw, "extended-modes", NULL); - if (prop && (prop->length == sizeof(u64))) { - const u64 *iprop = prop->value; - - qe_firmware_info.extended_modes = *iprop; - } + of_property_read_u64(fw, "extended-modes", + &qe_firmware_info.extended_modes); - prop = of_find_property(fw, "virtual-traps", NULL); - if (prop && (prop->length == 32)) { - const u32 *iprop = prop->value; - - for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++) - qe_firmware_info.vtraps[i] = iprop[i]; - } + of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps, + ARRAY_SIZE(qe_firmware_info.vtraps)); of_node_put(fw); @@ -627,17 +617,13 @@ EXPORT_SYMBOL(qe_get_firmware_info); unsigned int qe_get_num_of_risc(void) { struct device_node *qe; - int size; unsigned int num_of_risc = 0; - const u32 *prop; qe = qe_get_device_node(); if (!qe) return num_of_risc; - prop = of_get_property(qe, "fsl,qe-num-riscs", &size); - if (prop && size == sizeof(*prop)) - num_of_risc = *prop; + of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc); of_node_put(qe); diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c index 83e85e61669f..a81a1a79f1ca 100644 --- a/drivers/soc/fsl/qe/qe_common.c +++ b/drivers/soc/fsl/qe/qe_common.c @@ -32,7 +32,7 @@ static phys_addr_t muram_pbase; struct muram_block { struct list_head head; - unsigned long start; + s32 start; int size; }; @@ -110,34 +110,30 @@ out_muram: * @algo: algorithm for alloc. * @data: data for genalloc's algorithm. * - * This function returns an offset into the muram area. + * This function returns a non-negative offset into the muram area, or + * a negative errno on failure. 
*/ -static unsigned long cpm_muram_alloc_common(unsigned long size, - genpool_algo_t algo, void *data) +static s32 cpm_muram_alloc_common(unsigned long size, + genpool_algo_t algo, void *data) { struct muram_block *entry; - unsigned long start; - - if (!muram_pool && cpm_muram_init()) - goto out2; + s32 start; + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return -ENOMEM; start = gen_pool_alloc_algo(muram_pool, size, algo, data); - if (!start) - goto out2; + if (!start) { + kfree(entry); + return -ENOMEM; + } start = start - GENPOOL_OFFSET; memset_io(cpm_muram_addr(start), 0, size); - entry = kmalloc(sizeof(*entry), GFP_ATOMIC); - if (!entry) - goto out1; entry->start = start; entry->size = size; list_add(&entry->head, &muram_block_list); return start; -out1: - gen_pool_free(muram_pool, start, size); -out2: - return (unsigned long)-ENOMEM; } /* @@ -145,13 +141,14 @@ out2: * @size: number of bytes to allocate * @align: requested alignment, in bytes * - * This function returns an offset into the muram area. + * This function returns a non-negative offset into the muram area, or + * a negative errno on failure. * Use cpm_dpram_addr() to get the virtual address of the area. * Use cpm_muram_free() to free the allocation. */ -unsigned long cpm_muram_alloc(unsigned long size, unsigned long align) +s32 cpm_muram_alloc(unsigned long size, unsigned long align) { - unsigned long start; + s32 start; unsigned long flags; struct genpool_data_align muram_pool_data; @@ -168,12 +165,15 @@ EXPORT_SYMBOL(cpm_muram_alloc); * cpm_muram_free - free a chunk of multi-user ram * @offset: The beginning of the chunk as returned by cpm_muram_alloc(). */ -int cpm_muram_free(unsigned long offset) +void cpm_muram_free(s32 offset) { unsigned long flags; int size; struct muram_block *tmp; + if (offset < 0) + return; + size = 0; spin_lock_irqsave(&cpm_muram_lock, flags); list_for_each_entry(tmp, &muram_block_list, head) { @@ -186,7 +186,6 @@ int cpm_muram_free(unsigned long offset) } gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size); spin_unlock_irqrestore(&cpm_muram_lock, flags); - return size; } EXPORT_SYMBOL(cpm_muram_free); @@ -194,13 +193,14 @@ EXPORT_SYMBOL(cpm_muram_free); * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram * @offset: offset of allocation start address * @size: number of bytes to allocate - * This function returns an offset into the muram area + * This function returns @offset if the area was available, a negative + * errno otherwise. * Use cpm_dpram_addr() to get the virtual address of the area. * Use cpm_muram_free() to free the allocation. 
*/ -unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) +s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) { - unsigned long start; + s32 start; unsigned long flags; struct genpool_data_fixed muram_pool_data_fixed; diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index 9bac546998d3..0dd5bdb04a14 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> +#include <linux/irq.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/stddef.h> @@ -24,9 +25,57 @@ #include <linux/spinlock.h> #include <asm/irq.h> #include <asm/io.h> -#include <soc/fsl/qe/qe_ic.h> +#include <soc/fsl/qe/qe.h> + +#define NR_QE_IC_INTS 64 + +/* QE IC registers offset */ +#define QEIC_CICR 0x00 +#define QEIC_CIVEC 0x04 +#define QEIC_CIPXCC 0x10 +#define QEIC_CIPYCC 0x14 +#define QEIC_CIPWCC 0x18 +#define QEIC_CIPZCC 0x1c +#define QEIC_CIMR 0x20 +#define QEIC_CRIMR 0x24 +#define QEIC_CIPRTA 0x30 +#define QEIC_CIPRTB 0x34 +#define QEIC_CHIVEC 0x60 + +struct qe_ic { + /* Control registers offset */ + u32 __iomem *regs; + + /* The remapper for this QEIC */ + struct irq_domain *irqhost; + + /* The "linux" controller struct */ + struct irq_chip hc_irq; + + /* VIRQ numbers of QE high/low irqs */ + unsigned int virq_high; + unsigned int virq_low; +}; -#include "qe_ic.h" +/* + * QE interrupt controller internal structure + */ +struct qe_ic_info { + /* Location of this source at the QIMR register */ + u32 mask; + + /* Mask register offset */ + u32 mask_reg; + + /* + * For grouped interrupts sources - the interrupt code as + * appears at the group priority register + */ + u8 pri_code; + + /* Group priority register offset */ + u32 pri_reg; +}; static DEFINE_RAW_SPINLOCK(qe_ic_lock); @@ -171,15 +220,15 @@ static struct qe_ic_info qe_ic_info[] = { }, }; -static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg) +static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg) { - return in_be32(base + (reg >> 2)); + return qe_ioread32be(base + (reg >> 2)); } -static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg, +static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg, u32 value) { - out_be32(base + (reg >> 2), value); + qe_iowrite32be(value, base + (reg >> 2)); } static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) @@ -281,8 +330,8 @@ static const struct irq_domain_ops qe_ic_host_ops = { .xlate = irq_domain_xlate_onetwocell, }; -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) +/* Return an interrupt vector or 0 if no interrupt is pending. */ +static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) { int irq; @@ -292,13 +341,13 @@ unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26; if (irq == 0) - return NO_IRQ; + return 0; return irq_linear_revmap(qe_ic->irqhost, irq); } -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) +/* Return an interrupt vector or 0 if no interrupt is pending. 
*/ +static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) { int irq; @@ -308,18 +357,60 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26; if (irq == 0) - return NO_IRQ; + return 0; return irq_linear_revmap(qe_ic->irqhost, irq); } -void __init qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(struct irq_desc *desc), - void (*high_handler)(struct irq_desc *desc)) +static void qe_ic_cascade_low(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); + struct irq_chip *chip = irq_desc_get_chip(desc); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); +} + +static void qe_ic_cascade_high(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); + struct irq_chip *chip = irq_desc_get_chip(desc); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); +} + +static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) { + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq; + struct irq_chip *chip = irq_desc_get_chip(desc); + + cascade_irq = qe_ic_get_high_irq(qe_ic); + if (cascade_irq == 0) + cascade_irq = qe_ic_get_low_irq(qe_ic); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +static void __init qe_ic_init(struct device_node *node) +{ + void (*low_handler)(struct irq_desc *desc); + void (*high_handler)(struct irq_desc *desc); struct qe_ic *qe_ic; struct resource res; - u32 temp = 0, ret, high_active = 0; + u32 ret; ret = of_address_to_resource(node, 0, &res); if (ret) @@ -343,166 +434,42 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags, qe_ic->virq_high = irq_of_parse_and_map(node, 0); qe_ic->virq_low = irq_of_parse_and_map(node, 1); - if (qe_ic->virq_low == NO_IRQ) { + if (!qe_ic->virq_low) { printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); kfree(qe_ic); return; } - - /* default priority scheme is grouped. If spread mode is */ - /* required, configure cicr accordingly. 
*/ - if (flags & QE_IC_SPREADMODE_GRP_W) - temp |= CICR_GWCC; - if (flags & QE_IC_SPREADMODE_GRP_X) - temp |= CICR_GXCC; - if (flags & QE_IC_SPREADMODE_GRP_Y) - temp |= CICR_GYCC; - if (flags & QE_IC_SPREADMODE_GRP_Z) - temp |= CICR_GZCC; - if (flags & QE_IC_SPREADMODE_GRP_RISCA) - temp |= CICR_GRTA; - if (flags & QE_IC_SPREADMODE_GRP_RISCB) - temp |= CICR_GRTB; - - /* choose destination signal for highest priority interrupt */ - if (flags & QE_IC_HIGH_SIGNAL) { - temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT); - high_active = 1; + if (qe_ic->virq_high != qe_ic->virq_low) { + low_handler = qe_ic_cascade_low; + high_handler = qe_ic_cascade_high; + } else { + low_handler = qe_ic_cascade_muxed_mpic; + high_handler = NULL; } - qe_ic_write(qe_ic->regs, QEIC_CICR, temp); + qe_ic_write(qe_ic->regs, QEIC_CICR, 0); irq_set_handler_data(qe_ic->virq_low, qe_ic); irq_set_chained_handler(qe_ic->virq_low, low_handler); - if (qe_ic->virq_high != NO_IRQ && - qe_ic->virq_high != qe_ic->virq_low) { + if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) { irq_set_handler_data(qe_ic->virq_high, qe_ic); irq_set_chained_handler(qe_ic->virq_high, high_handler); } } -void qe_ic_set_highest_priority(unsigned int virq, int high) +static int __init qe_ic_of_init(void) { - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp = 0; - - temp = qe_ic_read(qe_ic->regs, QEIC_CICR); - - temp &= ~CICR_HP_MASK; - temp |= src << CICR_HP_SHIFT; - - temp &= ~CICR_HPIT_MASK; - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT; - - qe_ic_write(qe_ic->regs, QEIC_CICR, temp); -} - -/* Set Priority level within its group, from 1 to 8 */ -int qe_ic_set_priority(unsigned int virq, unsigned int priority) -{ - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp; + struct device_node *np; - if (priority > 8 || priority == 0) - return -EINVAL; - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info), - "%s: Invalid hw irq number for QEIC\n", __func__)) - return -EINVAL; - if (qe_ic_info[src].pri_reg == 0) - return -EINVAL; - - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg); - - if (priority < 4) { - temp &= ~(0x7 << (32 - priority * 3)); - temp |= qe_ic_info[src].pri_code << (32 - priority * 3); - } else { - temp &= ~(0x7 << (24 - priority * 3)); - temp |= qe_ic_info[src].pri_code << (24 - priority * 3); + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); + if (!np) { + np = of_find_node_by_type(NULL, "qeic"); + if (!np) + return -ENODEV; } - - qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp); - + qe_ic_init(np); + of_node_put(np); return 0; } - -/* Set a QE priority to use high irq, only priority 1~2 can use high irq */ -int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high) -{ - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp, control_reg = QEIC_CICNR, shift = 0; - - if (priority > 2 || priority == 0) - return -EINVAL; - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info), - "%s: Invalid hw irq number for QEIC\n", __func__)) - return -EINVAL; - - switch (qe_ic_info[src].pri_reg) { - case QEIC_CIPZCC: - shift = CICNR_ZCC1T_SHIFT; - break; - case QEIC_CIPWCC: - shift = CICNR_WCC1T_SHIFT; - break; - case QEIC_CIPYCC: - shift = CICNR_YCC1T_SHIFT; - break; - case QEIC_CIPXCC: - shift = CICNR_XCC1T_SHIFT; - break; - case QEIC_CIPRTA: - shift = CRICR_RTA1T_SHIFT; - control_reg = QEIC_CRICR; - break; - case QEIC_CIPRTB: - shift = CRICR_RTB1T_SHIFT; - control_reg = QEIC_CRICR; - 
break; - default: - return -EINVAL; - } - - shift += (2 - priority) * 2; - temp = qe_ic_read(qe_ic->regs, control_reg); - temp &= ~(SIGNAL_MASK << shift); - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift; - qe_ic_write(qe_ic->regs, control_reg, temp); - - return 0; -} - -static struct bus_type qe_ic_subsys = { - .name = "qe_ic", - .dev_name = "qe_ic", -}; - -static struct device device_qe_ic = { - .id = 0, - .bus = &qe_ic_subsys, -}; - -static int __init init_qe_ic_sysfs(void) -{ - int rc; - - printk(KERN_DEBUG "Registering qe_ic with sysfs...\n"); - - rc = subsys_system_register(&qe_ic_subsys, NULL); - if (rc) { - printk(KERN_ERR "Failed registering qe_ic sys class\n"); - return -ENODEV; - } - rc = device_register(&device_qe_ic); - if (rc) { - printk(KERN_ERR "Failed registering qe_ic sys device\n"); - return -ENODEV; - } - return 0; -} - -subsys_initcall(init_qe_ic_sysfs); +subsys_initcall(qe_ic_of_init); diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h deleted file mode 100644 index 08c695672a03..000000000000 --- a/drivers/soc/fsl/qe/qe_ic.h +++ /dev/null @@ -1,99 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * drivers/soc/fsl/qe/qe_ic.h - * - * QUICC ENGINE Interrupt Controller Header - * - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. - * - * Author: Li Yang <leoli@freescale.com> - * Based on code from Shlomi Gridish <gridish@freescale.com> - */ -#ifndef _POWERPC_SYSDEV_QE_IC_H -#define _POWERPC_SYSDEV_QE_IC_H - -#include <soc/fsl/qe/qe_ic.h> - -#define NR_QE_IC_INTS 64 - -/* QE IC registers offset */ -#define QEIC_CICR 0x00 -#define QEIC_CIVEC 0x04 -#define QEIC_CRIPNR 0x08 -#define QEIC_CIPNR 0x0c -#define QEIC_CIPXCC 0x10 -#define QEIC_CIPYCC 0x14 -#define QEIC_CIPWCC 0x18 -#define QEIC_CIPZCC 0x1c -#define QEIC_CIMR 0x20 -#define QEIC_CRIMR 0x24 -#define QEIC_CICNR 0x28 -#define QEIC_CIPRTA 0x30 -#define QEIC_CIPRTB 0x34 -#define QEIC_CRICR 0x3c -#define QEIC_CHIVEC 0x60 - -/* Interrupt priority registers */ -#define CIPCC_SHIFT_PRI0 29 -#define CIPCC_SHIFT_PRI1 26 -#define CIPCC_SHIFT_PRI2 23 -#define CIPCC_SHIFT_PRI3 20 -#define CIPCC_SHIFT_PRI4 13 -#define CIPCC_SHIFT_PRI5 10 -#define CIPCC_SHIFT_PRI6 7 -#define CIPCC_SHIFT_PRI7 4 - -/* CICR priority modes */ -#define CICR_GWCC 0x00040000 -#define CICR_GXCC 0x00020000 -#define CICR_GYCC 0x00010000 -#define CICR_GZCC 0x00080000 -#define CICR_GRTA 0x00200000 -#define CICR_GRTB 0x00400000 -#define CICR_HPIT_SHIFT 8 -#define CICR_HPIT_MASK 0x00000300 -#define CICR_HP_SHIFT 24 -#define CICR_HP_MASK 0x3f000000 - -/* CICNR */ -#define CICNR_WCC1T_SHIFT 20 -#define CICNR_ZCC1T_SHIFT 28 -#define CICNR_YCC1T_SHIFT 12 -#define CICNR_XCC1T_SHIFT 4 - -/* CRICR */ -#define CRICR_RTA1T_SHIFT 20 -#define CRICR_RTB1T_SHIFT 28 - -/* Signal indicator */ -#define SIGNAL_MASK 3 -#define SIGNAL_HIGH 2 -#define SIGNAL_LOW 0 - -struct qe_ic { - /* Control registers offset */ - volatile u32 __iomem *regs; - - /* The remapper for this QEIC */ - struct irq_domain *irqhost; - - /* The "linux" controller struct */ - struct irq_chip hc_irq; - - /* VIRQ numbers of QE high/low irqs */ - unsigned int virq_high; - unsigned int virq_low; -}; - -/* - * QE interrupt controller internal structure - */ -struct qe_ic_info { - u32 mask; /* location of this source at the QIMR register. 
*/ - u32 mask_reg; /* Mask register offset */ - u8 pri_code; /* for grouped interrupts sources - the interrupt - code as appears at the group priority register */ - u32 pri_reg; /* Group priority register offset */ -}; - -#endif /* _POWERPC_SYSDEV_QE_IC_H */ diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c index 3657e296a8a2..11ea08e97db7 100644 --- a/drivers/soc/fsl/qe/qe_io.c +++ b/drivers/soc/fsl/qe/qe_io.c @@ -18,8 +18,6 @@ #include <asm/io.h> #include <soc/fsl/qe/qe.h> -#include <asm/prom.h> -#include <sysdev/fsl_soc.h> #undef DEBUG @@ -30,7 +28,7 @@ int par_io_init(struct device_node *np) { struct resource res; int ret; - const u32 *num_ports; + u32 num_ports; /* Map Parallel I/O ports registers */ ret = of_address_to_resource(np, 0, &res); @@ -38,9 +36,8 @@ int par_io_init(struct device_node *np) return ret; par_io = ioremap(res.start, resource_size(&res)); - num_ports = of_get_property(np, "num-ports", NULL); - if (num_ports) - num_par_io_ports = *num_ports; + if (!of_property_read_u32(np, "num-ports", &num_ports)) + num_par_io_ports = num_ports; return 0; } @@ -57,16 +54,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir, pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1))); /* Set open drain, if required */ - tmp_val = in_be32(&par_io->cpodr); + tmp_val = qe_ioread32be(&par_io->cpodr); if (open_drain) - out_be32(&par_io->cpodr, pin_mask1bit | tmp_val); + qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr); else - out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val); + qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr); /* define direction */ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? - in_be32(&par_io->cpdir2) : - in_be32(&par_io->cpdir1); + qe_ioread32be(&par_io->cpdir2) : + qe_ioread32be(&par_io->cpdir1); /* get all bits mask for 2 bit per port */ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS - @@ -78,34 +75,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir, /* clear and set 2 bits mask */ if (pin > (QE_PIO_PINS / 2) - 1) { - out_be32(&par_io->cpdir2, - ~pin_mask2bits & tmp_val); + qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2); tmp_val &= ~pin_mask2bits; - out_be32(&par_io->cpdir2, new_mask2bits | tmp_val); + qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2); } else { - out_be32(&par_io->cpdir1, - ~pin_mask2bits & tmp_val); + qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1); tmp_val &= ~pin_mask2bits; - out_be32(&par_io->cpdir1, new_mask2bits | tmp_val); + qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1); } /* define pin assignment */ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? 
- in_be32(&par_io->cppar2) : - in_be32(&par_io->cppar1); + qe_ioread32be(&par_io->cppar2) : + qe_ioread32be(&par_io->cppar1); new_mask2bits = (u32) (assignment << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2)); /* clear and set 2 bits mask */ if (pin > (QE_PIO_PINS / 2) - 1) { - out_be32(&par_io->cppar2, - ~pin_mask2bits & tmp_val); + qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2); tmp_val &= ~pin_mask2bits; - out_be32(&par_io->cppar2, new_mask2bits | tmp_val); + qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2); } else { - out_be32(&par_io->cppar1, - ~pin_mask2bits & tmp_val); + qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1); tmp_val &= ~pin_mask2bits; - out_be32(&par_io->cppar1, new_mask2bits | tmp_val); + qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1); } } EXPORT_SYMBOL(__par_io_config_pin); @@ -133,12 +126,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val) /* calculate pin location */ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin)); - tmp_val = in_be32(&par_io[port].cpdata); + tmp_val = qe_ioread32be(&par_io[port].cpdata); if (val == 0) /* clear */ - out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val); + qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata); else /* set */ - out_be32(&par_io[port].cpdata, pin_mask | tmp_val); + qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata); return 0; } @@ -147,23 +140,20 @@ EXPORT_SYMBOL(par_io_data_set); int par_io_of_config(struct device_node *np) { struct device_node *pio; - const phandle *ph; int pio_map_len; - const unsigned int *pio_map; + const __be32 *pio_map; if (par_io == NULL) { printk(KERN_ERR "par_io not initialized\n"); return -1; } - ph = of_get_property(np, "pio-handle", NULL); - if (ph == NULL) { + pio = of_parse_phandle(np, "pio-handle", 0); + if (pio == NULL) { printk(KERN_ERR "pio-handle not available\n"); return -1; } - pio = of_find_node_by_phandle(*ph); - pio_map = of_get_property(pio, "pio-map", &pio_map_len); if (pio_map == NULL) { printk(KERN_ERR "pio-map is not set!\n"); @@ -176,9 +166,15 @@ int par_io_of_config(struct device_node *np) } while (pio_map_len > 0) { - par_io_config_pin((u8) pio_map[0], (u8) pio_map[1], - (int) pio_map[2], (int) pio_map[3], - (int) pio_map[4], (int) pio_map[5]); + u8 port = be32_to_cpu(pio_map[0]); + u8 pin = be32_to_cpu(pio_map[1]); + int dir = be32_to_cpu(pio_map[2]); + int open_drain = be32_to_cpu(pio_map[3]); + int assignment = be32_to_cpu(pio_map[4]); + int has_irq = be32_to_cpu(pio_map[5]); + + par_io_config_pin(port, pin, dir, open_drain, + assignment, has_irq); pio_map += 6; pio_map_len -= 6; } diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c index e37ebc3be661..7d7d78d3ee50 100644 --- a/drivers/soc/fsl/qe/qe_tdm.c +++ b/drivers/soc/fsl/qe/qe_tdm.c @@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info) &siram[siram_entry_id * 32 + 0x200 + i]); } - setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)], - SIR_LAST); - setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)], - SIR_LAST); + qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)], + SIR_LAST); + qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)], + SIR_LAST); /* Set SIxMR register */ sixmr = SIMR_SAD(siram_entry_id); diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c index 024d239ac1e1..90157acc5ba6 100644 --- a/drivers/soc/fsl/qe/ucc.c +++ b/drivers/soc/fsl/qe/ucc.c @@ -15,7 +15,6 @@ #include <linux/spinlock.h> #include 
<linux/export.h> -#include <asm/irq.h> #include <asm/io.h> #include <soc/fsl/qe/immap_qe.h> #include <soc/fsl/qe/qe.h> @@ -35,8 +34,8 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) return -EINVAL; spin_lock_irqsave(&cmxgcr_lock, flags); - clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, - ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); + qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, + ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); spin_unlock_irqrestore(&cmxgcr_lock, flags); return 0; @@ -80,8 +79,8 @@ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed) return -EINVAL; } - clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, - UCC_GUEMR_SET_RESERVED3 | speed); + qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, + UCC_GUEMR_SET_RESERVED3 | speed); return 0; } @@ -109,9 +108,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask) get_cmxucr_reg(ucc_num, &cmxucr, ®_num, &shift); if (set) - setbits32(cmxucr, mask << shift); + qe_setbits_be32(cmxucr, mask << shift); else - clrbits32(cmxucr, mask << shift); + qe_clrbits_be32(cmxucr, mask << shift); return 0; } @@ -207,8 +206,8 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, if (mode == COMM_DIR_RX) shift += 4; - clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, - clock_bits << shift); + qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, + clock_bits << shift); return 0; } @@ -540,8 +539,8 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock, cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l : &qe_mux_reg->cmxsi1cr_h; - qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, - clock_bits << shift); + qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, + clock_bits << shift); return 0; } @@ -650,9 +649,9 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock, shift = ucc_get_tdm_sync_shift(mode, tdm_num); - qe_clrsetbits32(&qe_mux_reg->cmxsi1syr, - QE_CMXUCR_TX_CLK_SRC_MASK << shift, - source << shift); + qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr, + QE_CMXUCR_TX_CLK_SRC_MASK << shift, + source << shift); return 0; } diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c index af4d80e38521..ad6193ea4597 100644 --- a/drivers/soc/fsl/qe/ucc_fast.c +++ b/drivers/soc/fsl/qe/ucc_fast.c @@ -29,41 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf) printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs); printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr)); + &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr)); printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr)); + &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr)); printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr)); + &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr)); printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr)); + &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr)); printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce)); + &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce)); printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm)); + &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm)); printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n", - &uccf->uf_regs->uccs, 
in_8(&uccf->uf_regs->uccs)); + &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs)); printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb)); + &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb)); printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs)); + &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs)); printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet)); + &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet)); printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset)); + &uccf->uf_regs->urfset, + qe_ioread16be(&uccf->uf_regs->urfset)); printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb)); + &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb)); printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs)); + &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs)); printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet)); + &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet)); printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt)); + &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt)); printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt)); + &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt)); printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry)); + &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry)); printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n", - &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr)); + &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr)); } EXPORT_SYMBOL(ucc_fast_dump_regs); @@ -85,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock); void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf) { - out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD); + qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr); } EXPORT_SYMBOL(ucc_fast_transmit_on_demand); @@ -97,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode) uf_regs = uccf->uf_regs; /* Enable reception and/or transmission on this UCC. */ - gumr = in_be32(&uf_regs->gumr); + gumr = qe_ioread32be(&uf_regs->gumr); if (mode & COMM_DIR_TX) { gumr |= UCC_FAST_GUMR_ENT; uccf->enabled_tx = 1; @@ -106,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode) gumr |= UCC_FAST_GUMR_ENR; uccf->enabled_rx = 1; } - out_be32(&uf_regs->gumr, gumr); + qe_iowrite32be(gumr, &uf_regs->gumr); } EXPORT_SYMBOL(ucc_fast_enable); @@ -118,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode) uf_regs = uccf->uf_regs; /* Disable reception and/or transmission on this UCC. 
*/ - gumr = in_be32(&uf_regs->gumr); + gumr = qe_ioread32be(&uf_regs->gumr); if (mode & COMM_DIR_TX) { gumr &= ~UCC_FAST_GUMR_ENT; uccf->enabled_tx = 0; @@ -127,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode) gumr &= ~UCC_FAST_GUMR_ENR; uccf->enabled_rx = 0; } - out_be32(&uf_regs->gumr, gumr); + qe_iowrite32be(gumr, &uf_regs->gumr); } EXPORT_SYMBOL(ucc_fast_disable); @@ -196,6 +197,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc __func__); return -ENOMEM; } + uccf->ucc_fast_tx_virtual_fifo_base_offset = -1; + uccf->ucc_fast_rx_virtual_fifo_base_offset = -1; /* Fill fast UCC structure */ uccf->uf_info = uf_info; @@ -259,15 +262,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc gumr |= uf_info->tenc; gumr |= uf_info->tcrc; gumr |= uf_info->mode; - out_be32(&uf_regs->gumr, gumr); + qe_iowrite32be(gumr, &uf_regs->gumr); /* Allocate memory for Tx Virtual Fifo */ uccf->ucc_fast_tx_virtual_fifo_base_offset = qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); - if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { + if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) { printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n", __func__); - uccf->ucc_fast_tx_virtual_fifo_base_offset = 0; ucc_fast_free(uccf); return -ENOMEM; } @@ -277,24 +279,25 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc qe_muram_alloc(uf_info->urfs + UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); - if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { + if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) { printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n", __func__); - uccf->ucc_fast_rx_virtual_fifo_base_offset = 0; ucc_fast_free(uccf); return -ENOMEM; } /* Set Virtual Fifo registers */ - out_be16(&uf_regs->urfs, uf_info->urfs); - out_be16(&uf_regs->urfet, uf_info->urfet); - out_be16(&uf_regs->urfset, uf_info->urfset); - out_be16(&uf_regs->utfs, uf_info->utfs); - out_be16(&uf_regs->utfet, uf_info->utfet); - out_be16(&uf_regs->utftt, uf_info->utftt); + qe_iowrite16be(uf_info->urfs, &uf_regs->urfs); + qe_iowrite16be(uf_info->urfet, &uf_regs->urfet); + qe_iowrite16be(uf_info->urfset, &uf_regs->urfset); + qe_iowrite16be(uf_info->utfs, &uf_regs->utfs); + qe_iowrite16be(uf_info->utfet, &uf_regs->utfet); + qe_iowrite16be(uf_info->utftt, &uf_regs->utftt); /* utfb, urfb are offsets from MURAM base */ - out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset); - out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset); + qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, + &uf_regs->utfb); + qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, + &uf_regs->urfb); /* Mux clocking */ /* Grant Support */ @@ -362,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc } /* Set interrupt mask register at UCC level. */ - out_be32(&uf_regs->uccm, uf_info->uccm_mask); + qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); /* First, clear anything pending at UCC level, * otherwise, old garbage may come through * as soon as the dam is opened. 
*/ /* Writing '1' clears */ - out_be32(&uf_regs->ucce, 0xffffffff); + qe_iowrite32be(0xffffffff, &uf_regs->ucce); *uccf_ret = uccf; return 0; @@ -381,11 +384,8 @@ void ucc_fast_free(struct ucc_fast_private * uccf) if (!uccf) return; - if (uccf->ucc_fast_tx_virtual_fifo_base_offset) - qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset); - - if (uccf->ucc_fast_rx_virtual_fifo_base_offset) - qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset); + qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset); + qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset); if (uccf->uf_regs) iounmap(uccf->uf_regs); diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c index 34f0ec3a63b5..274d34449846 100644 --- a/drivers/soc/fsl/qe/ucc_slow.c +++ b/drivers/soc/fsl/qe/ucc_slow.c @@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) us_regs = uccs->us_regs; /* Enable reception and/or transmission on this UCC. */ - gumr_l = in_be32(&us_regs->gumr_l); + gumr_l = qe_ioread32be(&us_regs->gumr_l); if (mode & COMM_DIR_TX) { gumr_l |= UCC_SLOW_GUMR_L_ENT; uccs->enabled_tx = 1; @@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) gumr_l |= UCC_SLOW_GUMR_L_ENR; uccs->enabled_rx = 1; } - out_be32(&us_regs->gumr_l, gumr_l); + qe_iowrite32be(gumr_l, &us_regs->gumr_l); } EXPORT_SYMBOL(ucc_slow_enable); @@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) us_regs = uccs->us_regs; /* Disable reception and/or transmission on this UCC. */ - gumr_l = in_be32(&us_regs->gumr_l); + gumr_l = qe_ioread32be(&us_regs->gumr_l); if (mode & COMM_DIR_TX) { gumr_l &= ~UCC_SLOW_GUMR_L_ENT; uccs->enabled_tx = 0; @@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) gumr_l &= ~UCC_SLOW_GUMR_L_ENR; uccs->enabled_rx = 0; } - out_be32(&us_regs->gumr_l, gumr_l); + qe_iowrite32be(gumr_l, &us_regs->gumr_l); } EXPORT_SYMBOL(ucc_slow_disable); @@ -154,6 +154,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc __func__); return -ENOMEM; } + uccs->rx_base_offset = -1; + uccs->tx_base_offset = -1; + uccs->us_pram_offset = -1; /* Fill slow UCC structure */ uccs->us_info = us_info; @@ -179,7 +182,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc /* Get PRAM base */ uccs->us_pram_offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM); - if (IS_ERR_VALUE(uccs->us_pram_offset)) { + if (uccs->us_pram_offset < 0) { printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__); ucc_slow_free(uccs); return -ENOMEM; @@ -198,7 +201,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc return ret; } - out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length); + qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr); INIT_LIST_HEAD(&uccs->confQ); @@ -206,10 +209,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc uccs->rx_base_offset = qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), QE_ALIGNMENT_OF_BD); - if (IS_ERR_VALUE(uccs->rx_base_offset)) { + if (uccs->rx_base_offset < 0) { printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__, us_info->rx_bd_ring_len); - uccs->rx_base_offset = 0; ucc_slow_free(uccs); return -ENOMEM; } @@ -217,9 +219,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc uccs->tx_base_offset = qe_muram_alloc(us_info->tx_bd_ring_len * 
sizeof(struct qe_bd), QE_ALIGNMENT_OF_BD); - if (IS_ERR_VALUE(uccs->tx_base_offset)) { + if (uccs->tx_base_offset < 0) { printk(KERN_ERR "%s: cannot allocate TX BDs", __func__); - uccs->tx_base_offset = 0; ucc_slow_free(uccs); return -ENOMEM; } @@ -228,27 +229,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) { /* clear bd buffer */ - out_be32(&bd->buf, 0); + qe_iowrite32be(0, &bd->buf); /* set bd status and length */ - out_be32((u32 *) bd, 0); + qe_iowrite32be(0, (u32 *)bd); bd++; } /* for last BD set Wrap bit */ - out_be32(&bd->buf, 0); - out_be32((u32 *) bd, cpu_to_be32(T_W)); + qe_iowrite32be(0, &bd->buf); + qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd); /* Init Rx bds */ bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { /* set bd status and length */ - out_be32((u32*)bd, 0); + qe_iowrite32be(0, (u32 *)bd); /* clear bd buffer */ - out_be32(&bd->buf, 0); + qe_iowrite32be(0, &bd->buf); bd++; } /* for last BD set Wrap bit */ - out_be32((u32*)bd, cpu_to_be32(R_W)); - out_be32(&bd->buf, 0); + qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd); + qe_iowrite32be(0, &bd->buf); /* Set GUMR (For more details see the hardware spec.). */ /* gumr_h */ @@ -269,7 +270,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc gumr |= UCC_SLOW_GUMR_H_TXSY; if (us_info->rtsm) gumr |= UCC_SLOW_GUMR_H_RTSM; - out_be32(&us_regs->gumr_h, gumr); + qe_iowrite32be(gumr, &us_regs->gumr_h); /* gumr_l */ gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc | @@ -282,7 +283,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc gumr |= UCC_SLOW_GUMR_L_TINV; if (us_info->tend) gumr |= UCC_SLOW_GUMR_L_TEND; - out_be32(&us_regs->gumr_l, gumr); + qe_iowrite32be(gumr, &us_regs->gumr_l); /* Function code registers */ @@ -292,8 +293,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc uccs->us_pram->rbmr = UCC_BMR_BO_BE; /* rbase, tbase are offsets from MURAM base */ - out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset); - out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset); + qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase); + qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase); /* Mux clocking */ /* Grant Support */ @@ -323,14 +324,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc } /* Set interrupt mask register at UCC level. */ - out_be16(&us_regs->uccm, us_info->uccm_mask); + qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm); /* First, clear anything pending at UCC level, * otherwise, old garbage may come through * as soon as the dam is opened. 
*/ /* Writing '1' clears */ - out_be16(&us_regs->ucce, 0xffff); + qe_iowrite16be(0xffff, &us_regs->ucce); /* Issue QE Init command */ if (us_info->init_tx && us_info->init_rx) @@ -352,14 +353,9 @@ void ucc_slow_free(struct ucc_slow_private * uccs) if (!uccs) return; - if (uccs->rx_base_offset) - qe_muram_free(uccs->rx_base_offset); - - if (uccs->tx_base_offset) - qe_muram_free(uccs->tx_base_offset); - - if (uccs->us_pram) - qe_muram_free(uccs->us_pram_offset); + qe_muram_free(uccs->rx_base_offset); + qe_muram_free(uccs->tx_base_offset); + qe_muram_free(uccs->us_pram_offset); if (uccs->us_regs) iounmap(uccs->us_regs); diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c index 32d8269fa692..890f236ea697 100644 --- a/drivers/soc/fsl/qe/usb.c +++ b/drivers/soc/fsl/qe/usb.c @@ -43,7 +43,7 @@ int qe_usb_clock_set(enum qe_clock clk, int rate) spin_lock_irqsave(&cmxgcr_lock, flags); - clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val); + qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val); spin_unlock_irqrestore(&cmxgcr_lock, flags); diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index 8aaebf13e2e6..0281ef9a1800 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -10,7 +10,7 @@ config IMX_GPCV2_PM_DOMAINS config IMX_SCU_SOC bool "i.MX System Controller Unit SoC info support" - depends on IMX_SCU + depends on IMX_SCU || COMPILE_TEST select SOC_BUS help If you say yes here you get support for the NXP i.MX System diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c index d84ed736cdb0..719e1f189ebf 100644 --- a/drivers/soc/imx/soc-imx8.c +++ b/drivers/soc/imx/soc-imx8.c @@ -142,10 +142,16 @@ static const struct imx8_soc_data imx8mn_soc_data = { .soc_revision = imx8mm_soc_revision, }; +static const struct imx8_soc_data imx8mp_soc_data = { + .name = "i.MX8MP", + .soc_revision = imx8mm_soc_revision, +}; + static const struct of_device_id imx8_soc_match[] = { { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, }, { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, }, { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, }, + { .compatible = "fsl,imx8mp", .data = &imx8mp_soc_data, }, { } }; @@ -204,6 +210,9 @@ static int __init imx8_soc_init(void) goto free_serial_number; } + pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id, + soc_dev_attr->revision); + if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 9add0fd5fa6c..de20e6cba83b 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -12,8 +12,6 @@ #define CMDQ_WRITE_ENABLE_MASK BIT(0) #define CMDQ_POLL_ENABLE_MASK BIT(0) #define CMDQ_EOC_IRQ_EN BIT(0) -#define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \ - << 32 | CMDQ_EOC_IRQ_EN) struct cmdq_instruction { union { diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 79d826553ac8..d0a73e76d563 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -45,13 +45,13 @@ config QCOM_GLINK_SSR neighboring subsystems going up or down. config QCOM_GSBI - tristate "QCOM General Serial Bus Interface" - depends on ARCH_QCOM || COMPILE_TEST - select MFD_SYSCON - help - Say y here to enable GSBI support. The GSBI provides control - functions for connecting the underlying serial UART, SPI, and I2C - devices to the output pins. 
+ tristate "QCOM General Serial Bus Interface" + depends on ARCH_QCOM || COMPILE_TEST + select MFD_SYSCON + help + Say y here to enable GSBI support. The GSBI provides control + functions for connecting the underlying serial UART, SPI, and I2C + devices to the output pins. config QCOM_LLCC tristate "Qualcomm Technologies, Inc. LLCC driver" @@ -71,10 +71,10 @@ config QCOM_OCMEM depends on ARCH_QCOM select QCOM_SCM help - The On Chip Memory (OCMEM) allocator allows various clients to - allocate memory from OCMEM based on performance, latency and power - requirements. This is typically used by the GPU, camera/video, and - audio components on some Snapdragon SoCs. + The On Chip Memory (OCMEM) allocator allows various clients to + allocate memory from OCMEM based on performance, latency and power + requirements. This is typically used by the GPU, camera/video, and + audio components on some Snapdragon SoCs. config QCOM_PM bool "Qualcomm Power Management" @@ -198,8 +198,8 @@ config QCOM_APR depends on ARCH_QCOM || COMPILE_TEST depends on RPMSG help - Enable APR IPC protocol support between - application processor and QDSP6. APR is - used by audio driver to configure QDSP6 - ASM, ADM and AFE modules. + Enable APR IPC protocol support between + application processor and QDSP6. APR is + used by audio driver to configure QDSP6 + ASM, ADM and AFE modules. endmenu diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c index f9e309f0acd3..1a03eaa38c46 100644 --- a/drivers/soc/qcom/qmi_interface.c +++ b/drivers/soc/qcom/qmi_interface.c @@ -655,8 +655,12 @@ int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size, qmi->sock = qmi_sock_create(qmi, &qmi->sq); if (IS_ERR(qmi->sock)) { - pr_err("failed to create QMI socket\n"); - ret = PTR_ERR(qmi->sock); + if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) { + ret = -EPROBE_DEFER; + } else { + pr_err("failed to create QMI socket\n"); + ret = PTR_ERR(qmi->sock); + } goto err_destroy_wq; } diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index 5741ec3fa814..4d264d0672c4 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = { static struct rpmhpd sdm845_mx_ao = { .pd = { .name = "mx_ao", }, + .active_only = true, .peer = &sdm845_mx, .res_name = "mx.lvl", }; @@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = { static struct rpmhpd sdm845_cx_ao = { .pd = { .name = "cx_ao", }, + .active_only = true, .peer = &sdm845_cx, .parent = &sdm845_mx_ao.pd, .res_name = "cx.lvl", @@ -129,8 +131,62 @@ static const struct rpmhpd_desc sdm845_desc = { .num_pds = ARRAY_SIZE(sdm845_rpmhpds), }; +/* SM8150 RPMH powerdomains */ + +static struct rpmhpd sm8150_mmcx_ao; +static struct rpmhpd sm8150_mmcx = { + .pd = { .name = "mmcx", }, + .peer = &sm8150_mmcx_ao, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd sm8150_mmcx_ao = { + .pd = { .name = "mmcx_ao", }, + .active_only = true, + .peer = &sm8150_mmcx, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd *sm8150_rpmhpds[] = { + [SM8150_MSS] = &sdm845_mss, + [SM8150_EBI] = &sdm845_ebi, + [SM8150_LMX] = &sdm845_lmx, + [SM8150_LCX] = &sdm845_lcx, + [SM8150_GFX] = &sdm845_gfx, + [SM8150_MX] = &sdm845_mx, + [SM8150_MX_AO] = &sdm845_mx_ao, + [SM8150_CX] = &sdm845_cx, + [SM8150_CX_AO] = &sdm845_cx_ao, + [SM8150_MMCX] = &sm8150_mmcx, + [SM8150_MMCX_AO] = &sm8150_mmcx_ao, +}; + +static const struct rpmhpd_desc sm8150_desc = { + .rpmhpds = sm8150_rpmhpds, + .num_pds = ARRAY_SIZE(sm8150_rpmhpds), +}; + +/* SC7180 RPMH 
powerdomains */ +static struct rpmhpd *sc7180_rpmhpds[] = { + [SC7180_CX] = &sdm845_cx, + [SC7180_CX_AO] = &sdm845_cx_ao, + [SC7180_GFX] = &sdm845_gfx, + [SC7180_MX] = &sdm845_mx, + [SC7180_MX_AO] = &sdm845_mx_ao, + [SC7180_LMX] = &sdm845_lmx, + [SC7180_LCX] = &sdm845_lcx, + [SC7180_MSS] = &sdm845_mss, +}; + +static const struct rpmhpd_desc sc7180_desc = { + .rpmhpds = sc7180_rpmhpds, + .num_pds = ARRAY_SIZE(sc7180_rpmhpds), +}; + static const struct of_device_id rpmhpd_match_table[] = { + { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc }, { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc }, + { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc }, { } }; diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index f93492b72c04..ba2b8b51d2d9 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -192,21 +192,25 @@ config ARCH_R8A774C0 help This enables support for the Renesas RZ/G2E SoC. +config ARCH_R8A77950 + bool + +config ARCH_R8A77951 + bool + config ARCH_R8A7795 bool "Renesas R-Car H3 SoC Platform" + select ARCH_R8A77950 + select ARCH_R8A77951 select ARCH_RCAR_GEN3 select SYSC_R8A7795 help This enables support for the Renesas R-Car H3 SoC. config ARCH_R8A77960 - bool + bool "Renesas R-Car M3-W SoC Platform" select ARCH_RCAR_GEN3 select SYSC_R8A77960 - -config ARCH_R8A7796 - bool "Renesas R-Car M3-W SoC Platform" - select ARCH_R8A77960 help This enables support for the Renesas R-Car M3-W SoC. diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c index 14d05a070dd3..2af2e0dd83fe 100644 --- a/drivers/soc/renesas/rcar-rst.c +++ b/drivers/soc/renesas/rcar-rst.c @@ -21,7 +21,7 @@ static int rcar_rst_enable_wdt_reset(void __iomem *base) struct rst_config { unsigned int modemr; /* Mode Monitoring Register Offset */ - int (*configure)(void *base); /* Platform specific configuration */ + int (*configure)(void __iomem *base); /* Platform specific config */ }; static const struct rst_config rcar_rst_gen1 __initconst = { diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index 27fc59bbb520..c7a2003687c7 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# SAMSUNG SoC drivers +# Samsung SoC drivers # menuconfig SOC_SAMSUNG bool "Samsung SoC driver support" if COMPILE_TEST diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index b89c26a71c6e..2dad4961a80b 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -3,7 +3,7 @@ * Copyright (c) 2019 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * - * EXYNOS - CHIP ID support + * Exynos - CHIP ID support * Author: Pankaj Dubey <pankaj.dubey@samsung.com> * Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> */ diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index d34ca201b8b7..17304fa18429 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -3,7 +3,7 @@ // Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. 
// http://www.samsung.com/ // -// EXYNOS - CPU PMU(Power Management Unit) support +// Exynos - CPU PMU(Power Management Unit) support #include <linux/of.h> #include <linux/of_address.h> @@ -110,10 +110,8 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap); static int exynos_pmu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pmu_base_addr = devm_ioremap_resource(dev, res); + pmu_base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pmu_base_addr)) return PTR_ERR(pmu_base_addr); diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h index 977e4daf5a0f..5e851f32307e 100644 --- a/drivers/soc/samsung/exynos-pmu.h +++ b/drivers/soc/samsung/exynos-pmu.h @@ -3,7 +3,7 @@ * Copyright (c) 2015 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Header for EXYNOS PMU Driver support + * Header for Exynos PMU Driver support */ #ifndef __EXYNOS_PMU_H diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c index 275d348ed9c9..30f230ed1769 100644 --- a/drivers/soc/samsung/exynos3250-pmu.c +++ b/drivers/soc/samsung/exynos3250-pmu.c @@ -3,7 +3,7 @@ // Copyright (c) 2011-2015 Samsung Electronics Co., Ltd. // http://www.samsung.com/ // -// EXYNOS3250 - CPU PMU (Power Management Unit) support +// Exynos3250 - CPU PMU (Power Management Unit) support #include <linux/soc/samsung/exynos-regs-pmu.h> #include <linux/soc/samsung/exynos-pmu.h> diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c index a7cdbf1aac0c..cb35103565a6 100644 --- a/drivers/soc/samsung/exynos4-pmu.c +++ b/drivers/soc/samsung/exynos4-pmu.c @@ -3,7 +3,7 @@ // Copyright (c) 2011-2015 Samsung Electronics Co., Ltd. // http://www.samsung.com/ // -// EXYNOS4 - CPU PMU(Power Management Unit) support +// Exynos4 - CPU PMU(Power Management Unit) support #include <linux/soc/samsung/exynos-regs-pmu.h> #include <linux/soc/samsung/exynos-pmu.h> diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c index 19b38e008145..7a2d50be6b4a 100644 --- a/drivers/soc/samsung/exynos5250-pmu.c +++ b/drivers/soc/samsung/exynos5250-pmu.c @@ -3,7 +3,7 @@ // Copyright (c) 2011-2015 Samsung Electronics Co., Ltd. // http://www.samsung.com/ // -// EXYNOS5250 - CPU PMU (Power Management Unit) support +// Exynos5250 - CPU PMU (Power Management Unit) support #include <linux/soc/samsung/exynos-regs-pmu.h> #include <linux/soc/samsung/exynos-pmu.h> diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c index b236d3b47b49..6fedcd78cb45 100644 --- a/drivers/soc/samsung/exynos5420-pmu.c +++ b/drivers/soc/samsung/exynos5420-pmu.c @@ -3,7 +3,7 @@ // Copyright (c) 2011-2015 Samsung Electronics Co., Ltd. 
// http://www.samsung.com/ // -// EXYNOS5420 - CPU PMU (Power Management Unit) support +// Exynos5420 - CPU PMU (Power Management Unit) support #include <linux/pm.h> #include <linux/soc/samsung/exynos-regs-pmu.h> diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 84bd615c4a92..3693532949b8 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -126,6 +126,7 @@ config SOC_TEGRA_FUSE def_bool y depends on ARCH_TEGRA select SOC_BUS + select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC config SOC_TEGRA_FLOWCTRL bool diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 606abbe55bba..802717b9f6a3 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -49,6 +49,9 @@ static struct tegra_fuse *fuse = &(struct tegra_fuse) { }; static const struct of_device_id tegra_fuse_match[] = { +#ifdef CONFIG_ARCH_TEGRA_194_SOC + { .compatible = "nvidia,tegra194-efuse", .data = &tegra194_fuse_soc }, +#endif #ifdef CONFIG_ARCH_TEGRA_186_SOC { .compatible = "nvidia,tegra186-efuse", .data = &tegra186_fuse_soc }, #endif diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c index b8daaf5b7291..f68f4e1c215d 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra30.c +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c @@ -320,3 +320,32 @@ const struct tegra_fuse_soc tegra186_fuse_soc = { .num_lookups = ARRAY_SIZE(tegra186_fuse_lookups), }; #endif + +#if defined(CONFIG_ARCH_TEGRA_194_SOC) +static const struct nvmem_cell_lookup tegra194_fuse_lookups[] = { + { + .nvmem_name = "fuse", + .cell_name = "xusb-pad-calibration", + .dev_id = "3520000.padctl", + .con_id = "calibration", + }, { + .nvmem_name = "fuse", + .cell_name = "xusb-pad-calibration-ext", + .dev_id = "3520000.padctl", + .con_id = "calibration-ext", + }, +}; + +static const struct tegra_fuse_info tegra194_fuse_info = { + .read = tegra30_fuse_read, + .size = 0x300, + .spare = 0x280, +}; + +const struct tegra_fuse_soc tegra194_fuse_soc = { + .init = tegra30_fuse_init, + .info = &tegra194_fuse_info, + .lookups = tegra194_fuse_lookups, + .num_lookups = ARRAY_SIZE(tegra194_fuse_lookups), +}; +#endif diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index 0f74c2c34af0..94a059e577a1 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -108,4 +108,8 @@ extern const struct tegra_fuse_soc tegra210_fuse_soc; extern const struct tegra_fuse_soc tegra186_fuse_soc; #endif +#ifdef CONFIG_ARCH_TEGRA_194_SOC +extern const struct tegra_fuse_soc tegra194_fuse_soc; +#endif + #endif diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index a2fd6ccd48f9..089d9340564b 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -21,18 +21,15 @@ #define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \ (0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT) -static void __iomem *apbmisc_base; -static void __iomem *strapping_base; static bool long_ram_code; +static u32 strapping; +static u32 chipid; u32 tegra_read_chipid(void) { - if (!apbmisc_base) { - WARN(1, "Tegra Chip ID not yet available\n"); - return 0; - } + WARN(!chipid, "Tegra APB MISC not yet available\n"); - return readl_relaxed(apbmisc_base + 4); + return chipid; } u8 tegra_get_chip_id(void) @@ -42,10 +39,9 @@ u8 tegra_get_chip_id(void) u32 tegra_read_straps(void) { - if (strapping_base) - return readl_relaxed(strapping_base); - else - return 0; + WARN(!chipid, "Tegra APB MISC not 
yet available\n"); + + return strapping; } u32 tegra_read_ram_code(void) @@ -63,6 +59,7 @@ u32 tegra_read_ram_code(void) static const struct of_device_id apbmisc_match[] __initconst = { { .compatible = "nvidia,tegra20-apbmisc", }, { .compatible = "nvidia,tegra186-misc", }, + { .compatible = "nvidia,tegra194-misc", }, {}, }; @@ -103,6 +100,7 @@ void __init tegra_init_revision(void) void __init tegra_init_apbmisc(void) { + void __iomem *apbmisc_base, *strapping_base; struct resource apbmisc, straps; struct device_node *np; @@ -123,7 +121,7 @@ void __init tegra_init_apbmisc(void) apbmisc.flags = IORESOURCE_MEM; /* strapping options */ - if (tegra_get_chip_id() == TEGRA124) { + if (of_machine_is_compatible("nvidia,tegra124")) { straps.start = 0x7000e864; straps.end = 0x7000e867; } else { @@ -160,12 +158,20 @@ void __init tegra_init_apbmisc(void) } apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc)); - if (!apbmisc_base) + if (!apbmisc_base) { pr_err("failed to map APBMISC registers\n"); + } else { + chipid = readl_relaxed(apbmisc_base + 4); + iounmap(apbmisc_base); + } strapping_base = ioremap(straps.start, resource_size(&straps)); - if (!strapping_base) + if (!strapping_base) { pr_err("failed to map strapping options registers\n"); + } else { + strapping = readl_relaxed(strapping_base); + iounmap(strapping_base); + } long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code"); } diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c index ea0eede48802..367a71a3cd10 100644 --- a/drivers/soc/tegra/regulators-tegra20.c +++ b/drivers/soc/tegra/regulators-tegra20.c @@ -162,6 +162,9 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra, core_target_uV = max(rtc_uV - max_spread, core_target_uV); } + if (core_uV == core_target_uV) + goto update_rtc; + err = regulator_set_voltage_rdev(core_rdev, core_target_uV, core_max_uV, @@ -170,7 +173,7 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra, return err; core_uV = core_target_uV; - +update_rtc: if (rtc_uV < rtc_min_uV) { rtc_target_uV = min(rtc_uV + max_spread, rtc_min_uV); rtc_target_uV = min(core_uV + max_spread, rtc_target_uV); @@ -179,6 +182,9 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra, rtc_target_uV = max(core_uV - max_spread, rtc_target_uV); } + if (rtc_uV == rtc_target_uV) + continue; + err = regulator_set_voltage_rdev(rtc_rdev, rtc_target_uV, rtc_max_uV, diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c index 8e623ff18e70..7f21f31de09d 100644 --- a/drivers/soc/tegra/regulators-tegra30.c +++ b/drivers/soc/tegra/regulators-tegra30.c @@ -209,6 +209,9 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra, cpu_target_uV = max(core_uV - max_spread, cpu_target_uV); } + if (cpu_uV == cpu_target_uV) + goto update_core; + err = regulator_set_voltage_rdev(cpu_rdev, cpu_target_uV, cpu_max_uV, @@ -231,6 +234,9 @@ update_core: core_target_uV = max(core_target_uV, core_uV - core_max_step); } + if (core_uV == core_target_uV) + continue; + err = regulator_set_voltage_rdev(core_rdev, core_target_uV, core_max_uV, diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 1ccc9064e1eb..37f3db6c041c 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -25,6 +25,8 @@ static struct knav_device *kdev; static DEFINE_MUTEX(knav_dev_lock); +#define knav_dev_lock_held() \ + lockdep_is_held(&knav_dev_lock) /* Queue 
manager register indices in DTS */ #define KNAV_QUEUE_PEEK_REG_INDEX 0 @@ -52,8 +54,9 @@ static DEFINE_MUTEX(knav_dev_lock); #define knav_queue_idx_to_inst(kdev, idx) \ (kdev->instances + (idx << kdev->inst_shift)) -#define for_each_handle_rcu(qh, inst) \ - list_for_each_entry_rcu(qh, &inst->handles, list) +#define for_each_handle_rcu(qh, inst) \ + list_for_each_entry_rcu(qh, &inst->handles, list, \ + knav_dev_lock_held()) #define for_each_instance(idx, inst, kdev) \ for (idx = 0, inst = kdev->instances; \ diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig index 01e76b58dd78..223f1f9d0922 100644 --- a/drivers/soc/xilinx/Kconfig +++ b/drivers/soc/xilinx/Kconfig @@ -21,11 +21,15 @@ config ZYNQMP_POWER bool "Enable Xilinx Zynq MPSoC Power Management driver" depends on PM && ARCH_ZYNQMP default y + select MAILBOX + select ZYNQMP_IPI_MBOX help Say yes to enable power management support for ZynqMP SoC. This driver uses firmware driver as an interface for power management request to firmware. It registers isr to handle - power management callbacks from firmware. + power management callbacks from firmware. It registers mailbox client + to handle power management callbacks from firmware. + If in doubt, say N. config ZYNQMP_PM_DOMAINS diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index 1b9d14411a15..09227895d216 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -2,7 +2,7 @@ /* * Xilinx Zynq MPSoC Power Management * - * Copyright (C) 2014-2018 Xilinx, Inc. + * Copyright (C) 2014-2019 Xilinx, Inc. * * Davorin Mista <davorin.mista@aggios.com> * Jolly Shah <jollys@xilinx.com> @@ -16,6 +16,21 @@ #include <linux/suspend.h> #include <linux/firmware/xlnx-zynqmp.h> +#include <linux/mailbox/zynqmp-ipi-message.h> + +/** + * struct zynqmp_pm_work_struct - Wrapper for struct work_struct + * @callback_work: Work structure + * @args: Callback arguments + */ +struct zynqmp_pm_work_struct { + struct work_struct callback_work; + u32 args[CB_ARG_CNT]; +}; + +static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work; +static struct mbox_chan *rx_chan; +static const struct zynqmp_eemi_ops *eemi_ops; enum pm_suspend_mode { PM_SUSPEND_MODE_FIRST = 0, @@ -31,7 +46,6 @@ static const char *const suspend_modes[] = { }; static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD; -static const struct zynqmp_eemi_ops *eemi_ops; enum pm_api_cb_id { PM_INIT_SUSPEND_CB = 30, @@ -68,6 +82,53 @@ static irqreturn_t zynqmp_pm_isr(int irq, void *data) return IRQ_HANDLED; } +static void ipi_receive_callback(struct mbox_client *cl, void *data) +{ + struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)data; + u32 payload[CB_PAYLOAD_SIZE]; + int ret; + + memcpy(payload, msg->data, sizeof(payload)); + /* First element is callback API ID, others are callback arguments */ + if (payload[0] == PM_INIT_SUSPEND_CB) { + if (work_pending(&zynqmp_pm_init_suspend_work->callback_work)) + return; + + /* Copy callback arguments into work's structure */ + memcpy(zynqmp_pm_init_suspend_work->args, &payload[1], + sizeof(zynqmp_pm_init_suspend_work->args)); + + queue_work(system_unbound_wq, + &zynqmp_pm_init_suspend_work->callback_work); + + /* Send NULL message to mbox controller to ack the message */ + ret = mbox_send_message(rx_chan, NULL); + if (ret) + pr_err("IPI ack failed. 
Error %d\n", ret); + } +} + +/** + * zynqmp_pm_init_suspend_work_fn - Initialize suspend + * @work: Pointer to work_struct + * + * Bottom-half of PM callback IRQ handler. + */ +static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work) +{ + struct zynqmp_pm_work_struct *pm_work = + container_of(work, struct zynqmp_pm_work_struct, callback_work); + + if (pm_work->args[0] == SUSPEND_SYSTEM_SHUTDOWN) { + orderly_poweroff(true); + } else if (pm_work->args[0] == SUSPEND_POWER_REQUEST) { + pm_suspend(PM_SUSPEND_MEM); + } else { + pr_err("%s Unsupported InitSuspendCb reason code %d.\n", + __func__, pm_work->args[0]); + } +} + static ssize_t suspend_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -119,6 +180,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev) { int ret, irq; u32 pm_api_version; + struct mbox_client *client; eemi_ops = zynqmp_pm_get_eemi_ops(); if (IS_ERR(eemi_ops)) @@ -134,17 +196,46 @@ static int zynqmp_pm_probe(struct platform_device *pdev) if (pm_api_version < ZYNQMP_PM_VERSION) return -ENODEV; - irq = platform_get_irq(pdev, 0); - if (irq <= 0) - return -ENXIO; - - ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr, - IRQF_NO_SUSPEND | IRQF_ONESHOT, - dev_name(&pdev->dev), &pdev->dev); - if (ret) { - dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed " - "with %d\n", irq, ret); - return ret; + if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) { + zynqmp_pm_init_suspend_work = + devm_kzalloc(&pdev->dev, + sizeof(struct zynqmp_pm_work_struct), + GFP_KERNEL); + if (!zynqmp_pm_init_suspend_work) + return -ENOMEM; + + INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work, + zynqmp_pm_init_suspend_work_fn); + client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL); + if (!client) + return -ENOMEM; + + client->dev = &pdev->dev; + client->rx_callback = ipi_receive_callback; + + rx_chan = mbox_request_channel_byname(client, "rx"); + if (IS_ERR(rx_chan)) { + dev_err(&pdev->dev, "Failed to request rx channel\n"); + return PTR_ERR(rx_chan); + } + } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) { + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -ENXIO; + + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + zynqmp_pm_isr, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(&pdev->dev), + &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "devm_request_threaded_irq '%d' " + "failed with %d\n", irq, ret); + return ret; + } + } else { + dev_err(&pdev->dev, "Required property not found in DT node\n"); + return -ENOENT; } ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); @@ -160,6 +251,9 @@ static int zynqmp_pm_remove(struct platform_device *pdev) { sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); + if (rx_chan) + mbox_free_channel(rx_chan); + return 0; } diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index b830e0a87fba..99698b8a3a74 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -534,13 +534,13 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1, arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); } -static optee_invoke_fn *get_invoke_func(struct device_node *np) +static optee_invoke_fn *get_invoke_func(struct device *dev) { const char *method; - pr_info("probing for conduit method from DT.\n"); + pr_info("probing for conduit method.\n"); - if (of_property_read_string(np, "method", &method)) { + if (device_property_read_string(dev, "method", &method)) { pr_warn("missing \"method\" 
property\n"); return ERR_PTR(-ENXIO); } @@ -554,7 +554,37 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np) return ERR_PTR(-EINVAL); } -static struct optee *optee_probe(struct device_node *np) +static int optee_remove(struct platform_device *pdev) +{ + struct optee *optee = platform_get_drvdata(pdev); + + /* + * Ask OP-TEE to free all cached shared memory objects to decrease + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. + */ + optee_disable_shm_cache(optee); + + /* + * The two devices have to be unregistered before we can free the + * other resources. + */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); + + return 0; +} + +static int optee_probe(struct platform_device *pdev) { optee_invoke_fn *invoke_fn; struct tee_shm_pool *pool = ERR_PTR(-EINVAL); @@ -564,25 +594,25 @@ static struct optee *optee_probe(struct device_node *np) u32 sec_caps; int rc; - invoke_fn = get_invoke_func(np); + invoke_fn = get_invoke_func(&pdev->dev); if (IS_ERR(invoke_fn)) - return (void *)invoke_fn; + return PTR_ERR(invoke_fn); if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { pr_warn("api uid mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } optee_msg_get_os_revision(invoke_fn); if (!optee_msg_api_revision_is_compatible(invoke_fn)) { pr_warn("api revision mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { pr_warn("capabilities mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } /* @@ -598,7 +628,7 @@ static struct optee *optee_probe(struct device_node *np) pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); if (IS_ERR(pool)) - return (void *)pool; + return PTR_ERR(pool); optee = kzalloc(sizeof(*optee), GFP_KERNEL); if (!optee) { @@ -643,7 +673,16 @@ static struct optee *optee_probe(struct device_node *np) if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) pr_info("dynamic shared memory is enabled\n"); - return optee; + platform_set_drvdata(pdev, optee); + + rc = optee_enumerate_devices(); + if (rc) { + optee_remove(pdev); + return rc; + } + + pr_info("initialized driver\n"); + return 0; err: if (optee) { /* @@ -659,92 +698,28 @@ err: tee_shm_pool_free(pool); if (memremaped_shm) memunmap(memremaped_shm); - return ERR_PTR(rc); -} - -static void optee_remove(struct optee *optee) -{ - /* - * Ask OP-TEE to free all cached shared memory objects to decrease - * reference counters and also avoid wild pointers in secure world - * into the old shared memory range. - */ - optee_disable_shm_cache(optee); - - /* - * The two devices has to be unregistered before we can free the - * other resources. 
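
The optee rework in progress here converts pointer-returning helpers into an int-returning platform driver probe. The idiom underneath is <linux/err.h>'s encoding of an errno inside the pointer value itself: IS_ERR() only tests whether a pointer lies in the error range, while PTR_ERR() recovers the errno, which is why the probe path above returns PTR_ERR(invoke_fn) rather than casting the pointer. A small self-contained illustration with a hypothetical widget_lookup():

#include <linux/err.h>
#include <linux/errno.h>

struct widget { int id; };

static struct widget the_widget = { .id = 1 };

static struct widget *widget_lookup(int id)
{
	if (id < 0)
		return ERR_PTR(-EINVAL);	/* errno hidden in the pointer */
	return &the_widget;
}

static int widget_probe(int id)
{
	struct widget *w = widget_lookup(id);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* yields -EINVAL, not just "true" */
	return 0;
}
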
- */ - tee_device_unregister(optee->supp_teedev); - tee_device_unregister(optee->teedev); - - tee_shm_pool_free(optee->pool); - if (optee->memremaped_shm) - memunmap(optee->memremaped_shm); - optee_wait_queue_exit(&optee->wait_queue); - optee_supp_uninit(&optee->supp); - mutex_destroy(&optee->call_queue.mutex); - - kfree(optee); + return rc; } -static const struct of_device_id optee_match[] = { +static const struct of_device_id optee_dt_match[] = { { .compatible = "linaro,optee-tz" }, {}, }; - -static struct optee *optee_svc; - -static int __init optee_driver_init(void) -{ - struct device_node *fw_np = NULL; - struct device_node *np = NULL; - struct optee *optee = NULL; - int rc = 0; - - /* Node is supposed to be below /firmware */ - fw_np = of_find_node_by_name(NULL, "firmware"); - if (!fw_np) - return -ENODEV; - - np = of_find_matching_node(fw_np, optee_match); - if (!np || !of_device_is_available(np)) { - of_node_put(np); - return -ENODEV; - } - - optee = optee_probe(np); - of_node_put(np); - - if (IS_ERR(optee)) - return PTR_ERR(optee); - - rc = optee_enumerate_devices(); - if (rc) { - optee_remove(optee); - return rc; - } - - pr_info("initialized driver\n"); - - optee_svc = optee; - - return 0; -} -module_init(optee_driver_init); - -static void __exit optee_driver_exit(void) -{ - struct optee *optee = optee_svc; - - optee_svc = NULL; - if (optee) - optee_remove(optee); -} -module_exit(optee_driver_exit); +MODULE_DEVICE_TABLE(of, optee_dt_match); + +static struct platform_driver optee_driver = { + .probe = optee_probe, + .remove = optee_remove, + .driver = { + .name = "optee", + .of_match_table = optee_dt_match, + }, +}; +module_platform_driver(optee_driver); MODULE_AUTHOR("Linaro"); MODULE_DESCRIPTION("OP-TEE driver"); MODULE_SUPPORTED_DEVICE(""); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:optee"); diff --git a/drivers/tty/serial/8250/8250_ioc3.c b/drivers/tty/serial/8250/8250_ioc3.c index 4c405f1b9c67..d5a39e105a76 100644 --- a/drivers/tty/serial/8250/8250_ioc3.c +++ b/drivers/tty/serial/8250/8250_ioc3.c @@ -47,7 +47,7 @@ static int serial8250_ioc3_probe(struct platform_device *pdev) if (!data) return -ENOMEM; - membase = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r)); + membase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!membase) return -ENOMEM; diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index 2e151a4c222b..3c8c662c69e2 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c @@ -32,7 +32,11 @@ #include <soc/fsl/qe/ucc_slow.h> #include <linux/firmware.h> -#include <asm/reg.h> +#include <soc/fsl/cpm.h> + +#ifdef CONFIG_PPC32 +#include <asm/reg.h> /* mfspr, SPRN_SVR */ +#endif /* * The GUMR flag for Soft UART. 
This would normally be defined in qe.h, @@ -257,11 +261,11 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port) struct qe_bd *bdp = qe_port->tx_bd_base; while (1) { - if (in_be16(&bdp->status) & BD_SC_READY) + if (qe_ioread16be(&bdp->status) & BD_SC_READY) /* This BD is not done, so return "not done" */ return 0; - if (in_be16(&bdp->status) & BD_SC_WRAP) + if (qe_ioread16be(&bdp->status) & BD_SC_WRAP) /* * This BD is done and it's the last one, so return * "done" @@ -307,7 +311,7 @@ static void qe_uart_stop_tx(struct uart_port *port) struct uart_qe_port *qe_port = container_of(port, struct uart_qe_port, port); - clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX); + qe_clrbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX); } /* @@ -337,13 +341,13 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) /* Pick next descriptor and fill from buffer */ bdp = qe_port->tx_cur; - p = qe2cpu_addr(bdp->buf, qe_port); + p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); *p++ = port->x_char; - out_be16(&bdp->length, 1); - setbits16(&bdp->status, BD_SC_READY); + qe_iowrite16be(1, &bdp->length); + qe_setbits_be16(&bdp->status, BD_SC_READY); /* Get next BD. */ - if (in_be16(&bdp->status) & BD_SC_WRAP) + if (qe_ioread16be(&bdp->status) & BD_SC_WRAP) bdp = qe_port->tx_bd_base; else bdp++; @@ -362,10 +366,10 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) /* Pick next descriptor and fill from buffer */ bdp = qe_port->tx_cur; - while (!(in_be16(&bdp->status) & BD_SC_READY) && + while (!(qe_ioread16be(&bdp->status) & BD_SC_READY) && (xmit->tail != xmit->head)) { count = 0; - p = qe2cpu_addr(bdp->buf, qe_port); + p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); while (count < qe_port->tx_fifosize) { *p++ = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); @@ -375,11 +379,11 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) break; } - out_be16(&bdp->length, count); - setbits16(&bdp->status, BD_SC_READY); + qe_iowrite16be(count, &bdp->length); + qe_setbits_be16(&bdp->status, BD_SC_READY); /* Get next BD. */ - if (in_be16(&bdp->status) & BD_SC_WRAP) + if (qe_ioread16be(&bdp->status) & BD_SC_WRAP) bdp = qe_port->tx_bd_base; else bdp++; @@ -412,12 +416,12 @@ static void qe_uart_start_tx(struct uart_port *port) container_of(port, struct uart_qe_port, port); /* If we currently are transmitting, then just return */ - if (in_be16(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX) + if (qe_ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX) return; /* Otherwise, pump the port and start transmission */ if (qe_uart_tx_pump(qe_port)) - setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX); + qe_setbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX); } /* @@ -428,7 +432,7 @@ static void qe_uart_stop_rx(struct uart_port *port) struct uart_qe_port *qe_port = container_of(port, struct uart_qe_port, port); - clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX); + qe_clrbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX); } /* Start or stop sending break signal @@ -467,14 +471,14 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port) */ bdp = qe_port->rx_cur; while (1) { - status = in_be16(&bdp->status); + status = qe_ioread16be(&bdp->status); /* If this one is empty, then we assume we've read them all */ if (status & BD_SC_EMPTY) break; /* get number of characters, and check space in RX buffer */ - i = in_be16(&bdp->length); + i = qe_ioread16be(&bdp->length); /* If we don't have enough room in RX buffer for the entire BD, * then we try later, which will be the next RX interrupt. 
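
The in_be16()/out_be16() accessors being removed throughout this file exist only on PowerPC, which is what tied ucc_uart.c to PPC32 builds. Their qe_ioread16be()/qe_iowrite16be() replacements come from the QE core headers; as a sketch of the idea (an assumed shape, not a copy of the real header), they can be thin wrappers over the arch-neutral big-endian MMIO helpers:

#include <linux/io.h>

/* Hypothetical stand-ins mirroring the QE core accessors. */
static inline u16 my_ioread16be(const void __iomem *addr)
{
	return ioread16be(addr);	/* big-endian load on any architecture */
}

static inline void my_iowrite16be(u16 val, void __iomem *addr)
{
	iowrite16be(val, addr);		/* big-endian store on any architecture */
}

The be32_to_cpu(bdp->buf) conversions in the same hunks apply the matching swap to descriptor fields that have already been read into memory.
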
@@ -485,7 +489,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port) } /* get pointer */ - cp = qe2cpu_addr(bdp->buf, qe_port); + cp = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); /* loop through the buffer */ while (i-- > 0) { @@ -505,9 +509,10 @@ error_return: } /* This BD is ready to be used again. Clear status. get next */ - clrsetbits_be16(&bdp->status, BD_SC_BR | BD_SC_FR | BD_SC_PR | - BD_SC_OV | BD_SC_ID, BD_SC_EMPTY); - if (in_be16(&bdp->status) & BD_SC_WRAP) + qe_clrsetbits_be16(&bdp->status, + BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID, + BD_SC_EMPTY); + if (qe_ioread16be(&bdp->status) & BD_SC_WRAP) bdp = qe_port->rx_bd_base; else bdp++; @@ -564,8 +569,8 @@ static irqreturn_t qe_uart_int(int irq, void *data) u16 events; /* Clear the interrupts */ - events = in_be16(&uccp->ucce); - out_be16(&uccp->ucce, events); + events = qe_ioread16be(&uccp->ucce); + qe_iowrite16be(events, &uccp->ucce); if (events & UCC_UART_UCCE_BRKE) uart_handle_break(&qe_port->port); @@ -596,17 +601,17 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port) bdp = qe_port->rx_bd_base; qe_port->rx_cur = qe_port->rx_bd_base; for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) { - out_be16(&bdp->status, BD_SC_EMPTY | BD_SC_INTRPT); - out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port)); - out_be16(&bdp->length, 0); + qe_iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status); + qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); + qe_iowrite16be(0, &bdp->length); bd_virt += qe_port->rx_fifosize; bdp++; } /* */ - out_be16(&bdp->status, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT); - out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port)); - out_be16(&bdp->length, 0); + qe_iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status); + qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); + qe_iowrite16be(0, &bdp->length); /* Set the physical address of the host memory * buffers in the buffer descriptors, and the @@ -617,21 +622,21 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port) qe_port->tx_cur = qe_port->tx_bd_base; bdp = qe_port->tx_bd_base; for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) { - out_be16(&bdp->status, BD_SC_INTRPT); - out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port)); - out_be16(&bdp->length, 0); + qe_iowrite16be(BD_SC_INTRPT, &bdp->status); + qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); + qe_iowrite16be(0, &bdp->length); bd_virt += qe_port->tx_fifosize; bdp++; } /* Loopback requires the preamble bit to be set on the first TX BD */ #ifdef LOOPBACK - setbits16(&qe_port->tx_cur->status, BD_SC_P); + qe_setbits_be16(&qe_port->tx_cur->status, BD_SC_P); #endif - out_be16(&bdp->status, BD_SC_WRAP | BD_SC_INTRPT); - out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port)); - out_be16(&bdp->length, 0); + qe_iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status); + qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); + qe_iowrite16be(0, &bdp->length); } /* @@ -653,78 +658,74 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port) ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX); /* Program the UCC UART parameter RAM */ - out_8(&uccup->common.rbmr, UCC_BMR_GBL | UCC_BMR_BO_BE); - out_8(&uccup->common.tbmr, UCC_BMR_GBL | UCC_BMR_BO_BE); - out_be16(&uccup->common.mrblr, qe_port->rx_fifosize); - out_be16(&uccup->maxidl, 0x10); - out_be16(&uccup->brkcr, 1); - out_be16(&uccup->parec, 0); - out_be16(&uccup->frmec, 0); - out_be16(&uccup->nosec, 0); - out_be16(&uccup->brkec, 0); - out_be16(&uccup->uaddr[0], 0); - out_be16(&uccup->uaddr[1], 0); - 
out_be16(&uccup->toseq, 0); + qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr); + qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr); + qe_iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr); + qe_iowrite16be(0x10, &uccup->maxidl); + qe_iowrite16be(1, &uccup->brkcr); + qe_iowrite16be(0, &uccup->parec); + qe_iowrite16be(0, &uccup->frmec); + qe_iowrite16be(0, &uccup->nosec); + qe_iowrite16be(0, &uccup->brkec); + qe_iowrite16be(0, &uccup->uaddr[0]); + qe_iowrite16be(0, &uccup->uaddr[1]); + qe_iowrite16be(0, &uccup->toseq); for (i = 0; i < 8; i++) - out_be16(&uccup->cchars[i], 0xC000); - out_be16(&uccup->rccm, 0xc0ff); + qe_iowrite16be(0xC000, &uccup->cchars[i]); + qe_iowrite16be(0xc0ff, &uccup->rccm); /* Configure the GUMR registers for UART */ if (soft_uart) { /* Soft-UART requires a 1X multiplier for TX */ - clrsetbits_be32(&uccp->gumr_l, - UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | - UCC_SLOW_GUMR_L_RDCR_MASK, - UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 | - UCC_SLOW_GUMR_L_RDCR_16); - - clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW, - UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX); + qe_clrsetbits_be32(&uccp->gumr_l, + UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK, + UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 | UCC_SLOW_GUMR_L_RDCR_16); + + qe_clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW, + UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX); } else { - clrsetbits_be32(&uccp->gumr_l, - UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | - UCC_SLOW_GUMR_L_RDCR_MASK, - UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 | - UCC_SLOW_GUMR_L_RDCR_16); - - clrsetbits_be32(&uccp->gumr_h, - UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX, - UCC_SLOW_GUMR_H_RFW); + qe_clrsetbits_be32(&uccp->gumr_l, + UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK, + UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 | UCC_SLOW_GUMR_L_RDCR_16); + + qe_clrsetbits_be32(&uccp->gumr_h, + UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX, + UCC_SLOW_GUMR_H_RFW); } #ifdef LOOPBACK - clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK, - UCC_SLOW_GUMR_L_DIAG_LOOP); - clrsetbits_be32(&uccp->gumr_h, - UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN, - UCC_SLOW_GUMR_H_CDS); + qe_clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK, + UCC_SLOW_GUMR_L_DIAG_LOOP); + qe_clrsetbits_be32(&uccp->gumr_h, + UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN, + UCC_SLOW_GUMR_H_CDS); #endif /* Disable rx interrupts and clear all pending events. 
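
qe_clrsetbits_be32() and friends, used heavily in this hunk, package the read-modify-write needed to change one field of a live register without disturbing its neighbours. A plausible minimal implementation, assuming the same semantics as the PowerPC clrsetbits helpers they replace:

#include <linux/io.h>

static inline void my_clrsetbits_be32(void __iomem *addr, u32 clr, u32 set)
{
	/* Clear first, then set, so 'set' wins where the masks overlap. */
	iowrite32be((ioread32be(addr) & ~clr) | set, addr);
}

For example, my_clrsetbits_be32(gumr_l, UCC_SLOW_GUMR_L_MODE_MASK, UCC_SLOW_GUMR_L_MODE_UART) would switch the mode field while leaving the divider fields untouched.
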
*/ - out_be16(&uccp->uccm, 0); - out_be16(&uccp->ucce, 0xffff); - out_be16(&uccp->udsr, 0x7e7e); + qe_iowrite16be(0, &uccp->uccm); + qe_iowrite16be(0xffff, &uccp->ucce); + qe_iowrite16be(0x7e7e, &uccp->udsr); /* Initialize UPSMR */ - out_be16(&uccp->upsmr, 0); + qe_iowrite16be(0, &uccp->upsmr); if (soft_uart) { - out_be16(&uccup->supsmr, 0x30); - out_be16(&uccup->res92, 0); - out_be32(&uccup->rx_state, 0); - out_be32(&uccup->rx_cnt, 0); - out_8(&uccup->rx_bitmark, 0); - out_8(&uccup->rx_length, 10); - out_be32(&uccup->dump_ptr, 0x4000); - out_8(&uccup->rx_temp_dlst_qe, 0); - out_be32(&uccup->rx_frame_rem, 0); - out_8(&uccup->rx_frame_rem_size, 0); + qe_iowrite16be(0x30, &uccup->supsmr); + qe_iowrite16be(0, &uccup->res92); + qe_iowrite32be(0, &uccup->rx_state); + qe_iowrite32be(0, &uccup->rx_cnt); + qe_iowrite8(0, &uccup->rx_bitmark); + qe_iowrite8(10, &uccup->rx_length); + qe_iowrite32be(0x4000, &uccup->dump_ptr); + qe_iowrite8(0, &uccup->rx_temp_dlst_qe); + qe_iowrite32be(0, &uccup->rx_frame_rem); + qe_iowrite8(0, &uccup->rx_frame_rem_size); /* Soft-UART requires TX to be 1X */ - out_8(&uccup->tx_mode, - UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1); - out_be16(&uccup->tx_state, 0); - out_8(&uccup->resD4, 0); - out_be16(&uccup->resD5, 0); + qe_iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1, + &uccup->tx_mode); + qe_iowrite16be(0, &uccup->tx_state); + qe_iowrite8(0, &uccup->resD4); + qe_iowrite16be(0, &uccup->resD5); /* Set UART mode. * Enable receive and transmit. @@ -738,22 +739,19 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port) * ... * 6.Receiver must use 16x over sampling */ - clrsetbits_be32(&uccp->gumr_l, - UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | - UCC_SLOW_GUMR_L_RDCR_MASK, - UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 | - UCC_SLOW_GUMR_L_RDCR_16); + qe_clrsetbits_be32(&uccp->gumr_l, + UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK, + UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 | UCC_SLOW_GUMR_L_RDCR_16); - clrsetbits_be32(&uccp->gumr_h, - UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN, - UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX | - UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL); + qe_clrsetbits_be32(&uccp->gumr_h, + UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN, + UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL); #ifdef LOOPBACK - clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK, - UCC_SLOW_GUMR_L_DIAG_LOOP); - clrbits32(&uccp->gumr_h, UCC_SLOW_GUMR_H_CTSP | - UCC_SLOW_GUMR_H_CDS); + qe_clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK, + UCC_SLOW_GUMR_L_DIAG_LOOP); + qe_clrbits_be32(&uccp->gumr_h, + UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_CDS); #endif cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num); @@ -796,7 +794,7 @@ static int qe_uart_startup(struct uart_port *port) } /* Startup rx-int */ - setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX); + qe_setbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX); ucc_slow_enable(qe_port->us_private, COMM_DIR_RX_AND_TX); return 0; @@ -832,7 +830,7 @@ static void qe_uart_shutdown(struct uart_port *port) /* Stop uarts */ ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX); - clrbits16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX); + qe_clrbits_be16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX); /* Shut them really down and reinit buffer descriptors */ ucc_slow_graceful_stop_tx(qe_port->us_private); @@ -852,9 +850,9 @@ static void qe_uart_set_termios(struct uart_port *port, struct 
ucc_slow __iomem *uccp = qe_port->uccp; unsigned int baud; unsigned long flags; - u16 upsmr = in_be16(&uccp->upsmr); + u16 upsmr = qe_ioread16be(&uccp->upsmr); struct ucc_uart_pram __iomem *uccup = qe_port->uccup; - u16 supsmr = in_be16(&uccup->supsmr); + u16 supsmr = qe_ioread16be(&uccup->supsmr); u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */ /* Character length programmed into the mode register is the @@ -952,10 +950,10 @@ static void qe_uart_set_termios(struct uart_port *port, /* Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); - out_be16(&uccp->upsmr, upsmr); + qe_iowrite16be(upsmr, &uccp->upsmr); if (soft_uart) { - out_be16(&uccup->supsmr, supsmr); - out_8(&uccup->rx_length, char_length); + qe_iowrite16be(supsmr, &uccup->supsmr); + qe_iowrite8(char_length, &uccup->rx_length); /* Soft-UART requires a 1X multiplier for TX */ qe_setbrg(qe_port->us_info.rx_clock, baud, 16); @@ -1097,6 +1095,8 @@ static const struct uart_ops qe_uart_pops = { .verify_port = qe_uart_verify_port, }; + +#ifdef CONFIG_PPC32 /* * Obtain the SOC model number and revision level * @@ -1184,70 +1184,86 @@ static void uart_firmware_cont(const struct firmware *fw, void *context) release_firmware(fw); } -static int ucc_uart_probe(struct platform_device *ofdev) +static int soft_uart_init(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; - const unsigned int *iprop; /* Integer OF properties */ - const char *sprop; /* String OF properties */ - struct uart_qe_port *qe_port = NULL; - struct resource res; + struct qe_firmware_info *qe_fw_info; int ret; - /* - * Determine if we need Soft-UART mode - */ if (of_find_property(np, "soft-uart", NULL)) { dev_dbg(&ofdev->dev, "using Soft-UART mode\n"); soft_uart = 1; + } else { + return 0; } - /* - * If we are using Soft-UART, determine if we need to upload the - * firmware, too. - */ - if (soft_uart) { - struct qe_firmware_info *qe_fw_info; - - qe_fw_info = qe_get_firmware_info(); - - /* Check if the firmware has been uploaded. */ - if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) { - firmware_loaded = 1; - } else { - char filename[32]; - unsigned int soc; - unsigned int rev_h; - unsigned int rev_l; - - soc = soc_info(&rev_h, &rev_l); - if (!soc) { - dev_err(&ofdev->dev, "unknown CPU model\n"); - return -ENXIO; - } - sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin", - soc, rev_h, rev_l); - - dev_info(&ofdev->dev, "waiting for firmware %s\n", - filename); + qe_fw_info = qe_get_firmware_info(); - /* - * We call request_firmware_nowait instead of - * request_firmware so that the driver can load and - * initialize the ports without holding up the rest of - * the kernel. If hotplug support is enabled in the - * kernel, then we use it. - */ - ret = request_firmware_nowait(THIS_MODULE, - FW_ACTION_HOTPLUG, filename, &ofdev->dev, - GFP_KERNEL, &ofdev->dev, uart_firmware_cont); - if (ret) { - dev_err(&ofdev->dev, - "could not load firmware %s\n", - filename); - return ret; - } + /* Check if the firmware has been uploaded. 
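
The refactored soft_uart_init() keeps using request_firmware_nowait(), which returns as soon as the request is filed, so the port can finish probing while userspace supplies fsl_qe_ucode_uart_*.bin. The general shape of the asynchronous API is sketched below with hypothetical names; the continuation callback owns the blob and must release it:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

static void my_fw_cont(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware request failed\n");
		return;
	}
	/* ... upload fw->data (fw->size bytes) to the device ... */
	release_firmware(fw);	/* the callback must free the blob */
}

static int my_load_firmware(struct device *dev)
{
	/* Returns immediately; my_fw_cont() runs once the blob arrives. */
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "my_ucode.bin", dev, GFP_KERNEL,
				       dev, my_fw_cont);
}
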
*/ + if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) { + firmware_loaded = 1; + } else { + char filename[32]; + unsigned int soc; + unsigned int rev_h; + unsigned int rev_l; + + soc = soc_info(&rev_h, &rev_l); + if (!soc) { + dev_err(&ofdev->dev, "unknown CPU model\n"); + return -ENXIO; + } + sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin", + soc, rev_h, rev_l); + + dev_info(&ofdev->dev, "waiting for firmware %s\n", + filename); + + /* + * We call request_firmware_nowait instead of + * request_firmware so that the driver can load and + * initialize the ports without holding up the rest of + * the kernel. If hotplug support is enabled in the + * kernel, then we use it. + */ + ret = request_firmware_nowait(THIS_MODULE, + FW_ACTION_HOTPLUG, filename, &ofdev->dev, + GFP_KERNEL, &ofdev->dev, uart_firmware_cont); + if (ret) { + dev_err(&ofdev->dev, + "could not load firmware %s\n", + filename); + return ret; } } + return 0; +} + +#else /* !CONFIG_PPC32 */ + +static int soft_uart_init(struct platform_device *ofdev) +{ + return 0; +} + +#endif + + +static int ucc_uart_probe(struct platform_device *ofdev) +{ + struct device_node *np = ofdev->dev.of_node; + const char *sprop; /* String OF properties */ + struct uart_qe_port *qe_port = NULL; + struct resource res; + u32 val; + int ret; + + /* + * Determine if we need Soft-UART mode + */ + ret = soft_uart_init(ofdev); + if (ret) + return ret; qe_port = kzalloc(sizeof(struct uart_qe_port), GFP_KERNEL); if (!qe_port) { @@ -1270,23 +1286,20 @@ static int ucc_uart_probe(struct platform_device *ofdev) /* Get the UCC number (device ID) */ /* UCCs are numbered 1-7 */ - iprop = of_get_property(np, "cell-index", NULL); - if (!iprop) { - iprop = of_get_property(np, "device-id", NULL); - if (!iprop) { - dev_err(&ofdev->dev, "UCC is unspecified in " - "device tree\n"); + if (of_property_read_u32(np, "cell-index", &val)) { + if (of_property_read_u32(np, "device-id", &val)) { + dev_err(&ofdev->dev, "UCC is unspecified in device tree\n"); ret = -EINVAL; goto out_free; } } - if ((*iprop < 1) || (*iprop > UCC_MAX_NUM)) { - dev_err(&ofdev->dev, "no support for UCC%u\n", *iprop); + if (val < 1 || val > UCC_MAX_NUM) { + dev_err(&ofdev->dev, "no support for UCC%u\n", val); ret = -ENODEV; goto out_free; } - qe_port->ucc_num = *iprop - 1; + qe_port->ucc_num = val - 1; /* * In the future, we should not require the BRG to be specified in the @@ -1330,13 +1343,12 @@ static int ucc_uart_probe(struct platform_device *ofdev) } /* Get the port number, numbered 0-3 */ - iprop = of_get_property(np, "port-number", NULL); - if (!iprop) { + if (of_property_read_u32(np, "port-number", &val)) { dev_err(&ofdev->dev, "missing port-number in device tree\n"); ret = -EINVAL; goto out_free; } - qe_port->port.line = *iprop; + qe_port->port.line = val; if (qe_port->port.line >= UCC_MAX_UART) { dev_err(&ofdev->dev, "port-number must be 0-%u\n", UCC_MAX_UART - 1); @@ -1366,31 +1378,36 @@ static int ucc_uart_probe(struct platform_device *ofdev) } } - iprop = of_get_property(np, "brg-frequency", NULL); - if (!iprop) { + if (of_property_read_u32(np, "brg-frequency", &val)) { dev_err(&ofdev->dev, "missing brg-frequency in device tree\n"); ret = -EINVAL; goto out_np; } - if (*iprop) - qe_port->port.uartclk = *iprop; + if (val) + qe_port->port.uartclk = val; else { + if (!IS_ENABLED(CONFIG_PPC32)) { + dev_err(&ofdev->dev, + "invalid brg-frequency in device tree\n"); + ret = -EINVAL; + goto out_np; + } + /* * Older versions of U-Boot do not initialize the brg-frequency * property, so in this 
case we assume the BRG frequency is * half the QE bus frequency. */ - iprop = of_get_property(np, "bus-frequency", NULL); - if (!iprop) { + if (of_property_read_u32(np, "bus-frequency", &val)) { dev_err(&ofdev->dev, "missing QE bus-frequency in device tree\n"); ret = -EINVAL; goto out_np; } - if (*iprop) - qe_port->port.uartclk = *iprop / 2; + if (val) + qe_port->port.uartclk = val / 2; else { dev_err(&ofdev->dev, "invalid QE bus-frequency in device tree\n"); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 6f8b67e61771..6171d28331e6 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1488,7 +1488,7 @@ enum { Opt_gid, }; -static const struct fs_parameter_spec ffs_fs_param_specs[] = { +static const struct fs_parameter_spec ffs_fs_fs_parameters[] = { fsparam_bool ("no_disconnect", Opt_no_disconnect), fsparam_u32 ("rmode", Opt_rmode), fsparam_u32 ("fmode", Opt_fmode), @@ -1498,11 +1498,6 @@ static const struct fs_parameter_spec ffs_fs_param_specs[] = { {} }; -static const struct fs_parameter_description ffs_fs_fs_parameters = { - .name = "kAFS", - .specs = ffs_fs_param_specs, -}; - static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct ffs_sb_fill_data *data = fc->fs_private; @@ -1511,7 +1506,7 @@ static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param) ENTER(); - opt = fs_parse(fc, &ffs_fs_fs_parameters, param, &result); + opt = fs_parse(fc, ffs_fs_fs_parameters, param, &result); if (opt < 0) return opt; @@ -1643,7 +1638,7 @@ static struct file_system_type ffs_fs_type = { .owner = THIS_MODULE, .name = "functionfs", .init_fs_context = ffs_fs_init_fs_context, - .parameters = &ffs_fs_fs_parameters, + .parameters = ffs_fs_fs_parameters, .kill_sb = ffs_fs_kill_sb, }; MODULE_ALIAS_FS("functionfs"); diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 93f995f6cf36..7bfe365d9372 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -158,6 +158,8 @@ static void set_page_pfns(struct virtio_balloon *vb, { unsigned int i; + BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX); + /* * Set balloon pfns pointing at this page. * Note that the first pfn points at start of the page. 
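
The BUILD_BUG_ON() added to set_page_pfns() above turns a silent assumption, that one page's worth of balloon PFNs always fits the PFN array, into a compile-time error. The macro costs nothing at runtime and fails the build whenever its condition is true; a tiny sketch with made-up constants:

#include <linux/bug.h>

#define MY_RING_ENTRIES	64
#define MY_BATCH_MAX	16

static void my_queue_batch(void)
{
	/* Refuses to compile if a batch could ever overrun the ring. */
	BUILD_BUG_ON(MY_BATCH_MAX > MY_RING_ENTRIES);
	/* ... */
}
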
@@ -475,7 +477,9 @@ static int init_vqs(struct virtio_balloon *vb) names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate"; callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack; names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate"; + callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL; names[VIRTIO_BALLOON_VQ_STATS] = NULL; + callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { @@ -899,8 +903,7 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb); if (IS_ERR(vb->vb_dev_info.inode)) { err = PTR_ERR(vb->vb_dev_info.inode); - kern_unmount(balloon_mnt); - goto out_del_vqs; + goto out_kern_unmount; } vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops; #endif @@ -911,13 +914,13 @@ static int virtballoon_probe(struct virtio_device *vdev) */ if (virtqueue_get_vring_size(vb->free_page_vq) < 2) { err = -ENOSPC; - goto out_del_vqs; + goto out_iput; } vb->balloon_wq = alloc_workqueue("balloon-wq", WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0); if (!vb->balloon_wq) { err = -ENOMEM; - goto out_del_vqs; + goto out_iput; } INIT_WORK(&vb->report_free_page_work, report_free_page_func); vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; @@ -951,6 +954,12 @@ static int virtballoon_probe(struct virtio_device *vdev) out_del_balloon_wq: if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) destroy_workqueue(vb->balloon_wq); +out_iput: +#ifdef CONFIG_BALLOON_COMPACTION + iput(vb->vb_dev_info.inode); +out_kern_unmount: + kern_unmount(balloon_mnt); +#endif out_del_vqs: vdev->config->del_vqs(vdev); out_free_vb: @@ -966,6 +975,10 @@ static void remove_common(struct virtio_balloon *vb) leak_balloon(vb, vb->num_pages); update_balloon_size(vb); + /* There might be free pages that are being reported: release them. */ + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + return_free_pages_to_mm(vb, ULONG_MAX); + /* Now we reset the device so we can clean up the queues. 
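
The virtballoon_probe() changes above are a textbook goto unwind ladder: the new out_iput and out_kern_unmount labels make each failure release exactly what was acquired before it, in reverse order, where the old code leaked the anon inode and the kernel mount. The skeleton of the pattern, with hypothetical get_*/put_* pairs:

/* Hypothetical acquire/release pairs. */
int get_a(void), get_b(void), get_c(void);
void put_a(void), put_b(void);

static int my_probe(void)
{
	int err;

	err = get_a();
	if (err)
		return err;		/* nothing to undo yet */

	err = get_b();
	if (err)
		goto out_put_a;

	err = get_c();
	if (err)
		goto out_put_b;

	return 0;

out_put_b:
	put_b();	/* reverse order: last acquired, first released */
out_put_a:
	put_a();
	return err;
}
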
*/ vb->vdev->config->reset(vb->vdev); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index e09edb5c5e06..97d5725fd9a2 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -531,18 +531,9 @@ static void virtio_mmio_release_dev(struct device *_d) static int virtio_mmio_probe(struct platform_device *pdev) { struct virtio_mmio_device *vm_dev; - struct resource *mem; unsigned long magic; int rc; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) - return -EINVAL; - - if (!devm_request_mem_region(&pdev->dev, mem->start, - resource_size(mem), pdev->name)) - return -EBUSY; - vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); if (!vm_dev) return -ENOMEM; @@ -554,9 +545,9 @@ static int virtio_mmio_probe(struct platform_device *pdev) INIT_LIST_HEAD(&vm_dev->virtqueues); spin_lock_init(&vm_dev->lock); - vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); - if (vm_dev->base == NULL) - return -EFAULT; + vm_dev->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(vm_dev->base)) + return PTR_ERR(vm_dev->base); /* Check magic value */ magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index f2862f66c2ac..222d630c41fc 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -294,7 +294,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, /* Best option: one for change interrupt, one per vq. */ nvectors = 1; for (i = 0; i < nvqs; ++i) - if (callbacks[i]) + if (names[i] && callbacks[i]) ++nvectors; } else { /* Second best: one for change, shared for all vqs. */ diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h index abfe34dd760a..298d545df1a1 100644 --- a/drivers/watchdog/at91sam9_wdt.h +++ b/drivers/watchdog/at91sam9_wdt.h @@ -24,7 +24,10 @@ #define AT91_WDT_MR 0x04 /* Watchdog Mode Register */ #define AT91_WDT_WDV (0xfffUL << 0) /* Counter Value */ #define AT91_WDT_SET_WDV(x) ((x) & AT91_WDT_WDV) +#define AT91_SAM9X60_PERIODRST BIT(4) /* Period Reset */ +#define AT91_SAM9X60_RPTHRST BIT(5) /* Minimum Restart Period */ #define AT91_WDT_WDFIEN BIT(12) /* Fault Interrupt Enable */ +#define AT91_SAM9X60_WDDIS BIT(12) /* Watchdog Disable */ #define AT91_WDT_WDRSTEN BIT(13) /* Reset Processor */ #define AT91_WDT_WDRPROC BIT(14) /* Timer Restart */ #define AT91_WDT_WDDIS BIT(15) /* Watchdog Disable */ @@ -37,4 +40,22 @@ #define AT91_WDT_WDUNF BIT(0) /* Watchdog Underflow */ #define AT91_WDT_WDERR BIT(1) /* Watchdog Error */ +/* Watchdog Timer Value Register */ +#define AT91_SAM9X60_VR 0x08 + +/* Watchdog Window Level Register */ +#define AT91_SAM9X60_WLR 0x0c +/* Watchdog Period Value */ +#define AT91_SAM9X60_COUNTER (0xfffUL << 0) +#define AT91_SAM9X60_SET_COUNTER(x) ((x) & AT91_SAM9X60_COUNTER) + +/* Interrupt Enable Register */ +#define AT91_SAM9X60_IER 0x14 +/* Period Interrupt Enable */ +#define AT91_SAM9X60_PERINT BIT(0) +/* Interrupt Disable Register */ +#define AT91_SAM9X60_IDR 0x18 +/* Interrupt Status Register */ +#define AT91_SAM9X60_ISR 0x1c + #endif diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 06bd4e1a5923..672b184da875 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c @@ -369,9 +369,8 @@ static int cdns_wdt_probe(struct platform_device *pdev) return ret; platform_set_drvdata(pdev, wdt); - dev_info(dev, "Xilinx Watchdog Timer at %p with timeout 
%ds%s\n", - wdt->regs, cdns_wdt_device->timeout, - nowayout ? ", nowayout" : ""); + dev_info(dev, "Xilinx Watchdog Timer with timeout %ds%s\n", + cdns_wdt_device->timeout, nowayout ? ", nowayout" : ""); return 0; } diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c index e149e66a6ea9..47eefe072b40 100644 --- a/drivers/watchdog/da9062_wdt.c +++ b/drivers/watchdog/da9062_wdt.c @@ -11,6 +11,7 @@ #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/slab.h> +#include <linux/i2c.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/mfd/da9062/registers.h> @@ -147,12 +148,13 @@ static int da9062_wdt_restart(struct watchdog_device *wdd, unsigned long action, void *data) { struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); + struct i2c_client *client = to_i2c_client(wdt->hw->dev); int ret; - ret = regmap_write(wdt->hw->regmap, - DA9062AA_CONTROL_F, - DA9062AA_SHUTDOWN_MASK); - if (ret) + /* Don't use regmap because it is not atomic safe */ + ret = i2c_smbus_write_byte_data(client, DA9062AA_CONTROL_F, + DA9062AA_SHUTDOWN_MASK); + if (ret < 0) dev_alert(wdt->hw->dev, "Failed to shutdown (err = %d)\n", ret); @@ -212,6 +214,7 @@ static int da9062_wdt_probe(struct platform_device *pdev) watchdog_set_restart_priority(&wdt->wdtdev, 128); watchdog_set_drvdata(&wdt->wdtdev, wdt); + dev_set_drvdata(dev, &wdt->wdtdev); ret = devm_watchdog_register_device(dev, &wdt->wdtdev); if (ret < 0) @@ -220,10 +223,34 @@ static int da9062_wdt_probe(struct platform_device *pdev) return da9062_wdt_ping(&wdt->wdtdev); } +static int __maybe_unused da9062_wdt_suspend(struct device *dev) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + + if (watchdog_active(wdd)) + return da9062_wdt_stop(wdd); + + return 0; +} + +static int __maybe_unused da9062_wdt_resume(struct device *dev) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + + if (watchdog_active(wdd)) + return da9062_wdt_start(wdd); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(da9062_wdt_pm_ops, + da9062_wdt_suspend, da9062_wdt_resume); + static struct platform_driver da9062_wdt_driver = { .probe = da9062_wdt_probe, .driver = { .name = "da9062-watchdog", + .pm = &da9062_wdt_pm_ops, .of_match_table = da9062_compatible_id_table, }, }; diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index fef7c61f5555..fba21de2bbad 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -114,7 +114,15 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s) writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); - wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val); + /* + * In case users set bigger timeout value than HW can support, + * kernel(watchdog_dev.c) helps to feed watchdog before + * wdd->max_hw_heartbeat_ms + */ + if (top_s * 1000 <= wdd->max_hw_heartbeat_ms) + wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val); + else + wdd->timeout = top_s; return 0; } @@ -135,6 +143,7 @@ static int dw_wdt_start(struct watchdog_device *wdd) struct dw_wdt *dw_wdt = to_dw_wdt(wdd); dw_wdt_set_timeout(wdd, wdd->timeout); + dw_wdt_ping(&dw_wdt->wdd); dw_wdt_arm_system_reset(dw_wdt); return 0; diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index a4b71ebc8cab..f3bf3ea50e39 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -67,6 +67,7 @@ #define IT8726_ID 0x8726 /* the data sheet suggest wrongly 0x8716 */ #define IT8728_ID 0x8728 #define IT8783_ID 
0x8783 +#define IT8786_ID 0x8786 /* GPIO Configuration Registers LDN=0x07 */ #define WDTCTRL 0x71 @@ -294,6 +295,7 @@ static int __init it87_wdt_init(void) case IT8721_ID: case IT8728_ID: case IT8783_ID: + case IT8786_ID: max_units = 65535; break; case IT8705_ID: diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index 9c3d0033260d..d6a6393f609d 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -9,6 +9,9 @@ * Based on sunxi_wdt.c */ +#include <dt-bindings/reset-controller/mt2712-resets.h> +#include <dt-bindings/reset-controller/mt8183-resets.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> @@ -16,10 +19,11 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/reset-controller.h> #include <linux/types.h> #include <linux/watchdog.h> -#include <linux/delay.h> #define WDT_MAX_TIMEOUT 31 #define WDT_MIN_TIMEOUT 1 @@ -44,6 +48,9 @@ #define WDT_SWRST 0x14 #define WDT_SWRST_KEY 0x1209 +#define WDT_SWSYSRST 0x18U +#define WDT_SWSYS_RST_KEY 0x88000000 + #define DRV_NAME "mtk-wdt" #define DRV_VERSION "1.0" @@ -53,8 +60,94 @@ static unsigned int timeout; struct mtk_wdt_dev { struct watchdog_device wdt_dev; void __iomem *wdt_base; + spinlock_t lock; /* protects WDT_SWSYSRST reg */ + struct reset_controller_dev rcdev; +}; + +struct mtk_wdt_data { + int toprgu_sw_rst_num; }; +static const struct mtk_wdt_data mt2712_data = { + .toprgu_sw_rst_num = MT2712_TOPRGU_SW_RST_NUM, +}; + +static const struct mtk_wdt_data mt8183_data = { + .toprgu_sw_rst_num = MT8183_TOPRGU_SW_RST_NUM, +}; + +static int toprgu_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + unsigned int tmp; + unsigned long flags; + struct mtk_wdt_dev *data = + container_of(rcdev, struct mtk_wdt_dev, rcdev); + + spin_lock_irqsave(&data->lock, flags); + + tmp = readl(data->wdt_base + WDT_SWSYSRST); + if (assert) + tmp |= BIT(id); + else + tmp &= ~BIT(id); + tmp |= WDT_SWSYS_RST_KEY; + writel(tmp, data->wdt_base + WDT_SWSYSRST); + + spin_unlock_irqrestore(&data->lock, flags); + + return 0; +} + +static int toprgu_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return toprgu_reset_update(rcdev, id, true); +} + +static int toprgu_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return toprgu_reset_update(rcdev, id, false); +} + +static int toprgu_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = toprgu_reset_assert(rcdev, id); + if (ret) + return ret; + + return toprgu_reset_deassert(rcdev, id); +} + +static const struct reset_control_ops toprgu_reset_ops = { + .assert = toprgu_reset_assert, + .deassert = toprgu_reset_deassert, + .reset = toprgu_reset, +}; + +static int toprgu_register_reset_controller(struct platform_device *pdev, + int rst_num) +{ + int ret; + struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev); + + spin_lock_init(&mtk_wdt->lock); + + mtk_wdt->rcdev.owner = THIS_MODULE; + mtk_wdt->rcdev.nr_resets = rst_num; + mtk_wdt->rcdev.ops = &toprgu_reset_ops; + mtk_wdt->rcdev.of_node = pdev->dev.of_node; + ret = devm_reset_controller_register(&pdev->dev, &mtk_wdt->rcdev); + if (ret != 0) + dev_err(&pdev->dev, + "couldn't register wdt reset controller: %d\n", ret); + return ret; +} + static int mtk_wdt_restart(struct watchdog_device *wdt_dev, unsigned long action, void *data) { @@ -155,6 +248,7 @@ static int 
mtk_wdt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_wdt_dev *mtk_wdt; + const struct mtk_wdt_data *wdt_data; int err; mtk_wdt = devm_kzalloc(dev, sizeof(*mtk_wdt), GFP_KERNEL); @@ -190,6 +284,13 @@ static int mtk_wdt_probe(struct platform_device *pdev) dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n", mtk_wdt->wdt_dev.timeout, nowayout); + wdt_data = of_device_get_match_data(dev); + if (wdt_data) { + err = toprgu_register_reset_controller(pdev, + wdt_data->toprgu_sw_rst_num); + if (err) + return err; + } return 0; } @@ -218,7 +319,9 @@ static int mtk_wdt_resume(struct device *dev) #endif static const struct of_device_id mtk_wdt_dt_ids[] = { + { .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data }, { .compatible = "mediatek,mt6589-wdt" }, + { .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids); diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c index a494543d3ae1..eb47fe5ed280 100644 --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c @@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev) } /* check if there is pretimeout support */ - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { ret = devm_request_irq(dev, irq, qcom_wdt_isr, IRQF_TRIGGER_RISING, diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c index d193a60430b2..e5d11d6a2600 100644 --- a/drivers/watchdog/sama5d4_wdt.c +++ b/drivers/watchdog/sama5d4_wdt.c @@ -2,7 +2,7 @@ /* * Driver for Atmel SAMA5D4 Watchdog Timer * - * Copyright (C) 2015 Atmel Corporation + * Copyright (C) 2015-2019 Microchip Technology Inc. and its subsidiaries */ #include <linux/delay.h> @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/reboot.h> @@ -29,7 +30,10 @@ struct sama5d4_wdt { struct watchdog_device wdd; void __iomem *reg_base; u32 mr; + u32 ir; unsigned long last_ping; + bool need_irq; + bool sam9x60_support; }; static int wdt_timeout; @@ -78,7 +82,12 @@ static int sama5d4_wdt_start(struct watchdog_device *wdd) { struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd); - wdt->mr &= ~AT91_WDT_WDDIS; + if (wdt->sam9x60_support) { + writel_relaxed(wdt->ir, wdt->reg_base + AT91_SAM9X60_IER); + wdt->mr &= ~AT91_SAM9X60_WDDIS; + } else { + wdt->mr &= ~AT91_WDT_WDDIS; + } wdt_write(wdt, AT91_WDT_MR, wdt->mr); return 0; @@ -88,7 +97,12 @@ static int sama5d4_wdt_stop(struct watchdog_device *wdd) { struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd); - wdt->mr |= AT91_WDT_WDDIS; + if (wdt->sam9x60_support) { + writel_relaxed(wdt->ir, wdt->reg_base + AT91_SAM9X60_IDR); + wdt->mr |= AT91_SAM9X60_WDDIS; + } else { + wdt->mr |= AT91_WDT_WDDIS; + } wdt_write(wdt, AT91_WDT_MR, wdt->mr); return 0; @@ -109,6 +123,14 @@ static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd, struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd); u32 value = WDT_SEC2TICKS(timeout); + if (wdt->sam9x60_support) { + wdt_write(wdt, AT91_SAM9X60_WLR, + AT91_SAM9X60_SET_COUNTER(value)); + + wdd->timeout = timeout; + return 0; + } + wdt->mr &= ~AT91_WDT_WDV; wdt->mr |= AT91_WDT_SET_WDV(value); @@ -143,8 +165,14 @@ static const struct watchdog_ops sama5d4_wdt_ops = { static irqreturn_t sama5d4_wdt_irq_handler(int irq, void *dev_id) { struct sama5d4_wdt *wdt = platform_get_drvdata(dev_id); + u32 
reg; - if (wdt_read(wdt, AT91_WDT_SR)) { + if (wdt->sam9x60_support) + reg = wdt_read(wdt, AT91_SAM9X60_ISR); + else + reg = wdt_read(wdt, AT91_WDT_SR); + + if (reg) { pr_crit("Atmel Watchdog Software Reset\n"); emergency_restart(); pr_crit("Reboot didn't succeed\n"); @@ -157,13 +185,14 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt) { const char *tmp; - wdt->mr = AT91_WDT_WDDIS; + if (wdt->sam9x60_support) + wdt->mr = AT91_SAM9X60_WDDIS; + else + wdt->mr = AT91_WDT_WDDIS; if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) && !strcmp(tmp, "software")) - wdt->mr |= AT91_WDT_WDFIEN; - else - wdt->mr |= AT91_WDT_WDRSTEN; + wdt->need_irq = true; if (of_property_read_bool(np, "atmel,idle-halt")) wdt->mr |= AT91_WDT_WDIDLEHLT; @@ -176,21 +205,46 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt) static int sama5d4_wdt_init(struct sama5d4_wdt *wdt) { - u32 reg; + u32 reg, val; + + val = WDT_SEC2TICKS(WDT_DEFAULT_TIMEOUT); /* * When booting and resuming, the bootloader may have changed the * watchdog configuration. * If the watchdog is already running, we can safely update it. * Else, we have to disable it properly. */ - if (wdt_enabled) { - wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr); - } else { + if (!wdt_enabled) { reg = wdt_read(wdt, AT91_WDT_MR); - if (!(reg & AT91_WDT_WDDIS)) + if (wdt->sam9x60_support && (!(reg & AT91_SAM9X60_WDDIS))) + wdt_write_nosleep(wdt, AT91_WDT_MR, + reg | AT91_SAM9X60_WDDIS); + else if (!wdt->sam9x60_support && + (!(reg & AT91_WDT_WDDIS))) wdt_write_nosleep(wdt, AT91_WDT_MR, reg | AT91_WDT_WDDIS); } + + if (wdt->sam9x60_support) { + if (wdt->need_irq) + wdt->ir = AT91_SAM9X60_PERINT; + else + wdt->mr |= AT91_SAM9X60_PERIODRST; + + wdt_write(wdt, AT91_SAM9X60_IER, wdt->ir); + wdt_write(wdt, AT91_SAM9X60_WLR, AT91_SAM9X60_SET_COUNTER(val)); + } else { + wdt->mr |= AT91_WDT_SET_WDD(WDT_SEC2TICKS(MAX_WDT_TIMEOUT)); + wdt->mr |= AT91_WDT_SET_WDV(val); + + if (wdt->need_irq) + wdt->mr |= AT91_WDT_WDFIEN; + else + wdt->mr |= AT91_WDT_WDRSTEN; + } + + wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr); + return 0; } @@ -201,7 +255,6 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) struct sama5d4_wdt *wdt; void __iomem *regs; u32 irq = 0; - u32 timeout; int ret; wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); @@ -215,6 +268,8 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) wdd->min_timeout = MIN_WDT_TIMEOUT; wdd->max_timeout = MAX_WDT_TIMEOUT; wdt->last_ping = jiffies; + wdt->sam9x60_support = of_device_is_compatible(dev->of_node, + "microchip,sam9x60-wdt"); watchdog_set_drvdata(wdd, wdt); @@ -224,15 +279,19 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) wdt->reg_base = regs; - irq = irq_of_parse_and_map(dev->of_node, 0); - if (!irq) - dev_warn(dev, "failed to get IRQ from DT\n"); - ret = of_sama5d4_wdt_init(dev->of_node, wdt); if (ret) return ret; - if ((wdt->mr & AT91_WDT_WDFIEN) && irq) { + if (wdt->need_irq) { + irq = irq_of_parse_and_map(dev->of_node, 0); + if (!irq) { + dev_warn(dev, "failed to get IRQ from DT\n"); + wdt->need_irq = false; + } + } + + if (wdt->need_irq) { ret = devm_request_irq(dev, irq, sama5d4_wdt_irq_handler, IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND, pdev->name, pdev); @@ -244,11 +303,6 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) watchdog_init_timeout(wdd, wdt_timeout, dev); - timeout = WDT_SEC2TICKS(wdd->timeout); - - wdt->mr |= AT91_WDT_SET_WDD(WDT_SEC2TICKS(MAX_WDT_TIMEOUT)); - wdt->mr |= 
AT91_WDT_SET_WDV(timeout); - ret = sama5d4_wdt_init(wdt); if (ret) return ret; @@ -269,7 +323,12 @@ static int sama5d4_wdt_probe(struct platform_device *pdev) } static const struct of_device_id sama5d4_wdt_of_match[] = { - { .compatible = "atmel,sama5d4-wdt", }, + { + .compatible = "atmel,sama5d4-wdt", + }, + { + .compatible = "microchip,sam9x60-wdt", + }, { } }; MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match); diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c index a3a329011a06..25188d6bbe15 100644 --- a/drivers/watchdog/stm32_iwdg.c +++ b/drivers/watchdog/stm32_iwdg.c @@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev) watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT); watchdog_init_timeout(wdd, 0, dev); + /* + * In case of CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set + * (Means U-Boot/bootloaders leaves the watchdog running) + * When we get here we should make a decision to prevent + * any side effects before user space daemon will take care of it. + * The best option, taking into consideration that there is no + * way to read values back from hardware, is to enforce watchdog + * being run with deterministic values. + */ + if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) { + ret = stm32_iwdg_start(wdd); + if (ret) + return ret; + + /* Make sure the watchdog is serviced */ + set_bit(WDOG_HW_RUNNING, &wdd->status); + } + ret = devm_watchdog_register_device(dev, wdd); if (ret) return ret; diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 21e8085b848b..861daf4f37b2 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -147,6 +147,25 @@ int watchdog_init_timeout(struct watchdog_device *wdd, } EXPORT_SYMBOL_GPL(watchdog_init_timeout); +static int watchdog_reboot_notifier(struct notifier_block *nb, + unsigned long code, void *data) +{ + struct watchdog_device *wdd; + + wdd = container_of(nb, struct watchdog_device, reboot_nb); + if (code == SYS_DOWN || code == SYS_HALT) { + if (watchdog_active(wdd)) { + int ret; + + ret = wdd->ops->stop(wdd); + if (ret) + return NOTIFY_BAD; + } + } + + return NOTIFY_DONE; +} + static int watchdog_restart_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -235,6 +254,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd) } } + if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) { + wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; + + ret = register_reboot_notifier(&wdd->reboot_nb); + if (ret) { + pr_err("watchdog%d: Cannot register reboot notifier (%d)\n", + wdd->id, ret); + watchdog_dev_unregister(wdd); + ida_simple_remove(&watchdog_ida, id); + return ret; + } + } + if (wdd->ops->restart) { wdd->restart_nb.notifier_call = watchdog_restart_notifier; @@ -289,6 +321,9 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd) if (wdd->ops->restart) unregister_restart_handler(&wdd->restart_nb); + if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) + unregister_reboot_notifier(&wdd->reboot_nb); + watchdog_dev_unregister(wdd); ida_simple_remove(&watchdog_ida, wdd->id); } diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 4b2a85438478..8b5c742f24e8 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -38,7 +38,6 @@ #include <linux/miscdevice.h> /* For handling misc devices */ #include <linux/module.h> /* For module stuff/... 
*/ #include <linux/mutex.h> /* For mutexes */ -#include <linux/reboot.h> /* For reboot notifier */ #include <linux/slab.h> /* For memory functions */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/watchdog.h> /* For watchdog specific items */ @@ -1097,25 +1096,6 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd) put_device(&wd_data->dev); } -static int watchdog_reboot_notifier(struct notifier_block *nb, - unsigned long code, void *data) -{ - struct watchdog_device *wdd; - - wdd = container_of(nb, struct watchdog_device, reboot_nb); - if (code == SYS_DOWN || code == SYS_HALT) { - if (watchdog_active(wdd)) { - int ret; - - ret = wdd->ops->stop(wdd); - if (ret) - return NOTIFY_BAD; - } - } - - return NOTIFY_DONE; -} - /* * watchdog_dev_register: register a watchdog device * @wdd: watchdog device * @@ -1134,22 +1114,8 @@ int watchdog_dev_register(struct watchdog_device *wdd) return ret; ret = watchdog_register_pretimeout(wdd); - if (ret) { + if (ret) watchdog_cdev_unregister(wdd); - return ret; - } - - if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) { - wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; - - ret = devm_register_reboot_notifier(&wdd->wd_data->dev, - &wdd->reboot_nb); - if (ret) { - pr_err("watchdog%d: Cannot register reboot notifier (%d)\n", - wdd->id, ret); - watchdog_dev_unregister(wdd); - } - } return ret; }
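
The last two hunks move stop-on-reboot handling from watchdog_dev.c into watchdog_core.c, registering directly on the reboot notifier chain instead of through the devm variant, and pairing it with an explicit unregister_reboot_notifier() in __watchdog_unregister_device(). The notifier API itself is small; a minimal standalone sketch with hypothetical names:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/reboot.h>

static int my_reboot_notify(struct notifier_block *nb,
			    unsigned long code, void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		pr_info("quiescing hardware before shutdown\n");
	return NOTIFY_DONE;
}

static struct notifier_block my_reboot_nb = {
	.notifier_call = my_reboot_notify,
};

static int my_setup(void)
{
	return register_reboot_notifier(&my_reboot_nb);
}

static void my_teardown(void)
{
	/* Must stay paired with registration, as the core now does. */
	unregister_reboot_notifier(&my_reboot_nb);
}
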