author     Linus Torvalds <torvalds@linux-foundation.org>   2020-08-05 11:43:47 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-08-05 11:43:47 -0700
commit     1785d116124fc33f2c265243f3f59da3dc2a2576 (patch)
tree       459394cef69ca6c4151606a8fe360be791a45f4a /drivers
parent     060a72a268577cf27733d9e8eb03b3ca427f45e6 (diff)
parent     22362aa30bad6f03b5bcbbeee3cdc61950d40086 (diff)
download   linux-1785d116124fc33f2c265243f3f59da3dc2a2576.tar.bz2
Merge tag 'char-misc-5.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the large set of char and misc and other driver subsystem
  patches for 5.9-rc1. Lots of new driver submissions in here, and
  cleanups and features for existing drivers.

  Highlights are:
   - habanalabs driver updates
   - coresight driver updates
   - nvmem driver updates
   - huge number of "W=1" build warning cleanups from Lee Jones
   - dyndbg updates
   - virtbox driver fixes and updates
   - soundwire driver updates
   - mei driver updates
   - phy driver updates
   - fpga driver updates
   - lots of smaller individual misc/char driver cleanups and fixes

  Full details are in the shortlog.

  All of these have been in linux-next with no reported issues"

* tag 'char-misc-5.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (322 commits)
  habanalabs: remove unused but set variable 'ctx_asid'
  nvmem: qcom-spmi-sdam: Enable multiple devices
  dt-bindings: nvmem: SID: add binding for A100's SID controller
  nvmem: update Kconfig description
  nvmem: qfprom: Add fuse blowing support
  dt-bindings: nvmem: Add properties needed for blowing fuses
  dt-bindings: nvmem: qfprom: Convert to yaml
  nvmem: qfprom: use NVMEM_DEVID_AUTO for multiple instances
  nvmem: core: add support to auto devid
  nvmem: core: Add nvmem_cell_read_u8()
  nvmem: core: Grammar fixes for help text
  nvmem: sc27xx: add sc2730 efuse support
  nvmem: Enforce nvmem stride in the sysfs interface
  MAINTAINERS: Add git tree for NVMEM FRAMEWORK
  nvmem: sprd: Fix return value of sprd_efuse_probe()
  drivers: android: Fix the SPDX comment style
  drivers: android: Fix a variable declaration coding style issue
  drivers: android: Remove braces for a single statement if-else block
  drivers: android: Remove the use of else after return
  drivers: android: Fix a variable declaration coding style issue
  ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/android/binder.c23
-rw-r--r--drivers/android/binder_alloc.c1
-rw-r--r--drivers/android/binderfs.c3
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c57
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c30
-rw-r--r--drivers/bus/fsl-mc/mc-io.c2
-rw-r--r--drivers/bus/fsl-mc/mc-sys.c4
-rw-r--r--drivers/char/Kconfig6
-rw-r--r--drivers/char/mwave/smapi.c4
-rw-r--r--drivers/char/raw.c8
-rw-r--r--drivers/char/ttyprintk.c2
-rw-r--r--drivers/char/virtio_console.c8
-rw-r--r--drivers/firmware/stratix10-rsu.c170
-rw-r--r--drivers/firmware/stratix10-svc.c17
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c19
-rw-r--r--drivers/fpga/dfl-afu-error.c17
-rw-r--r--drivers/fpga/dfl-afu-main.c32
-rw-r--r--drivers/fpga/dfl-fme-error.c18
-rw-r--r--drivers/fpga/dfl-fme-main.c6
-rw-r--r--drivers/fpga/dfl-pci.c78
-rw-r--r--drivers/fpga/dfl.c313
-rw-r--r--drivers/fpga/dfl.h63
-rw-r--r--drivers/fpga/fpga-bridge.c6
-rw-r--r--drivers/fpga/fpga-mgr.c4
-rw-r--r--drivers/fpga/xilinx-spi.c61
-rw-r--r--drivers/greybus/es2.c2
-rw-r--r--drivers/greybus/interface.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c17
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c68
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c13
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h1
-rw-r--r--drivers/hwtracing/coresight/coresight.c166
-rw-r--r--drivers/interconnect/core.c16
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/cardreader/Makefile2
-rw-r--r--drivers/misc/cardreader/rtl8411.c8
-rw-r--r--drivers/misc/cardreader/rts5209.c5
-rw-r--r--drivers/misc/cardreader/rts5227.c5
-rw-r--r--drivers/misc/cardreader/rts5228.c747
-rw-r--r--drivers/misc/cardreader/rts5228.h168
-rw-r--r--drivers/misc/cardreader/rts5229.c5
-rw-r--r--drivers/misc/cardreader/rts5249.c28
-rw-r--r--drivers/misc/cardreader/rts5260.c23
-rw-r--r--drivers/misc/cardreader/rts5261.c32
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c129
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h5
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c2
-rw-r--r--drivers/misc/cb710/core.c28
-rw-r--r--drivers/misc/cb710/sgbuf2.c1
-rw-r--r--drivers/misc/cxl/flash.c4
-rw-r--r--drivers/misc/cxl/hcalls.c42
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/cxl/vphb.c4
-rw-r--r--drivers/misc/echo/echo.c6
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c4
-rw-r--r--drivers/misc/enclosure.c8
-rw-r--r--drivers/misc/genwqe/card_base.c32
-rw-r--r--drivers/misc/genwqe/card_ddcb.c20
-rw-r--r--drivers/misc/genwqe/card_debugfs.c2
-rw-r--r--drivers/misc/genwqe/card_dev.c24
-rw-r--r--drivers/misc/genwqe/card_sysfs.c8
-rw-r--r--drivers/misc/genwqe/card_utils.c30
-rw-r--r--drivers/misc/habanalabs/Makefile11
-rw-r--r--drivers/misc/habanalabs/common/Makefile7
-rw-r--r--drivers/misc/habanalabs/common/asid.c (renamed from drivers/misc/habanalabs/asid.c)0
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c (renamed from drivers/misc/habanalabs/command_buffer.c)82
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c (renamed from drivers/misc/habanalabs/command_submission.c)92
-rw-r--r--drivers/misc/habanalabs/common/context.c (renamed from drivers/misc/habanalabs/context.c)39
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c (renamed from drivers/misc/habanalabs/debugfs.c)2
-rw-r--r--drivers/misc/habanalabs/common/device.c (renamed from drivers/misc/habanalabs/device.c)88
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c (renamed from drivers/misc/habanalabs/firmware_if.c)104
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h (renamed from drivers/misc/habanalabs/habanalabs.h)172
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c (renamed from drivers/misc/habanalabs/habanalabs_drv.c)1
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c (renamed from drivers/misc/habanalabs/habanalabs_ioctl.c)24
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c (renamed from drivers/misc/habanalabs/hw_queue.c)165
-rw-r--r--drivers/misc/habanalabs/common/hwmon.c (renamed from drivers/misc/habanalabs/hwmon.c)0
-rw-r--r--drivers/misc/habanalabs/common/irq.c (renamed from drivers/misc/habanalabs/irq.c)38
-rw-r--r--drivers/misc/habanalabs/common/memory.c (renamed from drivers/misc/habanalabs/memory.c)5
-rw-r--r--drivers/misc/habanalabs/common/mmu.c (renamed from drivers/misc/habanalabs/mmu.c)3
-rw-r--r--drivers/misc/habanalabs/common/pci.c (renamed from drivers/misc/habanalabs/pci.c)151
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c (renamed from drivers/misc/habanalabs/sysfs.c)3
-rw-r--r--drivers/misc/habanalabs/gaudi/Makefile2
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c909
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h24
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c12
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c2
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c5
-rw-r--r--drivers/misc/habanalabs/goya/Makefile2
-rw-r--r--drivers/misc/habanalabs/goya/goya.c196
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h24
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c15
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c2
-rw-r--r--drivers/misc/habanalabs/include/common/armcp_if.h (renamed from drivers/misc/habanalabs/include/armcp_if.h)14
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h (renamed from drivers/misc/habanalabs/include/hl_boot_if.h)14
-rw-r--r--drivers/misc/habanalabs/include/common/qman_if.h (renamed from drivers/misc/habanalabs/include/qman_if.h)0
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h21
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h3
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_packets.h4
-rw-r--r--drivers/misc/hpilo.c11
-rw-r--r--drivers/misc/hpilo.h22
-rw-r--r--drivers/misc/ibmasm/command.c6
-rw-r--r--drivers/misc/ibmasm/dot_command.c6
-rw-r--r--drivers/misc/ibmasm/event.c4
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c2
-rw-r--r--drivers/misc/ibmvmc.c6
-rw-r--r--drivers/misc/lattice-ecp3-config.c19
-rw-r--r--drivers/misc/lkdtm/bugs.c53
-rw-r--r--drivers/misc/lkdtm/heap.c9
-rw-r--r--drivers/misc/lkdtm/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm/perms.c22
-rw-r--r--drivers/misc/lkdtm/usercopy.c7
-rw-r--r--drivers/misc/mei/Kconfig2
-rw-r--r--drivers/misc/mei/bus-fixup.c23
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/client.c8
-rw-r--r--drivers/misc/mei/hbm.c74
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c4
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw-me.c66
-rw-r--r--drivers/misc/mei/hw-me.h9
-rw-r--r--drivers/misc/mei/hw-txe.c5
-rw-r--r--drivers/misc/mei/hw.h8
-rw-r--r--drivers/misc/mei/main.c31
-rw-r--r--drivers/misc/mei/mei_dev.h4
-rw-r--r--drivers/misc/mei/pci-me.c10
-rw-r--r--drivers/misc/mic/card/mic_debugfs.c10
-rw-r--r--drivers/misc/mic/cosm/cosm_debugfs.c4
-rw-r--r--drivers/misc/mic/cosm/cosm_main.c1
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c8
-rw-r--r--drivers/misc/mic/host/mic_intr.c4
-rw-r--r--drivers/misc/mic/host/mic_main.c1
-rw-r--r--drivers/misc/mic/host/mic_x100.c13
-rw-r--r--drivers/misc/mic/host/mic_x100.h9
-rw-r--r--drivers/misc/mic/scif/scif_api.c6
-rw-r--r--drivers/misc/mic/scif/scif_dma.c3
-rw-r--r--drivers/misc/mic/scif/scif_epd.c9
-rw-r--r--drivers/misc/mic/scif/scif_fence.c34
-rw-r--r--drivers/misc/mic/scif/scif_nm.c17
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c18
-rw-r--r--drivers/misc/mic/scif/scif_ports.c9
-rw-r--r--drivers/misc/mic/scif/scif_rma.c12
-rw-r--r--drivers/misc/mic/vop/vop_main.c9
-rw-r--r--drivers/misc/ocxl/config.c18
-rw-r--r--drivers/misc/pch_phub.c57
-rw-r--r--drivers/misc/phantom.c20
-rw-r--r--drivers/misc/pti.c16
-rw-r--r--drivers/misc/sgi-xp/xp_main.c4
-rw-r--r--drivers/misc/sram-exec.c2
-rw-r--r--drivers/misc/ti-st/st_core.c79
-rw-r--r--drivers/misc/ti-st/st_kim.c71
-rw-r--r--drivers/misc/tifm_7xx1.c30
-rw-r--r--drivers/misc/uacce/uacce.c9
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mux/adgs1408.c6
-rw-r--r--drivers/nvmem/Kconfig3
-rw-r--r--drivers/nvmem/core.c43
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c4
-rw-r--r--drivers/nvmem/qfprom.c315
-rw-r--r--drivers/nvmem/sc27xx-efuse.c27
-rw-r--r--drivers/nvmem/sprd-efuse.c4
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile17
-rw-r--r--drivers/phy/allwinner/Kconfig2
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c3
-rw-r--r--drivers/phy/allwinner/phy-sun6i-mipi-dphy.c4
-rw-r--r--drivers/phy/broadcom/Kconfig8
-rw-r--r--drivers/phy/broadcom/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm63xx-usbh.c457
-rw-r--r--drivers/phy/cadence/phy-cadence-salvo.c2
-rw-r--r--drivers/phy/marvell/phy-armada38x-comphy.c45
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c2
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c3
-rw-r--r--drivers/phy/phy-core.c5
-rw-r--r--drivers/phy/phy-xgene.c2
-rw-r--r--drivers/phy/qualcomm/Kconfig34
-rw-r--r--drivers/phy/qualcomm/Makefile4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c571
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c510
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c85
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h131
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c172
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h168
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c226
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h226
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c648
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c63
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c2
-rw-r--r--drivers/phy/samsung/Kconfig17
-rw-r--r--drivers/phy/samsung/Makefile1
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c4
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c4
-rw-r--r--drivers/phy/samsung/phy-exynos-pcie.c2
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c16
-rw-r--r--drivers/phy/samsung/phy-exynos7-ufs.h81
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c366
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.h139
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c2
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c4
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c11
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c5
-rw-r--r--drivers/phy/xilinx/Kconfig13
-rw-r--r--drivers/phy/xilinx/Makefile3
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c993
-rw-r--r--drivers/soundwire/Makefile10
-rw-r--r--drivers/soundwire/bus.c130
-rw-r--r--drivers/soundwire/bus_type.c19
-rw-r--r--drivers/soundwire/cadence_master.c70
-rw-r--r--drivers/soundwire/cadence_master.h4
-rw-r--r--drivers/soundwire/intel.c549
-rw-r--r--drivers/soundwire/intel.h22
-rw-r--r--drivers/soundwire/intel_init.c356
-rw-r--r--drivers/soundwire/qcom.c4
-rw-r--r--drivers/soundwire/stream.c98
-rw-r--r--drivers/uio/uio_dmem_genirq.c19
-rw-r--r--drivers/uio/uio_pdrv_genirq.c24
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c266
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h23
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c1
232 files changed, 9149 insertions, 3874 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f50c5f182bb5..f936530a19b0 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1969,9 +1969,8 @@ static void binder_send_failed_reply(struct binder_transaction *t,
binder_thread_dec_tmpref(target_thread);
binder_free_transaction(t);
return;
- } else {
- __release(&target_thread->proc->inner_lock);
}
+ __release(&target_thread->proc->inner_lock);
next = t->from_parent;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2760,11 +2759,10 @@ static bool binder_proc_transaction(struct binder_transaction *t,
binder_node_lock(node);
if (oneway) {
BUG_ON(thread);
- if (node->has_async_transaction) {
+ if (node->has_async_transaction)
pending_async = true;
- } else {
+ else
node->has_async_transaction = true;
- }
}
binder_inner_proc_lock(proc);
@@ -2982,6 +2980,12 @@ static void binder_transaction(struct binder_proc *proc,
goto err_dead_binder;
}
e->to_node = target_node->debug_id;
+ if (WARN_ON(proc == target_proc)) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_invalid_target_handle;
+ }
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@@ -3635,10 +3639,17 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_node *ctx_mgr_node;
mutex_lock(&context->context_mgr_node_lock);
ctx_mgr_node = context->binder_context_mgr_node;
- if (ctx_mgr_node)
+ if (ctx_mgr_node) {
+ if (ctx_mgr_node->proc == proc) {
+ binder_user_error("%d:%d context manager tried to acquire desc 0\n",
+ proc->pid, thread->pid);
+ mutex_unlock(&context->context_mgr_node_lock);
+ return -EINVAL;
+ }
ret = binder_inc_ref_for_node(
proc, ctx_mgr_node,
strong, NULL, &rdata);
+ }
mutex_unlock(&context->context_mgr_node_lock);
}
if (ret)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index cbe6aa77d50d..69609696a843 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -547,6 +547,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
{
struct binder_buffer *prev, *next = NULL;
bool to_free = true;
+
BUG_ON(alloc->buffers.next == &buffer->entry);
prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7cf566aafe1f..7b76fefde3f8 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler_types.h>
#include <linux/errno.h>
@@ -351,6 +351,7 @@ static const struct super_operations binderfs_super_ops = {
static inline bool is_binderfs_control_device(const struct dentry *dentry)
{
struct binderfs_info *info = dentry->d_sb->s_fs_info;
+
return info->control_dentry == dentry;
}
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 189bff2115a8..2a473c09bc33 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -27,7 +27,16 @@ static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+}
+static bool fsl_mc_obj_desc_is_allocatable(struct fsl_mc_obj_desc *obj)
+{
+ if (strcmp(obj->type, "dpmcp") == 0 ||
+ strcmp(obj->type, "dpcon") == 0 ||
+ strcmp(obj->type, "dpbp") == 0)
+ return true;
+ else
+ return false;
}
static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
@@ -150,6 +159,27 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
}
}
+static void fsl_mc_obj_device_add(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ struct fsl_mc_device *child_dev;
+
+ /*
+ * Check if device is already known to Linux:
+ */
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
+ put_device(&child_dev->dev);
+ } else {
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+ &child_dev);
+ if (error < 0)
+ return;
+ }
+}
+
/**
* dprc_add_new_devices - Adds devices to the logical bus for a DPRC
*
@@ -166,30 +196,23 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
struct fsl_mc_obj_desc *obj_desc_array,
int num_child_objects_in_mc)
{
- int error;
int i;
+ /* probe the allocable objects first */
for (i = 0; i < num_child_objects_in_mc; i++) {
- struct fsl_mc_device *child_dev;
struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
- if (strlen(obj_desc->type) == 0)
- continue;
+ if (strlen(obj_desc->type) > 0 &&
+ fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
- /*
- * Check if device is already known to Linux:
- */
- child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
- if (child_dev) {
- check_plugged_state_change(child_dev, obj_desc);
- put_device(&child_dev->dev);
- continue;
- }
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
- error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
- &child_dev);
- if (error < 0)
- continue;
+ if (strlen(obj_desc->type) > 0 &&
+ !fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
}
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 324d49d6df89..b69794e7364d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -213,6 +213,31 @@ struct device_type fsl_mc_bus_dpseci_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
+struct device_type fsl_mc_bus_dpdmux_type = {
+ .name = "fsl_mc_bus_dpdmux"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
+
+struct device_type fsl_mc_bus_dpdcei_type = {
+ .name = "fsl_mc_bus_dpdcei"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
+
+struct device_type fsl_mc_bus_dpaiop_type = {
+ .name = "fsl_mc_bus_dpaiop"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
+
+struct device_type fsl_mc_bus_dpci_type = {
+ .name = "fsl_mc_bus_dpci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
+
+struct device_type fsl_mc_bus_dpdmai_type = {
+ .name = "fsl_mc_bus_dpdmai"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -229,6 +254,11 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
{ &fsl_mc_bus_dpseci_type, "dpseci" },
+ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
+ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
+ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
+ { &fsl_mc_bus_dpci_type, "dpci" },
+ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
{ NULL, NULL }
};
int i;
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 6ae48ad80409..a30b53f1d87d 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -82,7 +82,7 @@ int __must_check fsl_create_mc_io(struct device *dev,
mc_io->portal_phys_addr = mc_portal_phys_addr;
mc_io->portal_size = mc_portal_size;
if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_init(&mc_io->spinlock);
+ raw_spin_lock_init(&mc_io->spinlock);
else
mutex_init(&mc_io->mutex);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index 3221a7fbaf0a..85a0225db522 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -251,7 +251,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+ raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags);
else
mutex_lock(&mc_io->mutex);
@@ -287,7 +287,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
error = 0;
common_exit:
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+ raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
else
mutex_unlock(&mc_io->mutex);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 98c3a5d8003e..b1bd336761b1 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -38,7 +38,7 @@ config PRINTER
box (as opposed to using a serial printer; if the connector at the
printer has 9 or 25 holes ["female"], then it's serial), say Y.
Also read the Printing-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ <https://www.tldp.org/docs.html#howto>.
It is possible to share one parallel port among several devices
(e.g. printer and ZIP drive) and it is safe to compile the
@@ -201,7 +201,7 @@ config DTLK
depends on ISA
help
This driver is for the DoubleTalk PC, a speech synthesizer
- manufactured by RC Systems (<http://www.rcsys.com/>). It is also
+ manufactured by RC Systems (<https://www.rcsys.com/>). It is also
called the `internal DoubleTalk'.
To compile this driver as a module, choose M here: the
@@ -237,7 +237,7 @@ config APPLICOM
This driver provides the kernel-side support for the intelligent
fieldbus cards made by Applicom International. More information
about these cards can be found on the WWW at the address
- <http://www.applicom-int.com/>, or by email from David Woodhouse
+ <https://www.applicom-int.com/>, or by email from David Woodhouse
<dwmw2@infradead.org>.
To compile this driver as a module, choose M here: the
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index 691f5898bb32..f8d79d393b69 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -126,7 +126,7 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
{
- int bRC = -EIO;
+ int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
static const unsigned short ausDspBases[] = {
0x0030, 0x4E30, 0x8E30, 0xCE30,
@@ -497,7 +497,7 @@ exit_smapi_request_error:
int smapi_set_DSP_power_state(bool bOn)
{
- int bRC = -EIO;
+ int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
unsigned short usPowerFunction;
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 3484e9145aea..380bf518338e 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -37,7 +37,7 @@ static struct raw_device_data *raw_devices;
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */
-static int max_raw_minors = MAX_RAW_MINORS;
+static int max_raw_minors = CONFIG_MAX_RAW_DEVS;
module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
@@ -317,9 +317,9 @@ static int __init raw_init(void)
int ret;
if (max_raw_minors < 1 || max_raw_minors > 65536) {
- printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
- " between 1 and 65536), using %d\n", MAX_RAW_MINORS);
- max_raw_minors = MAX_RAW_MINORS;
+ pr_warn("raw: invalid max_raw_minors (must be between 1 and 65536), using %d\n",
+ CONFIG_MAX_RAW_DEVS);
+ max_raw_minors = CONFIG_MAX_RAW_DEVS;
}
raw_devices = vzalloc(array_size(max_raw_minors,
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 56db949a7b70..6a0059e508e3 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -172,7 +172,7 @@ static struct tty_driver *ttyprintk_driver;
static int __init ttyprintk_init(void)
{
- int ret = -ENOMEM;
+ int ret;
spin_lock_init(&tpk_port.spinlock);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ca691bce9791..a2da8f768b94 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2112,18 +2112,18 @@ fail:
return err;
}
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);
-static unsigned int features[] = {
+static const unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
VIRTIO_CONSOLE_F_MULTIPORT,
};
-static struct virtio_device_id rproc_serial_id_table[] = {
+static const struct virtio_device_id rproc_serial_id_table[] = {
#if IS_ENABLED(CONFIG_REMOTEPROC)
{ VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
#endif
@@ -2131,7 +2131,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
};
MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);
-static unsigned int rproc_serial_features[] = {
+static const unsigned int rproc_serial_features[] = {
};
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index 4379475c99ed..9378075d04e9 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -20,10 +20,16 @@
#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF0_MASK GENMASK_ULL(31, 0)
+#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
+#define RSU_DCMF3_MASK GENMASK_ULL(63, 32)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
-#define INVALID_RETRY_COUNTER 0xFFFFFFFF
+#define INVALID_RETRY_COUNTER 0xFF
+#define INVALID_DCMF_VERSION 0xFF
+
typedef void (*rsu_callback)(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data);
@@ -35,11 +41,16 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client,
* @lock: a mutex to protect callback completion state
* @status.current_image: address of image currently running in flash
* @status.fail_image: address of failed image in flash
- * @status.version: the version number of RSU firmware
+ * @status.version: the interface version number of RSU firmware
* @status.state: the state of RSU system
* @status.error_details: error code
* @status.error_location: the error offset inside the image that failed
+ * @dcmf_version.dcmf0: Quartus dcmf0 version
+ * @dcmf_version.dcmf1: Quartus dcmf1 version
+ * @dcmf_version.dcmf2: Quartus dcmf2 version
+ * @dcmf_version.dcmf3: Quartus dcmf3 version
* @retry_counter: the current image's retry counter
+ * @max_retry: the preset max retry value
*/
struct stratix10_rsu_priv {
struct stratix10_svc_chan *chan;
@@ -54,7 +65,16 @@ struct stratix10_rsu_priv {
unsigned int error_details;
unsigned int error_location;
} status;
+
+ struct {
+ unsigned int dcmf0;
+ unsigned int dcmf1;
+ unsigned int dcmf2;
+ unsigned int dcmf3;
+ } dcmf_version;
+
unsigned int retry_counter;
+ unsigned int max_retry;
};
/**
@@ -109,7 +129,7 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "Secure FW doesn't support notify\n");
+ dev_warn(client->dev, "FW doesn't support notify\n");
else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
@@ -119,7 +139,7 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
/**
* rsu_retry_callback() - Callback from Intel service layer for getting
- * the current image's retry counter from firmware
+ * the current image's retry counter from the firmware
* @client: pointer to client
* @data: pointer to callback data structure
*
@@ -136,7 +156,7 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "Secure FW doesn't support retry\n");
+ dev_warn(client->dev, "FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
BIT(data->status));
@@ -145,6 +165,57 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
}
/**
+ * rsu_max_retry_callback() - Callback from Intel service layer for getting
+ * the max retry value from the firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for max retry.
+ */
+static void rsu_max_retry_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned int *max_retry = (unsigned int *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK))
+ priv->max_retry = *max_retry;
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
+ dev_warn(client->dev, "FW doesn't support max retry\n");
+ else
+ dev_err(client->dev, "Failed to get max retry %lu\n",
+ BIT(data->status));
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_dcmf_version_callback() - Callback from Intel service layer for getting
+ * the DCMF version
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for DCMF version number
+ */
+static void rsu_dcmf_version_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long long *value1 = (unsigned long long *)data->kaddr1;
+ unsigned long long *value2 = (unsigned long long *)data->kaddr2;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->dcmf_version.dcmf0 = FIELD_GET(RSU_DCMF0_MASK, *value1);
+ priv->dcmf_version.dcmf1 = FIELD_GET(RSU_DCMF1_MASK, *value1);
+ priv->dcmf_version.dcmf2 = FIELD_GET(RSU_DCMF2_MASK, *value2);
+ priv->dcmf_version.dcmf3 = FIELD_GET(RSU_DCMF3_MASK, *value2);
+ } else
+ dev_err(client->dev, "failed to get DCMF version\n");
+
+ complete(&priv->completion);
+}
+
+/**
* rsu_send_msg() - send a message to Intel service layer
* @priv: pointer to rsu private data
* @command: RSU status or update command
@@ -282,6 +353,61 @@ static ssize_t retry_counter_show(struct device *dev,
return sprintf(buf, "0x%08x\n", priv->retry_counter);
}
+static ssize_t max_retry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->max_retry);
+}
+
+static ssize_t dcmf0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf0);
+}
+
+static ssize_t dcmf1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf1);
+}
+
+static ssize_t dcmf2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf2);
+}
+
+static ssize_t dcmf3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf3);
+}
+
static ssize_t reboot_image_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -290,7 +416,7 @@ static ssize_t reboot_image_store(struct device *dev,
unsigned long address;
int ret;
- if (priv == 0)
+ if (!priv)
return -ENODEV;
ret = kstrtoul(buf, 0, &address);
@@ -315,7 +441,7 @@ static ssize_t notify_store(struct device *dev,
unsigned long status;
int ret;
- if (priv == 0)
+ if (!priv)
return -ENODEV;
ret = kstrtoul(buf, 0, &status);
@@ -353,6 +479,11 @@ static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(error_location);
static DEVICE_ATTR_RO(error_details);
static DEVICE_ATTR_RO(retry_counter);
+static DEVICE_ATTR_RO(max_retry);
+static DEVICE_ATTR_RO(dcmf0);
+static DEVICE_ATTR_RO(dcmf1);
+static DEVICE_ATTR_RO(dcmf2);
+static DEVICE_ATTR_RO(dcmf3);
static DEVICE_ATTR_WO(reboot_image);
static DEVICE_ATTR_WO(notify);
@@ -364,6 +495,11 @@ static struct attribute *rsu_attrs[] = {
&dev_attr_error_location.attr,
&dev_attr_error_details.attr,
&dev_attr_retry_counter.attr,
+ &dev_attr_max_retry.attr,
+ &dev_attr_dcmf0.attr,
+ &dev_attr_dcmf1.attr,
+ &dev_attr_dcmf2.attr,
+ &dev_attr_dcmf3.attr,
&dev_attr_reboot_image.attr,
&dev_attr_notify.attr,
NULL
@@ -391,6 +527,11 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
priv->status.version = 0;
priv->status.state = 0;
priv->retry_counter = INVALID_RETRY_COUNTER;
+ priv->dcmf_version.dcmf0 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf1 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION;
+ priv->max_retry = INVALID_RETRY_COUNTER;
mutex_init(&priv->lock);
priv->chan = stratix10_svc_request_channel_byname(&priv->client,
@@ -412,12 +553,27 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
+ /* get DCMF version from firmware */
+ ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_VERSION,
+ 0, rsu_dcmf_version_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting DCMF version %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
if (ret) {
dev_err(dev, "Error, getting RSU retry %i\n", ret);
stratix10_svc_free_channel(priv->chan);
}
+ ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0,
+ rsu_max_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU max retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
return ret;
}
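
[Editor's note] The attributes added above (max_retry and dcmf0 through dcmf3) are plain read-only sysfs files in the rsu_attrs group of the RSU platform device, each formatted by sprintf() as "0x%08x\n". A minimal userspace sketch for reading one of them follows; the sysfs path assumes the device instance is named stratix10-rsu.0, which is an illustration-only assumption, not something stated in this patch.

/*
 * Hedged sketch: read the new max_retry attribute exposed by the diff above.
 * The sysfs path assumes a platform device named "stratix10-rsu.0"; adjust
 * for the actual device instance on the target system.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_retry = 0;
	FILE *f = fopen("/sys/devices/platform/stratix10-rsu.0/max_retry", "r");

	if (!f)
		return 1;

	/* max_retry_show() prints the value as "0x%08x\n". */
	if (fscanf(f, "0x%x", &max_retry) == 1)
		printf("RSU max retry: %u\n", max_retry);

	fclose(f);
	return 0;
}
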
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index e0db8dbfc9d1..3aa489dba30a 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -305,9 +305,15 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
cb_data->status = BIT(SVC_STATUS_COMPLETED);
break;
case COMMAND_RSU_RETRY:
+ case COMMAND_RSU_MAX_RETRY:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
+ case COMMAND_RSU_DCMF_VERSION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = &res.a2;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -406,6 +412,16 @@ static int svc_normal_to_secure_thread(void *data)
a1 = 0;
a2 = 0;
break;
+ case COMMAND_RSU_MAX_RETRY:
+ a0 = INTEL_SIP_SMC_RSU_MAX_RETRY;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_DCMF_VERSION:
+ a0 = INTEL_SIP_SMC_RSU_DCMF_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -474,6 +490,7 @@ static int svc_normal_to_secure_thread(void *data)
* doesn't support RSU notify or retry
*/
if ((pdata->command == COMMAND_RSU_RETRY) ||
+ (pdata->command == COMMAND_RSU_MAX_RETRY) ||
(pdata->command == COMMAND_RSU_NOTIFY)) {
cbdata->status =
BIT(SVC_STATUS_NO_SUPPORT);
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 02d8cbad1ae2..02b60fde0430 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,15 +16,6 @@
#include "dfl-afu.h"
-static void put_all_pages(struct page **pages, int npages)
-{
- int i;
-
- for (i = 0; i < npages; i++)
- if (pages[i])
- put_page(pages[i]);
-}
-
void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
{
struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
@@ -57,22 +48,22 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
goto unlock_vm;
}
- pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
+ pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
region->pages);
if (pinned < 0) {
ret = pinned;
goto free_pages;
} else if (pinned != npages) {
ret = -EFAULT;
- goto put_pages;
+ goto unpin_pages;
}
dev_dbg(dev, "%d pages pinned\n", pinned);
return 0;
-put_pages:
- put_all_pages(region->pages, pinned);
+unpin_pages:
+ unpin_user_pages(region->pages, pinned);
free_pages:
kfree(region->pages);
unlock_vm:
@@ -94,7 +85,7 @@ static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
long npages = region->length >> PAGE_SHIFT;
struct device *dev = &pdata->dev->dev;
- put_all_pages(region->pages, npages);
+ unpin_user_pages(region->pages, npages);
kfree(region->pages);
account_locked_vm(current->mm, npages, false);
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index c1467ae1a6b6..c4691187cca9 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -14,6 +14,7 @@
* Mitchel Henry <henry.mitchel@intel.com>
*/
+#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>
#include "dfl-afu.h"
@@ -219,6 +220,21 @@ static void port_err_uinit(struct platform_device *pdev,
afu_port_err_mask(&pdev->dev, true);
}
+static long
+port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_PORT_ERR_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ return -ENODEV;
+ }
+}
+
const struct dfl_feature_id port_err_id_table[] = {
{.id = PORT_FEATURE_ID_ERROR,},
{0,}
@@ -227,4 +243,5 @@ const struct dfl_feature_id port_err_id_table[] = {
const struct dfl_feature_ops port_err_ops = {
.init = port_err_init,
.uinit = port_err_uinit,
+ .ioctl = port_err_ioctl,
};
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 3fa2c5992173..753cda4b2568 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -530,6 +530,30 @@ static const struct dfl_feature_ops port_stp_ops = {
.init = port_stp_init,
};
+static long
+port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_PORT_UINT_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ return -ENODEV;
+ }
+}
+
+static const struct dfl_feature_id port_uint_id_table[] = {
+ {.id = PORT_FEATURE_ID_UINT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops port_uint_ops = {
+ .ioctl = port_uint_ioctl,
+};
+
static struct dfl_feature_driver port_feature_drvs[] = {
{
.id_table = port_hdr_id_table,
@@ -548,6 +572,10 @@ static struct dfl_feature_driver port_feature_drvs[] = {
.ops = &port_stp_ops,
},
{
+ .id_table = port_uint_id_table,
+ .ops = &port_uint_ops,
+ },
+ {
.ops = NULL,
}
};
@@ -578,6 +606,7 @@ static int afu_release(struct inode *inode, struct file *filp)
{
struct platform_device *pdev = filp->private_data;
struct dfl_feature_platform_data *pdata;
+ struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
@@ -587,6 +616,9 @@ static int afu_release(struct inode *inode, struct file *filp)
dfl_feature_dev_use_end(pdata);
if (!dfl_feature_dev_use_count(pdata)) {
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_set_irq_triggers(feature, 0,
+ feature->nr_irqs, NULL);
__port_reset(pdev);
afu_dma_region_destroy(pdata);
}
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index f897d414b923..51c2892ec06d 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -15,6 +15,7 @@
* Mitchel, Henry <henry.mitchel@intel.com>
*/
+#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>
#include "dfl.h"
@@ -348,6 +349,22 @@ static void fme_global_err_uinit(struct platform_device *pdev,
fme_err_mask(&pdev->dev, true);
}
+static long
+fme_global_error_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_FME_ERR_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ return -ENODEV;
+ }
+}
+
const struct dfl_feature_id fme_global_err_id_table[] = {
{.id = FME_FEATURE_ID_GLOBAL_ERR,},
{0,}
@@ -356,4 +373,5 @@ const struct dfl_feature_id fme_global_err_id_table[] = {
const struct dfl_feature_ops fme_global_err_ops = {
.init = fme_global_err_init,
.uinit = fme_global_err_uinit,
+ .ioctl = fme_global_error_ioctl,
};
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index fc210d4e1863..77ea04d4edbe 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -620,11 +620,17 @@ static int fme_release(struct inode *inode, struct file *filp)
{
struct dfl_feature_platform_data *pdata = filp->private_data;
struct platform_device *pdev = pdata->dev;
+ struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
mutex_lock(&pdata->lock);
dfl_feature_dev_use_end(pdata);
+
+ if (!dfl_feature_dev_use_count(pdata))
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_set_irq_triggers(feature, 0,
+ feature->nr_irqs, NULL);
mutex_unlock(&pdata->lock);
return 0;
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index a78c409bf2c4..e220bec2927d 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -39,10 +39,32 @@ static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar)
return pcim_iomap_table(pcidev)[bar];
}
+static int cci_pci_alloc_irq(struct pci_dev *pcidev)
+{
+ int ret, nvec = pci_msix_vec_count(pcidev);
+
+ if (nvec <= 0) {
+ dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
+ return 0;
+ }
+
+ ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
+ if (ret < 0)
+ return ret;
+
+ return nvec;
+}
+
+static void cci_pci_free_irq(struct pci_dev *pcidev)
+{
+ pci_free_irq_vectors(pcidev);
+}
+
/* PCI Device ID */
#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
/* VF Device */
#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
@@ -55,6 +77,7 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
{0,}
};
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
@@ -78,17 +101,34 @@ static void cci_remove_feature_devs(struct pci_dev *pcidev)
/* remove all children feature devices */
dfl_fpga_feature_devs_remove(drvdata->cdev);
+ cci_pci_free_irq(pcidev);
+}
+
+static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
+{
+ unsigned int i;
+ int *table;
+
+ table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
+ if (!table)
+ return table;
+
+ for (i = 0; i < nvec; i++)
+ table[i] = pci_irq_vector(pcidev, i);
+
+ return table;
}
/* enumerate feature devices under pci device */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ int port_num, bar, i, nvec, ret = 0;
struct dfl_fpga_enum_info *info;
struct dfl_fpga_cdev *cdev;
resource_size_t start, len;
- int port_num, bar, i, ret = 0;
void __iomem *base;
+ int *irq_table;
u32 offset;
u64 v;
@@ -97,11 +137,30 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
if (!info)
return -ENOMEM;
+ /* add irq info for enumeration if the device support irq */
+ nvec = cci_pci_alloc_irq(pcidev);
+ if (nvec < 0) {
+ dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
+ ret = nvec;
+ goto enum_info_free_exit;
+ } else if (nvec) {
+ irq_table = cci_pci_create_irq_table(pcidev, nvec);
+ if (!irq_table) {
+ ret = -ENOMEM;
+ goto irq_free_exit;
+ }
+
+ ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
+ kfree(irq_table);
+ if (ret)
+ goto irq_free_exit;
+ }
+
/* start to find Device Feature List from Bar 0 */
base = cci_pci_ioremap_bar(pcidev, 0);
if (!base) {
ret = -ENOMEM;
- goto enum_info_free_exit;
+ goto irq_free_exit;
}
/*
@@ -154,7 +213,7 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
dfl_fpga_enum_info_add_dfl(info, start, len, base);
} else {
ret = -ENODEV;
- goto enum_info_free_exit;
+ goto irq_free_exit;
}
/* start enumeration with prepared enumeration information */
@@ -162,11 +221,14 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
if (IS_ERR(cdev)) {
dev_err(&pcidev->dev, "Enumeration failure\n");
ret = PTR_ERR(cdev);
- goto enum_info_free_exit;
+ goto irq_free_exit;
}
drvdata->cdev = cdev;
+irq_free_exit:
+ if (ret)
+ cci_pci_free_irq(pcidev);
enum_info_free_exit:
dfl_fpga_enum_info_free(info);
@@ -211,12 +273,10 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
}
ret = cci_enumerate_feature_devs(pcidev);
- if (ret) {
- dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
- goto disable_error_report_exit;
- }
+ if (!ret)
+ return ret;
- return ret;
+ dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
disable_error_report_exit:
pci_disable_pcie_error_reporting(pcidev);
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 990994874bf1..649958a36e62 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -10,7 +10,9 @@
* Wu Hao <hao.wu@intel.com>
* Xiao Guangrong <guangrong.xiao@linux.intel.com>
*/
+#include <linux/fpga-dfl.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include "dfl.h"
@@ -421,6 +423,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
*
* @dev: device to enumerate.
* @cdev: the container device for all feature devices.
+ * @nr_irqs: number of irqs for all feature devices.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
+ * this device.
* @feature_dev: current feature device.
* @ioaddr: header register region address of feature device in enumeration.
* @sub_features: a sub features linked list for feature device in enumeration.
@@ -429,6 +434,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
struct build_feature_devs_info {
struct device *dev;
struct dfl_fpga_cdev *cdev;
+ unsigned int nr_irqs;
+ int *irq_table;
+
struct platform_device *feature_dev;
void __iomem *ioaddr;
struct list_head sub_features;
@@ -442,12 +450,16 @@ struct build_feature_devs_info {
* @mmio_res: mmio resource of this sub feature.
* @ioaddr: mapped base address of mmio resource.
* @node: node in sub_features linked list.
+ * @irq_base: start of irq index in this sub feature.
+ * @nr_irqs: number of irqs of this sub feature.
*/
struct dfl_feature_info {
u64 fid;
struct resource mmio_res;
void __iomem *ioaddr;
struct list_head node;
+ unsigned int irq_base;
+ unsigned int nr_irqs;
};
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
@@ -487,8 +499,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
* it will be automatically freed by device's release() callback,
* platform_device_release().
*/
- pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
- GFP_KERNEL);
+ pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -520,13 +531,30 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
struct dfl_feature *feature = &pdata->features[index];
+ struct dfl_feature_irq_ctx *ctx;
+ unsigned int i;
/* save resource information for each feature */
+ feature->dev = fdev;
feature->id = finfo->fid;
feature->resource_index = index;
feature->ioaddr = finfo->ioaddr;
fdev->resource[index++] = finfo->mmio_res;
+ if (finfo->nr_irqs) {
+ ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
+ sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < finfo->nr_irqs; i++)
+ ctx[i].irq =
+ binfo->irq_table[finfo->irq_base + i];
+
+ feature->irq_ctx = ctx;
+ feature->nr_irqs = finfo->nr_irqs;
+ }
+
list_del(&finfo->node);
kfree(finfo);
}
@@ -638,6 +666,78 @@ static u64 feature_id(void __iomem *start)
return 0;
}
+static int parse_feature_irqs(struct build_feature_devs_info *binfo,
+ resource_size_t ofst, u64 fid,
+ unsigned int *irq_base, unsigned int *nr_irqs)
+{
+ void __iomem *base = binfo->ioaddr + ofst;
+ unsigned int i, ibase, inr = 0;
+ int virq;
+ u64 v;
+
+ /*
+ * Ideally DFL framework should only read info from DFL header, but
+ * current version DFL only provides mmio resources information for
+ * each feature in DFL Header, no field for interrupt resources.
+ * Interrupt resource information is provided by specific mmio
+ * registers of each private feature which supports interrupt. So in
+ * order to parse and assign irq resources, DFL framework has to look
+ * into specific capability registers of these private features.
+ *
+ * Once future DFL version supports generic interrupt resource
+ * information in common DFL headers, the generic interrupt parsing
+ * code will be added. But in order to be compatible to old version
+ * DFL, the driver may still fall back to these quirks.
+ */
+ switch (fid) {
+ case PORT_FEATURE_ID_UINT:
+ v = readq(base + PORT_UINT_CAP);
+ ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+ inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+ break;
+ case PORT_FEATURE_ID_ERROR:
+ v = readq(base + PORT_ERROR_CAP);
+ ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ break;
+ case FME_FEATURE_ID_GLOBAL_ERR:
+ v = readq(base + FME_ERROR_CAP);
+ ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+
+ if (!inr) {
+ *irq_base = 0;
+ *nr_irqs = 0;
+ return 0;
+ }
+
+ dev_dbg(binfo->dev, "feature: 0x%llx, irq_base: %u, nr_irqs: %u\n",
+ fid, ibase, inr);
+
+ if (ibase + inr > binfo->nr_irqs) {
+ dev_err(binfo->dev,
+ "Invalid interrupt number in feature 0x%llx\n", fid);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < inr; i++) {
+ virq = binfo->irq_table[ibase + i];
+ if (virq < 0 || virq > NR_IRQS) {
+ dev_err(binfo->dev,
+ "Invalid irq table entry for feature 0x%llx\n",
+ fid);
+ return -EINVAL;
+ }
+ }
+
+ *irq_base = ibase;
+ *nr_irqs = inr;
+
+ return 0;
+}
+
/*
* when create sub feature instances, for private features, it doesn't need
* to provide resource size and feature id as they could be read from DFH
@@ -650,7 +750,9 @@ create_feature_instance(struct build_feature_devs_info *binfo,
struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
resource_size_t size, u64 fid)
{
+ unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo;
+ int ret;
/* read feature size and id if inputs are invalid */
size = size ? size : feature_size(dfl->ioaddr + ofst);
@@ -659,6 +761,10 @@ create_feature_instance(struct build_feature_devs_info *binfo,
if (dfl->len - ofst < size)
return -EINVAL;
+ ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
+ if (ret)
+ return ret;
+
finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
if (!finfo)
return -ENOMEM;
@@ -667,6 +773,8 @@ create_feature_instance(struct build_feature_devs_info *binfo,
finfo->mmio_res.start = dfl->start + ofst;
finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
finfo->mmio_res.flags = IORESOURCE_MEM;
+ finfo->irq_base = irq_base;
+ finfo->nr_irqs = nr_irqs;
finfo->ioaddr = dfl->ioaddr + ofst;
list_add_tail(&finfo->node, &binfo->sub_features);
@@ -853,6 +961,10 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
devm_kfree(dev, dfl);
}
+ /* remove irq table */
+ if (info->irq_table)
+ devm_kfree(dev, info->irq_table);
+
devm_kfree(dev, info);
put_device(dev);
}
@@ -892,6 +1004,45 @@ int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
+/**
+ * dfl_fpga_enum_info_add_irq - add irq table to enum info
+ *
+ * @info: ptr to dfl_fpga_enum_info
+ * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
+ * this device.
+ *
+ * One FPGA device may have several interrupts. This function adds irq
+ * information of the DFL fpga device to enum info for next step enumeration.
+ * This function should be called before dfl_fpga_feature_devs_enumerate().
+ * As we only support one irq domain for all DFLs in the same enum info, adding
+ * irq table a second time for the same enum info will return error.
+ *
+ * If we need to enumerate DFLs which belong to different irq domains, we
+ * should fill more enum info and enumerate them one by one.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
+ unsigned int nr_irqs, int *irq_table)
+{
+ if (!nr_irqs || !irq_table)
+ return -EINVAL;
+
+ if (info->irq_table)
+ return -EEXIST;
+
+ info->irq_table = devm_kmemdup(info->dev, irq_table,
+ sizeof(int) * nr_irqs, GFP_KERNEL);
+ if (!info->irq_table)
+ return -ENOMEM;
+
+ info->nr_irqs = nr_irqs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
+
static int remove_feature_dev(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -959,6 +1110,10 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
binfo->dev = info->dev;
binfo->cdev = cdev;
+ binfo->nr_irqs = info->nr_irqs;
+ if (info->nr_irqs)
+ binfo->irq_table = info->irq_table;
+
/*
* start enumeration for all feature devices based on Device Feature
* Lists.
@@ -1241,6 +1396,160 @@ done:
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
+static irqreturn_t dfl_irq_handler(int irq, void *arg)
+{
+ struct eventfd_ctx *trigger = arg;
+
+ eventfd_signal(trigger, 1);
+ return IRQ_HANDLED;
+}
+
+static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
+ int fd)
+{
+ struct platform_device *pdev = feature->dev;
+ struct eventfd_ctx *trigger;
+ int irq, ret;
+
+ irq = feature->irq_ctx[idx].irq;
+
+ if (feature->irq_ctx[idx].trigger) {
+ free_irq(irq, feature->irq_ctx[idx].trigger);
+ kfree(feature->irq_ctx[idx].name);
+ eventfd_ctx_put(feature->irq_ctx[idx].trigger);
+ feature->irq_ctx[idx].trigger = NULL;
+ }
+
+ if (fd < 0)
+ return 0;
+
+ feature->irq_ctx[idx].name =
+ kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%llx)", idx,
+ dev_name(&pdev->dev), feature->id);
+ if (!feature->irq_ctx[idx].name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ ret = PTR_ERR(trigger);
+ goto free_name;
+ }
+
+ ret = request_irq(irq, dfl_irq_handler, 0,
+ feature->irq_ctx[idx].name, trigger);
+ if (!ret) {
+ feature->irq_ctx[idx].trigger = trigger;
+ return ret;
+ }
+
+ eventfd_ctx_put(trigger);
+free_name:
+ kfree(feature->irq_ctx[idx].name);
+
+ return ret;
+}
+
+/**
+ * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
+ *
+ * @feature: dfl sub feature.
+ * @start: start of irq index in this dfl sub feature.
+ * @count: number of irqs.
+ * @fds: eventfds to bind with irqs. Unbind the related irq if fds[n] is
+ *	 negative. Unbind "count" irqs starting at @start if the fds ptr is NULL.
+ *
+ * Bind the given eventfds to irqs of this dfl sub feature. The related irq is
+ * unbound if fds[n] is negative, and "count" irqs starting at @start are
+ * unbound if the fds pointer is NULL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
+ unsigned int count, int32_t *fds)
+{
+ unsigned int i;
+ int ret = 0;
+
+ /* overflow */
+ if (unlikely(start + count < start))
+ return -EINVAL;
+
+ /* exceeds nr_irqs */
+ if (start + count > feature->nr_irqs)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ int fd = fds ? fds[i] : -1;
+
+ ret = do_set_irq_trigger(feature, start + i, fd);
+ if (ret) {
+ while (i--)
+ do_set_irq_trigger(feature, start + i, -1);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
+
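A quick sketch of the unbind path as it might appear in a sub feature's uinit callback (the example_ name and the callback pairing are assumptions for illustration, not taken from this hunk):

	/* illustrative: drop every eventfd binding of a sub feature */
	static void example_feature_uinit(struct platform_device *pdev,
					  struct dfl_feature *feature)
	{
		/* a NULL fds pointer unbinds "count" irqs starting at @start */
		dfl_fpga_set_irq_triggers(feature, 0, feature->nr_irqs, NULL);
	}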
+/**
+ * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
+ * @pdev: the feature device which has the sub feature
+ * @feature: the dfl sub feature
+ * @arg: ioctl argument
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg)
+{
+ return put_user(feature->nr_irqs, (__u32 __user *)arg);
+}
+EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
+
+/**
+ * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
+ * @pdev: the feature device which has the sub feature
+ * @feature: the dfl sub feature
+ * @arg: ioctl argument
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fpga_irq_set hdr;
+ s32 *fds;
+ long ret;
+
+ if (!feature->nr_irqs)
+ return -ENOENT;
+
+ if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
+ return -EFAULT;
+
+ if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
+ (hdr.start + hdr.count < hdr.start))
+ return -EINVAL;
+
+ fds = memdup_user((void __user *)(arg + sizeof(hdr)),
+ hdr.count * sizeof(s32));
+ if (IS_ERR(fds))
+ return PTR_ERR(fds);
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
+ mutex_unlock(&pdata->lock);
+
+ kfree(fds);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
+
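From userspace, the set_irq handler above expects a struct dfl_fpga_irq_set header followed immediately by the eventfd descriptors. A minimal sketch of binding one interrupt follows; the request code is passed in by the caller because the actual DFL_FPGA_*_SET_IRQ macros come from the uapi header, which is not part of this hunk, and the local struct merely mirrors the layout that handler parses:

	/* illustrative userspace usage, not part of the kernel patch */
	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	struct example_irq_set {		/* mirrors struct dfl_fpga_irq_set */
		uint32_t start;
		uint32_t count;
		int32_t evtfds[];
	};

	static int bind_one_irq(int feature_fd, unsigned long set_irq_request)
	{
		struct example_irq_set *set;
		int evfd, ret;

		evfd = eventfd(0, 0);
		if (evfd < 0)
			return -1;

		set = malloc(sizeof(*set) + sizeof(int32_t));
		if (!set) {
			close(evfd);
			return -1;
		}
		set->start = 0;			/* first irq of this sub feature */
		set->count = 1;
		set->evtfds[0] = evfd;		/* a negative value would unbind */

		ret = ioctl(feature_fd, set_irq_request, set);
		free(set);
		if (ret) {
			close(evfd);
			return -1;
		}
		return evfd;			/* read()/poll() it to see interrupts */
	}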
static void __exit dfl_fpga_exit(void)
{
dfl_chardev_uinit();
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 2f5d3052e36e..a32dfba2a88b 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,7 +17,9 @@
#include <linux/bitfield.h>
#include <linux/cdev.h>
#include <linux/delay.h>
+#include <linux/eventfd.h>
#include <linux/fs.h>
+#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/platform_device.h>
@@ -112,6 +114,13 @@
#define FME_PORT_OFST_ACC_VF 1
#define FME_PORT_OFST_IMP BIT_ULL(60)
+/* FME Error Capability Register */
+#define FME_ERROR_CAP 0x70
+
+/* FME Error Capability Register Bitfield */
+#define FME_ERROR_CAP_SUPP_INT BIT_ULL(0) /* Interrupt Support */
+#define FME_ERROR_CAP_INT_VECT GENMASK_ULL(12, 1) /* Interrupt vector */
+
/* PORT Header Register Set */
#define PORT_HDR_DFH DFH
#define PORT_HDR_GUID_L GUID_L
@@ -145,6 +154,20 @@
#define PORT_STS_PWR_STATE_AP2 2 /* 90% throttling */
#define PORT_STS_PWR_STATE_AP6 6 /* 100% throttling */
+/* Port Error Capability Register */
+#define PORT_ERROR_CAP 0x38
+
+/* Port Error Capability Register Bitfield */
+#define PORT_ERROR_CAP_SUPP_INT BIT_ULL(0) /* Interrupt Support */
+#define PORT_ERROR_CAP_INT_VECT GENMASK_ULL(12, 1) /* Interrupt vector */
+
+/* Port Uint Capability Register */
+#define PORT_UINT_CAP 0x8
+
+/* Port Uint Capability Register Bitfield */
+#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
+#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -189,20 +212,39 @@ struct dfl_feature_driver {
};
/**
+ * struct dfl_feature_irq_ctx - dfl private feature interrupt context
+ *
+ * @irq: Linux IRQ number of this interrupt.
+ * @trigger: eventfd context to signal when interrupt happens.
+ * @name: irq name needed when requesting irq.
+ */
+struct dfl_feature_irq_ctx {
+ int irq;
+ struct eventfd_ctx *trigger;
+ char *name;
+};
+
+/**
* struct dfl_feature - sub feature of the feature devices
*
+ * @dev: ptr to pdev of the feature device which has the sub feature.
* @id: sub feature id.
* @resource_index: each sub feature has one mmio resource for its registers.
* this index is used to find its mmio resource from the
 *		   feature dev (platform device)'s resources.
* @ioaddr: mapped mmio resource address.
+ * @irq_ctx: interrupt context list.
+ * @nr_irqs: number of interrupt contexts.
* @ops: ops of this sub feature.
* @priv: priv data of this feature.
*/
struct dfl_feature {
+ struct platform_device *dev;
u64 id;
int resource_index;
void __iomem *ioaddr;
+ struct dfl_feature_irq_ctx *irq_ctx;
+ unsigned int nr_irqs;
const struct dfl_feature_ops *ops;
void *priv;
};
@@ -299,12 +341,6 @@ struct dfl_feature_ops {
#define DFL_FPGA_FEATURE_DEV_FME "dfl-fme"
#define DFL_FPGA_FEATURE_DEV_PORT "dfl-port"
-static inline int dfl_feature_platform_data_size(const int num)
-{
- return sizeof(struct dfl_feature_platform_data) +
- num * sizeof(struct dfl_feature);
-}
-
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev);
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
struct dfl_feature_driver *feature_drvs);
@@ -390,10 +426,14 @@ static inline u8 dfl_feature_revision(void __iomem *base)
*
* @dev: parent device.
* @dfls: list of device feature lists.
+ * @nr_irqs: number of irqs for all feature devices.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by hw irq numbers.
*/
struct dfl_fpga_enum_info {
struct device *dev;
struct list_head dfls;
+ unsigned int nr_irqs;
+ int *irq_table;
};
/**
@@ -417,6 +457,8 @@ struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
resource_size_t start, resource_size_t len,
void __iomem *ioaddr);
+int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
+ unsigned int nr_irqs, int *irq_table);
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
/**
@@ -468,4 +510,13 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id);
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev);
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf);
+int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
+ unsigned int count, int32_t *fds);
+long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg);
+long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg);
+
#endif /* __FPGA_DFL_H */
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 4bab9028940a..2deccacc3aa7 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -328,7 +328,7 @@ struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
void *priv)
{
struct fpga_bridge *bridge;
- int id, ret = 0;
+ int id, ret;
if (!name || !strlen(name)) {
dev_err(dev, "Attempt to register with no name!\n");
@@ -340,10 +340,8 @@ struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
return NULL;
id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
+ if (id < 0)
goto error_kfree;
- }
mutex_init(&bridge->mutex);
INIT_LIST_HEAD(&bridge->node);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index e05104f5e40c..f38bab01432e 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -581,10 +581,8 @@ struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
return NULL;
id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
+ if (id < 0)
goto error_kfree;
- }
mutex_init(&mgr->ref_mutex);
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 272ee0c22822..2967aa2a74e2 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Xilinx Spartan6 Slave Serial SPI Driver
+ * Xilinx Spartan6 and 7 Series Slave Serial SPI Driver
*
* Copyright (C) 2017 DENX Software Engineering
*
@@ -23,6 +23,7 @@
struct xilinx_spi_conf {
struct spi_device *spi;
struct gpio_desc *prog_b;
+ struct gpio_desc *init_b;
struct gpio_desc *done;
};
@@ -36,13 +37,45 @@ static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
return FPGA_MGR_STATE_UNKNOWN;
}
+/**
+ * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait
+ * a given delay if the pin is unavailable
+ *
+ * @mgr: The FPGA manager object
+ * @value: Value of INIT_B to wait for (1 = asserted = low)
+ * @alt_udelay: Delay to wait if the INIT_B GPIO is not available
+ *
+ * Returns 0 when the INIT_B GPIO reaches the given state, or -ETIMEDOUT if
+ * it does not do so before the timeout expires. If no INIT_B GPIO is
+ * available, 0 is always returned after the fallback delay.
+ */
+static int wait_for_init_b(struct fpga_manager *mgr, int value,
+ unsigned long alt_udelay)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ if (conf->init_b) {
+ while (time_before(jiffies, timeout)) {
+ if (gpiod_get_value(conf->init_b) == value)
+ return 0;
+ usleep_range(100, 400);
+ }
+ return -ETIMEDOUT;
+ }
+
+ udelay(alt_udelay);
+
+ return 0;
+}
+
static int xilinx_spi_write_init(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count)
{
struct xilinx_spi_conf *conf = mgr->priv;
- const size_t prog_latency_7500us = 7500;
- const size_t prog_pulse_1us = 1;
+ int err;
if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
@@ -51,17 +84,28 @@ static int xilinx_spi_write_init(struct fpga_manager *mgr,
gpiod_set_value(conf->prog_b, 1);
- udelay(prog_pulse_1us); /* min is 500 ns */
+ err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */
+ if (err) {
+ dev_err(&mgr->dev, "INIT_B pin did not go low\n");
+ gpiod_set_value(conf->prog_b, 0);
+ return err;
+ }
gpiod_set_value(conf->prog_b, 0);
+ err = wait_for_init_b(mgr, 0, 0);
+ if (err) {
+ dev_err(&mgr->dev, "INIT_B pin did not go high\n");
+ return err;
+ }
+
if (gpiod_get_value(conf->done)) {
dev_err(&mgr->dev, "Unexpected DONE pin state...\n");
return -EIO;
}
/* program latency */
- usleep_range(prog_latency_7500us, prog_latency_7500us + 100);
+ usleep_range(7500, 7600);
return 0;
}
@@ -156,6 +200,13 @@ static int xilinx_spi_probe(struct spi_device *spi)
return PTR_ERR(conf->prog_b);
}
+ conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
+ if (IS_ERR(conf->init_b)) {
+ dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
+ PTR_ERR(conf->init_b));
+ return PTR_ERR(conf->init_b);
+ }
+
conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
if (IS_ERR(conf->done)) {
dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index 366716f11b1a..1df6ab5d339d 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -759,7 +759,7 @@ static int check_urb_status(struct urb *urb)
case -EOVERFLOW:
dev_err(dev, "%s: overflow actual length is %d\n",
__func__, urb->actual_length);
- /* fall through */
+ fallthrough;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
index 67dbe6fda9a1..58ea374d8aaa 100644
--- a/drivers/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -1233,7 +1233,7 @@ int gb_interface_add(struct gb_interface *intf)
case GB_INTERFACE_TYPE_GREYBUS:
dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
intf->vendor_id, intf->product_id);
- /* fall-through */
+ fallthrough;
case GB_INTERFACE_TYPE_UNIPRO:
dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
intf->ddbl1_manufacturer_id,
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 16ebf38a9f66..1801804a7762 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -568,10 +568,7 @@ out:
}
static struct amba_id catu_ids[] = {
- {
- .id = 0x000bb9ee,
- .mask = 0x000fffff,
- },
+ CS_AMBA_ID(0x000bb9ee),
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 84f1dcb69827..1a3169e69bb1 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -226,9 +226,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = coresight_get_enabled_sink(true);
}
- if (!sink)
- goto err;
-
mask = &event_data->mask;
/*
@@ -254,6 +251,16 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/*
+	 * No sink provided - look for a default sink for one of the
+	 * devices. At present only an [N:1] topology, where all CPUs use
+	 * the same sink, is supported, so we only need to find one sink.
+	 * coresight_build_path() below will remove any CPU that does not
+	 * attach to the sink, or all CPUs if no sink has been found.
+ */
+ if (!sink)
+ sink = coresight_find_default_sink(csdev);
+
+ /*
* Building a path doesn't enable it, it simply builds a
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
@@ -267,6 +274,10 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
*etm_event_cpu_path_ptr(event_data, cpu) = path;
}
+ /* no sink found for any CPU - cannot trace */
+ if (!sink)
+ goto err;
+
/* If we don't have any CPUs ready for tracing, abort */
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids)
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 0c35cd5e0d1d..6d7d2169bfb2 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -196,12 +196,14 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
- /*
- * Request to keep the trace unit powered and also
- * emulation of powerdown
- */
- writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) | TRCPDCR_PU,
- drvdata->base + TRCPDCR);
+ if (!drvdata->skip_power_up) {
+ /*
+ * Request to keep the trace unit powered and also
+ * emulation of powerdown
+ */
+ writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) |
+ TRCPDCR_PU, drvdata->base + TRCPDCR);
+ }
/* Enable the trace unit */
writel_relaxed(1, drvdata->base + TRCPRGCTLR);
@@ -476,10 +478,12 @@ static void etm4_disable_hw(void *info)
CS_UNLOCK(drvdata->base);
- /* power can be removed from the trace unit now */
- control = readl_relaxed(drvdata->base + TRCPDCR);
- control &= ~TRCPDCR_PU;
- writel_relaxed(control, drvdata->base + TRCPDCR);
+ if (!drvdata->skip_power_up) {
+ /* power can be removed from the trace unit now */
+ control = readl_relaxed(drvdata->base + TRCPDCR);
+ control &= ~TRCPDCR_PU;
+ writel_relaxed(control, drvdata->base + TRCPDCR);
+ }
control = readl_relaxed(drvdata->base + TRCPRGCTLR);
@@ -507,6 +511,12 @@ static void etm4_disable_hw(void *info)
readl_relaxed(drvdata->base + TRCSSCSRn(i));
}
+ /* read back the current counter values */
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ config->cntr_val[i] =
+ readl_relaxed(drvdata->base + TRCCNTVRn(i));
+ }
+
coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
@@ -1196,8 +1206,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- state->trcacvr[i] = readl(drvdata->base + TRCACVRn(i));
- state->trcacatr[i] = readl(drvdata->base + TRCACATRn(i));
+ state->trcacvr[i] = readq(drvdata->base + TRCACVRn(i));
+ state->trcacatr[i] = readq(drvdata->base + TRCACATRn(i));
}
/*
@@ -1208,10 +1218,10 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
*/
for (i = 0; i < drvdata->numcidc; i++)
- state->trccidcvr[i] = readl(drvdata->base + TRCCIDCVRn(i));
+ state->trccidcvr[i] = readq(drvdata->base + TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- state->trcvmidcvr[i] = readl(drvdata->base + TRCVMIDCVRn(i));
+ state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i));
state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
@@ -1309,18 +1319,18 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- writel_relaxed(state->trcacvr[i],
+ writeq_relaxed(state->trcacvr[i],
drvdata->base + TRCACVRn(i));
- writel_relaxed(state->trcacatr[i],
+ writeq_relaxed(state->trcacatr[i],
drvdata->base + TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writel_relaxed(state->trccidcvr[i],
+ writeq_relaxed(state->trccidcvr[i],
drvdata->base + TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- writel_relaxed(state->trcvmidcvr[i],
+ writeq_relaxed(state->trcvmidcvr[i],
drvdata->base + TRCVMIDCVRn(i));
writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
@@ -1468,6 +1478,9 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
}
+ if (fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4a695bf90582..b8283e1d6d88 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -133,7 +133,7 @@
#define ETMv4_MAX_CTXID_CMP 8
#define ETM_MAX_VMID_CMP 8
#define ETM_MAX_PE_CMP 8
-#define ETM_MAX_RES_SEL 16
+#define ETM_MAX_RES_SEL 32
#define ETM_MAX_SS_CMP 8
#define ETM_ARCH_V4 0x40
@@ -325,7 +325,7 @@ struct etmv4_save_state {
u32 trccntctlr[ETMv4_MAX_CNTR];
u32 trccntvr[ETMv4_MAX_CNTR];
- u32 trcrsctlr[ETM_MAX_RES_SEL * 2];
+ u32 trcrsctlr[ETM_MAX_RES_SEL];
u32 trcssccr[ETM_MAX_SS_CMP];
u32 trcsscsr[ETM_MAX_SS_CMP];
@@ -334,7 +334,7 @@ struct etmv4_save_state {
u64 trcacvr[ETM_MAX_SINGLE_ADDR_CMP];
u64 trcacatr[ETM_MAX_SINGLE_ADDR_CMP];
u64 trccidcvr[ETMv4_MAX_CTXID_CMP];
- u32 trcvmidcvr[ETM_MAX_VMID_CMP];
+ u64 trcvmidcvr[ETM_MAX_VMID_CMP];
u32 trccidcctlr0;
u32 trccidcctlr1;
u32 trcvmidcctlr0;
@@ -407,6 +407,8 @@ struct etmv4_save_state {
* @config: structure holding configuration parameters.
* @save_state: State to be preserved across power loss
* @state_needs_restore: True when there is context to restore after PM exit
+ * @skip_power_up: Indicates if an implementation can skip powering up
+ * the trace unit.
*/
struct etmv4_drvdata {
void __iomem *base;
@@ -454,6 +456,7 @@ struct etmv4_drvdata {
struct etmv4_config config;
struct etmv4_save_state *save_state;
bool state_needs_restore;
+ bool skip_power_up;
};
/* Address comparator access types */
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index e4912abda3aa..bfd44231d7ad 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -27,9 +27,8 @@ static int coresight_alloc_conns(struct device *dev,
struct coresight_platform_data *pdata)
{
if (pdata->nr_outport) {
- pdata->conns = devm_kzalloc(dev, pdata->nr_outport *
- sizeof(*pdata->conns),
- GFP_KERNEL);
+ pdata->conns = devm_kcalloc(dev, pdata->nr_outport,
+ sizeof(*pdata->conns), GFP_KERNEL);
if (!pdata->conns)
return -ENOMEM;
}
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 36c943ae94d5..f2dc625ea585 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -150,6 +150,8 @@ int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *coresight_get_enabled_sink(bool reset);
struct coresight_device *coresight_get_sink_by_id(u32 id);
+struct coresight_device *
+coresight_find_default_sink(struct coresight_device *csdev);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index e7dc1c31d20d..78acf29c49ca 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/coresight.h>
@@ -32,12 +33,14 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
* @spinlock: serialize enable/disable operations.
+ * @check_idfilter_val: check if the context is lost upon clock removal.
*/
struct replicator_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
spinlock_t spinlock;
+ bool check_idfilter_val;
};
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
@@ -66,29 +69,43 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
int inport, int outport)
{
int rc = 0;
- u32 reg;
-
- switch (outport) {
- case 0:
- reg = REPLICATOR_IDFILTER0;
- break;
- case 1:
- reg = REPLICATOR_IDFILTER1;
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
+ u32 id0val, id1val;
CS_UNLOCK(drvdata->base);
- if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
- (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ id0val = readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0);
+ id1val = readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1);
+
+ /*
+ * Some replicator designs lose context when AMBA clocks are removed,
+ * so have a check for this.
+ */
+ if (drvdata->check_idfilter_val && id0val == 0x0 && id1val == 0x0)
+ id0val = id1val = 0xff;
+
+ if (id0val == 0xff && id1val == 0xff)
rc = coresight_claim_device_unlocked(drvdata->base);
+ if (!rc) {
+ switch (outport) {
+ case 0:
+ id0val = 0x0;
+ break;
+ case 1:
+ id1val = 0x0;
+ break;
+ default:
+ WARN_ON(1);
+ rc = -EINVAL;
+ }
+ }
+
/* Ensure that the outport is enabled. */
- if (!rc)
- writel_relaxed(0x00, drvdata->base + reg);
+ if (!rc) {
+ writel_relaxed(id0val, drvdata->base + REPLICATOR_IDFILTER0);
+ writel_relaxed(id1val, drvdata->base + REPLICATOR_IDFILTER1);
+ }
+
CS_LOCK(drvdata->base);
return rc;
@@ -239,6 +256,10 @@ static int replicator_probe(struct device *dev, struct resource *res)
desc.groups = replicator_groups;
}
+ if (fwnode_property_present(dev_fwnode(dev),
+ "qcom,replicator-loses-context"))
+ drvdata->check_idfilter_val = true;
+
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
@@ -348,16 +369,9 @@ static int dynamic_replicator_probe(struct amba_device *adev,
}
static const struct amba_id dynamic_replicator_ids[] = {
- {
- .id = 0x000bb909,
- .mask = 0x000fffff,
- },
- {
- /* Coresight SoC-600 */
- .id = 0x000bb9ec,
- .mask = 0x000fffff,
- },
- { 0, 0 },
+ CS_AMBA_ID(0x000bb909),
+ CS_AMBA_ID(0x000bb9ec), /* Coresight SoC-600 */
+ {},
};
static struct amba_driver dynamic_replicator_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index b908ca104645..673d2f56ed1e 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -727,8 +727,6 @@ static int acpi_stm_get_stimulus_area(struct device *dev, struct resource *res)
struct acpi_device *adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
rc = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
if (rc < 0)
return rc;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 36cce2bfb744..6375504ba8b0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -639,15 +639,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return -EINVAL;
- }
-
/* Re-enable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EINVAL;
+ }
/*
* The trace run will continue with the same allocated trace
* buffer. As such zero-out the buffer so that we don't end
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 625882bc8b08..b29c2db94d96 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1110,7 +1110,7 @@ static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
}
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
__tmc_etr_disable_hw(drvdata);
/* Disable CATU device if this ETR is connected to one */
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 39fba1d16e6e..7040d583bed9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -484,7 +484,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
break;
case TMC_CONFIG_TYPE_ETR:
desc.type = CORESIGHT_DEV_TYPE_SINK;
- desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
desc.ops = &tmc_etr_cs_ops;
ret = tmc_etr_setup_caps(dev, devid,
coresight_get_uci_data(id));
@@ -496,6 +496,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
break;
case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
desc.ops = &tmc_etf_cs_ops;
dev_list = &etf_devs;
@@ -538,6 +539,28 @@ out:
return ret;
}
+static void tmc_shutdown(struct amba_device *adev)
+{
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ if (drvdata->mode == CS_MODE_DISABLED)
+ goto out;
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+ tmc_etr_disable_hw(drvdata);
+
+ /*
+	 * Unlike the remove callback, which is needed to make coresight
+	 * modular, we do not care about unregistering the coresight device
+	 * here, since the system is going down after this.
+ */
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb961),
/* Coresight SoC 600 TMC-ETR/ETS */
@@ -556,6 +579,7 @@ static struct amba_driver tmc_driver = {
.suppress_bind_attrs = true,
},
.probe = tmc_probe,
+ .shutdown = tmc_shutdown,
.id_table = tmc_ids,
};
builtin_amba_driver(tmc_driver);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 71de978575f3..6e8d2dc33d17 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -268,6 +268,7 @@ ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index f3efbb3b2b4d..e9c90f2de34a 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -769,6 +769,171 @@ void coresight_release_path(struct list_head *path)
path = NULL;
}
+/* return true if the device is a suitable type for a default sink */
+static inline bool coresight_is_def_sink_type(struct coresight_device *csdev)
+{
+ /* sink & correct subtype */
+ if (((csdev->type == CORESIGHT_DEV_TYPE_SINK) ||
+ (csdev->type == CORESIGHT_DEV_TYPE_LINKSINK)) &&
+ (csdev->subtype.sink_subtype >= CORESIGHT_DEV_SUBTYPE_SINK_BUFFER))
+ return true;
+ return false;
+}
+
+/**
+ * coresight_select_best_sink - return the best sink for use as default from
+ * the two provided.
+ *
+ * @sink: current best sink.
+ * @depth: search depth where current sink was found.
+ * @new_sink: new sink for comparison with current sink.
+ * @new_depth: search depth where new sink was found.
+ *
+ * Sinks are prioritised according to coresight_dev_subtype_sink, with only
+ * subtypes CORESIGHT_DEV_SUBTYPE_SINK_BUFFER or higher being used.
+ *
+ * Where two sinks of equal priority are found, the sink closest to the
+ * source is used (smallest search depth).
+ *
+ * Return: @new_sink (and update @depth) if it is better than @sink, else @sink.
+ */
+static struct coresight_device *
+coresight_select_best_sink(struct coresight_device *sink, int *depth,
+ struct coresight_device *new_sink, int new_depth)
+{
+ bool update = false;
+
+ if (!sink) {
+ /* first found at this level */
+ update = true;
+ } else if (new_sink->subtype.sink_subtype >
+ sink->subtype.sink_subtype) {
+ /* found better sink */
+ update = true;
+ } else if ((new_sink->subtype.sink_subtype ==
+ sink->subtype.sink_subtype) &&
+ (*depth > new_depth)) {
+ /* found same but closer sink */
+ update = true;
+ }
+
+ if (update)
+ *depth = new_depth;
+ return update ? new_sink : sink;
+}
+
+/**
+ * coresight_find_sink - recursive function to walk trace connections from
+ * source to find a suitable default sink.
+ *
+ * @csdev: source / current device to check.
+ * @depth: [in] search depth of calling dev, [out] depth of found sink.
+ *
+ * This will walk the connection path from a source (ETM) until a suitable
+ * sink is encountered and return that sink to the original caller.
+ *
+ * If the current device is a plain sink, return it and its depth; otherwise
+ * recursively check the child connections looking for a sink, selecting the
+ * best candidate with coresight_select_best_sink.
+ *
+ * Return: best sink found, or NULL if none was found at this node or its
+ * child nodes.
+ */
+static struct coresight_device *
+coresight_find_sink(struct coresight_device *csdev, int *depth)
+{
+ int i, curr_depth = *depth + 1, found_depth = 0;
+ struct coresight_device *found_sink = NULL;
+
+ if (coresight_is_def_sink_type(csdev)) {
+ found_depth = curr_depth;
+ found_sink = csdev;
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK)
+ goto return_def_sink;
+ /* look past LINKSINK for something better */
+ }
+
+ /*
+ * Not a sink we want - or possible child sink may be better.
+ * recursively explore each port found on this element.
+ */
+ for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ struct coresight_device *child_dev, *sink = NULL;
+ int child_depth = curr_depth;
+
+ child_dev = csdev->pdata->conns[i].child_dev;
+ if (child_dev)
+ sink = coresight_find_sink(child_dev, &child_depth);
+
+ if (sink)
+ found_sink = coresight_select_best_sink(found_sink,
+ &found_depth,
+ sink,
+ child_depth);
+ }
+
+return_def_sink:
+ /* return found sink and depth */
+ if (found_sink)
+ *depth = found_depth;
+ return found_sink;
+}
+
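As a worked illustration (assuming, consistently with the TMC subtype changes later in this series, that CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM ranks above CORESIGHT_DEV_SUBTYPE_SINK_BUFFER): for a chain ETM -> funnel -> ETF -> replicator -> ETR, the ETF (a LINKSINK with SINK_BUFFER) is recorded first but, being a LINKSINK, the walk continues past it; the ETR (SINK_SYSMEM) found further down wins in coresight_select_best_sink() despite its greater depth, whereas two sinks of equal subtype would be decided by the smaller depth, i.e. the one closer to the source.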
+/**
+ * coresight_find_default_sink: Find a sink suitable for use as a
+ * default sink.
+ *
+ * @csdev: starting source to find a connected sink.
+ *
+ * Walks the connection graph looking for a suitable sink to enable for the
+ * supplied source. Uses CoreSight device subtypes and distance from source
+ * to select the best sink.
+ *
+ * If a sink is found, then the default sink for this device is set and
+ * will be automatically used in future.
+ *
+ * Used in cases where the CoreSight user (perf / sysfs) has not selected a
+ * sink.
+ */
+struct coresight_device *
+coresight_find_default_sink(struct coresight_device *csdev)
+{
+ int depth = 0;
+
+	/* look for a default sink if we have not found one for this device */
+ if (!csdev->def_sink)
+ csdev->def_sink = coresight_find_sink(csdev, &depth);
+ return csdev->def_sink;
+}
+
+static int coresight_remove_sink_ref(struct device *dev, void *data)
+{
+ struct coresight_device *sink = data;
+ struct coresight_device *source = to_coresight_device(dev);
+
+ if (source->def_sink == sink)
+ source->def_sink = NULL;
+ return 0;
+}
+
+/**
+ * coresight_clear_default_sink: Remove all default sink references to the
+ * supplied sink.
+ *
+ * If the supplied device is a sink, check all the bus devices and clear
+ * any reference to this sink from their coresight_device def_sink
+ * field.
+ *
+ * @csdev: coresight sink - remove references to this from all sources.
+ */
+static void coresight_clear_default_sink(struct coresight_device *csdev)
+{
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK) ||
+ (csdev->type == CORESIGHT_DEV_TYPE_LINKSINK)) {
+ bus_for_each_dev(&coresight_bustype, NULL, csdev,
+ coresight_remove_sink_ref);
+ }
+}
+
/** coresight_validate_source - make sure a source has the right credentials
* @csdev: the device structure for a source.
* @function: the function this was called from.
@@ -1358,6 +1523,7 @@ void coresight_unregister(struct coresight_device *csdev)
etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
+ coresight_clear_default_sink(csdev);
coresight_release_platform_data(csdev, csdev->pdata);
device_unregister(&csdev->dev);
}
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 9e1ab701785c..befd111049c0 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -269,23 +269,22 @@ static int aggregate_requests(struct icc_node *node)
static int apply_constraints(struct icc_path *path)
{
struct icc_node *next, *prev = NULL;
+ struct icc_provider *p;
int ret = -EINVAL;
int i;
for (i = 0; i < path->num_nodes; i++) {
next = path->reqs[i].node;
+ p = next->provider;
- /*
- * Both endpoints should be valid master-slave pairs of the
- * same interconnect provider that will be configured.
- */
- if (!prev || next->provider != prev->provider) {
+ /* both endpoints should be valid master-slave pairs */
+ if (!prev || (p != prev->provider && !p->inter_set)) {
prev = next;
continue;
}
/* set the constraints */
- ret = next->provider->set(prev, next);
+ ret = p->set(prev, next);
if (ret)
goto out;
@@ -340,12 +339,12 @@ EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
* Returns a valid pointer to struct icc_node on success or ERR_PTR()
* on failure.
*/
-static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
{
struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
struct icc_provider *provider;
- if (!spec || spec->args_count != 1)
+ if (!spec)
return ERR_PTR(-EINVAL);
mutex_lock(&icc_lock);
@@ -359,6 +358,7 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
return node;
}
+EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
static void devm_icc_release(struct device *dev, void *res)
{
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3ca4325cc191..ce136d685d14 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -83,7 +83,7 @@ config IBM_ASM
WARNING: This software may not be supported or function
correctly on your IBM server. Please consult the IBM ServerProven
- website <http://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
+ website <https://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
for information on the specific driver level and support statement
for your IBM server.
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 33bba1802289..80d87e8a0bea 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -923,7 +923,7 @@ struct c2port_device *c2port_device_register(char *name,
}
dev_set_drvdata(c2dev->dev, c2dev);
- strncpy(c2dev->name, name, C2PORT_NAME_LEN);
+ strncpy(c2dev->name, name, C2PORT_NAME_LEN - 1);
c2dev->ops = ops;
mutex_init(&c2dev->mutex);
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index 1f56267ed2f4..895128475d83 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index 489ebe907688..a07674ed0596 100644
--- a/drivers/misc/cardreader/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -37,10 +37,11 @@ static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg1 = 0;
u8 reg3 = 0;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg1);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
if (!rtsx_vendor_setting_valid(reg1))
@@ -52,16 +53,17 @@ static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
- rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, &reg3);
+ pci_read_config_byte(pdev, PCR_SETTING_REG3, &reg3);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
}
static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg = 0;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
index 659056164b21..39a6a7ecc32e 100644
--- a/drivers/misc/cardreader/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -23,9 +23,10 @@ static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (rts5209_vendor_setting1_valid(reg)) {
@@ -34,7 +35,7 @@ static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->aspm_en = rts5209_reg_to_aspm(reg);
}
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (rts5209_vendor_setting2_valid(reg)) {
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 3a9467aaa435..f5f392ddf3d6 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -56,9 +56,10 @@ static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
@@ -69,7 +70,7 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
new file mode 100644
index 000000000000..28feab1449ab
--- /dev/null
+++ b/drivers/misc/cardreader/rts5228.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky WU <ricky_wu@realtek.com>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5228.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5228_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5228_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x13, 0x13, 0x13},
+ {0x96, 0x96, 0x96},
+ {0x7F, 0x7F, 0x7F},
+ {0x96, 0x96, 0x96},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0xB5, 0xB5, 0xB5},
+ {0xE6, 0x7E, 0xFE},
+ {0x6B, 0x6B, 0x6B},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ struct pci_dev *pdev = pcr->pci;
+ u32 reg;
+
+ /* 0x724~0x727 */
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+
+ /* 0x814~0x817 */
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+ pcr->rtd3_en = rtsx_reg_to_rtd3(reg);
+ if (rtsx_check_mmc_support(reg))
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static int rts5228_optimize_phy(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_phy_register(pcr, 0x07, 0x8F40);
+}
+
+static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5228_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5228_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5228_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5228_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5228_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5228_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5228_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5228_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+ CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1,
+ RTS5228_LDO1_TUNE_MASK, RTS5228_LDO1_33);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_SOFTSTART);
+ mdelay(2);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_FULLON);
+
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO3318_POWERON, RTS5228_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5228_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5228_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u16 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5228_CARD_PWR_CTL,
+ RTS5228_PUPDC, RTS5228_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val |= PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG,
+ RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val &= ~PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG,
+ RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5228_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5228_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5228_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5228_stop_cmd(pcr);
+ rts5228_switch_output_voltage(pcr, OUTPUT_3V3);
+}
+
+static void rts5228_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN);
+}
+
+static void rts5228_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0);
+}
+
+static int rts5228_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5228_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO_POWERON_MASK, 0);
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, CFG_SD_POW_AUTO_PD, 0);
+
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5228_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_LMT_THD_MASK,
+ RTS5228_LDO1_LMT_THD_1500);
+
+ rtsx_pci_read_register(pcr, RTS5228_LDO1_CFG0, &val);
+
+ mask = SD_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rts5228_enable_ocp(pcr);
+
+ } else {
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0);
+ }
+}
+
+static void rts5228_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ udelay(1000);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+
+}
+
+static void rts5228_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rts5228_clear_ocpstat(pcr);
+ rts5228_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ pcr->ocp_stat = 0;
+ }
+
+}
+
+static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
+ u32 lval;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+
+ if (0 == (lval & 0x0F))
+ rtsx_pci_enable_oobs_polling(pcr);
+ else
+ rtsx_pci_disable_oobs_polling(pcr);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+
+ rts5228_init_from_cfg(pcr);
+
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+ FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5228_fill_driving(pcr, OUTPUT_3V3);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+
+ /*
+	 * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+	 * to drive low, and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+ return 0;
+}
+
+static void rts5228_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 mask, val;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5228_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 mask, val;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
+ mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ mdelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5228_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5228_enable_aspm(pcr, true);
+ else
+ rts5228_disable_aspm(pcr, false);
+}
+
+static void rts5228_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5228_pcr_ops = {
+ .fetch_vendor_settings = rtsx5228_fetch_vendor_settings,
+ .turn_on_led = rts5228_turn_on_led,
+ .turn_off_led = rts5228_turn_off_led,
+ .extra_init_hw = rts5228_extra_init_hw,
+ .enable_auto_blink = rts5228_enable_auto_blink,
+ .disable_auto_blink = rts5228_disable_auto_blink,
+ .card_power_on = rts5228_card_power_on,
+ .card_power_off = rts5228_card_power_off,
+ .switch_output_voltage = rts5228_switch_output_voltage,
+ .force_power_down = rts5228_force_power_down,
+ .stop_cmd = rts5228_stop_cmd,
+ .set_aspm = rts5228_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5228_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5228_enable_ocp,
+ .disable_ocp = rts5228_disable_ocp,
+ .init_ocp = rts5228_init_ocp,
+ .process_ocp = rts5228_process_ocp,
+ .clear_ocpstat = rts5228_clear_ocpstat,
+ .optimize_phy = rts5228_optimize_phy,
+};
+
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u16 n;
+ u8 clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5228_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5228_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5228_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5228_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+		/* We use roughly 250 kHz in this initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = clk - 4;
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = 125/clk + 3;
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2) - 1;
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+ /* Wait SSC clock stable */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+
+}
+
+void rts5228_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5228_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5228_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5228_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5228_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5228_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930;
+}
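
For reference, the divider arithmetic in rts5228_pci_switch_clock() above is easier to see in isolation: with no conv_clk_and_div_n hook installed, the code starts from n = clk - 4, doubles the effective clock (bumping the post-divider stage) until n reaches the programmable range, and finally writes (n / 2) - 1 to SSC_DIV_N_0. The stand-alone sketch below simply replays that arithmetic for a 50 MHz request; MIN_DIV_N_PCR = 80 and the three-stage doubling limit are assumptions mirroring rtsx_pcr.h conventions, not values taken from this hunk.

#include <stdio.h>

#define MIN_DIV_N_PCR	80	/* assumed, mirrors rtsx_pcr.h */

int main(void)
{
	unsigned int clk = 50;		/* requested card clock in MHz */
	unsigned int n = clk - 4;	/* initial SSC divider code */
	unsigned int stage = 0;		/* CLK_DIV post-divider stage */

	/* Double the divider code until it is large enough to program. */
	while (n < MIN_DIV_N_PCR - 4 && stage < 3) {
		n = (n + 4) * 2 - 4;
		stage++;
	}
	n = (n / 2) - 1;	/* value the driver writes to SSC_DIV_N_0 */

	printf("clk=%uMHz -> SSC_DIV_N_0=%u after %u doubling stage(s)\n",
	       clk, n, stage);
	return 0;
}
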
diff --git a/drivers/misc/cardreader/rts5228.h b/drivers/misc/cardreader/rts5228.h
new file mode 100644
index 000000000000..6a872246aeed
--- /dev/null
+++ b/drivers/misc/cardreader/rts5228.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky WU <ricky_wu@realtek.com>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+#ifndef RTS5228_H
+#define RTS5228_H
+
+
+#define RTS5228_AUTOLOAD_CFG0 0xFF7B
+#define RTS5228_AUTOLOAD_CFG1 0xFF7C
+#define RTS5228_AUTOLOAD_CFG2 0xFF7D
+#define RTS5228_AUTOLOAD_CFG3 0xFF7E
+#define RTS5228_AUTOLOAD_CFG4 0xFF7F
+
+#define RTS5228_REG_VREF 0xFE97
+#define RTS5228_PWD_SUSPND_EN (1 << 4)
+
+#define RTS5228_PAD_H3L1 0xFF79
+#define PAD_GPIO_H3L1 (1 << 3)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5228_SSC_DEPTH_MASK 0x07
+#define RTS5228_SSC_DEPTH_DISABLE		0x00
+#define RTS5228_SSC_DEPTH_8M 0x01
+#define RTS5228_SSC_DEPTH_4M 0x02
+#define RTS5228_SSC_DEPTH_2M 0x03
+#define RTS5228_SSC_DEPTH_1M 0x04
+#define RTS5228_SSC_DEPTH_512K 0x05
+#define RTS5228_SSC_DEPTH_256K 0x06
+#define RTS5228_SSC_DEPTH_128K 0x07
+
+/* DMACTL 0xFE2C */
+#define RTS5228_DMA_PACK_SIZE_MASK 0xF0
+
+#define RTS5228_REG_LDO12_CFG 0xFF6E
+#define RTS5228_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5228_LDO12_100 (0x00<<1)
+#define RTS5228_LDO12_105 (0x01<<1)
+#define RTS5228_LDO12_110 (0x02<<1)
+#define RTS5228_LDO12_115 (0x03<<1)
+#define RTS5228_LDO12_120 (0x04<<1)
+#define RTS5228_LDO12_125 (0x05<<1)
+#define RTS5228_LDO12_130 (0x06<<1)
+#define RTS5228_LDO12_135 (0x07<<1)
+#define RTS5228_REG_PWD_LDO12 (0x01<<0)
+
+#define RTS5228_REG_LDO12_L12 0xFF6F
+#define RTS5228_LDO12_L12_MASK (0x07<<4)
+#define RTS5228_LDO12_L12_120 (0x04<<4)
+
+/* LDO control register */
+#define RTS5228_CARD_PWR_CTL 0xFD50
+#define RTS5228_PUPDC (0x01<<5)
+
+#define RTS5228_LDO1233318_POW_CTL 0xFF70
+#define RTS5228_LDO3318_POWERON (0x01<<3)
+#define RTS5228_LDO1_POWEROFF (0x00<<0)
+#define RTS5228_LDO1_SOFTSTART (0x01<<0)
+#define RTS5228_LDO1_FULLON (0x03<<0)
+#define RTS5228_LDO1_POWERON_MASK (0x03<<0)
+#define RTS5228_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5228_DV3318_CFG 0xFF71
+#define RTS5228_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5228_DV3318_17 (0x00<<4)
+#define RTS5228_DV3318_1V75 (0x01<<4)
+#define RTS5228_DV3318_18 (0x02<<4)
+#define RTS5228_DV3318_1V85 (0x03<<4)
+#define RTS5228_DV3318_19 (0x04<<4)
+#define RTS5228_DV3318_33 (0x07<<4)
+#define RTS5228_DV3318_SR_MASK (0x03<<2)
+#define RTS5228_DV3318_SR_0 (0x00<<2)
+#define RTS5228_DV3318_SR_250 (0x01<<2)
+#define RTS5228_DV3318_SR_500 (0x02<<2)
+#define RTS5228_DV3318_SR_1000 (0x03<<2)
+
+#define RTS5228_LDO1_CFG0 0xFF72
+#define RTS5228_LDO1_OCP_THD_MASK (0x07<<5)
+#define RTS5228_LDO1_OCP_EN (0x01<<4)
+#define RTS5228_LDO1_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5228_LDO1_OCP_LMT_EN (0x01<<1)
+
+#define RTS5228_LDO1_OCP_THD_730 (0x00<<5)
+#define RTS5228_LDO1_OCP_THD_780 (0x01<<5)
+#define RTS5228_LDO1_OCP_THD_860 (0x02<<5)
+#define RTS5228_LDO1_OCP_THD_930 (0x03<<5)
+#define RTS5228_LDO1_OCP_THD_1000 (0x04<<5)
+#define RTS5228_LDO1_OCP_THD_1070 (0x05<<5)
+#define RTS5228_LDO1_OCP_THD_1140 (0x06<<5)
+#define RTS5228_LDO1_OCP_THD_1220 (0x07<<5)
+
+#define RTS5228_LDO1_LMT_THD_450 (0x00<<2)
+#define RTS5228_LDO1_LMT_THD_1000 (0x01<<2)
+#define RTS5228_LDO1_LMT_THD_1500 (0x02<<2)
+#define RTS5228_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5228_LDO1_CFG1 0xFF73
+#define RTS5228_LDO1_SR_TIME_MASK (0x03<<6)
+#define RTS5228_LDO1_SR_0_0 (0x00<<6)
+#define RTS5228_LDO1_SR_0_25 (0x01<<6)
+#define RTS5228_LDO1_SR_0_5 (0x02<<6)
+#define RTS5228_LDO1_SR_1_0 (0x03<<6)
+#define RTS5228_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5228_LDO1_18 (0x05<<1)
+#define RTS5228_LDO1_33 (0x07<<1)
+#define RTS5228_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5228_AUXCLK_GAT_CTL 0xFF74
+
+#define RTS5228_REG_RREF_CTL_0 0xFF75
+#define RTS5228_FORCE_RREF_EXTL (0x01<<7)
+#define RTS5228_REG_BG33_MASK (0x07<<0)
+#define RTS5228_RREF_12_1V (0x04<<0)
+#define RTS5228_RREF_12_3V (0x05<<0)
+
+#define RTS5228_REG_RREF_CTL_1 0xFF76
+
+#define RTS5228_REG_RREF_CTL_2 0xFF77
+#define RTS5228_TEST_INTL_RREF (0x01<<7)
+#define RTS5228_DGLCH_TIME_MASK (0x03<<5)
+#define RTS5228_DGLCH_TIME_50 (0x00<<5)
+#define RTS5228_DGLCH_TIME_75 (0x01<<5)
+#define RTS5228_DGLCH_TIME_100 (0x02<<5)
+#define RTS5228_DGLCH_TIME_125 (0x03<<5)
+#define RTS5228_REG_REXT_TUNE_MASK (0x1F<<0)
+
+#define RTS5228_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+
+
+/* Single LUN, support SD */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+
+
+/* For Change_FPGA_SSCClock Function */
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5228_H */
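
The header above defines each multi-bit register field twice: a mask covering the field and a set of pre-shifted values to drop into it. These pair with the mask/value form of rtsx_pci_write_register(pcr, reg, mask, data) used throughout rts5228.c, which only touches the masked bits. A minimal, illustrative helper (not part of the patch) showing the pattern with the 930 mA LDO1 over-current threshold that rts5228_init_params() selects:

/* Illustrative only: program the 930 mA LDO1 OCP threshold without
 * disturbing the other bits of RTS5228_LDO1_CFG0. Assumes the rts5228.h
 * and rtsx_pcr.h definitions are in scope.
 */
static int rts5228_set_ldo1_ocp_930(struct rtsx_pcr *pcr)
{
	return rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
				       RTS5228_LDO1_OCP_THD_MASK,
				       RTS5228_LDO1_OCP_THD_930);
}
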
diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 9f080a32ef50..89e6f124ca5c 100644
--- a/drivers/misc/cardreader/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -23,9 +23,10 @@ static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
@@ -37,7 +38,7 @@ static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 =
map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 6c6c9e95a29f..941b3d77f1e9 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -55,9 +55,10 @@ static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
@@ -70,7 +71,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
@@ -93,32 +94,33 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
struct rtsx_cr_option *option = &(pcr->option);
u32 lval;
- if (CHK_PCI_PID(pcr, PID_524A))
- rtsx_pci_read_config_dword(pcr,
- PCR_ASPM_SETTING_REG1, &lval);
- else
- rtsx_pci_read_config_dword(pcr,
- PCR_ASPM_SETTING_REG2, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 7a9dbb778e84..b9f66b1384a6 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -64,9 +64,10 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
@@ -79,7 +80,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
@@ -496,21 +497,27 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
struct rtsx_cr_option *option = &pcr->option;
u32 lval;
- rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
rts5260_pwr_saving_setting(pcr);
@@ -518,7 +525,7 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 195822ec858e..471961487ff8 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -59,9 +59,11 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
+
/* 0x814~0x817 */
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (!rts5261_vendor_setting_valid(reg)) {
@@ -76,7 +78,7 @@ static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->flags |= PCR_REVERSE_SOCKET;
/* 0x724~0x727 */
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
pcr->aspm_en = rts5261_reg_to_aspm(reg);
@@ -361,6 +363,7 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr)
static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
int retval;
u32 lval, i;
u8 valid, efuse_valid, tmp;
@@ -386,8 +389,7 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
if (efuse_valid == 0) {
- retval = rtsx_pci_read_config_dword(pcr,
- PCR_SETTING_REG2, &lval);
+ retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
if (retval != 0)
pcr_dbg(pcr, "read 0x814 DW fail\n");
pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
@@ -399,9 +401,9 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
REG_EFUSE_POR, 0);
pcr_dbg(pcr, "Disable efuse por!\n");
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &lval);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
lval = lval & 0x00FFFFFF;
- retval = rtsx_pci_write_config_dword(pcr, PCR_SETTING_REG2, lval);
+ retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval);
if (retval != 0)
pcr_dbg(pcr, "write config fail\n");
@@ -410,27 +412,33 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
@@ -439,7 +447,7 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
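
The rts5249, rts5260 and rts5261 hunks above all make the same substitution: rather than reading vendor-specific config offsets (PCR_ASPM_SETTING_REG1/2), they locate the standard PCI L1 PM Substates extended capability and decode its CTL1 register with the generic PCI_L1SS_CTL1_* bits. Collapsed into one place, the shared pattern looks roughly like this (a sketch only; the rts5261 variant additionally clears each flag when the corresponding bit is absent):

static void rtsx_init_aspm_flags_from_l1ss(struct rtsx_pcr *pcr)
{
	struct pci_dev *pdev = pcr->pci;
	u32 lval;
	int l1ss;

	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!l1ss)
		return;	/* no L1 substates capability: keep the defaults */

	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);

	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
}
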
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 0d5928bc1b6d..37ccc67f4914 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -23,6 +23,7 @@
#include "rtsx_pcr.h"
#include "rts5261.h"
+#include "rts5228.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -50,6 +51,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -206,16 +208,10 @@ int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
int err, i, finished = 0;
u8 tmp;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
+ rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
+ rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
+ rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
for (i = 0; i < 100000; i++) {
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
@@ -247,16 +243,10 @@ int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
int err, i, finished = 0;
u16 data;
- u8 *ptr, tmp;
+ u8 tmp, val1, val2;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
+ rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
for (i = 0; i < 100000; i++) {
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
@@ -272,17 +262,9 @@ int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
if (!finished)
return -ETIMEDOUT;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
-
- ptr = rtsx_pci_get_cmd_data(pcr);
- data = ((u16)ptr[1] << 8) | ptr[0];
+ rtsx_pci_read_register(pcr, PHYDATA0, &val1);
+ rtsx_pci_read_register(pcr, PHYDATA1, &val2);
+ data = val1 | (val2 << 8);
if (val)
*val = data;
@@ -417,7 +399,7 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
if (end)
option |= RTSX_SG_END;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
if (len > 0xFFFF)
val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
| (((u64)len >> 16) << 6) | option;
@@ -723,6 +705,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (PCI_PID(pcr) == PID_5261)
return rts5261_pci_switch_clock(pcr, card_clock,
ssc_depth, initial_mode, double_clk, vpclk);
+ if (PCI_PID(pcr) == PID_5228)
+ return rts5228_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
if (initial_mode) {
/* We use 250k(around) here, in initial stage */
@@ -1111,8 +1096,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
mutex_unlock(&pcr->pcr_mutex);
}
-#ifdef CONFIG_PM
-static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
+static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
@@ -1126,7 +1110,6 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
if (pcr->ops->force_power_down)
pcr->ops->force_power_down(pcr, pm_state);
}
-#endif
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
@@ -1202,6 +1185,36 @@ void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
}
}
+void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
+{
+ u16 val;
+
+ if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ rtsx_pci_read_phy_register(pcr, 0x01, &val);
+ val |= 1<<9;
+ rtsx_pci_write_phy_register(pcr, 0x01, val);
+ }
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
+ rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
+
+}
+
+void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
+{
+ u16 val;
+
+ if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ rtsx_pci_read_phy_register(pcr, 0x01, &val);
+ val &= ~(1<<9);
+ rtsx_pci_write_phy_register(pcr, 0x01, val);
+ }
+ rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
+
+}
+
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
@@ -1231,9 +1244,13 @@ int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
int err;
- pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
+ if (PCI_PID(pcr) == PID_5228)
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
+ RTS5228_LDO1_SR_0_5);
+
rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
rtsx_pci_enable_bus_int(pcr);
@@ -1280,6 +1297,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
if (PCI_PID(pcr) == PID_5261)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
RTS5261_SSC_DEPTH_2M);
+ else if (PCI_PID(pcr) == PID_5228)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5228_SSC_DEPTH_2M);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
@@ -1314,6 +1334,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_525A:
case PID_5260:
case PID_5261:
+ case PID_5228:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1324,9 +1345,10 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_init_ocp(pcr);
/* Enable clk_request_n to enable clock power management */
- rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
/* Enter L1 when host tx idle */
- rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
+ pci_write_config_byte(pdev, 0x70F, 0x5B);
if (pcr->ops->extra_init_hw) {
err = pcr->ops->extra_init_hw(pcr);
@@ -1401,6 +1423,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5261:
rts5261_init_params(pcr);
break;
+
+ case 0x5228:
+ rts5228_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1604,10 +1630,9 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
-#ifdef CONFIG_PM
-
-static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
+static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
{
+ struct pci_dev *pcidev = to_pci_dev(dev_d);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
@@ -1623,17 +1648,15 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
rtsx_pci_power_off(pcr, HOST_ENTER_S3);
- pci_save_state(pcidev);
- pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
- pci_disable_device(pcidev);
- pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
+ device_wakeup_disable(dev_d);
mutex_unlock(&pcr->pcr_mutex);
return 0;
}
-static int rtsx_pci_resume(struct pci_dev *pcidev)
+static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
{
+ struct pci_dev *pcidev = to_pci_dev(dev_d);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
int ret = 0;
@@ -1645,13 +1668,6 @@ static int rtsx_pci_resume(struct pci_dev *pcidev)
mutex_lock(&pcr->pcr_mutex);
- pci_set_power_state(pcidev, PCI_D0);
- pci_restore_state(pcidev);
- ret = pci_enable_device(pcidev);
- if (ret)
- goto out;
- pci_set_master(pcidev);
-
ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
if (ret)
goto out;
@@ -1667,6 +1683,8 @@ out:
return ret;
}
+#ifdef CONFIG_PM
+
static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
struct pcr_handle *handle;
@@ -1686,19 +1704,18 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
#else /* CONFIG_PM */
-#define rtsx_pci_suspend NULL
-#define rtsx_pci_resume NULL
#define rtsx_pci_shutdown NULL
#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume);
+
static struct pci_driver rtsx_pci_driver = {
.name = DRV_NAME_RTSX_PCI,
.id_table = rtsx_pci_ids,
.probe = rtsx_pci_probe,
.remove = rtsx_pci_remove,
- .suspend = rtsx_pci_suspend,
- .resume = rtsx_pci_resume,
+ .driver.pm = &rtsx_pci_pm_ops,
.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);
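
Taken together, the rtsx_pcr.c hunks above convert the driver from the legacy pci_driver .suspend/.resume hooks, with their hand-rolled pci_save_state()/pci_set_power_state() sequences, to dev_pm_ops: the callbacks take a struct device, are marked __maybe_unused so they compile away cleanly when CONFIG_PM_SLEEP is off, and the PCI core now saves and restores device state around them. Reduced to a skeleton (names here are placeholders, not from the patch), the pattern is:

static int __maybe_unused example_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Quiesce the device only; the PCI core handles pci_save_state()
	 * and the power-state transition around this callback.
	 */
	dev_dbg(&pdev->dev, "suspending\n");
	return 0;
}

static int __maybe_unused example_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	dev_dbg(&pdev->dev, "resuming\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_pci_suspend,
			 example_pci_resume);

static struct pci_driver example_driver = {
	.name		= "example",
	/* .id_table, .probe, .remove, ... */
	.driver.pm	= &example_pm_ops,
};
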
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 024cbd998b2a..6b322db8738e 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -53,6 +53,7 @@ void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
void rts5261_init_params(struct rtsx_pcr *pcr);
+void rts5228_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
@@ -70,6 +71,8 @@ static inline u8 map_sd_drive(int idx)
#define rts5209_vendor_setting1_valid(reg) (!((reg) & 0x80))
#define rts5209_vendor_setting2_valid(reg) ((reg) & 0x80)
+#define rtsx_check_mmc_support(reg) ((reg) & 0x10)
+#define rtsx_reg_to_rtd3(reg) ((reg) & 0x02)
#define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03)
@@ -100,6 +103,8 @@ void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr);
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index a328cab11014..59eda55d92a3 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -759,7 +759,7 @@ static int rtsx_usb_post_reset(struct usb_interface *intf)
return 0;
}
-static struct usb_device_id rtsx_usb_usb_ids[] = {
+static const struct usb_device_id rtsx_usb_usb_ids[] = {
{ USB_DEVICE(0x0BDA, 0x0129) },
{ USB_DEVICE(0x0BDA, 0x0139) },
{ USB_DEVICE(0x0BDA, 0x0140) },
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index b290bc2ee240..55b7ee0e8f93 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -166,37 +166,24 @@ void cb710_set_irq_handler(struct cb710_slot *slot,
}
EXPORT_SYMBOL_GPL(cb710_set_irq_handler);
-#ifdef CONFIG_PM
-
-static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused cb710_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct cb710_chip *chip = pci_get_drvdata(pdev);
devm_free_irq(&pdev->dev, pdev->irq, chip);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- if (state.event & PM_EVENT_SLEEP)
- pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-static int cb710_resume(struct pci_dev *pdev)
+static int __maybe_unused cb710_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct cb710_chip *chip = pci_get_drvdata(pdev);
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pcim_enable_device(pdev);
- if (err)
- return err;
return devm_request_irq(&pdev->dev, pdev->irq,
cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
}
-#endif /* CONFIG_PM */
-
static int cb710_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -312,15 +299,14 @@ static const struct pci_device_id cb710_pci_tbl[] = {
{ 0, }
};
+static SIMPLE_DEV_PM_OPS(cb710_pm_ops, cb710_suspend, cb710_resume);
+
static struct pci_driver cb710_driver = {
.name = KBUILD_MODNAME,
.id_table = cb710_pci_tbl,
.probe = cb710_probe,
.remove = cb710_remove_one,
-#ifdef CONFIG_PM
- .suspend = cb710_suspend,
- .resume = cb710_resume,
-#endif
+ .driver.pm = &cb710_pm_ops,
};
static int __init cb710_init_module(void)
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index dfd2969e3628..e5a4ed3701eb 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -117,6 +117,7 @@ static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
/**
* cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer
* @miter: sg mapping iterator used for writing
+ * @data: data to write to sg buffer
*
* Description:
* Writes 32-bit word starting at byte pointed to by @miter@
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index cb9cca35a226..5b93ff51d82a 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -175,7 +175,7 @@ static int update_devicetree(struct cxl *adapter, s32 scope)
struct update_nodes_workarea *unwa;
u32 action, node_count;
int token, rc, i;
- __be32 *data, drc_index, phandle;
+ __be32 *data, phandle;
char *buf;
token = rtas_token("ibm,update-nodes");
@@ -213,7 +213,7 @@ static int update_devicetree(struct cxl *adapter, s32 scope)
break;
case OPCODE_ADD:
/* nothing to do, just move pointer */
- drc_index = *data++;
+ data++;
break;
}
}
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
index b7c57f67f4f5..aba5e20eeb1f 100644
--- a/drivers/misc/cxl/hcalls.c
+++ b/drivers/misc/cxl/hcalls.c
@@ -167,7 +167,7 @@ long cxl_h_attach_process(u64 unit_address,
}
}
-/**
+/*
* cxl_h_detach_process - Detach a process element from a coherent
* platform function.
*/
@@ -197,7 +197,7 @@ long cxl_h_detach_process(u64 unit_address, u64 process_token)
}
}
-/**
+/*
* cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
* the partition to manipulate or query
* certain coherent platform function behaviors.
@@ -238,7 +238,7 @@ static long cxl_h_control_function(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_reset_afu - Perform a reset to the coherent platform function.
*/
long cxl_h_reset_afu(u64 unit_address)
@@ -249,7 +249,7 @@ long cxl_h_reset_afu(u64 unit_address)
NULL);
}
-/**
+/*
* cxl_h_suspend_process - Suspend a process from being executed
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -262,7 +262,7 @@ long cxl_h_suspend_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_resume_process - Resume a process to be executed
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -275,7 +275,7 @@ long cxl_h_resume_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_read_error_state - Checks the error state of the coherent
* platform function.
* R4 contains the error state
@@ -288,7 +288,7 @@ long cxl_h_read_error_state(u64 unit_address, u64 *state)
state);
}
-/**
+/*
* cxl_h_get_afu_err - collect the AFU error buffer
* Parameter1 = byte offset into error buffer to retrieve, valid values
* are between 0 and (ibm,error-buffer-size - 1)
@@ -304,7 +304,7 @@ long cxl_h_get_afu_err(u64 unit_address, u64 offset,
NULL);
}
-/**
+/*
* cxl_h_get_config - collect configuration record for the
* coherent platform function
* Parameter1 = # of configuration record to retrieve, valid values are
@@ -324,7 +324,7 @@ long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
NULL);
}
-/**
+/*
* cxl_h_terminate_process - Terminate the process before completion
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -337,7 +337,7 @@ long cxl_h_terminate_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_collect_vpd - Collect VPD for the coherent platform function.
* Parameter1 = # of VPD record to retrieve, valid values are between 0
* and (ibm,#config-records - 1).
@@ -355,7 +355,7 @@ long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
out);
}
-/**
+/*
* cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
*/
long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
@@ -365,7 +365,7 @@ long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
0, 0, 0, 0, reg);
}
-/**
+/*
* cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
* based on an interrupt
* Parameter1 = value to write to the function-wide error interrupt register
@@ -378,7 +378,7 @@ long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value)
NULL);
}
-/**
+/*
* cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
* an error log
*/
@@ -390,7 +390,7 @@ long cxl_h_get_error_log(u64 unit_address, u64 value)
NULL);
}
-/**
+/*
* cxl_h_collect_int_info - Collect interrupt info about a coherent
* platform function after an interrupt occurred.
*/
@@ -425,7 +425,7 @@ long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
}
}
-/**
+/*
* cxl_h_control_faults - Control the operation of a coherent platform
* function after a fault occurs.
*
@@ -470,7 +470,7 @@ long cxl_h_control_faults(u64 unit_address, u64 process_token,
}
}
-/**
+/*
* cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call
* allows the partition to manipulate or query
* certain coherent platform facility behaviors.
@@ -509,7 +509,7 @@ static long cxl_h_control_facility(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
*/
long cxl_h_reset_adapter(u64 unit_address)
@@ -520,7 +520,7 @@ long cxl_h_reset_adapter(u64 unit_address)
NULL);
}
-/**
+/*
* cxl_h_collect_vpd - Collect VPD for the coherent platform function.
* Parameter1 = 4K naturally aligned real buffer containing block
* list entries
@@ -536,7 +536,7 @@ long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
out);
}
-/**
+/*
* cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
* hypervisor call provide platform support for
* downloading a base adapter image to the coherent
@@ -616,7 +616,7 @@ static long cxl_h_download_facility(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_download_adapter_image - Download the base image to the coherent
* platform facility.
*/
@@ -629,7 +629,7 @@ long cxl_h_download_adapter_image(u64 unit_address,
list_address, num, out);
}
-/**
+/*
* cxl_h_validate_adapter_image - Validate the base image in the coherent
* platform facility.
*/
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index f0263d1a1fdf..d97a243ad30c 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -624,7 +624,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
&afu->dev.kobj, "cr%i", cr->cr);
if (rc)
- goto err;
+ goto err1;
rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
if (rc)
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 1cf320e2a415..1264253cc07b 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -150,7 +150,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
out:
cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+ return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
@@ -184,7 +184,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
out:
cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
+ return rc ? PCIBIOS_SET_FAILED : 0;
}
static struct pci_ops cxl_pcie_pci_ops =
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 713e92ee27ac..3c4eaba86576 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -66,13 +66,13 @@
Path Models", IEEE Transactions on communications, COM-25,
No. 6, June
1977.
- http://www.rowetel.com/images/echo/dual_path_paper.pdf
+ https://www.rowetel.com/images/echo/dual_path_paper.pdf
[2] The classic, very useful paper that tells you how to
actually build a real world echo canceller:
Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice
Echo Canceller with a TMS320020,
- http://www.rowetel.com/images/echo/spra129.pdf
+ https://www.rowetel.com/images/echo/spra129.pdf
[3] I have written a series of blog posts on this work, here is
Part 1: http://www.rowetel.com/blog/?p=18
@@ -80,7 +80,7 @@
[4] The source code http://svn.rowetel.com/software/oslec/
[5] A nice reference on LMS filters:
- http://en.wikipedia.org/wiki/Least_mean_squares_filter
+ https://en.wikipedia.org/wiki/Least_mean_squares_filter
Credits:
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 9ff18d4961ce..2591c21b2b5d 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -225,7 +225,7 @@ static const struct of_device_id at24_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at24_of_match);
-static const struct acpi_device_id at24_acpi_ids[] = {
+static const struct acpi_device_id __maybe_unused at24_acpi_ids[] = {
{ "INT3499", (kernel_ulong_t)&at24_data_INT3499 },
{ "TPF0001", (kernel_ulong_t)&at24_data_24c1024 },
{ /* END OF LIST */ }
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 36a2eb837371..9627294fe3e9 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
/**
* eeprom_93cx6_readb - Read a byte from eeprom
* @eeprom: Pointer to eeprom structure
- * @word: Byte index from where we should start reading
+ * @byte: Byte index from where we should start reading
* @data: target pointer where the information will have to be stored
*
* This function will read a byte of the eeprom data
@@ -270,7 +270,7 @@ EXPORT_SYMBOL_GPL(eeprom_93cx6_readb);
* @eeprom: Pointer to eeprom structure
* @byte: Index from where we should start reading
* @data: target pointer where the information will have to be stored
- * @words: Number of bytes that should be read.
+ * @bytes: Number of bytes that should be read.
*
* This function will read all requested bytes from the eeprom,
* this is done by calling eeprom_93cx6_readb() multiple times.
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 3c2d405bc79b..f950d0155876 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -103,7 +103,9 @@ EXPORT_SYMBOL_GPL(enclosure_for_each_device);
* enclosure_register - register device as an enclosure
*
* @dev: device containing the enclosure
+ * @name: chosen device name
* @components: number of components in the enclosure
+ * @cb: platform call-backs
*
* This sets up the device for being an enclosure. Note that @dev does
* not have to be a dedicated enclosure device. It may be some other type
@@ -266,7 +268,7 @@ static const struct attribute_group *enclosure_component_groups[];
/**
* enclosure_component_alloc - prepare a new enclosure component
* @edev: the enclosure to add the component
- * @num: the device number
+ * @number: the device number
* @type: the type of component being added
* @name: an optional name to appear in sysfs (leave NULL if none)
*
@@ -347,7 +349,7 @@ EXPORT_SYMBOL_GPL(enclosure_component_register);
/**
* enclosure_add_device - add a device as being part of an enclosure
* @edev: the enclosure device being added to.
- * @num: the number of the component
+ * @component: the number of the component
* @dev: the device being added
*
* Declares a real device to reside in slot (or identifier) @num of an
@@ -389,7 +391,7 @@ EXPORT_SYMBOL_GPL(enclosure_add_device);
/**
* enclosure_remove_device - remove a device from an enclosure
* @edev: the enclosure device
- * @num: the number of the component to remove
+ * @dev: device to remove/put
*
* Returns zero on success or an error.
*
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 1dc6c7c5cbce..acc459fc8105 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -165,6 +165,7 @@ static void genwqe_dev_free(struct genwqe_dev *cd)
/**
* genwqe_bus_reset() - Card recovery
+ * @cd: GenWQE device information
*
* pci_reset_function() will recover the device and ensure that the
* registers are accessible again when it completes with success. If
@@ -262,6 +263,7 @@ static void genwqe_tweak_hardware(struct genwqe_dev *cd)
/**
* genwqe_recovery_on_fatal_gfir_required() - Version depended actions
+ * @cd: GenWQE device information
*
* Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
* be ignored. This is e.g. true for the bitstream we gave to the card
@@ -280,6 +282,7 @@ int genwqe_flash_readback_fails(struct genwqe_dev *cd)
/**
* genwqe_T_psec() - Calculate PF/VF timeout register content
+ * @cd: GenWQE device information
*
* Note: From a design perspective it turned out to be a bad idea to
* use codes here to specifiy the frequency/speed values. An old
@@ -303,6 +306,7 @@ static int genwqe_T_psec(struct genwqe_dev *cd)
/**
* genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
*
* Do this _after_ card_reset() is called. Otherwise the values will
* vanish. The settings need to be done when the queues are inactive.
@@ -329,6 +333,7 @@ static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
/**
* genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
*/
static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
{
@@ -543,6 +548,7 @@ static int genwqe_start(struct genwqe_dev *cd)
/**
* genwqe_stop() - Stop card operation
+ * @cd: GenWQE device information
*
* Recovery notes:
* As long as genwqe_thread runs we might access registers during
@@ -569,6 +575,8 @@ static int genwqe_stop(struct genwqe_dev *cd)
/**
* genwqe_recover_card() - Try to recover the card if it is possible
+ * @cd: GenWQE device information
+ * @fatal_err: Indicate whether to attempt soft reset
*
* If fatal_err is set no register access is possible anymore. It is
* likely that genwqe_start fails in that situation. Proper error
@@ -618,6 +626,7 @@ static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
/**
* genwqe_fir_checking() - Check the fault isolation registers of the card
+ * @cd: GenWQE device information
*
* If this code works ok, can be tried out with help of the genwqe_poke tool:
* sudo ./tools/genwqe_poke 0x8 0xfefefefefef
@@ -762,6 +771,7 @@ static u64 genwqe_fir_checking(struct genwqe_dev *cd)
/**
* genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot
+ * @pci_dev: PCI device information struct
*
* Note: pci_set_pcie_reset_state() is not implemented on all archs, so this
* reset method will not work in all cases.
@@ -826,8 +836,9 @@ static int genwqe_platform_recovery(struct genwqe_dev *cd)
return rc;
}
-/*
+/**
* genwqe_reload_bistream() - reload card bitstream
+ * @cd: GenWQE device information
*
* Set the appropriate register and call fundamental reset to reaload the card
* bitstream.
@@ -880,6 +891,7 @@ static int genwqe_reload_bistream(struct genwqe_dev *cd)
/**
* genwqe_health_thread() - Health checking thread
+ * @data: GenWQE device information
*
* This thread is only started for the PF of the card.
*
@@ -1043,18 +1055,17 @@ static int genwqe_health_thread_running(struct genwqe_dev *cd)
static int genwqe_health_check_stop(struct genwqe_dev *cd)
{
- int rc;
-
if (!genwqe_health_thread_running(cd))
return -EIO;
- rc = kthread_stop(cd->health_thread);
+ kthread_stop(cd->health_thread);
cd->health_thread = NULL;
return 0;
}
/**
* genwqe_pci_setup() - Allocate PCIe related resources for our card
+ * @cd: GenWQE device information
*/
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
@@ -1140,6 +1151,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
/**
* genwqe_pci_remove() - Free PCIe related resources for our card
+ * @cd: GenWQE device information
*/
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
@@ -1154,7 +1166,8 @@ static void genwqe_pci_remove(struct genwqe_dev *cd)
/**
* genwqe_probe() - Device initialization
- * @pdev: PCI device information struct
+ * @pci_dev: PCI device information struct
+ * @id: PCI device ID
*
* Callable for multiple cards. This function is called on bind.
*
@@ -1214,6 +1227,7 @@ static int genwqe_probe(struct pci_dev *pci_dev,
/**
* genwqe_remove() - Called when device is removed (hot-plugable)
+ * @pci_dev: PCI device information struct
*
* Or when driver is unloaded respecitively when unbind is done.
*/
@@ -1233,8 +1247,10 @@ static void genwqe_remove(struct pci_dev *pci_dev)
genwqe_dev_free(cd);
}
-/*
+/**
* genwqe_err_error_detected() - Error detection callback
+ * @pci_dev: PCI device information struct
+ * @state: PCI channel state
*
* This callback is called by the PCI subsystem whenever a PCI bus
* error is detected.
@@ -1324,7 +1340,7 @@ static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
return 0;
}
-static struct pci_error_handlers genwqe_err_handler = {
+static const struct pci_error_handlers genwqe_err_handler = {
.error_detected = genwqe_err_error_detected,
.mmio_enabled = genwqe_err_result_none,
.slot_reset = genwqe_err_slot_reset,
@@ -1342,6 +1358,8 @@ static struct pci_driver genwqe_driver = {
/**
* genwqe_devnode() - Set default access mode for genwqe devices.
+ * @dev: Pointer to device (unused)
+ * @mode: Carrier to pass-back given mode (permissions)
*
* Default mode should be rw for everybody. Do not change default
* device name.
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 905106579935..0db4000dedf2 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -244,10 +244,13 @@ static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
(cd->card_state != GENWQE_CARD_USED);
}
+#define RET_DDCB_APPENDED 1
+#define RET_DDCB_TAPPED 2
/**
* enqueue_ddcb() - Enqueue a DDCB
* @cd: pointer to genwqe device descriptor
* @queue: queue this operation should be done on
+ * @pddcb: pointer to ddcb structure
* @ddcb_no: pointer to ddcb number being tapped
*
* Start execution of DDCB by tapping or append to queue via NEXT
@@ -259,9 +262,6 @@ static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
* Return: 1 if new DDCB is appended to previous
* 2 if DDCB queue is tapped via register/simulation
*/
-#define RET_DDCB_APPENDED 1
-#define RET_DDCB_TAPPED 2
-
static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
struct ddcb *pddcb, int ddcb_no)
{
@@ -316,6 +316,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
/**
* copy_ddcb_results() - Copy output state from real DDCB to request
+ * @req: pointer to requsted DDCB parameters
+ * @ddcb_no: pointer to ddcb number being tapped
*
* Copy DDCB ASV to request struct. There is no endian
* conversion made, since data structure in ASV is still
@@ -356,6 +358,7 @@ static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
/**
* genwqe_check_ddcb_queue() - Checks DDCB queue for completed work equests.
* @cd: pointer to genwqe device descriptor
+ * @queue: queue to be checked
*
* Return: Number of DDCBs which were finished
*/
@@ -553,6 +556,8 @@ int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
/**
* get_next_ddcb() - Get next available DDCB
* @cd: pointer to genwqe device descriptor
+ * @queue: DDCB queue
+ * @num: internal DDCB number
*
* DDCB's content is completely cleared but presets for PRE and
* SEQNUM. This function must only be called when ddcb_lock is held.
@@ -900,7 +905,7 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
/**
* __genwqe_execute_raw_ddcb() - Setup and execute DDCB
* @cd: pointer to genwqe device descriptor
- * @req: user provided DDCB request
+ * @cmd: user provided DDCB command
* @f_flags: file mode: blocking, non-blocking
*/
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
@@ -965,6 +970,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
/**
* genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
+ * @cd: pointer to genwqe device descriptor
*
* We use this as condition for our wait-queue code.
*/
@@ -993,6 +999,7 @@ static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
/**
* genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
+ * @cd: pointer to genwqe device descriptor
*
* Keep track on the number of DDCBs which ware currently in the
* queue. This is needed for statistics as well as conditon if we want
@@ -1171,6 +1178,7 @@ static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
/**
* genwqe_card_thread() - Work thread for the DDCB queue
+ * @data: pointer to genwqe device descriptor
*
* The idea is to check if there are DDCBs in processing. If there are
* some finished DDCBs, we process them and wakeup the
@@ -1299,6 +1307,7 @@ int genwqe_setup_service_layer(struct genwqe_dev *cd)
/**
* queue_wake_up_all() - Handles fatal error case
+ * @cd: pointer to genwqe device descriptor
*
* The PCI device got unusable and we have to stop all pending
* requests as fast as we can. The code after this must purge the
@@ -1323,6 +1332,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd)
/**
* genwqe_finish_queue() - Remove any genwqe devices and user-interfaces
+ * @cd: pointer to genwqe device descriptor
*
* Relies on the pre-condition that there are no users of the card
* device anymore e.g. with open file-descriptors.
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 1b5b82e65268..491fb4482da2 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 040a0bda3125..55fc5b80e649 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -87,7 +87,7 @@ static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
* @cfile: Descriptor of opened file
* @u_addr: User virtual address
* @size: Size of buffer
- * @dma_addr: DMA address to be updated
+ * @virt_addr: Virtual address to be updated
*
* Return: Pointer to the corresponding mapping NULL if not found
*/
@@ -144,6 +144,7 @@ static void __genwqe_del_mapping(struct genwqe_file *cfile,
* @u_addr: user virtual address
* @size: size of buffer
* @dma_addr: DMA address to be updated
+ * @virt_addr: Virtual address to be updated
* Return: Pointer to the corresponding mapping NULL if not found
*/
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
@@ -249,6 +250,8 @@ static void genwqe_remove_pinnings(struct genwqe_file *cfile)
/**
* genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
+ * @cd: GenWQE device information
+ * @sig: Signal to send out
*
* E.g. genwqe_send_signal(cd, SIGIO);
*/
@@ -380,6 +383,7 @@ static void genwqe_vma_open(struct vm_area_struct *vma)
/**
* genwqe_vma_close() - Called each time when vma is unmapped
+ * @vma: VMA area to close
*
* Free memory which got allocated by GenWQE mmap().
*/
@@ -416,6 +420,8 @@ static const struct vm_operations_struct genwqe_vma_ops = {
/**
* genwqe_mmap() - Provide contignous buffers to userspace
+ * @filp: File pointer (unused)
+ * @vma: VMA area to map
*
* We use mmap() to allocate contignous buffers used for DMA
* transfers. After the buffer is allocated we remap it to user-space
@@ -484,16 +490,15 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
}
+#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
+
/**
* do_flash_update() - Excute flash update (write image or CVPD)
- * @cd: genwqe device
+ * @cfile: Descriptor of opened file
* @load: details about image load
*
* Return: 0 if successful
*/
-
-#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
-
static int do_flash_update(struct genwqe_file *cfile,
struct genwqe_bitstream *load)
{
@@ -820,6 +825,8 @@ static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
/**
* ddcb_cmd_cleanup() - Remove dynamically created fixup entries
+ * @cfile: Descriptor of opened file
+ * @req: DDCB work request
*
* Only if there are any. Pinnings are not removed.
*/
@@ -844,6 +851,8 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
/**
* ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
+ * @cfile: Descriptor of opened file
+ * @req: DDCB work request
*
* Before the DDCB gets executed we need to handle the fixups. We
* replace the user-space addresses with DMA addresses or do
@@ -974,6 +983,8 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
/**
* genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
+ * @cfile: Descriptor of opened file
+ * @cmd: Command identifier (passed from user)
*
* The code will build up the translation tables or lookup the
 * contiguous memory allocation table to find the right translations
@@ -1339,6 +1350,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
/**
* genwqe_device_remove() - Remove genwqe's char device
+ * @cd: GenWQE device information
*
* This function must be called after the client devices are removed
* because it will free the major/minor number range for the genwqe
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
index 28a3fb1533f7..b2f115602523 100644
--- a/drivers/misc/genwqe/card_sysfs.c
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -129,7 +129,7 @@ static ssize_t base_clock_show(struct device *dev,
}
static DEVICE_ATTR_RO(base_clock);
-/**
+/*
* curr_bitstream_show() - Show the current bitstream id
*
* There is a bug in some old versions of the CPLD which selects the
@@ -156,7 +156,7 @@ static ssize_t curr_bitstream_show(struct device *dev,
}
static DEVICE_ATTR_RO(curr_bitstream);
-/**
+/*
* next_bitstream_show() - Show the next activated bitstream
*
* IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF.
@@ -260,7 +260,7 @@ static struct attribute *genwqe_normal_attributes[] = {
NULL,
};
-/**
+/*
* genwqe_is_visible() - Determine if sysfs attribute should be visible or not
*
* VFs have restricted mmio capabilities, so not all sysfs entries
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 77c21caf2acd..039b923d1d60 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -129,6 +129,9 @@ u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
/**
* genwqe_read_app_id() - Extract app_id
+ * @cd: genwqe device descriptor
+ * @app_name: carrier used to pass-back name
+ * @len: length of data for name
*
 * app_unitcfg needs to be filled with valid data first
*/
@@ -183,7 +186,7 @@ void genwqe_init_crc32(void)
* @init: initial crc (0xffffffff at start)
*
 * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
-
+ *
* Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
* result in a crc32 of 0xf33cb7d3.
*
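As an editorial aside on the polynomial named in the comment above, a minimal bit-by-bit
C sketch of an MSB-first CRC over 0x20044009 is shown below. It is an illustration only,
not the driver's table-driven genwqe_crc32(), and the 0xf33cb7d3 example value from the
comment is not re-verified here.

	#include <stdint.h>
	#include <stddef.h>

	/* Bit-by-bit, MSB-first CRC32 over x^32 + x^29 + x^18 + x^14 + x^3 + 1 */
	static uint32_t crc32_bitwise(const uint8_t *buf, size_t len, uint32_t init)
	{
		uint32_t crc = init;

		while (len--) {
			crc ^= (uint32_t)*buf++ << 24;
			for (int bit = 0; bit < 8; bit++)
				crc = (crc & 0x80000000) ?
					(crc << 1) ^ 0x20044009 : crc << 1;
		}
		return crc;
	}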
@@ -277,7 +280,7 @@ static int genwqe_sgl_size(int num_pages)
return roundup(len, PAGE_SIZE);
}
-/**
+/*
* genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
*
* Allocates memory for sgl and overlapping pages. Pages which might
@@ -460,6 +463,8 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/**
* genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
+ * @cd: genwqe device descriptor
+ * @sgl: scatter gather list describing user-space memory
*
* After the DMA transfer has been completed we free the memory for
* the sgl and the cached pages. Data is being transferred from cached
@@ -710,6 +715,7 @@ int genwqe_read_softreset(struct genwqe_dev *cd)
/**
* genwqe_set_interrupt_capability() - Configure MSI capability structure
* @cd: pointer to the device
+ * @count: number of vectors to allocate
* Return: 0 if no error
*/
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
@@ -738,7 +744,7 @@ void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
* @i: index to desired entry
* @m: maximum possible entries
* @addr: addr which is read
- * @index: index in debug array
+ * @idx: index in debug array
* @val: read value
*/
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
@@ -818,6 +824,8 @@ int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
/**
* genwqe_ffdc_buff_size() - Calculates the number of dump registers
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
*/
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
@@ -871,6 +879,10 @@ int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
/**
* genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
+ * @regs: register information
+ * @max_regs: number of register entries
*/
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
struct genwqe_reg *regs, unsigned int max_regs)
@@ -956,6 +968,10 @@ int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
/**
* genwqe_write_vreg() - Write register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @val: value to write
+ * @func: PCI virtual function
*
* Note, these registers are only accessible to the PF through the
* VF-window. It is not intended for the VF to access.
@@ -969,6 +985,9 @@ int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
/**
* genwqe_read_vreg() - Read register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @func: PCI virtual function
*
* Note, these registers are only accessible to the PF through the
* VF-window. It is not intended for the VF to access.
@@ -981,6 +1000,7 @@ u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
/**
 * genwqe_base_clock_frequency() - Determine base clock frequency of the card
+ * @cd: genwqe device descriptor
*
* Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
@@ -1005,6 +1025,7 @@ int genwqe_base_clock_frequency(struct genwqe_dev *cd)
/**
* genwqe_stop_traps() - Stop traps
+ * @cd: genwqe device descriptor
*
* Before reading out the analysis data, we need to stop the traps.
*/
@@ -1015,6 +1036,7 @@ void genwqe_stop_traps(struct genwqe_dev *cd)
/**
* genwqe_start_traps() - Start traps
+ * @cd: genwqe device descriptor
*
* After having read the data, we can/must enable the traps again.
*/
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index 421ebd903069..a786c0a7de9a 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -3,16 +3,15 @@
# Makefile for HabanaLabs AI accelerators driver
#
-obj-m := habanalabs.o
+obj-$(CONFIG_HABANA_AI) := habanalabs.o
-habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
- command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
- command_submission.o mmu.o firmware_if.o pci.o
-
-habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
+include $(src)/common/Makefile
+habanalabs-y += $(HL_COMMON_FILES)
include $(src)/goya/Makefile
habanalabs-y += $(HL_GOYA_FILES)
include $(src)/gaudi/Makefile
habanalabs-y += $(HL_GAUDI_FILES)
+
+habanalabs-$(CONFIG_DEBUG_FS) += common/debugfs.o
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
new file mode 100644
index 000000000000..b984bfa4face
--- /dev/null
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
+ common/asid.o common/habanalabs_ioctl.o \
+ common/command_buffer.o common/hw_queue.o common/irq.o \
+ common/sysfs.o common/hwmon.o common/memory.o \
+ common/command_submission.o common/mmu.o common/firmware_if.o \
+ common/pci.o
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/common/asid.c
index a2fdf31cf27c..a2fdf31cf27c 100644
--- a/drivers/misc/habanalabs/asid.c
+++ b/drivers/misc/habanalabs/common/asid.c
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 02d13f71b1df..7c38c4f7f9c0 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -10,12 +10,18 @@
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/genalloc.h>
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
- hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
- (void *) (uintptr_t) cb->kernel_address,
- cb->bus_address);
+ if (cb->is_internal)
+ gen_pool_free(hdev->internal_cb_pool,
+ cb->kernel_address, cb->size);
+ else
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
+ (void *) (uintptr_t) cb->kernel_address,
+ cb->bus_address);
+
kfree(cb);
}
@@ -44,9 +50,10 @@ static void cb_release(struct kref *ref)
}
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
- int ctx_id)
+ int ctx_id, bool internal_cb)
{
struct hl_cb *cb;
+ u32 cb_offset;
void *p;
/*
@@ -65,13 +72,25 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
if (!cb)
return NULL;
- if (ctx_id == HL_KERNEL_ASID_ID)
+ if (internal_cb) {
+ p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
+ if (!p) {
+ kfree(cb);
+ return NULL;
+ }
+
+ cb_offset = p - hdev->internal_cb_pool_virt_addr;
+ cb->is_internal = true;
+ cb->bus_address = hdev->internal_cb_va_base + cb_offset;
+ } else if (ctx_id == HL_KERNEL_ASID_ID) {
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address, GFP_ATOMIC);
- else
+ } else {
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address,
GFP_USER | __GFP_ZERO);
+ }
+
if (!p) {
dev_err(hdev->dev,
"failed to allocate %d of dma memory for CB\n",
@@ -87,7 +106,7 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
}
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u32 cb_size, u64 *handle, int ctx_id)
+ u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
{
struct hl_cb *cb;
bool alloc_new_cb = true;
@@ -112,28 +131,30 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
goto out_err;
}
- /* Minimum allocation must be PAGE SIZE */
- if (cb_size < PAGE_SIZE)
- cb_size = PAGE_SIZE;
-
- if (ctx_id == HL_KERNEL_ASID_ID &&
- cb_size <= hdev->asic_prop.cb_pool_cb_size) {
-
- spin_lock(&hdev->cb_pool_lock);
- if (!list_empty(&hdev->cb_pool)) {
- cb = list_first_entry(&hdev->cb_pool, typeof(*cb),
- pool_list);
- list_del(&cb->pool_list);
- spin_unlock(&hdev->cb_pool_lock);
- alloc_new_cb = false;
- } else {
- spin_unlock(&hdev->cb_pool_lock);
- dev_dbg(hdev->dev, "CB pool is empty\n");
+ if (!internal_cb) {
+ /* Minimum allocation must be PAGE SIZE */
+ if (cb_size < PAGE_SIZE)
+ cb_size = PAGE_SIZE;
+
+ if (ctx_id == HL_KERNEL_ASID_ID &&
+ cb_size <= hdev->asic_prop.cb_pool_cb_size) {
+
+ spin_lock(&hdev->cb_pool_lock);
+ if (!list_empty(&hdev->cb_pool)) {
+ cb = list_first_entry(&hdev->cb_pool,
+ typeof(*cb), pool_list);
+ list_del(&cb->pool_list);
+ spin_unlock(&hdev->cb_pool_lock);
+ alloc_new_cb = false;
+ } else {
+ spin_unlock(&hdev->cb_pool_lock);
+ dev_dbg(hdev->dev, "CB pool is empty\n");
+ }
}
}
if (alloc_new_cb) {
- cb = hl_cb_alloc(hdev, cb_size, ctx_id);
+ cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
if (!cb) {
rc = -ENOMEM;
goto out_err;
@@ -229,8 +250,8 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
rc = -EINVAL;
} else {
rc = hl_cb_create(hdev, &hpriv->cb_mgr,
- args->in.cb_size, &handle,
- hpriv->ctx->asid);
+ args->in.cb_size, &handle,
+ hpriv->ctx->asid, false);
}
memset(args, 0, sizeof(*args));
@@ -398,14 +419,15 @@ void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
idr_destroy(&mgr->cb_handles);
}
-struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
+ bool internal_cb)
{
u64 cb_handle;
struct hl_cb *cb;
int rc;
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
- HL_KERNEL_ASID_ID);
+ HL_KERNEL_ASID_ID, internal_cb);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate CB for the kernel driver %d\n", rc);
@@ -437,7 +459,7 @@ int hl_cb_pool_init(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
- HL_KERNEL_ASID_ID);
+ HL_KERNEL_ASID_ID, false);
if (cb) {
cb->is_pool = true;
list_add(&cb->pool_list, &hdev->cb_pool);
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index f3a8f113865d..b9840e368eb5 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -246,6 +246,18 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
kfree(job);
}
+static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ hdev->aggregated_cs_counters.device_in_reset_drop_cnt +=
+ ctx->cs_counters.device_in_reset_drop_cnt;
+ hdev->aggregated_cs_counters.out_of_mem_drop_cnt +=
+ ctx->cs_counters.out_of_mem_drop_cnt;
+ hdev->aggregated_cs_counters.parsing_drop_cnt +=
+ ctx->cs_counters.parsing_drop_cnt;
+ hdev->aggregated_cs_counters.queue_full_drop_cnt +=
+ ctx->cs_counters.queue_full_drop_cnt;
+}
+
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs,
@@ -349,13 +361,16 @@ static void cs_do_release(struct kref *ref)
dma_fence_signal(cs->fence);
dma_fence_put(cs->fence);
+ cs_counters_aggregate(hdev, cs->ctx);
+
+ kfree(cs->jobs_in_queue_cnt);
kfree(cs);
}
static void cs_timedout(struct work_struct *work)
{
struct hl_device *hdev;
- int ctx_asid, rc;
+ int rc;
struct hl_cs *cs = container_of(work, struct hl_cs,
work_tdr.work);
rc = cs_get_unless_zero(cs);
@@ -371,11 +386,10 @@ static void cs_timedout(struct work_struct *work)
cs->timedout = true;
hdev = cs->ctx->hdev;
- ctx_asid = cs->ctx->asid;
- /* TODO: add information about last signaled seq and last emitted seq */
- dev_err(hdev->dev, "User %d command submission %llu got stuck!\n",
- ctx_asid, cs->sequence);
+ dev_err(hdev->dev,
+ "Command submission %llu has not finished in time!\n",
+ cs->sequence);
cs_put(cs);
@@ -418,21 +432,29 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
spin_lock(&ctx->cs_lock);
cs_cmpl->cs_seq = ctx->cs_sequence;
- other = ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)];
+ other = ctx->cs_pending[cs_cmpl->cs_seq &
+ (hdev->asic_prop.max_pending_cs - 1)];
if ((other) && (!dma_fence_is_signaled(other))) {
- spin_unlock(&ctx->cs_lock);
dev_dbg(hdev->dev,
"Rejecting CS because of too many in-flights CS\n");
rc = -EAGAIN;
goto free_fence;
}
+ cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
+ sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
+ if (!cs->jobs_in_queue_cnt) {
+ rc = -ENOMEM;
+ goto free_fence;
+ }
+
dma_fence_init(&cs_cmpl->base_fence, &hl_fence_ops, &cs_cmpl->lock,
ctx->asid, ctx->cs_sequence);
cs->sequence = cs_cmpl->cs_seq;
- ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)] =
+ ctx->cs_pending[cs_cmpl->cs_seq &
+ (hdev->asic_prop.max_pending_cs - 1)] =
&cs_cmpl->base_fence;
ctx->cs_sequence++;
@@ -447,6 +469,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
return 0;
free_fence:
+ spin_unlock(&ctx->cs_lock);
kfree(cs_cmpl);
free_cs:
kfree(cs);
@@ -463,10 +486,12 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
void hl_cs_rollback_all(struct hl_device *hdev)
{
+ int i;
struct hl_cs *cs, *tmp;
/* flush all completions */
- flush_workqueue(hdev->cq_wq);
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ flush_workqueue(hdev->cq_wq[i]);
/* Make sure we don't have leftovers in the H/W queues mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
@@ -502,7 +527,7 @@ static int validate_queue_index(struct hl_device *hdev,
/* This must be checked here to prevent out-of-bounds access to
* hw_queues_props array
*/
- if (chunk->queue_index >= HL_MAX_QUEUES) {
+ if (chunk->queue_index >= asic->max_queues) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
@@ -511,7 +536,7 @@ static int validate_queue_index(struct hl_device *hdev,
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
- dev_err(hdev->dev, "Queue index %d is not applicable\n",
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
}
@@ -638,12 +663,15 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
- if (rc)
+ if (rc) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
goto free_cs_object;
+ }
if (is_kernel_allocated_cb) {
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
rc = -EINVAL;
goto free_cs_object;
}
@@ -657,6 +685,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
+ hpriv->ctx->cs_counters.out_of_mem_drop_cnt++;
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
if (is_kernel_allocated_cb)
@@ -689,6 +718,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
cs->ctx->asid, cs->sequence, job->id, rc);
@@ -697,6 +727,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
@@ -746,6 +777,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
+ enum hl_queue_type q_type;
u64 *signal_seq_arr = NULL, signal_seq;
u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
int rc;
@@ -778,9 +810,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
q_idx = chunk->queue_index;
hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
+ q_type = hw_queue_prop->type;
- if ((q_idx >= HL_MAX_QUEUES) ||
- (hw_queue_prop->type != QUEUE_TYPE_EXT)) {
+ if ((q_idx >= hdev->asic_prop.max_queues) ||
+ (!hw_queue_prop->supports_sync_stream)) {
dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
goto free_cs_chunk_array;
@@ -877,25 +910,28 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
*cs_seq = cs->sequence;
- job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ job = hl_cs_allocate_job(hdev, q_type, true);
if (!job) {
+ ctx->cs_counters.out_of_mem_drop_cnt++;
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
goto put_cs;
}
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (cs->type == CS_TYPE_WAIT)
+ cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+ else
+ cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
+
+ cb = hl_cb_kernel_create(hdev, cb_size,
+ q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
if (!cb) {
+ ctx->cs_counters.out_of_mem_drop_cnt++;
kfree(job);
rc = -EFAULT;
goto put_cs;
}
- if (cs->type == CS_TYPE_WAIT)
- cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
- else
- cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
-
job->id = 0;
job->cs = cs;
job->user_cb = cb;
@@ -1134,7 +1170,7 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
rc = PTR_ERR(fence);
if (rc == -EINVAL)
dev_notice_ratelimited(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu\n",
+ "Can't wait on CS %llu because current CS is at seq %llu\n",
seq, ctx->cs_sequence);
} else if (fence) {
rc = dma_fence_wait_timeout(fence, true, timeout);
@@ -1167,15 +1203,21 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
if (rc < 0) {
- dev_err_ratelimited(hdev->dev,
- "Error %ld on waiting for CS handle %llu\n",
- rc, seq);
if (rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for CS handle %llu\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
rc = -EINTR;
} else if (rc == -ETIMEDOUT) {
+ dev_err_ratelimited(hdev->dev,
+ "CS %llu has timed-out while user process is waiting for it\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
} else if (rc == -EIO) {
+ dev_err_ratelimited(hdev->dev,
+ "CS %llu has been aborted while user process is waiting for it\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_ABORTED;
}
return rc;
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/common/context.c
index ec92b3506b1f..3e375958e73b 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -22,9 +22,11 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
* to this function unless the ref count is 0
*/
- for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
+ for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
dma_fence_put(ctx->cs_pending[i]);
+ kfree(ctx->cs_pending);
+
if (ctx->asid != HL_KERNEL_ASID_ID) {
/* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
@@ -110,8 +112,7 @@ void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
return;
dev_warn(hdev->dev,
- "Context %d closed or terminated but its CS are executing\n",
- ctx->asid);
+ "user process released device but its command submissions are still executing\n");
}
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
@@ -126,34 +127,49 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
+ ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL);
+ if (!ctx->cs_pending)
+ return -ENOMEM;
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
rc = hl_mmu_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mmu ctx module\n");
- goto mem_ctx_err;
+ goto err_free_cs_pending;
}
} else {
ctx->asid = hl_asid_alloc(hdev);
if (!ctx->asid) {
dev_err(hdev->dev, "No free ASID, failed to create context\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free_cs_pending;
}
rc = hl_vm_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mem ctx module\n");
rc = -ENOMEM;
- goto mem_ctx_err;
+ goto err_asid_free;
+ }
+
+ rc = hdev->asic_funcs->ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "ctx_init failed\n");
+ goto err_vm_ctx_fini;
}
}
return 0;
-mem_ctx_err:
- if (ctx->asid != HL_KERNEL_ASID_ID)
- hl_asid_free(hdev, ctx->asid);
+err_vm_ctx_fini:
+ hl_vm_ctx_fini(ctx);
+err_asid_free:
+ hl_asid_free(hdev, ctx->asid);
+err_free_cs_pending:
+ kfree(ctx->cs_pending);
return rc;
}
@@ -170,6 +186,7 @@ int hl_ctx_put(struct hl_ctx *ctx)
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
+ struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
struct dma_fence *fence;
spin_lock(&ctx->cs_lock);
@@ -179,13 +196,13 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
return ERR_PTR(-EINVAL);
}
- if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
+ if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
spin_unlock(&ctx->cs_lock);
return NULL;
}
fence = dma_fence_get(
- ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
+ ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)]);
spin_unlock(&ctx->cs_lock);
return fence;
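The masked indexing above, seq & (max_pending_cs - 1), is only equivalent to
seq % max_pending_cs when max_pending_cs is a power of two, which is exactly what the
IS_MAX_PENDING_CS_VALID() macro added to habanalabs.h later in this patch enforces. A
tiny standalone check, for illustration only:

	#include <assert.h>

	int main(void)
	{
		unsigned int n = 64;	/* any valid max_pending_cs: power of 2, > 1 */

		for (unsigned long long seq = 0; seq < 1000; seq++)
			assert((seq & (n - 1)) == (seq % n));
		return 0;
	}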
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 0bc036e01ee8..c50c6fc9e905 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
#include <linux/debugfs.h>
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/common/device.c
index 59608d1bac88..be16b75bdfdb 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -249,7 +249,8 @@ static void device_cdev_sysfs_del(struct hl_device *hdev)
*/
static int device_early_init(struct hl_device *hdev)
{
- int rc;
+ int i, rc;
+ char workq_name[32];
switch (hdev->asic_type) {
case ASIC_GOYA:
@@ -274,11 +275,24 @@ static int device_early_init(struct hl_device *hdev)
if (rc)
goto early_fini;
- hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
- if (hdev->cq_wq == NULL) {
- dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
- rc = -ENOMEM;
- goto asid_fini;
+ if (hdev->asic_prop.completion_queues_count) {
+ hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
+ sizeof(*hdev->cq_wq),
+ GFP_ATOMIC);
+ if (!hdev->cq_wq) {
+ rc = -ENOMEM;
+ goto asid_fini;
+ }
+ }
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
+ snprintf(workq_name, 32, "hl-free-jobs-%u", i);
+ hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
+		if (hdev->cq_wq[i] == NULL) {
+ dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
+ rc = -ENOMEM;
+ goto free_cq_wq;
+ }
}
hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
@@ -321,7 +335,10 @@ free_chip_info:
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
- destroy_workqueue(hdev->cq_wq);
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ if (hdev->cq_wq[i])
+ destroy_workqueue(hdev->cq_wq[i]);
+ kfree(hdev->cq_wq);
asid_fini:
hl_asid_fini(hdev);
early_fini:
@@ -339,6 +356,8 @@ early_fini:
*/
static void device_early_fini(struct hl_device *hdev)
{
+ int i;
+
mutex_destroy(&hdev->mmu_cache_lock);
mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
@@ -351,7 +370,10 @@ static void device_early_fini(struct hl_device *hdev)
kfree(hdev->hl_chip_info);
destroy_workqueue(hdev->eq_wq);
- destroy_workqueue(hdev->cq_wq);
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ destroy_workqueue(hdev->cq_wq[i]);
+ kfree(hdev->cq_wq);
hl_asid_fini(hdev);
@@ -838,6 +860,22 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
if (rc)
return 0;
+ if (hard_reset) {
+		/* Disable PCI access from device F/W so it won't send
+		 * us additional interrupts. We disable MSI/MSI-X at
+		 * the halt_engines function and we can't have the F/W
+		 * sending us interrupts after that. We need to disable
+		 * the access here because if the device is marked as
+		 * disabled, the message won't be sent. Also, in case
+		 * of heartbeat, the device CPU is marked as disabled,
+		 * so this message won't be sent.
+ */
+ if (hl_fw_send_pci_access_msg(hdev,
+ ARMCP_PACKET_DISABLE_PCI_ACCESS))
+ dev_warn(hdev->dev,
+ "Failed to disable PCI access by F/W\n");
+ }
+
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@@ -995,6 +1033,12 @@ again:
}
}
+	/* Device is now enabled because part of the initialization requires
+ * communication with the device firmware to get information that
+ * is required for the initialization itself
+ */
+ hdev->disabled = false;
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev,
@@ -1002,8 +1046,6 @@ again:
goto out_err;
}
- hdev->disabled = false;
-
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
@@ -1144,14 +1186,17 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
* because there the addresses of the completion queues are being
* passed as arguments to request_irq
*/
- hdev->completion_queue = kcalloc(cq_cnt,
- sizeof(*hdev->completion_queue),
- GFP_KERNEL);
+ if (cq_cnt) {
+ hdev->completion_queue = kcalloc(cq_cnt,
+ sizeof(*hdev->completion_queue),
+ GFP_KERNEL);
- if (!hdev->completion_queue) {
- dev_err(hdev->dev, "failed to allocate completion queues\n");
- rc = -ENOMEM;
- goto hw_queues_destroy;
+ if (!hdev->completion_queue) {
+ dev_err(hdev->dev,
+ "failed to allocate completion queues\n");
+ rc = -ENOMEM;
+ goto hw_queues_destroy;
+ }
}
for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
@@ -1162,6 +1207,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
"failed to initialize completion queue\n");
goto cq_fini;
}
+ hdev->completion_queue[i].cq_idx = i;
}
/*
@@ -1219,6 +1265,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
*/
add_cdev_sysfs_on_err = true;
+	/* Device is now enabled because part of the initialization requires
+ * communication with the device firmware to get information that
+ * is required for the initialization itself
+ */
+ hdev->disabled = false;
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W\n");
@@ -1226,8 +1278,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto out_disabled;
}
- hdev->disabled = false;
-
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index d27841cb5bcb..f70302cdab1b 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hl_boot_if.h"
+#include "../include/common/hl_boot_if.h"
#include <linux/firmware.h>
#include <linux/genalloc.h>
@@ -15,7 +15,10 @@
/**
* hl_fw_load_fw_to_device() - Load F/W code to device's memory.
+ *
* @hdev: pointer to hl_device structure.
+ * @fw_name: the firmware image name
+ * @dst: IO memory mapped address space to copy firmware to
*
* Copy fw code from firmware file to device memory.
*
@@ -286,7 +289,7 @@ int hl_fw_armcp_info_get(struct hl_device *hdev)
HL_ARMCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
- "Failed to send ArmCP info pkt, error %d\n", rc);
+ "Failed to handle ArmCP info pkt, error %d\n", rc);
goto out;
}
@@ -337,7 +340,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
if (rc) {
dev_err(hdev->dev,
- "Failed to send ArmCP EEPROM packet, error %d\n", rc);
+ "Failed to handle ArmCP EEPROM packet, error %d\n", rc);
goto out;
}
@@ -390,6 +393,53 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
"Device boot error - NIC F/W initialization failed\n");
}
+static void hl_detect_cpu_boot_status(struct hl_device *hdev, u32 status)
+{
+ switch (status) {
+ case CPU_BOOT_STATUS_NA:
+ dev_err(hdev->dev,
+ "Device boot error - BTL did NOT run\n");
+ break;
+ case CPU_BOOT_STATUS_IN_WFE:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck inside WFE loop\n");
+ break;
+ case CPU_BOOT_STATUS_IN_BTL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in BTL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_PREBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in Preboot\n");
+ break;
+ case CPU_BOOT_STATUS_IN_SPL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in SPL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_UBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in u-boot\n");
+ break;
+ case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ break;
+ case CPU_BOOT_STATUS_UBOOT_NOT_READY:
+ dev_err(hdev->dev,
+ "Device boot error - u-boot stopped by user\n");
+ break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Device boot error - Invalid status code %d\n",
+ status);
+ break;
+ }
+}
+
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
u32 boot_err0_reg, bool skip_bmc,
@@ -463,50 +513,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
* versions but we keep them here for backward compatibility
*/
if (rc) {
- switch (status) {
- case CPU_BOOT_STATUS_NA:
- dev_err(hdev->dev,
- "Device boot error - BTL did NOT run\n");
- break;
- case CPU_BOOT_STATUS_IN_WFE:
- dev_err(hdev->dev,
- "Device boot error - Stuck inside WFE loop\n");
- break;
- case CPU_BOOT_STATUS_IN_BTL:
- dev_err(hdev->dev,
- "Device boot error - Stuck in BTL\n");
- break;
- case CPU_BOOT_STATUS_IN_PREBOOT:
- dev_err(hdev->dev,
- "Device boot error - Stuck in Preboot\n");
- break;
- case CPU_BOOT_STATUS_IN_SPL:
- dev_err(hdev->dev,
- "Device boot error - Stuck in SPL\n");
- break;
- case CPU_BOOT_STATUS_IN_UBOOT:
- dev_err(hdev->dev,
- "Device boot error - Stuck in u-boot\n");
- break;
- case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
- dev_err(hdev->dev,
- "Device boot error - DRAM initialization failed\n");
- break;
- case CPU_BOOT_STATUS_UBOOT_NOT_READY:
- dev_err(hdev->dev,
- "Device boot error - u-boot stopped by user\n");
- break;
- case CPU_BOOT_STATUS_TS_INIT_FAIL:
- dev_err(hdev->dev,
- "Device boot error - Thermal Sensor initialization failed\n");
- break;
- default:
- dev_err(hdev->dev,
- "Device boot error - Invalid status code %d\n",
- status);
- break;
- }
-
+ hl_detect_cpu_boot_status(hdev, status);
rc = -EIO;
goto out;
}
@@ -566,7 +573,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
"Device reports FIT image is corrupted\n");
else
dev_err(hdev->dev,
- "Device failed to load, %d\n", status);
+ "Failed to load firmware to device, %d\n",
+ status);
rc = -EIO;
goto out;
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 194d83352696..018d9d67e8e6 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -8,8 +8,9 @@
#ifndef HABANALABSP_H_
#define HABANALABSP_H_
-#include "include/armcp_if.h"
-#include "include/qman_if.h"
+#include "../include/common/armcp_if.h"
+#include "../include/common/qman_if.h"
+#include <uapi/misc/habanalabs.h>
#include <linux/cdev.h>
#include <linux/iopoll.h>
@@ -40,11 +41,6 @@
#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
-#define HL_MAX_QUEUES 128
-
-/* MUST BE POWER OF 2 and larger than 1 */
-#define HL_MAX_PENDING_CS 64
-
#define HL_IDLE_BUSY_TS_ARR_SIZE 4096
/* Memory */
@@ -53,6 +49,10 @@
/* MMU */
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+/*
+ * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
+ * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
+ */
#define HL_RSVD_SOBS 4
#define HL_RSVD_MONS 2
@@ -61,6 +61,11 @@
#define HL_MAX_SOB_VAL (1 << 15)
+#define IS_POWER_OF_2(n) (n != 0 && ((n & (n - 1)) == 0))
+#define IS_MAX_PENDING_CS_VALID(n) (IS_POWER_OF_2(n) && (n > 1))
+
+#define HL_PCI_NUM_BARS 6
+
/**
* struct pgt_info - MMU hop page info.
* @node: hash linked-list node for the pgts shadow hash of pgts.
@@ -86,6 +91,16 @@ struct hl_device;
struct hl_fpriv;
/**
+ * enum hl_pci_match_mode - pci match mode per region
+ * @PCI_ADDRESS_MATCH_MODE: address match mode
+ * @PCI_BAR_MATCH_MODE: bar match mode
+ */
+enum hl_pci_match_mode {
+ PCI_ADDRESS_MATCH_MODE,
+ PCI_BAR_MATCH_MODE
+};
+
+/**
* enum hl_fw_component - F/W components to read version through registers.
* @FW_COMP_UBOOT: u-boot.
* @FW_COMP_PREBOOT: preboot.
@@ -121,6 +136,32 @@ enum hl_cs_type {
};
/*
+ * struct hl_inbound_pci_region - inbound region descriptor
+ * @mode: pci match mode for this region
+ * @addr: region target address
+ * @size: region size in bytes
+ * @offset_in_bar: offset within bar (address match mode)
+ * @bar: bar id
+ */
+struct hl_inbound_pci_region {
+ enum hl_pci_match_mode mode;
+ u64 addr;
+ u64 size;
+ u64 offset_in_bar;
+ u8 bar;
+};
+
+/*
+ * struct hl_outbound_pci_region - outbound region descriptor
+ * @addr: region target address
+ * @size: region size in bytes
+ */
+struct hl_outbound_pci_region {
+ u64 addr;
+ u64 size;
+};
+
+/*
* struct hl_hw_sob - H/W SOB info.
* @hdev: habanalabs device structure.
* @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
@@ -141,11 +182,13 @@ struct hl_hw_sob {
* false otherwise.
* @requires_kernel_cb: true if a CB handle must be provided for jobs on this
* queue, false otherwise (a CB address must be provided).
+ * @supports_sync_stream: True if queue supports sync stream
*/
struct hw_queue_properties {
enum hl_queue_type type;
u8 driver_only;
u8 requires_kernel_cb;
+ u8 supports_sync_stream;
};
/**
@@ -241,14 +284,19 @@ struct hl_mmu_properties {
* @psoc_pci_pll_nf: PCI PLL NF value.
* @psoc_pci_pll_od: PCI PLL OD value.
* @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
+ * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
+ * @max_pending_cs: maximum number of concurrent pending command submissions
+ * @max_queues: maximum number of queues in the system
+ * @sync_stream_first_sob: first sync object available for sync stream use
+ * @sync_stream_first_mon: first monitor available for sync stream use
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
*/
struct asic_fixed_properties {
- struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
+ struct hw_queue_properties *hw_queues_props;
struct armcp_info armcp_info;
char uboot_ver[VERSION_MAX_LEN];
char preboot_ver[VERSION_MAX_LEN];
@@ -282,9 +330,14 @@ struct asic_fixed_properties {
u32 psoc_pci_pll_nf;
u32 psoc_pci_pll_od;
u32 psoc_pci_pll_div_factor;
+ u32 psoc_timestamp_frequency;
u32 high_pll;
u32 cb_pool_cb_cnt;
u32 cb_pool_cb_size;
+ u32 max_pending_cs;
+ u32 max_queues;
+ u16 sync_stream_first_sob;
+ u16 sync_stream_first_mon;
u8 tpc_enabled_mask;
u8 completion_queues_count;
};
@@ -339,6 +392,7 @@ struct hl_cb_mgr {
* @ctx_id: holds the ID of the owner's context.
* @mmap: true if the CB is currently mmaped to user.
* @is_pool: true if CB was acquired from the pool, false otherwise.
+ * @is_internal: internally allocated
*/
struct hl_cb {
struct kref refcount;
@@ -355,6 +409,7 @@ struct hl_cb {
u32 ctx_id;
u8 mmap;
u8 is_pool;
+ u8 is_internal;
};
@@ -364,38 +419,19 @@ struct hl_cb {
struct hl_cs_job;
-/*
- * Currently, there are two limitations on the maximum length of a queue:
- *
- * 1. The memory footprint of the queue. The current allocated space for the
- * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE,
- * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE,
- * which currently is 4096/16 = 256 entries.
- *
- * To increase that, we need either to decrease the size of the
- * BD (difficult), or allocate more than a single page (easier).
- *
- * 2. Because the size of the JOB handle field in the BD CTL / completion queue
- * is 10-bit, we can have up to 1024 open jobs per hardware queue.
- * Therefore, each queue can hold up to 1024 entries.
- *
- * HL_QUEUE_LENGTH is in units of struct hl_bd.
- * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE
- */
-
-#define HL_PAGE_SIZE 4096 /* minimum page size */
-/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */
-#define HL_QUEUE_LENGTH 256
+/* Queue length of external and HW queues */
+#define HL_QUEUE_LENGTH 4096
#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
-/*
- * HL_CQ_LENGTH is in units of struct hl_cq_entry.
- * HL_CQ_LENGTH should be <= HL_PAGE_SIZE
- */
+#if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
+#error "HL_QUEUE_LENGTH must be greater than or equal to HL_MAX_JOBS_PER_CS"
+#endif
+
+/* HL_CQ_LENGTH is in units of struct hl_cq_entry */
#define HL_CQ_LENGTH HL_QUEUE_LENGTH
#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
-/* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */
+/* Must be power of 2 */
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
@@ -422,6 +458,7 @@ struct hl_cs_job;
* exist).
* @curr_sob_offset: the id offset to the currently used SOB from the
* HL_RSVD_SOBS that are being used by this queue.
+ * @supports_sync_stream: True if queue supports sync stream
*/
struct hl_hw_queue {
struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
@@ -430,7 +467,7 @@ struct hl_hw_queue {
u64 kernel_address;
dma_addr_t bus_address;
u32 pi;
- u32 ci;
+ atomic_t ci;
u32 hw_queue_id;
u32 cq_id;
u32 msi_vec;
@@ -440,6 +477,7 @@ struct hl_hw_queue {
u16 base_mon_id;
u8 valid;
u8 curr_sob_offset;
+ u8 supports_sync_stream;
};
/**
@@ -447,6 +485,7 @@ struct hl_hw_queue {
* @hdev: pointer to the device structure
* @kernel_address: holds the queue's kernel virtual address
* @bus_address: holds the queue's DMA address
+ * @cq_idx: completion queue index in array
* @hw_queue_id: the id of the matching H/W queue
* @ci: ci inside the queue
* @pi: pi inside the queue
@@ -456,6 +495,7 @@ struct hl_cq {
struct hl_device *hdev;
u64 kernel_address;
dma_addr_t bus_address;
+ u32 cq_idx;
u32 hw_queue_id;
u32 ci;
u32 pi;
@@ -519,6 +559,15 @@ enum hl_pll_frequency {
PLL_LAST
};
+#define PLL_REF_CLK 50
+
+enum div_select_defs {
+ DIV_SEL_REF_CLK = 0,
+ DIV_SEL_PLL_CLK = 1,
+ DIV_SEL_DIVIDED_REF = 2,
+ DIV_SEL_DIVIDED_PLL = 3,
+};
+
/**
 * struct hl_asic_funcs - ASIC specific functions that can be called from
* common code.
@@ -601,14 +650,13 @@ enum hl_pll_frequency {
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
+ * @ctx_init: context dependent initialization.
* @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
* @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
* @read_device_fw_version: read the device's firmware versions that are
* contained in registers
* @load_firmware_to_device: load the firmware to the device's memory
* @load_boot_fit_to_device: load boot fit to device's memory
- * @ext_queue_init: Initialize the given external queue.
- * @ext_queue_reset: Reset the given external queue.
* @get_signal_cb_size: Get signal CB size.
* @get_wait_cb_size: Get wait CB size.
* @gen_signal_cb: Generate a signal CB.
@@ -705,14 +753,13 @@ struct hl_asic_funcs {
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
+ int (*ctx_init)(struct hl_ctx *ctx);
int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
void (*read_device_fw_version)(struct hl_device *hdev,
enum hl_fw_component fwc);
int (*load_firmware_to_device)(struct hl_device *hdev);
int (*load_boot_fit_to_device)(struct hl_device *hdev);
- void (*ext_queue_init)(struct hl_device *hdev, u32 hw_queue_id);
- void (*ext_queue_reset)(struct hl_device *hdev, u32 hw_queue_id);
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id);
@@ -748,7 +795,6 @@ struct hl_va_range {
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
- * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
* @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
* @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
@@ -782,18 +828,18 @@ struct hl_va_range {
*/
struct hl_ctx {
DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
- DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
struct hl_fpriv *hpriv;
struct hl_device *hdev;
struct kref refcount;
- struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
+ struct dma_fence **cs_pending;
struct hl_va_range *host_va_range;
struct hl_va_range *host_huge_va_range;
struct hl_va_range *dram_va_range;
struct mutex mem_hash_lock;
struct mutex mmu_lock;
struct list_head debugfs_list;
+ struct hl_cs_counters cs_counters;
u64 cs_sequence;
u64 *dram_default_hops;
spinlock_t cs_lock;
@@ -868,7 +914,7 @@ struct hl_userptr {
* @aborted: true if CS was aborted due to some device error.
*/
struct hl_cs {
- u16 jobs_in_queue_cnt[HL_MAX_QUEUES];
+ u16 *jobs_in_queue_cnt;
struct hl_ctx *ctx;
struct list_head job_list;
spinlock_t job_lock;
@@ -1352,7 +1398,9 @@ struct hl_device_idle_busy_ts {
/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
- * @pcie_bar: array of available PCIe bars.
+ * @pcie_bar_phys: array of available PCIe bars physical addresses.
+ * (required only for PCI address match mode)
+ * @pcie_bar: array of available PCIe bars virtual addresses.
* @rmmio: configuration area address on SRAM.
* @cdev: related char device.
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
@@ -1363,7 +1411,8 @@ struct hl_device_idle_busy_ts {
 * @asic_name: ASIC specific name.
* @asic_type: ASIC specific type.
* @completion_queue: array of hl_cq.
- * @cq_wq: work queue of completion queues for executing work in process context
+ * @cq_wq: work queues of completion queues for executing work in process
+ * context.
* @eq_wq: work queue of event queue for executing work in process context.
* @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
@@ -1392,12 +1441,17 @@ struct hl_device_idle_busy_ts {
* @hl_debugfs: device's debugfs manager.
* @cb_pool: list of preallocated CBs.
* @cb_pool_lock: protects the CB pool.
+ * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
+ * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
+ * @internal_cb_pool: internal command buffer memory pool.
+ * @internal_cb_va_base: internal cb pool mmu virtual address base
* @fpriv_list: list of file private data structures. Each structure is created
* when a user opens the device
* @fpriv_list_lock: protects the fpriv_list
* @compute_ctx: current compute context executing.
* @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
* and vice-versa
+ * @aggregated_cs_counters: aggregated cs counters among all contexts
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1442,12 +1496,14 @@ struct hl_device_idle_busy_ts {
* @cdev_sysfs_created: were char devices and sysfs nodes created.
* @stop_on_err: true if engines should stop on error.
* @supports_sync_stream: is sync stream supported.
+ * @sync_stream_queue_idx: helper index for sync stream queues initialization.
* @supports_coresight: is CoreSight supported.
* @supports_soft_reset: is soft reset supported.
*/
struct hl_device {
struct pci_dev *pdev;
- void __iomem *pcie_bar[6];
+ u64 pcie_bar_phys[HL_PCI_NUM_BARS];
+ void __iomem *pcie_bar[HL_PCI_NUM_BARS];
void __iomem *rmmio;
struct cdev cdev;
struct cdev cdev_ctrl;
@@ -1458,7 +1514,7 @@ struct hl_device {
char asic_name[16];
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
- struct workqueue_struct *cq_wq;
+ struct workqueue_struct **cq_wq;
struct workqueue_struct *eq_wq;
struct hl_ctx *kernel_ctx;
struct hl_hw_queue *kernel_queues;
@@ -1490,6 +1546,11 @@ struct hl_device {
struct list_head cb_pool;
spinlock_t cb_pool_lock;
+ void *internal_cb_pool_virt_addr;
+ dma_addr_t internal_cb_pool_dma_addr;
+ struct gen_pool *internal_cb_pool;
+ u64 internal_cb_va_base;
+
struct list_head fpriv_list;
struct mutex fpriv_list_lock;
@@ -1497,6 +1558,8 @@ struct hl_device {
struct hl_device_idle_busy_ts *idle_busy_ts_arr;
+ struct hl_cs_counters aggregated_cs_counters;
+
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
@@ -1529,6 +1592,7 @@ struct hl_device {
u8 cdev_sysfs_created;
u8 stop_on_err;
u8 supports_sync_stream;
+ u8 sync_stream_queue_idx;
u8 supports_coresight;
u8 supports_soft_reset;
@@ -1697,7 +1761,7 @@ int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
- u64 *handle, int ctx_id);
+ u64 *handle, int ctx_id, bool internal_cb);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
@@ -1705,7 +1769,8 @@ struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
void hl_cb_put(struct hl_cb *cb);
void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
-struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size);
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
+ bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
@@ -1769,9 +1834,10 @@ int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
u64 addr);
-int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
- u64 dram_base_address, u64 host_phys_base_address,
- u64 host_phys_size);
+int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
+ struct hl_inbound_pci_region *pci_region);
+int hl_pci_set_outbound_region(struct hl_device *hdev,
+ struct hl_outbound_pci_region *pci_region);
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
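hl_pci_init_iatu() is replaced by the two region setters declared above, driven by the
new hl_inbound_pci_region/hl_outbound_pci_region descriptors. A hedged sketch of how an
ASIC might use them follows; every address and size below is a placeholder, not the real
Goya or Gaudi iATU configuration.

	/* Illustration only - placeholder addresses, minimal error handling */
	static int example_init_iatu(struct hl_device *hdev)
	{
		struct hl_inbound_pci_region inbound_region = {};
		struct hl_outbound_pci_region outbound_region = {};
		int rc;

		/* Inbound region 0: expose SRAM through BAR 0 (BAR match mode) */
		inbound_region.mode = PCI_BAR_MATCH_MODE;
		inbound_region.bar = 0;
		inbound_region.addr = 0x7ff0000000ull;		/* placeholder SRAM base */
		rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
		if (rc)
			return rc;

		/* Outbound region: window from the device to host physical memory */
		outbound_region.addr = 0x8000000000ull;		/* placeholder host base */
		outbound_region.size = 0x1000000000ull;		/* placeholder 64 GB window */
		return hl_pci_set_outbound_region(hdev, &outbound_region);
	}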
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 22716da9f85f..c6b31e93fb5e 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -238,7 +238,6 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->axi_drain = 0;
hdev->sram_scrambler_enable = 1;
hdev->dram_scrambler_enable = 1;
- hdev->rl_enable = 1;
hdev->bmc_enable = 1;
hdev->hard_reset_on_fw_events = 1;
}
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 52eedd3a6c3a..5af1c03da473 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -276,6 +276,27 @@ static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}
+static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_info_cs_counters cs_counters = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters,
+ sizeof(struct hl_cs_counters));
+
+ if (hpriv->ctx)
+ memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters,
+ sizeof(struct hl_cs_counters));
+
+ return copy_to_user(out, &cs_counters,
+ min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -336,6 +357,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_TIME_SYNC:
return time_sync_info(hdev, args);
+ case HL_INFO_CS_COUNTERS:
+ return cs_counters_info(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
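The new HL_INFO_CS_COUNTERS op can be queried from userspace through the existing INFO
ioctl. A minimal usage sketch follows, assuming the installed uapi header from this
series; the device node name and the error handling are simplified.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <misc/habanalabs.h>	/* assumed installed uapi header */

	int main(void)
	{
		struct hl_info_cs_counters counters;
		struct hl_info_args args;
		int fd = open("/dev/hl0", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&args, 0, sizeof(args));
		memset(&counters, 0, sizeof(counters));
		args.op = HL_INFO_CS_COUNTERS;
		args.return_pointer = (uint64_t) (uintptr_t) &counters;
		args.return_size = sizeof(counters);

		if (ioctl(fd, HL_IOCTL_INFO, &args))
			return 1;

		printf("aggregated parsing drops: %llu\n",
		       (unsigned long long) counters.cs_counters.parsing_drop_cnt);
		return 0;
	}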
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index f4434b39ef1b..287681646071 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -23,10 +23,14 @@ inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
return ptr;
}
+static inline int queue_ci_get(atomic_t *ci, u32 queue_len)
+{
+ return atomic_read(ci) & ((queue_len << 1) - 1);
+}
static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
- int delta = (q->pi - q->ci);
+ int delta = (q->pi - queue_ci_get(&q->ci, queue_len));
if (delta >= 0)
return (queue_len - delta);
@@ -40,21 +44,14 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs)
struct hl_hw_queue *q;
int i;
- hdev->asic_funcs->hw_queues_lock(hdev);
-
if (hdev->disabled)
- goto out;
+ return;
q = &hdev->kernel_queues[0];
- for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_INT) {
- q->ci += cs->jobs_in_queue_cnt[i];
- q->ci &= ((q->int_queue_len << 1) - 1);
- }
+ for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
+ if (q->queue_type == QUEUE_TYPE_INT)
+ atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
}
-
-out:
- hdev->asic_funcs->hw_queues_unlock(hdev);
}
/*
@@ -161,6 +158,13 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
{
int free_slots_cnt;
+ if (num_of_entries > q->int_queue_len) {
+ dev_err(hdev->dev,
+ "Cannot populate queue %u with %u jobs\n",
+ q->hw_queue_id, num_of_entries);
+ return -ENOMEM;
+ }
+
/* Check we have enough space in the queue */
free_slots_cnt = queue_free_slots(q, q->int_queue_len);
@@ -174,38 +178,26 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
}
/*
- * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
+ * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
* @hdev: Pointer to hl_device structure.
* @q: Pointer to hl_hw_queue structure.
* @num_of_entries: How many entries to check for space.
*
- * Perform the following:
- * - Make sure we have enough space in the completion queue.
- * This check also ensures that there is enough space in the h/w queue, as
- * both queues are of the same size.
- * - Reserve space in the completion queue (needs to be reversed if there
- * is a failure down the road before the actual submission of work).
+ * Notice: We do not reserve queue entries so this function mustn't be called
+ * more than once per CS for the same queue
*
- * Both operations are done using the "free_slots_cnt" field of the completion
- * queue. The CI counters of the queue and the completion queue are not
- * needed/used for the H/W queue type.
*/
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
int num_of_entries)
{
- atomic_t *free_slots =
- &hdev->completion_queue[q->cq_id].free_slots_cnt;
+ int free_slots_cnt;
- /*
- * Check we have enough space in the completion queue.
- * Add -1 to counter (decrement) unless counter was already 0.
- * In that case, CQ is full so we can't submit a new CB.
- * atomic_add_unless will return 0 if counter was already 0.
- */
- if (atomic_add_negative(num_of_entries * -1, free_slots)) {
- dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
- num_of_entries, q->hw_queue_id);
- atomic_add(num_of_entries, free_slots);
+ /* Check we have enough space in the queue */
+ free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
+
+ if (free_slots_cnt < num_of_entries) {
+ dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
+ q->hw_queue_id, num_of_entries);
return -EAGAIN;
}
@@ -366,7 +358,6 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
- struct hl_cq *cq;
u64 ptr;
u32 offset, ctl, len;
@@ -376,7 +367,7 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
* write address offset in the SM block (QMAN LBW message).
* The write address offset is calculated as "COMP_OFFSET << 2".
*/
- offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
+ offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
@@ -395,17 +386,6 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
else
ptr = (u64) (uintptr_t) job->user_cb;
- /*
- * No need to protect pi_offset because scheduling to the
- * H/W queues is done under the scheduler mutex
- *
- * No need to check if CQ is full because it was already
- * checked in hw_queue_sanity_checks
- */
- cq = &hdev->completion_queue[q->cq_id];
-
- cq->pi = hl_cq_inc_ptr(cq->pi);
-
ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
@@ -509,19 +489,23 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
struct hl_device *hdev = ctx->hdev;
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
+ u32 max_queues;
int rc = 0, i, cq_cnt;
hdev->asic_funcs->hw_queues_lock(hdev);
if (hl_device_disabled_or_in_reset(hdev)) {
+ ctx->cs_counters.device_in_reset_drop_cnt++;
dev_err(hdev->dev,
"device is disabled or in reset, CS rejected!\n");
rc = -EPERM;
goto out;
}
+ max_queues = hdev->asic_prop.max_queues;
+
q = &hdev->kernel_queues[0];
- for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
+ for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
if (cs->jobs_in_queue_cnt[i]) {
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
@@ -543,11 +527,12 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
break;
}
- if (rc)
+ if (rc) {
+ ctx->cs_counters.queue_full_drop_cnt++;
goto unroll_cq_resv;
+ }
- if (q->queue_type == QUEUE_TYPE_EXT ||
- q->queue_type == QUEUE_TYPE_HW)
+ if (q->queue_type == QUEUE_TYPE_EXT)
cq_cnt++;
}
}
@@ -598,10 +583,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
unroll_cq_resv:
q = &hdev->kernel_queues[0];
- for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
- if ((q->queue_type == QUEUE_TYPE_EXT ||
- q->queue_type == QUEUE_TYPE_HW) &&
- cs->jobs_in_queue_cnt[i]) {
+ for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
+ if ((q->queue_type == QUEUE_TYPE_EXT) &&
+ (cs->jobs_in_queue_cnt[i])) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
@@ -625,7 +609,7 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
- q->ci = hl_queue_inc_ptr(q->ci);
+ atomic_inc(&q->ci);
}
static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
@@ -660,12 +644,9 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
}
/* Make sure read/write pointers are initialized to start of queue */
- q->ci = 0;
+ atomic_set(&q->ci, 0);
q->pi = 0;
- if (!is_cpu_queue)
- hdev->asic_funcs->ext_queue_init(hdev, q->hw_queue_id);
-
return 0;
free_queue:
@@ -697,7 +678,7 @@ static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q->kernel_address = (u64) (uintptr_t) p;
q->pi = 0;
- q->ci = 0;
+ atomic_set(&q->ci, 0);
return 0;
}
@@ -726,12 +707,48 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q->kernel_address = (u64) (uintptr_t) p;
/* Make sure read/write pointers are initialized to start of queue */
- q->ci = 0;
+ atomic_set(&q->ci, 0);
q->pi = 0;
return 0;
}
+static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_hw_sob *hw_sob;
+ int sob, queue_idx = hdev->sync_stream_queue_idx++;
+
+ hw_queue->base_sob_id =
+ prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS;
+ hw_queue->base_mon_id =
+ prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS;
+ hw_queue->next_sob_val = 1;
+ hw_queue->curr_sob_offset = 0;
+
+ for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
+ hw_sob = &hw_queue->hw_sob[sob];
+ hw_sob->hdev = hdev;
+ hw_sob->sob_id = hw_queue->base_sob_id + sob;
+ hw_sob->q_idx = q_idx;
+ kref_init(&hw_sob->kref);
+ }
+}
+
+static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+
+ /*
+ * In case we got here due to a stuck CS, the refcnt might be bigger
+ * than 1 and therefore we reset it.
+ */
+ kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
+ hw_queue->curr_sob_offset = 0;
+ hw_queue->next_sob_val = 1;
+}
+
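Each sync-stream queue initialized above claims a contiguous block of HL_RSVD_SOBS sync objects and HL_RSVD_MONS monitors starting at a per-ASIC base property. A short sketch of the index arithmetic, using placeholder constants in place of the real property values:

#include <stdio.h>

#define EX_RSVD_SOBS  4   /* placeholder for HL_RSVD_SOBS */
#define EX_FIRST_SOB  0   /* placeholder for prop->sync_stream_first_sob */

int main(void)
{
	/* The n-th sync-stream queue gets SOB ids [base, base + EX_RSVD_SOBS) */
	for (int queue_idx = 0; queue_idx < 3; queue_idx++) {
		int base_sob_id = EX_FIRST_SOB + queue_idx * EX_RSVD_SOBS;

		printf("queue %d: SOB ids %d..%d\n",
		       queue_idx, base_sob_id, base_sob_id + EX_RSVD_SOBS - 1);
	}
	return 0;
}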
/*
* queue_init - main initialization function for H/W queue object
*
@@ -747,8 +764,6 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
{
int rc;
- BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
q->hw_queue_id = hw_queue_id;
switch (q->queue_type) {
@@ -774,6 +789,9 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
break;
}
+ if (q->supports_sync_stream)
+ sync_stream_queue_init(hdev, q->hw_queue_id);
+
if (rc)
return rc;
@@ -835,7 +853,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
struct hl_hw_queue *q;
int i, rc, q_ready_cnt;
- hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
+ hdev->kernel_queues = kcalloc(asic->max_queues,
sizeof(*hdev->kernel_queues), GFP_KERNEL);
if (!hdev->kernel_queues) {
@@ -845,9 +863,11 @@ int hl_hw_queues_create(struct hl_device *hdev)
/* Initialize the H/W queues */
for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
- i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
+ i < asic->max_queues ; i++, q_ready_cnt++, q++) {
q->queue_type = asic->hw_queues_props[i].type;
+ q->supports_sync_stream =
+ asic->hw_queues_props[i].supports_sync_stream;
rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
@@ -870,9 +890,10 @@ release_queues:
void hl_hw_queues_destroy(struct hl_device *hdev)
{
struct hl_hw_queue *q;
+ u32 max_queues = hdev->asic_prop.max_queues;
int i;
- for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
+ for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
queue_fini(hdev, q);
kfree(hdev->kernel_queues);
@@ -881,15 +902,17 @@ void hl_hw_queues_destroy(struct hl_device *hdev)
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
struct hl_hw_queue *q;
+ u32 max_queues = hdev->asic_prop.max_queues;
int i;
- for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
+ for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
if ((!q->valid) ||
((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
continue;
- q->pi = q->ci = 0;
+ q->pi = 0;
+ atomic_set(&q->ci, 0);
- if (q->queue_type == QUEUE_TYPE_EXT)
- hdev->asic_funcs->ext_queue_reset(hdev, q->hw_queue_id);
+ if (q->supports_sync_stream)
+ sync_stream_queue_reset(hdev, q->hw_queue_id);
}
}
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index b997336fa75f..b997336fa75f 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/common/irq.c
index fac65fbd70e8..c8db717023f5 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -10,11 +10,12 @@
#include <linux/slab.h>
/**
- * This structure is used to schedule work of EQ entry and armcp_reset event
+ * struct hl_eqe_work - This structure is used to schedule work of EQ
+ * entry and armcp_reset event
*
- * @eq_work - workqueue object to run when EQ entry is received
- * @hdev - pointer to device structure
- * @eq_entry - copy of the EQ entry
+ * @eq_work: workqueue object to run when EQ entry is received
+ * @hdev: pointer to device structure
+ * @eq_entry: copy of the EQ entry
*/
struct hl_eqe_work {
struct work_struct eq_work;
@@ -22,7 +23,7 @@ struct hl_eqe_work {
struct hl_eq_entry eq_entry;
};
-/*
+/**
* hl_cq_inc_ptr - increment ci or pi of cq
*
* @ptr: the current ci or pi value of the completion queue
@@ -38,7 +39,7 @@ inline u32 hl_cq_inc_ptr(u32 ptr)
return ptr;
}
-/*
+/**
* hl_eq_inc_ptr - increment ci of eq
*
* @ptr: the current ci value of the event queue
@@ -65,7 +66,7 @@ static void irq_handle_eqe(struct work_struct *work)
kfree(eqe_work);
}
-/*
+/**
* hl_irq_handler_cq - irq handler for completion queue
*
* @irq: irq number
@@ -118,15 +119,10 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
if ((shadow_index_valid) && (!hdev->disabled)) {
job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
- queue_work(hdev->cq_wq, &job->finish_work);
+ queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
}
- /* Update ci of the context's queue. There is no
- * need to protect it with spinlock because this update is
- * done only inside IRQ and there is a different IRQ per
- * queue
- */
- queue->ci = hl_queue_inc_ptr(queue->ci);
+ atomic_inc(&queue->ci);
/* Clear CQ entry ready bit */
cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
@@ -141,7 +137,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
return IRQ_HANDLED;
}
-/*
+/**
* hl_irq_handler_eq - irq handler for event queue
*
* @irq: irq number
@@ -205,7 +201,7 @@ skip_irq:
return IRQ_HANDLED;
}
-/*
+/**
 * hl_cq_init - main initialization function for a cq object
*
* @hdev: pointer to device structure
@@ -219,8 +215,6 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
void *p;
- BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
&q->bus_address, GFP_KERNEL | __GFP_ZERO);
if (!p)
@@ -237,7 +231,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
return 0;
}
-/*
+/**
* hl_cq_fini - destroy completion queue
*
* @hdev: pointer to device structure
@@ -268,7 +262,7 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}
-/*
+/**
* hl_eq_init - main initialization function for an event queue object
*
* @hdev: pointer to device structure
@@ -281,8 +275,6 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
void *p;
- BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
HL_EQ_SIZE_IN_BYTES,
&q->bus_address);
@@ -296,7 +288,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
return 0;
}
-/*
+/**
* hl_eq_fini - destroy event queue
*
* @hdev: pointer to device structure
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/common/memory.c
index 47da84a17719..dce9273e557a 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -7,7 +7,7 @@
#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -1730,8 +1730,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
*/
if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
dev_notice(hdev->dev,
- "ctx %d is freed while it has va in use\n",
- ctx->asid);
+ "user released device without removing its memory mappings\n");
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
dev_dbg(hdev->dev,
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index a290d6b49d78..edcc11d5eaf1 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/genalloc.h>
#include <linux/slab.h>
@@ -502,7 +502,6 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
return 0;
mutex_init(&ctx->mmu_lock);
- hash_init(ctx->mmu_phys_hash);
hash_init(ctx->mmu_shadow_hash);
return dram_default_mapping_init(ctx);
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/common/pci.c
index 9f634ef6f5b3..7bd3737571f3 100644
--- a/drivers/misc/habanalabs/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -6,16 +6,22 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/pci/pci_general.h"
+#include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
+#include <linux/bitfield.h>
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10)
+#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31)
+#define IATU_REGION_CTRL_MATCH_MODE_MASK BIT(30)
+#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK BIT(19)
+#define IATU_REGION_CTRL_BAR_NUM_MASK GENMASK(10, 8)
+
/**
* hl_pci_bars_map() - Map PCI BARs.
* @hdev: Pointer to hl_device structure.
- * @bar_name: Array of BAR names.
+ * @name: Array of BAR names.
* @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
*
* Request PCI regions and map them to kernel virtual addresses.
@@ -61,7 +67,7 @@ err:
return rc;
}
-/*
+/**
* hl_pci_bars_unmap() - Unmap PCI BARS.
* @hdev: Pointer to hl_device structure.
*
@@ -80,9 +86,11 @@ static void hl_pci_bars_unmap(struct hl_device *hdev)
pci_release_regions(pdev);
}
-/*
+/**
* hl_pci_elbi_write() - Write through the ELBI interface.
* @hdev: Pointer to hl_device structure.
+ * @addr: Address to write to
+ * @data: Data to write
*
* Return: 0 on success, negative value for failure.
*/
@@ -140,6 +148,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
/**
* hl_pci_iatu_write() - iatu write routine.
* @hdev: Pointer to hl_device structure.
+ * @addr: Address to write to
+ * @data: Data to write
*
* Return: 0 on success, negative value for failure.
*/
@@ -161,7 +171,7 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
return 0;
}
-/*
+/**
* hl_pci_reset_link_through_bridge() - Reset PCI link.
* @hdev: Pointer to hl_device structure.
*/
@@ -183,110 +193,94 @@ static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
}
/**
- * hl_pci_set_dram_bar_base() - Set DDR BAR to map specific device address.
+ * hl_pci_set_inbound_region() - Configure inbound region
* @hdev: Pointer to hl_device structure.
- * @inbound_region: Inbound region number.
- * @bar: PCI BAR number.
- * @addr: Address in DRAM. Must be aligned to DRAM bar size.
+ * @region: Inbound region number.
+ * @pci_region: Inbound region parameters.
*
- * Configure the iATU so that the DRAM bar will start at the specified address.
+ * Configure the iATU inbound region.
*
* Return: 0 on success, negative value for failure.
*/
-int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
- u64 addr)
+int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
+ struct hl_inbound_pci_region *pci_region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 offset;
- int rc;
+ u64 bar_phys_base, region_base, region_end_address;
+ u32 offset, ctrl_reg_val;
+ int rc = 0;
- switch (inbound_region) {
- case 0:
- offset = 0x100;
- break;
- case 1:
- offset = 0x300;
- break;
- case 2:
- offset = 0x500;
- break;
- default:
- dev_err(hdev->dev, "Invalid inbound region %d\n",
- inbound_region);
- return -EINVAL;
- }
+ /* region offset */
+ offset = (0x200 * region) + 0x100;
+
+ if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
+ bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
+ region_base = bar_phys_base + pci_region->offset_in_bar;
+ region_end_address = region_base + pci_region->size - 1;
- if (bar != 0 && bar != 2 && bar != 4) {
- dev_err(hdev->dev, "Invalid PCI BAR %d\n", bar);
- return -EINVAL;
+ rc |= hl_pci_iatu_write(hdev, offset + 0x8,
+ lower_32_bits(region_base));
+ rc |= hl_pci_iatu_write(hdev, offset + 0xC,
+ upper_32_bits(region_base));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x10,
+ lower_32_bits(region_end_address));
}
/* Point to the specified address */
- rc = hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(addr));
- rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(addr));
+ rc = hl_pci_iatu_write(hdev, offset + 0x14,
+ lower_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x18,
+ upper_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
- /* Enable + BAR match + match enable + BAR number */
- rc |= hl_pci_iatu_write(hdev, offset + 0x4, 0xC0080000 | (bar << 8));
+
+ /* Enable + bar/address match + match enable + bar number */
+ ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
+ pci_region->mode);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);
+
+ if (pci_region->mode == PCI_BAR_MATCH_MODE)
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
+ pci_region->bar);
+
+ rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
/* Return the DBI window to the default location */
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
if (rc)
- dev_err(hdev->dev, "failed to map DRAM bar to 0x%08llx\n",
- addr);
+ dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
+ pci_region->bar, pci_region->addr);
return rc;
}
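
The iATU control word above is now assembled with FIELD_PREP() against the IATU_REGION_CTRL_* masks instead of the removed hard-coded 0xC0080000 | (bar << 8) value. A standalone sketch of the same packing with open-coded shifts, using local macros that mirror the bit positions of those masks:

#include <stdint.h>
#include <stdio.h>

#define EX_REGION_EN_BIT     31   /* mirrors IATU_REGION_CTRL_REGION_EN_MASK */
#define EX_MATCH_MODE_BIT    30   /* mirrors IATU_REGION_CTRL_MATCH_MODE_MASK */
#define EX_NUM_MATCH_EN_BIT  19   /* mirrors IATU_REGION_CTRL_NUM_MATCH_EN_MASK */
#define EX_BAR_NUM_SHIFT      8   /* mirrors IATU_REGION_CTRL_BAR_NUM_MASK, bits 10:8 */

int main(void)
{
	uint32_t ctrl = 0;
	unsigned int bar = 4;        /* example BAR number */
	unsigned int bar_match = 1;  /* 1 = BAR match mode, 0 = address match */

	ctrl |= 1u << EX_REGION_EN_BIT;
	ctrl |= (uint32_t)bar_match << EX_MATCH_MODE_BIT;
	ctrl |= 1u << EX_NUM_MATCH_EN_BIT;
	if (bar_match)
		ctrl |= (bar & 0x7u) << EX_BAR_NUM_SHIFT;

	/* With bar = 4 this reproduces the 0xC0080000 | (bar << 8) value
	 * written by the removed code.
	 */
	printf("ctrl = 0x%08X\n", (unsigned int)ctrl);
	return 0;
}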
/**
- * hl_pci_init_iatu() - Initialize the iATU unit inside the PCI controller.
+ * hl_pci_set_outbound_region() - Configure outbound region 0
* @hdev: Pointer to hl_device structure.
- * @sram_base_address: SRAM base address.
- * @dram_base_address: DRAM base address.
- * @host_phys_base_address: Base physical address of host memory for device
- * transactions.
- * @host_phys_size: Size of host memory for device transactions.
+ * @pci_region: Outbound region parameters.
*
- * This is needed in case the firmware doesn't initialize the iATU.
+ * Configure the iATU outbound region 0.
*
* Return: 0 on success, negative value for failure.
*/
-int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
- u64 dram_base_address, u64 host_phys_base_address,
- u64 host_phys_size)
+int hl_pci_set_outbound_region(struct hl_device *hdev,
+ struct hl_outbound_pci_region *pci_region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 host_phys_end_addr;
+ u64 outbound_region_end_address;
int rc = 0;
- /* Inbound Region 0 - Bar 0 - Point to SRAM base address */
- rc = hl_pci_iatu_write(hdev, 0x114, lower_32_bits(sram_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x118, upper_32_bits(sram_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x100, 0);
- /* Enable + Bar match + match enable */
- rc |= hl_pci_iatu_write(hdev, 0x104, 0xC0080000);
-
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
-
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
- /* Point to DRAM */
- if (!hdev->asic_funcs->set_dram_bar_base)
- return -EINVAL;
- if (hdev->asic_funcs->set_dram_bar_base(hdev, dram_base_address) ==
- U64_MAX)
- return -EIO;
-
- /* Outbound Region 0 - Point to Host */
- host_phys_end_addr = host_phys_base_address + host_phys_size - 1;
+ /* Outbound Region 0 */
+ outbound_region_end_address =
+ pci_region->addr + pci_region->size - 1;
rc |= hl_pci_iatu_write(hdev, 0x008,
- lower_32_bits(host_phys_base_address));
+ lower_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, 0x00C,
- upper_32_bits(host_phys_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x010, lower_32_bits(host_phys_end_addr));
+ upper_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, 0x010,
+ lower_32_bits(outbound_region_end_address));
rc |= hl_pci_iatu_write(hdev, 0x014, 0);
if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64))
@@ -294,7 +288,8 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
else
rc |= hl_pci_iatu_write(hdev, 0x018, 0);
- rc |= hl_pci_iatu_write(hdev, 0x020, upper_32_bits(host_phys_end_addr));
+ rc |= hl_pci_iatu_write(hdev, 0x020,
+ upper_32_bits(outbound_region_end_address));
/* Increase region size */
rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
/* Enable */
@@ -304,16 +299,12 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
- if (rc)
- return -EIO;
-
- return 0;
+ return rc;
}
/**
* hl_pci_set_dma_mask() - Set DMA masks for the device.
* @hdev: Pointer to hl_device structure.
- * @dma_mask: number of bits for the requested dma mask.
*
* This function sets the DMA masks (regular and consistent) for a specified
* value. If it doesn't succeed, it tries to set it to a fall-back value
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 70b6b1863c2e..b3cb0ac4721c 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -331,6 +331,9 @@ static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
char *data;
int rc;
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
if (!max_size)
return -EINVAL;
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/misc/habanalabs/gaudi/Makefile
index f802cdc980ca..c9f4703cff24 100644
--- a/drivers/misc/habanalabs/gaudi/Makefile
+++ b/drivers/misc/habanalabs/gaudi/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-subdir-ccflags-y += -I$(src)
-
HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_hwmgr.o gaudi/gaudi_security.o \
gaudi/gaudi_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 637a9d608707..00a0a7238d81 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -6,12 +6,12 @@
*/
#include "gaudiP.h"
-#include "include/hw_ip/mmu/mmu_general.h"
-#include "include/hw_ip/mmu/mmu_v1_1.h"
-#include "include/gaudi/gaudi_masks.h"
-#include "include/gaudi/gaudi_fw_if.h"
-#include "include/gaudi/gaudi_reg_map.h"
-#include "include/gaudi/gaudi_async_ids_map_extended.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_v1_1.h"
+#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_fw_if.h"
+#include "../include/gaudi/gaudi_reg_map.h"
+#include "../include/gaudi/gaudi_async_ids_map_extended.h"
#include <linux/module.h>
#include <linux/pci.h>
@@ -21,6 +21,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
+#include <linux/bitfield.h>
/*
* Gaudi security scheme:
@@ -74,7 +75,6 @@
#define GAUDI_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC 20000 /* 20s */
-#define GAUDI_PLDM_SRESET_TIMEOUT_MSEC 14000 /* 14s */
#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */
#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
@@ -321,6 +321,13 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_3 */
};
+struct ecc_info_extract_params {
+ u64 block_address;
+ u32 num_memories;
+ bool derr;
+ bool disable_clock_gating;
+};
+
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
u64 phys_addr);
static int gaudi_send_job_on_qman0(struct hl_device *hdev,
@@ -339,22 +346,25 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
- if (GAUDI_QUEUE_ID_SIZE >= HL_MAX_QUEUES) {
- dev_err(hdev->dev,
- "Number of H/W queues must be smaller than %d\n",
- HL_MAX_QUEUES);
- return -EFAULT;
- }
+ prop->max_queues = GAUDI_QUEUE_ID_SIZE;
+ prop->hw_queues_props = kcalloc(prop->max_queues,
+ sizeof(struct hw_queue_properties),
+ GFP_KERNEL);
- for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ if (!prop->hw_queues_props)
+ return -ENOMEM;
+
+ for (i = 0 ; i < prop->max_queues ; i++) {
if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].requires_kernel_cb = 1;
+ prop->hw_queues_props[i].supports_sync_stream = 1;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
prop->hw_queues_props[i].requires_kernel_cb = 0;
+ prop->hw_queues_props[i].supports_sync_stream = 0;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
@@ -363,14 +373,13 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].requires_kernel_cb = 0;
+ prop->hw_queues_props[i].supports_sync_stream = 0;
}
}
- for (; i < HL_MAX_QUEUES; i++)
- prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
-
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
-
+ prop->sync_stream_first_sob = 0;
+ prop->sync_stream_first_mon = 0;
prop->dram_base_address = DRAM_PHYS_BASE;
prop->dram_size = GAUDI_HBM_SIZE_32GB;
prop->dram_end_address = prop->dram_base_address +
@@ -435,6 +444,8 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
+ prop->max_pending_cs = GAUDI_MAX_PENDING_CS;
+
return 0;
}
@@ -457,6 +468,7 @@ static int gaudi_pci_bars_map(struct hl_device *hdev)
static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
{
struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
@@ -464,7 +476,10 @@ static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
return old_addr;
/* Inbound Region 2 - Bar 4 - Point to HBM */
- rc = hl_pci_set_dram_bar_base(hdev, 2, 4, addr);
+ pci_region.mode = PCI_BAR_MATCH_MODE;
+ pci_region.bar = HBM_BAR_ID;
+ pci_region.addr = addr;
+ rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
if (rc)
return U64_MAX;
@@ -478,22 +493,43 @@ static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
static int gaudi_init_iatu(struct hl_device *hdev)
{
- int rc = 0;
+ struct hl_inbound_pci_region inbound_region;
+ struct hl_outbound_pci_region outbound_region;
+ int rc;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = SRAM_BAR_ID;
+ inbound_region.addr = SRAM_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
+ if (rc)
+ goto done;
/* Inbound Region 1 - Bar 2 - Point to SPI FLASH */
- rc = hl_pci_iatu_write(hdev, 0x314,
- lower_32_bits(SPI_FLASH_BASE_ADDR));
- rc |= hl_pci_iatu_write(hdev, 0x318,
- upper_32_bits(SPI_FLASH_BASE_ADDR));
- rc |= hl_pci_iatu_write(hdev, 0x300, 0);
- /* Enable + Bar match + match enable */
- rc |= hl_pci_iatu_write(hdev, 0x304, 0xC0080200);
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = CFG_BAR_ID;
+ inbound_region.addr = SPI_FLASH_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
+ if (rc)
+ goto done;
+ /* Inbound Region 2 - Bar 4 - Point to HBM */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = HBM_BAR_ID;
+ inbound_region.addr = DRAM_PHYS_BASE;
+ rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
if (rc)
- return -EIO;
+ goto done;
- return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
- HOST_PHYS_BASE, HOST_PHYS_SIZE);
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+
+ /* Outbound Region 0 - Point to Host */
+ outbound_region.addr = HOST_PHYS_BASE;
+ outbound_region.size = HOST_PHYS_SIZE;
+ rc = hl_pci_set_outbound_region(hdev, &outbound_region);
+
+done:
+ return rc;
}
static int gaudi_early_init(struct hl_device *hdev)
@@ -516,7 +552,8 @@ static int gaudi_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
SRAM_BAR_ID),
SRAM_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) {
@@ -526,20 +563,26 @@ static int gaudi_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
CFG_BAR_ID),
CFG_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
rc = hl_pci_init(hdev);
if (rc)
- return rc;
+ goto free_queue_props;
return 0;
+
+free_queue_props:
+ kfree(hdev->asic_prop.hw_queues_props);
+ return rc;
}
static int gaudi_early_fini(struct hl_device *hdev)
{
+ kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
@@ -554,11 +597,36 @@ static int gaudi_early_fini(struct hl_device *hdev)
static void gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 trace_freq = 0;
+ u32 pll_clk = 0;
+ u32 div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ u32 div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ u32 nr = RREG32(mmPSOC_CPU_PLL_NR);
+ u32 nf = RREG32(mmPSOC_CPU_PLL_NF);
+ u32 od = RREG32(mmPSOC_CPU_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ trace_freq = PLL_REF_CLK;
+ else
+ trace_freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ trace_freq = pll_clk;
+ else
+ trace_freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d", div_sel);
+ }
- prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
- prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
- prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
- prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
}
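
The trace frequency above is derived from the PLL feedback (nf), reference-divide (nr) and output-divide (od) registers as pll_clk = ref * (nf + 1) / ((nr + 1) * (od + 1)), optionally divided again by (div_fctr + 1). A worked example with made-up register values, using the 50 MHz reference that the removed rate-limiter code assumed:

#include <stdio.h>

int main(void)
{
	/* Made-up register values for illustration */
	unsigned int ref_mhz = 50;
	unsigned int nr = 0, nf = 35, od = 1, div_fctr = 1;

	unsigned int pll_clk = ref_mhz * (nf + 1) / ((nr + 1) * (od + 1));
	unsigned int divided = pll_clk / (div_fctr + 1);

	/* 50 * 36 / (1 * 2) = 900 MHz, then 900 / 2 = 450 MHz */
	printf("pll_clk = %u MHz, divided = %u MHz\n", pll_clk, divided);
	return 0;
}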
static int _gaudi_init_tpc_mem(struct hl_device *hdev,
@@ -573,7 +641,7 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev,
u8 tpc_id;
int rc;
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
if (!cb)
return -EFAULT;
@@ -1644,8 +1712,8 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev)
uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
hbm0_wr = 0x33333333;
- hbm1_wr = 0x33333333;
hbm0_rd = 0x77777777;
+ hbm1_wr = 0x55555555;
hbm1_rd = 0xDDDDDDDD;
WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr);
@@ -1695,125 +1763,6 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev)
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
}
-static void gaudi_init_rate_limiter(struct hl_device *hdev)
-{
- u32 nr, nf, od, sat, rst, timeout;
- u64 freq;
-
- nr = RREG32(mmPSOC_HBM_PLL_NR);
- nf = RREG32(mmPSOC_HBM_PLL_NF);
- od = RREG32(mmPSOC_HBM_PLL_OD);
- freq = (50 * (nf + 1)) / ((nr + 1) * (od + 1));
-
- dev_dbg(hdev->dev, "HBM frequency is %lluMHz\n", freq);
-
- /* Configuration is for five (5) DDMA channels */
- if (freq == 800) {
- sat = 4;
- rst = 11;
- timeout = 15;
- } else if (freq == 900) {
- sat = 4;
- rst = 15;
- timeout = 16;
- } else if (freq == 950) {
- sat = 4;
- rst = 15;
- timeout = 15;
- } else {
- dev_warn(hdev->dev,
- "unsupported HBM frequency %lluMHz, no rate-limiters\n",
- freq);
- return;
- }
-
- WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_1, 0x111);
-
- if (!hdev->rl_enable) {
- dev_info(hdev->dev, "Rate limiters disabled\n");
- return;
- }
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_SAT, sat);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_RST, rst);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_EN, 1);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_SAT, sat);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RST, rst);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_EN, 1);
-}
-
static void gaudi_init_golden_registers(struct hl_device *hdev)
{
u32 tpc_offset;
@@ -1823,8 +1772,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_hbm_cred(hdev);
- gaudi_init_rate_limiter(hdev);
-
hdev->asic_funcs->disable_clock_gating(hdev);
for (tpc_id = 0, tpc_offset = 0;
@@ -1845,9 +1792,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3);
-
- /* WA for H3-2081 */
- WREG32(mmPCIE_WRAP_MAX_OUTSTAND, 0x10ff);
}
static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
@@ -2649,29 +2593,16 @@ static void gaudi_disable_timestamp(struct hl_device *hdev)
static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
{
- u32 wait_timeout_ms, cpu_timeout_ms;
+ u32 wait_timeout_ms;
dev_info(hdev->dev,
"Halting compute engines and disabling interrupts\n");
- if (hdev->pldm) {
+ if (hdev->pldm)
wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
- cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
- } else {
+ else
wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
- cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
- }
- if (hard_reset) {
- /*
- * I don't know what is the state of the CPU so make sure it is
- * stopped in any means necessary
- */
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GAUDI_EVENT_HALT_MACHINE);
- msleep(cpu_timeout_ms);
- }
gaudi_stop_mme_qmans(hdev);
gaudi_stop_tpc_qmans(hdev);
@@ -2696,10 +2627,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_disable_timestamp(hdev);
- if (hard_reset)
- gaudi_disable_msi(hdev);
- else
- gaudi_sync_irqs(hdev);
+ gaudi_disable_msi(hdev);
}
static int gaudi_mmu_init(struct hl_device *hdev)
@@ -2733,8 +2661,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
- VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0);
WREG32(mmMMU_UP_MMU_ENABLE, 1);
WREG32(mmMMU_UP_SPI_MASK, 0xF);
@@ -2984,16 +2911,6 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_hbm_dma_qmans(hdev);
- /*
- * Before pushing u-boot/linux to device, need to set the hbm bar to
- * base address of dram
- */
- if (gaudi_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
- dev_err(hdev->dev,
- "failed to map HBM bar to DRAM base address\n");
- return -EIO;
- }
-
rc = gaudi_init_cpu(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU\n");
@@ -3052,48 +2969,56 @@ disable_queues:
static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- u32 status, reset_timeout_ms, boot_strap = 0;
+ u32 status, reset_timeout_ms, cpu_timeout_ms, boot_strap = 0;
+
+ if (!hard_reset) {
+ dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
+ return;
+ }
if (hdev->pldm) {
- if (hard_reset)
- reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
- else
- reset_timeout_ms = GAUDI_PLDM_SRESET_TIMEOUT_MSEC;
+ reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
} else {
reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
}
- if (hard_reset) {
- /* Tell ASIC not to re-initialize PCIe */
- WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+ /* Set device to handle FLR by H/W as we will put the device CPU to
+ * halt mode
+ */
+ WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
+ PCIE_AUX_FLR_CTRL_INT_MASK_MASK));
- boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
- /* H/W bug WA:
- * rdata[31:0] = strap_read_val;
- * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
- */
- boot_strap = (((boot_strap & 0x7FE00000) << 1) |
- (boot_strap & 0x001FFFFF));
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
-
- /* Restart BTL/BLR upon hard-reset */
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
-
- WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
- 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
- } else {
- /* Don't restart BTL/BLR upon soft-reset */
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 0);
+ /* I don't know what is the state of the CPU so make sure it is
+ * stopped in any means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
- WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST,
- 1 << PSOC_GLOBAL_CONF_SOFT_RST_IND_SHIFT);
- dev_info(hdev->dev,
- "Issued SOFT reset command, going to wait %dms\n",
- reset_timeout_ms);
- }
+ msleep(cpu_timeout_ms);
+
+ /* Tell ASIC not to re-initialize PCIe */
+ WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+
+ boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
+
+ /* H/W bug WA:
+ * rdata[31:0] = strap_read_val;
+ * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
+ */
+ boot_strap = (((boot_strap & 0x7FE00000) << 1) |
+ (boot_strap & 0x001FFFFF));
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
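+
+The strap workaround above re-encodes the read value by shifting bits 30:21 up one position and inserting a zero at bit 21, matching the rdata/wdata description in the comment. A quick self-contained check of that transformation (the input value is arbitrary):
+
+#include <assert.h>
+#include <stdint.h>
+
+int main(void)
+{
+	uint32_t rdata = 0x5A7FFFFF;  /* arbitrary strap readback for illustration */
+
+	/* wdata[31:0] = rdata[30:21], 1'b0, rdata[20:0] */
+	uint32_t wdata = ((rdata & 0x7FE00000u) << 1) | (rdata & 0x001FFFFFu);
+
+	/* Bit 21 of the result is always cleared; bits 20:0 are preserved */
+	assert((wdata & (1u << 21)) == 0);
+	assert((wdata & 0x001FFFFFu) == (rdata & 0x001FFFFFu));
+	return 0;
+}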
+
+ /* Restart BTL/BLR upon hard-reset */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -3107,18 +3032,6 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
"Timeout while waiting for device to reset 0x%x\n",
status);
- if (!hard_reset) {
- gaudi->hw_cap_initialized &= ~(HW_CAP_PCI_DMA | HW_CAP_MME |
- HW_CAP_TPC_MASK |
- HW_CAP_HBM_DMA);
-
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GAUDI_EVENT_SOFT_RESET);
- return;
- }
-
- /* We continue here only for hard-reset */
-
WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap);
gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
@@ -3578,7 +3491,7 @@ static int gaudi_test_queues(struct hl_device *hdev)
{
int i, rc, ret_val = 0;
- for (i = 0 ; i < HL_MAX_QUEUES ; i++) {
+ for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) {
if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) {
rc = gaudi_test_queue(hdev, i);
if (rc)
@@ -4159,9 +4072,8 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
@@ -4233,9 +4145,8 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
@@ -4364,11 +4275,11 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
{
struct packet_lin_dma *lin_dma_pkt;
struct hl_cs_job *job;
- u32 cb_size, ctl;
+ u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
int rc;
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
if (!cb)
return -EFAULT;
@@ -4393,6 +4304,15 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
goto release_cb;
}
+ /* Verify DMA is OK */
+ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
+ if (err_cause && !hdev->init_done) {
+ dev_dbg(hdev->dev,
+ "Clearing DMA0 engine from errors (cause 0x%x)\n",
+ err_cause);
+ WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
+ }
+
job->id = 0;
job->user_cb = cb;
job->user_cb->cs_cnt++;
@@ -4404,11 +4324,23 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
hl_debugfs_add_job(hdev, job);
rc = gaudi_send_job_on_qman0(hdev, job);
-
hl_debugfs_remove_job(hdev, job);
kfree(job);
cb->cs_cnt--;
+ /* Verify DMA is OK */
+ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
+ if (err_cause) {
+ dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
+ rc = -EIO;
+ if (!hdev->init_done) {
+ dev_dbg(hdev->dev,
+ "Clearing DMA0 engine from errors (cause 0x%x)\n",
+ err_cause);
+ WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
+ }
+ }
+
release_cb:
hl_cb_put(cb);
hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
@@ -5254,62 +5186,76 @@ static void gaudi_print_mmu_error_info(struct hl_device *hdev)
* | |0xF4C memory wrappers 127:96 |
* +-------------------+------------------------------------------------------+
*/
-static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
- const char *block_name,
- u64 block_address, int num_memories,
- bool derr, bool disable_clock_gating)
+static int gaudi_extract_ecc_info(struct hl_device *hdev,
+ struct ecc_info_extract_params *params, u64 *ecc_address,
+ u64 *ecc_syndrom, u8 *memory_wrapper_idx)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- int num_mem_regs = num_memories / 32 + ((num_memories % 32) ? 1 : 0);
+ u32 i, num_mem_regs, reg, err_bit;
+ u64 err_addr, err_word = 0;
+ int rc = 0;
+
+ num_mem_regs = params->num_memories / 32 +
+ ((params->num_memories % 32) ? 1 : 0);
- if (block_address >= CFG_BASE)
- block_address -= CFG_BASE;
+ if (params->block_address >= CFG_BASE)
+ params->block_address -= CFG_BASE;
- if (derr)
- block_address += GAUDI_ECC_DERR0_OFFSET;
+ if (params->derr)
+ err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET;
else
- block_address += GAUDI_ECC_SERR0_OFFSET;
+ err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET;
- if (disable_clock_gating) {
+ if (params->disable_clock_gating) {
mutex_lock(&gaudi->clk_gate_mutex);
hdev->asic_funcs->disable_clock_gating(hdev);
}
- switch (num_mem_regs) {
- case 1:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x\n",
- block_name, RREG32(block_address));
- break;
- case 2:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4));
- break;
- case 3:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4),
- RREG32(block_address + 8));
- break;
- case 4:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4),
- RREG32(block_address + 8), RREG32(block_address + 0xc));
- break;
- default:
- break;
+ /* Set invalid wrapper index */
+ *memory_wrapper_idx = 0xFF;
+
+ /* Iterate through memory wrappers, a single bit must be set */
+ for (i = 0 ; i < num_mem_regs ; i++) {
+ err_addr += i * 4;
+ err_word = RREG32(err_addr);
+ if (err_word) {
+ err_bit = __ffs(err_word);
+ *memory_wrapper_idx = err_bit + (32 * i);
+ break;
+ }
+ }
+ if (*memory_wrapper_idx == 0xFF) {
+ dev_err(hdev->dev, "ECC error information cannot be found\n");
+ rc = -EINVAL;
+ goto enable_clk_gate;
}
- if (disable_clock_gating) {
+ WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET,
+ *memory_wrapper_idx);
+
+ *ecc_address =
+ RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET);
+ *ecc_syndrom =
+ RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET);
+
+ /* Clear error indication */
+ reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET);
+ if (params->derr)
+ reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_DERR_MASK, 1);
+ else
+ reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_SERR_MASK, 1);
+
+ WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg);
+
+enable_clk_gate:
+ if (params->disable_clock_gating) {
hdev->asic_funcs->set_clock_gating(hdev);
+
mutex_unlock(&gaudi->clk_gate_mutex);
}
+
+ return rc;
}
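
The extraction routine above scans the 32-bit error indication registers and converts the first set bit into a global memory-wrapper index. A host-side sketch of that search, with a small array standing in for the register reads and POSIX ffs() standing in for the kernel's __ffs() (which is 0-based rather than 1-based):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>   /* ffs() */

int main(void)
{
	/* Stand-in for the ECC error indication registers (one bit per wrapper) */
	uint32_t err_regs[3] = { 0x0, 0x00040000, 0x0 };
	int wrapper_idx = -1;

	for (int i = 0; i < 3; i++) {
		if (err_regs[i]) {
			wrapper_idx = (ffs((int)err_regs[i]) - 1) + 32 * i;
			break;
		}
	}

	/* Bit 18 of word 1 -> wrapper index 50 */
	printf("memory wrapper index = %d\n", wrapper_idx);
	return 0;
}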
static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
@@ -5362,239 +5308,99 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
}
}
-static void gaudi_print_ecc_info(struct hl_device *hdev, u16 event_type)
+static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
+ struct hl_eq_ecc_data *ecc_data)
{
- u64 block_address;
- u8 index;
- int num_memories;
- char desc[32];
- bool derr;
- bool disable_clock_gating;
+ struct ecc_info_extract_params params;
+ u64 ecc_address = 0, ecc_syndrom = 0;
+ u8 index, memory_wrapper_idx = 0;
+ bool extract_info_from_fw;
+ int rc;
switch (event_type) {
- case GAUDI_EVENT_PCIE_CORE_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
- block_address = mmPCIE_CORE_BASE;
- num_memories = 51;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_CORE_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
- block_address = mmPCIE_CORE_BASE;
- num_memories = 51;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_IF_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
- block_address = mmPCIE_WRAP_BASE;
- num_memories = 11;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_IF_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
- block_address = mmPCIE_WRAP_BASE;
- num_memories = 11;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_PHY_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
- block_address = mmPCIE_PHY_BASE;
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_PHY_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
- block_address = mmPCIE_PHY_BASE;
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
+ case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
+ case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
+ extract_info_from_fw = true;
break;
case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
index = event_type - GAUDI_EVENT_TPC0_SERR;
- block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
- num_memories = 90;
- derr = false;
- disable_clock_gating = true;
+ params.block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+ params.num_memories = 90;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
index = event_type - GAUDI_EVENT_TPC0_DERR;
- block_address =
+ params.block_address =
mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
- num_memories = 90;
- derr = true;
- disable_clock_gating = true;
+ params.num_memories = 90;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_ACC_SERR:
case GAUDI_EVENT_MME1_ACC_SERR:
case GAUDI_EVENT_MME2_ACC_SERR:
case GAUDI_EVENT_MME3_ACC_SERR:
index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
- block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
- num_memories = 128;
- derr = false;
- disable_clock_gating = true;
+ params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 128;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_ACC_DERR:
case GAUDI_EVENT_MME1_ACC_DERR:
case GAUDI_EVENT_MME2_ACC_DERR:
case GAUDI_EVENT_MME3_ACC_DERR:
index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
- block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
- num_memories = 128;
- derr = true;
- disable_clock_gating = true;
+ params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 128;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_SBAB_SERR:
case GAUDI_EVENT_MME1_SBAB_SERR:
case GAUDI_EVENT_MME2_SBAB_SERR:
case GAUDI_EVENT_MME3_SBAB_SERR:
index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
- block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
- num_memories = 33;
- derr = false;
- disable_clock_gating = true;
+ params.block_address =
+ mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 33;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_SBAB_DERR:
case GAUDI_EVENT_MME1_SBAB_DERR:
case GAUDI_EVENT_MME2_SBAB_DERR:
case GAUDI_EVENT_MME3_SBAB_DERR:
index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
- block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
- num_memories = 33;
- derr = true;
- disable_clock_gating = true;
- break;
- case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
- index = event_type - GAUDI_EVENT_DMA0_SERR_ECC;
- block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
- num_memories = 16;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
- index = event_type - GAUDI_EVENT_DMA0_DERR_ECC;
- block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
- num_memories = 16;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_CPU_IF_ECC_SERR:
- block_address = mmCPU_IF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_CPU_IF_ECC_DERR:
- block_address = mmCPU_IF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_MEM_SERR:
- block_address = mmPSOC_GLOBAL_CONF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_MEM_DERR:
- block_address = mmPSOC_GLOBAL_CONF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
- block_address = mmPSOC_CS_TRACE_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 2;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
- block_address = mmPSOC_CS_TRACE_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 2;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
- index = event_type - GAUDI_EVENT_SRAM0_SERR;
- block_address =
- mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
- num_memories = 2;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
- index = event_type - GAUDI_EVENT_SRAM0_DERR;
- block_address =
- mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
- num_memories = 2;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
- index = event_type - GAUDI_EVENT_DMA_IF0_SERR;
- block_address = mmDMA_IF_W_S_BASE +
- index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
- num_memories = 60;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
- index = event_type - GAUDI_EVENT_DMA_IF0_DERR;
- block_address = mmDMA_IF_W_S_BASE +
- index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
- derr = true;
- num_memories = 60;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
- index = event_type - GAUDI_EVENT_HBM_0_SERR;
- /* HBM Registers are at different offsets */
- block_address = mmHBM0_BASE + 0x8000 +
- index * (mmHBM1_BASE - mmHBM0_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
- derr = false;
- num_memories = 64;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
- index = event_type - GAUDI_EVENT_HBM_0_SERR;
- /* HBM Registers are at different offsets */
- block_address = mmHBM0_BASE + 0x8000 +
- index * (mmHBM1_BASE - mmHBM0_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
- derr = true;
- num_memories = 64;
- disable_clock_gating = false;
- break;
+ params.block_address =
+ mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 33;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
+ break;
default:
return;
}
- gaudi_print_ecc_info_generic(hdev, desc, block_address, num_memories,
- derr, disable_clock_gating);
+ if (extract_info_from_fw) {
+ ecc_address = le64_to_cpu(ecc_data->ecc_address);
+ ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
+ memory_wrapper_idx = ecc_data->memory_wrapper_idx;
+ } else {
+ rc = gaudi_extract_ecc_info(hdev, &params, &ecc_address,
+ &ecc_syndrom, &memory_wrapper_idx);
+ if (rc)
+ return;
+ }
+
+ dev_err(hdev->dev,
+ "ECC error detected. address: %#llx. Syndrom: %#llx. block id %u\n",
+ ecc_address, ecc_syndrom, memory_wrapper_idx);
}
static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
@@ -5644,8 +5450,6 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
- gaudi_print_ecc_info(hdev, event_type);
-
if (razwi) {
gaudi_print_razwi_info(hdev);
gaudi_print_mmu_error_info(hdev);
@@ -5875,10 +5679,15 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
- fallthrough;
- case GAUDI_EVENT_GIC500:
case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
case GAUDI_EVENT_MMU_DERR:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_GIC500:
case GAUDI_EVENT_AXI_ECC:
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
@@ -5974,6 +5783,11 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
fallthrough;
case GAUDI_EVENT_MMU_SERR:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
case GAUDI_EVENT_PCIE_DEC:
case GAUDI_EVENT_MME0_WBC_RSP:
case GAUDI_EVENT_MME0_SBAB0_RSP:
@@ -6458,47 +6272,14 @@ static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
return RREG32(mmHW_STATE);
}
-static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
-{
- return gaudi_cq_assignment[cq_idx];
-}
-
-static void gaudi_ext_queue_init(struct hl_device *hdev, u32 q_idx)
+static int gaudi_ctx_init(struct hl_ctx *ctx)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
- struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
- struct hl_hw_sob *hw_sob;
- int sob, ext_idx = gaudi->ext_queue_idx++;
-
- /*
- * The external queues might not sit sequentially, hence use the
- * real external queue index for the SOB/MON base id.
- */
- hw_queue->base_sob_id = ext_idx * HL_RSVD_SOBS;
- hw_queue->base_mon_id = ext_idx * HL_RSVD_MONS;
- hw_queue->next_sob_val = 1;
- hw_queue->curr_sob_offset = 0;
-
- for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
- hw_sob = &hw_queue->hw_sob[sob];
- hw_sob->hdev = hdev;
- hw_sob->sob_id = hw_queue->base_sob_id + sob;
- hw_sob->q_idx = q_idx;
- kref_init(&hw_sob->kref);
- }
+ return 0;
}
-static void gaudi_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
- struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
-
- /*
- * In case we got here due to a stuck CS, the refcnt might be bigger
- * than 1 and therefore we reset it.
- */
- kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
- hw_queue->curr_sob_offset = 0;
- hw_queue->next_sob_val = 1;
+ return gaudi_cq_assignment[cq_idx];
}
static u32 gaudi_get_signal_cb_size(struct hl_device *hdev)
@@ -6523,16 +6304,17 @@ static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
pkt = (struct packet_msg_short *) (uintptr_t) cb->kernel_address;
memset(pkt, 0, sizeof(*pkt));
- value = 1 << GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT; /* inc by 1 */
- value |= 1 << GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT; /* add mode */
+ /* Inc by 1, Mode ADD */
+ value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK, 1);
- ctl = (sob_id * 4) << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT; /* SOB id */
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
- ctl |= 3 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S SOB base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
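
The hunks in this area replace open-coded shift-and-mask sequences with FIELD_PREP() from <linux/bitfield.h>, which derives the shift position from the mask itself. Below is a minimal userspace sketch of the idiom, assuming GCC/Clang builtins and using illustrative mask values rather than the real GAUDI register layout:

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's FIELD_PREP(): shift the value
 * up to the position of the mask's lowest set bit, then mask it.
 */
#define DEMO_FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define DEMO_OPCODE_MASK 0x1F000000u	/* illustrative: bits 28:24 */
#define DEMO_EB_MASK     0x20000000u	/* illustrative: bit 29 */

int main(void)
{
	uint32_t ctl = DEMO_FIELD_PREP(DEMO_OPCODE_MASK, 0x0C);

	ctl |= DEMO_FIELD_PREP(DEMO_EB_MASK, 1);

	/* Same result as the old open-coded form with explicit shifts. */
	assert(ctl == ((0x0Cu << 24) | (1u << 29)));
	return 0;
}

The kernel macro additionally performs compile-time checks that the value fits in the mask, which this sketch omits.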
@@ -6545,12 +6327,12 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
memset(pkt, 0, pkt_size);
- ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
- ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_MB_SHIFT; /* only last pkt needs MB */
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 0); /* last pkt MB */
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -6566,18 +6348,19 @@ static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id,
memset(pkt, 0, pkt_size);
- value = (sob_id / 8) << GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT;
- value |= sob_val << GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT;
- value |= 0 << GAUDI_PKT_SHORT_VAL_MON_MODE_SHIFT; /* GREATER_OR_EQUAL */
- value |= mask << GAUDI_PKT_SHORT_VAL_MON_MASK_SHIFT;
+ value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_id / 8);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK,
+				0); /* GREATER OR EQUAL */
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask);
- ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
- ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -6591,15 +6374,14 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
memset(pkt, 0, pkt_size);
- cfg = 1 << GAUDI_PKT_FENCE_CFG_DEC_VAL_SHIFT;
- cfg |= 1 << GAUDI_PKT_FENCE_CFG_TARGET_VAL_SHIFT;
- cfg |= 2 << GAUDI_PKT_FENCE_CFG_ID_SHIFT;
+ cfg = FIELD_PREP(GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK, 1);
+ cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
+ cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2);
- ctl = 0 << GAUDI_PKT_FENCE_CTL_PRED_SHIFT;
- ctl |= PACKET_FENCE << GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_FENCE_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_FENCE_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_FENCE_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_FENCE_CTL_OPCODE_MASK, PACKET_FENCE);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->cfg = cpu_to_le32(cfg);
pkt->ctl = cpu_to_le32(ctl);
@@ -6798,13 +6580,12 @@ static const struct hl_asic_funcs gaudi_funcs = {
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = gaudi_halt_coresight,
+ .ctx_init = gaudi_ctx_init,
.get_clk_rate = gaudi_get_clk_rate,
.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
.read_device_fw_version = gaudi_read_device_fw_version,
.load_firmware_to_device = gaudi_load_firmware_to_device,
.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
- .ext_queue_init = gaudi_ext_queue_init,
- .ext_queue_reset = gaudi_ext_queue_reset,
.get_signal_cb_size = gaudi_get_signal_cb_size,
.get_wait_cb_size = gaudi_get_wait_cb_size,
.gen_signal_cb = gaudi_gen_signal_cb,
@@ -6817,7 +6598,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
/**
* gaudi_set_asic_funcs - set GAUDI function pointers
*
- * @*hdev: pointer to hl_device structure
+ * @hdev: pointer to hl_device structure
*
*/
void gaudi_set_asic_funcs(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 41a8d9bff6bf..5dc99f6f0296 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -9,11 +9,11 @@
#define GAUDIP_H_
#include <uapi/misc/habanalabs.h>
-#include "habanalabs.h"
-#include "include/hl_boot_if.h"
-#include "include/gaudi/gaudi_packets.h"
-#include "include/gaudi/gaudi.h"
-#include "include/gaudi/gaudi_async_events.h"
+#include "../common/habanalabs.h"
+#include "../include/common/hl_boot_if.h"
+#include "../include/gaudi/gaudi_packets.h"
+#include "../include/gaudi/gaudi.h"
+#include "../include/gaudi/gaudi_async_events.h"
#define NUMBER_OF_EXT_HW_QUEUES 12
#define NUMBER_OF_CMPLT_QUEUES NUMBER_OF_EXT_HW_QUEUES
@@ -57,6 +57,12 @@
#define GAUDI_DEFAULT_CARD_NAME "HL2000"
+#define GAUDI_MAX_PENDING_CS 1024
+
+#if !IS_MAX_PENDING_CS_VALID(GAUDI_MAX_PENDING_CS)
+#error "GAUDI_MAX_PENDING_CS must be power of 2 and greater than 1"
+#endif
+
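
IS_MAX_PENDING_CS_VALID() comes from the common habanalabs headers and is not shown in this diff; judging by the #error text it rejects values that are not powers of two greater than 1. A hypothetical standalone sketch of such a compile-time guard:

#include <stdio.h>

/* Hypothetical stand-in for IS_MAX_PENDING_CS_VALID(): true only for
 * powers of two strictly greater than 1.
 */
#define DEMO_IS_MAX_PENDING_CS_VALID(n) \
	((n) > 1 && (((n) & ((n) - 1)) == 0))

#define DEMO_MAX_PENDING_CS 1024

#if !DEMO_IS_MAX_PENDING_CS_VALID(DEMO_MAX_PENDING_CS)
#error "DEMO_MAX_PENDING_CS must be power of 2 and greater than 1"
#endif

int main(void)
{
	printf("%d pending command submissions allowed\n", DEMO_MAX_PENDING_CS);
	return 0;
}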
#define PCI_DMA_NUMBER_OF_CHNLS 3
#define HBM_DMA_NUMBER_OF_CHNLS 5
#define DMA_NUMBER_OF_CHNLS (PCI_DMA_NUMBER_OF_CHNLS + \
@@ -117,14 +123,14 @@
/* Internal QMANs PQ sizes */
-#define MME_QMAN_LENGTH 64
+#define MME_QMAN_LENGTH 1024
#define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define HBM_DMA_QMAN_LENGTH 64
+#define HBM_DMA_QMAN_LENGTH 1024
#define HBM_DMA_QMAN_SIZE_IN_BYTES \
(HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define TPC_QMAN_LENGTH 64
+#define TPC_QMAN_LENGTH 1024
#define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
#define SRAM_USER_BASE_OFFSET GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START
@@ -228,7 +234,6 @@ struct gaudi_internal_qman_info {
* engine.
 * @multi_msi_mode: whether we are working in multi MSI or single MSI mode.
* Multi MSI is possible only with IOMMU enabled.
- * @ext_queue_idx: helper index for external queues initialization.
* @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
* 8-bit value so use u8.
*/
@@ -249,7 +254,6 @@ struct gaudi_device {
u32 events_stat_aggregate[GAUDI_EVENT_SIZE];
u32 hw_cap_initialized;
u8 multi_msi_mode;
- u8 ext_queue_idx;
u8 mmu_cache_inv_pi;
};
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index bf0e062d7b87..5673ee49819e 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -6,9 +6,9 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/gaudi_coresight.h"
-#include "include/gaudi/asic_reg/gaudi_regs.h"
-#include "include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_coresight.h"
+#include "../include/gaudi/asic_reg/gaudi_regs.h"
+#include "../include/gaudi/gaudi_masks.h"
#include <uapi/misc/habanalabs.h>
#include <linux/coresight.h>
@@ -392,6 +392,7 @@ static int gaudi_config_stm(struct hl_device *hdev,
{
struct hl_debug_params_stm *input;
u64 base_reg;
+ u32 frequency;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
@@ -420,7 +421,10 @@ static int gaudi_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xE00, lower_32_bits(input->sp_mask));
WREG32(base_reg + 0xEF4, input->id);
WREG32(base_reg + 0xDF4, 0x80);
- WREG32(base_reg + 0xE8C, input->frequency);
+ frequency = hdev->asic_prop.psoc_timestamp_frequency;
+ if (frequency == 0)
+ frequency = input->frequency;
+ WREG32(base_reg + 0xE8C, frequency);
WREG32(base_reg + 0xE90, 0x7FF);
/* SW-2176 - SW WA for HW bug */
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
index 6dd2c2a1cd70..1076b4932ce2 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -6,7 +6,7 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/gaudi_fw_if.h"
+#include "../include/gaudi/gaudi_fw_if.h"
void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
{
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 6a351e31fa6a..8d5d6ddee6ed 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -6,7 +6,7 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/asic_reg/gaudi_regs.h"
+#include "../include/gaudi/asic_reg/gaudi_regs.h"
#define GAUDI_NUMBER_OF_RR_REGS 24
#define GAUDI_NUMBER_OF_LBW_RANGES 12
@@ -447,8 +447,7 @@ static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
* gaudi_set_block_as_protected - set the given block as protected
*
* @hdev: pointer to hl_device structure
- * @block: block base address
- *
+ * @base: block base address
*/
static void gaudi_pb_set_block(struct hl_device *hdev, u64 base)
{
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile
index bd769083628e..b3f3b7b96683 100644
--- a/drivers/misc/habanalabs/goya/Makefile
+++ b/drivers/misc/habanalabs/goya/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-subdir-ccflags-y += -I$(src)
-
HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o \
goya/goya_coresight.o
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 88460b2138d8..85030759b2af 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -6,10 +6,10 @@
*/
#include "goyaP.h"
-#include "include/hw_ip/mmu/mmu_general.h"
-#include "include/hw_ip/mmu/mmu_v1_0.h"
-#include "include/goya/asic_reg/goya_masks.h"
-#include "include/goya/goya_reg_map.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_v1_0.h"
+#include "../include/goya/asic_reg/goya_masks.h"
+#include "../include/goya/goya_reg_map.h"
#include <linux/pci.h>
#include <linux/genalloc.h>
@@ -338,11 +338,19 @@ static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
-void goya_get_fixed_properties(struct hl_device *hdev)
+int goya_get_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
+ prop->max_queues = GOYA_QUEUE_ID_SIZE;
+ prop->hw_queues_props = kcalloc(prop->max_queues,
+ sizeof(struct hw_queue_properties),
+ GFP_KERNEL);
+
+ if (!prop->hw_queues_props)
+ return -ENOMEM;
+
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
@@ -362,9 +370,6 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].requires_kernel_cb = 0;
}
- for (; i < HL_MAX_QUEUES; i++)
- prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
-
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->dram_base_address = DRAM_PHYS_BASE;
@@ -427,6 +432,10 @@ void goya_get_fixed_properties(struct hl_device *hdev)
strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
+
+ prop->max_pending_cs = GOYA_MAX_PENDING_CS;
+
+ return 0;
}
/*
@@ -457,6 +466,7 @@ static int goya_pci_bars_map(struct hl_device *hdev)
static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
+ struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
@@ -464,7 +474,10 @@ static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
return old_addr;
/* Inbound Region 1 - Bar 4 - Point to DDR */
- rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
+ pci_region.mode = PCI_BAR_MATCH_MODE;
+ pci_region.bar = DDR_BAR_ID;
+ pci_region.addr = addr;
+ rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
if (rc)
return U64_MAX;
@@ -486,8 +499,35 @@ static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
*/
static int goya_init_iatu(struct hl_device *hdev)
{
- return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
- HOST_PHYS_BASE, HOST_PHYS_SIZE);
+ struct hl_inbound_pci_region inbound_region;
+ struct hl_outbound_pci_region outbound_region;
+ int rc;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = SRAM_CFG_BAR_ID;
+ inbound_region.addr = SRAM_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
+ if (rc)
+ goto done;
+
+ /* Inbound Region 1 - Bar 4 - Point to DDR */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = DDR_BAR_ID;
+ inbound_region.addr = DRAM_PHYS_BASE;
+ rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
+ if (rc)
+ goto done;
+
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+
+ /* Outbound Region 0 - Point to Host */
+ outbound_region.addr = HOST_PHYS_BASE;
+ outbound_region.size = HOST_PHYS_SIZE;
+ rc = hl_pci_set_outbound_region(hdev, &outbound_region);
+
+done:
+ return rc;
}
/*
@@ -508,7 +548,11 @@ static int goya_early_init(struct hl_device *hdev)
u32 val;
int rc;
- goya_get_fixed_properties(hdev);
+ rc = goya_get_fixed_properties(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get fixed properties\n");
+ return rc;
+ }
/* Check BAR sizes */
if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
@@ -518,7 +562,8 @@ static int goya_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
SRAM_CFG_BAR_ID),
CFG_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
@@ -528,14 +573,15 @@ static int goya_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
MSIX_BAR_ID),
MSIX_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
rc = hl_pci_init(hdev);
if (rc)
- return rc;
+ goto free_queue_props;
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
@@ -545,6 +591,10 @@ static int goya_early_init(struct hl_device *hdev)
}
return 0;
+
+free_queue_props:
+ kfree(hdev->asic_prop.hw_queues_props);
+ return rc;
}
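
Because goya_get_fixed_properties() now allocates hw_queues_props with kcalloc(), every early-exit path in goya_early_init() must release it, which is what the new free_queue_props label does. A userspace sketch of the same allocate-then-unwind shape, with calloc/free standing in for kcalloc/kfree and a hypothetical helper:

#include <errno.h>
#include <stdlib.h>

struct demo_dev {
	unsigned int max_queues;
	void *queue_props;
};

/* Hypothetical later init step that may fail. */
static int demo_pci_init(struct demo_dev *dev)
{
	(void)dev;
	return 0;
}

/* Allocate first, then funnel every later failure through one label so
 * the allocation is never leaked.
 */
int demo_early_init(struct demo_dev *dev)
{
	int rc;

	dev->queue_props = calloc(dev->max_queues, 64); /* 64: stand-in element size */
	if (!dev->queue_props)
		return -ENOMEM;

	rc = demo_pci_init(dev);
	if (rc)
		goto free_queue_props;

	return 0;

free_queue_props:
	free(dev->queue_props);
	return rc;
}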
/*
@@ -557,6 +607,7 @@ static int goya_early_init(struct hl_device *hdev)
*/
static int goya_early_fini(struct hl_device *hdev)
{
+ kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
@@ -593,11 +644,36 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 trace_freq = 0;
+ u32 pll_clk = 0;
+ u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
+ u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
+ u32 od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ trace_freq = PLL_REF_CLK;
+ else
+ trace_freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ trace_freq = pll_clk;
+ else
+ trace_freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d", div_sel);
+ }
- prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
- prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
- prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
- prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
}
int goya_late_init(struct hl_device *hdev)
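
The reworked goya_fetch_psoc_frequency() computes the trace/timestamp frequency from the PCI PLL registers: the PLL output is ref * (nf + 1) / ((nr + 1) * (od + 1)), optionally divided again by (div_fctr + 1) when a divided output is selected. A small sketch of that arithmetic follows; the 50 MHz reference is only an assumed value for illustration, the driver uses its own PLL_REF_CLK constant:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PLL_REF_CLK_MHZ 50u	/* assumed reference clock */

/* Mirror of the frequency math above: PLL output, then an optional
 * post-divider of (div_fctr + 1).
 */
static uint32_t demo_trace_freq_mhz(uint32_t nr, uint32_t nf, uint32_t od,
				    uint32_t div_fctr, int divided)
{
	uint32_t pll = DEMO_PLL_REF_CLK_MHZ * (nf + 1) / ((nr + 1) * (od + 1));

	return divided ? pll / (div_fctr + 1) : pll;
}

int main(void)
{
	/* nr=0, nf=31, od=1, post-divide by 4: 50 * 32 / 2 / 4 = 200 MHz */
	printf("%u MHz\n", demo_trace_freq_mhz(0, 31, 1, 3, 1));
	return 0;
}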
@@ -2165,29 +2241,15 @@ static void goya_disable_timestamp(struct hl_device *hdev)
static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
{
- u32 wait_timeout_ms, cpu_timeout_ms;
+ u32 wait_timeout_ms;
dev_info(hdev->dev,
"Halting compute engines and disabling interrupts\n");
- if (hdev->pldm) {
+ if (hdev->pldm)
wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
- cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
- } else {
+ else
wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
- cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
- }
-
- if (hard_reset) {
- /*
- * I don't know what is the state of the CPU so make sure it is
- * stopped in any means necessary
- */
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
- msleep(cpu_timeout_ms);
- }
goya_stop_external_queues(hdev);
goya_stop_internal_queues(hdev);
@@ -2492,14 +2554,26 @@ disable_queues:
static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
{
struct goya_device *goya = hdev->asic_specific;
- u32 reset_timeout_ms, status;
+ u32 reset_timeout_ms, cpu_timeout_ms, status;
- if (hdev->pldm)
+ if (hdev->pldm) {
reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
- else
+ cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
+ } else {
reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
+ }
if (hard_reset) {
+		/* We don't know the state of the CPU, so make sure it is
+		 * stopped by any means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
+
+ msleep(cpu_timeout_ms);
+
goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
goya_disable_clk_rlx(hdev);
goya_set_pll_refclk(hdev);
@@ -3701,9 +3775,8 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
@@ -3775,9 +3848,8 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
@@ -3946,8 +4018,7 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
*val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4003,8 +4074,7 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4048,9 +4118,8 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
*val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4092,9 +4161,8 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4627,7 +4695,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
sizeof(struct packet_msg_prot);
- cb = hl_cb_kernel_create(hdev, cb_size);
+ cb = hl_cb_kernel_create(hdev, cb_size, false);
if (!cb)
return -ENOMEM;
@@ -5157,19 +5225,14 @@ static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
return RREG32(mmHW_STATE);
}
-u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
+static int goya_ctx_init(struct hl_ctx *ctx)
{
- return cq_idx;
-}
-
-static void goya_ext_queue_init(struct hl_device *hdev, u32 q_idx)
-{
-
+ return 0;
}
-static void goya_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
-
+ return cq_idx;
}
static u32 goya_get_signal_cb_size(struct hl_device *hdev)
@@ -5280,13 +5343,12 @@ static const struct hl_asic_funcs goya_funcs = {
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = goya_halt_coresight,
+ .ctx_init = goya_ctx_init,
.get_clk_rate = goya_get_clk_rate,
.get_queue_id_for_cq = goya_get_queue_id_for_cq,
.read_device_fw_version = goya_read_device_fw_version,
.load_firmware_to_device = goya_load_firmware_to_device,
.load_boot_fit_to_device = goya_load_boot_fit_to_device,
- .ext_queue_init = goya_ext_queue_init,
- .ext_queue_reset = goya_ext_queue_reset,
.get_signal_cb_size = goya_get_signal_cb_size,
.get_wait_cb_size = goya_get_wait_cb_size,
.gen_signal_cb = goya_gen_signal_cb,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index d36f8d90c9c9..bb7474ee9784 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -9,12 +9,12 @@
#define GOYAP_H_
#include <uapi/misc/habanalabs.h>
-#include "habanalabs.h"
-#include "include/hl_boot_if.h"
-#include "include/goya/goya_packets.h"
-#include "include/goya/goya.h"
-#include "include/goya/goya_async_events.h"
-#include "include/goya/goya_fw_if.h"
+#include "../common/habanalabs.h"
+#include "../include/common/hl_boot_if.h"
+#include "../include/goya/goya_packets.h"
+#include "../include/goya/goya.h"
+#include "../include/goya/goya_async_events.h"
+#include "../include/goya/goya_fw_if.h"
#define NUMBER_OF_CMPLT_QUEUES 5
#define NUMBER_OF_EXT_HW_QUEUES 5
@@ -31,10 +31,6 @@
*/
#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1)
-#if (NUMBER_OF_HW_QUEUES >= HL_MAX_QUEUES)
-#error "Number of H/W queues must be smaller than HL_MAX_QUEUES"
-#endif
-
#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES)
#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES"
#endif
@@ -57,6 +53,12 @@
#define GOYA_DEFAULT_CARD_NAME "HL1000"
+#define GOYA_MAX_PENDING_CS 64
+
+#if !IS_MAX_PENDING_CS_VALID(GOYA_MAX_PENDING_CS)
+#error "GOYA_MAX_PENDING_CS must be power of 2 and greater than 1"
+#endif
+
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
@@ -164,7 +166,7 @@ struct goya_device {
u8 device_cpu_mmu_mappings_done;
};
-void goya_get_fixed_properties(struct hl_device *hdev);
+int goya_get_fixed_properties(struct hl_device *hdev);
int goya_mmu_init(struct hl_device *hdev);
void goya_init_dma_qmans(struct hl_device *hdev);
void goya_init_mme_qmans(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 1258724ea510..b03912483de0 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -6,9 +6,9 @@
*/
#include "goyaP.h"
-#include "include/goya/goya_coresight.h"
-#include "include/goya/asic_reg/goya_regs.h"
-#include "include/goya/asic_reg/goya_masks.h"
+#include "../include/goya/goya_coresight.h"
+#include "../include/goya/asic_reg/goya_regs.h"
+#include "../include/goya/asic_reg/goya_masks.h"
#include <uapi/misc/habanalabs.h>
@@ -232,6 +232,7 @@ static int goya_config_stm(struct hl_device *hdev,
{
struct hl_debug_params_stm *input;
u64 base_reg;
+ u32 frequency;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
@@ -264,7 +265,10 @@ static int goya_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xE20, 0xFFFFFFFF);
WREG32(base_reg + 0xEF4, input->id);
WREG32(base_reg + 0xDF4, 0x80);
- WREG32(base_reg + 0xE8C, input->frequency);
+ frequency = hdev->asic_prop.psoc_timestamp_frequency;
+ if (frequency == 0)
+ frequency = input->frequency;
+ WREG32(base_reg + 0xE8C, frequency);
WREG32(base_reg + 0xE90, 0x7FF);
WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
} else {
@@ -640,7 +644,6 @@ static int goya_config_spmu(struct hl_device *hdev,
int goya_debug_coresight(struct hl_device *hdev, void *data)
{
struct hl_debug_params *params = data;
- u32 val;
int rc = 0;
switch (params->op) {
@@ -672,7 +675,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
return rc;
}
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index de8297001fea..14701836f92b 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -6,7 +6,7 @@
*/
#include "goyaP.h"
-#include "include/goya/asic_reg/goya_regs.h"
+#include "../include/goya/asic_reg/goya_regs.h"
/*
* goya_set_block_as_protected - set the given block as protected
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/common/armcp_if.h
index a34fc39ad87e..07f9972db28d 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/common/armcp_if.h
@@ -19,9 +19,19 @@ struct hl_eq_header {
__le32 ctl;
};
+struct hl_eq_ecc_data {
+ __le64 ecc_address;
+ __le64 ecc_syndrom;
+ __u8 memory_wrapper_idx;
+ __u8 pad[7];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
- __le64 data[7];
+ union {
+ struct hl_eq_ecc_data ecc_data;
+ __le64 data[7];
+ };
};
#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
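
The anonymous union overlays the firmware's ECC report on the existing 7 x 64-bit raw payload, so HL_EQ_ENTRY_SIZE is unchanged: hl_eq_ecc_data occupies 8 + 8 + 1 + 7 = 24 bytes, well inside the 56-byte payload. A userspace sketch of the layout, with hl_eq_header reduced here to two 32-bit words purely for illustration:

#include <assert.h>
#include <stdint.h>

struct demo_eq_header {
	uint32_t id;
	uint32_t ctl;
};

struct demo_eq_ecc_data {
	uint64_t ecc_address;
	uint64_t ecc_syndrom;
	uint8_t  memory_wrapper_idx;
	uint8_t  pad[7];
};

struct demo_eq_entry {
	struct demo_eq_header hdr;
	union {
		struct demo_eq_ecc_data ecc_data;
		uint64_t data[7];
	};
};

int main(void)
{
	/* The union does not grow the entry: the payload is 7 * 8 bytes
	 * with or without the ECC view.
	 */
	assert(sizeof(struct demo_eq_ecc_data) == 24);
	assert(sizeof(struct demo_eq_entry) ==
	       sizeof(struct demo_eq_header) + 7 * sizeof(uint64_t));
	return 0;
}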
@@ -276,6 +286,8 @@ struct armcp_packet {
/* For get Armcp info/EEPROM data */
__le32 data_max_size;
};
+
+ __le32 reserved;
};
struct armcp_unmask_irq_arr_packet {
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index c22d134e73af..bb67cafc6e00 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -44,6 +44,15 @@
* The NIC FW loading and initialization
* failed. This means NICs are not usable.
*
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust), data packets authentication.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -57,6 +66,8 @@
#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << 4)
#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << 5)
#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -79,7 +90,10 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
/* Last boot loader progress status, ready to receive commands */
CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ /* Internal Boot finished, ready for boot-fit */
CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
+ /* Internal Security has been initialized, device can be accessed */
+ CPU_BOOT_STATUS_SECURITY_READY = 17,
};
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/common/qman_if.h
index 0fdb49188ed7..0fdb49188ed7 100644
--- a/drivers/misc/habanalabs/include/qman_if.h
+++ b/drivers/misc/habanalabs/include/common/qman_if.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index 85e3b5148595..f92dc53af074 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -91,18 +91,16 @@
#include "psoc_pci_pll_regs.h"
#include "psoc_hbm_pll_regs.h"
+#include "psoc_cpu_pll_regs.h"
-#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
-#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
-#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
-#define GAUDI_ECC_SERR0_OFFSET 0xF30
-#define GAUDI_ECC_SERR1_OFFSET 0xF34
-#define GAUDI_ECC_SERR2_OFFSET 0xF38
-#define GAUDI_ECC_SERR3_OFFSET 0xF3C
-#define GAUDI_ECC_DERR0_OFFSET 0xF40
-#define GAUDI_ECC_DERR1_OFFSET 0xF44
-#define GAUDI_ECC_DERR2_OFFSET 0xF48
-#define GAUDI_ECC_DERR3_OFFSET 0xF4C
+#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
+#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
+#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
+#define GAUDI_ECC_MEM_INFO_CLR_OFFSET 0xF28
+#define GAUDI_ECC_MEM_INFO_CLR_SERR_MASK BIT(8)
+#define GAUDI_ECC_MEM_INFO_CLR_DERR_MASK BIT(9)
+#define GAUDI_ECC_SERR0_OFFSET 0xF30
+#define GAUDI_ECC_DERR0_OFFSET 0xF40
#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x492000
#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x494000
@@ -294,6 +292,7 @@
#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
+#define mmPCIE_AUX_FLR_CTRL 0xC07394
#define mmPCIE_AUX_DBI 0xC07490
#endif /* ASIC_REG_GAUDI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
new file mode 100644
index 000000000000..2585c70f59ef
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_CPU_PLL_REGS_H_
+#define ASIC_REG_PSOC_CPU_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_CPU_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_CPU_PLL_NR 0xC70100
+
+#define mmPSOC_CPU_PLL_NF 0xC70104
+
+#define mmPSOC_CPU_PLL_OD 0xC70108
+
+#define mmPSOC_CPU_PLL_NB 0xC7010C
+
+#define mmPSOC_CPU_PLL_CFG 0xC70110
+
+#define mmPSOC_CPU_PLL_LOSE_MASK 0xC70120
+
+#define mmPSOC_CPU_PLL_LOCK_INTR 0xC70128
+
+#define mmPSOC_CPU_PLL_LOCK_BYPASS 0xC7012C
+
+#define mmPSOC_CPU_PLL_DATA_CHNG 0xC70130
+
+#define mmPSOC_CPU_PLL_RST 0xC70134
+
+#define mmPSOC_CPU_PLL_SLIP_WD_CNTR 0xC70150
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_0 0xC70200
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_1 0xC70204
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_2 0xC70208
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_3 0xC7020C
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_0 0xC70220
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_1 0xC70224
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_2 0xC70228
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_3 0xC7022C
+
+#define mmPSOC_CPU_PLL_DIV_SEL_0 0xC70280
+
+#define mmPSOC_CPU_PLL_DIV_SEL_1 0xC70284
+
+#define mmPSOC_CPU_PLL_DIV_SEL_2 0xC70288
+
+#define mmPSOC_CPU_PLL_DIV_SEL_3 0xC7028C
+
+#define mmPSOC_CPU_PLL_DIV_EN_0 0xC702A0
+
+#define mmPSOC_CPU_PLL_DIV_EN_1 0xC702A4
+
+#define mmPSOC_CPU_PLL_DIV_EN_2 0xC702A8
+
+#define mmPSOC_CPU_PLL_DIV_EN_3 0xC702AC
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_0 0xC702C0
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_1 0xC702C4
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_2 0xC702C8
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_3 0xC702CC
+
+#define mmPSOC_CPU_PLL_CLK_GATER 0xC70300
+
+#define mmPSOC_CPU_PLL_CLK_RLX_0 0xC70310
+
+#define mmPSOC_CPU_PLL_CLK_RLX_1 0xC70314
+
+#define mmPSOC_CPU_PLL_CLK_RLX_2 0xC70318
+
+#define mmPSOC_CPU_PLL_CLK_RLX_3 0xC7031C
+
+#define mmPSOC_CPU_PLL_REF_CNTR_PERIOD 0xC70400
+
+#define mmPSOC_CPU_PLL_REF_LOW_THRESHOLD 0xC70410
+
+#define mmPSOC_CPU_PLL_REF_HIGH_THRESHOLD 0xC70420
+
+#define mmPSOC_CPU_PLL_PLL_NOT_STABLE 0xC70430
+
+#define mmPSOC_CPU_PLL_FREQ_CALC_EN 0xC70440
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_CFG 0xC70500
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_0 0xC70510
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_1 0xC70514
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_2 0xC70518
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_3 0xC7051C
+
+#endif /* ASIC_REG_PSOC_CPU_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 96f08050ef0f..13ef6b2887fd 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -455,4 +455,7 @@ enum axi_id {
QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
+#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
+#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2
+
#endif /* GAUDI_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
index 0f0cd067bb43..f30f2c0458d7 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -85,7 +85,7 @@ struct packet_msg_long {
};
#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT 0
-#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x0000EFFF
+#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x00007FFF
#define GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT 31
#define GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK 0x80000000
@@ -141,7 +141,7 @@ struct packet_msg_prot {
#define GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK 0x00FF0000
#define GAUDI_PKT_FENCE_CFG_ID_SHIFT 30
-#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC000000
+#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC0000000
#define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0
#define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 927309b86bab..fea3ae9d8686 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -207,7 +207,7 @@ static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
static inline int fifo_sz(int nr_entry)
{
/* size of a fifo is determined by the number of entries it contains */
- return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
+ return nr_entry * sizeof(u64) + FIFOHANDLESIZE;
}
static void fifo_setup(void *base_addr, int nr_entry)
@@ -256,7 +256,8 @@ static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
memset_io(device_ccb, 0, sizeof(struct ccb));
/* free resources used to back send/recv queues */
- pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
+ dma_free_coherent(&pdev->dev, data->dma_size, data->dma_va,
+ data->dma_pa);
}
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
@@ -272,16 +273,14 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
2 * desc_mem_sz(NR_QENTRY) +
ILO_START_ALIGN + ILO_CACHE_SZ;
- data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
- &data->dma_pa);
+ data->dma_va = dma_alloc_coherent(&hw->ilo_dev->dev, data->dma_size,
+ &data->dma_pa, GFP_ATOMIC);
if (!data->dma_va)
return -ENOMEM;
dma_va = (char *)data->dma_va;
dma_pa = data->dma_pa;
- memset(dma_va, 0, data->dma_size);
-
dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
dma_pa = roundup(dma_pa, ILO_START_ALIGN);
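
This hpilo hunk replaces the deprecated pci_alloc_consistent()/pci_free_consistent() wrappers with dma_alloc_coherent()/dma_free_coherent(); the explicit memset() is dropped because dma_alloc_coherent() already returns zeroed memory. A kernel-style sketch of the replacement pattern, with illustrative function names; the GFP_ATOMIC flag mirrors the conversion above, while probe paths can usually use GFP_KERNEL:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Allocate a coherent DMA buffer; the returned memory is already
 * zeroed, so no memset() is needed.
 */
static void *demo_alloc_ring(struct pci_dev *pdev, size_t size,
			     dma_addr_t *dma_pa)
{
	return dma_alloc_coherent(&pdev->dev, size, dma_pa, GFP_ATOMIC);
}

static void demo_free_ring(struct pci_dev *pdev, size_t size,
			   void *va, dma_addr_t dma_pa)
{
	dma_free_coherent(&pdev->dev, size, va, dma_pa);
}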
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 1aa433a7f66c..f69ff645cac9 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -160,23 +160,23 @@ struct ccb_data {
#define ILO_START_ALIGN 4096
#define ILO_CACHE_SZ 128
struct fifo {
- u64 nrents; /* user requested number of fifo entries */
- u64 imask; /* mask to extract valid fifo index */
- u64 merge; /* O/C bits to merge in during enqueue operation */
- u64 reset; /* set to non-zero when the target device resets */
- u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];
+ u64 nrents; /* user requested number of fifo entries */
+ u64 imask; /* mask to extract valid fifo index */
+ u64 merge; /* O/C bits to merge in during enqueue operation */
+ u64 reset; /* set to non-zero when the target device resets */
+ u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];
- u64 head;
- u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];
+ u64 head;
+ u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];
- u64 tail;
- u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];
+ u64 tail;
+ u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];
- u64 fifobar[1];
+ u64 fifobar[];
};
/* convert between struct fifo, and the fifobar, which is saved in the ccb */
-#define FIFOHANDLESIZE (sizeof(struct fifo) - sizeof(u64))
+#define FIFOHANDLESIZE (sizeof(struct fifo))
#define FIFOBARTOHANDLE(_fifo) \
((struct fifo *)(((char *)(_fifo)) - FIFOHANDLESIZE))
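
Turning fifobar[1] into a C99 flexible array member changes sizeof(struct fifo): the placeholder element is no longer counted, which is why FIFOHANDLESIZE drops its "- sizeof(u64)" correction in the same hunk. A reduced userspace sketch of the size relationship, keeping only two fields for brevity:

#include <assert.h>
#include <stdint.h>

struct demo_fifo_old {
	uint64_t head;
	uint64_t fifobar[1];	/* old style: placeholder counted in sizeof */
};

struct demo_fifo_new {
	uint64_t head;
	uint64_t fifobar[];	/* flexible array member: not counted */
};

int main(void)
{
	/* The old handle size subtracted the placeholder element; with a
	 * flexible array member sizeof() already excludes it.
	 */
	assert(sizeof(struct demo_fifo_old) - sizeof(uint64_t) ==
	       sizeof(struct demo_fifo_new));
	return 0;
}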
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 2863657fa268..733dd30fbacc 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -94,7 +94,7 @@ static inline void do_exec_command(struct service_processor *sp)
}
}
-/**
+/*
* exec_command
* send a command to a service processor
* Commands are executed sequentially. One command (sp->current_command)
@@ -140,7 +140,7 @@ static void exec_next_command(struct service_processor *sp)
}
}
-/**
+/*
* Sleep until a command has failed or a response has been received
* and the command status been updated by the interrupt handler.
* (see receive_response).
@@ -153,7 +153,7 @@ void ibmasm_wait_for_response(struct command *cmd, int timeout)
timeout * HZ);
}
-/**
+/*
* receive_command_response
* called by the interrupt handler when a dot command of type command_response
* was received.
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 70273a4cb352..df389bd4c9df 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -10,7 +10,7 @@
#include "ibmasm.h"
#include "dot_command.h"
-/**
+/*
* Dispatch an incoming message to the specific handler for the message.
* Called from interrupt context.
*/
@@ -48,7 +48,7 @@ void ibmasm_receive_message(struct service_processor *sp, void *message, int mes
#define INIT_BUFFER_SIZE 32
-/**
+/*
* send the 4.3.5.10 dot command (driver VPD) to the service processor
*/
int ibmasm_send_driver_vpd(struct service_processor *sp)
@@ -99,7 +99,7 @@ struct os_state_command {
unsigned char data;
};
-/**
+/*
* send the 4.3.6 dot command (os state) to the service processor
* During driver init this function is called with os state "up".
* This causes the service processor to start sending heartbeats the
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 974d63f5a4dd..40ce75f8970c 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -31,7 +31,7 @@ static void wake_up_event_readers(struct service_processor *sp)
wake_up_interruptible(&reader->wait);
}
-/**
+/*
* receive_event
* Called by the interrupt handler when a dot command of type sp_event is
* received.
@@ -68,7 +68,7 @@ static inline int event_available(struct event_buffer *b, struct event_reader *r
return (r->next_serial_number < b->next_serial_number);
}
-/**
+/*
* get_next_event
* Called by event readers (initiated from user space through the file
* system).
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index 6567df638ea9..21c9b6a6f2c3 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -39,7 +39,7 @@ void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_
rhb->stopped = 0;
}
-/**
+/*
* start_reverse_heartbeat
* Loop forever, sending a reverse heartbeat dot command to the service
* processor, then sleeping. The loop comes to an end if the service
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 2ed23c99f59f..c0d139c26505 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -760,7 +760,7 @@ static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
* @adapter: crq_server_adapter struct
* @buffer: ibmvmc_buffer struct
* @hmc: ibmvmc_hmc struct
- * @msg_length: message length field
+ * @msg_len: message length field
*
* This command is sent between the management partition and the hypervisor
* in order to signal the arrival of an HMC protocol message. The command
@@ -1028,7 +1028,7 @@ static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
* ibmvmc_write - Write
*
* @file: file struct
- * @buf: Character buffer
+ * @buffer: Character buffer
* @count: Count field
* @ppos: Offset
*
@@ -1347,7 +1347,7 @@ static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
/**
* ibmvmc_ioctl - IOCTL
*
- * @session: ibmvmc_file_session struct
+ * @file: file information
* @cmd: cmd field
* @arg: Argument field
*
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 884485c3f723..5eaf74447ca1 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -67,7 +67,6 @@ static void firmware_load(const struct firmware *fw, void *context)
struct spi_device *spi = (struct spi_device *)context;
struct fpga_data *data = spi_get_drvdata(spi);
u8 *buffer;
- int ret;
u8 txbuf[8];
u8 rxbuf[8];
int rx_len = 8;
@@ -92,7 +91,7 @@ static void firmware_load(const struct firmware *fw, void *context)
/* Trying to speak with the FPGA via SPI... */
txbuf[0] = FPGA_CMD_READ_ID;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
jedec_id = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", jedec_id);
@@ -110,7 +109,7 @@ static void firmware_load(const struct firmware *fw, void *context)
dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
@@ -130,20 +129,20 @@ static void firmware_load(const struct firmware *fw, void *context)
memcpy(buffer + 4, fw->data, fw->size);
txbuf[0] = FPGA_CMD_REFRESH;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_WRITE_EN;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_CLEAR;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
/*
* Wait for FPGA memory to become cleared
*/
for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
if (status == FPGA_STATUS_CLEARED)
break;
@@ -160,13 +159,13 @@ static void firmware_load(const struct firmware *fw, void *context)
}
dev_info(&spi->dev, "Configuring the FPGA...\n");
- ret = spi_write(spi, buffer, fw->size + 8);
+ spi_write(spi, buffer, fw->size + 8);
txbuf[0] = FPGA_CMD_WRITE_DIS;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 736675f0a246..4dfbfd51bdf7 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -13,7 +13,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-#ifdef CONFIG_X86_32
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif
@@ -118,9 +118,8 @@ noinline void lkdtm_CORRUPT_STACK(void)
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *));
- __lkdtm_CORRUPT_STACK(&data);
-
- pr_info("Corrupted stack containing char array ...\n");
+ pr_info("Corrupting stack containing char array ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
}
/* Same as above but will only get a canary with -fstack-protector-strong */
@@ -131,9 +130,8 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
unsigned long *ptr;
} data __aligned(sizeof(void *));
- __lkdtm_CORRUPT_STACK(&data);
-
- pr_info("Corrupted stack containing union ...\n");
+ pr_info("Corrupting stack containing union ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
@@ -248,6 +246,7 @@ void lkdtm_ARRAY_BOUNDS(void)
kfree(not_checked);
kfree(checked);
+ pr_err("FAIL: survived array bounds overflow!\n");
}
void lkdtm_CORRUPT_LIST_ADD(void)
@@ -334,7 +333,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void)
byte = *ptr;
- pr_err("FAIL: accessed page before stack!\n");
+ pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
@@ -348,7 +347,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
byte = *ptr;
- pr_err("FAIL: accessed page after stack!\n");
+ pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
void lkdtm_UNSET_SMEP(void)
@@ -419,7 +418,7 @@ void lkdtm_UNSET_SMEP(void)
void lkdtm_DOUBLE_FAULT(void)
{
-#ifdef CONFIG_X86_32
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
/*
* Trigger #DF by setting the stack limit to zero. This clobbers
* a GDT TLS slot, which is okay because the current task will die
@@ -454,38 +453,42 @@ void lkdtm_DOUBLE_FAULT(void)
#endif
}
-#ifdef CONFIG_ARM64_PTR_AUTH
+#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
- /* Reset the keys of current task */
- ptrauth_thread_init_kernel(current);
- ptrauth_thread_switch_kernel(current);
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
+ /* Reset the keys of current task */
+ ptrauth_thread_init_kernel(current);
+ ptrauth_thread_switch_kernel(current);
+ }
}
+#endif
-#define CORRUPT_PAC_ITERATE 10
noinline void lkdtm_CORRUPT_PAC(void)
{
+#ifdef CONFIG_ARM64
+#define CORRUPT_PAC_ITERATE 10
int i;
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+ pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");
+
if (!system_supports_address_auth()) {
- pr_err("FAIL: arm64 pointer authentication feature not present\n");
+ pr_err("FAIL: CPU lacks pointer authentication feature\n");
return;
}
- pr_info("Change the PAC parameters to force function return failure\n");
+ pr_info("changing PAC parameters to force function return failure...\n");
/*
- * Pac is a hash value computed from input keys, return address and
+ * PAC is a hash value computed from input keys, return address and
	 * stack pointer. Since the PAC has fewer bits, there is a chance of
	 * collision, so iterate a few times to reduce the collision probability.
*/
for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
change_pac_parameters();
- pr_err("FAIL: %s test failed. Kernel may be unstable from here\n", __func__);
-}
-#else /* !CONFIG_ARM64_PTR_AUTH */
-noinline void lkdtm_CORRUPT_PAC(void)
-{
- pr_err("FAIL: arm64 pointer authentication config disabled\n");
-}
+ pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
+#else
+ pr_err("XFAIL: this test is arm64-only\n");
#endif
+}
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 3c5cec85edce..1323bc16f113 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -58,11 +58,12 @@ void lkdtm_READ_AFTER_FREE(void)
int *base, *val, saw;
size_t len = 1024;
/*
- * The slub allocator uses the first word to store the free
- * pointer in some configurations. Use the middle of the
- * allocation to avoid running into the freelist
+	 * The slub allocator will use either the first word or
+	 * the middle of the allocation to store the free pointer,
+	 * depending on configuration. Store in the second word to
+ * avoid running into the freelist.
*/
- size_t offset = (len / sizeof(*base)) / 2;
+ size_t offset = sizeof(*base);
base = kmalloc(len, GFP_KERNEL);
if (!base) {
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 601a2156a0d4..8878538b2c13 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -31,9 +31,7 @@ void lkdtm_CORRUPT_USER_DS(void);
void lkdtm_STACK_GUARD_PAGE_LEADING(void);
void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
void lkdtm_UNSET_SMEP(void);
-#ifdef CONFIG_X86_32
void lkdtm_DOUBLE_FAULT(void);
-#endif
void lkdtm_CORRUPT_PAC(void);
/* lkdtm_heap.c */
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 62f76d506f04..2dede2ef658f 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -57,6 +57,7 @@ static noinline void execute_location(void *dst, bool write)
}
pr_info("attempting bad execution at %px\n", func);
func();
+ pr_err("FAIL: func returned\n");
}
static void execute_user_location(void *dst)
@@ -75,20 +76,22 @@ static void execute_user_location(void *dst)
return;
pr_info("attempting bad execution at %px\n", func);
func();
+ pr_err("FAIL: func returned\n");
}
void lkdtm_WRITE_RO(void)
{
- /* Explicitly cast away "const" for the test. */
- unsigned long *ptr = (unsigned long *)&rodata;
+ /* Explicitly cast away "const" for the test and make volatile. */
+ volatile unsigned long *ptr = (unsigned long *)&rodata;
pr_info("attempting bad rodata write at %px\n", ptr);
*ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
}
void lkdtm_WRITE_RO_AFTER_INIT(void)
{
- unsigned long *ptr = &ro_after_init;
+ volatile unsigned long *ptr = &ro_after_init;
/*
* Verify we were written to during init. Since an Oops
@@ -102,19 +105,21 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
pr_info("attempting bad ro_after_init write at %px\n", ptr);
*ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
}
void lkdtm_WRITE_KERN(void)
{
size_t size;
- unsigned char *ptr;
+ volatile unsigned char *ptr;
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
pr_info("attempting bad %zu byte write at %px\n", size, ptr);
- memcpy(ptr, (unsigned char *)do_nothing, size);
+ memcpy((void *)ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+ pr_err("FAIL: survived bad write\n");
do_overwritten();
}
@@ -193,9 +198,11 @@ void lkdtm_ACCESS_USERSPACE(void)
pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
vm_munmap(user_addr, PAGE_SIZE);
}
@@ -203,19 +210,20 @@ void lkdtm_ACCESS_USERSPACE(void)
void lkdtm_ACCESS_NULL(void)
{
unsigned long tmp;
- unsigned long *ptr = (unsigned long *)NULL;
+ volatile unsigned long *ptr = (unsigned long *)NULL;
pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
}
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
-
}
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index e172719dd86d..b833367a45d0 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -304,19 +304,22 @@ void lkdtm_USERCOPY_KERNEL(void)
return;
}
- pr_info("attempting good copy_to_user from kernel rodata\n");
+ pr_info("attempting good copy_to_user from kernel rodata: %px\n",
+ test_text);
if (copy_to_user((void __user *)user_addr, test_text,
unconst + sizeof(test_text))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
- pr_info("attempting bad copy_to_user from kernel text\n");
+ pr_info("attempting bad copy_to_user from kernel text: %px\n",
+ vm_mmap);
if (copy_to_user((void __user *)user_addr, vm_mmap,
unconst + PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
+ pr_err("FAIL: survived bad copy_to_user()\n");
free_user:
vm_munmap(user_addr, PAGE_SIZE);
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 9d7b3719bfa0..f5fd5b786607 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -9,7 +9,7 @@ config INTEL_MEI
if selected /dev/mei misc device will be created.
For more information see
- <http://software.intel.com/en-us/manageability/>
+ <https://software.intel.com/en-us/manageability/>
config INTEL_MEI_ME
tristate "ME Enabled Intel Chipsets"
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 910f059b3384..07ba16d46690 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -159,17 +159,17 @@ static int mei_osver(struct mei_cl_device *cldev)
static int mei_fwver(struct mei_cl_device *cldev)
{
char buf[MKHI_FWVER_BUF_LEN];
- struct mkhi_msg *req;
+ struct mkhi_msg req;
+ struct mkhi_msg *rsp;
struct mkhi_fw_ver *fwver;
int bytes_recv, ret, i;
memset(buf, 0, sizeof(buf));
- req = (struct mkhi_msg *)buf;
- req->hdr.group_id = MKHI_GEN_GROUP_ID;
- req->hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
+ req.hdr.group_id = MKHI_GEN_GROUP_ID;
+ req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
- ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr),
+ ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req),
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
@@ -188,7 +188,8 @@ static int mei_fwver(struct mei_cl_device *cldev)
return -EIO;
}
- fwver = (struct mkhi_fw_ver *)req->data;
+ rsp = (struct mkhi_msg *)buf;
+ fwver = (struct mkhi_fw_ver *)rsp->data;
memset(cldev->bus->fw_ver, 0, sizeof(cldev->bus->fw_ver));
for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) {
if ((size_t)bytes_recv < MKHI_FWVER_LEN(i + 1))
@@ -329,16 +330,14 @@ static int mei_nfc_if_version(struct mei_cl *cl,
WARN_ON(mutex_is_locked(&bus->device_lock));
- ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd),
- MEI_CL_IO_TX_BLOCKING);
+ ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(bus->dev, "Could not send IF version cmd\n");
return ret;
}
/* to be sure on the stack we alloc memory */
- if_version_length = sizeof(struct mei_nfc_reply) +
- sizeof(struct mei_nfc_if_version);
+ if_version_length = sizeof(*reply) + sizeof(*ver);
reply = kzalloc(if_version_length, GFP_KERNEL);
if (!reply)
@@ -352,7 +351,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
goto err;
}
- memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version));
+ memcpy(ver, reply->data, sizeof(*ver));
dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
ver->fw_ivn, ver->vendor_id, ver->radio_type);
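The mei hunks above repeatedly replace sizeof(struct foo) with sizeof(var) or sizeof(*ptr), so the size expression follows the variable rather than a hard-coded type. A minimal stand-alone sketch of the idiom, with a hypothetical message layout and names (not taken from the patch):

#include <stdlib.h>
#include <string.h>

/* Hypothetical message layout, used only to illustrate the idiom. */
struct msg_hdr { unsigned char group_id; unsigned char command; };
struct msg     { struct msg_hdr hdr; unsigned char data[16]; };

int main(void)
{
	struct msg req;
	struct msg *rsp;

	/* sizeof(req) and sizeof(*rsp) track the variable's type, so the
	 * expressions stay correct even if the type of req or rsp changes. */
	memset(&req, 0, sizeof(req));
	req.hdr.command = 0x02;

	rsp = calloc(1, sizeof(*rsp));
	if (!rsp)
		return 1;

	free(rsp);
	return 0;
}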
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index f476dbc7252b..a6dfc3ce1db2 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -931,7 +931,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
struct mei_cl_device *cldev;
struct mei_cl *cl;
- cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
+ cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
if (!cldev)
return NULL;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index b32c825a0945..2572887d99b6 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -369,7 +369,7 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
{
struct mei_cl_cb *cb;
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
@@ -552,7 +552,7 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
*/
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
- memset(cl, 0, sizeof(struct mei_cl));
+ memset(cl, 0, sizeof(*cl));
init_waitqueue_head(&cl->wait);
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
@@ -575,7 +575,7 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
struct mei_cl *cl;
- cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+ cl = kmalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return NULL;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index a44094cdbc36..308caee86920 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
@@ -257,22 +257,21 @@ int mei_hbm_start_wait(struct mei_device *dev)
int mei_hbm_start_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_host_version_request start_req;
- const size_t len = sizeof(struct hbm_host_version_request);
+ struct hbm_host_version_request req;
int ret;
mei_hbm_reset(dev);
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
/* host start message */
- memset(&start_req, 0, len);
- start_req.hbm_cmd = HOST_START_REQ_CMD;
- start_req.host_version.major_version = HBM_MAJOR_VERSION;
- start_req.host_version.minor_version = HBM_MINOR_VERSION;
+ memset(&req, 0, sizeof(req));
+ req.hbm_cmd = HOST_START_REQ_CMD;
+ req.host_version.major_version = HBM_MAJOR_VERSION;
+ req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- ret = mei_hbm_write_message(dev, &mei_hdr, &start_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
@@ -295,13 +294,12 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_dma_setup_request req;
- const size_t len = sizeof(struct hbm_dma_setup_request);
unsigned int i;
int ret;
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD;
for (i = 0; i < DMA_DSCR_NUM; i++) {
phys_addr_t paddr;
@@ -337,21 +335,19 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_host_enum_request enum_req;
- const size_t len = sizeof(struct hbm_host_enum_request);
+ struct hbm_host_enum_request req;
int ret;
/* enumerate clients */
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&enum_req, 0, len);
- enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
- enum_req.flags |= dev->hbm_f_dc_supported ?
- MEI_HBM_ENUM_F_ALLOW_ADD : 0;
- enum_req.flags |= dev->hbm_f_ie_supported ?
+ memset(&req, 0, sizeof(req));
+ req.hbm_cmd = HOST_ENUM_REQ_CMD;
+ req.flags |= dev->hbm_f_dc_supported ? MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+ req.flags |= dev->hbm_f_ie_supported ?
MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
- ret = mei_hbm_write_message(dev, &mei_hdr, &enum_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
@@ -380,7 +376,7 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
mei_me_cl_rm_by_uuid(dev, uuid);
- me_cl = kzalloc(sizeof(struct mei_me_client), GFP_KERNEL);
+ me_cl = kzalloc(sizeof(*me_cl), GFP_KERNEL);
if (!me_cl)
return -ENOMEM;
@@ -408,14 +404,13 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
{
struct mei_msg_hdr mei_hdr;
struct hbm_add_client_response resp;
- const size_t len = sizeof(struct hbm_add_client_response);
int ret;
dev_dbg(dev->dev, "adding client response\n");
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(resp));
- memset(&resp, 0, sizeof(struct hbm_add_client_response));
+ memset(&resp, 0, sizeof(resp));
resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
resp.me_addr = addr;
resp.status = status;
@@ -469,11 +464,10 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_msg_hdr mei_hdr;
struct hbm_notification_request req;
- const size_t len = sizeof(struct hbm_notification_request);
int ret;
- mei_hbm_hdr(&mei_hdr, len);
- mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
+ mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, sizeof(req));
req.start = start;
@@ -580,8 +574,7 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_props_request prop_req;
- const size_t len = sizeof(struct hbm_props_request);
+ struct hbm_props_request req;
unsigned long addr;
int ret;
@@ -591,18 +584,17 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
if (addr == MEI_CLIENTS_MAX) {
dev->hbm_state = MEI_HBM_STARTED;
mei_host_client_init(dev);
-
return 0;
}
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&prop_req, 0, sizeof(struct hbm_props_request));
+ memset(&req, 0, sizeof(req));
- prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req.me_addr = addr;
+ req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ req.me_addr = addr;
- ret = mei_hbm_write_message(dev, &mei_hdr, &prop_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
@@ -628,15 +620,14 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
{
struct mei_msg_hdr mei_hdr;
struct hbm_power_gate req;
- const size_t len = sizeof(struct hbm_power_gate);
int ret;
if (!dev->hbm_f_pg_supported)
return -EOPNOTSUPP;
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = pg_cmd;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
@@ -657,11 +648,10 @@ static int mei_hbm_stop_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_host_stop_request req;
- const size_t len = sizeof(struct hbm_host_stop_request);
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = HOST_STOP_REQ_CMD;
req.reason = DRIVER_STOP_REQUEST;
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 4c596c646ac0..d1d3e025ca0e 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -572,7 +572,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
HDCP_2_2_MPRIME_LEN);
drm_hdcp_cpu_to_be24(verify_mprime_in.seq_num_m, data->seq_num_m);
memcpy(verify_mprime_in.streams, data->streams,
- (data->k * sizeof(struct hdcp2_streamid_type)));
+ array_size(data->k, sizeof(*data->streams)));
verify_mprime_in.k = cpu_to_be16(data->k);
@@ -852,7 +852,7 @@ static int mei_hdcp_remove(struct mei_cl_device *cldev)
#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
-static struct mei_cl_device_id mei_hdcp_tbl[] = {
+static const struct mei_cl_device_id mei_hdcp_tbl[] = {
{ .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
{ }
};
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index 18ffc773fa18..834757f5e072 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -358,7 +358,7 @@ struct wired_cmd_repeater_auth_stream_req_in {
u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
u8 m_prime[HDCP_2_2_MPRIME_LEN];
__be16 k;
- struct hdcp2_streamid_type streams[1];
+ struct hdcp2_streamid_type streams[];
} __packed;
struct wired_cmd_repeater_auth_stream_req_out {
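The mei_hdcp change above turns a one-element trailing array into a C99 flexible array member and sizes the copy with array_size(). A stand-alone sketch of the same pattern, with hypothetical names; in-kernel code would normally use the array_size()/struct_size() helpers from <linux/overflow.h> for the arithmetic to guard against overflow:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical variable-length message; streams[] replaces the old streams[1]. */
struct stream_entry { unsigned char stream_id; unsigned char stream_type; };

struct stream_msg {
	unsigned short k;              /* number of trailing entries */
	struct stream_entry streams[]; /* flexible array member */
};

int main(void)
{
	unsigned short k = 4;
	struct stream_msg *msg;

	/* The allocation covers the fixed header plus k trailing entries. */
	msg = calloc(1, sizeof(*msg) + k * sizeof(*msg->streams));
	if (!msg)
		return 1;

	msg->k = k;
	printf("trailing payload: %zu bytes\n", k * sizeof(*msg->streams));

	free(msg);
	return 0;
}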
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 7becfc768bbc..9cf8d8f60cfe 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -59,6 +59,7 @@
#define MEI_DEV_ID_SPT 0x9D3A /* Sunrise Point */
#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */
+#define MEI_DEV_ID_SPT_3 0x9D3E /* Sunrise Point 3 (iTouch) */
#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
@@ -73,6 +74,7 @@
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
+#define MEI_DEV_ID_KBP_3 0xA2BE /* Kaby Point 3 (iTouch) */
#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
#define MEI_DEV_ID_CNP_LP_3 0x9DE4 /* Cannon Point LP 3 (iTouch) */
@@ -106,6 +108,8 @@
/* Host Firmware Status Registers in PCI Config Space */
#define PCI_CFG_HFS_1 0x40
# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
+# define PCI_CFG_HFS_1_OPMODE_MSK 0xf0000 /* OP MODE Mask: SPS <= 4.0 */
+# define PCI_CFG_HFS_1_OPMODE_SPS 0xf0000 /* SPS SKU : SPS <= 4.0 */
#define PCI_CFG_HFS_2 0x48
#define PCI_CFG_HFS_3 0x60
# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 7649710a2ab9..cda0829ac589 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -1353,11 +1353,24 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.read = mei_me_read_slots
};
-static bool mei_me_fw_type_nm(struct pci_dev *pdev)
+/**
+ * mei_me_fw_type_nm() - check for nm sku
+ *
+ * Read ME FW Status register to check for the Node Manager (NM) Firmware.
+ * The NM FW is only signaled in PCI function 0.
+ * __Note__: Deprecated by PCH8 and newer.
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of NM firmware
+ */
+static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
{
u32 reg;
+ unsigned int devfn;
- pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
return (reg & 0x600) == 0x200;
@@ -1366,20 +1379,26 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
#define MEI_CFG_FW_NM \
.quirk_probe = mei_me_fw_type_nm
-static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
+/**
+ * mei_me_fw_type_sps_4() - check for sps 4.0 sku
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in the PCI function 0.
+ * __Note__: Deprecated by SPS 5.0 and newer.
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
{
u32 reg;
unsigned int devfn;
- /*
- * Read ME FW Status register to check for SPS Firmware
- * The SPS FW is only signaled in pci function 0
- */
devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
- /* if bits [19:16] = 15, running SPS Firmware */
- return (reg & 0xf0000) == 0xf0000;
+ return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
}
#define MEI_CFG_FW_SPS_4 \
@@ -1395,7 +1414,7 @@ static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
*
* Return: true in case of SPS firmware
*/
-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
{
u32 reg;
u32 fw_type;
@@ -1411,6 +1430,9 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
}
+#define MEI_CFG_KIND_ITOUCH \
+ .kind = "itouch"
+
#define MEI_CFG_FW_SPS \
.quirk_probe = mei_me_fw_type_sps
@@ -1480,6 +1502,13 @@ static const struct mei_cfg mei_me_pch8_cfg = {
MEI_CFG_FW_VER_SUPP,
};
+/* PCH8 Lynx Point and newer devices - iTouch */
+static const struct mei_cfg mei_me_pch8_itouch_cfg = {
+ MEI_CFG_KIND_ITOUCH,
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+};
+
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
MEI_CFG_PCH8_HFS,
@@ -1509,10 +1538,11 @@ static const struct mei_cfg mei_me_pch12_sps_cfg = {
MEI_CFG_FW_SPS,
};
-/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion
- * w/o DMA support
+/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
+ * w/o DMA support.
*/
-static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = {
+static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
+ MEI_CFG_KIND_ITOUCH,
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS,
@@ -1547,11 +1577,12 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
+ [MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
- [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg,
+ [MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
};
@@ -1581,8 +1612,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(parent, sizeof(struct mei_device) +
- sizeof(struct mei_me_hw), GFP_KERNEL);
+ dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
@@ -1596,6 +1626,8 @@ struct mei_device *mei_me_dev_init(struct device *parent,
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+ dev->kind = cfg->kind;
+
return dev;
}
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 6a8973649c49..00a7132ac7a2 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -19,13 +19,15 @@
*
* @fw_status: FW status
* @quirk_probe: device exclusion quirk
+ * @kind: MEI head kind
* @dma_size: device DMA buffers size
* @fw_ver_supported: is fw version retrievable from FW
* @hw_trc_supported: does the hw support trc register
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
- bool (*quirk_probe)(struct pci_dev *pdev);
+ bool (*quirk_probe)(const struct pci_dev *pdev);
+ const char *kind;
size_t dma_size[DMA_DSCR_NUM];
u32 fw_ver_supported:1;
u32 hw_trc_supported:1;
@@ -76,6 +78,8 @@ struct mei_me_hw {
* with quirk for Node Manager exclusion.
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
* client platforms.
+ * @MEI_ME_PCH8_ITOUCH_CFG: Platform Controller Hub Gen8 and newer
+ * client platforms (iTouch).
* @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
* server platforms with quirk for
* SPS firmware exclusion.
@@ -100,11 +104,12 @@ enum mei_cfg_idx {
MEI_ME_PCH7_CFG,
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
+ MEI_ME_PCH8_ITOUCH_CFG,
MEI_ME_PCH8_SPS_4_CFG,
MEI_ME_PCH12_CFG,
MEI_ME_PCH12_SPS_4_CFG,
MEI_ME_PCH12_SPS_CFG,
- MEI_ME_PCH12_SPS_NODMA_CFG,
+ MEI_ME_PCH12_SPS_ITOUCH_CFG,
MEI_ME_PCH15_CFG,
MEI_ME_PCH15_SPS_CFG,
MEI_ME_NUM_CFG,
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 785b260b3ae9..a4e854b9b9e6 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -1201,8 +1201,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
struct mei_device *dev;
struct mei_txe_hw *hw;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
- sizeof(struct mei_txe_hw), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index b1a8d5ec88b3..26fa92cb7f7a 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -319,7 +319,7 @@ struct hbm_props_response {
u8 hbm_cmd;
u8 me_addr;
u8 status;
- u8 reserved[1];
+ u8 reserved;
struct mei_client_properties client_properties;
} __packed;
@@ -352,7 +352,7 @@ struct hbm_add_client_response {
u8 hbm_cmd;
u8 me_addr;
u8 status;
- u8 reserved[1];
+ u8 reserved;
} __packed;
/**
@@ -461,7 +461,7 @@ struct hbm_notification {
u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
- u8 reserved[1];
+ u8 reserved;
} __packed;
/**
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index f17297f2943d..86ef5c1a7928 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -476,7 +476,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
case IOCTL_MEI_CONNECT_CLIENT:
dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
if (copy_from_user(&connect_data, (char __user *)data,
- sizeof(struct mei_connect_client_data))) {
+ sizeof(connect_data))) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
@@ -488,7 +488,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
/* if all is ok, copying the data back to user. */
if (copy_to_user((char __user *)data, &connect_data,
- sizeof(struct mei_connect_client_data))) {
+ sizeof(connect_data))) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto out;
@@ -885,6 +885,30 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
}
}
+/**
+ * kind_show - display device kind
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of bytes printed into buf or error
+ */
+static ssize_t kind_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ ssize_t ret;
+
+ if (dev->kind)
+ ret = sprintf(buf, "%s\n", dev->kind);
+ else
+ ret = sprintf(buf, "%s\n", "mei");
+
+ return ret;
+}
+static DEVICE_ATTR_RO(kind);
+
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
&dev_attr_hbm_ver.attr,
@@ -893,6 +917,7 @@ static struct attribute *mei_attrs[] = {
&dev_attr_fw_ver.attr,
&dev_attr_dev_state.attr,
&dev_attr_trc.attr,
+ &dev_attr_kind.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
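The new kind attribute above is a plain read-only sysfs file. A small user-space sketch that reads it; the device path is illustrative and depends on which mei node is enumerated on a given system:

#include <stdio.h>

int main(void)
{
	/* Illustrative path; the actual node (mei0, mei1, ...) may differ. */
	const char *path = "/sys/class/mei/mei0/kind";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("kind: %s", buf); /* expected "mei" or "itouch" */
	fclose(f);
	return 0;
}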
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 3a29db07211d..d3a4f54c0ae7 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -445,6 +445,8 @@ struct mei_fw_version {
* @device_list : mei client bus list
* @cl_bus_lock : client bus list lock
*
+ * @kind : kind of mei device
+ *
* @dbgfs_dir : debugfs mei root directory
*
* @ops: : hw specific operations
@@ -528,6 +530,8 @@ struct mei_device {
struct list_head device_list;
struct mutex cl_bus_lock;
+ const char *kind;
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 2a3f2fd5df50..1de9ef7a272b 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -68,6 +68,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
@@ -81,17 +82,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
index b58608829b18..4c326e8f4d99 100644
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ b/drivers/misc/mic/card/mic_debugfs.c
@@ -24,7 +24,7 @@
/* Debugfs parent dir */
static struct dentry *mic_dbg;
-/**
+/*
* mic_intr_show - Send interrupts to host.
*/
static int mic_intr_show(struct seq_file *s, void *unused)
@@ -46,7 +46,7 @@ static int mic_intr_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(mic_intr);
-/**
+/*
* mic_create_card_debug_dir - Initialize MIC debugfs entries.
*/
void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
@@ -60,7 +60,7 @@ void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
&mic_intr_fops);
}
-/**
+/*
* mic_delete_card_debug_dir - Uninitialize MIC debugfs entries.
*/
void mic_delete_card_debug_dir(struct mic_driver *mdrv)
@@ -68,7 +68,7 @@ void mic_delete_card_debug_dir(struct mic_driver *mdrv)
debugfs_remove_recursive(mdrv->dbg_dir);
}
-/**
+/*
* mic_init_card_debugfs - Initialize global debugfs entry.
*/
void __init mic_init_card_debugfs(void)
@@ -76,7 +76,7 @@ void __init mic_init_card_debugfs(void)
mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
}
-/**
+/*
* mic_exit_card_debugfs - Uninitialize global debugfs entry
*/
void mic_exit_card_debugfs(void)
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
index 68a731fd86de..cb55653cf1f9 100644
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ b/drivers/misc/mic/cosm/cosm_debugfs.c
@@ -15,7 +15,7 @@
/* Debugfs parent dir */
static struct dentry *cosm_dbg;
-/**
+/*
* log_buf_show - Display MIC kernel log buffer
*
* log_buf addr/len is read from System.map by user space
@@ -68,7 +68,7 @@ done:
DEFINE_SHOW_ATTRIBUTE(log_buf);
-/**
+/*
* force_reset_show - Force MIC reset
*
* Invokes the force_reset COSM bus op instead of the standard reset
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
index f9133c4f6105..ebb0eac43754 100644
--- a/drivers/misc/mic/cosm/cosm_main.c
+++ b/drivers/misc/mic/cosm/cosm_main.c
@@ -26,6 +26,7 @@ static atomic_t g_num_dev;
/**
* cosm_hw_reset - Issue a HW reset for the MIC device
* @cdev: pointer to cosm_device instance
+ * @force: force a MIC to reset even if it is already reset and ready
*/
static void cosm_hw_reset(struct cosm_device *cdev, bool force)
{
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index ab0db7a2ac8c..ffda740e20d5 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -101,7 +101,7 @@ static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
DEFINE_SHOW_ATTRIBUTE(mic_msi_irq_info);
-/**
+/*
* mic_create_debug_dir - Initialize MIC debugfs entries.
*/
void mic_create_debug_dir(struct mic_device *mdev)
@@ -124,7 +124,7 @@ void mic_create_debug_dir(struct mic_device *mdev)
&mic_msi_irq_info_fops);
}
-/**
+/*
* mic_delete_debug_dir - Uninitialize MIC debugfs entries.
*/
void mic_delete_debug_dir(struct mic_device *mdev)
@@ -132,7 +132,7 @@ void mic_delete_debug_dir(struct mic_device *mdev)
debugfs_remove_recursive(mdev->dbg_dir);
}
-/**
+/*
* mic_init_debugfs - Initialize global debugfs entry.
*/
void __init mic_init_debugfs(void)
@@ -140,7 +140,7 @@ void __init mic_init_debugfs(void)
mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
}
-/**
+/*
* mic_exit_debugfs - Uninitialize global debugfs entry
*/
void mic_exit_debugfs(void)
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index 433d35dc1721..85b3221b5d40 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -37,6 +37,8 @@ static irqreturn_t mic_thread_fn(int irq, void *dev)
/**
* mic_interrupt - Generic interrupt handler for
* MSI and INTx based interrupts.
+ * @irq: interrupt to handle (unused)
+ * @dev: pointer to the mic_device instance
*/
static irqreturn_t mic_interrupt(int irq, void *dev)
{
@@ -180,7 +182,7 @@ static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
* mic_setup_msix - Initializes MSIx interrupts.
*
* @mdev: pointer to mic_device instance
- *
+ * @pdev: PCI device structure
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index be0784fd1635..ea4608527ea0 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -164,7 +164,6 @@ static int mic_probe(struct pci_dev *pdev,
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
rc = -ENOMEM;
- dev_err(&pdev->dev, "mdev kmalloc failed rc %d\n", rc);
goto mdev_alloc_fail;
}
mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL);
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index d18cda966912..f5536c1ad607 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -17,6 +17,15 @@
#include "mic_x100.h"
#include "mic_smpt.h"
+static const u16 mic_x100_intr_init[] = {
+ MIC_X100_DOORBELL_IDX_START,
+ MIC_X100_DMA_IDX_START,
+ MIC_X100_ERR_IDX_START,
+ MIC_X100_NUM_DOORBELL,
+ MIC_X100_NUM_DMA,
+ MIC_X100_NUM_ERR,
+};
+
/**
* mic_x100_write_spad - write to the scratchpad register
* @mdev: pointer to mic_device instance
@@ -112,6 +121,7 @@ static void mic_x100_disable_interrupts(struct mic_device *mdev)
/**
* mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
* @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number
*/
static void mic_x100_send_sbox_intr(struct mic_device *mdev,
int doorbell)
@@ -133,6 +143,7 @@ static void mic_x100_send_sbox_intr(struct mic_device *mdev,
/**
* mic_x100_send_rdmasr_intr - Send an RDMASR interrupt to MIC.
* @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number
*/
static void mic_x100_send_rdmasr_intr(struct mic_device *mdev,
int doorbell)
@@ -494,6 +505,8 @@ static u32 mic_x100_get_postcode(struct mic_device *mdev)
/**
* mic_x100_smpt_set - Update an SMPT entry with a DMA address.
* @mdev: pointer to mic_device instance
+ * @dma_addr: DMA address to use
+ * @index: entry to write to
*
* RETURNS: none.
*/
diff --git a/drivers/misc/mic/host/mic_x100.h b/drivers/misc/mic/host/mic_x100.h
index 1f727a6f609c..aebcaed6fa72 100644
--- a/drivers/misc/mic/host/mic_x100.h
+++ b/drivers/misc/mic/host/mic_x100.h
@@ -67,15 +67,6 @@
#define MIC_X100_FW_SIZE 5
#define MIC_X100_POSTCODE 0x242c
-static const u16 mic_x100_intr_init[] = {
- MIC_X100_DOORBELL_IDX_START,
- MIC_X100_DMA_IDX_START,
- MIC_X100_ERR_IDX_START,
- MIC_X100_NUM_DOORBELL,
- MIC_X100_NUM_DMA,
- MIC_X100_NUM_ERR,
-};
-
/* Host->Card(bootstrap) Interrupt Vector */
#define MIC_X100_BSP_INTERRUPT_VECTOR 229
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index 781217c030a6..9cc6b2a6cf22 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -713,7 +713,7 @@ int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
}
EXPORT_SYMBOL_GPL(scif_connect);
-/**
+/*
* scif_accept() - Accept a connection request from the remote node
*
* The function accepts a connection request from the remote node. Successful
@@ -997,7 +997,6 @@ static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
{
- int read_size;
struct scif_endpt *ep = (struct scif_endpt *)epd;
struct scifmsg notif_msg;
int curr_recv_len = 0, remaining_len = len, read_count;
@@ -1017,8 +1016,7 @@ static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
* important for the Non Blocking case.
*/
curr_recv_len = min(remaining_len, read_count);
- read_size = scif_rb_get_next(&qp->inbound_q,
- msg, curr_recv_len);
+ scif_rb_get_next(&qp->inbound_q, msg, curr_recv_len);
if (ep->state == SCIFEP_CONNECTED) {
/*
* Update the read pointer only if the endpoint
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index c7c873409184..401b98e5ad79 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -99,7 +99,7 @@ int scif_reserve_dma_chan(struct scif_endpt *ep)
}
#ifdef CONFIG_MMU_NOTIFIER
-/**
+/*
* scif_rma_destroy_tcw:
*
* This routine destroys temporary cached windows
@@ -332,6 +332,7 @@ static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
* @epd: End Point Descriptor.
* @addr: virtual address to/from which to copy
* @len: length of range to copy
+ * @prot: read/write protection
* @out_offset: computed offset returned by reference.
* @out_window: allocated registered window returned by reference.
*
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
index 590baca9dc7b..426687f6696b 100644
--- a/drivers/misc/mic/scif/scif_epd.c
+++ b/drivers/misc/mic/scif/scif_epd.c
@@ -104,6 +104,7 @@ void scif_cleanup_zombie_epd(void)
/**
* scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* This message is initiated by the remote node to request a connection
@@ -155,6 +156,7 @@ conreq_sendrej:
/**
* scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* An accept() on the remote node has occurred and sent this message
@@ -181,6 +183,7 @@ void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote connection request has finished mapping the local memory.
@@ -203,6 +206,7 @@ void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote connection request failed to map the local memory it was sent.
@@ -221,6 +225,7 @@ void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote end has rejected the connection request. Set the end
@@ -240,6 +245,7 @@ void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_discnct() - Respond to SCIF_DISCNCT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote node has indicated close() has been called on its end
@@ -301,6 +307,7 @@ discnct_ack:
/**
* scif_discnct_ack() - Respond to SCIF_DISCNT_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side has indicated it has no more references to local resources
@@ -317,6 +324,7 @@ void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is confirming send or receive interrupt handling is complete.
@@ -333,6 +341,7 @@ void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is confirming send or receive interrupt handling is complete.
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index 657fd4a20656..4fedf6183951 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -11,6 +11,7 @@
/**
* scif_recv_mark: Handle SCIF_MARK request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a mark.
@@ -33,6 +34,7 @@ void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a SCIF_MARK message.
@@ -56,6 +58,7 @@ void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_wait: Handle SCIF_WAIT request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested waiting on a fence.
@@ -93,6 +96,7 @@ void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a SCIF_WAIT message.
@@ -114,6 +118,7 @@ void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a signal on a local offset.
@@ -135,6 +140,7 @@ void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_remote: Handle SCIF_SIGNAL_REMOTE request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a signal on a remote offset.
@@ -156,6 +162,7 @@ void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a signal request.
@@ -280,12 +287,12 @@ alloc_fail:
return err;
}
-/*
+/**
* scif_prog_signal:
- * @epd - Endpoint Descriptor
- * @offset - registered address to write @val to
- * @val - Value to be written at @offset
- * @type - Type of the window.
+ * @epd: Endpoint Descriptor
+ * @offset: registered address to write @val to
+ * @val: Value to be written at @offset
+ * @type: Type of the window.
*
* Arrange to write a value to the registered offset after ensuring that the
* offset provided is indeed valid.
@@ -501,12 +508,12 @@ retry:
/**
* scif_send_fence_signal:
- * @epd - endpoint descriptor
- * @loff - local offset
- * @lval - local value to write to loffset
- * @roff - remote offset
- * @rval - remote value to write to roffset
- * @flags - flags
+ * @epd: endpoint descriptor
+ * @loff: local offset
+ * @lval: local value to write to loffset
+ * @roff: remote offset
+ * @rval: remote value to write to roffset
+ * @flags: flags
*
* Sends a remote fence signal request
*/
@@ -577,10 +584,11 @@ static void scif_fence_mark_cb(void *arg)
atomic_dec(&ep->rma_info.fence_refcount);
}
-/*
+/**
* _scif_fence_mark:
+ * @epd: endpoint descriptor
+ * @mark: DMA mark to set-up
*
- * @epd - endpoint descriptor
* Set up a mark for this endpoint and return the value of the mark.
*/
int _scif_fence_mark(scif_epd_t epd, int *mark)
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
index c537df84539a..c4d9422082b7 100644
--- a/drivers/misc/mic/scif/scif_nm.c
+++ b/drivers/misc/mic/scif/scif_nm.c
@@ -14,6 +14,8 @@
/**
* scif_invalidate_ep() - Set state for all connected endpoints
* to disconnected and wake up all send/recv waitqueues
+ *
+ * @node: Node to invalidate
*/
static void scif_invalidate_ep(int node)
{
@@ -99,11 +101,10 @@ void scif_send_acks(struct scif_dev *dev)
}
}
-/*
- * scif_cleanup_scifdev
- *
+/**
+ * scif_cleanup_scifdev - Uninitialize SCIF data structures for remote
+ * SCIF device.
* @dev: Remote SCIF device.
- * Uninitialize SCIF data structures for remote SCIF device.
*/
void scif_cleanup_scifdev(struct scif_dev *dev)
{
@@ -136,8 +137,8 @@ void scif_cleanup_scifdev(struct scif_dev *dev)
scif_cleanup_qp(dev);
}
-/*
- * scif_remove_node:
+/**
+ * scif_remove_node
*
* @node: Node to remove
*/
@@ -162,9 +163,9 @@ static int scif_send_rmnode_msg(int node, int remove_node)
}
/**
- * scif_node_disconnect:
+ * scif_node_disconnect
*
- * @node_id[in]: source node id.
+ * @node_id: source node id [in]
* @mgmt_initiated: Disconnection initiated from the mgmt node
*
* Disconnect a node from the scif network.
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index ea084626fe11..e0748be373f1 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -443,6 +443,7 @@ static void scif_deinit_p2p_info(struct scif_dev *scifdev,
/**
* scif_node_connect: Respond to SCIF_NODE_CONNECT interrupt message
+ * @scifdev: SCIF device
* @dst: Destination node
*
* Connect the src and dst node by setting up the p2p connection
@@ -719,7 +720,7 @@ scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_exit() - Respond to SCIF_EXIT interrupt message
* @scifdev: Remote SCIF device node
- * @msg: Interrupt message
+ * @unused: Interrupt message (unused)
*
* This function stops the SCIF interface for the node which sent
* the SCIF_EXIT message and starts waiting for that node to
@@ -740,7 +741,7 @@ scif_exit(struct scif_dev *scifdev, struct scifmsg *unused)
/**
* scif_exitack() - Respond to SCIF_EXIT_ACK interrupt message
* @scifdev: Remote SCIF device node
- * @msg: Interrupt message
+ * @unused: Interrupt message (unused)
*
*/
static __always_inline void
@@ -930,6 +931,7 @@ local_error:
/**
* scif_node_add_nack: Respond to SCIF_NODE_ADD_NACK interrupt message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* SCIF_NODE_ADD failed, so inform the waiting wq.
@@ -946,8 +948,9 @@ scif_node_add_nack(struct scif_dev *scifdev, struct scifmsg *msg)
}
}
-/*
+/**
* scif_node_remove: Handle SCIF_NODE_REMOVE message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* Handle node removal.
@@ -962,8 +965,9 @@ scif_node_remove(struct scif_dev *scifdev, struct scifmsg *msg)
scif_handle_remove_node(node);
}
-/*
+/**
* scif_node_remove_ack: Handle SCIF_NODE_REMOVE_ACK message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* The peer has acked a SCIF_NODE_REMOVE message.
@@ -979,6 +983,7 @@ scif_node_remove_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_get_node_info: Respond to SCIF_GET_NODE_INFO interrupt message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* Retrieve node info i.e maxid and total from the mgmt node.
@@ -1058,6 +1063,7 @@ static void (*scif_intr_func[SCIF_MAX_MSG + 1])
scif_recv_sig_resp, /* SCIF_SIG_NACK */
};
+static int scif_max_msg_id = SCIF_MAX_MSG;
/**
* scif_nodeqp_msg_handler() - Common handler for node messages
* @scifdev: Remote device to respond to
@@ -1067,8 +1073,6 @@ static void (*scif_intr_func[SCIF_MAX_MSG + 1])
* This routine calls the appropriate routine to handle a Node Qp
* message receipt
*/
-static int scif_max_msg_id = SCIF_MAX_MSG;
-
static void
scif_nodeqp_msg_handler(struct scif_dev *scifdev,
struct scif_qp *qp, struct scifmsg *msg)
@@ -1117,7 +1121,7 @@ void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp)
/**
* scif_loopb_wq_handler - Loopback Workqueue Handler.
- * @work: loop back work
+ * @unused: loop back work (unused)
*
* This work queue routine is invoked by the loopback work queue handler.
* It grabs the recv lock, dequeues any available messages from the head
diff --git a/drivers/misc/mic/scif/scif_ports.c b/drivers/misc/mic/scif/scif_ports.c
index 547a71285069..4bdb5ef9a139 100644
--- a/drivers/misc/mic/scif/scif_ports.c
+++ b/drivers/misc/mic/scif/scif_ports.c
@@ -14,11 +14,11 @@
struct idr scif_ports;
-/*
+/**
* struct scif_port - SCIF port information
*
- * @ref_cnt - Reference count since there can be multiple endpoints
- * created via scif_accept(..) simultaneously using a port.
+ * @ref_cnt: Reference count since there can be multiple endpoints
+ * created via scif_accept(..) simultaneously using a port.
*/
struct scif_port {
int ref_cnt;
@@ -27,7 +27,8 @@ struct scif_port {
/**
* __scif_get_port - Reserve a specified port # for SCIF and add it
* to the global list.
- * @port : port # to be reserved.
+ * @start: lowest port # to be reserved (inclusive).
+ * @end: highest port # to be reserved (exclusive).
*
* @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
* On memory allocation failure, returns -ENOMEM.
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 406cd5abfa72..de8f61efaef5 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -458,7 +458,7 @@ static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
/**
* scif_create_remote_window:
- * @ep: end point
+ * @scifdev: SCIF device
* @nr_pages: number of pages in window
*
* Allocate and prepare a remote registration window.
@@ -500,7 +500,6 @@ error_ret:
/**
* scif_destroy_remote_window:
- * @ep: end point
* @window: remote registration window
*
* Deallocate resources for remote window.
@@ -1037,6 +1036,7 @@ void scif_free_window_offset(struct scif_endpt *ep,
/**
* scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is requesting a memory allocation.
@@ -1072,6 +1072,7 @@ error:
/**
* scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side responded to a memory allocation.
@@ -1096,6 +1097,7 @@ void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Free up memory kmalloc'd earlier.
@@ -1134,6 +1136,7 @@ scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
/**
* scif_recv_reg: Respond to SCIF_REGISTER interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Update remote window list with a new registered window.
@@ -1170,6 +1173,7 @@ void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remove window from remote registration list;
@@ -1235,6 +1239,7 @@ error:
/**
* scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to complete registration.
@@ -1253,6 +1258,7 @@ void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to inform it that registration
@@ -1272,6 +1278,7 @@ void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to complete unregistration.
@@ -1290,6 +1297,7 @@ void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to inform it that unregistration
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 85942f6717c5..55e7f21e51f4 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -438,7 +438,7 @@ error:
/*
* The config ops structure as defined by virtio config
*/
-static struct virtio_config_ops vop_vq_config_ops = {
+static const struct virtio_config_ops vop_vq_config_ops = {
.get_features = vop_get_features,
.finalize_features = vop_finalize_features,
.get = vop_get,
@@ -546,7 +546,7 @@ static int vop_match_desc(struct device *dev, void *data)
return vdev->desc == (void __iomem *)data;
}
-static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
+static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl __iomem *dc)
{
return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
}
@@ -614,7 +614,6 @@ static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
struct mic_device_desc __iomem *d;
struct mic_device_ctrl __iomem *dc;
struct device *dev;
- int ret;
for (i = sizeof(struct mic_bootparam);
i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
@@ -644,7 +643,7 @@ static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
&dc->config_change);
put_device(dev);
_vop_handle_config_change(d, i, vpdev);
- ret = _vop_remove_device(d, i, vpdev);
+ _vop_remove_device(d, i, vpdev);
if (remove) {
iowrite8(0, &dc->config_change);
iowrite8(0, &dc->guest_ack);
@@ -763,7 +762,7 @@ static void vop_driver_remove(struct vop_device *vpdev)
kfree(vi);
}
-static struct vop_device_id id_table[] = {
+static const struct vop_device_id id_table[] = {
{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index c8e19bfb5ef9..e3b99a39d207 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -273,11 +273,11 @@ static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn,
}
/**
- * Read the template version from the AFU
- * dev: the device for the AFU
- * fn: the AFU offsets
- * len: outputs the template length
- * version: outputs the major<<8,minor version
+ * read_template_version - Read the template version from the AFU
+ * @dev: the device for the AFU
+ * @fn: the AFU offsets
+ * @len: outputs the template length
+ * @version: outputs the major<<8,minor version
*
* Returns 0 on success, negative on failure
*/
@@ -476,10 +476,10 @@ static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu)
}
/**
- * Populate AFU metadata regarding LPC memory
- * dev: the device for the AFU
- * fn: the AFU offsets
- * afu: the AFU struct to populate the LPC metadata into
+ * read_afu_lpc_memory_info - Populate AFU metadata regarding LPC memory
+ * @dev: the device for the AFU
+ * @fn: the AFU offsets
+ * @afu: the AFU struct to populate the LPC metadata into
*
* Returns 0 on success, negative on failure
*/
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 60828af7506a..8d2b7135738e 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -135,6 +135,7 @@ static DEFINE_MUTEX(pch_phub_mutex);
/**
* pch_phub_read_modify_write_reg() - Reading modifying and writing register
+ * @chip: Pointer to the PHUB register structure
* @reg_addr_offset: Register offset address value.
* @data: Writing value.
* @mask: Mask value.
@@ -147,9 +148,8 @@ static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
}
-#ifdef CONFIG_PM
/* pch_phub_save_reg_conf - saves register configuration */
-static void pch_phub_save_reg_conf(struct pci_dev *pdev)
+static void __maybe_unused pch_phub_save_reg_conf(struct pci_dev *pdev)
{
unsigned int i;
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
@@ -210,7 +210,7 @@ static void pch_phub_save_reg_conf(struct pci_dev *pdev)
}
/* pch_phub_restore_reg_conf - restore register configuration */
-static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
+static void __maybe_unused pch_phub_restore_reg_conf(struct pci_dev *pdev)
{
unsigned int i;
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
@@ -270,10 +270,10 @@ static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
}
-#endif
/**
* pch_phub_read_serial_rom() - Reading Serial ROM
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM offset address to read.
* @data: Read buffer for specified Serial ROM value.
*/
@@ -288,6 +288,7 @@ static void pch_phub_read_serial_rom(struct pch_phub_reg *chip,
/**
* pch_phub_write_serial_rom() - Writing Serial ROM
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM offset address.
* @data: Serial ROM value to write.
*/
@@ -326,6 +327,7 @@ static int pch_phub_write_serial_rom(struct pch_phub_reg *chip,
/**
* pch_phub_read_serial_rom_val() - Read Serial ROM value
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM address offset value.
* @data: Serial ROM value to read.
*/
@@ -342,6 +344,7 @@ static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip,
/**
* pch_phub_write_serial_rom_val() - writing Serial ROM value
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM address offset value.
* @data: Serial ROM value.
*/
@@ -443,7 +446,7 @@ static int pch_phub_gbe_serial_rom_conf_mp(struct pch_phub_reg *chip)
/**
* pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address
- * @offset_address: Gigabit Ethernet MAC address offset value.
+ * @chip: Pointer to the PHUB register structure
* @data: Buffer of the Gigabit Ethernet MAC address value.
*/
static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
@@ -455,7 +458,7 @@ static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
/**
* pch_phub_write_gbe_mac_addr() - Write MAC address
- * @offset_address: Gigabit Ethernet MAC address offset value.
+ * @chip: Pointer to the PHUB register structure
* @data: Gigabit Ethernet MAC address value.
*/
static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
@@ -835,48 +838,19 @@ static void pch_phub_remove(struct pci_dev *pdev)
kfree(chip);
}
-#ifdef CONFIG_PM
-
-static int pch_phub_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_phub_suspend(struct device *dev_d)
{
- int ret;
-
- pch_phub_save_reg_conf(pdev);
- ret = pci_save_state(pdev);
- if (ret) {
- dev_err(&pdev->dev,
- " %s -pci_save_state returns %d\n", __func__, ret);
- return ret;
- }
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ device_wakeup_disable(dev_d);
return 0;
}
-static int pch_phub_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_phub_resume(struct device *dev_d)
{
- int ret;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev,
- "%s-pci_enable_device failed(ret=%d) ", __func__, ret);
- return ret;
- }
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pch_phub_restore_reg_conf(pdev);
+ device_wakeup_disable(dev_d);
return 0;
}
-#else
-#define pch_phub_suspend NULL
-#define pch_phub_resume NULL
-#endif /* CONFIG_PM */
static const struct pci_device_id pch_phub_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH1_PHUB), 1, },
@@ -888,13 +862,14 @@ static const struct pci_device_id pch_phub_pcidev_id[] = {
};
MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
+static SIMPLE_DEV_PM_OPS(pch_phub_pm_ops, pch_phub_suspend, pch_phub_resume);
+
static struct pci_driver pch_phub_driver = {
.name = "pch_phub",
.id_table = pch_phub_pcidev_id,
.probe = pch_phub_probe,
.remove = pch_phub_remove,
- .suspend = pch_phub_suspend,
- .resume = pch_phub_resume
+ .driver.pm = &pch_phub_pm_ops,
};
module_pci_driver(pch_phub_driver);
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 6a5ed0e25ff1..ce72e46a2e73 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -457,31 +457,26 @@ static void phantom_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int phantom_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused phantom_suspend(struct device *dev_d)
{
- struct phantom_device *dev = pci_get_drvdata(pdev);
+ struct phantom_device *dev = dev_get_drvdata(dev_d);
iowrite32(0, dev->caddr + PHN_IRQCTL);
ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
- synchronize_irq(pdev->irq);
+ synchronize_irq(to_pci_dev(dev_d)->irq);
return 0;
}
-static int phantom_resume(struct pci_dev *pdev)
+static int __maybe_unused phantom_resume(struct device *dev_d)
{
- struct phantom_device *dev = pci_get_drvdata(pdev);
+ struct phantom_device *dev = dev_get_drvdata(dev_d);
iowrite32(0, dev->caddr + PHN_IRQCTL);
return 0;
}
-#else
-#define phantom_suspend NULL
-#define phantom_resume NULL
-#endif
static struct pci_device_id phantom_pci_tbl[] = {
{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
@@ -491,13 +486,14 @@ static struct pci_device_id phantom_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, phantom_pci_tbl);
+static SIMPLE_DEV_PM_OPS(phantom_pm_ops, phantom_suspend, phantom_resume);
+
static struct pci_driver phantom_pci_driver = {
.name = "phantom",
.id_table = phantom_pci_tbl,
.probe = phantom_probe,
.remove = phantom_remove,
- .suspend = phantom_suspend,
- .resume = phantom_resume
+ .driver.pm = &phantom_pm_ops,
};
static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION);
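The pch_phub and phantom conversions above drop the legacy pci_driver .suspend/.resume hooks and the CONFIG_PM ifdefs in favour of dev_pm_ops. A bare skeleton of that pattern, with hypothetical names and the device-specific work elided:

#include <linux/module.h>
#include <linux/pci.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	/* Quiesce the device; the PCI core saves config space and powers down. */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* Re-initialise the device; config space has already been restored. */
	return 0;
}

/* __maybe_unused plus dev_pm_ops replaces the old #ifdef CONFIG_PM blocks:
 * with CONFIG_PM disabled the callbacks are simply unreferenced, not errors. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name = "example",
	/* .id_table, .probe and .remove omitted from this sketch */
	.driver.pm = &example_pm_ops,
};

module_pci_driver(example_driver);
MODULE_LICENSE("GPL");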
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index b7f510676cd6..7236ae527b19 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -496,9 +496,8 @@ static void pti_tty_cleanup(struct tty_struct *tty)
* pti_tty_driver_write()- Write trace debugging data through the char
* interface to the PTI HW. Part of the misc device implementation.
*
- * @filp: Contains private data which is used to obtain
- * master, channel write ID.
- * @data: trace data to be written.
+ * @tty: tty struct containing pti information.
+ * @buf: trace data to be written.
* @len: # of byte to write.
*
* Returns:
@@ -734,8 +733,8 @@ static struct console pti_console = {
* pti_port_activate()- Used to start/initialize any items upon
* first opening of tty_port().
*
- * @port- The tty port number of the PTI device.
- * @tty- The tty struct associated with this device.
+ * @port: The tty port number of the PTI device.
+ * @tty: The tty struct associated with this device.
*
* Returns:
* always returns 0
@@ -755,7 +754,7 @@ static int pti_port_activate(struct tty_port *port, struct tty_struct *tty)
* pti_port_shutdown()- Used to stop/shutdown any items upon the
* last tty port close.
*
- * @port- The tty port number of the PTI device.
+ * @port: The tty port number of the PTI device.
*
* Notes: The primary purpose of the PTI tty port 0 is to hook
* the syslog daemon to it; thus this port will be open for a
@@ -781,8 +780,8 @@ static const struct tty_port_operations tty_port_ops = {
* pti_pci_probe()- Used to detect pti on the pci bus and set
* things up in the driver.
*
- * @pdev- pci_dev struct values for pti.
- * @ent- pci_device_id struct for pti driver.
+ * @pdev: pci_dev struct values for pti.
+ * @ent: pci_device_id struct for pti driver.
*
* Returns:
* 0 for success
@@ -899,7 +898,6 @@ static struct pci_driver pti_pci_driver = {
};
/**
- *
* pti_init()- Overall entry/init call to the pti driver.
* It starts the registration process with the kernel.
*
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 5fd94d836070..61b03fcefb13 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -223,7 +223,7 @@ xpc_disconnect(int ch_number)
}
EXPORT_SYMBOL_GPL(xpc_disconnect);
-int __init
+static int __init
xp_init(void)
{
enum xp_retval ret;
@@ -246,7 +246,7 @@ xp_init(void)
module_init(xp_init);
-void __exit
+static void __exit
xp_exit(void)
{
if (is_uv())
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index cb57ac6ab4c3..6cc31789b38d 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -1,7 +1,7 @@
/*
* SRAM protect-exec region helper functions
*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 14136d2cc8f9..f4ddd1e67015 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -18,7 +18,8 @@
extern void st_kim_recv(void *, const unsigned char *, long);
void st_int_recv(void *, const unsigned char *, long);
-/* function pointer pointing to either,
+/*
+ * function pointer pointing to either,
* st_kim_recv during registration to receive fw download responses
* st_int_recv after registration to receive proto stack responses
*/
@@ -60,7 +61,8 @@ int st_get_uart_wr_room(struct st_data_s *st_gdata)
return tty->ops->write_room(tty);
}
-/* can be called in from
+/*
+ * can be called in from
* -- KIM (during fw download)
* -- ST Core (during st_write)
*
@@ -100,7 +102,8 @@ static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
kfree_skb(st_gdata->rx_skb);
return;
}
- /* this cannot fail
+ /*
+ * this cannot fail
* this shouldn't take long
* - should be just skb_queue_tail for the
* protocol stack driver
@@ -121,9 +124,8 @@ static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
return;
}
-/**
- * st_reg_complete -
- * to call registration complete callbacks
+/*
+ * st_reg_complete - to call registration complete callbacks
* of all protocol stack drivers
* This function is being called with spin lock held, protocol drivers are
* only expected to complete their waits and do nothing more than that.
@@ -156,21 +158,24 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
pr_debug("len %d room %d", len, room);
if (!len) {
- /* Received packet has only packet header and
+ /*
+ * Received packet has only packet header and
* has zero length payload. So, ask ST CORE to
* forward the packet to protocol driver (BT/FM/GPS)
*/
st_send_frame(chnl_id, st_gdata);
} else if (len > room) {
- /* Received packet's payload length is larger.
+ /*
+ * Received packet's payload length is larger.
* We can't accommodate it in created skb.
*/
pr_err("Data length is too large len %d room %d", len,
room);
kfree_skb(st_gdata->rx_skb);
} else {
- /* Packet header has non-zero payload length and
+ /*
+ * Packet header has non-zero payload length and
* we have enough space in created skb. Lets read
* payload data */
st_gdata->rx_state = ST_W4_DATA;
@@ -178,8 +183,7 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
return len;
}
- /* Change ST state to continue to process next
- * packet */
+ /* Change ST state to continue to process next packet */
st_gdata->rx_state = ST_W4_PACKET_TYPE;
st_gdata->rx_skb = NULL;
st_gdata->rx_count = 0;
@@ -188,7 +192,7 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
return 0;
}
-/**
+/*
* st_wakeup_ack - internal function for action when wake-up ack
* received
*/
@@ -199,7 +203,8 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
unsigned long flags = 0;
spin_lock_irqsave(&st_gdata->lock, flags);
- /* de-Q from waitQ and Q in txQ now that the
+ /*
+ * de-Q from waitQ and Q in txQ now that the
* chip is awake
*/
while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq)))
@@ -213,7 +218,7 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
st_tx_wakeup(st_gdata);
}
-/**
+/*
* st_int_recv - ST's internal receive function.
* Decodes received RAW data and forwards to corresponding
* client drivers (Bluetooth,FM,GPS..etc).
@@ -262,8 +267,10 @@ void st_int_recv(void *disc_data,
/* Waiting for complete packet ? */
case ST_W4_DATA:
pr_debug("Complete pkt received");
- /* Ask ST CORE to forward
- * the packet to protocol driver */
+ /*
+ * Ask ST CORE to forward
+ * the packet to protocol driver
+ */
st_send_frame(st_gdata->rx_chnl, st_gdata);
st_gdata->rx_state = ST_W4_PACKET_TYPE;
@@ -276,7 +283,7 @@ void st_int_recv(void *disc_data,
&st_gdata->rx_skb->data
[proto->offset_len_in_hdr];
pr_debug("plen pointing to %x\n", *plen);
- if (proto->len_size == 1)/* 1 byte len field */
+ if (proto->len_size == 1) /* 1 byte len field */
payload_len = *(unsigned char *)plen;
else if (proto->len_size == 2)
payload_len =
@@ -294,18 +301,23 @@ void st_int_recv(void *disc_data,
}
/* end of if rx_count */
- /* Check first byte of packet and identify module
- * owner (BT/FM/GPS) */
+
+ /*
+ * Check first byte of packet and identify module
+ * owner (BT/FM/GPS)
+ */
switch (*ptr) {
case LL_SLEEP_IND:
case LL_SLEEP_ACK:
case LL_WAKE_UP_IND:
pr_debug("PM packet");
- /* this takes appropriate action based on
+ /*
+ * this takes appropriate action based on
* sleep state received --
*/
st_ll_sleep_state(st_gdata, *ptr);
- /* if WAKEUP_IND collides copy from waitq to txq
+ /*
+ * if WAKEUP_IND collides copy from waitq to txq
* and assume chip awake
*/
spin_unlock_irqrestore(&st_gdata->lock, flags);
@@ -331,7 +343,8 @@ void st_int_recv(void *disc_data,
default:
type = *ptr;
- /* Default case means non-HCILL packets,
+ /*
+ * Default case means non-HCILL packets,
* possibilities are packets for:
* (a) valid protocol - Supported Protocols within
* the ST_MAX_CHANNELS.
@@ -377,7 +390,7 @@ done:
return;
}
-/**
+/*
* st_int_dequeue - internal de-Q function.
* If the previous data set was not written
* completely, return that skb which has the pending data.
@@ -396,7 +409,7 @@ static struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata)
return skb_dequeue(&st_gdata->txq);
}
-/**
+/*
* st_int_enqueue - internal Q-ing function.
* Will either Q the skb to txq or the tx_waitq
* depending on the ST LL state.
@@ -561,7 +574,8 @@ long st_register(struct st_proto_s *new_proto)
/* release lock previously held - re-locked below */
spin_unlock_irqrestore(&st_gdata->lock, flags);
- /* this may take a while to complete
+ /*
+ * this may take a while to complete
* since it involves BT fw download
*/
err = st_kim_start(st_gdata->kim_data);
@@ -583,7 +597,8 @@ long st_register(struct st_proto_s *new_proto)
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_int_recv;
- /* this is where all pending registration
+ /*
+ * this is where all pending registration
* are signalled to be complete by calling callback functions
*/
if ((st_gdata->protos_registered != ST_EMPTY) &&
@@ -593,7 +608,8 @@ long st_register(struct st_proto_s *new_proto)
}
clear_bit(ST_REG_PENDING, &st_gdata->st_state);
- /* check for already registered once more,
+ /*
+ * check for already registered once more,
* since the above check is old
*/
if (st_gdata->is_registered[new_proto->chnl_id] == true) {
@@ -622,7 +638,8 @@ long st_register(struct st_proto_s *new_proto)
}
EXPORT_SYMBOL_GPL(st_register);
-/* to unregister a protocol -
+/*
+ * to unregister a protocol -
* to be called from protocol stack driver
*/
long st_unregister(struct st_proto_s *proto)
@@ -742,7 +759,8 @@ static void st_tty_close(struct tty_struct *tty)
pr_info("%s ", __func__);
- /* TODO:
+ /*
+ * TODO:
* if a protocol has been registered & line discipline
* un-installed for some reason - what should be done ?
*/
@@ -795,7 +813,8 @@ static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
pr_debug("done %s", __func__);
}
-/* wake-up function called in from the TTY layer
+/*
+ * wake-up function called in from the TTY layer
* inside the internal wakeup function will be called
*/
static void st_tty_wakeup(struct tty_struct *tty)
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index a36ed1ff5967..f2f6cab97c08 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -30,7 +30,7 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
-/**
+/*
* st_get_plat_device -
* function which returns the reference to the platform device
* requested by id. As of now only 1 such device exists (id=0)
@@ -43,7 +43,7 @@ static struct platform_device *st_get_plat_device(int id)
return st_kim_devices[id];
}
-/**
+/*
* validate_firmware_response -
* function to return whether the firmware response was proper
* in case of error don't complete so that waiting for proper
@@ -55,7 +55,8 @@ static void validate_firmware_response(struct kim_data_s *kim_gdata)
if (!skb)
return;
- /* these magic numbers are the position in the response buffer which
+ /*
+ * these magic numbers are the position in the response buffer which
* allows us to distinguish whether the response is for the read
* version info. command
*/
@@ -79,7 +80,8 @@ static void validate_firmware_response(struct kim_data_s *kim_gdata)
kfree_skb(skb);
}
-/* check for data len received inside kim_int_recv
+/*
+ * check for data len received inside kim_int_recv
* most often hit the last case to update state to waiting for data
*/
static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
@@ -91,14 +93,16 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
if (!len) {
validate_firmware_response(kim_gdata);
} else if (len > room) {
- /* Received packet's payload length is larger.
+ /*
+ * Received packet's payload length is larger.
* We can't accommodate it in created skb.
*/
pr_err("Data length is too large len %d room %d", len,
room);
kfree_skb(kim_gdata->rx_skb);
} else {
- /* Packet header has non-zero payload length and
+ /*
+ * Packet header has non-zero payload length and
* we have enough space in created skb. Lets read
* payload data */
kim_gdata->rx_state = ST_W4_DATA;
@@ -106,8 +110,10 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
return len;
}
- /* Change ST LL state to continue to process next
- * packet */
+ /*
+ * Change ST LL state to continue to process next
+ * packet
+ */
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
kim_gdata->rx_count = 0;
@@ -115,7 +121,7 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
return 0;
}
-/**
+/*
* kim_int_recv - receive function called during firmware download
* firmware download responses on different UART drivers
* have been observed to come in bursts of different
@@ -216,7 +222,8 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
return timeout ? -ERESTARTSYS : -ETIMEDOUT;
}
reinit_completion(&kim_gdata->kim_rcvd);
- /* the positions 12 & 13 in the response buffer provide with the
+ /*
+ * the positions 12 & 13 in the response buffer provide with the
* chip, major & minor numbers
*/
@@ -263,7 +270,7 @@ static void skip_change_remote_baud(unsigned char **ptr, long *len)
}
}
-/**
+/*
* download_firmware -
* internal function which parses through the .bts firmware
* script file intreprets SEND, DELAY actions only as of now
@@ -295,7 +302,8 @@ static long download_firmware(struct kim_data_s *kim_gdata)
}
ptr = (void *)kim_gdata->fw_entry->data;
len = kim_gdata->fw_entry->size;
- /* bts_header to remove out magic number and
+ /*
+ * bts_header to remove out magic number and
* version
*/
ptr += sizeof(struct bts_header);
@@ -313,8 +321,10 @@ static long download_firmware(struct kim_data_s *kim_gdata)
if (unlikely
(((struct hci_command *)action_ptr)->opcode ==
0xFF36)) {
- /* ignore remote change
- * baud rate HCI VS command */
+ /*
+ * ignore remote change
+ * baud rate HCI VS command
+ */
pr_warn("change remote baud"
" rate command in firmware");
skip_change_remote_baud(&ptr, &len);
@@ -346,7 +356,8 @@ static long download_firmware(struct kim_data_s *kim_gdata)
release_firmware(kim_gdata->fw_entry);
return -ETIMEDOUT;
}
- /* reinit completion before sending for the
+ /*
+ * reinit completion before sending for the
* relevant wait
*/
reinit_completion(&kim_gdata->kim_rcvd);
@@ -418,14 +429,16 @@ void st_kim_recv(void *disc_data, const unsigned char *data, long count)
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
struct kim_data_s *kim_gdata = st_gdata->kim_data;
- /* proceed to gather all data and distinguish read fw version response
+ /*
+ * proceed to gather all data and distinguish read fw version response
* from other fw responses when data gathering is complete
*/
kim_int_recv(kim_gdata, data, count);
return;
}
-/* to signal completion of line discipline installation
+/*
+ * to signal completion of line discipline installation
* called from ST Core, upon tty_open
*/
void st_kim_complete(void *kim_data)
@@ -434,7 +447,7 @@ void st_kim_complete(void *kim_data)
complete(&kim_gdata->ldisc_installed);
}
-/**
+/*
* st_kim_start - called from ST Core upon 1st registration
* This involves toggling the chip enable gpio, reading
* the firmware version from chip, forming the fw file name
@@ -472,8 +485,10 @@ long st_kim_start(void *kim_data)
err = wait_for_completion_interruptible_timeout(
&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME));
if (!err) {
- /* ldisc installation timeout,
- * flush uart, power cycle BT_EN */
+ /*
+ * ldisc installation timeout,
+ * flush uart, power cycle BT_EN
+ */
pr_err("ldisc installation timeout");
err = st_kim_stop(kim_gdata);
continue;
@@ -482,8 +497,10 @@ long st_kim_start(void *kim_data)
pr_info("line discipline installed");
err = download_firmware(kim_gdata);
if (err != 0) {
- /* ldisc installed but fw download failed,
- * flush uart & power cycle BT_EN */
+ /*
+ * ldisc installed but fw download failed,
+ * flush uart & power cycle BT_EN
+ */
pr_err("download firmware failed");
err = st_kim_stop(kim_gdata);
continue;
@@ -495,7 +512,7 @@ long st_kim_start(void *kim_data)
return err;
}
-/**
+/*
* st_kim_stop - stop communication with chip.
* This can be called from ST Core/KIM, on the-
* (a) last un-register when chip need not be powered there-after,
@@ -650,7 +667,7 @@ static const struct attribute_group uim_attr_grp = {
.attrs = uim_attrs,
};
-/**
+/*
* st_kim_ref - reference the core's data
* This references the per-ST platform device in the arch/xx/
* board-xx.c file.
@@ -729,8 +746,7 @@ static int kim_probe(struct platform_device *pdev)
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
goto err_sysfs_group;
}
- /* get reference of pdev for request_firmware
- */
+ /* get reference of pdev for request_firmware */
kim_gdata->kim_pdev = pdev;
init_completion(&kim_gdata->kim_rcvd);
init_completion(&kim_gdata->ldisc_installed);
@@ -772,7 +788,8 @@ static int kim_remove(struct platform_device *pdev)
kim_gdata = platform_get_drvdata(pdev);
- /* Free the Bluetooth/FM/GPIO
+ /*
+ * Free the Bluetooth/FM/GPIO
* nShutdown gpio from the system
*/
gpio_free(pdata->nshutdown_gpio);
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index e6b40aa8fb42..228f2eb1d476 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -207,10 +207,9 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
spin_unlock_irqrestore(&fm->lock, flags);
}
-#ifdef CONFIG_PM
-
-static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused tifm_7xx1_suspend(struct device *dev_d)
{
+ struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int cnt;
@@ -221,15 +220,13 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
}
- pci_save_state(dev);
- pci_enable_wake(dev, pci_choose_state(dev, state), 0);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
+ device_wakeup_disable(dev_d);
return 0;
}
-static int tifm_7xx1_resume(struct pci_dev *dev)
+static int __maybe_unused tifm_7xx1_resume(struct device *dev_d)
{
+ struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int rc;
unsigned long timeout;
@@ -242,11 +239,6 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
if (WARN_ON(fm->num_sockets > ARRAY_SIZE(new_ids)))
return -ENXIO;
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
pci_set_master(dev);
dev_dbg(&dev->dev, "resuming host\n");
@@ -297,13 +289,6 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
return 0;
}
-#else
-
-#define tifm_7xx1_suspend NULL
-#define tifm_7xx1_resume NULL
-
-#endif /* CONFIG_PM */
-
static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm,
struct tifm_dev *sock)
{
@@ -424,13 +409,14 @@ static const struct pci_device_id tifm_7xx1_pci_tbl[] = {
{ }
};
+static SIMPLE_DEV_PM_OPS(tifm_7xx1_pm_ops, tifm_7xx1_suspend, tifm_7xx1_resume);
+
static struct pci_driver tifm_7xx1_driver = {
.name = DRIVER_NAME,
.id_table = tifm_7xx1_pci_tbl,
.probe = tifm_7xx1_probe,
.remove = tifm_7xx1_remove,
- .suspend = tifm_7xx1_suspend,
- .resume = tifm_7xx1_resume,
+ .driver.pm = &tifm_7xx1_pm_ops,
};
module_pci_driver(tifm_7xx1_driver);
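The tifm_7xx1 hunk above also deletes the explicit pci_save_state(), pci_enable_device() and pci_set_power_state() calls: once the callbacks run as dev_pm_ops, the PCI core does that bookkeeping itself, and wakeup arming goes through the generic wakeup API rather than pci_enable_wake(). A small sketch of the wakeup side for a hypothetical driver:

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static int __maybe_unused quux_suspend(struct device *dev)
{
	/* Rough equivalent of the old pci_enable_wake(pdev, state, 0):
	 * make sure the device is not armed as a wakeup source. State
	 * save/restore and the D-state transition are left to the core. */
	device_wakeup_disable(dev);
	return 0;
}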
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 107028e77ca3..aa91f69a5fa9 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -179,14 +179,6 @@ static int uacce_fops_release(struct inode *inode, struct file *filep)
return 0;
}
-static vm_fault_t uacce_vma_fault(struct vm_fault *vmf)
-{
- if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
- return VM_FAULT_SIGBUS;
-
- return 0;
-}
-
static void uacce_vma_close(struct vm_area_struct *vma)
{
struct uacce_queue *q = vma->vm_private_data;
@@ -199,7 +191,6 @@ static void uacce_vma_close(struct vm_area_struct *vma)
}
static const struct vm_operations_struct uacce_vm_ops = {
- .fault = uacce_vma_fault,
.close = uacce_vma_close,
};
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 5a71f6678fd3..bce910de8b4d 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1336,6 +1336,8 @@ static void init_extra_caps(struct realtek_pci_sdmmc *host)
mmc->caps |= MMC_CAP_1_8V_DDR;
if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT)
mmc->caps |= MMC_CAP_8_BIT_DATA;
+ if (pcr->extra_caps & EXTRA_CAPS_NO_MMC)
+ mmc->caps2 |= MMC_CAP2_NO_MMC;
}
static void realtek_init_host(struct realtek_pci_sdmmc *host)
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
index 89096f10f4c4..12466b06692c 100644
--- a/drivers/mux/adgs1408.c
+++ b/drivers/mux/adgs1408.c
@@ -6,9 +6,9 @@
*/
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mux/driver.h>
-#include <linux/of_platform.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
@@ -59,7 +59,7 @@ static int adgs1408_probe(struct spi_device *spi)
s32 idle_state;
int ret;
- chip_id = (enum adgs1408_chip_id)of_device_get_match_data(dev);
+ chip_id = (enum adgs1408_chip_id)device_get_match_data(dev);
if (!chip_id)
chip_id = spi_get_device_id(spi)->driver_data;
@@ -119,7 +119,7 @@ MODULE_DEVICE_TABLE(of, adgs1408_of_match);
static struct spi_driver adgs1408_driver = {
.driver = {
.name = "adgs1408",
- .of_match_table = of_match_ptr(adgs1408_of_match),
+ .of_match_table = adgs1408_of_match,
},
.probe = adgs1408_probe,
.id_table = adgs1408_spi_id,
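The adgs1408 hunk above moves from the OF-only of_device_get_match_data()/of_match_ptr() pair to the firmware-node agnostic property API, so the same match data works for devicetree and ACPI (PRP0001) enumeration. A hedged sketch of the resulting pattern for a hypothetical SPI driver (compatible strings and names are made up):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>

enum bar_chip_id { BAR_1408 = 1, BAR_1409 };

static const struct of_device_id bar_of_match[] = {
	{ .compatible = "vendor,bar1408", .data = (void *)BAR_1408 },
	{ .compatible = "vendor,bar1409", .data = (void *)BAR_1409 },
	{ }
};
MODULE_DEVICE_TABLE(of, bar_of_match);

static int bar_probe(struct spi_device *spi)
{
	/* Returns the .data of the matching table entry regardless of how
	 * the device was enumerated, so no of_match_ptr() wrapper and no
	 * OF-specific helper are needed. */
	enum bar_chip_id id =
		(enum bar_chip_id)(uintptr_t)device_get_match_data(&spi->dev);

	return id ? 0 : -ENODEV;
}

static struct spi_driver bar_driver = {
	.driver = {
		.name = "bar",
		.of_match_table = bar_of_match,	/* no of_match_ptr() */
	},
	.probe = bar_probe,
};
module_spi_driver(bar_driver);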
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index d7b7f6d688e7..954d3b4a52ab 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -7,9 +7,6 @@ menuconfig NVMEM
This framework is designed to provide a generic interface to NVMEM
from both the Linux Kernel and the userspace.
- This driver can also be built as a module. If so, the module
- will be called nvmem_core.
-
If unsure, say no.
if NVMEM
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 927eb5f6003f..6cd3edb2eaf6 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -135,6 +135,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
if (pos >= nvmem->size)
return 0;
+ if (!IS_ALIGNED(pos, nvmem->stride))
+ return -EINVAL;
+
if (count < nvmem->word_size)
return -EINVAL;
@@ -172,6 +175,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
if (pos >= nvmem->size)
return -EFBIG;
+ if (!IS_ALIGNED(pos, nvmem->stride))
+ return -EINVAL;
+
if (count < nvmem->word_size)
return -EINVAL;
@@ -567,7 +573,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
/**
* nvmem_register() - Register a nvmem device for given nvmem_config.
- * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
* @config: nvmem device configuration with which nvmem device is created.
*
@@ -629,12 +635,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (!config->no_of_node)
nvmem->dev.of_node = config->dev->of_node;
- if (config->id == -1 && config->name) {
+ switch (config->id) {
+ case NVMEM_DEVID_NONE:
dev_set_name(&nvmem->dev, "%s", config->name);
- } else {
+ break;
+ case NVMEM_DEVID_AUTO:
+ dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
+ break;
+ default:
dev_set_name(&nvmem->dev, "%s%d",
config->name ? : "nvmem",
config->name ? config->id : nvmem->id);
+ break;
}
nvmem->read_only = device_property_present(config->dev, "read-only") ||
@@ -722,7 +734,7 @@ static void devm_nvmem_release(struct device *dev, void *res)
/**
* devm_nvmem_register() - Register a managed nvmem device for given
* nvmem_config.
- * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
* @dev: Device that uses the nvmem device.
* @config: nvmem device configuration with which nvmem device is created.
@@ -766,7 +778,7 @@ static int devm_nvmem_match(struct device *dev, void *res, void *data)
* @dev: Device that uses the nvmem device.
* @nvmem: Pointer to previously registered nvmem device.
*
- * Return: Will be an negative on error or a zero on success.
+ * Return: Will be negative on error or zero on success.
*/
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
@@ -1369,7 +1381,22 @@ static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
}
/**
- * nvmem_cell_read_u16() - Read a cell value as an u16
+ * nvmem_cell_read_u8() - Read a cell value as a u8
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
+
+/**
+ * nvmem_cell_read_u16() - Read a cell value as a u16
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
@@ -1384,7 +1411,7 @@ int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
/**
- * nvmem_cell_read_u32() - Read a cell value as an u32
+ * nvmem_cell_read_u32() - Read a cell value as a u32
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
@@ -1399,7 +1426,7 @@ int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
/**
- * nvmem_cell_read_u64() - Read a cell value as an u64
+ * nvmem_cell_read_u64() - Read a cell value as a u64
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
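The core changes above add stride-alignment checks to the sysfs read/write paths and introduce nvmem_cell_read_u8() next to the existing u16/u32/u64 helpers. A minimal consumer-side sketch, assuming a cell named "calibration" is attached to the requesting device (the cell name is illustrative):

#include <linux/device.h>
#include <linux/nvmem-consumer.h>

static int baz_read_calibration(struct device *dev)
{
	u8 cal;
	int ret;

	/* Looks up the "calibration" nvmem cell of this device and reads
	 * exactly one byte from it. */
	ret = nvmem_cell_read_u8(dev, "calibration", &cal);
	if (ret)
		return ret;

	dev_info(dev, "calibration byte: %#x\n", cal);
	return 0;
}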
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index 8682cda448d6..a72704cd0468 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2020 The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
@@ -141,7 +141,7 @@ static int sdam_probe(struct platform_device *pdev)
sdam->sdam_config.dev = &pdev->dev;
sdam->sdam_config.name = "spmi_sdam";
- sdam->sdam_config.id = pdev->id;
+ sdam->sdam_config.id = NVMEM_DEVID_AUTO;
sdam->sdam_config.owner = THIS_MODULE,
sdam->sdam_config.stride = 1;
sdam->sdam_config.word_size = 1;
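The sdam change above is the first user of the NVMEM_DEVID_AUTO handling added to nvmem_register() earlier in this diff: instead of naming the device after pdev->id, the core appends its own unique id, so several instances can register under the same base name. A provider-side sketch with hypothetical names:

#include <linux/nvmem-provider.h>

static int qux_reg_read(void *context, unsigned int offset,
			void *val, size_t bytes)
{
	/* copy 'bytes' from the backing storage at 'offset' into 'val' */
	return 0;
}

static struct nvmem_device *qux_register(struct device *dev, size_t size)
{
	struct nvmem_config config = {
		.dev = dev,
		.name = "qux_nvmem",
		/* core picks the suffix: qux_nvmem0, qux_nvmem1, ... */
		.id = NVMEM_DEVID_AUTO,
		.stride = 1,
		.word_size = 1,
		.size = size,
		.reg_read = qux_reg_read,
	};

	return devm_nvmem_register(dev, &config);
}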
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 8a91717600be..5e9e60e2e591 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -3,57 +3,350 @@
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
*/
+#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+/* Blow timer clock frequency in Mhz */
+#define QFPROM_BLOW_TIMER_OFFSET 0x03c
+
+/* Amount of time required to hold charge to blow fuse in micro-seconds */
+#define QFPROM_FUSE_BLOW_POLL_US 100
+#define QFPROM_FUSE_BLOW_TIMEOUT_US 1000
+
+#define QFPROM_BLOW_STATUS_OFFSET 0x048
+#define QFPROM_BLOW_STATUS_BUSY 0x1
+#define QFPROM_BLOW_STATUS_READY 0x0
+
+#define QFPROM_ACCEL_OFFSET 0x044
+
+#define QFPROM_VERSION_OFFSET 0x0
+#define QFPROM_MAJOR_VERSION_SHIFT 28
+#define QFPROM_MAJOR_VERSION_MASK GENMASK(31, QFPROM_MAJOR_VERSION_SHIFT)
+#define QFPROM_MINOR_VERSION_SHIFT 16
+#define QFPROM_MINOR_VERSION_MASK GENMASK(27, QFPROM_MINOR_VERSION_SHIFT)
+
+static bool read_raw_data;
+module_param(read_raw_data, bool, 0644);
+MODULE_PARM_DESC(read_raw_data, "Read raw instead of corrected data");
+/**
+ * struct qfprom_soc_data - config that varies from SoC to SoC.
+ *
+ * @accel_value: Should contain qfprom accel value.
+ * @qfprom_blow_timer_value: The timer value of qfprom when doing efuse blow.
+ * @qfprom_blow_set_freq: The frequency required to set when we start the
+ * fuse blowing.
+ */
+struct qfprom_soc_data {
+ u32 accel_value;
+ u32 qfprom_blow_timer_value;
+ u32 qfprom_blow_set_freq;
+};
+
+/**
+ * struct qfprom_priv - structure holding qfprom attributes
+ *
+ * @qfpraw: iomapped memory space for qfprom-efuse raw address space.
+ * @qfpconf: iomapped memory space for qfprom-efuse configuration address
+ * space.
+ * @qfpcorrected: iomapped memory space for qfprom corrected address space.
+ * @qfpsecurity: iomapped memory space for qfprom security control space.
+ * @dev: qfprom device structure.
+ * @secclk: Clock supply.
+ * @vcc: Regulator supply.
+ * @soc_data: Data for things that vary from SoC to SoC.
+ */
struct qfprom_priv {
- void __iomem *base;
+ void __iomem *qfpraw;
+ void __iomem *qfpconf;
+ void __iomem *qfpcorrected;
+ void __iomem *qfpsecurity;
+ struct device *dev;
+ struct clk *secclk;
+ struct regulator *vcc;
+ const struct qfprom_soc_data *soc_data;
+};
+
+/**
+ * struct qfprom_touched_values - saved values to restore after blowing
+ *
+ * @clk_rate: The rate the clock was at before blowing.
+ * @accel_val: The value of the accel reg before blowing.
+ * @timer_val: The value of the timer before blowing.
+ */
+struct qfprom_touched_values {
+ unsigned long clk_rate;
+ u32 accel_val;
+ u32 timer_val;
};
+/**
+ * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
+ * @priv: Our driver data.
+ * @old: The data that was stashed from before fuse blowing.
+ *
+ * Resets the value of the blow timer, accel register and the clock
+ * and voltage settings.
+ *
+ * Prints messages if there are errors but doesn't return an error code
+ * since there's not much we can do upon failure.
+ */
+static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
+ const struct qfprom_touched_values *old)
+{
+ int ret;
+
+ ret = regulator_disable(priv->vcc);
+ if (ret)
+ dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
+
+ ret = clk_set_rate(priv->secclk, old->clk_rate);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to set clock rate for disable (ignoring)\n");
+
+ clk_disable_unprepare(priv->secclk);
+
+ writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
+}
+
+/**
+ * qfprom_enable_fuse_blowing() - Enable fuse blowing.
+ * @priv: Our driver data.
+ * @old: We'll stash stuff here to use when disabling.
+ *
+ * Sets the value of the blow timer, accel register and the clock
+ * and voltage settings.
+ *
+ * Prints messages if there are errors so caller doesn't need to.
+ *
+ * Return: 0 or -err.
+ */
+static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
+ struct qfprom_touched_values *old)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->secclk);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ old->clk_rate = clk_get_rate(priv->secclk);
+ ret = clk_set_rate(priv->secclk, priv->soc_data->qfprom_blow_set_freq);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set clock rate for enable\n");
+ goto err_clk_prepared;
+ }
+
+ ret = regulator_enable(priv->vcc);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable regulator\n");
+ goto err_clk_rate_set;
+ }
+
+ old->timer_val = readl(priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ old->accel_val = readl(priv->qfpconf + QFPROM_ACCEL_OFFSET);
+ writel(priv->soc_data->qfprom_blow_timer_value,
+ priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ writel(priv->soc_data->accel_value,
+ priv->qfpconf + QFPROM_ACCEL_OFFSET);
+
+ return 0;
+
+err_clk_rate_set:
+ clk_set_rate(priv->secclk, old->clk_rate);
+err_clk_prepared:
+ clk_disable_unprepare(priv->secclk);
+ return ret;
+}
+
+/**
+ * qfprom_efuse_reg_write() - Write to fuses.
+ * @context: Our driver data.
+ * @reg: The offset to write at.
+ * @_val: Pointer to data to write.
+ * @bytes: The number of bytes to write.
+ *
+ * Writes to fuses. WARNING: THIS IS PERMANENT.
+ *
+ * Return: 0 or -err.
+ */
+static int qfprom_reg_write(void *context, unsigned int reg, void *_val,
+ size_t bytes)
+{
+ struct qfprom_priv *priv = context;
+ struct qfprom_touched_values old;
+ int words = bytes / 4;
+ u32 *value = _val;
+ u32 blow_status;
+ int ret;
+ int i;
+
+ dev_dbg(priv->dev,
+ "Writing to raw qfprom region : %#010x of size: %zu\n",
+ reg, bytes);
+
+ /*
+ * The hardware only allows us to write a word at a time, but we can
+ * read a byte at a time. Until the nvmem framework allows a separate
+ * word_size and stride for reading vs. writing, we'll enforce it here.
+ */
+ if (bytes % 4) {
+ dev_err(priv->dev,
+ "%zu is not an integral number of words\n", bytes);
+ return -EINVAL;
+ }
+ if (reg % 4) {
+ dev_err(priv->dev,
+ "Invalid offset: %#x. Must be word aligned\n", reg);
+ return -EINVAL;
+ }
+
+ ret = qfprom_enable_fuse_blowing(priv, &old);
+ if (ret)
+ return ret;
+
+ ret = readl_relaxed_poll_timeout(
+ priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET,
+ blow_status, blow_status == QFPROM_BLOW_STATUS_READY,
+ QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US);
+
+ if (ret) {
+ dev_err(priv->dev,
+ "Timeout waiting for initial ready; aborting.\n");
+ goto exit_enabled_fuse_blowing;
+ }
+
+ for (i = 0; i < words; i++)
+ writel(value[i], priv->qfpraw + reg + (i * 4));
+
+ ret = readl_relaxed_poll_timeout(
+ priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET,
+ blow_status, blow_status == QFPROM_BLOW_STATUS_READY,
+ QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US);
+
+ /* Give an error, but not much we can do in this case */
+ if (ret)
+ dev_err(priv->dev, "Timeout waiting for finish.\n");
+
+exit_enabled_fuse_blowing:
+ qfprom_disable_fuse_blowing(priv, &old);
+
+ return ret;
+}
+
static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
+ void __iomem *base = priv->qfpcorrected;
+
+ if (read_raw_data && priv->qfpraw)
+ base = priv->qfpraw;
while (words--)
- *val++ = readb(priv->base + reg + i++);
+ *val++ = readb(base + reg + i++);
return 0;
}
-static struct nvmem_config econfig = {
- .name = "qfprom",
- .stride = 1,
- .word_size = 1,
- .reg_read = qfprom_reg_read,
+static const struct qfprom_soc_data qfprom_7_8_data = {
+ .accel_value = 0xD10,
+ .qfprom_blow_timer_value = 25,
+ .qfprom_blow_set_freq = 4800000,
};
static int qfprom_probe(struct platform_device *pdev)
{
+ struct nvmem_config econfig = {
+ .name = "qfprom",
+ .stride = 1,
+ .word_size = 1,
+ .id = NVMEM_DEVID_AUTO,
+ .reg_read = qfprom_reg_read,
+ };
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
struct qfprom_priv *priv;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ /* The corrected section is always provided */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ priv->qfpcorrected = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpcorrected))
+ return PTR_ERR(priv->qfpcorrected);
econfig.size = resource_size(res);
econfig.dev = dev;
econfig.priv = priv;
+ priv->dev = dev;
+
+ /*
+ * If more than one region is provided then the OS has the ability
+ * to write.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ u32 version;
+ int major_version, minor_version;
+
+ priv->qfpraw = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpraw))
+ return PTR_ERR(priv->qfpraw);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ priv->qfpconf = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpconf))
+ return PTR_ERR(priv->qfpconf);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ priv->qfpsecurity = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpsecurity))
+ return PTR_ERR(priv->qfpsecurity);
+
+ version = readl(priv->qfpsecurity + QFPROM_VERSION_OFFSET);
+ major_version = (version & QFPROM_MAJOR_VERSION_MASK) >>
+ QFPROM_MAJOR_VERSION_SHIFT;
+ minor_version = (version & QFPROM_MINOR_VERSION_MASK) >>
+ QFPROM_MINOR_VERSION_SHIFT;
+
+ if (major_version == 7 && minor_version == 8)
+ priv->soc_data = &qfprom_7_8_data;
+
+ priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(priv->vcc))
+ return PTR_ERR(priv->vcc);
+
+ priv->secclk = devm_clk_get(dev, "core");
+ if (IS_ERR(priv->secclk)) {
+ ret = PTR_ERR(priv->secclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Error getting clock: %d\n", ret);
+ return ret;
+ }
+
+ /* Only enable writing if we have SoC data. */
+ if (priv->soc_data)
+ econfig.reg_write = qfprom_reg_write;
+ }
+
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
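With the raw and config regions mapped and SoC data matched, the qfprom now also exposes reg_write, so fuses can be blown through the regular nvmem consumer API; as enforced in qfprom_reg_write() above, writes must be word aligned and a multiple of four bytes. A cautious consumer sketch (the lookup name is hypothetical, and such writes are permanent on real hardware):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int blow_one_fuse_word(struct device *dev, unsigned int offset, u32 val)
{
	struct nvmem_device *nvmem;
	int ret;

	if (offset % 4)
		return -EINVAL;	/* the qfprom only accepts word-aligned writes */

	nvmem = devm_nvmem_device_get(dev, "qfprom");	/* name is illustrative */
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* WARNING: this permanently blows fuses. */
	ret = nvmem_device_write(nvmem, offset, sizeof(val), &val);
	return ret < 0 ? ret : 0;
}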
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index ab5e7e0bc3d8..c825fc902d10 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -4,12 +4,14 @@
#include <linux/hwspinlock.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/nvmem-provider.h>
/* PMIC global registers definition */
#define SC27XX_MODULE_EN 0xc08
+#define SC2730_MODULE_EN 0x1808
#define SC27XX_EFUSE_EN BIT(6)
/* Efuse controller registers definition */
@@ -49,12 +51,29 @@
#define SC27XX_EFUSE_POLL_TIMEOUT 3000000
#define SC27XX_EFUSE_POLL_DELAY_US 10000
+/*
+ * Since different PMICs of the SC27xx series can have different
+ * register addresses, we save the address in the device data structure.
+ */
+struct sc27xx_efuse_variant_data {
+ u32 module_en;
+};
+
struct sc27xx_efuse {
struct device *dev;
struct regmap *regmap;
struct hwspinlock *hwlock;
struct mutex mutex;
u32 base;
+ const struct sc27xx_efuse_variant_data *var_data;
+};
+
+static const struct sc27xx_efuse_variant_data sc2731_edata = {
+ .module_en = SC27XX_MODULE_EN,
+};
+
+static const struct sc27xx_efuse_variant_data sc2730_edata = {
+ .module_en = SC2730_MODULE_EN,
};
/*
@@ -119,7 +138,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
return ret;
/* Enable the efuse controller. */
- ret = regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN,
+ ret = regmap_update_bits(efuse->regmap, efuse->var_data->module_en,
SC27XX_EFUSE_EN, SC27XX_EFUSE_EN);
if (ret)
goto unlock_efuse;
@@ -169,7 +188,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
disable_efuse:
/* Disable the efuse controller after reading. */
- regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN, SC27XX_EFUSE_EN, 0);
+ regmap_update_bits(efuse->regmap, efuse->var_data->module_en, SC27XX_EFUSE_EN, 0);
unlock_efuse:
sc27xx_efuse_unlock(efuse);
@@ -219,6 +238,7 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
mutex_init(&efuse->mutex);
efuse->dev = &pdev->dev;
+ efuse->var_data = of_device_get_match_data(&pdev->dev);
econfig.stride = 1;
econfig.word_size = 1;
@@ -238,7 +258,8 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
}
static const struct of_device_id sc27xx_efuse_of_match[] = {
- { .compatible = "sprd,sc2731-efuse" },
+ { .compatible = "sprd,sc2731-efuse", .data = &sc2731_edata},
+ { .compatible = "sprd,sc2730-efuse", .data = &sc2730_edata},
{ }
};
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
index 925feb21d5ad..59523245db8a 100644
--- a/drivers/nvmem/sprd-efuse.c
+++ b/drivers/nvmem/sprd-efuse.c
@@ -378,8 +378,8 @@ static int sprd_efuse_probe(struct platform_device *pdev)
return -ENOMEM;
efuse->base = devm_platform_ioremap_resource(pdev, 0);
- if (!efuse->base)
- return -ENOMEM;
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
ret = of_hwspin_lock_get_id(np, 0);
if (ret < 0) {
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index b3ed94b98d9b..de9362c25c07 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -70,5 +70,6 @@ source "drivers/phy/st/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
source "drivers/phy/intel/Kconfig"
+source "drivers/phy/xilinx/Kconfig"
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 310c149a9df5..c27408e4daae 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -8,24 +8,25 @@ obj-$(CONFIG_GENERIC_PHY_MIPI_DPHY) += phy-core-mipi-dphy.o
obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
-obj-$(CONFIG_ARCH_SUNXI) += allwinner/
-obj-$(CONFIG_ARCH_MESON) += amlogic/
-obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
-obj-$(CONFIG_ARCH_RENESAS) += renesas/
-obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-y += broadcom/ \
+obj-y += allwinner/ \
+ amlogic/ \
+ broadcom/ \
cadence/ \
freescale/ \
hisilicon/ \
intel/ \
lantiq/ \
marvell/ \
+ mediatek/ \
motorola/ \
mscc/ \
qualcomm/ \
ralink/ \
+ renesas/ \
+ rockchip/ \
samsung/ \
socionext/ \
st/ \
- ti/
+ tegra/ \
+ ti/ \
+ xilinx/
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index e760d89d3976..fb584518b2d0 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -22,7 +22,7 @@ config PHY_SUN4I_USB
config PHY_SUN6I_MIPI_DPHY
tristate "Allwinner A31 MIPI D-PHY Support"
depends on ARCH_SUNXI || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && COMMON_CLK
depends on RESET_CONTROLLER
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index e5842e48a5e0..651d5e2a25ce 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -7,7 +7,7 @@
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
*
- * Modelled after: Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY driver
+ * Modelled after: Samsung S5P/Exynos SoC series MIPI CSIS/DSIM DPHY driver
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
*/
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/extcon-provider.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
index 79c8af5c7c1d..1fa761ba6cbb 100644
--- a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
+++ b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
@@ -233,7 +233,7 @@ static int sun6i_dphy_exit(struct phy *phy)
}
-static struct phy_ops sun6i_dphy_ops = {
+static const struct phy_ops sun6i_dphy_ops = {
.configure = sun6i_dphy_configure,
.power_on = sun6i_dphy_power_on,
.power_off = sun6i_dphy_power_off,
@@ -241,7 +241,7 @@ static struct phy_ops sun6i_dphy_ops = {
.exit = sun6i_dphy_exit,
};
-static struct regmap_config sun6i_dphy_regmap_config = {
+static const struct regmap_config sun6i_dphy_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index b29f11c19155..a1f1a9c90d0d 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -2,6 +2,14 @@
#
# Phy drivers for Broadcom platforms
#
+config PHY_BCM63XX_USBH
+ tristate "BCM63xx USBH PHY driver"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ select GENERIC_PHY
+ help
+ Enable this to support the BCM63xx USBH PHY driver.
+ If unsure, say N.
+
config PHY_CYGNUS_PCIE
tristate "Broadcom Cygnus PCIe PHY driver"
depends on OF && (ARCH_BCM_CYGNUS || COMPILE_TEST)
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index c78de546135c..7024127f86ad 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_BCM63XX_USBH) += phy-bcm63xx-usbh.o
obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_BCM_NS_USB2) += phy-bcm-ns-usb2.o
diff --git a/drivers/phy/broadcom/phy-bcm63xx-usbh.c b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
new file mode 100644
index 000000000000..6c05ba8b08be
--- /dev/null
+++ b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM6328 USBH PHY Controller Driver
+ *
+ * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2015 Simon Arlott
+ *
+ * Derived from bcm963xx_4.12L.06B_consumer/kernel/linux/arch/mips/bcm963xx/setup.c:
+ * Copyright (C) 2002 Broadcom Corporation
+ *
+ * Derived from OpenWrt patches:
+ * Copyright (C) 2013 Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright (C) 2013 Florian Fainelli <f.fainelli@gmail.com>
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* USBH control register offsets */
+enum usbh_regs {
+ USBH_BRT_CONTROL1 = 0,
+ USBH_BRT_CONTROL2,
+ USBH_BRT_STATUS1,
+ USBH_BRT_STATUS2,
+ USBH_UTMI_CONTROL1,
+#define USBH_UC1_DEV_MODE_SEL BIT(0)
+ USBH_TEST_PORT_CONTROL,
+ USBH_PLL_CONTROL1,
+#define USBH_PLLC_REFCLKSEL_SHIFT 0
+#define USBH_PLLC_REFCLKSEL_MASK (0x3 << USBH_PLLC_REFCLKSEL_SHIFT)
+#define USBH_PLLC_CLKSEL_SHIFT 2
+#define USBH_PLLC_CLKSEL_MASK (0x3 << USBH_PLLC_CLKSEL_SHIFT)
+#define USBH_PLLC_XTAL_PWRDWNB BIT(4)
+#define USBH_PLLC_PLL_PWRDWNB BIT(5)
+#define USBH_PLLC_PLL_CALEN BIT(6)
+#define USBH_PLLC_PHYPLL_BYP BIT(7)
+#define USBH_PLLC_PLL_RESET BIT(8)
+#define USBH_PLLC_PLL_IDDQ_PWRDN BIT(9)
+#define USBH_PLLC_PLL_PWRDN_DELAY BIT(10)
+#define USBH_6318_PLLC_PLL_SUSPEND_EN BIT(27)
+#define USBH_6318_PLLC_PHYPLL_BYP BIT(29)
+#define USBH_6318_PLLC_PLL_RESET BIT(30)
+#define USBH_6318_PLLC_PLL_IDDQ_PWRDN BIT(31)
+ USBH_SWAP_CONTROL,
+#define USBH_SC_OHCI_DATA_SWAP BIT(0)
+#define USBH_SC_OHCI_ENDIAN_SWAP BIT(1)
+#define USBH_SC_OHCI_LOGICAL_ADDR_EN BIT(2)
+#define USBH_SC_EHCI_DATA_SWAP BIT(3)
+#define USBH_SC_EHCI_ENDIAN_SWAP BIT(4)
+#define USBH_SC_EHCI_LOGICAL_ADDR_EN BIT(5)
+#define USBH_SC_USB_DEVICE_SEL BIT(6)
+ USBH_GENERIC_CONTROL,
+#define USBH_GC_PLL_SUSPEND_EN BIT(1)
+ USBH_FRAME_ADJUST_VALUE,
+ USBH_SETUP,
+#define USBH_S_IOC BIT(4)
+#define USBH_S_IPP BIT(5)
+ USBH_MDIO,
+ USBH_MDIO32,
+ USBH_USB_SIM_CONTROL,
+#define USBH_USC_LADDR_SEL BIT(5)
+
+ __USBH_ENUM_SIZE
+};
+
+struct bcm63xx_usbh_phy_variant {
+ /* Registers */
+ long regs[__USBH_ENUM_SIZE];
+
+ /* PLLC bits to set/clear for power on */
+ u32 power_pllc_clr;
+ u32 power_pllc_set;
+
+ /* Setup bits to set/clear for power on */
+ u32 setup_clr;
+ u32 setup_set;
+
+ /* Swap Control bits to set */
+ u32 swapctl_dev_set;
+
+ /* Test Port Control value to set if non-zero */
+ u32 tpc_val;
+
+ /* USB Sim Control bits to set */
+ u32 usc_set;
+
+ /* UTMI Control 1 bits to set */
+ u32 utmictl1_dev_set;
+};
+
+struct bcm63xx_usbh_phy {
+ void __iomem *base;
+ struct clk *usbh_clk;
+ struct clk *usb_ref_clk;
+ struct reset_control *reset;
+ const struct bcm63xx_usbh_phy_variant *variant;
+ bool device_mode;
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm6318 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = -1,
+ [USBH_BRT_CONTROL2] = -1,
+ [USBH_BRT_STATUS1] = -1,
+ [USBH_BRT_STATUS2] = -1,
+ [USBH_UTMI_CONTROL1] = 0x2c,
+ [USBH_TEST_PORT_CONTROL] = 0x1c,
+ [USBH_PLL_CONTROL1] = 0x04,
+ [USBH_SWAP_CONTROL] = 0x0c,
+ [USBH_GENERIC_CONTROL] = -1,
+ [USBH_FRAME_ADJUST_VALUE] = 0x08,
+ [USBH_SETUP] = 0x00,
+ [USBH_MDIO] = 0x14,
+ [USBH_MDIO32] = 0x18,
+ [USBH_USB_SIM_CONTROL] = 0x20,
+ },
+ .power_pllc_clr = USBH_6318_PLLC_PLL_IDDQ_PWRDN,
+ .power_pllc_set = USBH_6318_PLLC_PLL_SUSPEND_EN,
+ .setup_set = USBH_S_IOC,
+ .swapctl_dev_set = USBH_SC_USB_DEVICE_SEL,
+ .usc_set = USBH_USC_LADDR_SEL,
+ .utmictl1_dev_set = USBH_UC1_DEV_MODE_SEL,
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm6328 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = 0x00,
+ [USBH_BRT_CONTROL2] = 0x04,
+ [USBH_BRT_STATUS1] = 0x08,
+ [USBH_BRT_STATUS2] = 0x0c,
+ [USBH_UTMI_CONTROL1] = 0x10,
+ [USBH_TEST_PORT_CONTROL] = 0x14,
+ [USBH_PLL_CONTROL1] = 0x18,
+ [USBH_SWAP_CONTROL] = 0x1c,
+ [USBH_GENERIC_CONTROL] = 0x20,
+ [USBH_FRAME_ADJUST_VALUE] = 0x24,
+ [USBH_SETUP] = 0x28,
+ [USBH_MDIO] = 0x2c,
+ [USBH_MDIO32] = 0x30,
+ [USBH_USB_SIM_CONTROL] = 0x34,
+ },
+ .setup_set = USBH_S_IOC,
+ .swapctl_dev_set = USBH_SC_USB_DEVICE_SEL,
+ .utmictl1_dev_set = USBH_UC1_DEV_MODE_SEL,
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm6358 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = -1,
+ [USBH_BRT_CONTROL2] = -1,
+ [USBH_BRT_STATUS1] = -1,
+ [USBH_BRT_STATUS2] = -1,
+ [USBH_UTMI_CONTROL1] = -1,
+ [USBH_TEST_PORT_CONTROL] = 0x24,
+ [USBH_PLL_CONTROL1] = -1,
+ [USBH_SWAP_CONTROL] = 0x00,
+ [USBH_GENERIC_CONTROL] = -1,
+ [USBH_FRAME_ADJUST_VALUE] = -1,
+ [USBH_SETUP] = -1,
+ [USBH_MDIO] = -1,
+ [USBH_MDIO32] = -1,
+ [USBH_USB_SIM_CONTROL] = -1,
+ },
+ /*
+ * The magic value comes from the original vendor BSP
+ * and is needed for USB to work. Datasheet does not
+ * help, so the magic value is used as-is.
+ */
+ .tpc_val = 0x1c0020,
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm6368 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = 0x00,
+ [USBH_BRT_CONTROL2] = 0x04,
+ [USBH_BRT_STATUS1] = 0x08,
+ [USBH_BRT_STATUS2] = 0x0c,
+ [USBH_UTMI_CONTROL1] = 0x10,
+ [USBH_TEST_PORT_CONTROL] = 0x14,
+ [USBH_PLL_CONTROL1] = 0x18,
+ [USBH_SWAP_CONTROL] = 0x1c,
+ [USBH_GENERIC_CONTROL] = -1,
+ [USBH_FRAME_ADJUST_VALUE] = 0x24,
+ [USBH_SETUP] = 0x28,
+ [USBH_MDIO] = 0x2c,
+ [USBH_MDIO32] = 0x30,
+ [USBH_USB_SIM_CONTROL] = 0x34,
+ },
+ .power_pllc_clr = USBH_PLLC_PLL_IDDQ_PWRDN | USBH_PLLC_PLL_PWRDN_DELAY,
+ .setup_set = USBH_S_IOC,
+ .swapctl_dev_set = USBH_SC_USB_DEVICE_SEL,
+ .utmictl1_dev_set = USBH_UC1_DEV_MODE_SEL,
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm63268 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = 0x00,
+ [USBH_BRT_CONTROL2] = 0x04,
+ [USBH_BRT_STATUS1] = 0x08,
+ [USBH_BRT_STATUS2] = 0x0c,
+ [USBH_UTMI_CONTROL1] = 0x10,
+ [USBH_TEST_PORT_CONTROL] = 0x14,
+ [USBH_PLL_CONTROL1] = 0x18,
+ [USBH_SWAP_CONTROL] = 0x1c,
+ [USBH_GENERIC_CONTROL] = 0x20,
+ [USBH_FRAME_ADJUST_VALUE] = 0x24,
+ [USBH_SETUP] = 0x28,
+ [USBH_MDIO] = 0x2c,
+ [USBH_MDIO32] = 0x30,
+ [USBH_USB_SIM_CONTROL] = 0x34,
+ },
+ .power_pllc_clr = USBH_PLLC_PLL_IDDQ_PWRDN | USBH_PLLC_PLL_PWRDN_DELAY,
+ .setup_clr = USBH_S_IPP,
+ .setup_set = USBH_S_IOC,
+ .swapctl_dev_set = USBH_SC_USB_DEVICE_SEL,
+ .utmictl1_dev_set = USBH_UC1_DEV_MODE_SEL,
+};
+
+static inline bool usbh_has_reg(struct bcm63xx_usbh_phy *usbh, int reg)
+{
+ return (usbh->variant->regs[reg] >= 0);
+}
+
+static inline u32 usbh_readl(struct bcm63xx_usbh_phy *usbh, int reg)
+{
+ return __raw_readl(usbh->base + usbh->variant->regs[reg]);
+}
+
+static inline void usbh_writel(struct bcm63xx_usbh_phy *usbh, int reg,
+ u32 value)
+{
+ __raw_writel(value, usbh->base + usbh->variant->regs[reg]);
+}
+
+static int bcm63xx_usbh_phy_init(struct phy *phy)
+{
+ struct bcm63xx_usbh_phy *usbh = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(usbh->usbh_clk);
+ if (ret) {
+ dev_err(&phy->dev, "unable to enable usbh clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(usbh->usb_ref_clk);
+ if (ret) {
+ dev_err(&phy->dev, "unable to enable usb_ref clock: %d\n", ret);
+ clk_disable_unprepare(usbh->usbh_clk);
+ return ret;
+ }
+
+ ret = reset_control_reset(usbh->reset);
+ if (ret) {
+ dev_err(&phy->dev, "unable to reset device: %d\n", ret);
+ clk_disable_unprepare(usbh->usb_ref_clk);
+ clk_disable_unprepare(usbh->usbh_clk);
+ return ret;
+ }
+
+ /* Configure to work in native CPU endian */
+ if (usbh_has_reg(usbh, USBH_SWAP_CONTROL)) {
+ u32 val = usbh_readl(usbh, USBH_SWAP_CONTROL);
+
+ val |= USBH_SC_EHCI_DATA_SWAP;
+ val &= ~USBH_SC_EHCI_ENDIAN_SWAP;
+
+ val |= USBH_SC_OHCI_DATA_SWAP;
+ val &= ~USBH_SC_OHCI_ENDIAN_SWAP;
+
+ if (usbh->device_mode && usbh->variant->swapctl_dev_set)
+ val |= usbh->variant->swapctl_dev_set;
+
+ usbh_writel(usbh, USBH_SWAP_CONTROL, val);
+ }
+
+ if (usbh_has_reg(usbh, USBH_SETUP)) {
+ u32 val = usbh_readl(usbh, USBH_SETUP);
+
+ val |= usbh->variant->setup_set;
+ val &= ~usbh->variant->setup_clr;
+
+ usbh_writel(usbh, USBH_SETUP, val);
+ }
+
+ if (usbh_has_reg(usbh, USBH_USB_SIM_CONTROL)) {
+ u32 val = usbh_readl(usbh, USBH_USB_SIM_CONTROL);
+
+ val |= usbh->variant->usc_set;
+
+ usbh_writel(usbh, USBH_USB_SIM_CONTROL, val);
+ }
+
+ if (usbh->variant->tpc_val &&
+ usbh_has_reg(usbh, USBH_TEST_PORT_CONTROL))
+ usbh_writel(usbh, USBH_TEST_PORT_CONTROL,
+ usbh->variant->tpc_val);
+
+ if (usbh->device_mode &&
+ usbh_has_reg(usbh, USBH_UTMI_CONTROL1) &&
+ usbh->variant->utmictl1_dev_set) {
+ u32 val = usbh_readl(usbh, USBH_UTMI_CONTROL1);
+
+ val |= usbh->variant->utmictl1_dev_set;
+
+ usbh_writel(usbh, USBH_UTMI_CONTROL1, val);
+ }
+
+ return 0;
+}
+
+static int bcm63xx_usbh_phy_power_on(struct phy *phy)
+{
+ struct bcm63xx_usbh_phy *usbh = phy_get_drvdata(phy);
+
+ if (usbh_has_reg(usbh, USBH_PLL_CONTROL1)) {
+ u32 val = usbh_readl(usbh, USBH_PLL_CONTROL1);
+
+ val |= usbh->variant->power_pllc_set;
+ val &= ~usbh->variant->power_pllc_clr;
+
+ usbh_writel(usbh, USBH_PLL_CONTROL1, val);
+ }
+
+ return 0;
+}
+
+static int bcm63xx_usbh_phy_power_off(struct phy *phy)
+{
+ struct bcm63xx_usbh_phy *usbh = phy_get_drvdata(phy);
+
+ if (usbh_has_reg(usbh, USBH_PLL_CONTROL1)) {
+ u32 val = usbh_readl(usbh, USBH_PLL_CONTROL1);
+
+ val &= ~usbh->variant->power_pllc_set;
+ val |= usbh->variant->power_pllc_clr;
+
+ usbh_writel(usbh, USBH_PLL_CONTROL1, val);
+ }
+
+ return 0;
+}
+
+static int bcm63xx_usbh_phy_exit(struct phy *phy)
+{
+ struct bcm63xx_usbh_phy *usbh = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(usbh->usbh_clk);
+ clk_disable_unprepare(usbh->usb_ref_clk);
+
+ return 0;
+}
+
+static const struct phy_ops bcm63xx_usbh_phy_ops = {
+ .exit = bcm63xx_usbh_phy_exit,
+ .init = bcm63xx_usbh_phy_init,
+ .power_off = bcm63xx_usbh_phy_power_off,
+ .power_on = bcm63xx_usbh_phy_power_on,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *bcm63xx_usbh_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct bcm63xx_usbh_phy *usbh = dev_get_drvdata(dev);
+
+ usbh->device_mode = !!args->args[0];
+
+ return of_phy_simple_xlate(dev, args);
+}
+
+static int __init bcm63xx_usbh_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_usbh_phy *usbh;
+ const struct bcm63xx_usbh_phy_variant *variant;
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+
+ usbh = devm_kzalloc(dev, sizeof(*usbh), GFP_KERNEL);
+ if (!usbh)
+ return -ENOMEM;
+
+ variant = device_get_match_data(dev);
+ if (!variant)
+ return -EINVAL;
+ usbh->variant = variant;
+
+ usbh->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usbh->base))
+ return PTR_ERR(usbh->base);
+
+ usbh->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(usbh->reset)) {
+ if (PTR_ERR(usbh->reset) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get reset\n");
+ return PTR_ERR(usbh->reset);
+ }
+
+ usbh->usbh_clk = devm_clk_get_optional(dev, "usbh");
+ if (IS_ERR(usbh->usbh_clk))
+ return PTR_ERR(usbh->usbh_clk);
+
+ usbh->usb_ref_clk = devm_clk_get_optional(dev, "usb_ref");
+ if (IS_ERR(usbh->usb_ref_clk))
+ return PTR_ERR(usbh->usb_ref_clk);
+
+ phy = devm_phy_create(dev, NULL, &bcm63xx_usbh_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ platform_set_drvdata(pdev, usbh);
+ phy_set_drvdata(phy, usbh);
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ bcm63xx_usbh_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "failed to register PHY provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ dev_dbg(dev, "Registered BCM63xx USB PHY driver\n");
+
+ return 0;
+}
+
+static const struct of_device_id bcm63xx_usbh_phy_ids[] __initconst = {
+ { .compatible = "brcm,bcm6318-usbh-phy", .data = &usbh_bcm6318 },
+ { .compatible = "brcm,bcm6328-usbh-phy", .data = &usbh_bcm6328 },
+ { .compatible = "brcm,bcm6358-usbh-phy", .data = &usbh_bcm6358 },
+ { .compatible = "brcm,bcm6362-usbh-phy", .data = &usbh_bcm6368 },
+ { .compatible = "brcm,bcm6368-usbh-phy", .data = &usbh_bcm6368 },
+ { .compatible = "brcm,bcm63268-usbh-phy", .data = &usbh_bcm63268 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_usbh_phy_ids);
+
+static struct platform_driver bcm63xx_usbh_phy_driver __refdata = {
+ .driver = {
+ .name = "bcm63xx-usbh-phy",
+ .of_match_table = bcm63xx_usbh_phy_ids,
+ },
+ .probe = bcm63xx_usbh_phy_probe,
+};
+module_platform_driver(bcm63xx_usbh_phy_driver);
+
+MODULE_DESCRIPTION("BCM63xx USBH PHY driver");
+MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>");
+MODULE_AUTHOR("Simon Arlott");
+MODULE_LICENSE("GPL");
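The new driver registers through the generic PHY framework, and its custom xlate uses the first phandle cell to select device mode (phys = <&usbh 1>) versus host mode (phys = <&usbh 0>). A rough consumer-side sketch of how a host-controller driver would pick the PHY up; the consumer itself is hypothetical, while the generic PHY calls are the standard API:

#include <linux/err.h>
#include <linux/phy/phy.h>

static int host_attach_phy(struct device *dev)
{
	struct phy *phy;
	int ret;

	/* Assumes the consumer node carries phys = <&usbh 0> and
	 * phy-names = "usb"; args[0] = 0 means host mode in
	 * bcm63xx_usbh_phy_xlate(). */
	phy = devm_phy_get(dev, "usb");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* clocks, reset and USBH setup */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* PLL control bits */
	if (ret)
		phy_exit(phy);

	return ret;
}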
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
index 1ecbb964cd21..016514e4aa54 100644
--- a/drivers/phy/cadence/phy-cadence-salvo.c
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -88,7 +88,7 @@
#define TB_ADDR_TX_RCVDETSC_CTRL 0x4124
/* TB_ADDR_TX_RCVDETSC_CTRL */
-#define RXDET_IN_P3_32KHZ BIT(1)
+#define RXDET_IN_P3_32KHZ BIT(0)
struct cdns_reg_pairs {
u16 val;
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
index 6960dfd8ad8c..0fe408964334 100644
--- a/drivers/phy/marvell/phy-armada38x-comphy.c
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -41,6 +41,7 @@ struct a38x_comphy_lane {
struct a38x_comphy {
void __iomem *base;
+ void __iomem *conf;
struct device *dev;
struct a38x_comphy_lane lane[MAX_A38X_COMPHY];
};
@@ -54,6 +55,21 @@ static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = {
{ 0, 0, 3 },
};
+static void a38x_set_conf(struct a38x_comphy_lane *lane, bool enable)
+{
+ struct a38x_comphy *priv = lane->priv;
+ u32 conf;
+
+ if (priv->conf) {
+ conf = readl_relaxed(priv->conf);
+ if (enable)
+ conf |= BIT(lane->port);
+ else
+ conf &= ~BIT(lane->port);
+ writel(conf, priv->conf);
+ }
+}
+
static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane,
unsigned int offset, u32 mask, u32 value)
{
@@ -97,6 +113,7 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
{
struct a38x_comphy_lane *lane = phy_get_drvdata(phy);
unsigned int gen;
+ int ret;
if (mode != PHY_MODE_ETHERNET)
return -EINVAL;
@@ -115,13 +132,20 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
return -EINVAL;
}
+ a38x_set_conf(lane, false);
+
a38x_comphy_set_speed(lane, gen, gen);
- return a38x_comphy_poll(lane, COMPHY_STAT1,
- COMPHY_STAT1_PLL_RDY_TX |
- COMPHY_STAT1_PLL_RDY_RX,
- COMPHY_STAT1_PLL_RDY_TX |
- COMPHY_STAT1_PLL_RDY_RX);
+ ret = a38x_comphy_poll(lane, COMPHY_STAT1,
+ COMPHY_STAT1_PLL_RDY_TX |
+ COMPHY_STAT1_PLL_RDY_RX,
+ COMPHY_STAT1_PLL_RDY_TX |
+ COMPHY_STAT1_PLL_RDY_RX);
+
+ if (ret == 0)
+ a38x_set_conf(lane, true);
+
+ return ret;
}
static const struct phy_ops a38x_comphy_ops = {
@@ -174,14 +198,21 @@ static int a38x_comphy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
priv->dev = &pdev->dev;
priv->base = base;
+ /* Optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf");
+ if (res) {
+ priv->conf = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->conf))
+ return PTR_ERR(priv->conf);
+ }
+
for_each_available_child_of_node(pdev->dev.of_node, child) {
struct phy *phy;
int ret;
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index 23bc3bf5c4c0..8834436bc9db 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -72,7 +72,7 @@ struct mvebu_a3700_utmi_caps {
* struct mvebu_a3700_utmi - PHY driver data
*
* @regs: PHY registers
- * @usb_mis: Regmap with USB miscellaneous registers including PHY ones
+ * @usb_misc: Regmap with USB miscellaneous registers including PHY ones
* @caps: PHY capabilities
* @phy: PHY handle
*/
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 94a34cf75eb3..5172971f4c36 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -178,6 +178,7 @@ static const struct phy_ops gpio_usb_ops = {
/**
* phy_mdm6600_cmd() - send a command request to mdm6600
* @ddata: device driver data
+ * @val: value of cmd to be set
*
* Configures the three command request GPIOs to the specified value.
*/
@@ -194,7 +195,7 @@ static void phy_mdm6600_cmd(struct phy_mdm6600 *ddata, int val)
/**
* phy_mdm6600_status() - read mdm6600 status lines
- * @ddata: device driver data
+ * @work: work structure
*/
static void phy_mdm6600_status(struct work_struct *work)
{
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index a27b8d578d7f..71cb10826326 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -1062,6 +1062,7 @@ EXPORT_SYMBOL_GPL(__of_phy_provider_register);
* __devm_of_phy_provider_register() - create/register phy provider with the
* framework
* @dev: struct device of the phy provider
+ * @children: device node containing children (if different from dev->of_node)
* @owner: the module owner containing of_xlate
* @of_xlate: function pointer to obtain phy instance from phy provider
*
@@ -1117,12 +1118,14 @@ EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
/**
* devm_of_phy_provider_unregister() - remove phy provider from the framework
* @dev: struct device of the phy provider
+ * @phy_provider: phy provider returned by of_phy_provider_register()
*
* destroys the devres associated with this phy provider and invokes
* of_phy_provider_unregister to unregister the phy provider.
*/
void devm_of_phy_provider_unregister(struct device *dev,
- struct phy_provider *phy_provider) {
+ struct phy_provider *phy_provider)
+{
int r;
r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 7a33ec12f71b..b88922e7de1d 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1615,7 +1615,7 @@ static struct phy *xgene_phy_xlate(struct device *dev,
if (args->args_count <= 0)
return ERR_PTR(-EINVAL);
- if (args->args[0] < MODE_SATA || args->args[0] >= MODE_MAX)
+ if (args->args[0] >= MODE_MAX)
return ERR_PTR(-EINVAL);
ctx->mode = args->args[0];
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index ca9ce7e84a5c..928db510b86c 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -59,30 +59,6 @@ config PHY_QCOM_QUSB2
PHY which is usually paired with either the ChipIdea or Synopsys DWC3
USB IPs on MSM SOCs.
-config PHY_QCOM_UFS
- tristate "Qualcomm UFS PHY driver"
- depends on OF && ARCH_QCOM
- select GENERIC_PHY
- help
- Support for UFS PHY on QCOM chipsets.
-
-if PHY_QCOM_UFS
-
-config PHY_QCOM_UFS_14NM
- tristate
- default PHY_QCOM_UFS
- help
- Support for 14nm UFS QMP phy present on QCOM chipsets.
-
-config PHY_QCOM_UFS_20NM
- tristate
- default PHY_QCOM_UFS
- depends on BROKEN
- help
- Support for 20nm UFS QMP phy present on QCOM chipsets.
-
-endif
-
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
@@ -128,3 +104,13 @@ config PHY_QCOM_USB_SS
help
Enable this to support the Super-Speed USB transceiver on various
Qualcomm chipsets.
+
+config PHY_QCOM_IPQ806X_USB
+ tristate "Qualcomm IPQ806x DWC3 USB PHY driver"
+ depends on HAS_IOMEM
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ This option enables support for the Synopsys PHYs present inside the
+ Qualcomm USB3.0 DWC3 controller on IPQ806x SoCs. This driver supports
+ both HS and SS PHY controllers.
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 86fb32efab79..47acbd7daa3a 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -6,11 +6,9 @@ obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
-obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
-obj-$(CONFIG_PHY_QCOM_UFS_14NM) += phy-qcom-ufs-qmp-14nm.o
-obj-$(CONFIG_PHY_QCOM_UFS_20NM) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o
obj-$(CONFIG_PHY_QCOM_USB_SS) += phy-qcom-usb-ss.o
obj-$(CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2)+= phy-qcom-snps-femto-v2.o
+obj-$(CONFIG_PHY_QCOM_IPQ806X_USB) += phy-qcom-ipq806x-usb.o
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
new file mode 100644
index 000000000000..71f257b4a7f5
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+/* USB QSCRATCH Hardware registers */
+#define QSCRATCH_GENERAL_CFG (0x08)
+#define HSUSB_PHY_CTRL_REG (0x10)
+
+/* PHY_CTRL_REG */
+#define HSUSB_CTRL_DMSEHV_CLAMP BIT(24)
+#define HSUSB_CTRL_USB2_SUSPEND BIT(23)
+#define HSUSB_CTRL_UTMI_CLK_EN BIT(21)
+#define HSUSB_CTRL_UTMI_OTG_VBUS_VALID BIT(20)
+#define HSUSB_CTRL_USE_CLKCORE BIT(18)
+#define HSUSB_CTRL_DPSEHV_CLAMP BIT(17)
+#define HSUSB_CTRL_COMMONONN BIT(11)
+#define HSUSB_CTRL_ID_HV_CLAMP BIT(9)
+#define HSUSB_CTRL_OTGSESSVLD_CLAMP BIT(8)
+#define HSUSB_CTRL_CLAMP_EN BIT(7)
+#define HSUSB_CTRL_RETENABLEN BIT(1)
+#define HSUSB_CTRL_POR BIT(0)
+
+/* QSCRATCH_GENERAL_CFG */
+#define HSUSB_GCFG_XHCI_REV BIT(2)
+
+/* USB QSCRATCH Hardware registers */
+#define SSUSB_PHY_CTRL_REG (0x00)
+#define SSUSB_PHY_PARAM_CTRL_1 (0x04)
+#define SSUSB_PHY_PARAM_CTRL_2 (0x08)
+#define CR_PROTOCOL_DATA_IN_REG (0x0c)
+#define CR_PROTOCOL_DATA_OUT_REG (0x10)
+#define CR_PROTOCOL_CAP_ADDR_REG (0x14)
+#define CR_PROTOCOL_CAP_DATA_REG (0x18)
+#define CR_PROTOCOL_READ_REG (0x1c)
+#define CR_PROTOCOL_WRITE_REG (0x20)
+
+/* PHY_CTRL_REG */
+#define SSUSB_CTRL_REF_USE_PAD BIT(28)
+#define SSUSB_CTRL_TEST_POWERDOWN BIT(27)
+#define SSUSB_CTRL_LANE0_PWR_PRESENT BIT(24)
+#define SSUSB_CTRL_SS_PHY_EN BIT(8)
+#define SSUSB_CTRL_SS_PHY_RESET BIT(7)
+
+/* SSPHY control registers - Does this need 0x30? */
+#define SSPHY_CTRL_RX_OVRD_IN_HI(lane) (0x1006 + 0x100 * (lane))
+#define SSPHY_CTRL_TX_OVRD_DRV_LO(lane) (0x1002 + 0x100 * (lane))
+
+/* SSPHY SoC version specific values */
+#define SSPHY_RX_EQ_VALUE 4 /* Override value for rx_eq */
+/* Override value for transmit preemphasis */
+#define SSPHY_TX_DEEMPH_3_5DB 23
+/* Override value for mpll */
+#define SSPHY_MPLL_VALUE 0
+
+/* QSCRATCH PHY_PARAM_CTRL1 fields */
+#define PHY_PARAM_CTRL1_TX_FULL_SWING_MASK GENMASK(26, 19)
+#define PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK GENMASK(19, 13)
+#define PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK GENMASK(13, 7)
+#define PHY_PARAM_CTRL1_LOS_BIAS_MASK GENMASK(7, 2)
+
+#define PHY_PARAM_CTRL1_MASK \
+ (PHY_PARAM_CTRL1_TX_FULL_SWING_MASK | \
+ PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK | \
+ PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK | \
+ PHY_PARAM_CTRL1_LOS_BIAS_MASK)
+
+#define PHY_PARAM_CTRL1_TX_FULL_SWING(x) \
+ (((x) << 20) & PHY_PARAM_CTRL1_TX_FULL_SWING_MASK)
+#define PHY_PARAM_CTRL1_TX_DEEMPH_6DB(x) \
+ (((x) << 14) & PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK)
+#define PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB(x) \
+ (((x) << 8) & PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK)
+#define PHY_PARAM_CTRL1_LOS_BIAS(x) \
+ (((x) << 3) & PHY_PARAM_CTRL1_LOS_BIAS_MASK)
+
+/* RX OVRD IN HI bits */
+#define RX_OVRD_IN_HI_RX_RESET_OVRD BIT(13)
+#define RX_OVRD_IN_HI_RX_RX_RESET BIT(12)
+#define RX_OVRD_IN_HI_RX_EQ_OVRD BIT(11)
+#define RX_OVRD_IN_HI_RX_EQ_MASK GENMASK(10, 7)
+#define RX_OVRD_IN_HI_RX_EQ(x) ((x) << 8)
+#define RX_OVRD_IN_HI_RX_EQ_EN_OVRD BIT(7)
+#define RX_OVRD_IN_HI_RX_EQ_EN BIT(6)
+#define RX_OVRD_IN_HI_RX_LOS_FILTER_OVRD BIT(5)
+#define RX_OVRD_IN_HI_RX_LOS_FILTER_MASK GENMASK(4, 2)
+#define RX_OVRD_IN_HI_RX_RATE_OVRD BIT(2)
+#define RX_OVRD_IN_HI_RX_RATE_MASK GENMASK(2, 0)
+
+/* TX OVRD DRV LO register bits */
+#define TX_OVRD_DRV_LO_AMPLITUDE_MASK GENMASK(6, 0)
+#define TX_OVRD_DRV_LO_PREEMPH_MASK GENMASK(13, 6)
+#define TX_OVRD_DRV_LO_PREEMPH(x) ((x) << 7)
+#define TX_OVRD_DRV_LO_EN BIT(14)
+
+/* MPLL bits */
+#define SSPHY_MPLL_MASK GENMASK(8, 5)
+#define SSPHY_MPLL(x) ((x) << 5)
+
+/* SS CAP register bits */
+#define SS_CR_CAP_ADDR_REG BIT(0)
+#define SS_CR_CAP_DATA_REG BIT(0)
+#define SS_CR_READ_REG BIT(0)
+#define SS_CR_WRITE_REG BIT(0)
+
+struct usb_phy {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *xo_clk;
+ struct clk *ref_clk;
+ u32 rx_eq;
+ u32 tx_deamp_3_5db;
+ u32 mpll;
+};
+
+struct phy_drvdata {
+ struct phy_ops ops;
+ u32 clk_rate;
+};
+
+/**
+ * usb_phy_write_readback() - write a masked value and read it back to
+ * confirm the write reached the hardware
+ * @phy_dwc3: QCOM DWC3 PHY driver data (holds the base virtual address)
+ * @offset: register offset
+ * @mask: bitmask of the field being updated
+ * @val: value to write
+ */
+static inline void usb_phy_write_readback(struct usb_phy *phy_dwc3,
+ u32 offset,
+ const u32 mask, u32 val)
+{
+ u32 write_val, tmp = readl(phy_dwc3->base + offset);
+
+ tmp &= ~mask; /* retain other bits */
+ write_val = tmp | val;
+
+ writel(write_val, phy_dwc3->base + offset);
+
+ /* Read back to see if val was written */
+ tmp = readl(phy_dwc3->base + offset);
+ tmp &= mask; /* clear other bits */
+
+ if (tmp != val)
+ dev_err(phy_dwc3->dev, "write: %x to QSCRATCH: %x FAILED\n", val, offset);
+}
+
+static int wait_for_latch(void __iomem *addr)
+{
+ u32 retry = 10;
+
+ while (true) {
+ if (!readl(addr))
+ break;
+
+ if (--retry == 0)
+ return -ETIMEDOUT;
+
+ usleep_range(10, 20);
+ }
+
+ return 0;
+}
+
+/**
+ * usb_ss_write_phycreg() - write an SSPHY register through the CR interface
+ * @phy_dwc3: QCOM DWC3 PHY driver data (holds the base virtual address)
+ * @addr: SSPHY register address to write
+ * @val: value to write
+ */
+static int usb_ss_write_phycreg(struct usb_phy *phy_dwc3,
+ u32 addr, u32 val)
+{
+ int ret;
+
+ writel(addr, phy_dwc3->base + CR_PROTOCOL_DATA_IN_REG);
+ writel(SS_CR_CAP_ADDR_REG,
+ phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG);
+ if (ret)
+ goto err_wait;
+
+ writel(val, phy_dwc3->base + CR_PROTOCOL_DATA_IN_REG);
+ writel(SS_CR_CAP_DATA_REG,
+ phy_dwc3->base + CR_PROTOCOL_CAP_DATA_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_CAP_DATA_REG);
+ if (ret)
+ goto err_wait;
+
+ writel(SS_CR_WRITE_REG, phy_dwc3->base + CR_PROTOCOL_WRITE_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_WRITE_REG);
+
+err_wait:
+ if (ret)
+ dev_err(phy_dwc3->dev, "timeout waiting for latch\n");
+ return ret;
+}
+
+/**
+ * usb_ss_read_phycreg() - read an SSPHY register through the CR interface
+ * @phy_dwc3: QCOM DWC3 PHY driver data (holds the base virtual address)
+ * @addr: SSPHY register address to read
+ * @val: filled with the value read back
+ */
+static int usb_ss_read_phycreg(struct usb_phy *phy_dwc3,
+ u32 addr, u32 *val)
+{
+ int ret;
+
+ writel(addr, phy_dwc3->base + CR_PROTOCOL_DATA_IN_REG);
+ writel(SS_CR_CAP_ADDR_REG,
+ phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG);
+ if (ret)
+ goto err_wait;
+
+ /*
+ * Due to a hardware bug, the first read of an SSPHY register may be
+ * incorrect. As a workaround, perform the read twice and use only the
+ * second value, ignoring the first.
+ */
+ writel(SS_CR_READ_REG, phy_dwc3->base + CR_PROTOCOL_READ_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_READ_REG);
+ if (ret)
+ goto err_wait;
+
+ /* throwaway read */
+ readl(phy_dwc3->base + CR_PROTOCOL_DATA_OUT_REG);
+
+ writel(SS_CR_READ_REG, phy_dwc3->base + CR_PROTOCOL_READ_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_READ_REG);
+ if (ret)
+ goto err_wait;
+
+ *val = readl(phy_dwc3->base + CR_PROTOCOL_DATA_OUT_REG);
+
+err_wait:
+ return ret;
+}
+
+static int qcom_ipq806x_usb_hs_phy_init(struct phy *phy)
+{
+ struct usb_phy *phy_dwc3 = phy_get_drvdata(phy);
+ int ret;
+ u32 val;
+
+ ret = clk_prepare_enable(phy_dwc3->xo_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy_dwc3->ref_clk);
+ if (ret) {
+ clk_disable_unprepare(phy_dwc3->xo_clk);
+ return ret;
+ }
+
+ /*
+ * HSPHY Initialization: Enable UTMI clock, select 19.2MHz fsel
+ * enable clamping, and disable RETENTION (power-on default is ENABLED)
+ */
+ val = HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_DMSEHV_CLAMP |
+ HSUSB_CTRL_RETENABLEN | HSUSB_CTRL_COMMONONN |
+ HSUSB_CTRL_OTGSESSVLD_CLAMP | HSUSB_CTRL_ID_HV_CLAMP |
+ HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_UTMI_OTG_VBUS_VALID |
+ HSUSB_CTRL_UTMI_CLK_EN | HSUSB_CTRL_CLAMP_EN | 0x70;
+
+ /* use core clock if external reference is not present */
+ if (!phy_dwc3->xo_clk)
+ val |= HSUSB_CTRL_USE_CLKCORE;
+
+ writel(val, phy_dwc3->base + HSUSB_PHY_CTRL_REG);
+ usleep_range(2000, 2200);
+
+ /* Disable (bypass) VBUS and ID filters */
+ writel(HSUSB_GCFG_XHCI_REV, phy_dwc3->base + QSCRATCH_GENERAL_CFG);
+
+ return 0;
+}
+
+static int qcom_ipq806x_usb_hs_phy_exit(struct phy *phy)
+{
+ struct usb_phy *phy_dwc3 = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(phy_dwc3->ref_clk);
+ clk_disable_unprepare(phy_dwc3->xo_clk);
+
+ return 0;
+}
+
+static int qcom_ipq806x_usb_ss_phy_init(struct phy *phy)
+{
+ struct usb_phy *phy_dwc3 = phy_get_drvdata(phy);
+ int ret;
+ u32 data;
+
+ ret = clk_prepare_enable(phy_dwc3->xo_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy_dwc3->ref_clk);
+ if (ret) {
+ clk_disable_unprepare(phy_dwc3->xo_clk);
+ return ret;
+ }
+
+ /* reset phy */
+ data = readl(phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+ writel(data | SSUSB_CTRL_SS_PHY_RESET,
+ phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+ usleep_range(2000, 2200);
+ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+
+ /* clear REF_PAD if we don't have XO clk */
+ if (!phy_dwc3->xo_clk)
+ data &= ~SSUSB_CTRL_REF_USE_PAD;
+ else
+ data |= SSUSB_CTRL_REF_USE_PAD;
+
+ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+
+ /* wait for ref clk to become stable, this can take up to 30ms */
+ msleep(30);
+
+ data |= SSUSB_CTRL_SS_PHY_EN | SSUSB_CTRL_LANE0_PWR_PRESENT;
+ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+
+ /*
+ * WORKAROUND: There is SSPHY suspend bug due to which USB enumerates
+ * in HS mode instead of SS mode. Workaround it by asserting
+ * LANE0.TX_ALT_BLOCK.EN_ALT_BUS to enable TX to use alt bus mode
+ */
+ ret = usb_ss_read_phycreg(phy_dwc3, 0x102D, &data);
+ if (ret)
+ goto err_phy_trans;
+
+ data |= (1 << 7);
+ ret = usb_ss_write_phycreg(phy_dwc3, 0x102D, data);
+ if (ret)
+ goto err_phy_trans;
+
+ ret = usb_ss_read_phycreg(phy_dwc3, 0x1010, &data);
+ if (ret)
+ goto err_phy_trans;
+
+ data &= ~0xff0;
+ data |= 0x20;
+ ret = usb_ss_write_phycreg(phy_dwc3, 0x1010, data);
+ if (ret)
+ goto err_phy_trans;
+
+ /*
+ * Fix RX Equalization setting as follows
+ * LANE0.RX_OVRD_IN_HI. RX_EQ_EN set to 0
+ * LANE0.RX_OVRD_IN_HI.RX_EQ_EN_OVRD set to 1
+ * LANE0.RX_OVRD_IN_HI.RX_EQ set based on SoC version
+ * LANE0.RX_OVRD_IN_HI.RX_EQ_OVRD set to 1
+ */
+ ret = usb_ss_read_phycreg(phy_dwc3, SSPHY_CTRL_RX_OVRD_IN_HI(0), &data);
+ if (ret)
+ goto err_phy_trans;
+
+ data &= ~RX_OVRD_IN_HI_RX_EQ_EN;
+ data |= RX_OVRD_IN_HI_RX_EQ_EN_OVRD;
+ data &= ~RX_OVRD_IN_HI_RX_EQ_MASK;
+ data |= RX_OVRD_IN_HI_RX_EQ(phy_dwc3->rx_eq);
+ data |= RX_OVRD_IN_HI_RX_EQ_OVRD;
+ ret = usb_ss_write_phycreg(phy_dwc3,
+ SSPHY_CTRL_RX_OVRD_IN_HI(0), data);
+ if (ret)
+ goto err_phy_trans;
+
+ /*
+ * Set EQ and TX launch amplitudes as follows
+ * LANE0.TX_OVRD_DRV_LO.PREEMPH set based on SoC version
+ * LANE0.TX_OVRD_DRV_LO.AMPLITUDE set to 110
+ * LANE0.TX_OVRD_DRV_LO.EN set to 1.
+ */
+ ret = usb_ss_read_phycreg(phy_dwc3,
+ SSPHY_CTRL_TX_OVRD_DRV_LO(0), &data);
+ if (ret)
+ goto err_phy_trans;
+
+ data &= ~TX_OVRD_DRV_LO_PREEMPH_MASK;
+ data |= TX_OVRD_DRV_LO_PREEMPH(phy_dwc3->tx_deamp_3_5db);
+ data &= ~TX_OVRD_DRV_LO_AMPLITUDE_MASK;
+ data |= 0x6E;
+ data |= TX_OVRD_DRV_LO_EN;
+ ret = usb_ss_write_phycreg(phy_dwc3,
+ SSPHY_CTRL_TX_OVRD_DRV_LO(0), data);
+ if (ret)
+ goto err_phy_trans;
+
+ data = 0;
+ data &= ~SSPHY_MPLL_MASK;
+ data |= SSPHY_MPLL(phy_dwc3->mpll);
+ usb_ss_write_phycreg(phy_dwc3, 0x30, data);
+
+ /*
+ * Set the QSCRATCH PHY_PARAM_CTRL1 parameters as follows
+ * TX_FULL_SWING [26:20] amplitude to 110
+ * TX_DEEMPH_6DB [19:14] to 32
+ * TX_DEEMPH_3_5DB [13:8] set based on SoC version
+ * LOS_BIAS [7:3] to 9
+ */
+ data = readl(phy_dwc3->base + SSUSB_PHY_PARAM_CTRL_1);
+
+ data &= ~PHY_PARAM_CTRL1_MASK;
+
+ data |= PHY_PARAM_CTRL1_TX_FULL_SWING(0x6e) |
+ PHY_PARAM_CTRL1_TX_DEEMPH_6DB(0x20) |
+ PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB(phy_dwc3->tx_deamp_3_5db) |
+ PHY_PARAM_CTRL1_LOS_BIAS(0x9);
+
+ usb_phy_write_readback(phy_dwc3, SSUSB_PHY_PARAM_CTRL_1,
+ PHY_PARAM_CTRL1_MASK, data);
+
+err_phy_trans:
+ return ret;
+}
+
+static int qcom_ipq806x_usb_ss_phy_exit(struct phy *phy)
+{
+ struct usb_phy *phy_dwc3 = phy_get_drvdata(phy);
+
+ /* Sequence to put SSPHY in low power state:
+ * 1. Clear REF_PHY_EN in PHY_CTRL_REG
+ * 2. Clear REF_USE_PAD in PHY_CTRL_REG
+ * 3. Set TEST_POWERED_DOWN in PHY_CTRL_REG to enable PHY retention
+ */
+ usb_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG,
+ SSUSB_CTRL_SS_PHY_EN, 0x0);
+ usb_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG,
+ SSUSB_CTRL_REF_USE_PAD, 0x0);
+ usb_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG,
+ SSUSB_CTRL_TEST_POWERDOWN, 0x0);
+
+ clk_disable_unprepare(phy_dwc3->ref_clk);
+ clk_disable_unprepare(phy_dwc3->xo_clk);
+
+ return 0;
+}
+
+static const struct phy_drvdata qcom_ipq806x_usb_hs_drvdata = {
+ .ops = {
+ .init = qcom_ipq806x_usb_hs_phy_init,
+ .exit = qcom_ipq806x_usb_hs_phy_exit,
+ .owner = THIS_MODULE,
+ },
+ .clk_rate = 60000000,
+};
+
+static const struct phy_drvdata qcom_ipq806x_usb_ss_drvdata = {
+ .ops = {
+ .init = qcom_ipq806x_usb_ss_phy_init,
+ .exit = qcom_ipq806x_usb_ss_phy_exit,
+ .owner = THIS_MODULE,
+ },
+ .clk_rate = 125000000,
+};
+
+static const struct of_device_id qcom_ipq806x_usb_phy_table[] = {
+ { .compatible = "qcom,ipq806x-usb-phy-hs",
+ .data = &qcom_ipq806x_usb_hs_drvdata },
+ { .compatible = "qcom,ipq806x-usb-phy-ss",
+ .data = &qcom_ipq806x_usb_ss_drvdata },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qcom_ipq806x_usb_phy_table);
+
+static int qcom_ipq806x_usb_phy_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ resource_size_t size;
+ struct phy *generic_phy;
+ struct usb_phy *phy_dwc3;
+ const struct phy_drvdata *data;
+ struct phy_provider *phy_provider;
+
+ phy_dwc3 = devm_kzalloc(&pdev->dev, sizeof(*phy_dwc3), GFP_KERNEL);
+ if (!phy_dwc3)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ phy_dwc3->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ size = resource_size(res);
+ phy_dwc3->base = devm_ioremap(phy_dwc3->dev, res->start, size);
+
+ if (!phy_dwc3->base) {
+ dev_err(phy_dwc3->dev, "failed to map reg\n");
+ return -ENOMEM;
+ }
+
+ phy_dwc3->ref_clk = devm_clk_get(phy_dwc3->dev, "ref");
+ if (IS_ERR(phy_dwc3->ref_clk)) {
+ dev_dbg(phy_dwc3->dev, "cannot get reference clock\n");
+ return PTR_ERR(phy_dwc3->ref_clk);
+ }
+
+ clk_set_rate(phy_dwc3->ref_clk, data->clk_rate);
+
+ phy_dwc3->xo_clk = devm_clk_get(phy_dwc3->dev, "xo");
+ if (IS_ERR(phy_dwc3->xo_clk)) {
+ dev_dbg(phy_dwc3->dev, "cannot get TCXO clock\n");
+ phy_dwc3->xo_clk = NULL;
+ }
+
+ /* Parse device node to probe HSIO settings */
+ if (device_property_read_u32(&pdev->dev, "qcom,rx-eq",
+ &phy_dwc3->rx_eq))
+ phy_dwc3->rx_eq = SSPHY_RX_EQ_VALUE;
+
+ if (device_property_read_u32(&pdev->dev, "qcom,tx-deamp_3_5db",
+ &phy_dwc3->tx_deamp_3_5db))
+ phy_dwc3->tx_deamp_3_5db = SSPHY_TX_DEEMPH_3_5DB;
+
+ if (device_property_read_u32(&pdev->dev, "qcom,mpll", &phy_dwc3->mpll))
+ phy_dwc3->mpll = SSPHY_MPLL_VALUE;
+
+ generic_phy = devm_phy_create(phy_dwc3->dev, pdev->dev.of_node, &data->ops);
+
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, phy_dwc3);
+ platform_set_drvdata(pdev, phy_dwc3);
+
+ phy_provider = devm_of_phy_provider_register(phy_dwc3->dev,
+ of_phy_simple_xlate);
+
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+static struct platform_driver qcom_ipq806x_usb_phy_driver = {
+ .probe = qcom_ipq806x_usb_phy_probe,
+ .driver = {
+ .name = "qcom-ipq806x-usb-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_ipq806x_usb_phy_table,
+ },
+};
+
+module_platform_driver(qcom_ipq806x_usb_phy_driver);
+
+MODULE_ALIAS("platform:phy-qcom-ipq806x-usb");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
+MODULE_DESCRIPTION("DesignWare USB3 QCOM PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index e91040af3394..562053ce9455 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -82,20 +82,34 @@ struct qmp_phy_init_tbl {
* register part of layout ?
* if yes, then offset gives index in the reg-layout
*/
- int in_layout;
+ bool in_layout;
+ /*
+ * mask of lanes for which this register is written
+ * for cases when second lane needs different values
+ */
+ u8 lane_mask;
};
#define QMP_PHY_INIT_CFG(o, v) \
{ \
.offset = o, \
.val = v, \
+ .lane_mask = 0xff, \
}
#define QMP_PHY_INIT_CFG_L(o, v) \
{ \
.offset = o, \
.val = v, \
- .in_layout = 1, \
+ .in_layout = true, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
}
/* set of registers with offsets different per-PHY */
@@ -185,6 +199,17 @@ static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_START_CTRL] = 0x44,
[QPHY_PCS_STATUS] = 0x14,
[QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x308,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x314,
+};
+
+static const unsigned int qmp_v4_usb3_uniphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x608,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x614,
};
static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
@@ -198,6 +223,81 @@ static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET,
};
+static const struct qmp_phy_init_tbl ipq8074_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x0),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -1399,6 +1499,250 @@ static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
};
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x95),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xef),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
/* phy-type - PCIE/UFS/USB */
@@ -1567,6 +1911,11 @@ static const char * const qmp_v4_phy_clk_l[] = {
"aux", "ref_clk_src", "ref", "com_aux",
};
+/* the primary usb3 phy on sm8250 doesn't have a ref clock */
+static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
+ "aux", "ref_clk_src", "com_aux"
+};
+
static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
@@ -1593,6 +1942,30 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
+static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = ipq8074_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl),
+ .pcs_tbl = ipq8074_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+};
+
static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.type = PHY_TYPE_PCIE,
.nlanes = 3,
@@ -1986,10 +2359,98 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
-static void qcom_qmp_phy_configure(void __iomem *base,
- const unsigned int *regs,
- const struct qmp_phy_init_tbl tbl[],
- int num)
+static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8150_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8150_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8150_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3_uniphy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8250_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
+ .rx_tbl = sm8250_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
+ .pcs_tbl = sm8250_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8250_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8250_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8250_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3_uniphy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static void qcom_qmp_phy_configure_lane(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
{
int i;
const struct qmp_phy_init_tbl *t = tbl;
@@ -1998,6 +2459,9 @@ static void qcom_qmp_phy_configure(void __iomem *base,
return;
for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
if (t->in_layout)
writel(t->val, base + regs[t->offset]);
else
@@ -2005,6 +2469,14 @@ static void qcom_qmp_phy_configure(void __iomem *base,
}
}
+static void qcom_qmp_phy_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qcom_qmp_phy_configure_lane(base, regs, tbl, num, 0xff);
+}
+
static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
@@ -2219,16 +2691,18 @@ static int qcom_qmp_phy_enable(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
+ qcom_qmp_phy_configure_lane(tx, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 1);
/* Configuration for other LANE for USB-DP combo PHY */
if (cfg->is_dual_lane_phy)
- qcom_qmp_phy_configure(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num);
+ qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
+ qcom_qmp_phy_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 1);
if (cfg->is_dual_lane_phy)
- qcom_qmp_phy_configure(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num);
+ qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
ret = reset_control_deassert(qmp->ufs_reset);
@@ -2699,6 +3173,9 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
{
+ .compatible = "qcom,ipq8074-qmp-usb3-phy",
+ .data = &ipq8074_usb3phy_cfg,
+ }, {
.compatible = "qcom,msm8996-qmp-pcie-phy",
.data = &msm8996_pciephy_cfg,
}, {
@@ -2746,6 +3223,15 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,sm8150-qmp-usb3-phy",
.data = &sm8150_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-usb3-uni-phy",
+ .data = &sm8150_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-usb3-phy",
+ .data = &sm8250_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-usb3-uni-phy",
+ .data = &sm8250_usb3_uniphy_cfg,
},
{ },
};
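To make the new lane_mask handling above concrete: qcom_qmp_phy_configure_lane() is called with mask 1 for the first lane and 2 for the second, and every table entry whose own mask does not overlap the current lane is skipped, so dual-lane PHYs can share one table while still giving each lane its own value for the odd register. A stripped-down sketch of that filtering loop, using reduced types rather than the driver's actual structures:

#include <linux/io.h>
#include <linux/types.h>

struct example_init_entry {
	unsigned int offset;	/* register offset from the block base */
	u32 val;		/* value to program */
	u8 lane_mask;		/* which lanes the entry applies to */
};

/* Mirrors the per-entry filtering done by qcom_qmp_phy_configure_lane(). */
static void example_configure_lane(void __iomem *base,
				   const struct example_init_entry *tbl,
				   int num, u8 lane_mask)
{
	int i;

	for (i = 0; i < num; i++) {
		if (!(tbl[i].lane_mask & lane_mask))
			continue;	/* entry targets a different lane */
		writel(tbl[i].val, base + tbl[i].offset);
	}
}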
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 6d017a0c0c8d..4277f592684b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -363,7 +363,10 @@
/* Only for QMP V4 PHY - TX registers */
#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x34
#define QSERDES_V4_TX_RES_CODE_LANE_RX 0x38
+#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX 0x3c
+#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX 0x40
#define QSERDES_V4_TX_LANE_MODE_1 0x84
+#define QSERDES_V4_TX_LANE_MODE_2 0x88
#define QSERDES_V4_TX_RCV_DETECT_LVL_2 0x9c
#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdC
@@ -709,6 +712,10 @@
#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x354
#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x358
+/* Only for QMP V4 PHY - UNI has 0x300 offset for PCS_USB3 regs */
+#define QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL 0x618
+#define QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2 0x638
+
/* Only for QMP V4 PHY - PCS_MISC registers */
#define QPHY_V4_PCS_MISC_TYPEC_CTRL 0x00
#define QPHY_V4_PCS_MISC_TYPEC_PWRDN_CTRL 0x04
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 393011a05b48..557547dabfd5 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -810,6 +810,9 @@ static const struct phy_ops qusb2_phy_gen_ops = {
static const struct of_device_id qusb2_phy_of_match_table[] = {
{
+ .compatible = "qcom,ipq8074-qusb2-phy",
+ .data = &msm8996_phy_cfg,
+ }, {
.compatible = "qcom,msm8996-qusb2-phy",
.data = &msm8996_phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
index 4d74045271eb..ae4bac024c7b 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -77,6 +77,7 @@ static const char * const qcom_snps_hsphy_vreg_names[] = {
* @phy_reset: phy reset control
* @vregs: regulator supplies bulk data
* @phy_initialized: if PHY has been initialized correctly
+ * @mode: contains the current mode the PHY is in
*/
struct qcom_snps_hsphy {
struct phy *phy;
@@ -88,6 +89,7 @@ struct qcom_snps_hsphy {
struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS];
bool phy_initialized;
+ enum phy_mode mode;
};
static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
@@ -104,6 +106,72 @@ static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
readl_relaxed(base + offset);
}
+static int qcom_snps_hsphy_suspend(struct qcom_snps_hsphy *hsphy)
+{
+ dev_dbg(&hsphy->phy->dev, "Suspend QCOM SNPS PHY\n");
+
+ if (hsphy->mode == PHY_MODE_USB_HOST) {
+ /* Enable auto-resume to meet remote wakeup timing */
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+ USB2_AUTO_RESUME,
+ USB2_AUTO_RESUME);
+ usleep_range(500, 1000);
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+ 0, USB2_AUTO_RESUME);
+ }
+
+ clk_disable_unprepare(hsphy->cfg_ahb_clk);
+ return 0;
+}
+
+static int qcom_snps_hsphy_resume(struct qcom_snps_hsphy *hsphy)
+{
+ int ret;
+
+ dev_dbg(&hsphy->phy->dev, "Resume QCOM SNPS PHY\n");
+
+ ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
+ if (ret) {
+ dev_err(&hsphy->phy->dev, "failed to enable cfg ahb clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused qcom_snps_hsphy_runtime_suspend(struct device *dev)
+{
+ struct qcom_snps_hsphy *hsphy = dev_get_drvdata(dev);
+
+ if (!hsphy->phy_initialized)
+ return 0;
+
+ qcom_snps_hsphy_suspend(hsphy);
+ return 0;
+}
+
+static int __maybe_unused qcom_snps_hsphy_runtime_resume(struct device *dev)
+{
+ struct qcom_snps_hsphy *hsphy = dev_get_drvdata(dev);
+
+ if (!hsphy->phy_initialized)
+ return 0;
+
+ qcom_snps_hsphy_resume(hsphy);
+ return 0;
+}
+
+static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
+
+ hsphy->mode = mode;
+ return 0;
+}
+
static int qcom_snps_hsphy_init(struct phy *phy)
{
struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
@@ -201,6 +269,7 @@ static int qcom_snps_hsphy_exit(struct phy *phy)
static const struct phy_ops qcom_snps_hsphy_gen_ops = {
.init = qcom_snps_hsphy_init,
.exit = qcom_snps_hsphy_exit,
+ .set_mode = qcom_snps_hsphy_set_mode,
.owner = THIS_MODULE,
};
@@ -212,6 +281,11 @@ static const struct of_device_id qcom_snps_hsphy_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_of_match_table);
+static const struct dev_pm_ops qcom_snps_hsphy_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_snps_hsphy_runtime_suspend,
+ qcom_snps_hsphy_runtime_resume, NULL)
+};
+
static int qcom_snps_hsphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -255,6 +329,14 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
generic_phy = devm_phy_create(dev, NULL, &qcom_snps_hsphy_gen_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
@@ -269,6 +351,8 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (!IS_ERR(phy_provider))
dev_dbg(dev, "Registered Qcom-SNPS HS phy\n");
+ else
+ pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
}
@@ -277,6 +361,7 @@ static struct platform_driver qcom_snps_hsphy_driver = {
.probe = qcom_snps_hsphy_probe,
.driver = {
.name = "qcom-snps-hs-femto-v2-phy",
+ .pm = &qcom_snps_hsphy_pm_ops,
.of_match_table = qcom_snps_hsphy_of_match_table,
},
};
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
deleted file mode 100644
index 9bf973a0d8c3..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#ifndef UFS_QCOM_PHY_I_H_
-#define UFS_QCOM_PHY_I_H_
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/phy/phy.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-
-#define UFS_QCOM_PHY_CAL_ENTRY(reg, val) \
- { \
- .reg_offset = reg, \
- .cfg_value = val, \
- }
-
-#define UFS_QCOM_PHY_NAME_LEN 30
-
-enum {
- MASK_SERDES_START = 0x1,
- MASK_PCS_READY = 0x1,
-};
-
-enum {
- OFFSET_SERDES_START = 0x0,
-};
-
-struct ufs_qcom_phy_stored_attributes {
- u32 att;
- u32 value;
-};
-
-
-struct ufs_qcom_phy_calibration {
- u32 reg_offset;
- u32 cfg_value;
-};
-
-struct ufs_qcom_phy_vreg {
- const char *name;
- struct regulator *reg;
- int max_uA;
- int min_uV;
- int max_uV;
- bool enabled;
-};
-
-struct ufs_qcom_phy {
- struct list_head list;
- struct device *dev;
- void __iomem *mmio;
- void __iomem *dev_ref_clk_ctrl_mmio;
- struct clk *tx_iface_clk;
- struct clk *rx_iface_clk;
- bool is_iface_clk_enabled;
- struct clk *ref_clk_src;
- struct clk *ref_clk_parent;
- struct clk *ref_clk;
- bool is_ref_clk_enabled;
- bool is_dev_ref_clk_enabled;
- struct ufs_qcom_phy_vreg vdda_pll;
- struct ufs_qcom_phy_vreg vdda_phy;
- struct ufs_qcom_phy_vreg vddp_ref_clk;
- unsigned int quirks;
-
- /*
- * If UFS link is put into Hibern8 and if UFS PHY analog hardware is
- * power collapsed (by clearing UFS_PHY_POWER_DOWN_CONTROL), Hibern8
- * exit might fail even after powering on UFS PHY analog hardware.
- * Enabling this quirk will help to solve above issue by doing
- * custom PHY settings just before PHY analog power collapse.
- */
- #define UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE BIT(0)
-
- u8 host_ctrl_rev_major;
- u16 host_ctrl_rev_minor;
- u16 host_ctrl_rev_step;
-
- char name[UFS_QCOM_PHY_NAME_LEN];
- struct ufs_qcom_phy_calibration *cached_regs;
- int cached_regs_table_size;
- struct ufs_qcom_phy_specific_ops *phy_spec_ops;
-
- enum phy_mode mode;
- struct reset_control *ufs_reset;
-};
-
-/**
- * struct ufs_qcom_phy_specific_ops - set of pointers to functions which have a
- * specific implementation per phy. Each UFS phy should implement
- * those functions according to its spec and requirements.
- * @start_serdes: pointer to a function that starts the serdes
- * @is_physical_coding_sublayer_ready: pointer to a function that
- * checks PCS readiness. Returns 0 for success and non-zero for error.
- * @set_tx_lane_enable: pointer to a function that enables TX lanes
- * @power_control: pointer to a function that controls the analog rail of the phy
- * and writes to QSERDES_RX_SIGDET_CNTRL attribute
- */
-struct ufs_qcom_phy_specific_ops {
- int (*calibrate)(struct ufs_qcom_phy *ufs_qcom_phy, bool is_rate_B);
- void (*start_serdes)(struct ufs_qcom_phy *phy);
- int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
- void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
- void (*power_control)(struct ufs_qcom_phy *phy, bool val);
-};
-
-struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
-int ufs_qcom_phy_power_on(struct phy *generic_phy);
-int ufs_qcom_phy_power_off(struct phy *generic_phy);
-int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_remove(struct phy *generic_phy,
- struct ufs_qcom_phy *ufs_qcom_phy);
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
- struct ufs_qcom_phy *common_cfg,
- const struct phy_ops *ufs_qcom_phy_gen_ops,
- struct ufs_qcom_phy_specific_ops *phy_spec_ops);
-int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
- struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
- struct ufs_qcom_phy_calibration *tbl_B, int tbl_size_B,
- bool is_rate_B);
-#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
deleted file mode 100644
index 54b355bfc24c..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#include "phy-qcom-ufs-qmp-14nm.h"
-
-#define UFS_PHY_NAME "ufs_phy_qmp_14nm"
-#define UFS_PHY_VDDA_PHY_UV (925000)
-
-static
-int ufs_qcom_phy_qmp_14nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
- bool is_rate_B)
-{
- int tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
- int tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
- int err;
-
- err = ufs_qcom_phy_calibrate(ufs_qcom_phy, phy_cal_table_rate_A,
- tbl_size_A, phy_cal_table_rate_B, tbl_size_B, is_rate_B);
-
- if (err)
- dev_err(ufs_qcom_phy->dev,
- "%s: ufs_qcom_phy_calibrate() failed %d\n",
- __func__, err);
- return err;
-}
-
-static
-void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
-{
- phy_common->quirks =
- UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
-}
-
-static
-int ufs_qcom_phy_qmp_14nm_set_mode(struct phy *generic_phy,
- enum phy_mode mode, int submode)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
-
- phy_common->mode = PHY_MODE_INVALID;
-
- if (mode > 0)
- phy_common->mode = mode;
-
- return 0;
-}
-
-static
-void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
-{
- writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
- /*
- * Before any transactions involving PHY, ensure PHY knows
- * that its analog rail is powered ON (or OFF).
- */
- mb();
-}
-
-static inline
-void ufs_qcom_phy_qmp_14nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
-{
- /*
- * 14nm PHY does not have TX_LANE_ENABLE register.
- * Implement this function so as not to propagate error to caller.
- */
-}
-
-static inline void ufs_qcom_phy_qmp_14nm_start_serdes(struct ufs_qcom_phy *phy)
-{
- u32 tmp;
-
- tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
- tmp &= ~MASK_SERDES_START;
- tmp |= (1 << OFFSET_SERDES_START);
- writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
- /* Ensure register value is committed */
- mb();
-}
-
-static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
-{
- int err = 0;
- u32 val;
-
- err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
- val, (val & MASK_PCS_READY), 10, 1000000);
- if (err)
- dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
- __func__, err);
- return err;
-}
-
-static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
- .power_on = ufs_qcom_phy_power_on,
- .power_off = ufs_qcom_phy_power_off,
- .set_mode = ufs_qcom_phy_qmp_14nm_set_mode,
- .owner = THIS_MODULE,
-};
-
-static struct ufs_qcom_phy_specific_ops phy_14nm_ops = {
- .calibrate = ufs_qcom_phy_qmp_14nm_phy_calibrate,
- .start_serdes = ufs_qcom_phy_qmp_14nm_start_serdes,
- .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
- .set_tx_lane_enable = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
- .power_control = ufs_qcom_phy_qmp_14nm_power_control,
-};
-
-static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy;
- struct ufs_qcom_phy_qmp_14nm *phy;
- struct ufs_qcom_phy *phy_common;
- int err = 0;
-
- phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- err = -ENOMEM;
- goto out;
- }
- phy_common = &phy->common_cfg;
-
- generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
- &ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
-
- if (!generic_phy) {
- err = -EIO;
- goto out;
- }
-
- err = ufs_qcom_phy_init_clks(phy_common);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vregulators(phy_common);
- if (err)
- goto out;
-
- phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
- phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-
- ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
-
- phy_set_drvdata(generic_phy, phy);
-
- strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
-
-out:
- return err;
-}
-
-static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
- {.compatible = "qcom,ufs-phy-qmp-14nm"},
- {.compatible = "qcom,msm8996-ufs-phy-qmp-14nm"},
- {},
-};
-MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
-
-static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
- .probe = ufs_qcom_phy_qmp_14nm_probe,
- .driver = {
- .of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
- .name = "ufs_qcom_phy_qmp_14nm",
- },
-};
-
-module_platform_driver(ufs_qcom_phy_qmp_14nm_driver);
-
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 14nm");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h
deleted file mode 100644
index ceca654b9338..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#ifndef UFS_QCOM_PHY_QMP_14NM_H_
-#define UFS_QCOM_PHY_QMP_14NM_H_
-
-#include "phy-qcom-ufs-i.h"
-
-/* QCOM UFS PHY control registers */
-#define COM_OFF(x) (0x000 + x)
-#define PHY_OFF(x) (0xC00 + x)
-#define TX_OFF(n, x) (0x400 + (0x400 * n) + x)
-#define RX_OFF(n, x) (0x600 + (0x400 * n) + x)
-
-/* UFS PHY QSERDES COM registers */
-#define QSERDES_COM_BG_TIMER COM_OFF(0x0C)
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x34)
-#define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x3C)
-#define QSERDES_COM_LOCK_CMP1_MODE0 COM_OFF(0x4C)
-#define QSERDES_COM_LOCK_CMP2_MODE0 COM_OFF(0x50)
-#define QSERDES_COM_LOCK_CMP3_MODE0 COM_OFF(0x54)
-#define QSERDES_COM_LOCK_CMP1_MODE1 COM_OFF(0x58)
-#define QSERDES_COM_LOCK_CMP2_MODE1 COM_OFF(0x5C)
-#define QSERDES_COM_LOCK_CMP3_MODE1 COM_OFF(0x60)
-#define QSERDES_COM_CP_CTRL_MODE0 COM_OFF(0x78)
-#define QSERDES_COM_CP_CTRL_MODE1 COM_OFF(0x7C)
-#define QSERDES_COM_PLL_RCTRL_MODE0 COM_OFF(0x84)
-#define QSERDES_COM_PLL_RCTRL_MODE1 COM_OFF(0x88)
-#define QSERDES_COM_PLL_CCTRL_MODE0 COM_OFF(0x90)
-#define QSERDES_COM_PLL_CCTRL_MODE1 COM_OFF(0x94)
-#define QSERDES_COM_SYSCLK_EN_SEL COM_OFF(0xAC)
-#define QSERDES_COM_RESETSM_CNTRL COM_OFF(0xB4)
-#define QSERDES_COM_LOCK_CMP_EN COM_OFF(0xC8)
-#define QSERDES_COM_LOCK_CMP_CFG COM_OFF(0xCC)
-#define QSERDES_COM_DEC_START_MODE0 COM_OFF(0xD0)
-#define QSERDES_COM_DEC_START_MODE1 COM_OFF(0xD4)
-#define QSERDES_COM_DIV_FRAC_START1_MODE0 COM_OFF(0xDC)
-#define QSERDES_COM_DIV_FRAC_START2_MODE0 COM_OFF(0xE0)
-#define QSERDES_COM_DIV_FRAC_START3_MODE0 COM_OFF(0xE4)
-#define QSERDES_COM_DIV_FRAC_START1_MODE1 COM_OFF(0xE8)
-#define QSERDES_COM_DIV_FRAC_START2_MODE1 COM_OFF(0xEC)
-#define QSERDES_COM_DIV_FRAC_START3_MODE1 COM_OFF(0xF0)
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 COM_OFF(0x108)
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 COM_OFF(0x10C)
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 COM_OFF(0x110)
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 COM_OFF(0x114)
-#define QSERDES_COM_VCO_TUNE_CTRL COM_OFF(0x124)
-#define QSERDES_COM_VCO_TUNE_MAP COM_OFF(0x128)
-#define QSERDES_COM_VCO_TUNE1_MODE0 COM_OFF(0x12C)
-#define QSERDES_COM_VCO_TUNE2_MODE0 COM_OFF(0x130)
-#define QSERDES_COM_VCO_TUNE1_MODE1 COM_OFF(0x134)
-#define QSERDES_COM_VCO_TUNE2_MODE1 COM_OFF(0x138)
-#define QSERDES_COM_VCO_TUNE_TIMER1 COM_OFF(0x144)
-#define QSERDES_COM_VCO_TUNE_TIMER2 COM_OFF(0x148)
-#define QSERDES_COM_CLK_SELECT COM_OFF(0x174)
-#define QSERDES_COM_HSCLK_SEL COM_OFF(0x178)
-#define QSERDES_COM_CORECLK_DIV COM_OFF(0x184)
-#define QSERDES_COM_CORE_CLK_EN COM_OFF(0x18C)
-#define QSERDES_COM_CMN_CONFIG COM_OFF(0x194)
-#define QSERDES_COM_SVS_MODE_CLK_SEL COM_OFF(0x19C)
-#define QSERDES_COM_CORECLK_DIV_MODE1 COM_OFF(0x1BC)
-
-/* UFS PHY registers */
-#define UFS_PHY_PHY_START PHY_OFF(0x00)
-#define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x04)
-#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x168)
-
-/* UFS PHY TX registers */
-#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN TX_OFF(0, 0x68)
-#define QSERDES_TX_LANE_MODE TX_OFF(0, 0x94)
-
-/* UFS PHY RX registers */
-#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN RX_OFF(0, 0x40)
-#define QSERDES_RX_RX_TERM_BW RX_OFF(0, 0x90)
-#define QSERDES_RX_RX_EQ_GAIN1_LSB RX_OFF(0, 0xC4)
-#define QSERDES_RX_RX_EQ_GAIN1_MSB RX_OFF(0, 0xC8)
-#define QSERDES_RX_RX_EQ_GAIN2_LSB RX_OFF(0, 0xCC)
-#define QSERDES_RX_RX_EQ_GAIN2_MSB RX_OFF(0, 0xD0)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 RX_OFF(0, 0xD8)
-#define QSERDES_RX_SIGDET_CNTRL RX_OFF(0, 0x114)
-#define QSERDES_RX_SIGDET_LVL RX_OFF(0, 0x118)
-#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL RX_OFF(0, 0x11C)
-#define QSERDES_RX_RX_INTERFACE_MODE RX_OFF(0, 0x12C)
-
-/*
- * This structure represents the 14nm specific phy.
- * common_cfg MUST remain the first field in this structure
- * in case extra fields are added. This way, when calling
- * get_ufs_qcom_phy() of generic phy, we can extract the
- * common phy structure (struct ufs_qcom_phy) out of it
- * regardless of the relevant specific phy.
- */
-struct ufs_qcom_phy_qmp_14nm {
- struct ufs_qcom_phy common_cfg;
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x06),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x14),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
-
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x02),
-
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x02),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0F),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x54),
-};
-
-#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
deleted file mode 100644
index 3e9d8b71e995..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#include "phy-qcom-ufs-qmp-20nm.h"
-
-#define UFS_PHY_NAME "ufs_phy_qmp_20nm"
-
-static
-int ufs_qcom_phy_qmp_20nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
- bool is_rate_B)
-{
- struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
- int tbl_size_A, tbl_size_B;
- u8 major = ufs_qcom_phy->host_ctrl_rev_major;
- u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
- u16 step = ufs_qcom_phy->host_ctrl_rev_step;
- int err;
-
- if ((major == 0x1) && (minor == 0x002) && (step == 0x0000)) {
- tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_2_0);
- tbl_A = phy_cal_table_rate_A_1_2_0;
- } else if ((major == 0x1) && (minor == 0x003) && (step == 0x0000)) {
- tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_3_0);
- tbl_A = phy_cal_table_rate_A_1_3_0;
- } else {
- dev_err(ufs_qcom_phy->dev, "%s: Unknown UFS-PHY version, no calibration values\n",
- __func__);
- err = -ENODEV;
- goto out;
- }
-
- tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
- tbl_B = phy_cal_table_rate_B;
-
- err = ufs_qcom_phy_calibrate(ufs_qcom_phy, tbl_A, tbl_size_A,
- tbl_B, tbl_size_B, is_rate_B);
-
- if (err)
- dev_err(ufs_qcom_phy->dev, "%s: ufs_qcom_phy_calibrate() failed %d\n",
- __func__, err);
-
-out:
- return err;
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
-{
- phy_common->quirks =
- UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
-}
-
-static
-int ufs_qcom_phy_qmp_20nm_set_mode(struct phy *generic_phy,
- enum phy_mode mode, int submode)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
-
- phy_common->mode = PHY_MODE_INVALID;
-
- if (mode > 0)
- phy_common->mode = mode;
-
- return 0;
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val)
-{
- bool hibern8_exit_after_pwr_collapse = phy->quirks &
- UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
-
- if (val) {
- writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
- /*
- * Before any transactions involving PHY, ensure PHY knows
- * that its analog rail is powered ON.
- */
- mb();
-
- if (hibern8_exit_after_pwr_collapse) {
- /*
- * Give at least 1us delay after restoring PHY analog
- * power.
- */
- usleep_range(1, 2);
- writel_relaxed(0x0A, phy->mmio +
- QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
- writel_relaxed(0x08, phy->mmio +
- QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
- /*
- * Make sure workaround is deactivated before proceeding
- * with normal PHY operations.
- */
- mb();
- }
- } else {
- if (hibern8_exit_after_pwr_collapse) {
- writel_relaxed(0x0A, phy->mmio +
- QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
- writel_relaxed(0x02, phy->mmio +
- QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
- /*
- * Make sure that above workaround is activated before
- * PHY analog power collapse.
- */
- mb();
- }
-
- writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
- /*
- * Ensure that the PHY knows its analog rail is going
- * to be powered down.
- */
- mb();
- }
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
-{
- writel_relaxed(val & UFS_PHY_TX_LANE_ENABLE_MASK,
- phy->mmio + UFS_PHY_TX_LANE_ENABLE);
- mb();
-}
-
-static inline void ufs_qcom_phy_qmp_20nm_start_serdes(struct ufs_qcom_phy *phy)
-{
- u32 tmp;
-
- tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
- tmp &= ~MASK_SERDES_START;
- tmp |= (1 << OFFSET_SERDES_START);
- writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
- mb();
-}
-
-static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
-{
- int err = 0;
- u32 val;
-
- err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
- val, (val & MASK_PCS_READY), 10, 1000000);
- if (err)
- dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
- __func__, err);
- return err;
-}
-
-static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
- .power_on = ufs_qcom_phy_power_on,
- .power_off = ufs_qcom_phy_power_off,
- .set_mode = ufs_qcom_phy_qmp_20nm_set_mode,
- .owner = THIS_MODULE,
-};
-
-static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
- .calibrate = ufs_qcom_phy_qmp_20nm_phy_calibrate,
- .start_serdes = ufs_qcom_phy_qmp_20nm_start_serdes,
- .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
- .set_tx_lane_enable = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
- .power_control = ufs_qcom_phy_qmp_20nm_power_control,
-};
-
-static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy;
- struct ufs_qcom_phy_qmp_20nm *phy;
- struct ufs_qcom_phy *phy_common;
- int err = 0;
-
- phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- err = -ENOMEM;
- goto out;
- }
- phy_common = &phy->common_cfg;
-
- generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
- &ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
-
- if (!generic_phy) {
- err = -EIO;
- goto out;
- }
-
- err = ufs_qcom_phy_init_clks(phy_common);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vregulators(phy_common);
- if (err)
- goto out;
-
- ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
-
- phy_set_drvdata(generic_phy, phy);
-
- strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
-
-out:
- return err;
-}
-
-static const struct of_device_id ufs_qcom_phy_qmp_20nm_of_match[] = {
- {.compatible = "qcom,ufs-phy-qmp-20nm"},
- {},
-};
-MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
-
-static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
- .probe = ufs_qcom_phy_qmp_20nm_probe,
- .driver = {
- .of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
- .name = "ufs_qcom_phy_qmp_20nm",
- },
-};
-
-module_platform_driver(ufs_qcom_phy_qmp_20nm_driver);
-
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 20nm");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h
deleted file mode 100644
index 8ce79f524eed..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h
+++ /dev/null
@@ -1,226 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#ifndef UFS_QCOM_PHY_QMP_20NM_H_
-#define UFS_QCOM_PHY_QMP_20NM_H_
-
-#include "phy-qcom-ufs-i.h"
-
-/* QCOM UFS PHY control registers */
-
-#define COM_OFF(x) (0x000 + x)
-#define PHY_OFF(x) (0xC00 + x)
-#define TX_OFF(n, x) (0x400 + (0x400 * n) + x)
-#define RX_OFF(n, x) (0x600 + (0x400 * n) + x)
-
-/* UFS PHY PLL block registers */
-#define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x0)
-#define QSERDES_COM_PLL_VCOTAIL_EN COM_OFF(0x04)
-#define QSERDES_COM_PLL_CNTRL COM_OFF(0x14)
-#define QSERDES_COM_PLL_IP_SETI COM_OFF(0x24)
-#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL COM_OFF(0x28)
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x30)
-#define QSERDES_COM_PLL_CP_SETI COM_OFF(0x34)
-#define QSERDES_COM_PLL_IP_SETP COM_OFF(0x38)
-#define QSERDES_COM_PLL_CP_SETP COM_OFF(0x3C)
-#define QSERDES_COM_SYSCLK_EN_SEL_TXBAND COM_OFF(0x48)
-#define QSERDES_COM_RESETSM_CNTRL COM_OFF(0x4C)
-#define QSERDES_COM_RESETSM_CNTRL2 COM_OFF(0x50)
-#define QSERDES_COM_PLLLOCK_CMP1 COM_OFF(0x90)
-#define QSERDES_COM_PLLLOCK_CMP2 COM_OFF(0x94)
-#define QSERDES_COM_PLLLOCK_CMP3 COM_OFF(0x98)
-#define QSERDES_COM_PLLLOCK_CMP_EN COM_OFF(0x9C)
-#define QSERDES_COM_BGTC COM_OFF(0xA0)
-#define QSERDES_COM_DEC_START1 COM_OFF(0xAC)
-#define QSERDES_COM_PLL_AMP_OS COM_OFF(0xB0)
-#define QSERDES_COM_RES_CODE_UP_OFFSET COM_OFF(0xD8)
-#define QSERDES_COM_RES_CODE_DN_OFFSET COM_OFF(0xDC)
-#define QSERDES_COM_DIV_FRAC_START1 COM_OFF(0x100)
-#define QSERDES_COM_DIV_FRAC_START2 COM_OFF(0x104)
-#define QSERDES_COM_DIV_FRAC_START3 COM_OFF(0x108)
-#define QSERDES_COM_DEC_START2 COM_OFF(0x10C)
-#define QSERDES_COM_PLL_RXTXEPCLK_EN COM_OFF(0x110)
-#define QSERDES_COM_PLL_CRCTRL COM_OFF(0x114)
-#define QSERDES_COM_PLL_CLKEPDIV COM_OFF(0x118)
-
-/* TX LANE n (0, 1) registers */
-#define QSERDES_TX_EMP_POST1_LVL(n) TX_OFF(n, 0x08)
-#define QSERDES_TX_DRV_LVL(n) TX_OFF(n, 0x0C)
-#define QSERDES_TX_LANE_MODE(n) TX_OFF(n, 0x54)
-
-/* RX LANE n (0, 1) registers */
-#define QSERDES_RX_CDR_CONTROL1(n) RX_OFF(n, 0x0)
-#define QSERDES_RX_CDR_CONTROL_HALF(n) RX_OFF(n, 0x8)
-#define QSERDES_RX_RX_EQ_GAIN1_LSB(n) RX_OFF(n, 0xA8)
-#define QSERDES_RX_RX_EQ_GAIN1_MSB(n) RX_OFF(n, 0xAC)
-#define QSERDES_RX_RX_EQ_GAIN2_LSB(n) RX_OFF(n, 0xB0)
-#define QSERDES_RX_RX_EQ_GAIN2_MSB(n) RX_OFF(n, 0xB4)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(n) RX_OFF(n, 0xBC)
-#define QSERDES_RX_CDR_CONTROL_QUARTER(n) RX_OFF(n, 0xC)
-#define QSERDES_RX_SIGDET_CNTRL(n) RX_OFF(n, 0x100)
-
-/* UFS PHY registers */
-#define UFS_PHY_PHY_START PHY_OFF(0x00)
-#define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x4)
-#define UFS_PHY_TX_LANE_ENABLE PHY_OFF(0x44)
-#define UFS_PHY_PWM_G1_CLK_DIVIDER PHY_OFF(0x08)
-#define UFS_PHY_PWM_G2_CLK_DIVIDER PHY_OFF(0x0C)
-#define UFS_PHY_PWM_G3_CLK_DIVIDER PHY_OFF(0x10)
-#define UFS_PHY_PWM_G4_CLK_DIVIDER PHY_OFF(0x14)
-#define UFS_PHY_CORECLK_PWM_G1_CLK_DIVIDER PHY_OFF(0x34)
-#define UFS_PHY_CORECLK_PWM_G2_CLK_DIVIDER PHY_OFF(0x38)
-#define UFS_PHY_CORECLK_PWM_G3_CLK_DIVIDER PHY_OFF(0x3C)
-#define UFS_PHY_CORECLK_PWM_G4_CLK_DIVIDER PHY_OFF(0x40)
-#define UFS_PHY_OMC_STATUS_RDVAL PHY_OFF(0x68)
-#define UFS_PHY_LINE_RESET_TIME PHY_OFF(0x28)
-#define UFS_PHY_LINE_RESET_GRANULARITY PHY_OFF(0x2C)
-#define UFS_PHY_TSYNC_RSYNC_CNTL PHY_OFF(0x48)
-#define UFS_PHY_PLL_CNTL PHY_OFF(0x50)
-#define UFS_PHY_TX_LARGE_AMP_DRV_LVL PHY_OFF(0x54)
-#define UFS_PHY_TX_SMALL_AMP_DRV_LVL PHY_OFF(0x5C)
-#define UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL PHY_OFF(0x58)
-#define UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL PHY_OFF(0x60)
-#define UFS_PHY_CFG_CHANGE_CNT_VAL PHY_OFF(0x64)
-#define UFS_PHY_RX_SYNC_WAIT_TIME PHY_OFF(0x6C)
-#define UFS_PHY_TX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xB4)
-#define UFS_PHY_RX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xE0)
-#define UFS_PHY_TX_MIN_STALL_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xB8)
-#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xE4)
-#define UFS_PHY_TX_MIN_SAVE_CONFIG_TIME_CAPABILITY PHY_OFF(0xBC)
-#define UFS_PHY_RX_MIN_SAVE_CONFIG_TIME_CAPABILITY PHY_OFF(0xE8)
-#define UFS_PHY_RX_PWM_BURST_CLOSURE_LENGTH_CAPABILITY PHY_OFF(0xFC)
-#define UFS_PHY_RX_MIN_ACTIVATETIME_CAPABILITY PHY_OFF(0x100)
-#define UFS_PHY_RX_SIGDET_CTRL3 PHY_OFF(0x14c)
-#define UFS_PHY_RMMI_ATTR_CTRL PHY_OFF(0x160)
-#define UFS_PHY_RMMI_RX_CFGUPDT_L1 (1 << 7)
-#define UFS_PHY_RMMI_TX_CFGUPDT_L1 (1 << 6)
-#define UFS_PHY_RMMI_CFGWR_L1 (1 << 5)
-#define UFS_PHY_RMMI_CFGRD_L1 (1 << 4)
-#define UFS_PHY_RMMI_RX_CFGUPDT_L0 (1 << 3)
-#define UFS_PHY_RMMI_TX_CFGUPDT_L0 (1 << 2)
-#define UFS_PHY_RMMI_CFGWR_L0 (1 << 1)
-#define UFS_PHY_RMMI_CFGRD_L0 (1 << 0)
-#define UFS_PHY_RMMI_ATTRID PHY_OFF(0x164)
-#define UFS_PHY_RMMI_ATTRWRVAL PHY_OFF(0x168)
-#define UFS_PHY_RMMI_ATTRRDVAL_L0_STATUS PHY_OFF(0x16C)
-#define UFS_PHY_RMMI_ATTRRDVAL_L1_STATUS PHY_OFF(0x170)
-#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x174)
-
-#define UFS_PHY_TX_LANE_ENABLE_MASK 0x3
-
-/*
- * This structure represents the 20nm specific phy.
- * common_cfg MUST remain the first field in this structure
- * in case extra fields are added. This way, when calling
- * get_ufs_qcom_phy() of generic phy, we can extract the
- * common phy structure (struct ufs_qcom_phy) out of it
- * regardless of the relevant specific phy.
- */
-struct ufs_qcom_phy_qmp_20nm {
- struct ufs_qcom_phy common_cfg;
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_2_0[] = {
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x3f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x1b),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x0f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(0), 0x2F),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(0), 0x20),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(1), 0x2F),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(1), 0x20),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_3_0[] = {
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x2b),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x38),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x3c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_UP_OFFSET, 0x02),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_DN_OFFSET, 0x02),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CNTRL, 0x40),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x98),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0x65),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x1e),
-};
-
-#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
deleted file mode 100644
index 763c8d396af1..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ /dev/null
@@ -1,648 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#include "phy-qcom-ufs-i.h"
-
-#define MAX_PROP_NAME 32
-#define VDDA_PHY_MIN_UV 1000000
-#define VDDA_PHY_MAX_UV 1000000
-#define VDDA_PLL_MIN_UV 1800000
-#define VDDA_PLL_MAX_UV 1800000
-#define VDDP_REF_CLK_MIN_UV 1200000
-#define VDDP_REF_CLK_MAX_UV 1200000
-
-int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
- struct ufs_qcom_phy_calibration *tbl_A,
- int tbl_size_A,
- struct ufs_qcom_phy_calibration *tbl_B,
- int tbl_size_B, bool is_rate_B)
-{
- int i;
- int ret = 0;
-
- if (!tbl_A) {
- dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < tbl_size_A; i++)
- writel_relaxed(tbl_A[i].cfg_value,
- ufs_qcom_phy->mmio + tbl_A[i].reg_offset);
-
- /*
- * In case we would like to work in rate B, we need
- * to override the registers that were configured in the
- * rate A table with the corresponding registers from the
- * rate B table.
- */
- if (is_rate_B) {
- if (!tbl_B) {
- dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
- __func__);
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < tbl_size_B; i++)
- writel_relaxed(tbl_B[i].cfg_value,
- ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
- }
-
- /* flush buffered writes */
- mb();
-
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
-
-/*
- * This assumes the embedded phy structure inside generic_phy is of type
- * struct ufs_qcom_phy. In order to function properly it's crucial
- * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
- * as the first inside generic_phy.
- */
-struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
-{
- return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
-}
-EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);
-
-static
-int ufs_qcom_phy_base_init(struct platform_device *pdev,
- struct ufs_qcom_phy *phy_common)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- int err = 0;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
- phy_common->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR((void const *)phy_common->mmio)) {
- err = PTR_ERR((void const *)phy_common->mmio);
- phy_common->mmio = NULL;
- dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
- __func__, err);
- return err;
- }
-
- /* "dev_ref_clk_ctrl_mem" is optional resource */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "dev_ref_clk_ctrl_mem");
- phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
- phy_common->dev_ref_clk_ctrl_mmio = NULL;
-
- return 0;
-}
-
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
- struct ufs_qcom_phy *common_cfg,
- const struct phy_ops *ufs_qcom_phy_gen_ops,
- struct ufs_qcom_phy_specific_ops *phy_spec_ops)
-{
- int err;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = NULL;
- struct phy_provider *phy_provider;
-
- err = ufs_qcom_phy_base_init(pdev, common_cfg);
- if (err) {
- dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
- goto out;
- }
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- err = PTR_ERR(phy_provider);
- dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
- goto out;
- }
-
- generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
- if (IS_ERR(generic_phy)) {
- err = PTR_ERR(generic_phy);
- dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
- generic_phy = NULL;
- goto out;
- }
-
- common_cfg->phy_spec_ops = phy_spec_ops;
- common_cfg->dev = dev;
-
-out:
- return generic_phy;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
-
-static int ufs_qcom_phy_get_reset(struct ufs_qcom_phy *phy_common)
-{
- struct reset_control *reset;
-
- if (phy_common->ufs_reset)
- return 0;
-
- reset = devm_reset_control_get_exclusive_by_index(phy_common->dev, 0);
- if (IS_ERR(reset))
- return PTR_ERR(reset);
-
- phy_common->ufs_reset = reset;
- return 0;
-}
-
-static int __ufs_qcom_phy_clk_get(struct device *dev,
- const char *name, struct clk **clk_out, bool err_print)
-{
- struct clk *clk;
- int err = 0;
-
- clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- if (err_print)
- dev_err(dev, "failed to get %s err %d", name, err);
- } else {
- *clk_out = clk;
- }
-
- return err;
-}
-
-static int ufs_qcom_phy_clk_get(struct device *dev,
- const char *name, struct clk **clk_out)
-{
- return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
-}
-
-int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
-{
- int err;
-
- if (of_device_is_compatible(phy_common->dev->of_node,
- "qcom,msm8996-ufs-phy-qmp-14nm"))
- goto skip_txrx_clk;
-
- err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
- &phy_common->tx_iface_clk);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
- &phy_common->rx_iface_clk);
- if (err)
- goto out;
-
-skip_txrx_clk:
- err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
- &phy_common->ref_clk_src);
- if (err)
- goto out;
-
- /*
- * "ref_clk_parent" is optional hence don't abort init if it's not
- * found.
- */
- __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
- &phy_common->ref_clk_parent, false);
-
- err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
- &phy_common->ref_clk);
-
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
-
-static int ufs_qcom_phy_init_vreg(struct device *dev,
- struct ufs_qcom_phy_vreg *vreg,
- const char *name)
-{
- int err = 0;
-
- char prop_name[MAX_PROP_NAME];
-
- vreg->name = name;
- vreg->reg = devm_regulator_get(dev, name);
- if (IS_ERR(vreg->reg)) {
- err = PTR_ERR(vreg->reg);
- dev_err(dev, "failed to get %s, %d\n", name, err);
- goto out;
- }
-
- if (dev->of_node) {
- snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
- err = of_property_read_u32(dev->of_node,
- prop_name, &vreg->max_uA);
- if (err && err != -EINVAL) {
- dev_err(dev, "%s: failed to read %s\n",
- __func__, prop_name);
- goto out;
- } else if (err == -EINVAL || !vreg->max_uA) {
- if (regulator_count_voltages(vreg->reg) > 0) {
- dev_err(dev, "%s: %s is mandatory\n",
- __func__, prop_name);
- goto out;
- }
- err = 0;
- }
- }
-
- if (!strcmp(name, "vdda-pll")) {
- vreg->max_uV = VDDA_PLL_MAX_UV;
- vreg->min_uV = VDDA_PLL_MIN_UV;
- } else if (!strcmp(name, "vdda-phy")) {
- vreg->max_uV = VDDA_PHY_MAX_UV;
- vreg->min_uV = VDDA_PHY_MIN_UV;
- } else if (!strcmp(name, "vddp-ref-clk")) {
- vreg->max_uV = VDDP_REF_CLK_MAX_UV;
- vreg->min_uV = VDDP_REF_CLK_MIN_UV;
- }
-
-out:
- return err;
-}
-
-int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
-{
- int err;
-
- err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
- "vdda-pll");
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
- "vdda-phy");
-
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
- "vddp-ref-clk");
-
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
-
-static int ufs_qcom_phy_cfg_vreg(struct device *dev,
- struct ufs_qcom_phy_vreg *vreg, bool on)
-{
- int ret = 0;
- struct regulator *reg = vreg->reg;
- const char *name = vreg->name;
- int min_uV;
- int uA_load;
-
- if (regulator_count_voltages(reg) > 0) {
- min_uV = on ? vreg->min_uV : 0;
- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
- if (ret) {
- dev_err(dev, "%s: %s set voltage failed, err=%d\n",
- __func__, name, ret);
- goto out;
- }
- uA_load = on ? vreg->max_uA : 0;
- ret = regulator_set_load(reg, uA_load);
- if (ret >= 0) {
- /*
- * regulator_set_load() returns new regulator
- * mode upon success.
- */
- ret = 0;
- } else {
- dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
- __func__, name, uA_load, ret);
- goto out;
- }
- }
-out:
- return ret;
-}
-
-static int ufs_qcom_phy_enable_vreg(struct device *dev,
- struct ufs_qcom_phy_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg || vreg->enabled)
- goto out;
-
- ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
- if (ret) {
- dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
- __func__, ret);
- goto out;
- }
-
- ret = regulator_enable(vreg->reg);
- if (ret) {
- dev_err(dev, "%s: enable failed, err=%d\n",
- __func__, ret);
- goto out;
- }
-
- vreg->enabled = true;
-out:
- return ret;
-}
-
-static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
-{
- int ret = 0;
-
- if (phy->is_ref_clk_enabled)
- goto out;
-
- /*
- * The reference clock is propagated in a daisy-chained manner from
- * the source to the phy, so ungate the clocks at each stage.
- */
- ret = clk_prepare_enable(phy->ref_clk_src);
- if (ret) {
- dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
- __func__, ret);
- goto out;
- }
-
- /*
- * "ref_clk_parent" is optional clock hence make sure that clk reference
- * is available before trying to enable the clock.
- */
- if (phy->ref_clk_parent) {
- ret = clk_prepare_enable(phy->ref_clk_parent);
- if (ret) {
- dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
- __func__, ret);
- goto out_disable_src;
- }
- }
-
- ret = clk_prepare_enable(phy->ref_clk);
- if (ret) {
- dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
- __func__, ret);
- goto out_disable_parent;
- }
-
- phy->is_ref_clk_enabled = true;
- goto out;
-
-out_disable_parent:
- if (phy->ref_clk_parent)
- clk_disable_unprepare(phy->ref_clk_parent);
-out_disable_src:
- clk_disable_unprepare(phy->ref_clk_src);
-out:
- return ret;
-}
-
-static int ufs_qcom_phy_disable_vreg(struct device *dev,
- struct ufs_qcom_phy_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg || !vreg->enabled)
- goto out;
-
- ret = regulator_disable(vreg->reg);
-
- if (!ret) {
- /* ignore errors on applying disable config */
- ufs_qcom_phy_cfg_vreg(dev, vreg, false);
- vreg->enabled = false;
- } else {
- dev_err(dev, "%s: %s disable failed, err=%d\n",
- __func__, vreg->name, ret);
- }
-out:
- return ret;
-}
-
-static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
-{
- if (phy->is_ref_clk_enabled) {
- clk_disable_unprepare(phy->ref_clk);
- /*
- * "ref_clk_parent" is optional clock hence make sure that clk
- * reference is available before trying to disable the clock.
- */
- if (phy->ref_clk_parent)
- clk_disable_unprepare(phy->ref_clk_parent);
- clk_disable_unprepare(phy->ref_clk_src);
- phy->is_ref_clk_enabled = false;
- }
-}
-
-/* Turn ON M-PHY RMMI interface clocks */
-static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
-{
- int ret = 0;
-
- if (phy->is_iface_clk_enabled)
- goto out;
-
- ret = clk_prepare_enable(phy->tx_iface_clk);
- if (ret) {
- dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
- __func__, ret);
- goto out;
- }
- ret = clk_prepare_enable(phy->rx_iface_clk);
- if (ret) {
- clk_disable_unprepare(phy->tx_iface_clk);
- dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
- __func__, ret);
- goto out;
- }
- phy->is_iface_clk_enabled = true;
-
-out:
- return ret;
-}
-
-/* Turn OFF M-PHY RMMI interface clocks */
-static void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
-{
- if (phy->is_iface_clk_enabled) {
- clk_disable_unprepare(phy->tx_iface_clk);
- clk_disable_unprepare(phy->rx_iface_clk);
- phy->is_iface_clk_enabled = false;
- }
-}
-
-static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy)
-{
- int ret = 0;
-
- if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
- dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
- __func__);
- ret = -ENOTSUPP;
- } else {
- ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
- }
-
- return ret;
-}
-
-int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int ret = 0;
-
- if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
- dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
- __func__);
- ret = -ENOTSUPP;
- } else {
- ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
- tx_lanes);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
-
-void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
- u8 major, u16 minor, u16 step)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
- ufs_qcom_phy->host_ctrl_rev_major = major;
- ufs_qcom_phy->host_ctrl_rev_minor = minor;
- ufs_qcom_phy->host_ctrl_rev_step = step;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
-
-static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy)
-{
- if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
- dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
- __func__);
- return -ENOTSUPP;
- }
-
- return ufs_qcom_phy->phy_spec_ops->
- is_physical_coding_sublayer_ready(ufs_qcom_phy);
-}
-
-int ufs_qcom_phy_power_on(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
- struct device *dev = phy_common->dev;
- bool is_rate_B = false;
- int err;
-
- err = ufs_qcom_phy_get_reset(phy_common);
- if (err)
- return err;
-
- err = reset_control_assert(phy_common->ufs_reset);
- if (err)
- return err;
-
- if (phy_common->mode == PHY_MODE_UFS_HS_B)
- is_rate_B = true;
-
- err = phy_common->phy_spec_ops->calibrate(phy_common, is_rate_B);
- if (err)
- return err;
-
- err = reset_control_deassert(phy_common->ufs_reset);
- if (err) {
- dev_err(dev, "Failed to assert UFS PHY reset");
- return err;
- }
-
- err = ufs_qcom_phy_start_serdes(phy_common);
- if (err)
- return err;
-
- err = ufs_qcom_phy_is_pcs_ready(phy_common);
- if (err)
- return err;
-
- err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
- if (err) {
- dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
- __func__, err);
- goto out;
- }
-
- phy_common->phy_spec_ops->power_control(phy_common, true);
-
- /* vdda_pll also enables ref clock LDOs so enable it first */
- err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
- if (err) {
- dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
- __func__, err);
- goto out_disable_phy;
- }
-
- err = ufs_qcom_phy_enable_iface_clk(phy_common);
- if (err) {
- dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
- __func__, err);
- goto out_disable_pll;
- }
-
- err = ufs_qcom_phy_enable_ref_clk(phy_common);
- if (err) {
- dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
- __func__, err);
- goto out_disable_iface_clk;
- }
-
- /* enable device PHY ref_clk pad rail */
- if (phy_common->vddp_ref_clk.reg) {
- err = ufs_qcom_phy_enable_vreg(dev,
- &phy_common->vddp_ref_clk);
- if (err) {
- dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
- __func__, err);
- goto out_disable_ref_clk;
- }
- }
-
- goto out;
-
-out_disable_ref_clk:
- ufs_qcom_phy_disable_ref_clk(phy_common);
-out_disable_iface_clk:
- ufs_qcom_phy_disable_iface_clk(phy_common);
-out_disable_pll:
- ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
-out_disable_phy:
- ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);
-
-int ufs_qcom_phy_power_off(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
-
- phy_common->phy_spec_ops->power_control(phy_common, false);
-
- if (phy_common->vddp_ref_clk.reg)
- ufs_qcom_phy_disable_vreg(phy_common->dev,
- &phy_common->vddp_ref_clk);
- ufs_qcom_phy_disable_ref_clk(phy_common);
- ufs_qcom_phy_disable_iface_clk(phy_common);
-
- ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
- ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
- reset_control_assert(phy_common->ufs_reset);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
-
-MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>");
-MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index bfb22f868857..e34e4475027c 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -111,6 +111,7 @@ struct rcar_gen3_chan {
struct work_struct work;
struct mutex lock; /* protects rphys[...].powered */
enum usb_dr_mode dr_mode;
+ int irq;
bool extcon_host;
bool is_otg_channel;
bool uses_otg_pins;
@@ -389,12 +390,40 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
rcar_gen3_device_recognition(ch);
}
+static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+{
+ struct rcar_gen3_chan *ch = _ch;
+ void __iomem *usb2_base = ch->base;
+ u32 status = readl(usb2_base + USB2_OBINTSTA);
+ irqreturn_t ret = IRQ_NONE;
+
+ if (status & USB2_OBINT_BITS) {
+ dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
+ writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
+ rcar_gen3_device_recognition(ch);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
static int rcar_gen3_phy_usb2_init(struct phy *p)
{
struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
u32 val;
+ int ret;
+
+ if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
+ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+ ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
+ IRQF_SHARED, dev_name(channel->dev), channel);
+ if (ret < 0) {
+ dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
+ return ret;
+ }
+ }
/* Initialize USB2 part */
val = readl(usb2_base + USB2_INT_ENABLE);
@@ -433,6 +462,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
val &= ~USB2_INT_ENABLE_UCOM_INTEN;
writel(val, usb2_base + USB2_INT_ENABLE);
+ if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
+ free_irq(channel->irq, channel);
+
return 0;
}
@@ -503,23 +535,6 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = {
.owner = THIS_MODULE,
};
-static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
-{
- struct rcar_gen3_chan *ch = _ch;
- void __iomem *usb2_base = ch->base;
- u32 status = readl(usb2_base + USB2_OBINTSTA);
- irqreturn_t ret = IRQ_NONE;
-
- if (status & USB2_OBINT_BITS) {
- dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
- writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
- rcar_gen3_device_recognition(ch);
- ret = IRQ_HANDLED;
- }
-
- return ret;
-}
-
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
{
.compatible = "renesas,usb2-phy-r8a77470",
@@ -598,7 +613,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
struct phy_provider *provider;
struct resource *res;
const struct phy_ops *phy_usb2_ops;
- int irq, ret = 0, i;
+ int ret = 0, i;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
@@ -614,16 +629,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
if (IS_ERR(channel->base))
return PTR_ERR(channel->base);
- /* call request_irq for OTG */
- irq = platform_get_irq_optional(pdev, 0);
- if (irq >= 0) {
- INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
- irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
- IRQF_SHARED, dev_name(dev), channel);
- if (irq < 0)
- dev_err(dev, "No irq handler (%d)\n", irq);
- }
-
+ /* get irq number here and request_irq for OTG in phy_init */
+ channel->irq = platform_get_irq_optional(pdev, 0);
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
int ret;
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 24563160197f..70a31251b202 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -347,7 +347,7 @@ struct usb3phy_reg {
};
/**
- * struct rockchip_usb3phy_port_cfg: usb3-phy port configuration.
+ * struct rockchip_usb3phy_port_cfg - usb3-phy port configuration.
* @reg: the base address for usb3-phy config.
* @typec_conn_dir: the register of type-c connector direction.
* @usb3tousb2_en: the register of type-c force usb2 to usb2 enable.
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index 9e483d1fdaf2..e20d2fcc9fe7 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -3,23 +3,23 @@
# Phy drivers for Samsung platforms
#
config PHY_EXYNOS_DP_VIDEO
- tristate "EXYNOS SoC series Display Port PHY driver"
+ tristate "Exynos SoC series Display Port PHY driver"
depends on OF
depends on ARCH_EXYNOS || COMPILE_TEST
default ARCH_EXYNOS
select GENERIC_PHY
help
- Support for Display Port PHY found on Samsung EXYNOS SoCs.
+ Support for Display Port PHY found on Samsung Exynos SoCs.
config PHY_EXYNOS_MIPI_VIDEO
- tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
+ tristate "S5P/Exynos SoC series MIPI CSI-2/DSI PHY driver"
depends on HAS_IOMEM
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select GENERIC_PHY
default y if ARCH_S5PV210 || ARCH_EXYNOS
help
Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
- and EXYNOS SoCs.
+ and Exynos SoCs.
config PHY_EXYNOS_PCIE
bool "Exynos PCIe PHY driver"
@@ -29,6 +29,15 @@ config PHY_EXYNOS_PCIE
Enable PCIe PHY support for Exynos SoC series.
This driver provides PHY interface for Exynos PCIe controller.
+config PHY_SAMSUNG_UFS
+ tristate "SAMSUNG SoC series UFS PHY driver"
+ depends on OF && (ARCH_EXYNOS || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable this to support the Samsung UFS PHY driver for
+ Samsung SoCs. This driver provides the interface for the UFS
+ host controller to do PHY-related programming.
+
config PHY_SAMSUNG_USB2
tristate "Samsung USB 2.0 PHY driver"
depends on HAS_IOMEM
diff --git a/drivers/phy/samsung/Makefile b/drivers/phy/samsung/Makefile
index db9b1aa0de6e..3959100fe8a2 100644
--- a/drivers/phy/samsung/Makefile
+++ b/drivers/phy/samsung/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
obj-$(CONFIG_PHY_EXYNOS_PCIE) += phy-exynos-pcie.o
+obj-$(CONFIG_PHY_SAMSUNG_UFS) += phy-samsung-ufs.o
obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o
phy-exynos-usb2-y += phy-samsung-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
index 6c607df1dc9a..2b670ef91deb 100644
--- a/drivers/phy/samsung/phy-exynos-dp-video.c
+++ b/drivers/phy/samsung/phy-exynos-dp-video.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung EXYNOS SoC series Display Port PHY driver
+ * Samsung Exynos SoC series Display Port PHY driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Jingoo Han <jg1.han@samsung.com>
@@ -115,5 +115,5 @@ static struct platform_driver exynos_dp_video_phy_driver = {
module_platform_driver(exynos_dp_video_phy_driver);
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung EXYNOS SoC DP PHY driver");
+MODULE_DESCRIPTION("Samsung Exynos SoC DP PHY driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index bb51195f189f..c1df1ef3ee3c 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY driver
+ * Samsung S5P/Exynos SoC series MIPI CSIS/DSIM DPHY driver
*
* Copyright (C) 2013,2016 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -364,6 +364,6 @@ static struct platform_driver exynos_mipi_video_phy_driver = {
};
module_platform_driver(exynos_mipi_video_phy_driver);
-MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC MIPI CSI-2/DSI PHY driver");
+MODULE_DESCRIPTION("Samsung S5P/Exynos SoC MIPI CSI-2/DSI PHY driver");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 659e7ae0a6cf..7e28b1aea0d1 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung EXYNOS SoC series PCIe PHY driver
+ * Samsung Exynos SoC series PCIe PHY driver
*
* Phy provider for PCIe controller on Exynos SoC series
*
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index e510732afb8b..0d818b77a0d8 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung EXYNOS5 SoC series USB DRD PHY driver
+ * Samsung Exynos5 SoC series USB DRD PHY driver
*
* Phy provider for USB 3.0 DRD controller on Exynos5 SoC series
*
@@ -33,7 +33,7 @@
#define EXYNOS5_FSEL_24MHZ 0x5
#define EXYNOS5_FSEL_50MHZ 0x7
-/* EXYNOS5: USB 3.0 DRD PHY registers */
+/* Exynos5: USB 3.0 DRD PHY registers */
#define EXYNOS5_DRD_LINKSYSTEM 0x04
#define LINKSYSTEM_FLADJ_MASK (0x3f << 1)
@@ -180,14 +180,14 @@ struct exynos5_usbdrd_phy_drvdata {
* @utmiclk: clock for utmi+ phy
* @itpclk: clock for ITP generation
* @drv_data: pointer to SoC level driver data structure
- * @phys[]: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
+ * @phys: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
* instances each with its 'phy' and 'phy_cfg'.
* @extrefclk: frequency select settings when using 'separate
* reference clocks' for SS and HS operations
* @ref_clk: reference clock to PHY block from which PHY's
* operational clocks are derived
- * vbus: VBUS regulator for phy
- * vbus_boost: Boost regulator for VBUS present on few Exynos boards
+ * @vbus: VBUS regulator for phy
+ * @vbus_boost: Boost regulator for VBUS present on few Exynos boards
*/
struct exynos5_usbdrd_phy {
struct device *dev;
@@ -714,7 +714,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy)
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
- return exynos5420_usbdrd_phy_calibrate(phy_drd);
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI)
+ return exynos5420_usbdrd_phy_calibrate(phy_drd);
+ return 0;
}
static const struct phy_ops exynos5_usbdrd_phy_ops = {
@@ -958,7 +960,7 @@ static struct platform_driver exynos5_usb3drd_phy = {
};
module_platform_driver(exynos5_usb3drd_phy);
-MODULE_DESCRIPTION("Samsung EXYNOS5 SoCs USB 3.0 DRD controller PHY driver");
+MODULE_DESCRIPTION("Samsung Exynos5 SoCs USB 3.0 DRD controller PHY driver");
MODULE_AUTHOR("Vivek Gautam <gautam.vivek@samsung.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:exynos5_usb3drd_phy");
diff --git a/drivers/phy/samsung/phy-exynos7-ufs.h b/drivers/phy/samsung/phy-exynos7-ufs.h
new file mode 100644
index 000000000000..518923141958
--- /dev/null
+++ b/drivers/phy/samsung/phy-exynos7-ufs.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS PHY driver data for Samsung EXYNOS7 SoC
+ *
+ * Copyright (C) 2020 Samsung Electronics Co., Ltd.
+ */
+#ifndef _PHY_EXYNOS7_UFS_H_
+#define _PHY_EXYNOS7_UFS_H_
+
+#include "phy-samsung-ufs.h"
+
+#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL 0x720
+#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1
+#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0)
+
+/* Calibration for phy initialization */
+static const struct samsung_ufs_phy_cfg exynos7_pre_init_cfg[] = {
+ PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x010, 0x82, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x011, 0x1e, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x017, 0x84, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x035, 0x58, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x036, 0x32, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x037, 0x40, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x03b, 0x83, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x042, 0x88, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x043, 0xa6, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x048, 0x74, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x04c, 0x5b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x04d, 0x83, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x05c, 0x14, PWR_MODE_ANY),
+ END_UFS_PHY_CFG
+};
+
+/* Calibration for HS mode series A/B */
+static const struct samsung_ufs_phy_cfg exynos7_pre_pwr_hs_cfg[] = {
+ PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_HS_ANY),
+ PHY_COMN_REG_CFG(0x010, 0x82, PWR_MODE_HS_ANY),
+ PHY_COMN_REG_CFG(0x011, 0x1e, PWR_MODE_HS_ANY),
+ /* Setting order: 1st(0x16), 2nd(0x15) */
+ PHY_COMN_REG_CFG(0x016, 0xff, PWR_MODE_HS_ANY),
+ PHY_COMN_REG_CFG(0x015, 0x80, PWR_MODE_HS_ANY),
+ PHY_COMN_REG_CFG(0x017, 0x94, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x036, 0x32, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x037, 0x43, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x038, 0x3f, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x042, 0x88, PWR_MODE_HS_G2_SER_A),
+ PHY_TRSV_REG_CFG(0x042, 0xbb, PWR_MODE_HS_G2_SER_B),
+ PHY_TRSV_REG_CFG(0x043, 0xa6, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x048, 0x74, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x034, 0x35, PWR_MODE_HS_G2_SER_A),
+ PHY_TRSV_REG_CFG(0x034, 0x36, PWR_MODE_HS_G2_SER_B),
+ PHY_TRSV_REG_CFG(0x035, 0x5b, PWR_MODE_HS_G2_SER_A),
+ PHY_TRSV_REG_CFG(0x035, 0x5c, PWR_MODE_HS_G2_SER_B),
+ END_UFS_PHY_CFG
+};
+
+/* Calibration for HS mode series A/B after PMC */
+static const struct samsung_ufs_phy_cfg exynos7_post_pwr_hs_cfg[] = {
+ PHY_COMN_REG_CFG(0x015, 0x00, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x04d, 0x83, PWR_MODE_HS_ANY),
+ END_UFS_PHY_CFG
+};
+
+static const struct samsung_ufs_phy_cfg *exynos7_ufs_phy_cfgs[CFG_TAG_MAX] = {
+ [CFG_PRE_INIT] = exynos7_pre_init_cfg,
+ [CFG_PRE_PWR_HS] = exynos7_pre_pwr_hs_cfg,
+ [CFG_POST_PWR_HS] = exynos7_post_pwr_hs_cfg,
+};
+
+static struct samsung_ufs_phy_drvdata exynos7_ufs_phy = {
+ .cfg = exynos7_ufs_phy_cfgs,
+ .isol = {
+ .offset = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL,
+ .mask = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_MASK,
+ .en = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_EN,
+ },
+ .has_symbol_clk = 1,
+};
+
+#endif /* _PHY_EXYNOS7_UFS_H_ */
diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c
new file mode 100644
index 000000000000..9832599a0283
--- /dev/null
+++ b/drivers/phy/samsung/phy-samsung-ufs.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UFS PHY driver for Samsung SoC
+ *
+ * Copyright (C) 2020 Samsung Electronics Co., Ltd.
+ * Author: Seungwon Jeon <essuuj@gmail.com>
+ * Author: Alim Akhtar <alim.akhtar@samsung.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "phy-samsung-ufs.h"
+
+#define for_each_phy_lane(phy, i) \
+ for (i = 0; i < (phy)->lane_cnt; i++)
+#define for_each_phy_cfg(cfg) \
+ for (; (cfg)->id; (cfg)++)
+
+#define PHY_DEF_LANE_CNT 1
+
+static void samsung_ufs_phy_config(struct samsung_ufs_phy *phy,
+ const struct samsung_ufs_phy_cfg *cfg,
+ u8 lane)
+{
+ enum {LANE_0, LANE_1}; /* lane index */
+
+ switch (lane) {
+ case LANE_0:
+ writel(cfg->val, (phy)->reg_pma + cfg->off_0);
+ break;
+ case LANE_1:
+ if (cfg->id == PHY_TRSV_BLK)
+ writel(cfg->val, (phy)->reg_pma + cfg->off_1);
+ break;
+ }
+}
+
+static int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy)
+{
+ struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
+ const unsigned int timeout_us = 100000;
+ const unsigned int sleep_us = 10;
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout(
+ ufs_phy->reg_pma + PHY_APB_ADDR(PHY_PLL_LOCK_STATUS),
+ val, (val & PHY_PLL_LOCK_BIT), sleep_us, timeout_us);
+ if (err) {
+ dev_err(ufs_phy->dev,
+ "failed to get phy pll lock acquisition %d\n", err);
+ goto out;
+ }
+
+ err = readl_poll_timeout(
+ ufs_phy->reg_pma + PHY_APB_ADDR(PHY_CDR_LOCK_STATUS),
+ val, (val & PHY_CDR_LOCK_BIT), sleep_us, timeout_us);
+ if (err)
+ dev_err(ufs_phy->dev,
+ "failed to get phy cdr lock acquisition %d\n", err);
+out:
+ return err;
+}
+
+static int samsung_ufs_phy_calibrate(struct phy *phy)
+{
+ struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
+ struct samsung_ufs_phy_cfg **cfgs = ufs_phy->cfg;
+ const struct samsung_ufs_phy_cfg *cfg;
+ int err = 0;
+ int i;
+
+ if (unlikely(ufs_phy->ufs_phy_state < CFG_PRE_INIT ||
+ ufs_phy->ufs_phy_state >= CFG_TAG_MAX)) {
+ dev_err(ufs_phy->dev, "invalid phy config index %d\n", ufs_phy->ufs_phy_state);
+ return -EINVAL;
+ }
+
+ cfg = cfgs[ufs_phy->ufs_phy_state];
+ if (!cfg)
+ goto out;
+
+ for_each_phy_cfg(cfg) {
+ for_each_phy_lane(ufs_phy, i) {
+ samsung_ufs_phy_config(ufs_phy, cfg, i);
+ }
+ }
+
+ if (ufs_phy->ufs_phy_state == CFG_POST_PWR_HS)
+ err = samsung_ufs_phy_wait_for_lock_acq(phy);
+
+ /*
+ * In the Samsung UFS host controller, the PHY needs to be
+ * calibrated at several stages/states: before link startup, after
+ * link startup, before a power mode change and after a power mode
+ * change. The state machine below makes sure the PHY is calibrated
+ * in each of these states. After configuring the PHY for the
+ * current state, move on to the next state so that the next
+ * state's calibration values can be programmed.
+ */
+out:
+ switch (ufs_phy->ufs_phy_state) {
+ case CFG_PRE_INIT:
+ ufs_phy->ufs_phy_state = CFG_POST_INIT;
+ break;
+ case CFG_POST_INIT:
+ ufs_phy->ufs_phy_state = CFG_PRE_PWR_HS;
+ break;
+ case CFG_PRE_PWR_HS:
+ ufs_phy->ufs_phy_state = CFG_POST_PWR_HS;
+ break;
+ case CFG_POST_PWR_HS:
+ /* Change back to INIT state */
+ ufs_phy->ufs_phy_state = CFG_PRE_INIT;
+ break;
+ default:
+ dev_err(ufs_phy->dev, "wrong state for phy calibration\n");
+ }
+
+ return err;
+}
+
+static int samsung_ufs_phy_symbol_clk_init(struct samsung_ufs_phy *phy)
+{
+ int ret;
+
+ phy->tx0_symbol_clk = devm_clk_get(phy->dev, "tx0_symbol_clk");
+ if (IS_ERR(phy->tx0_symbol_clk)) {
+ dev_err(phy->dev, "failed to get tx0_symbol_clk clock\n");
+ return PTR_ERR(phy->tx0_symbol_clk);
+ }
+
+ phy->rx0_symbol_clk = devm_clk_get(phy->dev, "rx0_symbol_clk");
+ if (IS_ERR(phy->rx0_symbol_clk)) {
+ dev_err(phy->dev, "failed to get rx0_symbol_clk clock\n");
+ return PTR_ERR(phy->rx0_symbol_clk);
+ }
+
+ phy->rx1_symbol_clk = devm_clk_get(phy->dev, "rx1_symbol_clk");
+ if (IS_ERR(phy->rx1_symbol_clk)) {
+ dev_err(phy->dev, "failed to get rx1_symbol_clk clock\n");
+ return PTR_ERR(phy->rx1_symbol_clk);
+ }
+
+ ret = clk_prepare_enable(phy->tx0_symbol_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: tx0_symbol_clk enable failed %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(phy->rx0_symbol_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: rx0_symbol_clk enable failed %d\n", __func__, ret);
+ goto out_disable_tx0_clk;
+ }
+
+ ret = clk_prepare_enable(phy->rx1_symbol_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: rx1_symbol_clk enable failed %d\n", __func__, ret);
+ goto out_disable_rx0_clk;
+ }
+
+ return 0;
+
+out_disable_rx0_clk:
+ clk_disable_unprepare(phy->rx0_symbol_clk);
+out_disable_tx0_clk:
+ clk_disable_unprepare(phy->tx0_symbol_clk);
+out:
+ return ret;
+}
+
+static int samsung_ufs_phy_clks_init(struct samsung_ufs_phy *phy)
+{
+ int ret;
+
+ phy->ref_clk = devm_clk_get(phy->dev, "ref_clk");
+ if (IS_ERR(phy->ref_clk))
+ dev_err(phy->dev, "failed to get ref_clk clock\n");
+
+ ret = clk_prepare_enable(phy->ref_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: ref_clk enable failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ dev_dbg(phy->dev, "UFS MPHY ref_clk_rate = %ld\n", clk_get_rate(phy->ref_clk));
+
+ return 0;
+}
+
+static int samsung_ufs_phy_init(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+ int ret;
+
+ ss_phy->lane_cnt = phy->attrs.bus_width;
+ ss_phy->ufs_phy_state = CFG_PRE_INIT;
+
+ if (ss_phy->drvdata->has_symbol_clk) {
+ ret = samsung_ufs_phy_symbol_clk_init(ss_phy);
+ if (ret)
+ dev_err(ss_phy->dev, "failed to set ufs phy symbol clocks\n");
+ }
+
+ ret = samsung_ufs_phy_clks_init(ss_phy);
+ if (ret)
+ dev_err(ss_phy->dev, "failed to set ufs phy clocks\n");
+
+ ret = samsung_ufs_phy_calibrate(phy);
+ if (ret)
+ dev_err(ss_phy->dev, "ufs phy calibration failed\n");
+
+ return ret;
+}
+
+static int samsung_ufs_phy_power_on(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+
+ samsung_ufs_phy_ctrl_isol(ss_phy, false);
+ return 0;
+}
+
+static int samsung_ufs_phy_power_off(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+
+ samsung_ufs_phy_ctrl_isol(ss_phy, true);
+ return 0;
+}
+
+static int samsung_ufs_phy_set_mode(struct phy *generic_phy,
+ enum phy_mode mode, int submode)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(generic_phy);
+
+ ss_phy->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ ss_phy->mode = mode;
+
+ return 0;
+}
+
+static int samsung_ufs_phy_exit(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+
+ clk_disable_unprepare(ss_phy->ref_clk);
+
+ if (ss_phy->drvdata->has_symbol_clk) {
+ clk_disable_unprepare(ss_phy->tx0_symbol_clk);
+ clk_disable_unprepare(ss_phy->rx0_symbol_clk);
+ clk_disable_unprepare(ss_phy->rx1_symbol_clk);
+ }
+
+ return 0;
+}
+
+static struct phy_ops samsung_ufs_phy_ops = {
+ .init = samsung_ufs_phy_init,
+ .exit = samsung_ufs_phy_exit,
+ .power_on = samsung_ufs_phy_power_on,
+ .power_off = samsung_ufs_phy_power_off,
+ .calibrate = samsung_ufs_phy_calibrate,
+ .set_mode = samsung_ufs_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id samsung_ufs_phy_match[];
+
+static int samsung_ufs_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ struct samsung_ufs_phy *phy;
+ struct phy *gen_phy;
+ struct phy_provider *phy_provider;
+ const struct samsung_ufs_phy_drvdata *drvdata;
+ int err = 0;
+
+ match = of_match_node(samsung_ufs_phy_match, dev->of_node);
+ if (!match) {
+ err = -EINVAL;
+ dev_err(dev, "failed to get match_node\n");
+ goto out;
+ }
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ phy->reg_pma = devm_platform_ioremap_resource_byname(pdev, "phy-pma");
+ if (IS_ERR(phy->reg_pma)) {
+ err = PTR_ERR(phy->reg_pma);
+ goto out;
+ }
+
+ phy->reg_pmu = syscon_regmap_lookup_by_phandle(
+ dev->of_node, "samsung,pmu-syscon");
+ if (IS_ERR(phy->reg_pmu)) {
+ err = PTR_ERR(phy->reg_pmu);
+ dev_err(dev, "failed syscon remap for pmu\n");
+ goto out;
+ }
+
+ gen_phy = devm_phy_create(dev, NULL, &samsung_ufs_phy_ops);
+ if (IS_ERR(gen_phy)) {
+ err = PTR_ERR(gen_phy);
+ dev_err(dev, "failed to create PHY for ufs-phy\n");
+ goto out;
+ }
+
+ drvdata = match->data;
+ phy->dev = dev;
+ phy->drvdata = drvdata;
+ phy->cfg = (struct samsung_ufs_phy_cfg **)drvdata->cfg;
+ phy->isol = &drvdata->isol;
+ phy->lane_cnt = PHY_DEF_LANE_CNT;
+
+ phy_set_drvdata(gen_phy, phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ err = PTR_ERR(phy_provider);
+ dev_err(dev, "failed to register phy-provider\n");
+ goto out;
+ }
+out:
+ return err;
+}
+
+static const struct of_device_id samsung_ufs_phy_match[] = {
+ {
+ .compatible = "samsung,exynos7-ufs-phy",
+ .data = &exynos7_ufs_phy,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, samsung_ufs_phy_match);
+
+static struct platform_driver samsung_ufs_phy_driver = {
+ .probe = samsung_ufs_phy_probe,
+ .driver = {
+ .name = "samsung-ufs-phy",
+ .of_match_table = samsung_ufs_phy_match,
+ },
+};
+module_platform_driver(samsung_ufs_phy_driver);
+MODULE_DESCRIPTION("Samsung SoC UFS PHY Driver");
+MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>");
+MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/samsung/phy-samsung-ufs.h b/drivers/phy/samsung/phy-samsung-ufs.h
new file mode 100644
index 000000000000..5de78710524c
--- /dev/null
+++ b/drivers/phy/samsung/phy-samsung-ufs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS PHY driver for Samsung EXYNOS SoC
+ *
+ * Copyright (C) 2020 Samsung Electronics Co., Ltd.
+ * Author: Seungwon Jeon <essuuj@gmail.com>
+ * Author: Alim Akhtar <alim.akhtar@samsung.com>
+ *
+ */
+#ifndef _PHY_SAMSUNG_UFS_
+#define _PHY_SAMSUNG_UFS_
+
+#define PHY_COMN_BLK 1
+#define PHY_TRSV_BLK 2
+#define END_UFS_PHY_CFG { 0 }
+#define PHY_TRSV_CH_OFFSET 0x30
+#define PHY_APB_ADDR(off) ((off) << 2)
+
+#define PHY_COMN_REG_CFG(o, v, d) { \
+ .off_0 = PHY_APB_ADDR((o)), \
+ .off_1 = 0, \
+ .val = (v), \
+ .desc = (d), \
+ .id = PHY_COMN_BLK, \
+}
+
+#define PHY_TRSV_REG_CFG(o, v, d) { \
+ .off_0 = PHY_APB_ADDR((o)), \
+ .off_1 = PHY_APB_ADDR((o) + PHY_TRSV_CH_OFFSET), \
+ .val = (v), \
+ .desc = (d), \
+ .id = PHY_TRSV_BLK, \
+}
+
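+/*
+ * Illustrative expansion of the macros above (not part of the
+ * calibration tables): PHY_TRSV_REG_CFG(0x035, 0x58, PWR_MODE_ANY)
+ * fills off_0 = 0x035 << 2 = 0x0d4 (used for lane 0) and
+ * off_1 = (0x035 + 0x30) << 2 = 0x194 (used for a second lane),
+ * with val = 0x58 and id = PHY_TRSV_BLK.
+ */
+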
+/* UFS PHY registers */
+#define PHY_PLL_LOCK_STATUS 0x1e
+#define PHY_CDR_LOCK_STATUS 0x5e
+
+#define PHY_PLL_LOCK_BIT BIT(5)
+#define PHY_CDR_LOCK_BIT BIT(4)
+
+/* description for PHY calibration */
+enum {
+ /* applicable to any */
+ PWR_DESC_ANY = 0,
+ /* mode */
+ PWR_DESC_PWM = 1,
+ PWR_DESC_HS = 2,
+ /* series */
+ PWR_DESC_SER_A = 1,
+ PWR_DESC_SER_B = 2,
+ /* gear */
+ PWR_DESC_G1 = 1,
+ PWR_DESC_G2 = 2,
+ PWR_DESC_G3 = 3,
+ /* field mask */
+ MD_MASK = 0x3,
+ SR_MASK = 0x3,
+ GR_MASK = 0x7,
+};
+
+#define PWR_MODE_HS_G1_ANY PWR_MODE_HS(PWR_DESC_G1, PWR_DESC_ANY)
+#define PWR_MODE_HS_G1_SER_A PWR_MODE_HS(PWR_DESC_G1, PWR_DESC_SER_A)
+#define PWR_MODE_HS_G1_SER_B PWR_MODE_HS(PWR_DESC_G1, PWR_DESC_SER_B)
+#define PWR_MODE_HS_G2_ANY PWR_MODE_HS(PWR_DESC_G2, PWR_DESC_ANY)
+#define PWR_MODE_HS_G2_SER_A PWR_MODE_HS(PWR_DESC_G2, PWR_DESC_SER_A)
+#define PWR_MODE_HS_G2_SER_B PWR_MODE_HS(PWR_DESC_G2, PWR_DESC_SER_B)
+#define PWR_MODE_HS_G3_ANY PWR_MODE_HS(PWR_DESC_G3, PWR_DESC_ANY)
+#define PWR_MODE_HS_G3_SER_A PWR_MODE_HS(PWR_DESC_G3, PWR_DESC_SER_A)
+#define PWR_MODE_HS_G3_SER_B PWR_MODE_HS(PWR_DESC_G3, PWR_DESC_SER_B)
+#define PWR_MODE(g, s, m) ((((g) & GR_MASK) << 4) |\
+ (((s) & SR_MASK) << 2) | ((m) & MD_MASK))
+#define PWR_MODE_PWM_ANY PWR_MODE(PWR_DESC_ANY,\
+ PWR_DESC_ANY, PWR_DESC_PWM)
+#define PWR_MODE_HS(g, s) ((((g) & GR_MASK) << 4) |\
+ (((s) & SR_MASK) << 2) | PWR_DESC_HS)
+#define PWR_MODE_HS_ANY PWR_MODE(PWR_DESC_ANY,\
+ PWR_DESC_ANY, PWR_DESC_HS)
+#define PWR_MODE_ANY PWR_MODE(PWR_DESC_ANY,\
+ PWR_DESC_ANY, PWR_DESC_ANY)
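+
+/*
+ * Worked example of the encoding above: PWR_MODE_HS_G2_SER_A packs
+ * gear 2, series A and HS mode as (2 << 4) | (1 << 2) | 2 = 0x26,
+ * while PWR_MODE_ANY evaluates to 0 (all descriptor fields set to
+ * PWR_DESC_ANY).
+ */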
+/* PHY calibration point/state */
+enum {
+ CFG_PRE_INIT,
+ CFG_POST_INIT,
+ CFG_PRE_PWR_HS,
+ CFG_POST_PWR_HS,
+ CFG_TAG_MAX,
+};
+
+struct samsung_ufs_phy_cfg {
+ u32 off_0;
+ u32 off_1;
+ u32 val;
+ u8 desc;
+ u8 id;
+};
+
+struct samsung_ufs_phy_drvdata {
+ const struct samsung_ufs_phy_cfg **cfg;
+ struct pmu_isol {
+ u32 offset;
+ u32 mask;
+ u32 en;
+ } isol;
+ bool has_symbol_clk;
+};
+
+struct samsung_ufs_phy {
+ struct device *dev;
+ void __iomem *reg_pma;
+ struct regmap *reg_pmu;
+ struct clk *ref_clk;
+ struct clk *ref_clk_parent;
+ struct clk *tx0_symbol_clk;
+ struct clk *rx0_symbol_clk;
+ struct clk *rx1_symbol_clk;
+ const struct samsung_ufs_phy_drvdata *drvdata;
+ struct samsung_ufs_phy_cfg **cfg;
+ const struct pmu_isol *isol;
+ u8 lane_cnt;
+ int ufs_phy_state;
+ enum phy_mode mode;
+};
+
+static inline struct samsung_ufs_phy *get_samsung_ufs_phy(struct phy *phy)
+{
+ return (struct samsung_ufs_phy *)phy_get_drvdata(phy);
+}
+
+static inline void samsung_ufs_phy_ctrl_isol(
+ struct samsung_ufs_phy *phy, u32 isol)
+{
+ regmap_update_bits(phy->reg_pmu, phy->isol->offset,
+ phy->isol->mask, isol ? 0 : phy->isol->en);
+}
+
+#include "phy-exynos7-ufs.h"
+
+#endif /* _PHY_SAMSUNG_UFS_ */
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index 090aa02e02de..a3ed3ff04690 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -255,7 +255,7 @@ static struct platform_driver samsung_usb2_phy_driver = {
};
module_platform_driver(samsung_usb2_phy_driver);
-MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC USB PHY driver");
+MODULE_DESCRIPTION("Samsung S5P/Exynos SoC USB PHY driver");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:samsung-usb2-phy");
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 56bdea4b0bd9..2b3639cba51a 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -327,7 +327,7 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
if (IS_ERR(usbphyc->base))
return PTR_ERR(usbphyc->base);
- usbphyc->clk = devm_clk_get(dev, 0);
+ usbphyc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(usbphyc->clk)) {
ret = PTR_ERR(usbphyc->clk);
dev_err(dev, "clk get failed: %d\n", ret);
@@ -340,7 +340,7 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
return ret;
}
- usbphyc->rst = devm_reset_control_get(dev, 0);
+ usbphyc->rst = devm_reset_control_get(dev, NULL);
if (!IS_ERR(usbphyc->rst)) {
reset_control_assert(usbphyc->rst);
udelay(2);
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index 26f194779064..57adc08a89b2 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -82,17 +82,16 @@ static int dm816x_usb_phy_init(struct phy *x)
{
struct dm816x_usb_phy *phy = phy_get_drvdata(x);
unsigned int val;
- int error;
if (clk_get_rate(phy->refclk) != 24000000)
dev_warn(phy->dev, "nonstandard phy refclk\n");
/* Set PLL ref clock and put phys to sleep */
- error = regmap_update_bits(phy->syscon, phy->usb_ctrl,
- DM816X_USB_CTRL_PHYCLKSRC |
- DM816X_USB_CTRL_PHYSLEEP1 |
- DM816X_USB_CTRL_PHYSLEEP0,
- 0);
+ regmap_update_bits(phy->syscon, phy->usb_ctrl,
+ DM816X_USB_CTRL_PHYCLKSRC |
+ DM816X_USB_CTRL_PHYSLEEP1 |
+ DM816X_USB_CTRL_PHYSLEEP0,
+ 0);
regmap_read(phy->syscon, phy->usb_ctrl, &val);
if ((val & 3) != 0)
dev_info(phy->dev,
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index a87946589eb7..e9332c90f75f 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -337,7 +337,6 @@ static int ti_pipe3_power_on(struct phy *x)
{
u32 val;
u32 mask;
- int ret;
unsigned long rate;
struct ti_pipe3 *phy = phy_get_drvdata(x);
bool rx_pending = false;
@@ -355,8 +354,8 @@ static int ti_pipe3_power_on(struct phy *x)
rate = rate / 1000000;
mask = OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK;
val = rate << OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT;
- ret = regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
- mask, val);
+ regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
+ mask, val);
/*
* For PCIe, TX and RX must be powered on simultaneously.
* For USB and SATA, TX must be powered on before RX
diff --git a/drivers/phy/xilinx/Kconfig b/drivers/phy/xilinx/Kconfig
new file mode 100644
index 000000000000..d8b0d46b2b4d
--- /dev/null
+++ b/drivers/phy/xilinx/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+#
+# PHY drivers for Xilinx platforms
+#
+
+config PHY_XILINX_ZYNQMP
+ tristate "Xilinx ZynqMP PHY driver"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select GENERIC_PHY
+ help
+ Enable this to support the ZynqMP High Speed Gigabit Transceiver
+ that is part of the ZynqMP SoC.
diff --git a/drivers/phy/xilinx/Makefile b/drivers/phy/xilinx/Makefile
new file mode 100644
index 000000000000..3f1f6a2a9b45
--- /dev/null
+++ b/drivers/phy/xilinx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PHY_XILINX_ZYNQMP) += phy-zynqmp.o
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
new file mode 100644
index 000000000000..2b0f921b6ee3
--- /dev/null
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -0,0 +1,993 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
+ *
+ * Copyright (C) 2018-2020 Xilinx Inc.
+ *
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ * Author: Subbaraya Sundeep <sundeep.lkml@gmail.com>
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This driver is tested for USB, SATA and Display Port currently.
+ * The other controllers, PCIe and SGMII, should also work but are
+ * experimental as of now.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+/*
+ * Lane Registers
+ */
+
+/* TX De-emphasis parameters */
+#define L0_TX_ANA_TM_18 0x0048
+#define L0_TX_ANA_TM_118 0x01d8
+#define L0_TX_ANA_TM_118_FORCE_17_0 BIT(0)
+
+/* DN Resistor calibration code parameters */
+#define L0_TXPMA_ST_3 0x0b0c
+#define L0_DN_CALIB_CODE 0x3f
+
+/* PMA control parameters */
+#define L0_TXPMD_TM_45 0x0cb4
+#define L0_TXPMD_TM_48 0x0cc0
+#define L0_TXPMD_TM_45_OVER_DP_MAIN BIT(0)
+#define L0_TXPMD_TM_45_ENABLE_DP_MAIN BIT(1)
+#define L0_TXPMD_TM_45_OVER_DP_POST1 BIT(2)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST1 BIT(3)
+#define L0_TXPMD_TM_45_OVER_DP_POST2 BIT(4)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST2 BIT(5)
+
+/* PCS control parameters */
+#define L0_TM_DIG_6 0x106c
+#define L0_TM_DIS_DESCRAMBLE_DECODER 0x0f
+#define L0_TX_DIG_61 0x00f4
+#define L0_TM_DISABLE_SCRAMBLE_ENCODER 0x0f
+
+/* PLL Test Mode register parameters */
+#define L0_TM_PLL_DIG_37 0x2094
+#define L0_TM_COARSE_CODE_LIMIT 0x10
+
+/* PLL SSC step size offsets */
+#define L0_PLL_SS_STEPS_0_LSB 0x2368
+#define L0_PLL_SS_STEPS_1_MSB 0x236c
+#define L0_PLL_SS_STEP_SIZE_0_LSB 0x2370
+#define L0_PLL_SS_STEP_SIZE_1 0x2374
+#define L0_PLL_SS_STEP_SIZE_2 0x2378
+#define L0_PLL_SS_STEP_SIZE_3_MSB 0x237c
+#define L0_PLL_STATUS_READ_1 0x23e4
+
+/* SSC step size parameters */
+#define STEP_SIZE_0_MASK 0xff
+#define STEP_SIZE_1_MASK 0xff
+#define STEP_SIZE_2_MASK 0xff
+#define STEP_SIZE_3_MASK 0x3
+#define STEP_SIZE_SHIFT 8
+#define FORCE_STEP_SIZE 0x10
+#define FORCE_STEPS 0x20
+#define STEPS_0_MASK 0xff
+#define STEPS_1_MASK 0x07
+
+/* Reference clock selection parameters */
+#define L0_Ln_REF_CLK_SEL(n) (0x2860 + (n) * 4)
+#define L0_REF_CLK_SEL_MASK 0x8f
+
+/* Calibration digital logic parameters */
+#define L3_TM_CALIB_DIG19 0xec4c
+#define L3_CALIB_DONE_STATUS 0xef14
+#define L3_TM_CALIB_DIG18 0xec48
+#define L3_TM_CALIB_DIG19_NSW 0x07
+#define L3_TM_CALIB_DIG18_NSW 0xe0
+#define L3_TM_OVERRIDE_NSW_CODE 0x20
+#define L3_CALIB_DONE 0x02
+#define L3_NSW_SHIFT 5
+#define L3_NSW_PIPE_SHIFT 4
+#define L3_NSW_CALIB_SHIFT 3
+
+#define PHY_REG_OFFSET 0x4000
+
+/*
+ * Global Registers
+ */
+
+/* Refclk selection parameters */
+#define PLL_REF_SEL(n) (0x10000 + (n) * 4)
+#define PLL_FREQ_MASK 0x1f
+#define PLL_STATUS_LOCKED 0x10
+
+/* Inter Connect Matrix parameters */
+#define ICM_CFG0 0x10010
+#define ICM_CFG1 0x10014
+#define ICM_CFG0_L0_MASK 0x07
+#define ICM_CFG0_L1_MASK 0x70
+#define ICM_CFG1_L2_MASK 0x07
+#define ICM_CFG2_L3_MASK 0x70
+#define ICM_CFG_SHIFT 4
+
+/* Inter Connect Matrix allowed protocols */
+#define ICM_PROTOCOL_PD 0x0
+#define ICM_PROTOCOL_PCIE 0x1
+#define ICM_PROTOCOL_SATA 0x2
+#define ICM_PROTOCOL_USB 0x3
+#define ICM_PROTOCOL_DP 0x4
+#define ICM_PROTOCOL_SGMII 0x5
+
+/* Test Mode common reset control parameters */
+#define TM_CMN_RST 0x10018
+#define TM_CMN_RST_EN 0x1
+#define TM_CMN_RST_SET 0x2
+#define TM_CMN_RST_MASK 0x3
+
+/* Bus width parameters */
+#define TX_PROT_BUS_WIDTH 0x10040
+#define RX_PROT_BUS_WIDTH 0x10044
+#define PROT_BUS_WIDTH_10 0x0
+#define PROT_BUS_WIDTH_20 0x1
+#define PROT_BUS_WIDTH_40 0x2
+#define PROT_BUS_WIDTH_SHIFT 2
+
+/* Number of GT lanes */
+#define NUM_LANES 4
+
+/* SIOU SATA control register */
+#define SATA_CONTROL_OFFSET 0x0100
+
+/* Number of controller types that can map to each lane */
+#define CONTROLLERS_PER_LANE 5
+
+/* Protocol Type parameters */
+#define XPSGTR_TYPE_USB0 0 /* USB controller 0 */
+#define XPSGTR_TYPE_USB1 1 /* USB controller 1 */
+#define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */
+#define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */
+#define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */
+#define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */
+#define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */
+#define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */
+#define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */
+#define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */
+#define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */
+#define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */
+#define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */
+#define XPSGTR_TYPE_SGMII3 13 /* Ethernet SGMII controller 3 */
+
+/* Timeout values */
+#define TIMEOUT_US 1000
+
+struct xpsgtr_dev;
+
+/**
+ * struct xpsgtr_ssc - structure to hold SSC settings for a lane
+ * @refclk_rate: PLL reference clock frequency
+ * @pll_ref_clk: value to be written to register for corresponding ref clk rate
+ * @steps: number of steps of SSC (Spread Spectrum Clock)
+ * @step_size: step size of each step
+ */
+struct xpsgtr_ssc {
+ u32 refclk_rate;
+ u8 pll_ref_clk;
+ u32 steps;
+ u32 step_size;
+};
+
+/**
+ * struct xpsgtr_phy - representation of a lane
+ * @phy: pointer to the kernel PHY device
+ * @type: controller which uses this lane
+ * @lane: lane number
+ * @protocol: protocol in which the lane operates
+ * @skip_phy_init: skip phy_init() if true
+ * @dev: pointer to the xpsgtr_dev instance
+ * @refclk: reference clock index
+ */
+struct xpsgtr_phy {
+ struct phy *phy;
+ u8 type;
+ u8 lane;
+ u8 protocol;
+ bool skip_phy_init;
+ struct xpsgtr_dev *dev;
+ unsigned int refclk;
+};
+
+/**
+ * struct xpsgtr_dev - representation of a ZynqMP GT device
+ * @dev: pointer to device
+ * @serdes: serdes base address
+ * @siou: siou base address
+ * @gtr_mutex: mutex for locking
+ * @phys: PHY lanes
+ * @refclk_sscs: spread spectrum settings for the reference clocks
+ * @tx_term_fix: fix for GT issue
+ * @saved_icm_cfg0: stored value of ICM CFG0 register
+ * @saved_icm_cfg1: stored value of ICM CFG1 register
+ */
+struct xpsgtr_dev {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *siou;
+ struct mutex gtr_mutex; /* mutex for locking */
+ struct xpsgtr_phy phys[NUM_LANES];
+ const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
+ bool tx_term_fix;
+ unsigned int saved_icm_cfg0;
+ unsigned int saved_icm_cfg1;
+};
+
+/*
+ * Configuration Data
+ */
+
+/* lookup table to hold all settings needed for a ref clock frequency */
+static const struct xpsgtr_ssc ssc_lookup[] = {
+ { 19200000, 0x05, 608, 264020 },
+ { 20000000, 0x06, 634, 243454 },
+ { 24000000, 0x07, 760, 168973 },
+ { 26000000, 0x08, 824, 143860 },
+ { 27000000, 0x09, 856, 86551 },
+ { 38400000, 0x0a, 1218, 65896 },
+ { 40000000, 0x0b, 634, 243454 },
+ { 52000000, 0x0c, 824, 143860 },
+ { 100000000, 0x0d, 1058, 87533 },
+ { 108000000, 0x0e, 856, 86551 },
+ { 125000000, 0x0f, 992, 119497 },
+ { 135000000, 0x10, 1070, 55393 },
+ { 150000000, 0x11, 792, 187091 }
+};
+
+/*
+ * I/O Accessors
+ */
+
+static inline u32 xpsgtr_read(struct xpsgtr_dev *gtr_dev, u32 reg)
+{
+ return readl(gtr_dev->serdes + reg);
+}
+
+static inline void xpsgtr_write(struct xpsgtr_dev *gtr_dev, u32 reg, u32 value)
+{
+ writel(value, gtr_dev->serdes + reg);
+}
+
+static inline void xpsgtr_clr_set(struct xpsgtr_dev *gtr_dev, u32 reg,
+ u32 clr, u32 set)
+{
+ u32 value = xpsgtr_read(gtr_dev, reg);
+
+ value &= ~clr;
+ value |= set;
+ xpsgtr_write(gtr_dev, reg, value);
+}
+
+static inline u32 xpsgtr_read_phy(struct xpsgtr_phy *gtr_phy, u32 reg)
+{
+ void __iomem *addr = gtr_phy->dev->serdes
+ + gtr_phy->lane * PHY_REG_OFFSET + reg;
+
+ return readl(addr);
+}
+
+static inline void xpsgtr_write_phy(struct xpsgtr_phy *gtr_phy,
+ u32 reg, u32 value)
+{
+ void __iomem *addr = gtr_phy->dev->serdes
+ + gtr_phy->lane * PHY_REG_OFFSET + reg;
+
+ writel(value, addr);
+}
+
+static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
+ u32 reg, u32 clr, u32 set)
+{
+ void __iomem *addr = gtr_phy->dev->serdes
+ + gtr_phy->lane * PHY_REG_OFFSET + reg;
+
+ writel((readl(addr) & ~clr) | set, addr);
+}
+
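+/*
+ * Illustrative address calculation based on the accessors above: for
+ * lane 2, a per-lane register such as L0_TX_ANA_TM_18 (0x0048) is
+ * accessed at serdes + 2 * PHY_REG_OFFSET + 0x0048 = serdes + 0x8048.
+ */
+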
+/*
+ * Hardware Configuration
+ */
+
+/* Wait for the PLL to lock (with a timeout). */
+static int xpsgtr_wait_pll_lock(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ unsigned int timeout = TIMEOUT_US;
+ int ret;
+
+ dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");
+
+ while (1) {
+ u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
+
+ if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED) {
+ ret = 0;
+ break;
+ }
+
+ if (--timeout == 0) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ udelay(1);
+ }
+
+ if (ret == -ETIMEDOUT)
+ dev_err(gtr_dev->dev,
+ "lane %u (type %u, protocol %u): PLL lock timeout\n",
+ gtr_phy->lane, gtr_phy->type, gtr_phy->protocol);
+
+ return ret;
+}
+
+/* Configure PLL and spread-spectrum clock. */
+static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
+{
+ const struct xpsgtr_ssc *ssc;
+ u32 step_size;
+
+ ssc = gtr_phy->dev->refclk_sscs[gtr_phy->refclk];
+ step_size = ssc->step_size;
+
+ xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
+ PLL_FREQ_MASK, ssc->pll_ref_clk);
+
+ /* Enable lane clock sharing, if required */
+ if (gtr_phy->refclk != gtr_phy->lane) {
+ /* Lane n Ref Clock Selection Register */
+ xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
+ L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);
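+ /*
+ * For instance, a lane-2 PHY sharing reference clock 0 sets
+ * bit 0 of L0_Ln_REF_CLK_SEL(2), i.e. register 0x2868
+ * (illustration only, derived from the macro above).
+ */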
+ }
+
+ /* SSC step size [7:0] */
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
+ STEP_SIZE_0_MASK, step_size & STEP_SIZE_0_MASK);
+
+ /* SSC step size [15:8] */
+ step_size >>= STEP_SIZE_SHIFT;
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_1,
+ STEP_SIZE_1_MASK, step_size & STEP_SIZE_1_MASK);
+
+ /* SSC step size [23:16] */
+ step_size >>= STEP_SIZE_SHIFT;
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_2,
+ STEP_SIZE_2_MASK, step_size & STEP_SIZE_2_MASK);
+
+ /* SSC steps [7:0] */
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_0_LSB,
+ STEPS_0_MASK, ssc->steps & STEPS_0_MASK);
+
+ /* SSC steps [10:8] */
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_1_MSB,
+ STEPS_1_MASK,
+ (ssc->steps >> STEP_SIZE_SHIFT) & STEPS_1_MASK);
+
+ /* SSC step size [24:25] */
+ step_size >>= STEP_SIZE_SHIFT;
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
+ STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
+ FORCE_STEP_SIZE | FORCE_STEPS);
+}
+
+/* Configure the lane protocol. */
+static void xpsgtr_lane_set_protocol(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ u8 protocol = gtr_phy->protocol;
+
+ switch (gtr_phy->lane) {
+ case 0:
+ xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L0_MASK, protocol);
+ break;
+ case 1:
+ xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L1_MASK,
+ protocol << ICM_CFG_SHIFT);
+ break;
+ case 2:
+ xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L0_MASK, protocol);
+ break;
+ case 3:
+ xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L1_MASK,
+ protocol << ICM_CFG_SHIFT);
+ break;
+ default:
+ /* We already checked 0 <= lane <= 3 */
+ break;
+ }
+}
+
+/* Bypass (de)scrambler and 8b/10b decoder and encoder. */
+static void xpsgtr_bypass_scrambler_8b10b(struct xpsgtr_phy *gtr_phy)
+{
+ xpsgtr_write_phy(gtr_phy, L0_TM_DIG_6, L0_TM_DIS_DESCRAMBLE_DECODER);
+ xpsgtr_write_phy(gtr_phy, L0_TX_DIG_61, L0_TM_DISABLE_SCRAMBLE_ENCODER);
+}
+
+/* DP-specific initialization. */
+static void xpsgtr_phy_init_dp(struct xpsgtr_phy *gtr_phy)
+{
+ xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_45,
+ L0_TXPMD_TM_45_OVER_DP_MAIN |
+ L0_TXPMD_TM_45_ENABLE_DP_MAIN |
+ L0_TXPMD_TM_45_OVER_DP_POST1 |
+ L0_TXPMD_TM_45_OVER_DP_POST2 |
+ L0_TXPMD_TM_45_ENABLE_DP_POST2);
+ xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_118,
+ L0_TX_ANA_TM_118_FORCE_17_0);
+}
+
+/* SATA-specific initialization. */
+static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+
+ xpsgtr_bypass_scrambler_8b10b(gtr_phy);
+
+ writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
+}
+
+/* SGMII-specific initialization. */
+static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+
+ /* Set SGMII protocol TX and RX bus width to 10 bits. */
+ xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH,
+ PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+ xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH,
+ PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+
+ xpsgtr_bypass_scrambler_8b10b(gtr_phy);
+}
+
+/* Configure TX de-emphasis and margining for DP. */
+static void xpsgtr_phy_configure_dp(struct xpsgtr_phy *gtr_phy, unsigned int pre,
+ unsigned int voltage)
+{
+ static const u8 voltage_swing[4][4] = {
+ { 0x2a, 0x27, 0x24, 0x20 },
+ { 0x27, 0x23, 0x20, 0xff },
+ { 0x24, 0x20, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff }
+ };
+ static const u8 pre_emphasis[4][4] = {
+ { 0x02, 0x02, 0x02, 0x02 },
+ { 0x01, 0x01, 0x01, 0xff },
+ { 0x00, 0x00, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff }
+ };
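+ /*
+ * Example lookup (values taken from the tables above): pre-emphasis
+ * level 1 with voltage swing level 2 programs a swing code of 0x20
+ * and a pre-emphasis code of 0x01; 0xff entries appear to mark
+ * unsupported combinations.
+ */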
+
+ xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_48, voltage_swing[pre][voltage]);
+ xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_18, pre_emphasis[pre][voltage]);
+}
+
+/*
+ * PHY Operations
+ */
+
+static bool xpsgtr_phy_init_required(struct xpsgtr_phy *gtr_phy)
+{
+ /*
+ * As USB may save a snapshot of its state during hibernation, doing
+ * phy_init() would put the USB controller into reset and lose that
+ * saved snapshot. So avoid phy_init() for USB except when
+ * gtr_phy->skip_phy_init is false (this happens when the FPD is shut
+ * down during suspend or when the GT lane is changed from the current one).
+ */
+ if (gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init)
+ return false;
+ else
+ return true;
+}
+
+/*
+ * There is a functional issue in the GT: the TX termination resistance can be
+ * out of spec due to an issue in the calibration logic. This is the workaround
+ * to fix it, required for XCZU9EG silicon.
+ */
+static int xpsgtr_phy_tx_term_fix(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ u32 timeout = TIMEOUT_US;
+ u32 nsw;
+
+ /* Enable Test Mode control for CMN Reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
+
+ /* Set Test Mode reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
+
+ xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18, 0x00);
+ xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, L3_TM_OVERRIDE_NSW_CODE);
+
+ /*
+ * As part of the workaround sequence for the PMOS calibration fix,
+ * we need to configure any lane's ICM_CFG to a valid protocol. This
+ * deasserts the CMN_Resetn signal.
+ */
+ xpsgtr_lane_set_protocol(gtr_phy);
+
+ /* Clear Test Mode reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
+
+ dev_dbg(gtr_dev->dev, "calibrating...\n");
+
+ do {
+ u32 reg = xpsgtr_read(gtr_dev, L3_CALIB_DONE_STATUS);
+
+ if ((reg & L3_CALIB_DONE) == L3_CALIB_DONE)
+ break;
+
+ if (!--timeout) {
+ dev_err(gtr_dev->dev, "calibration time out\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(1);
+ } while (timeout > 0);
+
+ dev_dbg(gtr_dev->dev, "calibration done\n");
+
+ /* Reading NMOS Register Code */
+ nsw = xpsgtr_read(gtr_dev, L0_TXPMA_ST_3) & L0_DN_CALIB_CODE;
+
+ /* Set Test Mode reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
+
+ /* Writing NMOS register values back [5:3] */
+ xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, nsw >> L3_NSW_CALIB_SHIFT);
+
+ /* Writing NMOS register value [2:0] */
+ xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18,
+ ((nsw & L3_TM_CALIB_DIG19_NSW) << L3_NSW_SHIFT) |
+ (1 << L3_NSW_PIPE_SHIFT));
+
+ /* Clear Test Mode reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
+
+ return 0;
+}
+
+static int xpsgtr_phy_init(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ int ret = 0;
+
+ mutex_lock(&gtr_dev->gtr_mutex);
+
+ /* Skip initialization if not required. */
+ if (!xpsgtr_phy_init_required(gtr_phy))
+ goto out;
+
+ if (gtr_dev->tx_term_fix) {
+ ret = xpsgtr_phy_tx_term_fix(gtr_phy);
+ if (ret < 0)
+ goto out;
+
+ gtr_dev->tx_term_fix = false;
+ }
+
+ /* Enable coarse code saturation limiting logic. */
+ xpsgtr_write_phy(gtr_phy, L0_TM_PLL_DIG_37, L0_TM_COARSE_CODE_LIMIT);
+
+ /*
+ * Configure the PLL, the lane protocol, and perform protocol-specific
+ * initialization.
+ */
+ xpsgtr_configure_pll(gtr_phy);
+ xpsgtr_lane_set_protocol(gtr_phy);
+
+ switch (gtr_phy->protocol) {
+ case ICM_PROTOCOL_DP:
+ xpsgtr_phy_init_dp(gtr_phy);
+ break;
+
+ case ICM_PROTOCOL_SATA:
+ xpsgtr_phy_init_sata(gtr_phy);
+ break;
+
+ case ICM_PROTOCOL_SGMII:
+ xpsgtr_phy_init_sgmii(gtr_phy);
+ break;
+ }
+
+out:
+ mutex_unlock(&gtr_dev->gtr_mutex);
+ return ret;
+}
+
+static int xpsgtr_phy_exit(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+
+ gtr_phy->skip_phy_init = false;
+
+ return 0;
+}
+
+static int xpsgtr_phy_power_on(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ int ret = 0;
+
+ /*
+ * Wait for the PLL to lock. For DP, only wait on DP0 to avoid
+ * accumulating waits for both lanes. The user is expected to initialize
+ * lane 0 last.
+ */
+ if (gtr_phy->protocol != ICM_PROTOCOL_DP ||
+ gtr_phy->type == XPSGTR_TYPE_DP_0)
+ ret = xpsgtr_wait_pll_lock(phy);
+
+ return ret;
+}
+
+static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+
+ if (gtr_phy->protocol != ICM_PROTOCOL_DP)
+ return 0;
+
+ xpsgtr_phy_configure_dp(gtr_phy, opts->dp.pre[0], opts->dp.voltage[0]);
+
+ return 0;
+}
+
+static const struct phy_ops xpsgtr_phyops = {
+ .init = xpsgtr_phy_init,
+ .exit = xpsgtr_phy_exit,
+ .power_on = xpsgtr_phy_power_on,
+ .configure = xpsgtr_phy_configure,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * OF Xlate Support
+ */
+
+/* Set the lane type and protocol based on the PHY type and instance number. */
+static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
+ unsigned int phy_instance)
+{
+ unsigned int num_phy_types;
+ const int *phy_types;
+
+ switch (phy_type) {
+ case PHY_TYPE_SATA: {
+ static const int types[] = {
+ XPSGTR_TYPE_SATA_0,
+ XPSGTR_TYPE_SATA_1,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_SATA;
+ break;
+ }
+ case PHY_TYPE_USB3: {
+ static const int types[] = {
+ XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_USB1,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_USB;
+ break;
+ }
+ case PHY_TYPE_DP: {
+ static const int types[] = {
+ XPSGTR_TYPE_DP_0,
+ XPSGTR_TYPE_DP_1,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_DP;
+ break;
+ }
+ case PHY_TYPE_PCIE: {
+ static const int types[] = {
+ XPSGTR_TYPE_PCIE_0,
+ XPSGTR_TYPE_PCIE_1,
+ XPSGTR_TYPE_PCIE_2,
+ XPSGTR_TYPE_PCIE_3,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_PCIE;
+ break;
+ }
+ case PHY_TYPE_SGMII: {
+ static const int types[] = {
+ XPSGTR_TYPE_SGMII0,
+ XPSGTR_TYPE_SGMII1,
+ XPSGTR_TYPE_SGMII2,
+ XPSGTR_TYPE_SGMII3,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_SGMII;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ if (phy_instance >= num_phy_types)
+ return -EINVAL;
+
+ gtr_phy->type = phy_types[phy_instance];
+ return 0;
+}
+
+/*
+ * Valid combinations of controllers and lanes (Interconnect Matrix).
+ */
+static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
+ { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
+ { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
+ { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
+ { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
+};
+
+/* Translate OF phandle and args to PHY instance. */
+static struct phy *xpsgtr_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ struct xpsgtr_phy *gtr_phy;
+ unsigned int phy_instance;
+ unsigned int phy_lane;
+ unsigned int phy_type;
+ unsigned int refclk;
+ unsigned int i;
+ int ret;
+
+ if (args->args_count != 4) {
+ dev_err(dev, "Invalid number of cells in 'phy' property\n");
+ return ERR_PTR(-EINVAL);
+ }
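+
+ /*
+ * A consumer would reference this PHY with four cells, e.g.
+ * phys = <&psgtr 1 PHY_TYPE_SATA 1 1>; for SATA instance 1 on
+ * lane 1 using reference clock 1 (illustrative example, not taken
+ * from this patch).
+ */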
+
+ /*
+ * Get the PHY parameters from the OF arguments and derive the lane
+ * type.
+ */
+ phy_lane = args->args[0];
+ if (phy_lane >= ARRAY_SIZE(gtr_dev->phys)) {
+ dev_err(dev, "Invalid lane number %u\n", phy_lane);
+ return ERR_PTR(-ENODEV);
+ }
+
+ gtr_phy = &gtr_dev->phys[phy_lane];
+ phy_type = args->args[1];
+ phy_instance = args->args[2];
+
+ ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
+ if (ret < 0) {
+ dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
+ return ERR_PTR(ret);
+ }
+
+ refclk = args->args[3];
+ if (refclk >= ARRAY_SIZE(gtr_dev->refclk_sscs) ||
+ !gtr_dev->refclk_sscs[refclk]) {
+ dev_err(dev, "Invalid reference clock number %u\n", refclk);
+ return ERR_PTR(-EINVAL);
+ }
+
+ gtr_phy->refclk = refclk;
+
+ /*
+ * Ensure that the Interconnect Matrix is obeyed, i.e. a given lane type
+ * is allowed to operate on the lane.
+ */
+ for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
+ if (icm_matrix[phy_lane][i] == gtr_phy->type)
+ return gtr_phy->phy;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xpsgtr_suspend(struct device *dev)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+
+ /* Save the snapshot ICM_CFG registers. */
+ gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
+ return 0;
+}
+
+static int __maybe_unused xpsgtr_resume(struct device *dev)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ unsigned int icm_cfg0, icm_cfg1;
+ unsigned int i;
+ bool skip_phy_init;
+
+ icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
+ /* Return if no GT lanes got configured before suspend. */
+ if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
+ return 0;
+
+ /* Check if the ICM configurations changed after suspend. */
+ if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
+ icm_cfg1 == gtr_dev->saved_icm_cfg1)
+ skip_phy_init = true;
+ else
+ skip_phy_init = false;
+
+ /* Update the skip_phy_init for all gtr_phy instances. */
+ for (i = 0; i < ARRAY_SIZE(gtr_dev->phys); i++)
+ gtr_dev->phys[i].skip_phy_init = skip_phy_init;
+
+ return 0;
+}
+
+static const struct dev_pm_ops xpsgtr_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
+};
+
+/*
+ * Probe & Platform Driver
+ */
+
+static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
+{
+ unsigned int refclk;
+
+ for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
+ unsigned long rate;
+ unsigned int i;
+ struct clk *clk;
+ char name[8];
+
+ snprintf(name, sizeof(name), "ref%u", refclk);
+ clk = devm_clk_get_optional(gtr_dev->dev, name);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(gtr_dev->dev,
+ "Failed to get reference clock %u: %ld\n",
+ refclk, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ if (!clk)
+ continue;
+
+ /*
+ * Get the spread spectrum (SSC) settings for the reference
+ * clock rate.
+ */
+ rate = clk_get_rate(clk);
+
+ for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
+ if (rate == ssc_lookup[i].refclk_rate) {
+ gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ssc_lookup)) {
+ dev_err(gtr_dev->dev,
+ "Invalid rate %lu for reference clock %u\n",
+ rate, refclk);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int xpsgtr_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct xpsgtr_dev *gtr_dev;
+ struct phy_provider *provider;
+ unsigned int port;
+ int ret;
+
+ gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
+ if (!gtr_dev)
+ return -ENOMEM;
+
+ gtr_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gtr_dev);
+
+ mutex_init(&gtr_dev->gtr_mutex);
+
+ if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
+ gtr_dev->tx_term_fix =
+ of_property_read_bool(np, "xlnx,tx-termination-fix");
+
+ /* Acquire resources. */
+ gtr_dev->serdes = devm_platform_ioremap_resource_byname(pdev, "serdes");
+ if (IS_ERR(gtr_dev->serdes))
+ return PTR_ERR(gtr_dev->serdes);
+
+ gtr_dev->siou = devm_platform_ioremap_resource_byname(pdev, "siou");
+ if (IS_ERR(gtr_dev->siou))
+ return PTR_ERR(gtr_dev->siou);
+
+ ret = xpsgtr_get_ref_clocks(gtr_dev);
+ if (ret)
+ return ret;
+
+ /* Create PHYs. */
+ for (port = 0; port < ARRAY_SIZE(gtr_dev->phys); ++port) {
+ struct xpsgtr_phy *gtr_phy = &gtr_dev->phys[port];
+ struct phy *phy;
+
+ gtr_phy->lane = port;
+ gtr_phy->dev = gtr_dev;
+
+ phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ gtr_phy->phy = phy;
+ phy_set_drvdata(phy, gtr_phy);
+ }
+
+ /* Register the PHY provider. */
+ provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(&pdev->dev, "registering provider failed\n");
+ return PTR_ERR(provider);
+ }
+ return 0;
+}
+
+static const struct of_device_id xpsgtr_of_match[] = {
+ { .compatible = "xlnx,zynqmp-psgtr", },
+ { .compatible = "xlnx,zynqmp-psgtr-v1.1", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
+
+static struct platform_driver xpsgtr_driver = {
+ .probe = xpsgtr_probe,
+ .driver = {
+ .name = "xilinx-psgtr",
+ .of_match_table = xpsgtr_of_match,
+ .pm = &xpsgtr_pm_ops,
+ },
+};
+
+module_platform_driver(xpsgtr_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index b5871612613b..7c53ffae9f50 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -4,22 +4,22 @@
#
#Bus Objs
-soundwire-bus-objs := bus_type.o bus.o master.o slave.o mipi_disco.o stream.o \
+soundwire-bus-y := bus_type.o bus.o master.o slave.o mipi_disco.o stream.o \
sysfs_slave.o sysfs_slave_dpn.o
obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
ifdef CONFIG_DEBUG_FS
-soundwire-bus-objs += debugfs.o
+soundwire-bus-y += debugfs.o
endif
#Cadence Objs
-soundwire-cadence-objs := cadence_master.o
+soundwire-cadence-y := cadence_master.o
obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
#Intel driver
-soundwire-intel-objs := intel.o intel_init.o
+soundwire-intel-y := intel.o intel_init.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
#Qualcomm driver
-soundwire-qcom-objs := qcom.o
+soundwire-qcom-y := qcom.o
obj-$(CONFIG_SOUNDWIRE_QCOM) += soundwire-qcom.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 24ba77226376..e6e0fb9a81b4 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -863,13 +863,13 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
if (!slave->dev_num)
continue;
- /* Identify if Slave(s) are available on Bus */
- is_slave = true;
-
if (slave->status != SDW_SLAVE_ATTACHED &&
slave->status != SDW_SLAVE_ALERT)
continue;
+ /* Identify if Slave(s) are available on Bus */
+ is_slave = true;
+
slave_mode = sdw_get_clk_stop_mode(slave);
slave->curr_clk_stop_mode = slave_mode;
@@ -900,6 +900,10 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
return ret;
}
+ /* Don't need to inform slaves if there is no slave attached */
+ if (!is_slave)
+ return ret;
+
/* Inform slaves that prep is done */
list_for_each_entry(slave, &bus->slaves, node) {
if (!slave->dev_num)
@@ -985,13 +989,13 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
if (!slave->dev_num)
continue;
- /* Identify if Slave(s) are available on Bus */
- is_slave = true;
-
if (slave->status != SDW_SLAVE_ATTACHED &&
slave->status != SDW_SLAVE_ALERT)
continue;
+ /* Identify if Slave(s) are available on Bus */
+ is_slave = true;
+
mode = slave->curr_clk_stop_mode;
if (mode == SDW_CLK_STOP_MODE1) {
@@ -1016,6 +1020,13 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
if (is_slave && !simple_clk_stop)
sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
+ /*
+ * Don't need to call slave callback function if there is no slave
+ * attached
+ */
+ if (!is_slave)
+ return 0;
+
list_for_each_entry(slave, &bus->slaves, node) {
if (!slave->dev_num)
continue;
@@ -1059,12 +1070,119 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
return ret;
}
+static int sdw_slave_set_frequency(struct sdw_slave *slave)
+{
+ u32 mclk_freq = slave->bus->prop.mclk_freq;
+ u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
+ unsigned int scale;
+ u8 scale_index;
+ u8 base;
+ int ret;
+
+ /*
+ * frequency base and scale registers are required for SDCA
+ * devices. They may also be used for 1.2+/non-SDCA devices,
+ * but we will need a DisCo property to cover this case
+ */
+ if (!slave->id.class_id)
+ return 0;
+
+ if (!mclk_freq) {
+ dev_err(&slave->dev,
+ "no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Map the base frequency using Table 89 of the SoundWire 1.2 spec.
+ * The order of the tests just follows the specification; this is
+ * not a selection between possible values or a search for the best
+ * value, only a mapping. Only one case per platform is relevant.
+ * Some BIOSes report inconsistent values for mclk_freq but a
+ * correct root, so we force mclk_freq to avoid variations.
+ */
+ if (!(19200000 % mclk_freq)) {
+ mclk_freq = 19200000;
+ base = SDW_SCP_BASE_CLOCK_19200000_HZ;
+ } else if (!(24000000 % mclk_freq)) {
+ mclk_freq = 24000000;
+ base = SDW_SCP_BASE_CLOCK_24000000_HZ;
+ } else if (!(24576000 % mclk_freq)) {
+ mclk_freq = 24576000;
+ base = SDW_SCP_BASE_CLOCK_24576000_HZ;
+ } else if (!(22579200 % mclk_freq)) {
+ mclk_freq = 22579200;
+ base = SDW_SCP_BASE_CLOCK_22579200_HZ;
+ } else if (!(32000000 % mclk_freq)) {
+ mclk_freq = 32000000;
+ base = SDW_SCP_BASE_CLOCK_32000000_HZ;
+ } else {
+ dev_err(&slave->dev,
+ "Unsupported clock base, mclk %d\n",
+ mclk_freq);
+ return -EINVAL;
+ }
+
+ if (mclk_freq % curr_freq) {
+ dev_err(&slave->dev,
+ "mclk %d is not multiple of bus curr_freq %d\n",
+ mclk_freq, curr_freq);
+ return -EINVAL;
+ }
+
+ scale = mclk_freq / curr_freq;
+
+ /*
+ * map scale to Table 90 of SoundWire 1.2 spec - and check
+ * that the scale is a power of two and maximum 64
+ */
+ scale_index = ilog2(scale);
+
+ if (BIT(scale_index) != scale || scale_index > 6) {
+ dev_err(&slave->dev,
+ "No match found for scale %d, bus mclk %d curr_freq %d\n",
+ scale, mclk_freq, curr_freq);
+ return -EINVAL;
+ }
+ scale_index++;
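+
+ /*
+ * For example, with a 19.2 MHz mclk and a 4.8 MHz current bus
+ * clock the scale is 4, ilog2(4) = 2, and 3 is written to the
+ * scale registers below (illustrative numbers only).
+ */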
+
+ ret = sdw_write(slave, SDW_SCP_BUS_CLOCK_BASE, base);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
+ return ret;
+ }
+
+ /* initialize scale for both banks */
+ ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
+ return ret;
+ }
+ ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
+ if (ret < 0)
+ dev_err(&slave->dev,
+ "SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
+
+ dev_dbg(&slave->dev,
+ "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
+ base, scale_index, mclk_freq, curr_freq);
+
+ return ret;
+}
+
static int sdw_initialize_slave(struct sdw_slave *slave)
{
struct sdw_slave_prop *prop = &slave->prop;
int ret;
u8 val;
+ ret = sdw_slave_set_frequency(slave);
+ if (ret < 0)
+ return ret;
+
/*
* Set bus clash, parity and SCP implementation
* defined interrupt mask
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index de9a671802b8..6fba55898cf0 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -20,14 +20,16 @@
static const struct sdw_device_id *
sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv)
{
- const struct sdw_device_id *id = drv->id_table;
+ const struct sdw_device_id *id;
- while (id && id->mfg_id) {
+ for (id = drv->id_table; id && id->mfg_id; id++)
if (slave->id.mfg_id == id->mfg_id &&
- slave->id.part_id == id->part_id)
+ slave->id.part_id == id->part_id &&
+ (!id->sdw_version ||
+ slave->id.sdw_version == id->sdw_version) &&
+ (!id->class_id ||
+ slave->id.class_id == id->class_id))
return id;
- id++;
- }
return NULL;
}
@@ -49,10 +51,11 @@ static int sdw_bus_match(struct device *dev, struct device_driver *ddrv)
int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
{
- /* modalias is sdw:m<mfg_id>p<part_id> */
+ /* modalias is sdw:m<mfg_id>p<part_id>v<version>c<class_id> */
- return snprintf(buf, size, "sdw:m%04Xp%04X\n",
- slave->id.mfg_id, slave->id.part_id);
+ return snprintf(buf, size, "sdw:m%04Xp%04Xv%02Xc%02X\n",
+ slave->id.mfg_id, slave->id.part_id,
+ slave->id.sdw_version, slave->id.class_id);
}
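As an example of the extended format (the IDs below are made up, not taken from this patch), a Slave reporting mfg_id 0x025d, part_id 0x0711, sdw_version 0x2 and class_id 0x0 would now expose the modalias:

sdw:m025Dp0711v02c00

Because sdw_get_device_id() treats a zero sdw_version or class_id in the driver id_table as a wildcard, existing two-field id tables keep matching unchanged.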
int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env)
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 9ea87538b9ef..24eafe0aa1c3 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -17,6 +17,7 @@
#include <linux/soundwire/sdw.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <linux/workqueue.h>
#include "bus.h"
#include "cadence_master.h"
@@ -790,7 +791,7 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
CDNS_MCP_INT_SLAVE_MASK, 0);
int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
- ret = IRQ_WAKE_THREAD;
+ schedule_work(&cdns->work);
}
cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
@@ -799,13 +800,15 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
EXPORT_SYMBOL(sdw_cdns_irq);
/**
- * sdw_cdns_thread() - Cadence irq thread handler
- * @irq: irq number
- * @dev_id: irq context
+ * cdns_update_slave_status_work() - deferred slave status update
+ * @work: cdns work_struct
+ *
+ * The slave status is updated from a work item so that other interrupts,
+ * e.g. CDNS_MCP_INT_RX_WL, can still be handled while the update is in
+ * progress.
*/
-irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
+static void cdns_update_slave_status_work(struct work_struct *work)
{
- struct sdw_cdns *cdns = dev_id;
+ struct sdw_cdns *cdns =
+ container_of(work, struct sdw_cdns, work);
u32 slave0, slave1;
dev_dbg_ratelimited(cdns->dev, "Slave status change\n");
@@ -822,9 +825,7 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
cdns_updatel(cdns, CDNS_MCP_INTMASK,
CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
- return IRQ_HANDLED;
}
-EXPORT_SYMBOL(sdw_cdns_thread);
/*
* init routines
@@ -1427,6 +1428,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
init_completion(&cdns->tx_complete);
cdns->bus.port_ops = &cdns_port_ops;
+ INIT_WORK(&cdns->work, cdns_update_slave_status_work);
return 0;
}
EXPORT_SYMBOL(sdw_cdns_probe);
@@ -1437,25 +1439,49 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai,
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_cdns_dma_data *dma;
- dma = kzalloc(sizeof(*dma), GFP_KERNEL);
- if (!dma)
- return -ENOMEM;
+ if (stream) {
+ /* first paranoia check */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ dma = dai->playback_dma_data;
+ else
+ dma = dai->capture_dma_data;
+
+ if (dma) {
+ dev_err(dai->dev,
+ "dma_data already allocated for dai %s\n",
+ dai->name);
+ return -EINVAL;
+ }
- if (pcm)
- dma->stream_type = SDW_STREAM_PCM;
- else
- dma->stream_type = SDW_STREAM_PDM;
+ /* allocate and set dma info */
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
- dma->bus = &cdns->bus;
- dma->link_id = cdns->instance;
+ if (pcm)
+ dma->stream_type = SDW_STREAM_PCM;
+ else
+ dma->stream_type = SDW_STREAM_PDM;
- dma->stream = stream;
+ dma->bus = &cdns->bus;
+ dma->link_id = cdns->instance;
- if (direction == SNDRV_PCM_STREAM_PLAYBACK)
- dai->playback_dma_data = dma;
- else
- dai->capture_dma_data = dma;
+ dma->stream = stream;
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ dai->playback_dma_data = dma;
+ else
+ dai->capture_dma_data = dma;
+ } else {
+ /* for NULL stream we release allocated dma_data */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ kfree(dai->playback_dma_data);
+ dai->playback_dma_data = NULL;
+ } else {
+ kfree(dai->capture_dma_data);
+ dai->capture_dma_data = NULL;
+ }
+ }
return 0;
}
EXPORT_SYMBOL(cdns_set_sdw_stream);
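A minimal sketch of the new allocate/release contract, with an illustrative caller (the helper names are assumed; only cdns_set_sdw_stream() is real):

static int my_dai_set_stream(struct snd_soc_dai *dai, void *sdw_stream,
			     int direction)
{
	/* non-NULL stream: allocates dma_data and stores it on the DAI */
	return cdns_set_sdw_stream(dai, sdw_stream, true, direction);
}

static void my_dai_clear_stream(struct snd_soc_dai *dai, int direction)
{
	/* NULL stream: frees the dma_data allocated above */
	cdns_set_sdw_stream(dai, NULL, true, direction);
}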
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index b410656f8194..7638858397df 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -129,6 +129,10 @@ struct sdw_cdns {
bool link_up;
unsigned int msg_count;
+
+ struct work_struct work;
+
+ struct list_head list;
};
#define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus)
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index c7422740edd4..a283670659a9 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
+#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
@@ -46,7 +47,8 @@
#define SDW_SHIM_LCTL_SPA BIT(0)
#define SDW_SHIM_LCTL_CPA BIT(8)
-#define SDW_SHIM_SYNC_SYNCPRD_VAL 0x176F
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1)
#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0)
#define SDW_SHIM_SYNC_SYNCCPU BIT(15)
#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16)
@@ -92,23 +94,12 @@
#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
-#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1)
-
enum intel_pdi_type {
INTEL_PDI_IN = 0,
INTEL_PDI_OUT = 1,
INTEL_PDI_BD = 2,
};
-struct sdw_intel {
- struct sdw_cdns cdns;
- int instance;
- struct sdw_intel_link_res *link_res;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif
-};
-
#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
/*
@@ -134,40 +125,33 @@ static inline void intel_writew(void __iomem *base, int offset, u16 value)
writew(value, base + offset);
}
-static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
+static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
{
int timeout = 10;
u32 reg_read;
- writel(value, base + offset);
do {
reg_read = readl(base + offset);
- if (!(reg_read & mask))
+ if ((reg_read & mask) == target)
return 0;
timeout--;
- udelay(50);
+ usleep_range(50, 100);
} while (timeout != 0);
return -EAGAIN;
}
-static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
+static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
- int timeout = 10;
- u32 reg_read;
-
writel(value, base + offset);
- do {
- reg_read = readl(base + offset);
- if (reg_read & mask)
- return 0;
-
- timeout--;
- udelay(50);
- } while (timeout != 0);
+ return intel_wait_bit(base, offset, mask, 0);
+}
- return -EAGAIN;
+static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
+{
+ writel(value, base + offset);
+ return intel_wait_bit(base, offset, mask, mask);
}
/*
@@ -290,8 +274,46 @@ static int intel_link_power_up(struct sdw_intel *sdw)
{
unsigned int link_id = sdw->instance;
void __iomem *shim = sdw->link_res->shim;
+ u32 *shim_mask = sdw->link_res->shim_mask;
+ struct sdw_bus *bus = &sdw->cdns.bus;
+ struct sdw_master_prop *prop = &bus->prop;
int spa_mask, cpa_mask;
- int link_control, ret;
+ int link_control;
+ int ret = 0;
+ u32 syncprd;
+ u32 sync_reg;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ /*
+ * The hardware relies on an internal counter, typically 4kHz,
+ * to generate the SoundWire SSP - which defines a 'safe'
+ * synchronization point between commands and audio transport
+ * and allows for multi link synchronization. The SYNCPRD value
+ * is only dependent on the oscillator clock provided to
+ * the IP, so adjust based on _DSD properties reported in DSDT
+ * tables. The values reported are based on either 24MHz
+ * (CNL/CML) or 38.4 MHz (ICL/TGL+).
+ */
+ if (prop->mclk_freq % 6000000)
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
+ else
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
+
+ if (!*shim_mask) {
+ /* we first need to program the SyncPRD/CPU registers */
+ dev_dbg(sdw->cdns.dev,
+ "%s: first link up, programming SYNCPRD\n", __func__);
+
+ /* set SyncPRD period */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ sync_reg |= (syncprd <<
+ SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD));
+
+ /* Set SyncCPU bit */
+ sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
+ intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+ }
/* Link power up sequence */
link_control = intel_readl(shim, SDW_SHIM_LCTL);
@@ -300,66 +322,218 @@ static int intel_link_power_up(struct sdw_intel *sdw)
link_control |= spa_mask;
ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
+ goto out;
+ }
+
+ if (!*shim_mask) {
+ /* SyncCPU will change once link is active */
+ ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
+ SDW_SHIM_SYNC_SYNCCPU, 0);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev,
+ "Failed to set SHIM_SYNC: %d\n", ret);
+ goto out;
+ }
+ }
+
+ *shim_mask |= BIT(link_id);
sdw->cdns.link_up = true;
- return 0;
+out:
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
}
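Assuming SDW_CADENCE_GSYNC_KHZ is the usual 4 kHz sync period, the two SYNCPRD constants can be sanity-checked by hand:

/*
 * 24 MHz oscillator (CNL/CML):    24000 / 4 - 1 = 5999 = 0x176F
 *                                 (the value that used to be hardcoded)
 * 38.4 MHz oscillator (ICL/TGL+): 38400 / 4 - 1 = 9599 = 0x257F
 *
 * mclk_freq % 6000000 is non-zero only for the 38.4 MHz clock family,
 * which is how intel_link_power_up() selects between the two.
 */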
-static int intel_shim_init(struct sdw_intel *sdw)
+/* this needs to be called with shim_lock */
+static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
- int sync_reg, ret;
- u16 ioctl = 0, act = 0;
+ u16 ioctl;
- /* Initialize Shim */
- ioctl |= SDW_SHIM_IOCTL_BKE;
+ /* Switch to MIP from Glue logic */
+ ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
+
+ ioctl &= ~(SDW_SHIM_IOCTL_DOE);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl |= SDW_SHIM_IOCTL_WPDD;
+ ioctl &= ~(SDW_SHIM_IOCTL_DO);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl |= SDW_SHIM_IOCTL_DO;
+ ioctl |= (SDW_SHIM_IOCTL_MIF);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl |= SDW_SHIM_IOCTL_DOE;
+ ioctl &= ~(SDW_SHIM_IOCTL_BKE);
+ ioctl &= ~(SDW_SHIM_IOCTL_COE);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- /* Switch to MIP from Glue logic */
- ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
+ /* at this point Master IP has full control of the I/Os */
+}
- ioctl &= ~(SDW_SHIM_IOCTL_DOE);
+/* this needs to be called with shim_lock */
+static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
+{
+ unsigned int link_id = sdw->instance;
+ void __iomem *shim = sdw->link_res->shim;
+ u16 ioctl;
+
+ /* Glue logic */
+ ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
+ ioctl |= SDW_SHIM_IOCTL_BKE;
+ ioctl |= SDW_SHIM_IOCTL_COE;
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl &= ~(SDW_SHIM_IOCTL_DO);
+ ioctl &= ~(SDW_SHIM_IOCTL_MIF);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl |= (SDW_SHIM_IOCTL_MIF);
+ /* at this point Integration Glue has full control of the I/Os */
+}
+
+static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ unsigned int link_id = sdw->instance;
+ int ret = 0;
+ u16 ioctl = 0, act = 0;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ /* Initialize Shim */
+ ioctl |= SDW_SHIM_IOCTL_BKE;
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl &= ~(SDW_SHIM_IOCTL_BKE);
- ioctl &= ~(SDW_SHIM_IOCTL_COE);
+ ioctl |= SDW_SHIM_IOCTL_WPDD;
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
+ ioctl |= SDW_SHIM_IOCTL_DO;
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
+
+ ioctl |= SDW_SHIM_IOCTL_DOE;
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
+
+ intel_shim_glue_to_master_ip(sdw);
act |= 0x1 << SDW_REG_SHIFT(SDW_SHIM_CTMCTL_DOAIS);
act |= SDW_SHIM_CTMCTL_DACTQE;
act |= SDW_SHIM_CTMCTL_DODS;
intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
+ usleep_range(10, 15);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
+}
+
+static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ unsigned int link_id = sdw->instance;
+ u16 wake_en, wake_sts;
+
+ mutex_lock(sdw->link_res->shim_lock);
+ wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
+
+ if (wake_enable) {
+ /* Enable the wakeup */
+ wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
+ intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
+ } else {
+ /* Disable the wake up interrupt */
+ wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
+ intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
+
+ /* Clear wake status */
+ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
+ wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
+ intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts);
+ }
+ mutex_unlock(sdw->link_res->shim_lock);
+}
+
+static int __maybe_unused intel_link_power_down(struct sdw_intel *sdw)
+{
+ int link_control, spa_mask, cpa_mask;
+ unsigned int link_id = sdw->instance;
+ void __iomem *shim = sdw->link_res->shim;
+ u32 *shim_mask = sdw->link_res->shim_mask;
+ int ret = 0;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ intel_shim_master_ip_to_glue(sdw);
+
+ /* Link power down sequence */
+ link_control = intel_readl(shim, SDW_SHIM_LCTL);
+ spa_mask = ~(SDW_SHIM_LCTL_SPA << link_id);
+ cpa_mask = (SDW_SHIM_LCTL_CPA << link_id);
+ link_control &= spa_mask;
+
+ ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
+
+ if (!(*shim_mask & BIT(link_id)))
+ dev_err(sdw->cdns.dev,
+ "%s: Unbalanced power-up/down calls\n", __func__);
+
+ *shim_mask &= ~BIT(link_id);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ if (ret < 0)
+ return ret;
+
+ sdw->cdns.link_up = false;
+ return 0;
+}
+
+static void intel_shim_sync_arm(struct sdw_intel *sdw)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ u32 sync_reg;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ /* update SYNC register */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
+ intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+}
+
+static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ u32 sync_reg;
+ int ret;
- /* Now set SyncPRD period */
+ /* Read SYNC register */
sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
- sync_reg |= (SDW_SHIM_SYNC_SYNCPRD_VAL <<
- SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD));
- /* Set SyncCPU bit */
- sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
+ /*
+ * Set SyncGO bit to synchronously trigger a bank switch for
+ * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
+ * the Masters.
+ */
+ sync_reg |= SDW_SHIM_SYNC_SYNCGO;
+
ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
- SDW_SHIM_SYNC_SYNCCPU);
+ SDW_SHIM_SYNC_SYNCGO);
+
if (ret < 0)
- dev_err(sdw->cdns.dev, "Failed to set sync period: %d\n", ret);
+ dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
return ret;
}
@@ -577,17 +751,12 @@ static int intel_pre_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- void __iomem *shim = sdw->link_res->shim;
- int sync_reg;
/* Write to register only for multi-link */
if (!bus->multi_link)
return 0;
- /* Read SYNC register */
- sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
- sync_reg |= SDW_SHIM_SYNC_CMDSYNC << sdw->instance;
- intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+ intel_shim_sync_arm(sdw);
return 0;
}
@@ -603,6 +772,8 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
if (!bus->multi_link)
return 0;
+ mutex_lock(sdw->link_res->shim_lock);
+
/* Read SYNC register */
sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
@@ -614,18 +785,15 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
*
* So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
*/
- if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK))
- return 0;
+ if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
+ ret = 0;
+ goto unlock;
+ }
- /*
- * Set SyncGO bit to synchronously trigger a bank switch for
- * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
- * the Masters.
- */
- sync_reg |= SDW_SHIM_SYNC_SYNCGO;
+ ret = intel_shim_sync_go_unlocked(sdw);
+unlock:
+ mutex_unlock(sdw->link_res->shim_lock);
- ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
- SDW_SHIM_SYNC_SYNCGO);
if (ret < 0)
dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
@@ -636,57 +804,6 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
* DAI routines
*/
-static int sdw_stream_setup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct sdw_stream_runtime *sdw_stream = NULL;
- char *name;
- int i, ret;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- name = kasprintf(GFP_KERNEL, "%s-Playback", dai->name);
- else
- name = kasprintf(GFP_KERNEL, "%s-Capture", dai->name);
-
- if (!name)
- return -ENOMEM;
-
- sdw_stream = sdw_alloc_stream(name);
- if (!sdw_stream) {
- dev_err(dai->dev, "alloc stream failed for DAI %s", dai->name);
- ret = -ENOMEM;
- goto error;
- }
-
- /* Set stream pointer on CPU DAI */
- ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
- if (ret < 0) {
- dev_err(dai->dev, "failed to set stream pointer on cpu dai %s",
- dai->name);
- goto release_stream;
- }
-
- /* Set stream pointer on all CODEC DAIs */
- for (i = 0; i < rtd->num_codecs; i++) {
- ret = snd_soc_dai_set_sdw_stream(asoc_rtd_to_codec(rtd, i), sdw_stream,
- substream->stream);
- if (ret < 0) {
- dev_err(dai->dev, "failed to set stream pointer on codec dai %s",
- asoc_rtd_to_codec(rtd, i)->name);
- goto release_stream;
- }
- }
-
- return 0;
-
-release_stream:
- sdw_release_stream(sdw_stream);
-error:
- kfree(name);
- return ret;
-}
-
static int intel_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -694,8 +811,7 @@ static int intel_startup(struct snd_pcm_substream *substream,
* TODO: add pm_runtime support here, the startup callback
* will make sure the IP is 'active'
*/
-
- return sdw_stream_setup(substream, dai);
+ return 0;
}
static int intel_hw_params(struct snd_pcm_substream *substream,
@@ -863,23 +979,13 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return ret;
}
- kfree(dma->stream->name);
- sdw_release_stream(dma->stream);
-
return 0;
}
static void intel_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct sdw_cdns_dma_data *dma;
- dma = snd_soc_dai_get_dma_data(dai, substream);
- if (!dma)
- return;
-
- snd_soc_dai_set_dma_data(dai, substream, NULL);
- kfree(dma);
}
static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
@@ -894,6 +1000,22 @@ static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
return cdns_set_sdw_stream(dai, stream, false, direction);
}
+static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
+ int direction)
+{
+ struct sdw_cdns_dma_data *dma;
+
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ dma = dai->playback_dma_data;
+ else
+ dma = dai->capture_dma_data;
+
+ if (!dma)
+ return NULL;
+
+ return dma->stream;
+}
+
static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
.startup = intel_startup,
.hw_params = intel_hw_params,
@@ -902,6 +1024,7 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pcm_set_sdw_stream,
+ .get_sdw_stream = intel_get_sdw_stream,
};
static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
@@ -912,6 +1035,7 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pdm_set_sdw_stream,
+ .get_sdw_stream = intel_get_sdw_stream,
};
static const struct snd_soc_component_driver dai_component = {
@@ -1074,9 +1198,17 @@ static struct sdw_master_ops sdw_intel_ops = {
static int intel_init(struct sdw_intel *sdw)
{
+ bool clock_stop;
+
/* Initialize shim and controller */
intel_link_power_up(sdw);
- intel_shim_init(sdw);
+
+ clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);
+
+ intel_shim_init(sdw, clock_stop);
+
+ if (clock_stop)
+ return 0;
return sdw_cdns_init(&sdw->cdns);
}
@@ -1084,41 +1216,66 @@ static int intel_init(struct sdw_intel *sdw)
/*
* probe and init
*/
-static int intel_probe(struct platform_device *pdev)
+static int intel_master_probe(struct platform_device *pdev)
{
- struct sdw_cdns_stream_config config;
+ struct device *dev = &pdev->dev;
struct sdw_intel *sdw;
+ struct sdw_cdns *cdns;
+ struct sdw_bus *bus;
int ret;
- sdw = devm_kzalloc(&pdev->dev, sizeof(*sdw), GFP_KERNEL);
+ sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
if (!sdw)
return -ENOMEM;
+ cdns = &sdw->cdns;
+ bus = &cdns->bus;
+
sdw->instance = pdev->id;
- sdw->link_res = dev_get_platdata(&pdev->dev);
- sdw->cdns.dev = &pdev->dev;
- sdw->cdns.registers = sdw->link_res->registers;
- sdw->cdns.instance = sdw->instance;
- sdw->cdns.msg_count = 0;
- sdw->cdns.bus.link_id = pdev->id;
+ sdw->link_res = dev_get_platdata(dev);
+ cdns->dev = dev;
+ cdns->registers = sdw->link_res->registers;
+ cdns->instance = sdw->instance;
+ cdns->msg_count = 0;
+
+ bus->link_id = pdev->id;
- sdw_cdns_probe(&sdw->cdns);
+ sdw_cdns_probe(cdns);
/* Set property read ops */
sdw_intel_ops.read_prop = intel_prop_read;
- sdw->cdns.bus.ops = &sdw_intel_ops;
+ bus->ops = &sdw_intel_ops;
- platform_set_drvdata(pdev, sdw);
+ /* set driver data, accessed by snd_soc_dai_get_drvdata() */
+ dev_set_drvdata(dev, cdns);
- ret = sdw_bus_master_add(&sdw->cdns.bus, &pdev->dev, pdev->dev.fwnode);
+ ret = sdw_bus_master_add(bus, dev, dev->fwnode);
if (ret) {
- dev_err(&pdev->dev, "sdw_bus_master_add fail: %d\n", ret);
+ dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
return ret;
}
- if (sdw->cdns.bus.prop.hw_disabled) {
- dev_info(&pdev->dev, "SoundWire master %d is disabled, ignoring\n",
- sdw->cdns.bus.link_id);
+ if (bus->prop.hw_disabled)
+ dev_info(dev,
+ "SoundWire master %d is disabled, will be ignored\n",
+ bus->link_id);
+
+ return 0;
+}
+
+int intel_master_startup(struct platform_device *pdev)
+{
+ struct sdw_cdns_stream_config config;
+ struct device *dev = &pdev->dev;
+ struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_bus *bus = &cdns->bus;
+ int ret;
+
+ if (bus->prop.hw_disabled) {
+ dev_info(dev,
+ "SoundWire master %d is disabled, ignoring\n",
+ sdw->instance);
return 0;
}
@@ -1129,39 +1286,29 @@ static int intel_probe(struct platform_device *pdev)
/* Read the PDI config and initialize cadence PDI */
intel_pdi_init(sdw, &config);
- ret = sdw_cdns_pdi_init(&sdw->cdns, config);
+ ret = sdw_cdns_pdi_init(cdns, config);
if (ret)
goto err_init;
intel_pdi_ch_update(sdw);
- /* Acquire IRQ */
- ret = request_threaded_irq(sdw->link_res->irq,
- sdw_cdns_irq, sdw_cdns_thread,
- IRQF_SHARED, KBUILD_MODNAME, &sdw->cdns);
+ ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
- dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n",
- sdw->link_res->irq);
+ dev_err(dev, "cannot enable interrupts\n");
goto err_init;
}
- ret = sdw_cdns_enable_interrupt(&sdw->cdns, true);
+ ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
- dev_err(sdw->cdns.dev, "cannot enable interrupts\n");
- goto err_init;
- }
-
- ret = sdw_cdns_exit_reset(&sdw->cdns);
- if (ret < 0) {
- dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n");
+ dev_err(dev, "unable to exit bus reset sequence\n");
goto err_interrupt;
}
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
- dev_err(sdw->cdns.dev, "DAI registration failed: %d\n", ret);
- snd_soc_unregister_component(sdw->cdns.dev);
+ dev_err(dev, "DAI registration failed: %d\n", ret);
+ snd_soc_unregister_component(dev);
goto err_interrupt;
}
@@ -1170,41 +1317,75 @@ static int intel_probe(struct platform_device *pdev)
return 0;
err_interrupt:
- sdw_cdns_enable_interrupt(&sdw->cdns, false);
- free_irq(sdw->link_res->irq, sdw);
+ sdw_cdns_enable_interrupt(cdns, false);
err_init:
- sdw_bus_master_delete(&sdw->cdns.bus);
return ret;
}
-static int intel_remove(struct platform_device *pdev)
+static int intel_master_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_bus *bus = &cdns->bus;
+
+ if (!bus->prop.hw_disabled) {
+ intel_debugfs_exit(sdw);
+ sdw_cdns_enable_interrupt(cdns, false);
+ snd_soc_unregister_component(dev);
+ }
+ sdw_bus_master_delete(bus);
+
+ return 0;
+}
+
+int intel_master_process_wakeen_event(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct sdw_intel *sdw;
+ struct sdw_bus *bus;
+ void __iomem *shim;
+ u16 wake_sts;
sdw = platform_get_drvdata(pdev);
+ bus = &sdw->cdns.bus;
- if (!sdw->cdns.bus.prop.hw_disabled) {
- intel_debugfs_exit(sdw);
- sdw_cdns_enable_interrupt(&sdw->cdns, false);
- free_irq(sdw->link_res->irq, sdw);
- snd_soc_unregister_component(sdw->cdns.dev);
+ if (bus->prop.hw_disabled) {
+ dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n", bus->link_id);
+ return 0;
}
- sdw_bus_master_delete(&sdw->cdns.bus);
+
+ shim = sdw->link_res->shim;
+ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
+
+ if (!(wake_sts & BIT(sdw->instance)))
+ return 0;
+
+ /* disable WAKEEN interrupt ASAP to prevent interrupt flood */
+ intel_shim_wake(sdw, false);
+
+ /*
+ * resume the Master, which will generate a bus reset and result in
+	 * Slaves re-attaching and being re-enumerated. The SoundWire physical
+ * device which generated the wake will trigger an interrupt, which
+ * will in turn cause the corresponding Linux Slave device to be
+ * resumed and the Slave codec driver to check the status.
+ */
+ pm_request_resume(dev);
return 0;
}
static struct platform_driver sdw_intel_drv = {
- .probe = intel_probe,
- .remove = intel_remove,
+ .probe = intel_master_probe,
+ .remove = intel_master_remove,
.driver = {
- .name = "int-sdw",
-
+ .name = "intel-sdw",
},
};
module_platform_driver(sdw_intel_drv);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("platform:int-sdw");
+MODULE_ALIAS("platform:intel-sdw");
MODULE_DESCRIPTION("Intel Soundwire Master Driver");
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index 38b7c125fb10..4ea3d262d249 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -15,6 +15,10 @@
* @irq: Interrupt line
* @ops: Shim callback ops
* @dev: device implementing hw_params and free callbacks
+ * @shim_lock: mutex to handle access to shared SHIM registers
+ * @shim_mask: global pointer to check SHIM register initialization
+ * @cdns: Cadence master descriptor
+ * @list: used to walk through all masters exposed by the same controller
*/
struct sdw_intel_link_res {
struct platform_device *pdev;
@@ -25,6 +29,24 @@ struct sdw_intel_link_res {
int irq;
const struct sdw_intel_ops *ops;
struct device *dev;
+ struct mutex *shim_lock; /* protect shared registers */
+ u32 *shim_mask;
+ struct sdw_cdns *cdns;
+ struct list_head list;
};
+struct sdw_intel {
+ struct sdw_cdns cdns;
+ int instance;
+ struct sdw_intel_link_res *link_res;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+};
+
+#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1)
+
+int intel_master_startup(struct platform_device *pdev);
+int intel_master_process_wakeen_event(struct platform_device *pdev);
+
#endif /* __SDW_INTEL_LOCAL_H */
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index d5d42795a48f..047252a91c9e 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -9,10 +9,12 @@
#include <linux/acpi.h>
#include <linux/export.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soundwire/sdw_intel.h>
+#include "cadence_master.h"
#include "intel.h"
#define SDW_LINK_TYPE 4 /* from Intel ACPI documentation */
@@ -23,138 +25,324 @@
#define SDW_LINK_BASE 0x30000
#define SDW_LINK_SIZE 0x10000
-static int link_mask;
-module_param_named(sdw_link_mask, link_mask, int, 0444);
+static int ctrl_link_mask;
+module_param_named(sdw_link_mask, ctrl_link_mask, int, 0444);
MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
-static int sdw_intel_cleanup_pdev(struct sdw_intel_ctx *ctx)
+static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
+{
+ struct fwnode_handle *link;
+ char name[32];
+ u32 quirk_mask = 0;
+
+ /* Find master handle */
+ snprintf(name, sizeof(name),
+ "mipi-sdw-link-%d-subproperties", i);
+
+ link = fwnode_get_named_child_node(fw_node, name);
+ if (!link)
+ return false;
+
+ fwnode_property_read_u32(link,
+ "intel-quirk-mask",
+ &quirk_mask);
+
+ if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
+ return false;
+
+ return true;
+}
+
+static int sdw_intel_cleanup(struct sdw_intel_ctx *ctx)
{
struct sdw_intel_link_res *link = ctx->links;
+ u32 link_mask;
int i;
if (!link)
return 0;
- for (i = 0; i < ctx->count; i++) {
+ link_mask = ctx->link_mask;
+
+ for (i = 0; i < ctx->count; i++, link++) {
+ if (!(link_mask & BIT(i)))
+ continue;
+
if (link->pdev)
platform_device_unregister(link->pdev);
- link++;
}
- kfree(ctx->links);
- ctx->links = NULL;
-
return 0;
}
-static struct sdw_intel_ctx
-*sdw_intel_add_controller(struct sdw_intel_res *res)
+static int
+sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
{
- struct platform_device_info pdevinfo;
- struct platform_device *pdev;
- struct sdw_intel_link_res *link;
- struct sdw_intel_ctx *ctx;
struct acpi_device *adev;
int ret, i;
u8 count;
- u32 caps;
- if (acpi_bus_get_device(res->handle, &adev))
- return NULL;
+ if (acpi_bus_get_device(info->handle, &adev))
+ return -EINVAL;
/* Found controller, find links supported */
count = 0;
ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
"mipi-sdw-master-count", &count, 1);
- /* Don't fail on error, continue and use hw value */
+ /*
+ * In theory we could check the number of links supported in
+ * hardware, but in that step we cannot assume SoundWire IP is
+ * powered.
+ *
+ * In addition, if the BIOS doesn't even provide this
+ * 'master-count' property then all the inits based on link
+ * masks will fail as well.
+ *
+ * We will check the hardware capabilities in the startup() step
+ */
+
if (ret) {
dev_err(&adev->dev,
"Failed to read mipi-sdw-master-count: %d\n", ret);
- count = SDW_MAX_LINKS;
+ return -EINVAL;
}
- /* Check SNDWLCAP.LCOUNT */
- caps = ioread32(res->mmio_base + SDW_SHIM_BASE + SDW_SHIM_LCAP);
- caps &= GENMASK(2, 0);
-
- /* Check HW supported vs property value and use min of two */
- count = min_t(u8, caps, count);
-
/* Check count is within bounds */
if (count > SDW_MAX_LINKS) {
dev_err(&adev->dev, "Link count %d exceeds max %d\n",
count, SDW_MAX_LINKS);
- return NULL;
+ return -EINVAL;
}
if (!count) {
dev_warn(&adev->dev, "No SoundWire links detected\n");
- return NULL;
+ return -EINVAL;
+ }
+ dev_dbg(&adev->dev, "ACPI reports %d SDW Link devices\n", count);
+
+ info->count = count;
+ info->link_mask = 0;
+
+ for (i = 0; i < count; i++) {
+ if (ctrl_link_mask && !(ctrl_link_mask & BIT(i))) {
+ dev_dbg(&adev->dev,
+ "Link %d masked, will not be enabled\n", i);
+ continue;
+ }
+
+ if (!is_link_enabled(acpi_fwnode_handle(adev), i)) {
+ dev_dbg(&adev->dev,
+ "Link %d not selected in firmware\n", i);
+ continue;
+ }
+
+ info->link_mask |= BIT(i);
}
+ return 0;
+}
+
+#define HDA_DSP_REG_ADSPIC2 (0x10)
+#define HDA_DSP_REG_ADSPIS2 (0x14)
+#define HDA_DSP_REG_ADSPIC2_SNDW BIT(5)
+
+/**
+ * sdw_intel_enable_irq() - enable/disable Intel SoundWire IRQ
+ * @mmio_base: The mmio base of the control register
+ * @enable: true to enable, false to disable
+ */
+void sdw_intel_enable_irq(void __iomem *mmio_base, bool enable)
+{
+ u32 val;
+
+ val = readl(mmio_base + HDA_DSP_REG_ADSPIC2);
+
+ if (enable)
+ val |= HDA_DSP_REG_ADSPIC2_SNDW;
+ else
+ val &= ~HDA_DSP_REG_ADSPIC2_SNDW;
+
+ writel(val, mmio_base + HDA_DSP_REG_ADSPIC2);
+}
+EXPORT_SYMBOL_NS(sdw_intel_enable_irq, SOUNDWIRE_INTEL_INIT);
+
+irqreturn_t sdw_intel_thread(int irq, void *dev_id)
+{
+ struct sdw_intel_ctx *ctx = dev_id;
+ struct sdw_intel_link_res *link;
+
+ list_for_each_entry(link, &ctx->link_list, list)
+ sdw_cdns_irq(irq, link->cdns);
+
+ sdw_intel_enable_irq(ctx->mmio_base, true);
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_NS(sdw_intel_thread, SOUNDWIRE_INTEL_INIT);
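These two exports are meant to be driven by whoever owns the shared controller interrupt (the SOF driver in practice); a hedged sketch of that wiring, with the hard-handler name and IRQ flags assumed:

static irqreturn_t sdw_parent_irq_handler(int irq, void *context)
{
	struct sdw_intel_ctx *ctx = context;

	/* mask the SoundWire source until the thread has drained it */
	sdw_intel_enable_irq(ctx->mmio_base, false);
	return IRQ_WAKE_THREAD;
}

/*
 * sdw_intel_thread() then walks ctx->link_list, calls sdw_cdns_irq() for
 * each link and re-enables the interrupt before returning IRQ_HANDLED.
 */
ret = request_threaded_irq(irq, sdw_parent_irq_handler, sdw_intel_thread,
			   IRQF_SHARED, "sdw-intel", ctx);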
+
+static struct sdw_intel_ctx
+*sdw_intel_probe_controller(struct sdw_intel_res *res)
+{
+ struct platform_device_info pdevinfo;
+ struct platform_device *pdev;
+ struct sdw_intel_link_res *link;
+ struct sdw_intel_ctx *ctx;
+ struct acpi_device *adev;
+ struct sdw_slave *slave;
+ struct list_head *node;
+ struct sdw_bus *bus;
+ u32 link_mask;
+ int num_slaves = 0;
+ int count;
+ int i;
+
+ if (!res)
+ return NULL;
+
+ if (acpi_bus_get_device(res->handle, &adev))
+ return NULL;
+
+ if (!res->count)
+ return NULL;
+
+ count = res->count;
dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&adev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
ctx->count = count;
- ctx->links = kcalloc(ctx->count, sizeof(*ctx->links), GFP_KERNEL);
+ ctx->links = devm_kcalloc(&adev->dev, ctx->count,
+ sizeof(*ctx->links), GFP_KERNEL);
if (!ctx->links)
- goto link_err;
+ return NULL;
+
+ ctx->count = count;
+ ctx->mmio_base = res->mmio_base;
+ ctx->link_mask = res->link_mask;
+ ctx->handle = res->handle;
+ mutex_init(&ctx->shim_lock);
link = ctx->links;
+ link_mask = ctx->link_mask;
+
+ INIT_LIST_HEAD(&ctx->link_list);
/* Create SDW Master devices */
- for (i = 0; i < count; i++) {
- if (link_mask && !(link_mask & BIT(i))) {
+ for (i = 0; i < count; i++, link++) {
+ if (!(link_mask & BIT(i))) {
dev_dbg(&adev->dev,
"Link %d masked, will not be enabled\n", i);
- link++;
continue;
}
+ link->mmio_base = res->mmio_base;
link->registers = res->mmio_base + SDW_LINK_BASE
- + (SDW_LINK_SIZE * i);
+ + (SDW_LINK_SIZE * i);
link->shim = res->mmio_base + SDW_SHIM_BASE;
link->alh = res->mmio_base + SDW_ALH_BASE;
link->ops = res->ops;
link->dev = res->dev;
+ link->shim_lock = &ctx->shim_lock;
+ link->shim_mask = &ctx->shim_mask;
+
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = res->parent;
- pdevinfo.name = "int-sdw";
+ pdevinfo.name = "intel-sdw";
pdevinfo.id = i;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
+ pdevinfo.data = link;
+ pdevinfo.size_data = sizeof(*link);
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
dev_err(&adev->dev,
"platform device creation failed: %ld\n",
PTR_ERR(pdev));
- goto pdev_err;
+ goto err;
}
-
link->pdev = pdev;
- link++;
+ link->cdns = platform_get_drvdata(pdev);
+
+ list_add_tail(&link->list, &ctx->link_list);
+ bus = &link->cdns->bus;
+ /* Calculate number of slaves */
+ list_for_each(node, &bus->slaves)
+ num_slaves++;
+ }
+
+ ctx->ids = devm_kcalloc(&adev->dev, num_slaves,
+ sizeof(*ctx->ids), GFP_KERNEL);
+ if (!ctx->ids)
+ goto err;
+
+ ctx->num_slaves = num_slaves;
+ i = 0;
+ list_for_each_entry(link, &ctx->link_list, list) {
+ bus = &link->cdns->bus;
+ list_for_each_entry(slave, &bus->slaves, node) {
+ ctx->ids[i].id = slave->id;
+ ctx->ids[i].link_id = bus->link_id;
+ i++;
+ }
}
return ctx;
-pdev_err:
- sdw_intel_cleanup_pdev(ctx);
-link_err:
- kfree(ctx);
+err:
+ ctx->count = i;
+ sdw_intel_cleanup(ctx);
return NULL;
}
+static int
+sdw_intel_startup_controller(struct sdw_intel_ctx *ctx)
+{
+ struct acpi_device *adev;
+ struct sdw_intel_link_res *link;
+ u32 caps;
+ u32 link_mask;
+ int i;
+
+ if (acpi_bus_get_device(ctx->handle, &adev))
+ return -EINVAL;
+
+ /* Check SNDWLCAP.LCOUNT */
+ caps = ioread32(ctx->mmio_base + SDW_SHIM_BASE + SDW_SHIM_LCAP);
+ caps &= GENMASK(2, 0);
+
+ /* Check HW supported vs property value */
+ if (caps < ctx->count) {
+ dev_err(&adev->dev,
+ "BIOS master count is larger than hardware capabilities\n");
+ return -EINVAL;
+ }
+
+ if (!ctx->links)
+ return -EINVAL;
+
+ link = ctx->links;
+ link_mask = ctx->link_mask;
+
+ /* Startup SDW Master devices */
+ for (i = 0; i < ctx->count; i++, link++) {
+ if (!(link_mask & BIT(i)))
+ continue;
+
+ intel_master_startup(link->pdev);
+ }
+
+ return 0;
+}
+
static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
void *cdata, void **return_value)
{
- struct sdw_intel_res *res = cdata;
+ struct sdw_intel_acpi_info *info = cdata;
struct acpi_device *adev;
acpi_status status;
u64 adr;
@@ -168,7 +356,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
return AE_NOT_FOUND;
}
- res->handle = handle;
+ info->handle = handle;
/*
* On some Intel platforms, multiple children of the HDAS
@@ -185,39 +373,93 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
}
/**
- * sdw_intel_init() - SoundWire Intel init routine
+ * sdw_intel_acpi_scan() - SoundWire Intel init routine
* @parent_handle: ACPI parent handle
- * @res: resource data
+ * @info: description of what firmware/DSDT tables expose
*
- * This scans the namespace and creates SoundWire link controller devices
- * based on the info queried.
+ * This scans the namespace and queries firmware to figure out which
+ * links to enable. A follow-up use of sdw_intel_probe() and
+ * sdw_intel_startup() is required for creation of devices and bus
+ * startup
*/
-void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
+int sdw_intel_acpi_scan(acpi_handle *parent_handle,
+ struct sdw_intel_acpi_info *info)
{
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
parent_handle, 1,
sdw_intel_acpi_cb,
- NULL, res, NULL);
+ NULL, info, NULL);
if (ACPI_FAILURE(status))
- return NULL;
+ return -ENODEV;
- return sdw_intel_add_controller(res);
+ return sdw_intel_scan_controller(info);
}
+EXPORT_SYMBOL_NS(sdw_intel_acpi_scan, SOUNDWIRE_INTEL_INIT);
/**
+ * sdw_intel_probe() - SoundWire Intel probe routine
+ * @res: resource data
+ *
+ * This registers a platform device for each Master handled by the controller,
+ * and SoundWire Master and Slave devices will be created by the platform
+ * device probe. All the information necessary is stored in the context, and
+ * the res argument pointer can be freed after this step.
+ * This function will be called after sdw_intel_acpi_scan() by SOF probe.
+ */
+struct sdw_intel_ctx
+*sdw_intel_probe(struct sdw_intel_res *res)
+{
+ return sdw_intel_probe_controller(res);
+}
+EXPORT_SYMBOL_NS(sdw_intel_probe, SOUNDWIRE_INTEL_INIT);
+
+/**
+ * sdw_intel_startup() - SoundWire Intel startup
+ * @ctx: SoundWire context allocated in the probe
+ *
+ * Start up the Intel SoundWire controller. This function will be called
+ * after the Intel Audio DSP is powered up.
+ */
+int sdw_intel_startup(struct sdw_intel_ctx *ctx)
+{
+ return sdw_intel_startup_controller(ctx);
+}
+EXPORT_SYMBOL_NS(sdw_intel_startup, SOUNDWIRE_INTEL_INIT);
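Taken together, the expected caller sequence for the three new entry points looks roughly like this (error handling elided; the caller is assumed to fill in the remaining sdw_intel_res fields):

struct sdw_intel_acpi_info info = {};
struct sdw_intel_res res = {};
struct sdw_intel_ctx *ctx;

/* 1) parse the DSDT and work out which links firmware enables */
if (sdw_intel_acpi_scan(parent_handle, &info) < 0)
	return;

/* 2) create one platform device per enabled link */
res.handle    = info.handle;
res.count     = info.count;
res.link_mask = info.link_mask;
/* res.mmio_base, res.parent, res.dev and res.ops also set by the caller */
ctx = sdw_intel_probe(&res);

/* 3) once the audio DSP is powered up, start the masters */
sdw_intel_startup(ctx);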
+/**
* sdw_intel_exit() - SoundWire Intel exit
- * @arg: callback context
+ * @ctx: SoundWire context allocated in the probe
*
* Delete the controller instances created and cleanup
*/
void sdw_intel_exit(struct sdw_intel_ctx *ctx)
{
- sdw_intel_cleanup_pdev(ctx);
- kfree(ctx);
+ sdw_intel_cleanup(ctx);
+}
+EXPORT_SYMBOL_NS(sdw_intel_exit, SOUNDWIRE_INTEL_INIT);
+
+void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx)
+{
+ struct sdw_intel_link_res *link;
+ u32 link_mask;
+ int i;
+
+ if (!ctx->links)
+ return;
+
+ link = ctx->links;
+ link_mask = ctx->link_mask;
+
+ /* Startup SDW Master devices */
+ for (i = 0; i < ctx->count; i++, link++) {
+ if (!(link_mask & BIT(i)))
+ continue;
+
+ intel_master_process_wakeen_event(link->pdev);
+ }
}
-EXPORT_SYMBOL(sdw_intel_exit);
+EXPORT_SYMBOL_NS(sdw_intel_process_wakeen_event, SOUNDWIRE_INTEL_INIT);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Soundwire Init Library");
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index a1c2a44a3b4d..915c2cf0c274 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -406,13 +406,13 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus,
return ctrl->reg_write(ctrl, reg, val);
}
-static struct sdw_master_port_ops qcom_swrm_port_ops = {
+static const struct sdw_master_port_ops qcom_swrm_port_ops = {
.dpn_set_port_params = qcom_swrm_port_params,
.dpn_set_port_transport_params = qcom_swrm_transport_params,
.dpn_port_enable_ch = qcom_swrm_port_enable,
};
-static struct sdw_master_ops qcom_swrm_ops = {
+static const struct sdw_master_ops qcom_swrm_ops = {
.xfer_msg = qcom_swrm_xfer_msg,
.pre_bank_switch = qcom_swrm_pre_bank_switch,
};
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index a9a72574b34a..37290a799023 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
+#include <sound/soc.h>
#include "bus.h"
/*
@@ -1826,3 +1827,100 @@ state_err:
return ret;
}
EXPORT_SYMBOL(sdw_deprepare_stream);
+
+static int set_stream(struct snd_pcm_substream *substream,
+ struct sdw_stream_runtime *sdw_stream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai;
+ int ret = 0;
+ int i;
+
+ /* Set stream pointer on all DAIs */
+ for_each_rtd_dais(rtd, i, dai) {
+ ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
+ if (ret < 0) {
+ dev_err(rtd->dev, "failed to set stream pointer on dai %s", dai->name);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * sdw_startup_stream() - Startup SoundWire stream
+ *
+ * @sdw_substream: Soundwire stream
+ *
+ * Documentation/driver-api/soundwire/stream.rst explains this API in detail
+ */
+int sdw_startup_stream(void *sdw_substream)
+{
+ struct snd_pcm_substream *substream = sdw_substream;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdw_stream_runtime *sdw_stream;
+ char *name;
+ int ret;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ name = kasprintf(GFP_KERNEL, "%s-Playback", substream->name);
+ else
+ name = kasprintf(GFP_KERNEL, "%s-Capture", substream->name);
+
+ if (!name)
+ return -ENOMEM;
+
+ sdw_stream = sdw_alloc_stream(name);
+ if (!sdw_stream) {
+ dev_err(rtd->dev, "alloc stream failed for substream DAI %s", substream->name);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = set_stream(substream, sdw_stream);
+ if (ret < 0)
+ goto release_stream;
+ return 0;
+
+release_stream:
+ sdw_release_stream(sdw_stream);
+ set_stream(substream, NULL);
+error:
+ kfree(name);
+ return ret;
+}
+EXPORT_SYMBOL(sdw_startup_stream);
+
+/**
+ * sdw_shutdown_stream() - Shutdown SoundWire stream
+ *
+ * @sdw_substream: Soundwire stream
+ *
+ * Documentation/driver-api/soundwire/stream.rst explains this API in detail
+ */
+void sdw_shutdown_stream(void *sdw_substream)
+{
+ struct snd_pcm_substream *substream = sdw_substream;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdw_stream_runtime *sdw_stream;
+ struct snd_soc_dai *dai;
+
+ /* Find stream from first CPU DAI */
+ dai = asoc_rtd_to_cpu(rtd, 0);
+
+ sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
+
+ if (!sdw_stream) {
+ dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
+ return;
+ }
+
+ /* release memory */
+ kfree(sdw_stream->name);
+ sdw_release_stream(sdw_stream);
+
+ /* clear DAI data */
+ set_stream(substream, NULL);
+}
+EXPORT_SYMBOL(sdw_shutdown_stream);
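Since the stream allocation no longer happens in the Intel DAI startup, a machine driver is expected to call these helpers from its own PCM ops; a minimal hedged sketch (function and variable names assumed):

static int card_sdw_startup(struct snd_pcm_substream *substream)
{
	/* allocates the sdw_stream_runtime and sets it on every DAI */
	return sdw_startup_stream(substream);
}

static void card_sdw_shutdown(struct snd_pcm_substream *substream)
{
	/* looks the stream up on the first CPU DAI, frees it, clears DAIs */
	sdw_shutdown_stream(substream);
}

static const struct snd_soc_ops card_sdw_ops = {
	.startup	= card_sdw_startup,
	.shutdown	= card_sdw_shutdown,
};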
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 6e27fe4fcca3..ec7f66f4555a 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -199,6 +200,24 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
goto bad1;
uioinfo->irq = ret;
}
+
+ if (uioinfo->irq) {
+ struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);
+
+ /*
+		 * If this is a level interrupt, don't do lazy disable.
+		 * Otherwise the irq will fire again since clearing of the
+		 * actual cause, at the device level, is done in userspace.
+		 * irqd_is_level_type() isn't used since it isn't valid until
+		 * the irq is configured.
+ */
+ if (irq_data &&
+ irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
+ dev_dbg(&pdev->dev, "disable lazy unmask\n");
+ irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
+ }
+ }
+
uiomem = &uioinfo->mem[0];
for (i = 0; i < pdev->num_resources; ++i) {
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index b60173bc93ce..63258b6accc4 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -20,6 +20,7 @@
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -171,6 +172,23 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
}
}
+ if (uioinfo->irq) {
+ struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);
+
+ /*
+		 * If this is a level interrupt, don't do lazy disable.
+		 * Otherwise the irq will fire again since clearing of the
+		 * actual cause, at the device level, is done in userspace.
+		 * irqd_is_level_type() isn't used since it isn't valid until
+		 * the irq is configured.
+ */
+ if (irq_data &&
+ irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
+ dev_dbg(&pdev->dev, "disable lazy unmask\n");
+ irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
+ }
+ }
+
uiomem = &uioinfo->mem[0];
for (i = 0; i < pdev->num_resources; ++i) {
@@ -187,8 +205,10 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
}
uiomem->memtype = UIO_MEM_PHYS;
- uiomem->addr = r->start;
- uiomem->size = resource_size(r);
+ uiomem->addr = r->start & PAGE_MASK;
+ uiomem->offs = r->start & ~PAGE_MASK;
+ uiomem->size = (uiomem->offs + resource_size(r)
+ + PAGE_SIZE - 1) & PAGE_MASK;
uiomem->name = r->name;
++uiomem;
}
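A quick check of the new rounding, with a 4 KiB page size and a made-up resource:

/*
 * r->start = 0x40010100, resource_size(r) = 0x300, PAGE_SIZE = 0x1000:
 *
 *   uiomem->addr = 0x40010100 & PAGE_MASK       = 0x40010000
 *   uiomem->offs = 0x40010100 & ~PAGE_MASK      = 0x100
 *   uiomem->size = (0x100 + 0x300 + 0xfff) & PAGE_MASK = 0x1000
 *
 * so the mapping covers whole pages and userspace adds 'offs' to reach
 * the registers, instead of mmap()ing an unaligned addr/size pair.
 */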
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 18ebd7a6af98..0b43efddea22 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -559,7 +559,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
* Changes the event filter mask for the given session.
*
* This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
- * do session cleanup. Takes the session spinlock.
+ * do session cleanup. Takes the session mutex.
*
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
@@ -662,7 +662,156 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
}
/**
- * Sets the guest capabilities for a session. Takes the session spinlock.
+ * Set guest capabilities on the host.
+ * Must be called with gdev->session_mutex held.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The session.
+ * @session_termination: Set if we're called by the session cleanup code.
+ */
+static int vbg_set_host_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ bool session_termination)
+{
+ struct vmmdev_mask *req;
+ u32 caps;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&gdev->session_mutex));
+
+ caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;
+
+ if (gdev->guest_caps_host == caps)
+ return 0;
+
+ /* On termination the requestor is the kernel, as we're cleaning up. */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+ session_termination ? VBG_KERNEL_REQUEST :
+ session->requestor);
+ if (!req) {
+ gdev->guest_caps_host = U32_MAX;
+ return -ENOMEM;
+ }
+
+ req->or_mask = caps;
+ req->not_mask = ~caps;
+ rc = vbg_req_perform(gdev, req);
+ vbg_req_free(req, sizeof(*req));
+
+ gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;
+
+ return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Acquire (get exclusive access) guest capabilities for a session.
+ * Takes the session mutex.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The session.
+ * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX).
+ * @or_mask: The capabilities to add.
+ * @not_mask: The capabilities to remove.
+ * @session_termination: Set if we're called by the session cleanup code.
+ * This tweaks the error handling so we perform
+ * proper session cleanup even if the host
+ * misbehaves.
+ */
+static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ u32 or_mask, u32 not_mask,
+ u32 flags, bool session_termination)
+{
+ unsigned long irqflags;
+ bool wakeup = false;
+ int ret = 0;
+
+ mutex_lock(&gdev->session_mutex);
+
+ if (gdev->set_guest_caps_tracker.mask & or_mask) {
+ vbg_err("%s error: cannot acquire caps which are currently set\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Mark any caps in the or_mask as now being in acquire-mode. Note
+ * once caps are in acquire_mode they always stay in this mode.
+ * This impacts event handling, so we take the event-lock.
+ */
+ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+ gdev->acquire_mode_guest_caps |= or_mask;
+ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+ /* If we only have to switch the caps to acquire mode, we're done. */
+ if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
+ goto out;
+
+ not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
+ not_mask &= session->acquired_guest_caps;
+ or_mask &= ~session->acquired_guest_caps;
+
+ if (or_mask == 0 && not_mask == 0)
+ goto out;
+
+ if (gdev->acquired_guest_caps & or_mask) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ gdev->acquired_guest_caps |= or_mask;
+ gdev->acquired_guest_caps &= ~not_mask;
+ /* session->acquired_guest_caps impacts event handling, take the lock */
+ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+ session->acquired_guest_caps |= or_mask;
+ session->acquired_guest_caps &= ~not_mask;
+ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+ ret = vbg_set_host_capabilities(gdev, session, session_termination);
+ /* Roll back on failure, unless it's session termination time. */
+ if (ret < 0 && !session_termination) {
+ gdev->acquired_guest_caps &= ~or_mask;
+ gdev->acquired_guest_caps |= not_mask;
+ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+ session->acquired_guest_caps &= ~or_mask;
+ session->acquired_guest_caps |= not_mask;
+ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+ }
+
+ /*
+ * If we added a capability, check if that means some other thread in
+ * our session should be unblocked because there are events pending
+ * (the result of vbg_get_allowed_event_mask_for_session() may change).
+ *
+ * HACK ALERT! When the seamless support capability is added we generate
+ * a seamless change event so that the ring-3 client can sync with
+ * the seamless state.
+ */
+ if (ret == 0 && or_mask != 0) {
+ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+
+ if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
+ gdev->pending_events |=
+ VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
+
+ if (gdev->pending_events)
+ wakeup = true;
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+ if (wakeup)
+ wake_up(&gdev->event_wq);
+ }
+
+out:
+ mutex_unlock(&gdev->session_mutex);
+
+ return ret;
+}
+
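To make the acquire-vs-set split concrete, consider a hypothetical two-session sequence (the capability name is real, the scenario itself is illustrative):

/*
 * Session A: VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES with
 *            or_mask = VMMDEV_GUEST_SUPPORTS_SEAMLESS
 *   -> the bit enters acquire mode, is owned by session A and is
 *      reported to the host through gdev->acquired_guest_caps.
 *
 * Session B: the same acquire request now fails with -EBUSY (only one
 *            session may own an acquired capability), and setting the
 *            bit via VBG_IOCTL_CHANGE_GUEST_CAPABILITIES also fails
 *            with -EBUSY because the bit is in acquire mode.
 *
 * When session A closes, vbg_core_close_session() releases the bit by
 * calling vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX,
 * 0, true).
 */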
+/**
+ * Sets the guest capabilities for a session. Takes the session mutex.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The session.
@@ -678,62 +827,40 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
u32 or_mask, u32 not_mask,
bool session_termination)
{
- struct vmmdev_mask *req;
u32 changed, previous;
- int rc, ret = 0;
-
- /*
- * Allocate a request buffer before taking the spinlock, when
- * the session is being terminated the requestor is the kernel,
- * as we're cleaning up.
- */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
- session_termination ? VBG_KERNEL_REQUEST :
- session->requestor);
- if (!req) {
- if (!session_termination)
- return -ENOMEM;
- /* Ignore allocation failure, we must do session cleanup. */
- }
+ int ret = 0;
mutex_lock(&gdev->session_mutex);
+ if (gdev->acquire_mode_guest_caps & or_mask) {
+ vbg_err("%s error: cannot set caps which are in acquire_mode\n",
+ __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
/* Apply the changes to the session mask. */
- previous = session->guest_caps;
- session->guest_caps |= or_mask;
- session->guest_caps &= ~not_mask;
+ previous = session->set_guest_caps;
+ session->set_guest_caps |= or_mask;
+ session->set_guest_caps &= ~not_mask;
/* If anything actually changed, update the global usage counters. */
- changed = previous ^ session->guest_caps;
+ changed = previous ^ session->set_guest_caps;
if (!changed)
goto out;
- vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
- or_mask = gdev->guest_caps_tracker.mask;
-
- if (gdev->guest_caps_host == or_mask || !req)
- goto out;
+ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);
- gdev->guest_caps_host = or_mask;
- req->or_mask = or_mask;
- req->not_mask = ~or_mask;
- rc = vbg_req_perform(gdev, req);
- if (rc < 0) {
- ret = vbg_status_code_to_errno(rc);
-
- /* Failed, roll back (unless it's session termination time). */
- gdev->guest_caps_host = U32_MAX;
- if (session_termination)
- goto out;
-
- vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
- session->guest_caps);
- session->guest_caps = previous;
+ ret = vbg_set_host_capabilities(gdev, session, session_termination);
+ /* Roll back on failure, unless it's session termination time. */
+ if (ret < 0 && !session_termination) {
+ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
+ session->set_guest_caps);
+ session->set_guest_caps = previous;
}
out:
mutex_unlock(&gdev->session_mutex);
- vbg_req_free(req, sizeof(*req));
return ret;
}
@@ -949,6 +1076,7 @@ void vbg_core_close_session(struct vbg_session *session)
struct vbg_dev *gdev = session->gdev;
int i, rc;
+ vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
@@ -1006,6 +1134,25 @@ static int vbg_ioctl_driver_version_info(
return 0;
}
+/* Must be called with the event_lock held */
+static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
+ struct vbg_session *session)
+{
+ u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
+ u32 session_acquired_caps = session->acquired_guest_caps;
+ u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;
+
+ if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
+ !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
+ allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
+
+ if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
+ !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
+ allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
+
+ return allowed_events;
+}
+
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
struct vbg_session *session,
u32 event_mask)
@@ -1017,6 +1164,7 @@ static bool vbg_wait_event_cond(struct vbg_dev *gdev,
spin_lock_irqsave(&gdev->event_spinlock, flags);
events = gdev->pending_events & event_mask;
+ events &= vbg_get_allowed_event_mask_for_session(gdev, session);
wakeup = events || session->cancel_waiters;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
@@ -1031,6 +1179,7 @@ static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
{
u32 events = gdev->pending_events & event_mask;
+ events &= vbg_get_allowed_event_mask_for_session(gdev, session);
gdev->pending_events &= ~events;
return events;
}
@@ -1150,7 +1299,9 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
+ case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
+ case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
case VMMDEVREQ_GET_VRDPCHANGE_REQ:
case VMMDEVREQ_LOG_STRING:
@@ -1432,6 +1583,29 @@ static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
false);
}
+static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ struct vbg_ioctl_acquire_guest_caps *caps)
+{
+ u32 flags, or_mask, not_mask;
+
+ if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))
+ return -EINVAL;
+
+ flags = caps->u.in.flags;
+ or_mask = caps->u.in.or_mask;
+ not_mask = caps->u.in.not_mask;
+
+ if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)
+ return -EINVAL;
+
+ if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
+ return -EINVAL;
+
+ return vbg_acquire_session_capabilities(gdev, session, or_mask,
+ not_mask, flags, false);
+}
+
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
@@ -1452,7 +1626,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
if (ret)
return ret;
- caps->u.out.session_caps = session->guest_caps;
+ caps->u.out.session_caps = session->set_guest_caps;
caps->u.out.global_caps = gdev->guest_caps_host;
return 0;
@@ -1541,6 +1715,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
case VBG_IOCTL_CHANGE_FILTER_MASK:
return vbg_ioctl_change_filter_mask(gdev, session, data);
+ case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
+ return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
return vbg_ioctl_change_guest_capabilities(gdev, session, data);
case VBG_IOCTL_CHECK_BALLOON:
@@ -1563,7 +1739,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
return vbg_ioctl_log(data);
}
- vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
+ vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
return -ENOTTY;
}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 77c3a9c8255d..ab4bf64e2cec 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -118,11 +118,21 @@ struct vbg_dev {
u32 event_filter_host;
/**
- * Usage counters for guest capabilities. Indexed by capability bit
+ * Guest capabilities which have been switched to acquire_mode.
+ */
+ u32 acquire_mode_guest_caps;
+ /**
+ * Guest capabilities acquired by vbg_acquire_session_capabilities().
+ * Only one session can acquire a capability at a time.
+ */
+ u32 acquired_guest_caps;
+ /**
+ * Usage counters for guest capabilities requested through
+ * vbg_set_session_capabilities(). Indexed by capability bit
* number, one count per session using a capability.
* Protected by session_mutex.
*/
- struct vbg_bit_usage_tracker guest_caps_tracker;
+ struct vbg_bit_usage_tracker set_guest_caps_tracker;
/**
* The guest capabilities last reported to the host (or UINT32_MAX).
* Protected by session_mutex.
@@ -164,11 +174,16 @@ struct vbg_session {
*/
u32 event_filter;
/**
- * Guest capabilities for this session.
+ * Guest capabilities acquired by vbg_acquire_session_capabilities().
+ * Only one session can acquire a capability at a time.
+ */
+ u32 acquired_guest_caps;
+ /**
+ * Guest capabilities set through vbg_set_session_capabilities().
* A capability claimed by any guest session will be reported to the
* host. Protected by vbg_gdev.session_mutex.
*/
- u32 guest_caps;
+ u32 set_guest_caps;
/** VMMDEV_REQUESTOR_* flags */
u32 requestor;
/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 7396187ee32a..ea05af41ec69 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -59,6 +59,7 @@ EXPORT_SYMBOL(name)
VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
+VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif