-rw-r--r-- Documentation/arm64/sve.txt | 16
-rw-r--r-- Documentation/block/switching-sched.txt | 18
-rw-r--r-- Documentation/cgroup-v1/blkio-controller.txt | 96
-rw-r--r-- Documentation/cgroup-v1/hugetlb.txt | 22
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arm64/Makefile | 2
-rw-r--r-- arch/arm64/include/asm/tlbflush.h | 3
-rw-r--r-- arch/arm64/include/uapi/asm/kvm.h | 7
-rw-r--r-- arch/arm64/include/uapi/asm/ptrace.h | 4
-rw-r--r-- arch/arm64/include/uapi/asm/sigcontext.h | 14
-rw-r--r-- arch/arm64/kernel/fpsimd.c | 42
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 30
-rw-r--r-- arch/powerpc/include/asm/btext.h | 4
-rw-r--r-- arch/powerpc/include/asm/kexec.h | 3
-rw-r--r-- arch/powerpc/kernel/machine_kexec_32.c | 4
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 1
-rw-r--r-- arch/powerpc/kernel/prom_init_check.sh | 2
-rw-r--r-- arch/powerpc/mm/book3s64/pgtable.c | 3
-rw-r--r-- arch/powerpc/mm/pgtable.c | 16
-rw-r--r-- arch/x86/include/asm/fpu/internal.h | 6
-rw-r--r-- arch/x86/include/asm/intel-family.h | 3
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 2
-rw-r--r-- arch/x86/kernel/cpu/resctrl/monitor.c | 3
-rw-r--r-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 7
-rw-r--r-- arch/x86/kernel/fpu/core.c | 2
-rw-r--r-- arch/x86/kernel/fpu/signal.c | 16
-rw-r--r-- arch/x86/kernel/kgdb.c | 2
-rw-r--r-- arch/x86/mm/kasan_init_64.c | 2
-rw-r--r-- arch/x86/mm/kaslr.c | 11
-rw-r--r-- block/Kconfig | 1
-rw-r--r-- block/bfq-cgroup.c | 6
-rw-r--r-- block/blk-mq-debugfs.c | 145
-rw-r--r-- block/blk-mq-debugfs.h | 36
-rw-r--r-- block/blk-mq-sched.c | 1
-rw-r--r-- drivers/ata/libata-core.c | 9
-rw-r--r-- drivers/base/devres.c | 24
-rw-r--r-- drivers/block/null_blk_zoned.c | 4
-rw-r--r-- drivers/block/ps3vram.c | 2
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 8
-rw-r--r-- drivers/clocksource/timer-ti-dm.c | 2
-rw-r--r-- drivers/dax/device.c | 13
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 5
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 5
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 55
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 3
-rw-r--r-- drivers/gpu/drm/drm_ioc32.c | 5
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 32
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 52
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_vbt.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 58
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo_regs.h | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 30
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 8
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_gem.c | 7
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dsi.c | 12
-rw-r--r-- drivers/gpu/drm/meson/meson_crtc.c | 6
-rw-r--r-- drivers/gpu/drm/meson/meson_plane.c | 8
-rw-r--r-- drivers/gpu/drm/meson/meson_vclk.c | 13
-rw-r--r-- drivers/gpu/drm/meson/meson_viu.c | 3
-rw-r--r-- drivers/gpu/drm/panfrost/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_devfreq.c | 13
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.c | 3
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.h | 1
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 146
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 10
-rw-r--r-- drivers/hid/hid-a4tech.c | 11
-rw-r--r-- drivers/hid/hid-core.c | 16
-rw-r--r-- drivers/hid/hid-hyperv.c | 2
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-logitech-dj.c | 50
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 9
-rw-r--r-- drivers/hid/hid-multitouch.c | 7
-rw-r--r-- drivers/hid/hid-rmi.c | 15
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c | 8
-rw-r--r-- drivers/hid/wacom_wac.c | 71
-rw-r--r-- drivers/i2c/busses/i2c-acorn.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-pca-platform.c | 3
-rw-r--r-- drivers/iommu/arm-smmu.c | 15
-rw-r--r-- drivers/iommu/intel-iommu.c | 7
-rw-r--r-- drivers/iommu/intel-pasid.c | 2
-rw-r--r-- drivers/iommu/iommu.c | 2
-rw-r--r-- drivers/md/bcache/bset.c | 16
-rw-r--r-- drivers/md/bcache/bset.h | 34
-rw-r--r-- drivers/md/bcache/sysfs.c | 7
-rw-r--r-- drivers/media/dvb-core/dvb_frontend.c | 2
-rw-r--r-- drivers/media/platform/qcom/venus/hfi_helper.h | 4
-rw-r--r-- drivers/nvdimm/pmem.c | 17
-rw-r--r-- drivers/pci/p2pdma.c | 115
-rw-r--r-- drivers/platform/mellanox/mlxreg-hotplug.c | 1
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 8
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 2
-rw-r--r-- drivers/platform/x86/asus-wmi.h | 1
-rw-r--r-- drivers/platform/x86/intel-vbtn.c | 16
-rw-r--r-- drivers/platform/x86/mlx-platform.c | 2
-rw-r--r-- drivers/ras/cec.c | 80
-rw-r--r-- drivers/regulator/tps6507x-regulator.c | 6
-rw-r--r-- drivers/scsi/hpsa.c | 7
-rw-r--r-- drivers/scsi/hpsa_cmd.h | 1
-rw-r--r-- drivers/spi/spi-bitbang.c | 2
-rw-r--r-- drivers/spi/spi-fsl-spi.c | 2
-rw-r--r-- drivers/spi/spi.c | 11
-rw-r--r-- drivers/usb/core/quirks.c | 3
-rw-r--r-- drivers/usb/dwc2/gadget.c | 24
-rw-r--r-- drivers/usb/dwc2/hcd.c | 39
-rw-r--r-- drivers/usb/dwc2/hcd.h | 20
-rw-r--r-- drivers/usb/dwc2/hcd_intr.c | 5
-rw-r--r-- drivers/usb/dwc2/hcd_queue.c | 10
-rw-r--r-- drivers/usb/gadget/udc/fusb300_udc.c | 5
-rw-r--r-- drivers/usb/gadget/udc/lpc32xx_udc.c | 7
-rw-r--r-- drivers/usb/phy/phy-mxs-usb.c | 14
-rw-r--r-- drivers/usb/serial/option.c | 6
-rw-r--r-- drivers/usb/serial/pl2303.c | 1
-rw-r--r-- drivers/usb/serial/pl2303.h | 3
-rw-r--r-- drivers/usb/storage/unusual_realtek.h | 5
-rw-r--r-- drivers/usb/typec/bus.c | 2
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_ccg.c | 6
-rw-r--r-- drivers/vfio/mdev/mdev_core.c | 136
-rw-r--r-- drivers/vfio/mdev/mdev_private.h | 4
-rw-r--r-- drivers/vfio/mdev/mdev_sysfs.c | 6
-rw-r--r-- drivers/xen/swiotlb-xen.c | 12
-rw-r--r-- fs/btrfs/extent-tree.c | 28
-rw-r--r-- fs/gfs2/bmap.c | 5
-rw-r--r-- fs/io_uring.c | 4
-rw-r--r-- fs/ocfs2/dcache.c | 12
-rw-r--r-- include/drm/drm_edid.h | 1
-rw-r--r-- include/linux/cgroup-defs.h | 4
-rw-r--r-- include/linux/cgroup.h | 14
-rw-r--r-- include/linux/cpuhotplug.h | 1
-rw-r--r-- include/linux/device.h | 1
-rw-r--r-- include/linux/genalloc.h | 55
-rw-r--r-- include/linux/memcontrol.h | 26
-rw-r--r-- include/linux/memremap.h | 8
-rw-r--r-- include/linux/sched/mm.h | 4
-rw-r--r-- include/sound/sof/dai.h | 1
-rw-r--r-- include/sound/sof/header.h | 23
-rw-r--r-- include/sound/sof/info.h | 20
-rw-r--r-- include/sound/sof/xtensa.h | 9
-rw-r--r-- include/uapi/sound/sof/abi.h | 2
-rw-r--r-- kernel/cgroup/cgroup.c | 139
-rw-r--r-- kernel/cgroup/cpuset.c | 15
-rw-r--r-- kernel/cred.c | 9
-rw-r--r-- kernel/exit.c | 2
-rw-r--r-- kernel/livepatch/core.c | 6
-rw-r--r-- kernel/memremap.c | 23
-rw-r--r-- kernel/ptrace.c | 20
-rw-r--r-- kernel/time/timekeeping.c | 5
-rw-r--r-- kernel/trace/ftrace.c | 22
-rw-r--r-- kernel/trace/trace.c | 4
-rw-r--r-- kernel/trace/trace_output.c | 2
-rw-r--r-- kernel/trace/trace_uprobe.c | 15
-rw-r--r-- lib/genalloc.c | 51
-rw-r--r-- mm/hmm.c | 14
-rw-r--r-- mm/khugepaged.c | 3
-rw-r--r-- mm/list_lru.c | 2
-rw-r--r-- mm/memcontrol.c | 41
-rw-r--r-- mm/mlock.c | 7
-rw-r--r-- mm/mmu_gather.c | 24
-rw-r--r-- mm/vmalloc.c | 14
-rw-r--r-- mm/vmscan.c | 6
-rwxr-xr-x scripts/decode_stacktrace.sh | 2
-rw-r--r-- security/selinux/avc.c | 10
-rw-r--r-- security/selinux/hooks.c | 39
-rw-r--r-- security/smack/smack_lsm.c | 12
-rw-r--r-- sound/firewire/motu/motu-stream.c | 2
-rw-r--r-- sound/firewire/oxfw/oxfw.c | 3
-rw-r--r-- sound/hda/ext/hdac_ext_bus.c | 1
-rw-r--r-- sound/pci/hda/hda_codec.c | 9
-rw-r--r-- sound/pci/hda/patch_realtek.c | 91
-rw-r--r-- sound/pci/ice1712/ews.c | 2
-rw-r--r-- sound/soc/codecs/ak4458.c | 18
-rw-r--r-- sound/soc/codecs/cs4265.c | 2
-rw-r--r-- sound/soc/codecs/cs42xx8.c | 1
-rw-r--r-- sound/soc/codecs/max98090.c | 16
-rw-r--r-- sound/soc/codecs/rt274.c | 3
-rw-r--r-- sound/soc/codecs/rt5670.c | 12
-rw-r--r-- sound/soc/codecs/rt5677-spi.c | 5
-rw-r--r-- sound/soc/fsl/fsl_asrc.c | 4
-rw-r--r-- sound/soc/intel/atom/sst/sst_pvt.c | 4
-rw-r--r-- sound/soc/intel/boards/bytcht_es8316.c | 2
-rw-r--r-- sound/soc/intel/boards/cht_bsw_max98090_ti.c | 2
-rw-r--r-- sound/soc/intel/boards/cht_bsw_nau8824.c | 2
-rw-r--r-- sound/soc/intel/boards/cht_bsw_rt5672.c | 2
-rw-r--r-- sound/soc/intel/boards/sof_rt5682.c | 11
-rw-r--r-- sound/soc/intel/common/soc-acpi-intel-byt-match.c | 17
-rw-r--r-- sound/soc/intel/common/soc-acpi-intel-cnl-match.c | 10
-rw-r--r-- sound/soc/mediatek/Kconfig | 2
-rw-r--r-- sound/soc/soc-core.c | 36
-rw-r--r-- sound/soc/soc-dapm.c | 7
-rw-r--r-- sound/soc/soc-pcm.c | 3
-rw-r--r-- sound/soc/sof/Kconfig | 8
-rw-r--r-- sound/soc/sof/control.c | 9
-rw-r--r-- sound/soc/sof/core.c | 29
-rw-r--r-- sound/soc/sof/intel/bdw.c | 26
-rw-r--r-- sound/soc/sof/intel/byt.c | 25
-rw-r--r-- sound/soc/sof/intel/cnl.c | 4
-rw-r--r-- sound/soc/sof/intel/hda-ctrl.c | 102
-rw-r--r-- sound/soc/sof/intel/hda-ipc.c | 17
-rw-r--r-- sound/soc/sof/intel/hda.c | 129
-rw-r--r-- sound/soc/sof/ipc.c | 26
-rw-r--r-- sound/soc/sof/loader.c | 2
-rw-r--r-- sound/soc/sof/pcm.c | 8
-rw-r--r-- sound/soc/sof/xtensa/core.c | 2
-rw-r--r-- sound/soc/sunxi/sun4i-codec.c | 9
-rw-r--r-- sound/soc/sunxi/sun4i-i2s.c | 6
-rw-r--r-- tools/testing/nvdimm/test/iomap.c | 2
218 files changed, 2184 insertions, 1210 deletions
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt
index 9940e924a47e..5689fc9a976a 100644
--- a/Documentation/arm64/sve.txt
+++ b/Documentation/arm64/sve.txt
@@ -56,6 +56,18 @@ model features for SVE is included in Appendix A.
is to connect to a target process first and then attempt a
ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov).
+* Whenever SVE scalable register values (Zn, Pn, FFR) are exchanged in memory
+ between userspace and the kernel, the register value is encoded in memory in
+ an endianness-invariant layout, with bits [(8 * i + 7) : (8 * i)] encoded at
+ byte offset i from the start of the memory representation. This affects for
+ example the signal frame (struct sve_context) and ptrace interface
+ (struct user_sve_header) and associated data.
+
+ Beware that on big-endian systems this results in a different byte order than
+ for the FPSIMD V-registers, which are stored as single host-endian 128-bit
+ values, with bits [(127 - 8 * i) : (120 - 8 * i)] of the register encoded at
+ byte offset i. (struct fpsimd_context, struct user_fpsimd_state).
+
2. Vector length terminology
-----------------------------
@@ -124,6 +136,10 @@ the SVE instruction set architecture.
size and layout. Macros SVE_SIG_* are defined [1] to facilitate access to
the members.
+* Each scalable register (Zn, Pn, FFR) is stored in an endianness-invariant
+ layout, with bits [(8 * i + 7) : (8 * i)] stored at byte offset i from the
+ start of the register's representation in memory.
+
* If the SVE context is too big to fit in sigcontext.__reserved[], then extra
space is allocated on the stack, an extra_context record is written in
__reserved[] referencing this space. sve_context is then written in the
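
The endianness-invariant layout documented above can be pictured with a small userspace-style sketch. This is illustrative only, not part of the patch, and the helper names are hypothetical: in the invariant layout, bits [(8 * i + 7) : (8 * i)] of the register always land at byte offset i, whereas an FPSIMD V-register is stored as one native 128-bit value, so its byte order follows the host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical helper: encode a 128-bit register value in the
 * endianness-invariant layout, i.e. bits [(8 * i + 7) : (8 * i)]
 * are stored at byte offset i.
 */
static void encode_sve_reg(uint8_t *out, unsigned __int128 val, size_t nbytes)
{
	size_t i;

	for (i = 0; i < nbytes; i++)
		out[i] = (uint8_t)(val >> (8 * i));
}

/*
 * Hypothetical helper: an FPSIMD V-register, by contrast, is stored as a
 * single native 128-bit value, so the byte order depends on host endianness.
 */
static void encode_fpsimd_vreg(uint8_t *out, unsigned __int128 val)
{
	memcpy(out, &val, sizeof(val));
}

int main(void)
{
	unsigned __int128 v = ((unsigned __int128)0x0123456789abcdefULL << 64) |
			      0xfedcba9876543210ULL;
	uint8_t sve[16], vreg[16];

	encode_sve_reg(sve, v, sizeof(sve));
	encode_fpsimd_vreg(vreg, v);

	/*
	 * On a little-endian host the two layouts agree; on a big-endian host
	 * vreg[] comes out byte-reversed relative to sve[].
	 */
	printf("sve[0]=0x%02x vreg[0]=0x%02x\n", sve[0], vreg[0]);
	return 0;
}

On little-endian hosts the two encodings coincide, which is why arm64_cpu_to_le128() in the fpsimd.c hunk further down is an identity function there and a pair of swab64() calls on big-endian.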
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index 3b2612e342f1..7977f6fb8b20 100644
--- a/Documentation/block/switching-sched.txt
+++ b/Documentation/block/switching-sched.txt
@@ -13,11 +13,9 @@ you can do so by typing:
# mount none /sys -t sysfs
-As of the Linux 2.6.10 kernel, it is now possible to change the
-IO scheduler for a given block device on the fly (thus making it possible,
-for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the deadline or noop schedulers - which
-can improve that device's throughput).
+It is possible to change the IO scheduler for a given block device on
+the fly to select one of mq-deadline, none, bfq, or kyber schedulers -
+which can improve that device's throughput.
To set a specific scheduler, simply do this:
@@ -30,8 +28,8 @@ The list of defined schedulers can be found by simply doing
a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
will be displayed, with the currently selected scheduler in brackets:
-# cat /sys/block/hda/queue/scheduler
-noop deadline [cfq]
-# echo deadline > /sys/block/hda/queue/scheduler
-# cat /sys/block/hda/queue/scheduler
-noop [deadline] cfq
+# cat /sys/block/sda/queue/scheduler
+[mq-deadline] kyber bfq none
+# echo none >/sys/block/sda/queue/scheduler
+# cat /sys/block/sda/queue/scheduler
+[none] mq-deadline kyber bfq
diff --git a/Documentation/cgroup-v1/blkio-controller.txt b/Documentation/cgroup-v1/blkio-controller.txt
index 673dc34d3f78..d1a1b7bdd03a 100644
--- a/Documentation/cgroup-v1/blkio-controller.txt
+++ b/Documentation/cgroup-v1/blkio-controller.txt
@@ -8,61 +8,13 @@ both at leaf nodes as well as at intermediate nodes in a storage hierarchy.
Plan is to use the same cgroup based management interface for blkio controller
and based on user options switch IO policies in the background.
-Currently two IO control policies are implemented. First one is proportional
-weight time based division of disk policy. It is implemented in CFQ. Hence
-this policy takes effect only on leaf nodes when CFQ is being used. The second
-one is throttling policy which can be used to specify upper IO rate limits
-on devices. This policy is implemented in generic block layer and can be
-used on leaf nodes as well as higher level logical devices like device mapper.
+One IO control policy is throttling policy which can be used to
+specify upper IO rate limits on devices. This policy is implemented in
+generic block layer and can be used on leaf nodes as well as higher
+level logical devices like device mapper.
HOWTO
=====
-Proportional Weight division of bandwidth
------------------------------------------
-You can do a very simple testing of running two dd threads in two different
-cgroups. Here is what you can do.
-
-- Enable Block IO controller
- CONFIG_BLK_CGROUP=y
-
-- Enable group scheduling in CFQ
- CONFIG_CFQ_GROUP_IOSCHED=y
-
-- Compile and boot into kernel and mount IO controller (blkio); see
- cgroups.txt, Why are cgroups needed?.
-
- mount -t tmpfs cgroup_root /sys/fs/cgroup
- mkdir /sys/fs/cgroup/blkio
- mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
-
-- Create two cgroups
- mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
-
-- Set weights of group test1 and test2
- echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
- echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
-
-- Create two same size files (say 512MB each) on same disk (file1, file2) and
- launch two dd threads in different cgroup to read those files.
-
- sync
- echo 3 > /proc/sys/vm/drop_caches
-
- dd if=/mnt/sdb/zerofile1 of=/dev/null &
- echo $! > /sys/fs/cgroup/blkio/test1/tasks
- cat /sys/fs/cgroup/blkio/test1/tasks
-
- dd if=/mnt/sdb/zerofile2 of=/dev/null &
- echo $! > /sys/fs/cgroup/blkio/test2/tasks
- cat /sys/fs/cgroup/blkio/test2/tasks
-
-- At macro level, first dd should finish first. To get more precise data, keep
- on looking at (with the help of script), at blkio.disk_time and
- blkio.disk_sectors files of both test1 and test2 groups. This will tell how
- much disk time (in milliseconds), each group got and how many sectors each
- group dispatched to the disk. We provide fairness in terms of disk time, so
- ideally io.disk_time of cgroups should be in proportion to the weight.
-
Throttling/Upper Limit policy
-----------------------------
- Enable Block IO controller
@@ -94,7 +46,7 @@ Throttling/Upper Limit policy
Hierarchical Cgroups
====================
-Both CFQ and throttling implement hierarchy support; however,
+Throttling implements hierarchy support; however,
throttling's hierarchy support is enabled iff "sane_behavior" is
enabled from cgroup side, which currently is a development option and
not publicly available.
@@ -107,9 +59,8 @@ If somebody created a hierarchy like as follows.
|
test3
-CFQ by default and throttling with "sane_behavior" will handle the
-hierarchy correctly. For details on CFQ hierarchy support, refer to
-Documentation/block/cfq-iosched.txt. For throttling, all limits apply
+Throttling with "sane_behavior" will handle the
+hierarchy correctly. For throttling, all limits apply
to the whole subtree while all statistics are local to the IOs
directly generated by tasks in that cgroup.
@@ -130,10 +81,6 @@ CONFIG_DEBUG_BLK_CGROUP
- Debug help. Right now some additional stats file show up in cgroup
if this option is enabled.
-CONFIG_CFQ_GROUP_IOSCHED
- - Enables group scheduling in CFQ. Currently only 1 level of group
- creation is allowed.
-
CONFIG_BLK_DEV_THROTTLING
- Enable block device throttling support in block layer.
@@ -344,32 +291,3 @@ Common files among various policies
- blkio.reset_stats
- Writing an int to this file will result in resetting all the stats
for that cgroup.
-
-CFQ sysfs tunable
-=================
-/sys/block/<disk>/queue/iosched/slice_idle
-------------------------------------------
-On a faster hardware CFQ can be slow, especially with sequential workload.
-This happens because CFQ idles on a single queue and single queue might not
-drive deeper request queue depths to keep the storage busy. In such scenarios
-one can try setting slice_idle=0 and that would switch CFQ to IOPS
-(IO operations per second) mode on NCQ supporting hardware.
-
-That means CFQ will not idle between cfq queues of a cfq group and hence be
-able to driver higher queue depth and achieve better throughput. That also
-means that cfq provides fairness among groups in terms of IOPS and not in
-terms of disk time.
-
-/sys/block/<disk>/queue/iosched/group_idle
-------------------------------------------
-If one disables idling on individual cfq queues and cfq service trees by
-setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
-on the group in an attempt to provide fairness among groups.
-
-By default group_idle is same as slice_idle and does not do anything if
-slice_idle is enabled.
-
-One can experience an overall throughput drop if you have created multiple
-groups and put applications in that group which are not driving enough
-IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle
-on individual groups and throughput should improve.
diff --git a/Documentation/cgroup-v1/hugetlb.txt b/Documentation/cgroup-v1/hugetlb.txt
index 106245c3aecc..1260e5369b9b 100644
--- a/Documentation/cgroup-v1/hugetlb.txt
+++ b/Documentation/cgroup-v1/hugetlb.txt
@@ -32,14 +32,18 @@ Brief summary of control files
hugetlb.<hugepagesize>.usage_in_bytes # show current usage for "hugepagesize" hugetlb
hugetlb.<hugepagesize>.failcnt # show the number of allocation failure due to HugeTLB limit
-For a system supporting two hugepage size (16M and 16G) the control
+For a system supporting three hugepage sizes (64k, 32M and 1G), the control
files include:
-hugetlb.16GB.limit_in_bytes
-hugetlb.16GB.max_usage_in_bytes
-hugetlb.16GB.usage_in_bytes
-hugetlb.16GB.failcnt
-hugetlb.16MB.limit_in_bytes
-hugetlb.16MB.max_usage_in_bytes
-hugetlb.16MB.usage_in_bytes
-hugetlb.16MB.failcnt
+hugetlb.1GB.limit_in_bytes
+hugetlb.1GB.max_usage_in_bytes
+hugetlb.1GB.usage_in_bytes
+hugetlb.1GB.failcnt
+hugetlb.64KB.limit_in_bytes
+hugetlb.64KB.max_usage_in_bytes
+hugetlb.64KB.usage_in_bytes
+hugetlb.64KB.failcnt
+hugetlb.32MB.limit_in_bytes
+hugetlb.32MB.max_usage_in_bytes
+hugetlb.32MB.usage_in_bytes
+hugetlb.32MB.failcnt
diff --git a/Makefile b/Makefile
index b81e17261250..9514dac2660a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Golden Lions
# *DOCUMENTATION*
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 8fbd583b18e1..e9d2e578cbe6 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -51,7 +51,7 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS += -Wno-psabi
+KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 3a1870228946..dff8f9ea5754 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -195,6 +195,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
+ start = round_down(start, stride);
+ end = round_up(end, stride);
+
if ((end - start) >= (MAX_TLBI_OPS * stride)) {
flush_tlb_mm(vma->vm_mm);
return;
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 7b7ac0f6cec9..d819a3e8b552 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -260,6 +260,13 @@ struct kvm_vcpu_events {
KVM_REG_SIZE_U256 | \
((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+/*
+ * Register values for KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() and
+ * KVM_REG_ARM64_SVE_FFR() are represented in memory in an endianness-
+ * invariant layout which differs from the layout used for the FPSIMD
+ * V-registers on big-endian systems: see sigcontext.h for more explanation.
+ */
+
#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index d78623acb649..97c53203150b 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -176,6 +176,10 @@ struct user_sve_header {
* FPCR uint32_t FPCR
*
* Additional data might be appended in the future.
+ *
+ * The Z-, P- and FFR registers are represented in memory in an endianness-
+ * invariant layout which differs from the layout used for the FPSIMD
+ * V-registers on big-endian systems: see sigcontext.h for more explanation.
*/
#define SVE_PT_SVE_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq)
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 5f3c0cec5af9..3d448a0bb225 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -77,6 +77,15 @@ struct fpsimd_context {
__uint128_t vregs[32];
};
+/*
+ * Note: similarly to all other integer fields, each V-register is stored in an
+ * endianness-dependent format, with the byte at offset i from the start of the
+ * in-memory representation of the register value containing
+ *
+ * bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or
+ * bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts.
+ */
+
/* ESR_EL1 context */
#define ESR_MAGIC 0x45535201
@@ -204,6 +213,11 @@ struct sve_context {
* FFR uint16_t[vq] first-fault status register
*
* Additional data might be appended in the future.
+ *
+ * Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR)
+ * is encoded in memory in an endianness-invariant format, with the byte at
+ * offset i from the start of the in-memory representation containing bits
+ * [(7 + 8 * i) : (8 * i)] of the register value.
*/
#define SVE_SIG_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index a38bf74bcca8..bb42cd04baec 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
+#include <linux/swab.h>
#include <asm/esr.h>
#include <asm/fpsimd.h>
@@ -352,6 +353,23 @@ static int __init sve_sysctl_init(void) { return 0; }
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ u64 a = swab64(x);
+ u64 b = swab64(x >> 64);
+
+ return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
/*
* Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
* task->thread.sve_state.
@@ -369,14 +387,16 @@ static void fpsimd_to_sve(struct task_struct *task)
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
+ __uint128_t *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i)
- memcpy(ZREG(sst, vq, i), &fst->vregs[i],
- sizeof(fst->vregs[i]));
+ for (i = 0; i < 32; ++i) {
+ p = (__uint128_t *)ZREG(sst, vq, i);
+ *p = arm64_cpu_to_le128(fst->vregs[i]);
+ }
}
/*
@@ -395,14 +415,16 @@ static void sve_to_fpsimd(struct task_struct *task)
void const *sst = task->thread.sve_state;
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
+ __uint128_t const *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i)
- memcpy(&fst->vregs[i], ZREG(sst, vq, i),
- sizeof(fst->vregs[i]));
+ for (i = 0; i < 32; ++i) {
+ p = (__uint128_t const *)ZREG(sst, vq, i);
+ fst->vregs[i] = arm64_le128_to_cpu(*p);
+ }
}
#ifdef CONFIG_ARM64_SVE
@@ -491,6 +513,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
+ __uint128_t *p;
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
@@ -499,9 +522,10 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
- for (i = 0; i < 32; ++i)
- memcpy(ZREG(sst, vq, i), &fst->vregs[i],
- sizeof(fst->vregs[i]));
+ for (i = 0; i < 32; ++i) {
+ p = (__uint128_t *)ZREG(sst, vq, i);
+ *p = arm64_cpu_to_le128(fst->vregs[i]);
+ }
}
int sve_set_vector_length(struct task_struct *task,
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 7dede2e34b70..ccf00a8b98c6 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -876,6 +876,23 @@ static inline int pmd_present(pmd_t pmd)
return false;
}
+static inline int pmd_is_serializing(pmd_t pmd)
+{
+ /*
+ * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
+ * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
+ *
+ * This condition may also occur when flushing a pmd while flushing
+ * it (see ptep_modify_prot_start), so callers must ensure this
+ * case is fine as well.
+ */
+ if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
+ cpu_to_be64(_PAGE_INVALID))
+ return true;
+
+ return false;
+}
+
static inline int pmd_bad(pmd_t pmd)
{
if (radix_enabled())
@@ -1092,6 +1109,19 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
+ /*
+ * pmdp_invalidate sets this combination (which is not caught by
+ * !pte_present() check in pte_access_permitted), to prevent
+ * lock-free lookups, as part of the serialize_against_pte_lookup()
+ * synchronisation.
+ *
+ * This also catches the case where the PTE's hardware PRESENT bit is
+ * cleared while TLB is flushed, which is suboptimal but should not
+ * be frequent.
+ */
+ if (pmd_is_serializing(pmd))
+ return false;
+
return pte_access_permitted(pmd_pte(pmd), write);
}
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
index 3ffad030393c..461b0f193864 100644
--- a/arch/powerpc/include/asm/btext.h
+++ b/arch/powerpc/include/asm/btext.h
@@ -13,7 +13,11 @@ extern void btext_update_display(unsigned long phys, int width, int height,
int depth, int pitch);
extern void btext_setup_display(int width, int height, int depth, int pitch,
unsigned long address);
+#ifdef CONFIG_PPC32
extern void btext_prepare_BAT(void);
+#else
+static inline void btext_prepare_BAT(void) { }
+#endif
extern void btext_map(void);
extern void btext_unmap(void);
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 4a585cba1787..c68476818753 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -94,6 +94,9 @@ static inline bool kdump_in_progress(void)
return crashing_cpu >= 0;
}
+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
+ unsigned long start_address) __noreturn;
+
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
index affe5dcce7f4..2b160d68db49 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -30,7 +30,6 @@ typedef void (*relocate_new_kernel_t)(
*/
void default_machine_kexec(struct kimage *image)
{
- extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
unsigned long page_list;
unsigned long reboot_code_buffer, reboot_code_buffer_phys;
@@ -58,6 +57,9 @@ void default_machine_kexec(struct kimage *image)
reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
+ if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
+ relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
+
/* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer;
(*rnk)(page_list, reboot_code_buffer_phys, image->start);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 3555cad7bdde..ed446b7ea164 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2336,6 +2336,7 @@ static void __init prom_check_displays(void)
prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
width, height, pitch, addr);
btext_setup_display(width, height, 8, pitch, addr);
+ btext_prepare_BAT();
}
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 518d416971c1..160bef0d553d 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -24,7 +24,7 @@ fi
WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
_end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
__secondary_hold_acknowledge __secondary_hold_spinloop __start
-logo_linux_clut224
+logo_linux_clut224 btext_prepare_BAT
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index a255707e4aee..01bc9663360d 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -112,6 +112,9 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
/*
* This ensures that generic code that rely on IRQ disabling
* to prevent a parallel THP split work as expected.
+ *
+ * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
+ * a special case check in pmd_access_permitted.
*/
serialize_against_pte_lookup(vma->vm_mm);
return __pmd(old_pmd);
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 39d2f8012386..fc10c0c24f51 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -368,13 +368,25 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
pdshift = PMD_SHIFT;
pmdp = pmd_offset(&pud, ea);
pmd = READ_ONCE(*pmdp);
+
/*
- * A hugepage collapse is captured by pmd_none, because
- * it mark the pmd none and do a hpte invalidate.
+ * A hugepage collapse is captured by this condition, see
+ * pmdp_collapse_flush.
*/
if (pmd_none(pmd))
return NULL;
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * A hugepage split is captured by this condition, see
+ * pmdp_invalidate.
+ *
+ * Huge page modification can be caught here too.
+ */
+ if (pmd_is_serializing(pmd))
+ return NULL;
+#endif
+
if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
if (is_thp)
*is_thp = true;
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 9e27fa05a7ae..4c95c365058a 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -536,7 +536,7 @@ static inline void __fpregs_load_activate(void)
struct fpu *fpu = &current->thread.fpu;
int cpu = smp_processor_id();
- if (WARN_ON_ONCE(current->mm == NULL))
+ if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
return;
if (!fpregs_state_valid(fpu, cpu)) {
@@ -567,11 +567,11 @@ static inline void __fpregs_load_activate(void)
* otherwise.
*
* The FPU context is only stored/restored for a user task and
- * ->mm is used to distinguish between kernel and user threads.
+ * PF_KTHREAD is used to distinguish between kernel and user threads.
*/
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
+ if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 9f15384c504a..310118805f57 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -52,6 +52,9 @@
#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
+#define INTEL_FAM6_ICELAKE_X 0x6A
+#define INTEL_FAM6_ICELAKE_XEON_D 0x6C
+#define INTEL_FAM6_ICELAKE_DESKTOP 0x7D
#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
/* "Small Core" Processors (Atom) */
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 70a04436380e..a813987b5552 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -872,7 +872,7 @@ int __init microcode_init(void)
goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops);
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+ cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);
pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 7ee93125a211..397206f23d14 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -360,6 +360,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
struct list_head *head;
struct rdtgroup *entry;
+ if (!is_mbm_local_enabled())
+ return;
+
r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
closid = rgrp->closid;
rmid = rgrp->mon.rmid;
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 2f48f208f7e2..2131b8bbaad7 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2534,7 +2534,12 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
if (closid_allocated(i) && i != closid) {
mode = rdtgroup_mode_by_closid(i);
if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
- break;
+ /*
+ * ctrl values for locksetup aren't relevant
+ * until the schemata is written, and the mode
+ * becomes RDT_MODE_PSEUDO_LOCKED.
+ */
+ continue;
/*
* If CDP is active include peer domain's
* usage to ensure there is no overlap
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 466fca686fb9..649fbc3fcf9f 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -102,7 +102,7 @@ static void __kernel_fpu_begin(void)
kernel_fpu_disable();
- if (current->mm) {
+ if (!(current->flags & PF_KTHREAD)) {
if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
set_thread_flag(TIF_NEED_FPU_LOAD);
/*
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 5a8d118bc423..0071b794ed19 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -5,6 +5,7 @@
#include <linux/compat.h>
#include <linux/cpu.h>
+#include <linux/pagemap.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
@@ -61,6 +62,11 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
struct user_i387_ia32_struct env;
struct _fpstate_32 __user *fp = buf;
+ fpregs_lock();
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+ copy_fxregs_to_kernel(&tsk->thread.fpu);
+ fpregs_unlock();
+
convert_from_fxsr(&env, tsk);
if (__copy_to_user(buf, &env, sizeof(env)) ||
@@ -189,15 +195,7 @@ retry:
fpregs_unlock();
if (ret) {
- int aligned_size;
- int nr_pages;
-
- aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
- nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
-
- ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
- NULL, FOLL_WRITE);
- if (ret == nr_pages)
+ if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
goto retry;
return -EFAULT;
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 9a8c1648fc9a..6690c5652aeb 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -758,7 +758,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
BREAK_INSTR_SIZE);
bpt->type = BP_POKE_BREAKPOINT;
- return err;
+ return 0;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 8dc0fc0b1382..296da58f3013 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -199,7 +199,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
if (!pgtable_l5_enabled())
return (p4d_t *)pgd;
- p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+ p4d = pgd_val(*pgd) & PTE_PFN_MASK;
p4d += __START_KERNEL_map - phys_base;
return (p4d_t *)p4d + p4d_index(addr);
}
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index dc3f058bdf9b..dc6182eecefa 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
} kaslr_regions[] = {
{ &page_offset_base, 0 },
{ &vmalloc_base, 0 },
- { &vmemmap_base, 1 },
+ { &vmemmap_base, 0 },
};
/* Get size in bytes used by the memory region */
@@ -78,6 +78,7 @@ void __init kernel_randomize_memory(void)
unsigned long rand, memory_tb;
struct rnd_state rand_state;
unsigned long remain_entropy;
+ unsigned long vmemmap_size;
vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
vaddr = vaddr_start;
@@ -109,6 +110,14 @@ void __init kernel_randomize_memory(void)
if (memory_tb < kaslr_regions[0].size_tb)
kaslr_regions[0].size_tb = memory_tb;
+ /*
+ * Calculate the vmemmap region size in TBs, aligned to a TB
+ * boundary.
+ */
+ vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
+ sizeof(struct page);
+ kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
+
/* Calculate entropy available between regions */
remain_entropy = vaddr_end - vaddr_start;
for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
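
As a worked example of the vmemmap sizing added above (illustrative; it assumes the default 64 TB direct-map region and sizeof(struct page) == 64): 64 << (TB_SHIFT - PAGE_SHIFT) = 64 << 28 = 2^34 pages, times 64 bytes of struct page each gives 2^40 bytes, so DIV_ROUND_UP(..., 1UL << TB_SHIFT) sizes the vmemmap region at 1 TB, matching the old hard-coded value; with 5-level paging and its larger direct map the computed size grows accordingly.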
diff --git a/block/Kconfig b/block/Kconfig
index 1b220101a9cb..2466dcc3ef1d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -73,6 +73,7 @@ config BLK_DEV_INTEGRITY
config BLK_DEV_ZONED
bool "Zoned block device support"
+ select MQ_IOSCHED_DEADLINE
---help---
Block layer zoned block device support. This option enables
support for ZAC/ZBC host-managed and host-aware zoned block devices.
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 59f46904cb11..b3796a40a61a 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -1046,8 +1046,7 @@ struct blkcg_policy blkcg_policy_bfq = {
struct cftype bfq_blkcg_legacy_files[] = {
{
.name = "bfq.weight",
- .link_name = "weight",
- .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
+ .flags = CFTYPE_NOT_ON_ROOT,
.seq_show = bfq_io_show_weight,
.write_u64 = bfq_io_set_weight_legacy,
},
@@ -1167,8 +1166,7 @@ struct cftype bfq_blkcg_legacy_files[] = {
struct cftype bfq_blkg_files[] = {
{
.name = "bfq.weight",
- .link_name = "weight",
- .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
+ .flags = CFTYPE_NOT_ON_ROOT,
.seq_show = bfq_io_show_weight,
.write = bfq_io_set_weight,
},
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 6aea0ebc3a73..2489ddbb21db 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -821,38 +821,28 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
{},
};
-static bool debugfs_create_files(struct dentry *parent, void *data,
+static void debugfs_create_files(struct dentry *parent, void *data,
const struct blk_mq_debugfs_attr *attr)
{
if (IS_ERR_OR_NULL(parent))
- return false;
+ return;
d_inode(parent)->i_private = data;
- for (; attr->name; attr++) {
- if (!debugfs_create_file(attr->name, attr->mode, parent,
- (void *)attr, &blk_mq_debugfs_fops))
- return false;
- }
- return true;
+ for (; attr->name; attr++)
+ debugfs_create_file(attr->name, attr->mode, parent,
+ (void *)attr, &blk_mq_debugfs_fops);
}
-int blk_mq_debugfs_register(struct request_queue *q)
+void blk_mq_debugfs_register(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
- if (!blk_debugfs_root)
- return -ENOENT;
-
q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
blk_debugfs_root);
- if (!q->debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(q->debugfs_dir, q,
- blk_mq_debugfs_queue_attrs))
- goto err;
+ debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
/*
* blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
@@ -864,11 +854,10 @@ int blk_mq_debugfs_register(struct request_queue *q)
/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
- goto err;
- if (q->elevator && !hctx->sched_debugfs_dir &&
- blk_mq_debugfs_register_sched_hctx(q, hctx))
- goto err;
+ if (!hctx->debugfs_dir)
+ blk_mq_debugfs_register_hctx(q, hctx);
+ if (q->elevator && !hctx->sched_debugfs_dir)
+ blk_mq_debugfs_register_sched_hctx(q, hctx);
}
if (q->rq_qos) {
@@ -879,12 +868,6 @@ int blk_mq_debugfs_register(struct request_queue *q)
rqos = rqos->next;
}
}
-
- return 0;
-
-err:
- blk_mq_debugfs_unregister(q);
- return -ENOMEM;
}
void blk_mq_debugfs_unregister(struct request_queue *q)
@@ -894,52 +877,32 @@ void blk_mq_debugfs_unregister(struct request_queue *q)
q->debugfs_dir = NULL;
}
-static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx)
+static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
{
struct dentry *ctx_dir;
char name[20];
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
- if (!ctx_dir)
- return -ENOMEM;
- if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
- return -ENOMEM;
-
- return 0;
+ debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}
-int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_ctx *ctx;
char name[20];
int i;
- if (!q->debugfs_dir)
- return -ENOENT;
-
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
- if (!hctx->debugfs_dir)
- return -ENOMEM;
-
- if (!debugfs_create_files(hctx->debugfs_dir, hctx,
- blk_mq_debugfs_hctx_attrs))
- goto err;
-
- hctx_for_each_ctx(hctx, ctx, i) {
- if (blk_mq_debugfs_register_ctx(hctx, ctx))
- goto err;
- }
- return 0;
+ debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
-err:
- blk_mq_debugfs_unregister_hctx(hctx);
- return -ENOMEM;
+ hctx_for_each_ctx(hctx, ctx, i)
+ blk_mq_debugfs_register_ctx(hctx, ctx);
}
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -949,17 +912,13 @@ void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
hctx->debugfs_dir = NULL;
}
-int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (blk_mq_debugfs_register_hctx(q, hctx))
- return -ENOMEM;
- }
-
- return 0;
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_debugfs_register_hctx(q, hctx);
}
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
@@ -971,29 +930,16 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
blk_mq_debugfs_unregister_hctx(hctx);
}
-int blk_mq_debugfs_register_sched(struct request_queue *q)
+void blk_mq_debugfs_register_sched(struct request_queue *q)
{
struct elevator_type *e = q->elevator->type;
- if (!q->debugfs_dir)
- return -ENOENT;
-
if (!e->queue_debugfs_attrs)
- return 0;
+ return;
q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
- if (!q->sched_debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(q->sched_debugfs_dir, q,
- e->queue_debugfs_attrs))
- goto err;
-
- return 0;
-
-err:
- blk_mq_debugfs_unregister_sched(q);
- return -ENOMEM;
+ debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
@@ -1008,36 +954,22 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
rqos->debugfs_dir = NULL;
}
-int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
struct request_queue *q = rqos->q;
const char *dir_name = rq_qos_id_to_name(rqos->id);
- if (!q->debugfs_dir)
- return -ENOENT;
-
if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
- return 0;
+ return;
- if (!q->rqos_debugfs_dir) {
+ if (!q->rqos_debugfs_dir)
q->rqos_debugfs_dir = debugfs_create_dir("rqos",
q->debugfs_dir);
- if (!q->rqos_debugfs_dir)
- return -ENOMEM;
- }
rqos->debugfs_dir = debugfs_create_dir(dir_name,
rqos->q->rqos_debugfs_dir);
- if (!rqos->debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(rqos->debugfs_dir, rqos,
- rqos->ops->debugfs_attrs))
- goto err;
- return 0;
- err:
- blk_mq_debugfs_unregister_rqos(rqos);
- return -ENOMEM;
+ debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
@@ -1046,27 +978,18 @@ void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
q->rqos_debugfs_dir = NULL;
}
-int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
struct elevator_type *e = q->elevator->type;
- if (!hctx->debugfs_dir)
- return -ENOENT;
-
if (!e->hctx_debugfs_attrs)
- return 0;
+ return;
hctx->sched_debugfs_dir = debugfs_create_dir("sched",
hctx->debugfs_dir);
- if (!hctx->sched_debugfs_dir)
- return -ENOMEM;
-
- if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
- e->hctx_debugfs_attrs))
- return -ENOMEM;
-
- return 0;
+ debugfs_create_files(hctx->sched_debugfs_dir, hctx,
+ e->hctx_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
index 8c9012a578c1..a68aa6041a10 100644
--- a/block/blk-mq-debugfs.h
+++ b/block/blk-mq-debugfs.h
@@ -18,74 +18,68 @@ struct blk_mq_debugfs_attr {
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
-int blk_mq_debugfs_register(struct request_queue *q);
+void blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
-int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx);
+void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
-int blk_mq_debugfs_register_hctxs(struct request_queue *q);
+void blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
-int blk_mq_debugfs_register_sched(struct request_queue *q);
+void blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
-int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
-int blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
+void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
#else
-static inline int blk_mq_debugfs_register(struct request_queue *q)
+static inline void blk_mq_debugfs_register(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}
-static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
+static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}
-static inline int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 500cb04901cc..2766066a15db 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -555,7 +555,6 @@ void blk_mq_sched_free_requests(struct request_queue *q)
int i;
lockdep_assert_held(&q->sysfs_lock);
- WARN_ON(!q->elevator);
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->sched_tags)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index aaa57e0c809d..4a2dff303865 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4460,9 +4460,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
- { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
- { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ /* drives which fail FPDMA_AA activation (some may freeze afterwards)
+ the ST disks also have LPM issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+ ATA_HORKAGE_NOLPM, },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+ ATA_HORKAGE_NOLPM, },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index e038e2b3b7ea..0bbb328bd17f 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -755,10 +755,32 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
&devres));
-
}
EXPORT_SYMBOL_GPL(devm_remove_action);
+/**
+ * devm_release_action() - release previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Releases and removes instance of @action previously added by
+ * devm_add_action(). Both action and data should match one of the
+ * existing entries.
+ */
+void devm_release_action(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
+ &devres));
+
+}
+EXPORT_SYMBOL_GPL(devm_release_action);
+
/*
* Managed kmalloc/kfree
*/
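
A minimal sketch of how the new devm_release_action() pairs with the existing devm_add_action_or_reset(); this is illustrative only, not from the patch, and the foo_* names are hypothetical.

#include <linux/device.h>

static void foo_teardown(void *data)
{
	/* undo whatever setup was done for this device */
}

static int foo_probe(struct device *dev)
{
	int ret;

	/* register a managed undo action; it runs automatically on detach */
	ret = devm_add_action_or_reset(dev, foo_teardown, dev);
	if (ret)
		return ret;

	/*
	 * Later, if the undo must happen now rather than at detach time,
	 * devm_release_action() runs foo_teardown() immediately and drops it
	 * from the devres list.
	 */
	devm_release_action(dev, foo_teardown, dev);
	return 0;
}

By contrast, devm_remove_action() (above) only unregisters the callback without running it.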
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 5d1c261a2cfd..fca0c97ff1aa 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -74,10 +74,6 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
struct nullb_device *dev = nullb->dev;
unsigned int zno, nrz = 0;
- if (!dev->zoned)
- /* Not a zoned null device */
- return -EOPNOTSUPP;
-
zno = null_zone_no(dev, sector);
if (zno < dev->nr_zones) {
nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 4c7f51b1eda9..4628e1a27a2b 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -767,7 +767,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
- dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
+ dev_info(&dev->core, "%s: Using %llu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
device_add_disk(&dev->core, gendisk, NULL);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index b2a951a798e2..5c69c9a9a6a4 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -149,22 +149,22 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}
-static u64 arch_counter_get_cntpct_stable(void)
+static notrace u64 arch_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
}
-static u64 arch_counter_get_cntpct(void)
+static notrace u64 arch_counter_get_cntpct(void)
{
return __arch_counter_get_cntpct();
}
-static u64 arch_counter_get_cntvct_stable(void)
+static notrace u64 arch_counter_get_cntvct_stable(void)
{
return __arch_counter_get_cntvct_stable();
}
-static u64 arch_counter_get_cntvct(void)
+static notrace u64 arch_counter_get_cntvct(void)
{
return __arch_counter_get_cntvct();
}
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index e40b55a7086f..5394d9dbdfbc 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -896,7 +896,7 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
return ret;
}
-const static struct omap_dm_timer_ops dmtimer_ops = {
+static const struct omap_dm_timer_ops dmtimer_ops = {
.request_by_node = omap_dm_timer_request_by_node,
.request_specific = omap_dm_timer_request_specific,
.request = omap_dm_timer_request,
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 996d68ff992a..8465d12fecba 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -27,9 +27,8 @@ static void dev_dax_percpu_release(struct percpu_ref *ref)
complete(&dev_dax->cmp);
}
-static void dev_dax_percpu_exit(void *data)
+static void dev_dax_percpu_exit(struct percpu_ref *ref)
{
- struct percpu_ref *ref = data;
struct dev_dax *dev_dax = ref_to_dev_dax(ref);
dev_dbg(&dev_dax->dev, "%s\n", __func__);
@@ -466,18 +465,12 @@ int dev_dax_probe(struct device *dev)
if (rc)
return rc;
- rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
- if (rc)
- return rc;
-
dev_dax->pgmap.ref = &dev_dax->ref;
dev_dax->pgmap.kill = dev_dax_percpu_kill;
+ dev_dax->pgmap.cleanup = dev_dax_percpu_exit;
addr = devm_memremap_pages(dev, &dev_dax->pgmap);
- if (IS_ERR(addr)) {
- devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
- percpu_ref_exit(&dev_dax->ref);
+ if (IS_ERR(addr))
return PTR_ERR(addr);
- }
inode = dax_inode(dax_dev);
cdev = inode->i_cdev;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 2328d04201a9..cfe827cefad8 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -305,7 +305,8 @@ static const struct regmap_config pca953x_i2c_regmap = {
.volatile_reg = pca953x_volatile_register,
.cache_type = REGCACHE_RBTREE,
- .max_register = 0x7f,
+ /* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */
+ .max_register = 0xff,
};
static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 039cfa2ec89d..abeaab4bf1bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2492,7 +2492,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
- int r = -EINVAL;
+ int r;
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
@@ -2502,7 +2502,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
}
*smu_version = adev->pm.fw_version;
}
- return r;
+ return 0;
}
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index eaef5edefc34..24c6e5fcda86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -172,6 +172,8 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ if (block >= AMDGPU_RAS_BLOCK_COUNT)
+ return 0;
return ras && (ras->supported & (1 << block));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index ecf6f96df2ad..e6b07ece3910 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -594,7 +594,7 @@ error:
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ uint32_t rptr;
unsigned i;
int r;
@@ -602,6 +602,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
if (r)
return r;
+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, VCN_ENC_CMD_END);
amdgpu_ring_commit(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index c9edddf9f88a..be70e6e5f9df 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -170,13 +170,16 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ uint32_t rptr;
unsigned i;
int r;
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
+
+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 2191d3d0a219..fc4f0bb9a2e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ uint32_t rptr;
unsigned i;
int r;
@@ -185,6 +185,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
+
+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bfc419ed9d6c..ceca79b1468d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1340,7 +1340,10 @@ static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
- return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
+
+ if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
+ return -EFAULT;
+ return 0;
}
int drm_legacy_infobufs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 649cfd8b4200..e804ac5dec02 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1570,6 +1570,50 @@ static void connector_bad_edid(struct drm_connector *connector,
}
}
+/* Get override or firmware EDID */
+static struct edid *drm_get_override_edid(struct drm_connector *connector)
+{
+ struct edid *override = NULL;
+
+ if (connector->override_edid)
+ override = drm_edid_duplicate(connector->edid_blob_ptr->data);
+
+ if (!override)
+ override = drm_load_edid_firmware(connector);
+
+ return IS_ERR(override) ? NULL : override;
+}
+
+/**
+ * drm_add_override_edid_modes - add modes from override/firmware EDID
+ * @connector: connector we're probing
+ *
+ * Add modes from the override/firmware EDID, if available. Only to be used from
+ * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe
+ * failed during drm_get_edid() and caused the override/firmware EDID to be
+ * skipped.
+ *
+ * Return: The number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_override_edid_modes(struct drm_connector *connector)
+{
+ struct edid *override;
+ int num_modes = 0;
+
+ override = drm_get_override_edid(connector);
+ if (override) {
+ drm_connector_update_edid_property(connector, override);
+ num_modes = drm_add_edid_modes(connector, override);
+ kfree(override);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
+ connector->base.id, connector->name, num_modes);
+ }
+
+ return num_modes;
+}
+EXPORT_SYMBOL(drm_add_override_edid_modes);
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1597,15 +1641,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
{
int i, j = 0, valid_extensions = 0;
u8 *edid, *new;
- struct edid *override = NULL;
-
- if (connector->override_edid)
- override = drm_edid_duplicate(connector->edid_blob_ptr->data);
-
- if (!override)
- override = drm_load_edid_firmware(connector);
+ struct edid *override;
- if (!IS_ERR_OR_NULL(override))
+ override = drm_get_override_edid(connector);
+ if (override)
return override;
if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 1ee208c2c85e..472ea5d81f82 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -255,7 +255,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
if (obj->import_attach)
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
else
- shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
+ shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 374b372da58a..3972ebe48463 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -375,7 +375,10 @@ static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
- return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags));
+
+ if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
+ return -EFAULT;
+ return 0;
}
static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 521aff99b08a..d8a0bcd02f34 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -42,6 +42,14 @@ static const struct drm_dmi_panel_orientation_data asus_t100ha = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
+static const struct drm_dmi_panel_orientation_data gpd_micropc = {
+ .width = 720,
+ .height = 1280,
+ .bios_dates = (const char * const []){ "04/26/2019",
+ NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.width = 1200,
.height = 1920,
@@ -50,6 +58,14 @@ static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data gpd_pocket2 = {
+ .width = 1200,
+ .height = 1920,
+ .bios_dates = (const char * const []){ "06/28/2018", "08/28/2018",
+ "12/07/2018", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data gpd_win = {
.width = 720,
.height = 1280,
@@ -99,6 +115,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
.driver_data = (void *)&asus_t100ha,
+ }, { /* GPD MicroPC (generic strings, also match on bios date) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_micropc,
}, { /*
* GPD Pocket, note that the DMI data is less generic than
* it seems, devices with a board-vendor of "AMI Corporation"
@@ -112,6 +136,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
},
.driver_data = (void *)&gpd_pocket,
+ }, { /* GPD Pocket 2 (generic strings, also match on bios date) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_pocket2,
}, { /* GPD Win (same note on DMI match as GPD Pocket) */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6fd08e04b323..dd427c7ff967 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -479,6 +479,13 @@ retry:
count = (*connector_funcs->get_modes)(connector);
+ /*
+ * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
+ * override/firmware EDID.
+ */
+ if (count == 0 && connector->status == connector_status_connected)
+ count = drm_add_override_edid_modes(connector);
+
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
count += drm_helper_probe_add_cmdline_mode(connector);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a6ade66349bd..25f78196b964 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1254,18 +1254,15 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- u32 data;
- int ret;
-
- write_vreg(vgpu, offset, p_data, bytes);
- data = vgpu_vreg(vgpu, offset);
+ u32 data = *(u32 *)p_data;
+ bool invalid_write = false;
switch (offset) {
case _vgtif_reg(display_ready):
send_display_ready_uevent(vgpu, data ? 1 : 0);
break;
case _vgtif_reg(g2v_notify):
- ret = handle_g2v_notification(vgpu, data);
+ handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
case _vgtif_reg(cursor_x_hot):
@@ -1282,13 +1279,19 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
case _vgtif_reg(execlist_context_descriptor_hi):
break;
case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+ invalid_write = true;
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
break;
default:
+ invalid_write = true;
gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
break;
}
+
+ if (!invalid_write)
+ write_vreg(vgpu, offset, p_data, bytes);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 39a4804091d7..dc4ce694c06a 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3005,6 +3005,7 @@ static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
return gen8_is_valid_mux_addr(dev_priv, addr) ||
+ addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
(addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2aa69d347ec4..13d6bd4e17b2 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1062,6 +1062,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define NOA_DATA _MMIO(0x986C)
#define NOA_WRITE _MMIO(0x9888)
+#define GEN10_NOA_WRITE_HIGH _MMIO(0x9884)
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index f43c2a2563a5..96618af47088 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -303,10 +303,17 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
u32 i;
u32 *dmc_payload;
+ size_t fsize;
if (!fw)
return NULL;
+ fsize = sizeof(struct intel_css_header) +
+ sizeof(struct intel_package_header) +
+ sizeof(struct intel_dmc_header);
+ if (fsize > fw->size)
+ goto error_truncated;
+
/* Extract CSS Header information*/
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
@@ -366,6 +373,9 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Convert dmc_offset into number of bytes. By default it is in dwords*/
dmc_offset *= 4;
readcount += dmc_offset;
+ fsize += dmc_offset;
+ if (fsize > fw->size)
+ goto error_truncated;
/* Extract dmc_header information. */
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
@@ -397,6 +407,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
+ fsize += nbytes;
+ if (fsize > fw->size)
+ goto error_truncated;
+
if (nbytes > csr->max_fw_size) {
DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
return NULL;
@@ -410,6 +424,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
}
return memcpy(dmc_payload, &fw->data[readcount], nbytes);
+
+error_truncated:
+ DRM_ERROR("Truncated DMC firmware, rejecting.\n");
+ return NULL;
}
static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5098228f1302..75105a2c59ea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2432,10 +2432,14 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
* main surface.
*/
static const struct drm_format_info ccs_formats[] = {
- { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
static const struct drm_format_info *
@@ -11942,7 +11946,7 @@ encoder_retry:
return 0;
}
-static bool intel_fuzzy_clock_check(int clock1, int clock2)
+bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;
@@ -12001,9 +12005,6 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
intel_compare_m_n(m_n->link_m, m_n->link_n,
m2_n2->link_m, m2_n2->link_n, !adjust)) {
- if (adjust)
- *m2_n2 = *m_n;
-
return true;
}
@@ -13145,6 +13146,33 @@ static int calc_watermark_data(struct intel_atomic_state *state)
return 0;
}
+static void intel_crtc_check_fastset(struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(new_crtc_state->base.crtc->dev);
+
+ if (!intel_pipe_config_compare(dev_priv, old_crtc_state,
+ new_crtc_state, true))
+ return;
+
+ new_crtc_state->base.mode_changed = false;
+ new_crtc_state->update_pipe = true;
+
+ /*
+ * If we're not doing the full modeset we want to
+ * keep the current M/N values as they may be
+ * sufficiently different to the computed values
+ * to cause problems.
+ *
+ * FIXME: should really copy more fuzzy state here
+ */
+ new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
+ new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
+ new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
+ new_crtc_state->has_drrs = old_crtc_state->has_drrs;
+}
+
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -13193,12 +13221,8 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
- if (intel_pipe_config_compare(dev_priv,
- to_intel_crtc_state(old_crtc_state),
- pipe_config, true)) {
- crtc_state->mode_changed = false;
- pipe_config->update_pipe = true;
- }
+ intel_crtc_check_fastset(to_intel_crtc_state(old_crtc_state),
+ pipe_config);
if (needs_modeset(crtc_state))
any_ms = true;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a38b9cff5cd0..e85cd377a652 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1742,6 +1742,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+bool intel_fuzzy_clock_check(int clock1, int clock2);
/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 3074448446bc..4b8e48db1843 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -853,6 +853,17 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
if (mipi_config->target_burst_mode_freq) {
u32 bitrate = intel_dsi_bitrate(intel_dsi);
+ /*
+ * Sometimes the VBT contains a slightly lower clock
+ * than the bitrate we have calculated; in this case
+ * just replace it with the calculated bitrate.
+ */
+ if (mipi_config->target_burst_mode_freq < bitrate &&
+ intel_fuzzy_clock_check(
+ mipi_config->target_burst_mode_freq,
+ bitrate))
+ mipi_config->target_burst_mode_freq = bitrate;
+
if (mipi_config->target_burst_mode_freq < bitrate) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0e3d91d9ef13..9ecfba0a54a1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -916,6 +916,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
+static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
+ u8 audio_state)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
+ &audio_state, 1);
+}
+
#if 0
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
@@ -1487,11 +1494,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
- if (crtc_state->has_audio) {
- WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4);
- sdvox |= SDVO_AUDIO_ENABLE;
- }
-
if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
@@ -1635,8 +1637,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- if (sdvox & SDVO_AUDIO_ENABLE)
- pipe_config->has_audio = true;
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
+ &val, 1)) {
+ u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
+
+ if ((val & mask) == mask)
+ pipe_config->has_audio = true;
+ }
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
&val, 1)) {
@@ -1647,6 +1654,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
}
+static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
+{
+ intel_sdvo_set_audio_state(intel_sdvo, 0);
+}
+
+static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ struct drm_connector *connector = conn_state->connector;
+ u8 *eld = connector->eld;
+
+ eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+ intel_sdvo_set_audio_state(intel_sdvo, 0);
+
+ intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
+ SDVO_HBUF_TX_DISABLED,
+ eld, drm_eld_size(eld));
+
+ intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
+ SDVO_AUDIO_PRESENCE_DETECT);
+}
+
static void intel_disable_sdvo(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
@@ -1656,6 +1689,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
+ if (old_crtc_state->has_audio)
+ intel_sdvo_disable_audio(intel_sdvo);
+
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1741,6 +1777,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+
+ if (pipe_config->has_audio)
+ intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
}
static enum drm_mode_status
@@ -2603,7 +2642,6 @@ static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
@@ -2640,9 +2678,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- /* gen3 doesn't do the hdmi bits in the SDVO register */
- if (INTEL_GEN(dev_priv) >= 4 &&
- intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo_connector->is_hdmi = true;
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index db0ed499268a..e9ba3b047f93 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
#define SDVO_CMD_SET_AUDIO_STAT 0x91
#define SDVO_CMD_GET_AUDIO_STAT 0x92
+ #define SDVO_AUDIO_ELD_VALID (1 << 0)
+ #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1)
+ #define SDVO_AUDIO_CP_READY (1 << 2)
#define SDVO_CMD_SET_HBUF_INDEX 0x93
#define SDVO_HBUF_INDEX_ELD 0
#define SDVO_HBUF_INDEX_AVI_IF 1
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index f426dfdfb418..a9007210dda1 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -90,10 +90,6 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- int i;
-
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
- clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
mtk_disp_mutex_put(mtk_crtc->mutex);
@@ -186,7 +182,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
- ret = clk_enable(mtk_crtc->ddp_comp[i]->clk);
+ ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
if (ret) {
DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
goto err;
@@ -196,7 +192,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
return 0;
err:
while (--i >= 0)
- clk_disable(mtk_crtc->ddp_comp[i]->clk);
+ clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
return ret;
}
@@ -206,7 +202,7 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
- clk_disable(mtk_crtc->ddp_comp[i]->clk);
+ clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
@@ -577,15 +573,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (!comp) {
dev_err(dev, "Component %pOF not initialized\n", node);
ret = -ENODEV;
- goto unprepare;
- }
-
- ret = clk_prepare(comp->clk);
- if (ret) {
- dev_err(dev,
- "Failed to prepare clock for component %pOF: %d\n",
- node, ret);
- goto unprepare;
+ return ret;
}
mtk_crtc->ddp_comp[i] = comp;
@@ -603,23 +591,17 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
BIT(pipe), type);
if (ret)
- goto unprepare;
+ return ret;
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
NULL, pipe);
if (ret < 0)
- goto unprepare;
+ return ret;
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
priv->num_pipes++;
return 0;
-
-unprepare:
- while (--i >= 0)
- clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
-
- return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 7fcb7407096f..95fdbd0fbcac 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -303,6 +303,7 @@ err_config_cleanup:
static void mtk_drm_kms_deinit(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
@@ -389,7 +390,9 @@ static void mtk_drm_unbind(struct device *dev)
struct mtk_drm_private *private = dev_get_drvdata(dev);
drm_dev_unregister(private->drm);
+ mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
+ private->num_pipes = 0;
private->drm = NULL;
}
@@ -560,13 +563,8 @@ err_node:
static int mtk_drm_remove(struct platform_device *pdev)
{
struct mtk_drm_private *private = platform_get_drvdata(pdev);
- struct drm_device *drm = private->drm;
int i;
- drm_dev_unregister(drm);
- mtk_drm_kms_deinit(drm);
- drm_dev_put(drm);
-
component_master_del(&pdev->dev, &mtk_drm_ops);
pm_runtime_disable(&pdev->dev);
of_node_put(private->mutex_node);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 3eefb22206c7..0d69698f8173 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -136,7 +136,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_pgoff = 0;
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
@@ -168,6 +167,12 @@ int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
obj = vma->vm_private_data;
+ /*
+ * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+ * whole buffer from the start.
+ */
+ vma->vm_pgoff = 0;
+
return mtk_drm_gem_object_mmap(obj, vma);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 4a0b9150a7bb..b91c4616644a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -622,6 +622,15 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
+ /*
+ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
+ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+ * which needs the irq for vblank, and mtk_dsi_stop() will disable the irq.
+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+ * after the dsi is fully set up.
+ */
+ mtk_dsi_stop(dsi);
+
if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
if (dsi->panel) {
if (drm_panel_unprepare(dsi->panel)) {
@@ -688,7 +697,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
}
- mtk_dsi_stop(dsi);
mtk_dsi_poweroff(dsi);
dsi->enabled = false;
@@ -836,6 +844,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
/* Skip connector cleanup if creation was delegated to the bridge */
if (dsi->conn.dev)
drm_connector_cleanup(&dsi->conn);
+ if (dsi->panel)
+ drm_panel_detach(dsi->panel);
}
static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 685715144156..aa8ea107524e 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -107,8 +107,6 @@ static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
priv->io_base + _REG(VPP_OUT_H_V_SIZE));
drm_crtc_vblank_on(crtc);
-
- priv->viu.osd1_enabled = true;
}
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -137,8 +135,6 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
priv->io_base + _REG(VPP_MISC));
drm_crtc_vblank_on(crtc);
-
- priv->viu.osd1_enabled = true;
}
static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -256,6 +252,8 @@ static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
writel_relaxed(priv->viu.osb_blend1_size,
priv->io_base +
_REG(VIU_OSD_BLEND_BLEND1_SIZE));
+ writel_bits_relaxed(3 << 8, 3 << 8,
+ priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
}
static void meson_crtc_enable_vd1(struct meson_drm *priv)
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 22490047932e..d90427b93a51 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -305,6 +305,8 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
meson_plane->enabled = true;
}
+ priv->viu.osd1_enabled = true;
+
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
@@ -316,14 +318,14 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
/* Disable OSD1 */
if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
- writel_bits_relaxed(BIT(0) | BIT(21), 0,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_bits_relaxed(3 << 8, 0,
+ priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
else
writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
priv->io_base + _REG(VPP_MISC));
meson_plane->enabled = false;
-
+ priv->viu.osd1_enabled = false;
}
static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 58b4af5fbb6d..26732f038d19 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -503,8 +503,17 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* G12A HDMI PLL Needs specific parameters for 5.4GHz */
if (m >= 0xf7) {
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0xea68dc00);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+ if (frac < 0x10000) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
+ 0x6a685c00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
+ 0x11551293);
+ } else {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
+ 0xea68dc00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
+ 0x65771290);
+ }
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x55540000);
} else {
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 462c7cb3e1bd..4b2b3024d371 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -405,8 +405,7 @@ void meson_viu_init(struct meson_drm *priv)
0 << 16 |
1,
priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
- writel_relaxed(3 << 8 |
- 1 << 20,
+ writel_relaxed(1 << 20,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
writel_relaxed(1 << 20,
priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
index 81963e964b0f..86cdc0ce79e6 100644
--- a/drivers/gpu/drm/panfrost/Kconfig
+++ b/drivers/gpu/drm/panfrost/Kconfig
@@ -10,6 +10,7 @@ config DRM_PANFROST
select IOMMU_IO_PGTABLE_LPAE
select DRM_GEM_SHMEM_HELPER
select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
Bifrost (G3x, G5x, G7x) GPUs.
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 29fcffdf2d57..db798532b0b6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -140,7 +140,9 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return 0;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
- if (ret)
+ if (ret == -ENODEV) /* Optional, continue without devfreq */
+ return 0;
+ else if (ret)
return ret;
panfrost_devfreq_reset(pfdev);
@@ -170,6 +172,9 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
int i;
+ if (!pfdev->devfreq.devfreq)
+ return;
+
panfrost_devfreq_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
pfdev->devfreq.slot[i].busy = false;
@@ -179,6 +184,9 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
+ if (!pfdev->devfreq.devfreq)
+ return;
+
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
@@ -188,6 +196,9 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
ktime_t now;
ktime_t last;
+ if (!pfdev->devfreq.devfreq)
+ return;
+
now = ktime_get();
last = pfdev->devfreq.slot[slot].time_last_update;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index a5528a360ef4..97d64ceedbb4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,7 +19,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_device *pfdev = obj->dev->dev_private;
- panfrost_mmu_unmap(bo);
+ if (bo->is_mapped)
+ panfrost_mmu_unmap(bo);
spin_lock(&pfdev->mm_lock);
drm_mm_remove_node(&bo->node);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 045000eb5fcf..6dbcaba020fc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -11,6 +11,7 @@ struct panfrost_gem_object {
struct drm_gem_shmem_object base;
struct drm_mm_node node;
+ bool is_mapped;
};
static inline
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 762b1bd2a8c2..92ac995dd9c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -156,6 +156,9 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
struct sg_table *sgt;
int ret;
+ if (WARN_ON(bo->is_mapped))
+ return 0;
+
sgt = drm_gem_shmem_get_pages_sgt(obj);
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
@@ -189,6 +192,7 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
+ bo->is_mapped = true;
return 0;
}
@@ -203,6 +207,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
size_t unmapped_len = 0;
int ret;
+ if (WARN_ON(!bo->is_mapped))
+ return;
+
dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
ret = pm_runtime_get_sync(pfdev->dev);
@@ -230,6 +237,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
+ bo->is_mapped = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4ff11a0077e1..9506190a0300 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -747,6 +747,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(ret != 0))
goto out_err0;
+ dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
+ SCATTERLIST_MAX_SEGMENT));
+
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 8b9270f31409..e4e09d47c5c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel)
return 0;
}
+/**
+ * vmw_port_hb_out - Send the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @msg: NULL-terminated message.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
+ const char *msg, bool hb)
+{
+ unsigned long si, di, eax, ebx, ecx, edx;
+ unsigned long msg_len = strlen(msg);
+
+ if (hb) {
+ unsigned long bp = channel->cookie_high;
+
+ si = (uintptr_t) msg;
+ di = channel->cookie_low;
+
+ VMW_PORT_HB_OUT(
+ (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+ msg_len, si, di,
+ VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC, bp,
+ eax, ebx, ecx, edx, si, di);
+
+ return ebx;
+ }
+
+ /* HB port not available. Send the message 4 bytes at a time. */
+ ecx = MESSAGE_STATUS_SUCCESS << 16;
+ while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
+ unsigned int bytes = min_t(size_t, msg_len, 4);
+ unsigned long word = 0;
+
+ memcpy(&word, msg, bytes);
+ msg_len -= bytes;
+ msg += bytes;
+ si = channel->cookie_high;
+ di = channel->cookie_low;
+
+ VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
+ word, si, di,
+ VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+ }
+
+ return ecx;
+}
+
+/**
+ * vmw_port_hb_in - Receive the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @reply: Pointer to buffer holding reply.
+ * @reply_len: Length of the reply.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
+ unsigned long reply_len, bool hb)
+{
+ unsigned long si, di, eax, ebx, ecx, edx;
+
+ if (hb) {
+ unsigned long bp = channel->cookie_low;
+
+ si = channel->cookie_high;
+ di = (uintptr_t) reply;
+
+ VMW_PORT_HB_IN(
+ (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+ reply_len, si, di,
+ VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC, bp,
+ eax, ebx, ecx, edx, si, di);
+
+ return ebx;
+ }
+
+ /* HB port not available. Retrieve the message 4 bytes at a time. */
+ ecx = MESSAGE_STATUS_SUCCESS << 16;
+ while (reply_len) {
+ unsigned int bytes = min_t(unsigned long, reply_len, 4);
+
+ si = channel->cookie_high;
+ di = channel->cookie_low;
+
+ VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
+ MESSAGE_STATUS_SUCCESS, si, di,
+ VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+ break;
+
+ memcpy(reply, &ebx, bytes);
+ reply_len -= bytes;
+ reply += bytes;
+ }
+
+ return ecx;
+}
/**
@@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel)
*/
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
- unsigned long eax, ebx, ecx, edx, si, di, bp;
+ unsigned long eax, ebx, ecx, edx, si, di;
size_t msg_len = strlen(msg);
int retries = 0;
-
while (retries < RETRIES) {
retries++;
@@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
- if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
- (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
- /* Expected success + high-bandwidth. Give up. */
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+ /* Expected success. Give up. */
return -EINVAL;
}
/* Send msg */
- si = (uintptr_t) msg;
- di = channel->cookie_low;
- bp = channel->cookie_high;
-
- VMW_PORT_HB_OUT(
- (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
- msg_len, si, di,
- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
- VMW_HYPERVISOR_MAGIC, bp,
- eax, ebx, ecx, edx, si, di);
+ ebx = vmw_port_hb_out(channel, msg,
+ !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
return 0;
@@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg);
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
size_t *msg_len)
{
- unsigned long eax, ebx, ecx, edx, si, di, bp;
+ unsigned long eax, ebx, ecx, edx, si, di;
char *reply;
size_t reply_len;
int retries = 0;
@@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
- if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
- (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
DRM_ERROR("Failed to get reply size for host message.\n");
return -EINVAL;
}
@@ -252,17 +349,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
/* Receive buffer */
- si = channel->cookie_high;
- di = (uintptr_t) reply;
- bp = channel->cookie_low;
-
- VMW_PORT_HB_IN(
- (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
- reply_len, si, di,
- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
- VMW_HYPERVISOR_MAGIC, bp,
- eax, ebx, ecx, edx, si, di);
-
+ ebx = vmw_port_hb_in(channel, reply, reply_len,
+ !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index a6ea75b58a83..d8ea3dd10af0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -441,11 +441,11 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (unlikely(ret != 0))
return ret;
- ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
- vsgt->num_pages, 0,
- (unsigned long)
- vsgt->num_pages << PAGE_SHIFT,
- GFP_KERNEL);
+ ret = __sg_alloc_table_from_pages
+ (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long) vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->dev->dev),
+ GFP_KERNEL);
if (unlikely(ret != 0))
goto out_sg_alloc_fail;
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index fd806f590bf8..98bf694626f7 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -35,8 +35,10 @@ static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
{
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
- if (usage->type == EV_REL && usage->code == REL_WHEEL)
+ if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) {
set_bit(REL_HWHEEL, *bit);
+ set_bit(REL_HWHEEL_HI_RES, *bit);
+ }
if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007)
return -1;
@@ -57,7 +59,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
input = field->hidinput->input;
if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8) {
- if (usage->type == EV_REL && usage->code == REL_WHEEL) {
+ if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) {
a4->delayed_value = value;
return 1;
}
@@ -65,6 +67,8 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
if (usage->hid == 0x000100b8) {
input_event(input, EV_REL, value ? REL_HWHEEL :
REL_WHEEL, a4->delayed_value);
+ input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
+ REL_WHEEL_HI_RES, a4->delayed_value * 120);
return 1;
}
}
@@ -74,8 +78,9 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
return 1;
}
- if (usage->code == REL_WHEEL && a4->hw_wheel) {
+ if (usage->code == REL_WHEEL_HI_RES && a4->hw_wheel) {
input_event(input, usage->type, REL_HWHEEL, value);
+ input_event(input, usage->type, REL_HWHEEL_HI_RES, value * 120);
return 1;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 08d310723e96..210b81a56e1a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -27,7 +27,6 @@
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
-#include <linux/async.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -1311,10 +1310,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
unsigned offset, unsigned n)
{
- if (n > 256) {
- hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
+ if (n > 32) {
+ hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
n, current->comm);
- n = 256;
+ n = 32;
}
return __extract(report, offset, n);
@@ -2362,15 +2361,6 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
- /*
- * Try loading the module for the device before the add, so that we do
- * not first have hid-generic binding only to have it replaced
- * immediately afterwards with a specialized driver.
- */
- if (!current_is_async())
- request_module("hid:b%04Xg%04Xv%08Xp%08X", hdev->bus,
- hdev->group, hdev->vendor, hdev->product);
-
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 8f806f46c278..7795831d37c2 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -606,5 +606,7 @@ static void __exit mousevsc_exit(void)
}
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic HID Driver");
+
module_init(mousevsc_init);
module_exit(mousevsc_exit);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 84e0c78d73cd..eac0c54c5970 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1086,6 +1086,7 @@
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 904a4b0d90f5..e564bff86515 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -113,6 +113,7 @@ enum recvr_type {
recvr_type_dj,
recvr_type_hidpp,
recvr_type_gaming_hidpp,
+ recvr_type_mouse_only,
recvr_type_27mhz,
recvr_type_bluetooth,
};
@@ -864,9 +865,12 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
schedule_work(&djrcv_dev->work);
}
-static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
+static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
+ struct hidpp_event *hidpp_report,
struct dj_workitem *workitem)
{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
workitem->type = WORKITEM_TYPE_PAIRED;
workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
HIDPP_DEVICE_TYPE_MASK;
@@ -880,6 +884,8 @@ static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
break;
case REPORT_TYPE_MOUSE:
workitem->reports_supported |= STD_MOUSE | HIDPP;
+ if (djrcv_dev->type == recvr_type_mouse_only)
+ workitem->reports_supported |= MULTIMEDIA;
break;
}
}
@@ -923,7 +929,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
case 0x01:
device_type = "Bluetooth";
/* Bluetooth connect packet contents is the same as (e)QUAD */
- logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
HIDPP_MANUFACTURER_MASK)) {
hid_info(hdev, "Non Logitech device connected on slot %d\n",
@@ -937,18 +943,18 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
break;
case 0x03:
device_type = "QUAD or eQUAD";
- logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
break;
case 0x04:
device_type = "eQUAD step 4 DJ";
- logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
break;
case 0x05:
device_type = "DFU Lite";
break;
case 0x06:
device_type = "eQUAD step 4 Lite";
- logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
break;
case 0x07:
device_type = "eQUAD step 4 Gaming";
@@ -958,11 +964,11 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
break;
case 0x0a:
device_type = "eQUAD nano Lite";
- logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
break;
case 0x0c:
device_type = "eQUAD Lightspeed";
- logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
break;
}
@@ -1313,7 +1319,8 @@ static int logi_dj_ll_parse(struct hid_device *hid)
if (djdev->reports_supported & STD_MOUSE) {
dbg_hid("%s: sending a mouse descriptor, reports_supported: %llx\n",
__func__, djdev->reports_supported);
- if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp)
+ if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp ||
+ djdev->dj_receiver_dev->type == recvr_type_mouse_only)
rdcat(rdesc, &rsize, mse_high_res_descriptor,
sizeof(mse_high_res_descriptor));
else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
@@ -1556,15 +1563,19 @@ static int logi_dj_raw_event(struct hid_device *hdev,
data[0] = data[1];
data[1] = 0;
}
- /* The 27 MHz mouse-only receiver sends unnumbered mouse data */
+ /*
+ * Mouse-only receivers send unnumbered mouse data. The 27 MHz
+ * receiver uses 6 byte packets, the nano receiver 8 bytes.
+ */
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
- size == 6) {
- u8 mouse_report[7];
+ size <= 8) {
+ u8 mouse_report[9];
/* Prepend report id */
mouse_report[0] = REPORT_TYPE_MOUSE;
- memcpy(mouse_report + 1, data, 6);
- logi_dj_recv_forward_input_report(hdev, mouse_report, 7);
+ memcpy(mouse_report + 1, data, size);
+ logi_dj_recv_forward_input_report(hdev, mouse_report,
+ size + 1);
}
return false;
@@ -1635,6 +1646,7 @@ static int logi_dj_probe(struct hid_device *hdev,
case recvr_type_dj: no_dj_interfaces = 3; break;
case recvr_type_hidpp: no_dj_interfaces = 2; break;
case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break;
+ case recvr_type_mouse_only: no_dj_interfaces = 2; break;
case recvr_type_27mhz: no_dj_interfaces = 2; break;
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
}
@@ -1808,10 +1820,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
.driver_data = recvr_type_dj},
- { /* Logitech Nano (non DJ) receiver */
+ { /* Logitech Nano mouse only receiver */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
- .driver_data = recvr_type_hidpp},
+ .driver_data = recvr_type_mouse_only},
{ /* Logitech Nano (non DJ) receiver */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
@@ -1836,6 +1848,14 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
0xc70a),
.driver_data = recvr_type_bluetooth},
+ { /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc71b),
+ .driver_data = recvr_type_bluetooth},
+ { /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc71c),
+ .driver_data = recvr_type_bluetooth},
{}
};
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 3d2e6d254e7d..cf05816a601f 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3728,6 +3728,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb305),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
+ LDJ_DEVICE(0xb30b),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ LDJ_DEVICE(HID_ANY_ID) },
@@ -3740,6 +3743,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Keyboard MX3200 (Y-RAV80) */
L27MHZ_DEVICE(0x005c),
.driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
+ { /* S510 Media Remote */
+ L27MHZ_DEVICE(0x00fe),
+ .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL },
{ L27MHZ_DEVICE(HID_ANY_ID) },
@@ -3756,6 +3762,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5000 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* MX5500 keyboard over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{}
};
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index c34e972f6296..5df5dd56ecc8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -637,6 +637,13 @@ static void mt_store_field(struct hid_device *hdev,
if (*target != DEFAULT_TRUE &&
*target != DEFAULT_FALSE &&
*target != DEFAULT_ZERO) {
+ if (usage->contactid == DEFAULT_ZERO ||
+ usage->x == DEFAULT_ZERO ||
+ usage->y == DEFAULT_ZERO) {
+ hid_dbg(hdev,
+ "ignoring duplicate usage on incomplete");
+ return;
+ }
usage = mt_allocate_usage(hdev, application);
if (!usage)
return;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 853842448f70..7c6abd7e0979 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -35,6 +35,7 @@
/* device flags */
#define RMI_DEVICE BIT(0)
#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
+#define RMI_DEVICE_OUTPUT_SET_REPORT BIT(2)
/*
* retrieve the ctrl registers
@@ -163,9 +164,19 @@ static int rmi_set_mode(struct hid_device *hdev, u8 mode)
static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
{
+ struct rmi_data *data = hid_get_drvdata(hdev);
int ret;
- ret = hid_hw_output_report(hdev, (void *)report, len);
+ if (data->device_flags & RMI_DEVICE_OUTPUT_SET_REPORT) {
+ /*
+ * Talk to device by using SET_REPORT requests instead.
+ */
+ ret = hid_hw_raw_request(hdev, report[0], report,
+ len, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ } else {
+ ret = hid_hw_output_report(hdev, (void *)report, len);
+ }
+
if (ret < 0) {
dev_err(&hdev->dev, "failed to write hid report (%d)\n", ret);
return ret;
@@ -747,6 +758,8 @@ static const struct hid_device_id rmi_id[] = {
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5),
+ .driver_data = RMI_DEVICE_OUTPUT_SET_REPORT },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index fd1b6eea6d2f..75078c83be1a 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -354,6 +354,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
+ {
+ .ident = "iBall Aer3",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "iBall"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Aer3"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
{ } /* Terminate list */
};
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1f1ed276e388..43f6da357165 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1232,13 +1232,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
/* Add back in missing bits of ID for non-USI pens */
wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
}
- wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
for (i = 0; i < pen_frames; i++) {
unsigned char *frame = &data[i*pen_frame_len + 1];
bool valid = frame[0] & 0x80;
bool prox = frame[0] & 0x40;
bool range = frame[0] & 0x20;
+ bool invert = frame[0] & 0x10;
if (!valid)
continue;
@@ -1247,9 +1247,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->shared->stylus_in_proximity = false;
wacom_exit_report(wacom);
input_sync(pen_input);
+
+ wacom->tool[0] = 0;
+ wacom->id[0] = 0;
+ wacom->serial[0] = 0;
return;
}
+
if (range) {
+ if (!wacom->tool[0]) { /* first in range */
+ /* Going into range select tool */
+ if (invert)
+ wacom->tool[0] = BTN_TOOL_RUBBER;
+ else if (wacom->id[0])
+ wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]);
+ else
+ wacom->tool[0] = BTN_TOOL_PEN;
+ }
+
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
@@ -1271,23 +1286,26 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
get_unaligned_le16(&frame[11]));
}
}
- input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
- if (wacom->features.type == INTUOSP2_BT) {
- input_report_abs(pen_input, ABS_DISTANCE,
- range ? frame[13] : wacom->features.distance_max);
- } else {
- input_report_abs(pen_input, ABS_DISTANCE,
- range ? frame[7] : wacom->features.distance_max);
- }
- input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
- input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
- input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+ if (wacom->tool[0]) {
+ input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+ if (wacom->features.type == INTUOSP2_BT) {
+ input_report_abs(pen_input, ABS_DISTANCE,
+ range ? frame[13] : wacom->features.distance_max);
+ } else {
+ input_report_abs(pen_input, ABS_DISTANCE,
+ range ? frame[7] : wacom->features.distance_max);
+ }
- input_report_key(pen_input, wacom->tool[0], prox);
- input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
- input_report_abs(pen_input, ABS_MISC,
- wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+ input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09);
+ input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
+ input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+
+ input_report_key(pen_input, wacom->tool[0], prox);
+ input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
+ input_report_abs(pen_input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+ }
wacom->shared->stylus_in_proximity = prox;
@@ -1349,11 +1367,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
if (wacom->num_contacts_left <= 0) {
wacom->num_contacts_left = 0;
wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
+ input_sync(touch_input);
}
}
- input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
- input_sync(touch_input);
+ if (wacom->num_contacts_left == 0) {
+ // Be careful that we don't accidentally call input_sync with
+ // only a partial set of fingers processed
+ input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
+ input_sync(touch_input);
+ }
+
}
static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
@@ -1361,7 +1385,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
struct input_dev *pad_input = wacom->pad_input;
unsigned char *data = wacom->data;
- int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
+ int buttons = data[282] | ((data[281] & 0x40) << 2);
int ring = data[285] & 0x7F;
bool ringstatus = data[285] & 0x80;
bool prox = buttons || ringstatus;
@@ -3810,7 +3834,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
int mask, int group)
{
- int button_per_group;
+ int group_button;
/*
* 21UX2 has LED group 1 to the left and LED group 0
@@ -3820,9 +3844,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
if (wacom->wacom_wac.features.type == WACOM_21UX2)
group = 1 - group;
- button_per_group = button_count/wacom->led.count;
+ group_button = group * (button_count/wacom->led.count);
+
+ if (wacom->wacom_wac.features.type == INTUOSP2_BT)
+ group_button = 8;
- return mask & (1 << (group * button_per_group));
+ return mask & (1 << group_button);
}
static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index f4a5ae69bf6a..fa3763e4b3ee 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = {
static struct i2c_adapter ioc_ops = {
.nr = 0,
+ .name = "ioc",
.algo_data = &ioc_data,
};
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index de3fe6e828cb..f50afa8e3cba 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -21,7 +21,6 @@
#include <linux/platform_device.h>
#include <linux/i2c-algo-pca.h>
#include <linux/platform_data/i2c-pca-platform.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -173,7 +172,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = np;
- i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset-gpios", GPIOD_OUT_LOW);
+ i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(i2c->gpio))
return PTR_ERR(i2c->gpio);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 5aeb1dbfaa08..586dd5a46d9f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -47,6 +47,15 @@
#include "arm-smmu-regs.h"
+/*
+ * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
+ * global register space are still, in fact, using a hypervisor to mediate it
+ * by trapping and emulating register accesses. Sadly, some deployed versions
+ * of said trapping code have bugs wherein they go horribly wrong for stores
+ * using r31 (i.e. XZR/WZR) as the source register.
+ */
+#define QCOM_DUMMY_VAL -1
+
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
@@ -411,7 +420,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
{
unsigned int spin_cnt, delay;
- writel_relaxed(0, sync);
+ writel_relaxed(QCOM_DUMMY_VAL, sync);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
@@ -1751,8 +1760,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
}
/* Invalidate the TLB, just in case */
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+ writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+ writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
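The comment above spells out the reasoning for QCOM_DUMMY_VAL: the affected Qualcomm hypervisors mishandle arm64 stores whose source register is XZR/WZR, which is exactly how a compiler may encode a store of literal zero, so the driver writes an arbitrary non-zero value to registers where the written value is irrelevant. A minimal userspace sketch of that substitution; the register and helper below are stand-ins, not the driver's API:

/* The value written to a write-to-trigger register does not matter, so a
 * non-zero constant such as -1 avoids ever storing literal zero (which an
 * arm64 compiler may emit as a store from XZR/WZR). */
#include <stdio.h>
#include <stdint.h>

#define QCOM_DUMMY_VAL ((uint32_t)-1)

static volatile uint32_t fake_sync_reg;   /* stand-in for the MMIO register */
static int sync_triggered;

static void writel_relaxed_stub(uint32_t val, volatile uint32_t *reg)
{
	*reg = val;
	sync_triggered = 1;   /* the hardware only cares that a store happened */
}

int main(void)
{
	writel_relaxed_stub(QCOM_DUMMY_VAL, &fake_sync_reg);
	printf("sync triggered: %d (value written: 0x%x)\n",
	       sync_triggered, (unsigned)fake_sync_reg);
	return 0;
}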
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 162b3236e72c..56297298d6ee 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2504,6 +2504,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
}
}
+ spin_lock(&iommu->lock);
spin_lock_irqsave(&device_domain_lock, flags);
if (dev)
found = find_domain(dev);
@@ -2519,17 +2520,16 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (found) {
spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&iommu->lock);
free_devinfo_mem(info);
/* Caller must free the original domain */
return found;
}
- spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
-
if (ret) {
spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&iommu->lock);
free_devinfo_mem(info);
return NULL;
}
@@ -2539,6 +2539,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev)
dev->archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&iommu->lock);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 2fefeafda437..fe51d8af457f 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -389,7 +389,7 @@ static inline void pasid_set_present(struct pasid_entry *pe)
*/
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
- pasid_set_bits(&pe->val[1], 1 << 23, value);
+ pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}
/*
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f9cacce909d3..9f0a2844371c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -329,7 +329,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
type = "unmanaged\n";
break;
case IOMMU_DOMAIN_DMA:
- type = "DMA";
+ type = "DMA\n";
break;
}
}
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 8f07fa6e1739..268f1b685084 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
struct btree_iter iter;
+ struct bkey preceding_key_on_stack = ZERO_KEY;
+ struct bkey *preceding_key_p = &preceding_key_on_stack;
BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
- m = bch_btree_iter_init(b, &iter, b->ops->is_extents
- ? PRECEDING_KEY(&START_KEY(k))
- : PRECEDING_KEY(k));
+ /*
+ * If k has a preceding key, preceding_key_p will be set to the
+ * address of k's preceding key; otherwise preceding_key() will set
+ * preceding_key_p to NULL.
+ */
+ if (b->ops->is_extents)
+ preceding_key(&START_KEY(k), &preceding_key_p);
+ else
+ preceding_key(k, &preceding_key_p);
+
+ m = bch_btree_iter_init(b, &iter, preceding_key_p);
if (b->ops->insert_fixup(b, k, &iter, replace_key))
return status;
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index bac76aabca6d..c71365e7c1fa 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
return __bch_cut_back(where, k);
}
-#define PRECEDING_KEY(_k) \
-({ \
- struct bkey *_ret = NULL; \
- \
- if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
- _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
- \
- if (!_ret->low) \
- _ret->high--; \
- _ret->low--; \
- } \
- \
- _ret; \
-})
+/*
+ * '*preceding_key_p' points to a memory object used to store the preceding
+ * key of k. If the preceding key does not exist, '*preceding_key_p' is set
+ * to NULL, so the caller of preceding_key() is responsible for the memory
+ * that '*preceding_key_p' points to before preceding_key() is called.
+ * Currently the only caller of preceding_key() is bch_btree_insert_key(),
+ * which passes a pointer to an on-stack variable, so the memory release is
+ * handled by the stack frame itself.
+ */
+static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
+{
+ if (KEY_INODE(k) || KEY_OFFSET(k)) {
+ (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
+ if (!(*preceding_key_p)->low)
+ (*preceding_key_p)->high--;
+ (*preceding_key_p)->low--;
+ } else {
+ (*preceding_key_p) = NULL;
+ }
+}
static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
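The new preceding_key() replaces the old PRECEDING_KEY() macro with an out-parameter convention: the caller passes a pointer to its own storage, and the helper either fills that storage with the preceding key or NULLs the caller's pointer when no preceding key exists. A simplified, hypothetical userspace sketch of the same convention; the key type and the borrow across inode/offset below are invented stand-ins for bcache's packed key words:

/* Fill the caller's storage with the key just before *k, or NULL the
 * caller's pointer if no such key exists. */
#include <stdio.h>
#include <stdint.h>

struct key { uint64_t inode, offset; };

static void preceding(const struct key *k, struct key **out)
{
	if (k->inode || k->offset) {
		**out = *k;                  /* copy into caller's storage */
		if ((*out)->offset == 0)
			(*out)->inode--;     /* borrow from the high word  */
		(*out)->offset--;
	} else {
		*out = NULL;                 /* no preceding key exists    */
	}
}

int main(void)
{
	struct key on_stack = { 0, 0 };
	struct key *p = &on_stack;
	struct key k = { 1, 0 };

	preceding(&k, &p);
	if (p)
		printf("preceding key: %llu:%llu\n",
		       (unsigned long long)p->inode,
		       (unsigned long long)p->offset);
	else
		printf("no preceding key\n");
	return 0;
}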
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 6cd44d3cf906..bfb437ffb13c 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -431,8 +431,13 @@ STORE(bch_cached_dev)
bch_writeback_queue(dc);
}
+ /*
+ * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached to
+ * a cache set; otherwise it doesn't make sense.
+ */
if (attr == &sysfs_writeback_percent)
- if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
+ if ((dc->disk.c != NULL) &&
+ (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index ecd3277f2e89..6351a97f3d18 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -905,7 +905,7 @@ static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
"DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
fe->dvb->num, fe->id);
- dprintk("frequency interval: tuner: %u...%u, frontend: %u...%u",
+ dev_dbg(fe->dvb->device, "frequency interval: tuner: %u...%u, frontend: %u...%u",
tuner_min, tuner_max, frontend_min, frontend_max);
/* If the standard is for satellite, convert frequencies to kHz */
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 04cc80b106d6..b70551e296b7 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -560,7 +560,7 @@ struct hfi_capability {
struct hfi_capabilities {
u32 num_capabilities;
- struct hfi_capability *data;
+ struct hfi_capability data[];
};
#define HFI_DEBUG_MSG_LOW 0x01
@@ -717,7 +717,7 @@ struct hfi_profile_level {
struct hfi_profile_level_supported {
u32 profile_count;
- struct hfi_profile_level *profile_level;
+ struct hfi_profile_level profile_level[];
};
struct hfi_quality_vs_speed {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 28cb44c61d4a..24d7fe7c74ed 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -303,11 +303,19 @@ static const struct attribute_group *pmem_attribute_groups[] = {
NULL,
};
-static void pmem_release_queue(void *q)
+static void __pmem_release_queue(struct percpu_ref *ref)
{
+ struct request_queue *q;
+
+ q = container_of(ref, typeof(*q), q_usage_counter);
blk_cleanup_queue(q);
}
+static void pmem_release_queue(void *ref)
+{
+ __pmem_release_queue(ref);
+}
+
static void pmem_freeze_queue(struct percpu_ref *ref)
{
struct request_queue *q;
@@ -399,12 +407,10 @@ static int pmem_attach_disk(struct device *dev,
if (!q)
return -ENOMEM;
- if (devm_add_action_or_reset(dev, pmem_release_queue, q))
- return -ENOMEM;
-
pmem->pfn_flags = PFN_DEV;
pmem->pgmap.ref = &q->q_usage_counter;
pmem->pgmap.kill = pmem_freeze_queue;
+ pmem->pgmap.cleanup = __pmem_release_queue;
if (is_nd_pfn(dev)) {
if (setup_pagemap_fsdax(dev, &pmem->pgmap))
return -ENOMEM;
@@ -425,6 +431,9 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags |= PFN_MAP;
memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
} else {
+ if (devm_add_action_or_reset(dev, pmem_release_queue,
+ &q->q_usage_counter))
+ return -ENOMEM;
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
memcpy(&bb_res, &nsio->res, sizeof(bb_res));
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 742928d0053e..a98126ad9c3a 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -20,12 +20,16 @@
#include <linux/seq_buf.h>
struct pci_p2pdma {
- struct percpu_ref devmap_ref;
- struct completion devmap_ref_done;
struct gen_pool *pool;
bool p2pmem_published;
};
+struct p2pdma_pagemap {
+ struct dev_pagemap pgmap;
+ struct percpu_ref ref;
+ struct completion ref_done;
+};
+
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -74,41 +78,45 @@ static const struct attribute_group p2pmem_group = {
.name = "p2pmem",
};
+static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref)
+{
+ return container_of(ref, struct p2pdma_pagemap, ref);
+}
+
static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
{
- struct pci_p2pdma *p2p =
- container_of(ref, struct pci_p2pdma, devmap_ref);
+ struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
- complete_all(&p2p->devmap_ref_done);
+ complete(&p2p_pgmap->ref_done);
}
static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
{
- /*
- * pci_p2pdma_add_resource() may be called multiple times
- * by a driver and may register the percpu_kill devm action multiple
- * times. We only want the first action to actually kill the
- * percpu_ref.
- */
- if (percpu_ref_is_dying(ref))
- return;
-
percpu_ref_kill(ref);
}
+static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
+{
+ struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
+
+ wait_for_completion(&p2p_pgmap->ref_done);
+ percpu_ref_exit(&p2p_pgmap->ref);
+}
+
static void pci_p2pdma_release(void *data)
{
struct pci_dev *pdev = data;
+ struct pci_p2pdma *p2pdma = pdev->p2pdma;
- if (!pdev->p2pdma)
+ if (!p2pdma)
return;
- wait_for_completion(&pdev->p2pdma->devmap_ref_done);
- percpu_ref_exit(&pdev->p2pdma->devmap_ref);
+ /* Flush and disable pci_alloc_p2p_mem() */
+ pdev->p2pdma = NULL;
+ synchronize_rcu();
- gen_pool_destroy(pdev->p2pdma->pool);
+ gen_pool_destroy(p2pdma->pool);
sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
- pdev->p2pdma = NULL;
}
static int pci_p2pdma_setup(struct pci_dev *pdev)
@@ -124,12 +132,6 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
if (!p2p->pool)
goto out;
- init_completion(&p2p->devmap_ref_done);
- error = percpu_ref_init(&p2p->devmap_ref,
- pci_p2pdma_percpu_release, 0, GFP_KERNEL);
- if (error)
- goto out_pool_destroy;
-
error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
if (error)
goto out_pool_destroy;
@@ -163,6 +165,7 @@ out:
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
u64 offset)
{
+ struct p2pdma_pagemap *p2p_pgmap;
struct dev_pagemap *pgmap;
void *addr;
int error;
@@ -185,18 +188,27 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
return error;
}
- pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
- if (!pgmap)
+ p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
+ if (!p2p_pgmap)
return -ENOMEM;
+ init_completion(&p2p_pgmap->ref_done);
+ error = percpu_ref_init(&p2p_pgmap->ref,
+ pci_p2pdma_percpu_release, 0, GFP_KERNEL);
+ if (error)
+ goto pgmap_free;
+
+ pgmap = &p2p_pgmap->pgmap;
+
pgmap->res.start = pci_resource_start(pdev, bar) + offset;
pgmap->res.end = pgmap->res.start + size - 1;
pgmap->res.flags = pci_resource_flags(pdev, bar);
- pgmap->ref = &pdev->p2pdma->devmap_ref;
+ pgmap->ref = &p2p_pgmap->ref;
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
pci_resource_start(pdev, bar);
pgmap->kill = pci_p2pdma_percpu_kill;
+ pgmap->cleanup = pci_p2pdma_percpu_cleanup;
addr = devm_memremap_pages(&pdev->dev, pgmap);
if (IS_ERR(addr)) {
@@ -204,19 +216,22 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
goto pgmap_free;
}
- error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
+ error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
pci_bus_address(pdev, bar) + offset,
- resource_size(&pgmap->res), dev_to_node(&pdev->dev));
+ resource_size(&pgmap->res), dev_to_node(&pdev->dev),
+ &p2p_pgmap->ref);
if (error)
- goto pgmap_free;
+ goto pages_free;
pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
&pgmap->res);
return 0;
+pages_free:
+ devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
- devm_kfree(&pdev->dev, pgmap);
+ devm_kfree(&pdev->dev, p2p_pgmap);
return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
@@ -585,19 +600,30 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
*/
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
- void *ret;
+ void *ret = NULL;
+ struct percpu_ref *ref;
+ /*
+ * Pairs with synchronize_rcu() in pci_p2pdma_release() to
+ * ensure pdev->p2pdma is non-NULL for the duration of the
+ * read-lock.
+ */
+ rcu_read_lock();
if (unlikely(!pdev->p2pdma))
- return NULL;
-
- if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
- return NULL;
-
- ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);
+ goto out;
- if (unlikely(!ret))
- percpu_ref_put(&pdev->p2pdma->devmap_ref);
+ ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
+ (void **) &ref);
+ if (!ret)
+ goto out;
+ if (unlikely(!percpu_ref_tryget_live(ref))) {
+ gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
+ ret = NULL;
+ goto out;
+ }
+out:
+ rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
@@ -610,8 +636,11 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
*/
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
- gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
- percpu_ref_put(&pdev->p2pdma->devmap_ref);
+ struct percpu_ref *ref;
+
+ gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
+ (void **) &ref);
+ percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);
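The comment added in pci_alloc_p2pmem() describes a generic publish/retract pattern: a reader that sees a non-NULL pdev->p2pdma inside rcu_read_lock() is guaranteed the structure stays valid until rcu_read_unlock(), because the release path first NULLs the pointer and only then waits in synchronize_rcu() before tearing the pool down. A hedged userspace sketch of the same pattern, assuming the liburcu library is available (link with -lurcu); the pool type and helper names are invented:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct resource_pool { int dummy; };

static struct resource_pool *pool;       /* stands in for pdev->p2pdma */

static int alloc_from_pool(void)
{
	int ret = -1;

	rcu_read_lock();
	if (rcu_dereference(pool))
		ret = 0;  /* pool remains valid until rcu_read_unlock() */
	rcu_read_unlock();
	return ret;
}

static void release_pool(void)
{
	struct resource_pool *old = pool;

	rcu_assign_pointer(pool, NULL);  /* flush and disable new users */
	synchronize_rcu();               /* wait for in-flight readers  */
	free(old);
}

int main(void)
{
	rcu_register_thread();
	pool = calloc(1, sizeof(*pool));
	printf("alloc: %d\n", alloc_from_pool());
	release_pool();
	printf("alloc after release: %d\n", alloc_from_pool());
	rcu_unregister_thread();
	return 0;
}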
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 687ce6817d0d..f85a1b9d129b 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -694,6 +694,7 @@ static int mlxreg_hotplug_remove(struct platform_device *pdev)
/* Clean interrupts setup. */
mlxreg_hotplug_unset_irq(priv);
+ devm_free_irq(&pdev->dev, priv->irq, priv);
return 0;
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 81642102bf65..8d9e30dbb5af 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -65,10 +65,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
static struct quirk_entry quirk_asus_unknown = {
.wapf = 0,
+ .wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_q500a = {
.i8042_filter = asus_q500a_i8042_filter,
+ .wmi_backlight_set_devstate = true,
};
/*
@@ -79,26 +81,32 @@ static struct quirk_entry quirk_asus_q500a = {
static struct quirk_entry quirk_asus_x55u = {
.wapf = 4,
.wmi_backlight_power = true,
+ .wmi_backlight_set_devstate = true,
.no_display_toggle = true,
};
static struct quirk_entry quirk_asus_wapf4 = {
.wapf = 4,
+ .wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_x200ca = {
.wapf = 2,
+ .wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_ux303ub = {
.wmi_backlight_native = true,
+ .wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_x550lb = {
+ .wmi_backlight_set_devstate = true,
.xusb2pr = 0x01D9,
};
static struct quirk_entry quirk_asus_forceals = {
+ .wmi_backlight_set_devstate = true,
.wmi_force_als_set = true,
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 3e4336025e8f..9b18a184e0aa 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2146,7 +2146,7 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
- } else
+ } else if (asus->driver->quirks->wmi_backlight_set_devstate)
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
if (asus_wmi_has_fnlock_key(asus)) {
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 0930be770688..4f31b68642a0 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -31,6 +31,7 @@ struct quirk_entry {
bool store_backlight_power;
bool wmi_backlight_power;
bool wmi_backlight_native;
+ bool wmi_backlight_set_devstate;
bool wmi_force_als_set;
int wapf;
/*
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 06cd7e818ed5..a0d0cecff55f 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -76,12 +76,24 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct platform_device *device = context;
struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
unsigned int val = !(event & 1); /* Even=press, Odd=release */
- const struct key_entry *ke_rel;
+ const struct key_entry *ke, *ke_rel;
bool autorelease;
if (priv->wakeup_mode) {
- if (sparse_keymap_entry_from_scancode(priv->input_dev, event)) {
+ ke = sparse_keymap_entry_from_scancode(priv->input_dev, event);
+ if (ke) {
pm_wakeup_hard_event(&device->dev);
+
+ /*
+ * Switch events like tablet mode will wake the device
+ * and report the new switch position to the input
+ * subsystem.
+ */
+ if (ke->type == KE_SW)
+ sparse_keymap_report_event(priv->input_dev,
+ event,
+ val,
+ 0);
return;
}
goto out_unknown;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index cee039f57499..983f02b5b106 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -2032,7 +2032,7 @@ static int __init mlxplat_init(void)
for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
priv->pdev_mux[i] = platform_device_register_resndata(
- &mlxplat_dev->dev,
+ &priv->pdev_i2c->dev,
"i2c-mux-reg", i, NULL,
0, &mlxplat_mux_data[i],
sizeof(mlxplat_mux_data[i]));
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 88e4f3ff0cb8..673f8a128397 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -2,6 +2,7 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
+#include <linux/workqueue.h>
#include <asm/mce.h>
@@ -123,16 +124,12 @@ static u64 dfs_pfn;
/* Amount of errors after which we offline */
static unsigned int count_threshold = COUNT_MASK;
-/*
- * The timer "decays" element count each timer_interval which is 24hrs by
- * default.
- */
-
-#define CEC_TIMER_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */
-#define CEC_TIMER_MIN_INTERVAL 1 * 60 * 60 /* 1h */
-#define CEC_TIMER_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */
-static struct timer_list cec_timer;
-static u64 timer_interval = CEC_TIMER_DEFAULT_INTERVAL;
+/* Each element "decays" each decay_interval which is 24hrs by default. */
+#define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */
+#define CEC_DECAY_MIN_INTERVAL 1 * 60 * 60 /* 1h */
+#define CEC_DECAY_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */
+static struct delayed_work cec_work;
+static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL;
/*
* Decrement decay value. We're using DECAY_BITS bits to denote decay of an
@@ -160,20 +157,21 @@ static void do_spring_cleaning(struct ce_array *ca)
/*
* @interval in seconds
*/
-static void cec_mod_timer(struct timer_list *t, unsigned long interval)
+static void cec_mod_work(unsigned long interval)
{
unsigned long iv;
- iv = interval * HZ + jiffies;
-
- mod_timer(t, round_jiffies(iv));
+ iv = interval * HZ;
+ mod_delayed_work(system_wq, &cec_work, round_jiffies(iv));
}
-static void cec_timer_fn(struct timer_list *unused)
+static void cec_work_fn(struct work_struct *work)
{
+ mutex_lock(&ce_mutex);
do_spring_cleaning(&ce_arr);
+ mutex_unlock(&ce_mutex);
- cec_mod_timer(&cec_timer, timer_interval);
+ cec_mod_work(decay_interval);
}
/*
@@ -183,32 +181,38 @@ static void cec_timer_fn(struct timer_list *unused)
*/
static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
{
+ int min = 0, max = ca->n - 1;
u64 this_pfn;
- int min = 0, max = ca->n;
- while (min < max) {
- int tmp = (max + min) >> 1;
+ while (min <= max) {
+ int i = (min + max) >> 1;
- this_pfn = PFN(ca->array[tmp]);
+ this_pfn = PFN(ca->array[i]);
if (this_pfn < pfn)
- min = tmp + 1;
+ min = i + 1;
else if (this_pfn > pfn)
- max = tmp;
- else {
- min = tmp;
- break;
+ max = i - 1;
+ else if (this_pfn == pfn) {
+ if (to)
+ *to = i;
+
+ return i;
}
}
+ /*
+ * When the loop terminates without finding @pfn, min holds the index of
+ * the element slot where the new @pfn should be inserted. The loop
+ * terminates when min > max, at which point the min index points to the
+ * larger element and the max index to the smaller element, with the new
+ * @pfn belonging in between.
+ *
+ * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3.
+ */
if (to)
*to = min;
- this_pfn = PFN(ca->array[min]);
-
- if (this_pfn == pfn)
- return min;
-
return -ENOKEY;
}
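The rewritten __find_elem() is a textbook min <= max binary search: on a hit it returns the index, and on a miss the loop exits with min at exactly the slot where @pfn would have to be inserted, which is what the caller receives through *to. A standalone sketch of the same search over a made-up sorted array:

#include <stdio.h>
#include <stdint.h>

/* Return the index of pfn in the sorted array a[0..n-1], or -1 if absent;
 * in both cases *to receives the matching slot or the insertion point. */
static int find_elem(const uint64_t *a, int n, uint64_t pfn, int *to)
{
	int min = 0, max = n - 1;

	while (min <= max) {
		int i = (min + max) >> 1;

		if (a[i] < pfn)
			min = i + 1;
		else if (a[i] > pfn)
			max = i - 1;
		else {
			if (to)
				*to = i;
			return i;
		}
	}

	if (to)
		*to = min;   /* insertion point for a missing pfn */
	return -1;
}

int main(void)
{
	const uint64_t pfns[] = { 10, 20, 30, 40 };
	int to;

	find_elem(pfns, 4, 30, &to);
	printf("found at index %d\n", to);      /* 2 */
	find_elem(pfns, 4, 25, &to);
	printf("insert at index %d\n", to);     /* 2 */
	return 0;
}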
@@ -374,15 +378,15 @@ static int decay_interval_set(void *data, u64 val)
{
*(u64 *)data = val;
- if (val < CEC_TIMER_MIN_INTERVAL)
+ if (val < CEC_DECAY_MIN_INTERVAL)
return -EINVAL;
- if (val > CEC_TIMER_MAX_INTERVAL)
+ if (val > CEC_DECAY_MAX_INTERVAL)
return -EINVAL;
- timer_interval = val;
+ decay_interval = val;
- cec_mod_timer(&cec_timer, timer_interval);
+ cec_mod_work(decay_interval);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n");
@@ -426,7 +430,7 @@ static int array_dump(struct seq_file *m, void *v)
seq_printf(m, "Flags: 0x%x\n", ca->flags);
- seq_printf(m, "Timer interval: %lld seconds\n", timer_interval);
+ seq_printf(m, "Decay interval: %lld seconds\n", decay_interval);
seq_printf(m, "Decays: %lld\n", ca->decays_done);
seq_printf(m, "Action threshold: %d\n", count_threshold);
@@ -472,7 +476,7 @@ static int __init create_debugfs_nodes(void)
}
decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d,
- &timer_interval, &decay_interval_ops);
+ &decay_interval, &decay_interval_ops);
if (!decay) {
pr_warn("Error creating decay_interval debugfs node!\n");
goto err;
@@ -508,8 +512,8 @@ void __init cec_init(void)
if (create_debugfs_nodes())
return;
- timer_setup(&cec_timer, cec_timer_fn, 0);
- cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL);
+ INIT_DELAYED_WORK(&cec_work, cec_work_fn);
+ schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL);
pr_info("Correctable Errors collector initialized.\n");
}
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index a1b7fab91dd4..d2a8f69b2665 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -403,12 +403,12 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
/* common for all regulators */
tps->mfd = tps6507x_dev;
- for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
+ for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++) {
/* Register the regulators */
tps->info[i] = info;
- if (init_data && init_data->driver_data) {
+ if (init_data && init_data[i].driver_data) {
struct tps6507x_reg_platform_data *data =
- init_data->driver_data;
+ init_data[i].driver_data;
info->defdcdc_default = data->defdcdc_default;
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1bef1da273c2..8068520cf89e 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4940,7 +4940,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
curr_sg->reserved[0] = 0;
curr_sg->reserved[1] = 0;
curr_sg->reserved[2] = 0;
- curr_sg->chain_indicator = 0x80;
+ curr_sg->chain_indicator = IOACCEL2_CHAIN;
curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
}
@@ -4957,6 +4957,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
curr_sg++;
}
+ /*
+ * Set the last s/g element bit
+ */
+ (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
+
switch (cmd->sc_data_direction) {
case DMA_TO_DEVICE:
cp->direction &= ~IOACCEL2_DIRECTION_MASK;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 21a726e2eec6..f6afca4b2319 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -517,6 +517,7 @@ struct ioaccel2_sg_element {
u8 reserved[3];
u8 chain_indicator;
#define IOACCEL2_CHAIN 0x80
+#define IOACCEL2_LAST_SG 0x40
};
/*
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index dad566bfe372..d84e22dd6f9f 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -406,7 +406,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
if (ret)
spi_master_put(master);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 4c71df93a6f6..1d9b33aa1a3b 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -428,7 +428,6 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
}
m->status = status;
- spi_finalize_current_message(master);
if (status || !cs_change) {
ndelay(nsecs);
@@ -436,6 +435,7 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
}
fsl_spi_setup_transfer(spi, NULL);
+ spi_finalize_current_message(master);
return 0;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5e75944ad5d1..5e4654032bfa 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1181,10 +1181,10 @@ out:
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
- spi_finalize_current_message(ctlr);
-
spi_res_release(ctlr, msg);
+ spi_finalize_current_message(ctlr);
+
return ret;
}
@@ -1307,10 +1307,15 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
ret = ctlr->prepare_transfer_hardware(ctlr);
if (ret) {
dev_err(&ctlr->dev,
- "failed to prepare transfer hardware\n");
+ "failed to prepare transfer hardware: %d\n",
+ ret);
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
+
+ ctlr->cur_msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
mutex_unlock(&ctlr->io_mutex);
return;
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 6082b008969b..6b6413073584 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -215,6 +215,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
{ USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech HD Webcam C270 */
+ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 16ffd9fd9361..bff48a8a1984 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -835,19 +835,22 @@ static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
* with corresponding information based on transfer data.
*/
static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
- struct usb_request *ureq,
- unsigned int offset,
+ dma_addr_t dma_buff,
unsigned int len)
{
+ struct usb_request *ureq = NULL;
struct dwc2_dma_desc *desc = hs_ep->desc_list;
struct scatterlist *sg;
int i;
u8 desc_count = 0;
+ if (hs_ep->req)
+ ureq = &hs_ep->req->req;
+
/* non-DMA sg buffer */
- if (!ureq->num_sgs) {
+ if (!ureq || !ureq->num_sgs) {
dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
- ureq->dma + offset, len, true);
+ dma_buff, len, true);
return;
}
@@ -1135,7 +1138,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
offset = ureq->actual;
/* Fill DDMA chain entries */
- dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq, offset,
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
length);
/* write descriptor chain address to control register */
@@ -2037,12 +2040,13 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
index);
if (using_desc_dma(hsotg)) {
+ /* No specific buffer needed for ep0 ZLP */
+ dma_addr_t dma = hs_ep->desc_list_dma;
+
if (!index)
dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
- /* Not specific buffer needed for ep0 ZLP */
- dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &hs_ep->desc_list,
- hs_ep->desc_list_dma, 0, true);
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
} else {
dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
DXEPTSIZ_XFERSIZE(0),
@@ -2417,6 +2421,10 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
dwc2_gadget_incr_frame_num(hs_ep);
}
+ /* Set actual frame number for completed transfers */
+ if (!using_desc_dma(hsotg) && hs_ep->isochronous)
+ req->frame_number = hsotg->frame_number;
+
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b50ec3714fd8..2192a2873c7c 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2480,8 +2480,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
return;
/* Restore urb->transfer_buffer from the end of the allocated area */
- memcpy(&stored_xfer_buffer, urb->transfer_buffer +
- urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
+ memcpy(&stored_xfer_buffer,
+ PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
+ dma_get_cache_alignment()),
+ sizeof(urb->transfer_buffer));
if (usb_urb_dir_in(urb)) {
if (usb_pipeisoc(urb->pipe))
@@ -2513,6 +2515,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
* DMA
*/
kmalloc_size = urb->transfer_buffer_length +
+ (dma_get_cache_alignment() - 1) +
sizeof(urb->transfer_buffer);
kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
@@ -2523,7 +2526,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
* Position value of original urb->transfer_buffer pointer to the end
* of allocation for later referencing
*/
- memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+ memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
+ dma_get_cache_alignment()),
&urb->transfer_buffer, sizeof(urb->transfer_buffer));
if (usb_urb_dir_out(urb))
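Both hunks above have to agree on where the saved urb->transfer_buffer pointer lives: it is stored past the payload, rounded up to the DMA cache alignment, so the allocation reserves worst-case padding and the restore path applies the same PTR_ALIGN() arithmetic. A small userspace sketch of that save/restore layout, with 64 standing in for dma_get_cache_alignment() and ptr_align() for the kernel's PTR_ALIGN():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define CACHE_ALIGN 64

static void *ptr_align(void *p, size_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(uintptr_t)(a - 1));
}

int main(void)
{
	char payload[100] = "original buffer";
	char *orig = payload;
	size_t len = sizeof(payload);

	/* Over-allocate: payload + worst-case padding + room for a pointer. */
	char *bounce = malloc(len + (CACHE_ALIGN - 1) + sizeof(orig));
	if (!bounce)
		return 1;

	memcpy(bounce, payload, len);                        /* OUT data      */
	memcpy(ptr_align(bounce + len, CACHE_ALIGN),
	       &orig, sizeof(orig));                         /* stash pointer */

	/* Later, on completion, recover the original pointer the same way. */
	char *restored;
	memcpy(&restored, ptr_align(bounce + len, CACHE_ALIGN),
	       sizeof(restored));
	printf("restored == orig: %d\n", restored == orig);  /* prints 1      */

	free(bounce);
	return 0;
}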
@@ -2608,7 +2612,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
chan->speed = qh->dev_speed;
- chan->max_packet = dwc2_max_packet(qh->maxp);
+ chan->max_packet = qh->maxp;
chan->xfer_started = 0;
chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
@@ -2686,7 +2690,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
* This value may be modified when the transfer is started
* to reflect the actual transfer length
*/
- chan->multi_count = dwc2_hb_mult(qh->maxp);
+ chan->multi_count = qh->maxp_mult;
if (hsotg->params.dma_desc_enable) {
chan->desc_list_addr = qh->desc_list_dma;
@@ -3806,19 +3810,21 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb, u8 dev_addr,
- u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps)
+ u8 ep_num, u8 ep_type, u8 ep_dir,
+ u16 maxp, u16 maxp_mult)
{
if (dbg_perio() ||
ep_type == USB_ENDPOINT_XFER_BULK ||
ep_type == USB_ENDPOINT_XFER_CONTROL)
dev_vdbg(hsotg->dev,
- "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
- dev_addr, ep_num, ep_dir, ep_type, mps);
+ "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
+ dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);
urb->pipe_info.dev_addr = dev_addr;
urb->pipe_info.ep_num = ep_num;
urb->pipe_info.pipe_type = ep_type;
urb->pipe_info.pipe_dir = ep_dir;
- urb->pipe_info.mps = mps;
+ urb->pipe_info.maxp = maxp;
+ urb->pipe_info.maxp_mult = maxp_mult;
}
/*
@@ -3909,8 +3915,9 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
"IN" : "OUT");
dev_dbg(hsotg->dev,
- " Max packet size: %d\n",
- dwc2_hcd_get_mps(&urb->pipe_info));
+ " Max packet size: %d (%d mult)\n",
+ dwc2_hcd_get_maxp(&urb->pipe_info),
+ dwc2_hcd_get_maxp_mult(&urb->pipe_info));
dev_dbg(hsotg->dev,
" transfer_buffer: %p\n",
urb->buf);
@@ -4510,8 +4517,10 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
}
dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
- dev_vdbg(hsotg->dev, " Max packet size: %d\n",
- usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+ dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n",
+ usb_endpoint_maxp(&urb->ep->desc),
+ usb_endpoint_maxp_mult(&urb->ep->desc));
+
dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
urb->transfer_buffer_length);
dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
@@ -4594,8 +4603,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe), ep_type,
usb_pipein(urb->pipe),
- usb_maxpacket(urb->dev, urb->pipe,
- !(usb_pipein(urb->pipe))));
+ usb_endpoint_maxp(&ep->desc),
+ usb_endpoint_maxp_mult(&ep->desc));
buf = urb->transfer_buffer;
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index c089ffa1f0a8..ce6445a06588 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -171,7 +171,8 @@ struct dwc2_hcd_pipe_info {
u8 ep_num;
u8 pipe_type;
u8 pipe_dir;
- u16 mps;
+ u16 maxp;
+ u16 maxp_mult;
};
struct dwc2_hcd_iso_packet_desc {
@@ -264,6 +265,7 @@ struct dwc2_hs_transfer_time {
* - USB_ENDPOINT_XFER_ISOC
* @ep_is_in: Endpoint direction
* @maxp: Value from wMaxPacketSize field of Endpoint Descriptor
+ * @maxp_mult: Multiplier for maxp
* @dev_speed: Device speed. One of the following values:
* - USB_SPEED_LOW
* - USB_SPEED_FULL
@@ -340,6 +342,7 @@ struct dwc2_qh {
u8 ep_type;
u8 ep_is_in;
u16 maxp;
+ u16 maxp_mult;
u8 dev_speed;
u8 data_toggle;
u8 ping_state;
@@ -503,9 +506,14 @@ static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
return pipe->pipe_type;
}
-static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
+static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->maxp;
+}
+
+static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
{
- return pipe->mps;
+ return pipe->maxp_mult;
}
static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
@@ -620,12 +628,6 @@ static inline bool dbg_urb(struct urb *urb)
static inline bool dbg_perio(void) { return false; }
#endif
-/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
-#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03))
-
-/* Packet size for any kind of endpoint descriptor */
-#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
-
/*
* Returns true if frame1 index is greater than frame2 index. The comparison
* is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
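The removed dwc2_hb_mult()/dwc2_max_packet() macros show how a raw wMaxPacketSize value packs both quantities: bits 10:0 carry the packet size and bits 12:11 the additional-transactions multiplier, which the driver now tracks separately as maxp and maxp_mult. A standalone sketch of that decoding using the same bit layout:

#include <stdio.h>
#include <stdint.h>

/* Packet size for any kind of endpoint descriptor: bits 10:0. */
static unsigned int maxp(uint16_t wMaxPacketSize)
{
	return wMaxPacketSize & 0x07ff;
}

/* High-bandwidth multiplier for high-speed endpoints: bits 12:11, plus 1. */
static unsigned int maxp_mult(uint16_t wMaxPacketSize)
{
	return 1 + ((wMaxPacketSize >> 11) & 0x03);
}

int main(void)
{
	uint16_t w = 0x1400;   /* 1024-byte packets, 3 transactions per uframe */

	printf("maxp=%u mult=%u bytes per uframe=%u\n",
	       maxp(w), maxp_mult(w), maxp(w) * maxp_mult(w));
	return 0;
}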
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 88b5dcf3aefc..a052d39b4375 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -1617,8 +1617,9 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
dev_err(hsotg->dev, " Speed: %s\n", speed);
- dev_err(hsotg->dev, " Max packet size: %d\n",
- dwc2_hcd_get_mps(&urb->pipe_info));
+ dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n",
+ dwc2_hcd_get_maxp(&urb->pipe_info),
+ dwc2_hcd_get_maxp_mult(&urb->pipe_info));
dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
urb->buf, (unsigned long)urb->dma);
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index ea3aa640c15c..68bbac64b753 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -708,7 +708,7 @@ static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
- int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
+ int bytecount = qh->maxp_mult * qh->maxp;
int ls_search_slice;
int err = 0;
int host_interval_in_sched;
@@ -1332,7 +1332,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
u32 max_channel_xfer_size;
int status = 0;
- max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
+ max_xfer_size = qh->maxp * qh->maxp_mult;
max_channel_xfer_size = hsotg->params.max_transfer_size;
if (max_xfer_size > max_channel_xfer_size) {
@@ -1517,8 +1517,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
dev_speed != USB_SPEED_HIGH);
- int maxp = dwc2_hcd_get_mps(&urb->pipe_info);
- int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp);
+ int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
+ int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
+ int bytecount = maxp_mult * maxp;
char *speed, *type;
/* Initialize QH */
@@ -1531,6 +1532,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
qh->data_toggle = DWC2_HC_PID_DATA0;
qh->maxp = maxp;
+ qh->maxp_mult = maxp_mult;
INIT_LIST_HEAD(&qh->qtd_list);
INIT_LIST_HEAD(&qh->qh_list_entry);
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 263804d154a7..00e3f66836a9 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1342,12 +1342,15 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
static int fusb300_remove(struct platform_device *pdev)
{
struct fusb300 *fusb300 = platform_get_drvdata(pdev);
+ int i;
usb_del_gadget_udc(&fusb300->gadget);
iounmap(fusb300->reg);
free_irq(platform_get_irq(pdev, 0), fusb300);
fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+ kfree(fusb300->ep[i]);
kfree(fusb300);
return 0;
@@ -1491,6 +1494,8 @@ clean_up:
if (fusb300->ep0_req)
fusb300_free_request(&fusb300->ep[0]->ep,
fusb300->ep0_req);
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+ kfree(fusb300->ep[i]);
kfree(fusb300);
}
if (reg)
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index d8f1c60793ed..5f1b14f3e5a0 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -937,8 +937,7 @@ static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
dma_addr_t dma;
struct lpc32xx_usbd_dd_gad *dd;
- dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
- udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
+ dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma);
if (dd)
dd->this_dma = dma;
@@ -3070,9 +3069,9 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
}
udc->udp_baseaddr = devm_ioremap_resource(dev, res);
- if (!udc->udp_baseaddr) {
+ if (IS_ERR(udc->udp_baseaddr)) {
dev_err(udc->dev, "IO map failure\n");
- return -ENOMEM;
+ return PTR_ERR(udc->udp_baseaddr);
}
/* Get USB device clock */
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 1b1bb0ad40c3..6fa16ab31e2e 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -63,6 +63,7 @@
#define ANADIG_USB1_CHRG_DETECT_SET 0x1b4
#define ANADIG_USB1_CHRG_DETECT_CLR 0x1b8
+#define ANADIG_USB2_CHRG_DETECT_SET 0x214
#define ANADIG_USB1_CHRG_DETECT_EN_B BIT(20)
#define ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B BIT(19)
#define ANADIG_USB1_CHRG_DETECT_CHK_CONTACT BIT(18)
@@ -250,6 +251,19 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
if (mxs_phy->data->flags & MXS_PHY_NEED_IP_FIX)
writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET);
+ if (mxs_phy->regmap_anatop) {
+ unsigned int reg = mxs_phy->port_id ?
+ ANADIG_USB1_CHRG_DETECT_SET :
+ ANADIG_USB2_CHRG_DETECT_SET;
+ /*
+ * The external charger detector needs to be disabled,
+ * or the signal at DP will be poor
+ */
+ regmap_write(mxs_phy->regmap_anatop, reg,
+ ANADIG_USB1_CHRG_DETECT_EN_B |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+ }
+
mxs_phy_tx_init(mxs_phy);
return 0;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 83869065b802..a0aaf0635359 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1171,6 +1171,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
@@ -1772,6 +1776,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
.driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
+ .driver_info = RSVD(7) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 55122ac84518..d7abde14b3cf 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+ { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 559941ca884d..b0175f17d1a2 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -155,3 +155,6 @@
#define SMART_VENDOR_ID 0x0b8c
#define SMART_PRODUCT_ID 0x2303
+/* Allied Telesis VT-Kit3 */
+#define AT_VENDOR_ID 0x0caa
+#define AT_VTKIT3_PRODUCT_ID 0x3001
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index 6b2140f966ef..7e14c2d7cf73 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -17,6 +17,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
"USB Card Reader",
USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
"Realtek",
"USB Card Reader",
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 76299b6ff06d..74cb3c2ecb34 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -192,7 +192,7 @@ EXPORT_SYMBOL_GPL(typec_altmode_vdm);
const struct typec_altmode *
typec_altmode_get_partner(struct typec_altmode *adev)
{
- return &to_altmode(adev)->partner->adev;
+ return adev ? &to_altmode(adev)->partner->adev : NULL;
}
EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 9d46aa9e4e35..bf63074675fc 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -862,8 +862,10 @@ static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
not_signed_fw:
wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
- if (!wr_buf)
- return -ENOMEM;
+ if (!wr_buf) {
+ err = -ENOMEM;
+ goto release_fw;
+ }
err = ccg_cmd_enter_flashing(uc);
if (err)
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index 3cc1a05fde1c..ae23151442cb 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -102,56 +102,35 @@ static void mdev_put_parent(struct mdev_parent *parent)
kref_put(&parent->ref, mdev_release_parent);
}
-static int mdev_device_create_ops(struct kobject *kobj,
- struct mdev_device *mdev)
+/* Caller must hold parent unreg_sem read or write lock */
+static void mdev_device_remove_common(struct mdev_device *mdev)
{
- struct mdev_parent *parent = mdev->parent;
- int ret;
-
- ret = parent->ops->create(kobj, mdev);
- if (ret)
- return ret;
-
- ret = sysfs_create_groups(&mdev->dev.kobj,
- parent->ops->mdev_attr_groups);
- if (ret)
- parent->ops->remove(mdev);
-
- return ret;
-}
-
-/*
- * mdev_device_remove_ops gets called from sysfs's 'remove' and when parent
- * device is being unregistered from mdev device framework.
- * - 'force_remove' is set to 'false' when called from sysfs's 'remove' which
- * indicates that if the mdev device is active, used by VMM or userspace
- * application, vendor driver could return error then don't remove the device.
- * - 'force_remove' is set to 'true' when called from mdev_unregister_device()
- * which indicate that parent device is being removed from mdev device
- * framework so remove mdev device forcefully.
- */
-static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
-{
- struct mdev_parent *parent = mdev->parent;
+ struct mdev_parent *parent;
+ struct mdev_type *type;
int ret;
- /*
- * Vendor driver can return error if VMM or userspace application is
- * using this mdev device.
- */
+ type = to_mdev_type(mdev->type_kobj);
+ mdev_remove_sysfs_files(&mdev->dev, type);
+ device_del(&mdev->dev);
+ parent = mdev->parent;
+ lockdep_assert_held(&parent->unreg_sem);
ret = parent->ops->remove(mdev);
- if (ret && !force_remove)
- return ret;
+ if (ret)
+ dev_err(&mdev->dev, "Remove failed: err=%d\n", ret);
- sysfs_remove_groups(&mdev->dev.kobj, parent->ops->mdev_attr_groups);
- return 0;
+ /* Balances with device_initialize() */
+ put_device(&mdev->dev);
+ mdev_put_parent(parent);
}
static int mdev_device_remove_cb(struct device *dev, void *data)
{
- if (dev_is_mdev(dev))
- mdev_device_remove(dev, true);
+ if (dev_is_mdev(dev)) {
+ struct mdev_device *mdev;
+ mdev = to_mdev_device(dev);
+ mdev_device_remove_common(mdev);
+ }
return 0;
}
@@ -193,6 +172,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
}
kref_init(&parent->ref);
+ init_rwsem(&parent->unreg_sem);
parent->dev = dev;
parent->ops = ops;
@@ -251,21 +231,23 @@ void mdev_unregister_device(struct device *dev)
dev_info(dev, "MDEV: Unregistering\n");
list_del(&parent->next);
+ mutex_unlock(&parent_list_lock);
+
+ down_write(&parent->unreg_sem);
+
class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
device_for_each_child(dev, NULL, mdev_device_remove_cb);
parent_remove_sysfs_files(parent);
+ up_write(&parent->unreg_sem);
- mutex_unlock(&parent_list_lock);
mdev_put_parent(parent);
}
EXPORT_SYMBOL(mdev_unregister_device);
-static void mdev_device_release(struct device *dev)
+static void mdev_device_free(struct mdev_device *mdev)
{
- struct mdev_device *mdev = to_mdev_device(dev);
-
mutex_lock(&mdev_list_lock);
list_del(&mdev->next);
mutex_unlock(&mdev_list_lock);
@@ -274,6 +256,13 @@ static void mdev_device_release(struct device *dev)
kfree(mdev);
}
+static void mdev_device_release(struct device *dev)
+{
+ struct mdev_device *mdev = to_mdev_device(dev);
+
+ mdev_device_free(mdev);
+}
+
int mdev_device_create(struct kobject *kobj,
struct device *dev, const guid_t *uuid)
{
@@ -310,46 +299,55 @@ int mdev_device_create(struct kobject *kobj,
mdev->parent = parent;
+ /* Check if parent unregistration has started */
+ if (!down_read_trylock(&parent->unreg_sem)) {
+ mdev_device_free(mdev);
+ ret = -ENODEV;
+ goto mdev_fail;
+ }
+
+ device_initialize(&mdev->dev);
mdev->dev.parent = dev;
mdev->dev.bus = &mdev_bus_type;
mdev->dev.release = mdev_device_release;
dev_set_name(&mdev->dev, "%pUl", uuid);
+ mdev->dev.groups = parent->ops->mdev_attr_groups;
+ mdev->type_kobj = kobj;
- ret = device_register(&mdev->dev);
- if (ret) {
- put_device(&mdev->dev);
- goto mdev_fail;
- }
+ ret = parent->ops->create(kobj, mdev);
+ if (ret)
+ goto ops_create_fail;
- ret = mdev_device_create_ops(kobj, mdev);
+ ret = device_add(&mdev->dev);
if (ret)
- goto create_fail;
+ goto add_fail;
ret = mdev_create_sysfs_files(&mdev->dev, type);
- if (ret) {
- mdev_device_remove_ops(mdev, true);
- goto create_fail;
- }
+ if (ret)
+ goto sysfs_fail;
- mdev->type_kobj = kobj;
mdev->active = true;
dev_dbg(&mdev->dev, "MDEV: created\n");
+ up_read(&parent->unreg_sem);
return 0;
-create_fail:
- device_unregister(&mdev->dev);
+sysfs_fail:
+ device_del(&mdev->dev);
+add_fail:
+ parent->ops->remove(mdev);
+ops_create_fail:
+ up_read(&parent->unreg_sem);
+ put_device(&mdev->dev);
mdev_fail:
mdev_put_parent(parent);
return ret;
}
-int mdev_device_remove(struct device *dev, bool force_remove)
+int mdev_device_remove(struct device *dev)
{
struct mdev_device *mdev, *tmp;
struct mdev_parent *parent;
- struct mdev_type *type;
- int ret;
mdev = to_mdev_device(dev);
@@ -372,19 +370,13 @@ int mdev_device_remove(struct device *dev, bool force_remove)
mdev->active = false;
mutex_unlock(&mdev_list_lock);
- type = to_mdev_type(mdev->type_kobj);
parent = mdev->parent;
+ /* Check if parent unregistration has started */
+ if (!down_read_trylock(&parent->unreg_sem))
+ return -ENODEV;
- ret = mdev_device_remove_ops(mdev, force_remove);
- if (ret) {
- mdev->active = true;
- return ret;
- }
-
- mdev_remove_sysfs_files(dev, type);
- device_unregister(dev);
- mdev_put_parent(parent);
-
+ mdev_device_remove_common(mdev);
+ up_read(&parent->unreg_sem);
return 0;
}
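The new unreg_sem usage follows a common teardown pattern: creators and removers take the semaphore for reading with down_read_trylock() and bail out with -ENODEV once the write side is held, while mdev_unregister_device() takes it for writing so it both waits for in-flight operations and blocks new ones. A hypothetical pthread sketch of the same pattern (names invented, link with -pthread):

#include <stdio.h>
#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t unreg_sem = PTHREAD_RWLOCK_INITIALIZER;
static int parent_alive = 1;

static int device_create(void)
{
	int ret = 0;

	/* Bail out if unregistration already holds the write side. */
	if (pthread_rwlock_tryrdlock(&unreg_sem))
		return -ENODEV;

	if (parent_alive)
		printf("device created\n");
	else
		ret = -ENODEV;       /* parent already torn down */

	pthread_rwlock_unlock(&unreg_sem);
	return ret;
}

static void parent_unregister(void)
{
	pthread_rwlock_wrlock(&unreg_sem);  /* waits for active creators */
	parent_alive = 0;
	printf("parent unregistered\n");
	pthread_rwlock_unlock(&unreg_sem);
}

int main(void)
{
	device_create();                    /* succeeds */
	parent_unregister();
	printf("create after unregister: %d\n", device_create());
	return 0;
}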
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 36cbbdb754de..398767526276 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -23,6 +23,8 @@ struct mdev_parent {
struct list_head next;
struct kset *mdev_types_kset;
struct list_head type_list;
+ /* Synchronize device creation/removal with parent unregistration */
+ struct rw_semaphore unreg_sem;
};
struct mdev_device {
@@ -60,6 +62,6 @@ void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
int mdev_device_create(struct kobject *kobj,
struct device *dev, const guid_t *uuid);
-int mdev_device_remove(struct device *dev, bool force_remove);
+int mdev_device_remove(struct device *dev);
#endif /* MDEV_PRIVATE_H */
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index cbf94b8165ea..ffa3dcebf201 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -236,11 +236,9 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
if (val && device_remove_file_self(dev, attr)) {
int ret;
- ret = mdev_device_remove(dev, false);
- if (ret) {
- device_create_file(dev, attr);
+ ret = mdev_device_remove(dev);
+ if (ret)
return ret;
- }
}
return count;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 0657b0b57dae..d53f3493a6b9 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -202,6 +202,15 @@ int __ref xen_swiotlb_init(int verbose, bool early)
retry:
bytes = xen_set_nslabs(xen_io_tlb_nslabs);
order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
+
+ /*
+ * IO TLB memory already allocated. Just use it.
+ */
+ if (io_tlb_start != 0) {
+ xen_io_tlb_start = phys_to_virt(io_tlb_start);
+ goto end;
+ }
+
/*
* Get IO TLB memory from any location.
*/
@@ -231,7 +240,6 @@ retry:
m_ret = XEN_SWIOTLB_ENOMEM;
goto error;
}
- xen_io_tlb_end = xen_io_tlb_start + bytes;
/*
* And replace that memory with pages under 4GB.
*/
@@ -258,6 +266,8 @@ retry:
} else
rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+end:
+ xen_io_tlb_end = xen_io_tlb_start + bytes;
if (!rc)
swiotlb_set_max_segment(PAGE_SIZE);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1aee51a9f3bf..c7adff343ba9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -11137,13 +11137,11 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
* it while performing the free space search since we have already
* held back allocations.
*/
-static int btrfs_trim_free_extents(struct btrfs_device *device,
- struct fstrim_range *range, u64 *trimmed)
+static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
- u64 start, len = 0, end = 0;
+ u64 start = SZ_1M, len = 0, end = 0;
int ret;
- start = max_t(u64, range->start, SZ_1M);
*trimmed = 0;
/* Discard not supported = nothing to do. */
@@ -11186,22 +11184,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
break;
}
- /* Keep going until we satisfy minlen or reach end of space */
- if (len < range->minlen) {
- mutex_unlock(&fs_info->chunk_mutex);
- start += len;
- continue;
- }
-
- /* If we are out of the passed range break */
- if (start > range->start + range->len - 1) {
- mutex_unlock(&fs_info->chunk_mutex);
- break;
- }
-
- start = max(range->start, start);
- len = min(range->len, len);
-
ret = btrfs_issue_discard(device->bdev, start, len,
&bytes);
if (!ret)
@@ -11216,10 +11198,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
start += len;
*trimmed += bytes;
- /* We've trimmed enough */
- if (*trimmed >= range->len)
- break;
-
if (fatal_signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -11303,7 +11281,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
mutex_lock(&fs_info->fs_devices->device_list_mutex);
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
- ret = btrfs_trim_free_extents(device, range, &group_trimmed);
+ ret = btrfs_trim_free_extents(device, &group_trimmed);
if (ret) {
dev_failed++;
dev_ret = ret;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index c78ccaf83ef8..93ea1d529aa3 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -991,9 +991,12 @@ static void gfs2_write_unlock(struct inode *inode)
static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
unsigned len, struct iomap *iomap)
{
+ unsigned int blockmask = i_blocksize(inode) - 1;
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ unsigned int blocks;
- return gfs2_trans_begin(sdp, RES_DINODE + (len >> inode->i_blkbits), 0);
+ blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
+ return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
}
static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
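For illustration, a minimal numeric sketch of the new reservation arithmetic; the block size and write are hypothetical, only the formula comes from the hunk above:

	/* Hypothetical: 4 KiB blocks, a 200-byte write at file offset 4000. */
	unsigned int blkbits = 12;
	unsigned int blockmask = (1u << blkbits) - 1;	/* 4095 */
	loff_t pos = 4000;
	unsigned int len = 200;
	unsigned int blocks = ((pos & blockmask) + len + blockmask) >> blkbits;
	/* blocks == 2: the write straddles a block boundary, whereas the  */
	/* old len >> blkbits computation would have reserved 0 blocks.    */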
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0fbb486a320e..86a2bd721900 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2777,8 +2777,10 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_eventfd_unregister(ctx);
#if defined(CONFIG_UNIX)
- if (ctx->ring_sock)
+ if (ctx->ring_sock) {
+ ctx->ring_sock->file = NULL; /* so that iput() is called */
sock_release(ctx->ring_sock);
+ }
#endif
io_mem_free(ctx->sq_ring);
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 2d016937fdda..42a61eecdacd 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -296,6 +296,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
out_attach:
spin_lock(&dentry_attach_lock);
+ if (unlikely(dentry->d_fsdata && !alias)) {
+ /* d_fsdata was set by a racing thread doing the same
+ * attach as this one. Let the racing thread go ahead
+ * and simply return here.
+ */
+ spin_unlock(&dentry_attach_lock);
+ iput(dl->dl_inode);
+ ocfs2_lock_res_free(&dl->dl_lockres);
+ kfree(dl);
+ return 0;
+ }
+
dentry->d_fsdata = dl;
dl->dl_count++;
spin_unlock(&dentry_attach_lock);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 9d3b5b93102c..c9ca0be54d9a 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -471,6 +471,7 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+int drm_add_override_edid_modes(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index d71b079bb021..b4e766e93f6e 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -106,8 +106,6 @@ enum {
CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */
- CFTYPE_SYMLINKED = (1 << 6), /* pointed to by symlink too */
-
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
@@ -223,6 +221,7 @@ struct css_set {
*/
struct list_head tasks;
struct list_head mg_tasks;
+ struct list_head dying_tasks;
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
@@ -545,7 +544,6 @@ struct cftype {
* end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
- char link_name[MAX_CFTYPE_NAME];
unsigned long private;
/*
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c0077adeea83..0297f930a56e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -43,6 +43,9 @@
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED (1U << 1)
+/* internal flags */
+#define CSS_TASK_ITER_SKIPPED (1U << 16)
+
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
@@ -57,6 +60,7 @@ struct css_task_iter {
struct list_head *task_pos;
struct list_head *tasks_head;
struct list_head *mg_tasks_head;
+ struct list_head *dying_tasks_head;
struct css_set *cur_cset;
struct css_set *cur_dcset;
@@ -487,7 +491,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
*
* Find the css for the (@task, @subsys_id) combination, increment a
* reference on and return it. This function is guaranteed to return a
- * valid css.
+ * valid css. The returned css may already have been offlined.
*/
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
@@ -497,7 +501,13 @@ task_get_css(struct task_struct *task, int subsys_id)
rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);
- if (likely(css_tryget_online(css)))
+ /*
+ * Can't use css_tryget_online() here. A task which has
+ * PF_EXITING set may stay associated with an offline css.
+ * If such task calls this function, css_tryget_online()
+ * will keep failing.
+ */
+ if (likely(css_tryget(css)))
break;
cpu_relax();
}
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 6a381594608c..5c6062206760 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -101,6 +101,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
+ CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
diff --git a/include/linux/device.h b/include/linux/device.h
index e85264fb6616..848fc71c6ba6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -713,6 +713,7 @@ void __iomem *devm_of_iomap(struct device *dev,
/* allows to add/remove a custom action to devres stack */
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
static inline int devm_add_action_or_reset(struct device *dev,
void (*action)(void *), void *data)
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index dd0a452373e7..a337313e064f 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -75,6 +75,7 @@ struct gen_pool_chunk {
struct list_head next_chunk; /* next chunk in pool */
atomic_long_t avail;
phys_addr_t phys_addr; /* physical starting address of memory chunk */
+ void *owner; /* private data to retrieve at alloc time */
unsigned long start_addr; /* start address of memory chunk */
unsigned long end_addr; /* end address of memory chunk (inclusive) */
unsigned long bits[0]; /* bitmap for allocating memory chunk */
@@ -96,8 +97,15 @@ struct genpool_data_fixed {
extern struct gen_pool *gen_pool_create(int, int);
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
-extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
- size_t, int);
+extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t,
+ size_t, int, void *);
+
+static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
+ phys_addr_t phys, size_t size, int nid)
+{
+ return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
+}
+
/**
* gen_pool_add - add a new chunk of special memory to the pool
* @pool: pool to add new memory chunk to
@@ -116,12 +124,47 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
return gen_pool_add_virt(pool, addr, -1, size, nid);
}
extern void gen_pool_destroy(struct gen_pool *);
-extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
-extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
- genpool_algo_t algo, void *data);
+unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
+ genpool_algo_t algo, void *data, void **owner);
+
+static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
+ size_t size, void **owner)
+{
+ return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
+ owner);
+}
+
+static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
+ size_t size, genpool_algo_t algo, void *data)
+{
+ return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
+}
+
+/**
+ * gen_pool_alloc - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+ return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+
extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
dma_addr_t *dma);
-extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
+ size_t size, void **owner);
+static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
+ size_t size)
+{
+ gen_pool_free_owner(pool, addr, size, NULL);
+}
+
extern void gen_pool_for_each_chunk(struct gen_pool *,
void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
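A minimal usage sketch of the owner-tracking helpers introduced above; the backing buffer, order and the ctx cookie are hypothetical, only the gen_pool_*_owner() signatures come from this patch:

	static char buf[4096];
	static int ctx = 42;				/* any per-chunk cookie */

	struct gen_pool *pool = gen_pool_create(6, -1);	/* 64-byte granules */
	unsigned long vaddr;
	void *owner;

	/* Stash &ctx with the chunk so it can be recalled at alloc/free time. */
	gen_pool_add_owner(pool, (unsigned long)buf, -1, sizeof(buf), -1, &ctx);

	vaddr = gen_pool_alloc_owner(pool, 128, &owner);	/* owner == &ctx */
	if (vaddr)
		gen_pool_free_owner(pool, vaddr, 128, &owner);	/* owner == &ctx again */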
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index edf9e8f32d70..1dcb763bb610 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -117,9 +117,12 @@ struct memcg_shrinker_map {
struct mem_cgroup_per_node {
struct lruvec lruvec;
+ /* Legacy local VM stats */
+ struct lruvec_stat __percpu *lruvec_stat_local;
+
+ /* Subtree VM stats (batched updates) */
struct lruvec_stat __percpu *lruvec_stat_cpu;
atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
- atomic_long_t lruvec_stat_local[NR_VM_NODE_STAT_ITEMS];
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
@@ -265,17 +268,18 @@ struct mem_cgroup {
atomic_t moving_account;
struct task_struct *move_lock_task;
- /* memory.stat */
+ /* Legacy local VM stats and events */
+ struct memcg_vmstats_percpu __percpu *vmstats_local;
+
+ /* Subtree VM stats and events (batched updates) */
struct memcg_vmstats_percpu __percpu *vmstats_percpu;
MEMCG_PADDING(_pad2_);
atomic_long_t vmstats[MEMCG_NR_STAT];
- atomic_long_t vmstats_local[MEMCG_NR_STAT];
-
atomic_long_t vmevents[NR_VM_EVENT_ITEMS];
- atomic_long_t vmevents_local[NR_VM_EVENT_ITEMS];
+ /* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
unsigned long socket_pressure;
@@ -567,7 +571,11 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
int idx)
{
- long x = atomic_long_read(&memcg->vmstats_local[idx]);
+ long x = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -641,13 +649,15 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
- long x;
+ long x = 0;
+ int cpu;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- x = atomic_long_read(&pn->lruvec_stat_local[idx]);
+ for_each_possible_cpu(cpu)
+ x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index f0628660d541..1732dea030b2 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -81,6 +81,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
* @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
* @kill: callback to transition @ref to the dead state
+ * @cleanup: callback to wait for @ref to be idle and reap it
* @dev: host device of the mapping for debug
* @data: private data pointer for page_free()
* @type: memory type: see MEMORY_* in memory_hotplug.h
@@ -92,6 +93,7 @@ struct dev_pagemap {
struct resource res;
struct percpu_ref *ref;
void (*kill)(struct percpu_ref *ref);
+ void (*cleanup)(struct percpu_ref *ref);
struct device *dev;
void *data;
enum memory_type type;
@@ -100,6 +102,7 @@ struct dev_pagemap {
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
@@ -118,6 +121,11 @@ static inline void *devm_memremap_pages(struct device *dev,
return ERR_PTR(-ENXIO);
}
+static inline void devm_memunmap_pages(struct device *dev,
+ struct dev_pagemap *pgmap)
+{
+}
+
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap)
{
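Callers of devm_memremap_pages() now have to supply both @kill and @cleanup (the WARN added in kernel/memremap.c below enforces it). A hedged sketch of the percpu_ref pattern, mirroring the mm/hmm.c conversion further down; the structure and names are illustrative:

	struct my_pgmap {
		struct dev_pagemap pgmap;
		struct percpu_ref ref;		/* pgmap.ref points here */
		struct completion done;		/* init_completion() at setup time */
	};

	static void my_ref_release(struct percpu_ref *ref)
	{
		struct my_pgmap *p = container_of(ref, struct my_pgmap, ref);

		complete(&p->done);		/* last page reference dropped */
	}

	static void my_ref_kill(struct percpu_ref *ref)
	{
		percpu_ref_kill(ref);		/* start the transition to dead */
	}

	static void my_ref_cleanup(struct percpu_ref *ref)
	{
		struct my_pgmap *p = container_of(ref, struct my_pgmap, ref);

		wait_for_completion(&p->done);	/* wait until the ref is idle */
		percpu_ref_exit(ref);		/* and reap it */
	}

	/* Setup (error handling omitted):
	 *	percpu_ref_init(&p->ref, my_ref_release, 0, GFP_KERNEL);
	 *	p->pgmap.ref = &p->ref;
	 *	p->pgmap.kill = my_ref_kill;
	 *	p->pgmap.cleanup = my_ref_cleanup;
	 *	addr = devm_memremap_pages(dev, &p->pgmap);
	 */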
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index a3fda9f024c3..4a7944078cc3 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_struct *mm)
* followed by taking the mmap_sem for writing before modifying the
* vmas or anything the coredump pretends not to change from under it.
*
+ * It also has to be called when mmgrab() is used in the context of
+ * the process, but then the mm_count refcount is transferred outside
+ * the context of the process to run down_write() on that pinned mm.
+ *
* NOTE: find_extend_vma() called from GUP context is the only place
* that can modify the "mm" (notably the vm_start/end) under mmap_sem
* for reading and outside the context of the process, so it is also
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 3b67c93ff101..3d174e20aa53 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -49,6 +49,7 @@ enum sof_ipc_dai_type {
SOF_DAI_INTEL_SSP, /**< Intel SSP */
SOF_DAI_INTEL_DMIC, /**< Intel DMIC */
SOF_DAI_INTEL_HDA, /**< Intel HD/A */
+ SOF_DAI_INTEL_SOUNDWIRE, /**< Intel SoundWire */
};
/* general purpose DAI configuration */
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index ccb6a004b37b..1efcf7b18ec2 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -48,6 +48,7 @@
#define SOF_IPC_FW_READY SOF_GLB_TYPE(0x7U)
#define SOF_IPC_GLB_DAI_MSG SOF_GLB_TYPE(0x8U)
#define SOF_IPC_GLB_TRACE_MSG SOF_GLB_TYPE(0x9U)
+#define SOF_IPC_GLB_GDB_DEBUG SOF_GLB_TYPE(0xAU)
/*
* DSP Command Message Types
@@ -78,6 +79,7 @@
#define SOF_IPC_COMP_GET_VALUE SOF_CMD_TYPE(0x002)
#define SOF_IPC_COMP_SET_DATA SOF_CMD_TYPE(0x003)
#define SOF_IPC_COMP_GET_DATA SOF_CMD_TYPE(0x004)
+#define SOF_IPC_COMP_NOTIFICATION SOF_CMD_TYPE(0x005)
/* DAI messages */
#define SOF_IPC_DAI_CONFIG SOF_CMD_TYPE(0x001)
@@ -153,6 +155,27 @@ struct sof_ipc_compound_hdr {
uint32_t count; /**< count of 0 means end of compound sequence */
} __packed;
+/**
+ * OOPS header architecture specific data.
+ */
+struct sof_ipc_dsp_oops_arch_hdr {
+ uint32_t arch; /* Identifier of architecture */
+ uint32_t totalsize; /* Total size of oops message */
+} __packed;
+
+/**
+ * OOPS header platform specific data.
+ */
+struct sof_ipc_dsp_oops_plat_hdr {
+ uint32_t configidhi; /* ConfigID hi 32bits */
+ uint32_t configidlo; /* ConfigID lo 32bits */
+ uint32_t numaregs; /* Special regs num */
+ uint32_t stackoffset; /* Offset to stack pointer from beginning of
+ * oops message
+ */
+ uint32_t stackptr; /* Stack ptr */
+} __packed;
+
/** @}*/
#endif
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 21dae04d8183..16528d2b4a50 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -18,6 +18,14 @@
#define SOF_IPC_MAX_ELEMS 16
+/*
+ * Firmware boot info flag bits (64-bit)
+ */
+#define SOF_IPC_INFO_BUILD BIT(0)
+#define SOF_IPC_INFO_LOCKS BIT(1)
+#define SOF_IPC_INFO_LOCKSV BIT(2)
+#define SOF_IPC_INFO_GDB BIT(3)
+
/* extended data types that can be appended onto end of sof_ipc_fw_ready */
enum sof_ipc_ext_data {
SOF_IPC_EXT_DMA_BUFFER = 0,
@@ -49,16 +57,8 @@ struct sof_ipc_fw_ready {
uint32_t hostbox_size;
struct sof_ipc_fw_version version;
- /* Miscellaneous debug flags showing build/debug features enabled */
- union {
- uint64_t reserved;
- struct {
- uint64_t build:1;
- uint64_t locks:1;
- uint64_t locks_verbose:1;
- uint64_t gdb:1;
- } bits;
- } debug;
+ /* Miscellaneous flags */
+ uint64_t flags;
/* reserved for future use */
uint32_t reserved[4];
diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h
index a7189984000d..d25c764b10e8 100644
--- a/include/sound/sof/xtensa.h
+++ b/include/sound/sof/xtensa.h
@@ -17,7 +17,8 @@
/* Xtensa Firmware Oops data */
struct sof_ipc_dsp_oops_xtensa {
- struct sof_ipc_hdr hdr;
+ struct sof_ipc_dsp_oops_arch_hdr arch_hdr;
+ struct sof_ipc_dsp_oops_plat_hdr plat_hdr;
uint32_t exccause;
uint32_t excvaddr;
uint32_t ps;
@@ -38,7 +39,11 @@ struct sof_ipc_dsp_oops_xtensa {
uint32_t intenable;
uint32_t interrupt;
uint32_t sar;
- uint32_t stack;
+ uint32_t debugcause;
+ uint32_t windowbase;
+ uint32_t windowstart;
+ uint32_t excsave1;
+ uint32_t ar[];
} __packed;
#endif
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index 37e0a90dc9e6..0868eb47acf7 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -26,7 +26,7 @@
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 4
+#define SOF_ABI_MINOR 6
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 155048b0eca2..bf9dbffd46b1 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -215,7 +215,8 @@ static struct cftype cgroup_base_files[];
static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
-static void css_task_iter_advance(struct css_task_iter *it);
+static void css_task_iter_skip(struct css_task_iter *it,
+ struct task_struct *task);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
struct cgroup_subsys *ss);
@@ -738,6 +739,7 @@ struct css_set init_css_set = {
.dom_cset = &init_css_set,
.tasks = LIST_HEAD_INIT(init_css_set.tasks),
.mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
+ .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
.task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
.threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
@@ -843,6 +845,21 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
cgroup_update_populated(link->cgrp, populated);
}
+/*
+ * @task is leaving, advance task iterators which are pointing to it so
+ * that they can resume at the next position. Advancing an iterator might
+ * remove it from the list, so use a safe walk. See css_task_iter_skip() for
+ * details.
+ */
+static void css_set_skip_task_iters(struct css_set *cset,
+ struct task_struct *task)
+{
+ struct css_task_iter *it, *pos;
+
+ list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
+ css_task_iter_skip(it, task);
+}
+
/**
* css_set_move_task - move a task from one css_set to another
* @task: task being moved
@@ -868,22 +885,9 @@ static void css_set_move_task(struct task_struct *task,
css_set_update_populated(to_cset, true);
if (from_cset) {
- struct css_task_iter *it, *pos;
-
WARN_ON_ONCE(list_empty(&task->cg_list));
- /*
- * @task is leaving, advance task iterators which are
- * pointing to it so that they can resume at the next
- * position. Advancing an iterator might remove it from
- * the list, use safe walk. See css_task_iter_advance*()
- * for details.
- */
- list_for_each_entry_safe(it, pos, &from_cset->task_iters,
- iters_node)
- if (it->task_pos == &task->cg_list)
- css_task_iter_advance(it);
-
+ css_set_skip_task_iters(from_cset, task);
list_del_init(&task->cg_list);
if (!css_set_populated(from_cset))
css_set_update_populated(from_cset, false);
@@ -1210,6 +1214,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
cset->dom_cset = cset;
INIT_LIST_HEAD(&cset->tasks);
INIT_LIST_HEAD(&cset->mg_tasks);
+ INIT_LIST_HEAD(&cset->dying_tasks);
INIT_LIST_HEAD(&cset->task_iters);
INIT_LIST_HEAD(&cset->threaded_csets);
INIT_HLIST_NODE(&cset->hlist);
@@ -1460,8 +1465,8 @@ struct cgroup *task_cgroup_from_root(struct task_struct *task,
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
-static char *cgroup_fill_name(struct cgroup *cgrp, const struct cftype *cft,
- char *buf, bool write_link_name)
+static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
+ char *buf)
{
struct cgroup_subsys *ss = cft->ss;
@@ -1471,26 +1476,13 @@ static char *cgroup_fill_name(struct cgroup *cgrp, const struct cftype *cft,
snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
- write_link_name ? cft->link_name : cft->name);
+ cft->name);
} else {
- strscpy(buf, write_link_name ? cft->link_name : cft->name,
- CGROUP_FILE_NAME_MAX);
+ strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
}
return buf;
}
-static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
- char *buf)
-{
- return cgroup_fill_name(cgrp, cft, buf, false);
-}
-
-static char *cgroup_link_name(struct cgroup *cgrp, const struct cftype *cft,
- char *buf)
-{
- return cgroup_fill_name(cgrp, cft, buf, true);
-}
-
/**
* cgroup_file_mode - deduce file mode of a control file
* @cft: the control file in question
@@ -1649,9 +1641,6 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
}
kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
- if (cft->flags & CFTYPE_SYMLINKED)
- kernfs_remove_by_name(cgrp->kn,
- cgroup_link_name(cgrp, cft, name));
}
/**
@@ -3837,7 +3826,6 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
{
char name[CGROUP_FILE_NAME_MAX];
struct kernfs_node *kn;
- struct kernfs_node *kn_link;
struct lock_class_key *key = NULL;
int ret;
@@ -3868,14 +3856,6 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
spin_unlock_irq(&cgroup_file_kn_lock);
}
- if (cft->flags & CFTYPE_SYMLINKED) {
- kn_link = kernfs_create_link(cgrp->kn,
- cgroup_link_name(cgrp, cft, name),
- kn);
- if (IS_ERR(kn_link))
- return PTR_ERR(kn_link);
- }
-
return 0;
}
@@ -4433,15 +4413,18 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
it->task_pos = NULL;
return;
}
- } while (!css_set_populated(cset));
+ } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
if (!list_empty(&cset->tasks))
it->task_pos = cset->tasks.next;
- else
+ else if (!list_empty(&cset->mg_tasks))
it->task_pos = cset->mg_tasks.next;
+ else
+ it->task_pos = cset->dying_tasks.next;
it->tasks_head = &cset->tasks;
it->mg_tasks_head = &cset->mg_tasks;
+ it->dying_tasks_head = &cset->dying_tasks;
/*
* We don't keep css_sets locked across iteration steps and thus
@@ -4467,9 +4450,20 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
list_add(&it->iters_node, &cset->task_iters);
}
+static void css_task_iter_skip(struct css_task_iter *it,
+ struct task_struct *task)
+{
+ lockdep_assert_held(&css_set_lock);
+
+ if (it->task_pos == &task->cg_list) {
+ it->task_pos = it->task_pos->next;
+ it->flags |= CSS_TASK_ITER_SKIPPED;
+ }
+}
+
static void css_task_iter_advance(struct css_task_iter *it)
{
- struct list_head *next;
+ struct task_struct *task;
lockdep_assert_held(&css_set_lock);
repeat:
@@ -4479,25 +4473,40 @@ repeat:
* consumed first and then ->mg_tasks. After ->mg_tasks,
* we move onto the next cset.
*/
- next = it->task_pos->next;
-
- if (next == it->tasks_head)
- next = it->mg_tasks_head->next;
+ if (it->flags & CSS_TASK_ITER_SKIPPED)
+ it->flags &= ~CSS_TASK_ITER_SKIPPED;
+ else
+ it->task_pos = it->task_pos->next;
- if (next == it->mg_tasks_head)
+ if (it->task_pos == it->tasks_head)
+ it->task_pos = it->mg_tasks_head->next;
+ if (it->task_pos == it->mg_tasks_head)
+ it->task_pos = it->dying_tasks_head->next;
+ if (it->task_pos == it->dying_tasks_head)
css_task_iter_advance_css_set(it);
- else
- it->task_pos = next;
} else {
/* called from start, proceed to the first cset */
css_task_iter_advance_css_set(it);
}
- /* if PROCS, skip over tasks which aren't group leaders */
- if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
- !thread_group_leader(list_entry(it->task_pos, struct task_struct,
- cg_list)))
- goto repeat;
+ if (!it->task_pos)
+ return;
+
+ task = list_entry(it->task_pos, struct task_struct, cg_list);
+
+ if (it->flags & CSS_TASK_ITER_PROCS) {
+ /* if PROCS, skip over tasks which aren't group leaders */
+ if (!thread_group_leader(task))
+ goto repeat;
+
+ /* and dying leaders w/o live member threads */
+ if (!atomic_read(&task->signal->live))
+ goto repeat;
+ } else {
+ /* skip all dying ones */
+ if (task->flags & PF_EXITING)
+ goto repeat;
+ }
}
/**
@@ -4553,6 +4562,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
spin_lock_irq(&css_set_lock);
+ /* @it may be half-advanced by skips, finish advancing */
+ if (it->flags & CSS_TASK_ITER_SKIPPED)
+ css_task_iter_advance(it);
+
if (it->task_pos) {
it->cur_task = list_entry(it->task_pos, struct task_struct,
cg_list);
@@ -6034,6 +6047,7 @@ void cgroup_exit(struct task_struct *tsk)
if (!list_empty(&tsk->cg_list)) {
spin_lock_irq(&css_set_lock);
css_set_move_task(tsk, cset, NULL, false);
+ list_add_tail(&tsk->cg_list, &cset->dying_tasks);
cset->nr_tasks--;
WARN_ON_ONCE(cgroup_task_frozen(tsk));
@@ -6059,6 +6073,13 @@ void cgroup_release(struct task_struct *task)
do_each_subsys_mask(ss, ssid, have_release_callback) {
ss->release(task);
} while_each_subsys_mask();
+
+ if (use_task_css_set_links) {
+ spin_lock_irq(&css_set_lock);
+ css_set_skip_task_iters(task_css_set(task), task);
+ list_del_init(&task->cg_list);
+ spin_unlock_irq(&css_set_lock);
+ }
}
void cgroup_free(struct task_struct *task)
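The iterator rework above stays behind the existing css_task_iter interface; a hypothetical walk looks unchanged, but with CSS_TASK_ITER_PROCS it now also visits dying thread-group leaders that still have live member threads:

	static void count_procs(struct cgroup_subsys_state *css)
	{
		struct css_task_iter it;
		struct task_struct *task;
		int nr = 0;

		css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
		while ((task = css_task_iter_next(&it)))
			nr++;		/* one thread-group leader per iteration */
		css_task_iter_end(&it);

		pr_info("%d processes\n", nr);
	}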
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6a1942ed781c..515525ff1cfd 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -3254,10 +3254,23 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
spin_unlock_irqrestore(&callback_lock, flags);
}
+/**
+ * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
+ * @tsk: pointer to task_struct with which the scheduler is struggling
+ *
+ * Description: In the case that the scheduler cannot find an allowed cpu in
+ * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
+ * mode however, this value is the same as task_cs(tsk)->effective_cpus,
+ * which will not contain a sane cpumask during cases such as cpu hotplugging.
+ * This is the absolute last resort for the scheduler and it is only used if
+ * _every_ other avenue has been traveled.
+ **/
+
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
rcu_read_lock();
- do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
+ do_set_cpus_allowed(tsk, is_in_v2_mode() ?
+ task_cs(tsk)->cpus_allowed : cpu_possible_mask);
rcu_read_unlock();
/*
diff --git a/kernel/cred.c b/kernel/cred.c
index e74ffdc98a92..c73a87a4df13 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -446,6 +446,15 @@ int commit_creds(struct cred *new)
if (task->mm)
set_dumpable(task->mm, suid_dumpable);
task->pdeath_signal = 0;
+ /*
+ * If a task drops privileges and becomes nondumpable,
+ * the dumpability change must become visible before
+ * the credential change; otherwise, a __ptrace_may_access()
+ * racing with this change may be able to attach to a task it
+ * shouldn't be able to attach to (as if the task had dropped
+ * privileges without becoming nondumpable).
+ * Pairs with a read barrier in __ptrace_may_access().
+ */
smp_wmb();
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 1803efb2922f..a75b6a7f458a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -195,6 +195,7 @@ repeat:
rcu_read_unlock();
proc_flush_task(p);
+ cgroup_release(p);
write_lock_irq(&tasklist_lock);
ptrace_release_task(p);
@@ -220,7 +221,6 @@ repeat:
}
write_unlock_irq(&tasklist_lock);
- cgroup_release(p);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 2398832947c6..c4ce08f43bd6 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -18,6 +18,7 @@
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
+#include <linux/memory.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
@@ -718,16 +719,21 @@ static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_func *func;
int ret;
+ mutex_lock(&text_mutex);
+
module_disable_ro(patch->mod);
ret = klp_write_object_relocations(patch->mod, obj);
if (ret) {
module_enable_ro(patch->mod, true);
+ mutex_unlock(&text_mutex);
return ret;
}
arch_klp_init_object_loaded(patch, obj);
module_enable_ro(patch->mod, true);
+ mutex_unlock(&text_mutex);
+
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
func->old_sympos,
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 1490e63f69a9..6e1970719dc2 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -95,6 +95,7 @@ static void devm_memremap_pages_release(void *data)
pgmap->kill(pgmap->ref);
for_each_device_pfn(pfn, pgmap)
put_page(pfn_to_page(pfn));
+ pgmap->cleanup(pgmap->ref);
/* pages are dead and unused, undo the arch mapping */
align_start = res->start & ~(SECTION_SIZE - 1);
@@ -133,8 +134,8 @@ static void devm_memremap_pages_release(void *data)
* 2/ The altmap field may optionally be initialized, in which case altmap_valid
* must be set to true
*
- * 3/ pgmap->ref must be 'live' on entry and will be killed at
- * devm_memremap_pages_release() time, or if this routine fails.
+ * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
+ * at devm_memremap_pages_release() time, or if this routine fails.
*
* 4/ res is expected to be a host memory range that could feasibly be
* treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -156,8 +157,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
pgprot_t pgprot = PAGE_KERNEL;
int error, nid, is_ram;
- if (!pgmap->ref || !pgmap->kill)
+ if (!pgmap->ref || !pgmap->kill || !pgmap->cleanup) {
+ WARN(1, "Missing reference count teardown definition\n");
return ERR_PTR(-EINVAL);
+ }
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -168,14 +171,16 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
if (conflict_pgmap) {
dev_WARN(dev, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
- return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ goto err_array;
}
conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
if (conflict_pgmap) {
dev_WARN(dev, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
- return ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
+ goto err_array;
}
is_ram = region_intersects(align_start, align_size,
@@ -267,10 +272,18 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
pgmap_array_delete(res);
err_array:
pgmap->kill(pgmap->ref);
+ pgmap->cleanup(pgmap->ref);
+
return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
+void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
+{
+ devm_release_action(dev, devm_memremap_pages_release, pgmap);
+}
+EXPORT_SYMBOL_GPL(devm_memunmap_pages);
+
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
/* number of pfns from base where pfn_to_page() is valid */
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 5710d07e67cf..8456b6e2205f 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -324,6 +324,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
return -EPERM;
ok:
rcu_read_unlock();
+ /*
+ * If a task drops privileges and becomes nondumpable (through a syscall
+ * like setresuid()) while we are trying to access it, we must ensure
+ * that the dumpability is read after the credentials; otherwise,
+ * we may be able to attach to a task that we shouldn't be able to
+ * attach to (as if the task had dropped privileges without becoming
+ * nondumpable).
+ * Pairs with a write barrier in commit_creds().
+ */
+ smp_rmb();
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
@@ -705,6 +715,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
if (arg.nr < 0)
return -EINVAL;
+ /* Ensure arg.off fits in an unsigned long */
+ if (arg.off > ULONG_MAX)
+ return 0;
+
if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
pending = &child->signal->shared_pending;
else
@@ -712,18 +726,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
for (i = 0; i < arg.nr; ) {
kernel_siginfo_t info;
- s32 off = arg.off + i;
+ unsigned long off = arg.off + i;
+ bool found = false;
spin_lock_irq(&child->sighand->siglock);
list_for_each_entry(q, &pending->list, list) {
if (!off--) {
+ found = true;
copy_siginfo(&info, &q->info);
break;
}
}
spin_unlock_irq(&child->sighand->siglock);
- if (off >= 0) /* beyond the end of the list */
+ if (!found) /* beyond the end of the list */
break;
#ifdef CONFIG_COMPAT
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 85f5912d8f70..44b726bab4bd 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -808,17 +808,18 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base, *offset = offsets[offs];
+ u64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqcount_begin(&tk_core.seq);
base = ktime_add(tk->tkr_mono.base, *offset);
+ nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
} while (read_seqcount_retry(&tk_core.seq, seq));
- return base;
-
+ return base + nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a12aff849c04..38277af44f5c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -34,6 +34,7 @@
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>
+#include <linux/memory.h>
#include <trace/events/sched.h>
@@ -2610,10 +2611,12 @@ static void ftrace_run_update_code(int command)
{
int ret;
+ mutex_lock(&text_mutex);
+
ret = ftrace_arch_code_modify_prepare();
FTRACE_WARN_ON(ret);
if (ret)
- return;
+ goto out_unlock;
/*
* By default we use stop_machine() to modify the code.
@@ -2625,6 +2628,9 @@ static void ftrace_run_update_code(int command)
ret = ftrace_arch_code_modify_post_process();
FTRACE_WARN_ON(ret);
+
+out_unlock:
+ mutex_unlock(&text_mutex);
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
@@ -2935,14 +2941,13 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
p = &pg->records[i];
p->flags = rec_flags;
-#ifndef CC_USING_NOP_MCOUNT
/*
* Do the initial record conversion from mcount jump
* to the NOP instructions.
*/
- if (!ftrace_code_disable(mod, p))
+ if (!__is_defined(CC_USING_NOP_MCOUNT) &&
+ !ftrace_code_disable(mod, p))
break;
-#endif
update_cnt++;
}
@@ -4221,10 +4226,13 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
struct ftrace_func_entry *entry;
struct ftrace_func_map *map;
struct hlist_head *hhd;
- int size = 1 << mapper->hash.size_bits;
- int i;
+ int size, i;
+
+ if (!mapper)
+ return;
if (free_func && mapper->hash.count) {
+ size = 1 << mapper->hash.size_bits;
for (i = 0; i < size; i++) {
hhd = &mapper->hash.buckets[i];
hlist_for_each_entry(entry, hhd, hlist) {
@@ -5776,6 +5784,7 @@ void ftrace_module_enable(struct module *mod)
struct ftrace_page *pg;
mutex_lock(&ftrace_lock);
+ mutex_lock(&text_mutex);
if (ftrace_disabled)
goto out_unlock;
@@ -5837,6 +5846,7 @@ void ftrace_module_enable(struct module *mod)
ftrace_arch_code_modify_post_process();
out_unlock:
+ mutex_unlock(&text_mutex);
mutex_unlock(&ftrace_lock);
process_cached_mods(mod->name);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1c80521fd436..83e08b78dbee 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6923,7 +6923,7 @@ struct tracing_log_err {
static DEFINE_MUTEX(tracing_err_log_lock);
-struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
+static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
{
struct tracing_log_err *err;
@@ -8192,7 +8192,7 @@ static const struct file_operations buffer_percent_fops = {
.llseek = default_llseek,
};
-struct dentry *trace_instance_dir;
+static struct dentry *trace_instance_dir;
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 54373d93e251..ba751f993c3b 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1057,7 +1057,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
trace_seq_puts(s, "<stack trace>\n");
- for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
+ for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {
if (trace_seq_has_overflowed(s))
break;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index eb7e06b54741..b55906c77ce0 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -426,8 +426,6 @@ end:
/*
* Argument syntax:
* - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
- *
- * - Remove uprobe: -:[GRP/]EVENT
*/
static int trace_uprobe_create(int argc, const char **argv)
{
@@ -443,10 +441,17 @@ static int trace_uprobe_create(int argc, const char **argv)
ret = 0;
ref_ctr_offset = 0;
- /* argc must be >= 1 */
- if (argv[0][0] == 'r')
+ switch (argv[0][0]) {
+ case 'r':
is_return = true;
- else if (argv[0][0] != 'p' || argc < 2)
+ break;
+ case 'p':
+ break;
+ default:
+ return -ECANCELED;
+ }
+
+ if (argc < 2)
return -ECANCELED;
if (argv[0][1] == ':')
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 7e85d1e37a6e..770c769d7cb7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -168,20 +168,21 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
EXPORT_SYMBOL(gen_pool_create);
/**
- * gen_pool_add_virt - add a new chunk of special memory to the pool
+ * gen_pool_add_owner - add a new chunk of special memory to the pool
* @pool: pool to add new memory chunk to
* @virt: virtual starting address of memory chunk to add to pool
* @phys: physical starting address of memory chunk to add to pool
* @size: size in bytes of the memory chunk to add to pool
* @nid: node id of the node the chunk structure and bitmap should be
* allocated on, or -1
+ * @owner: private data the publisher would like to recall at alloc time
*
* Add a new chunk of special memory to the specified pool.
*
* Returns 0 on success or a -ve errno on failure.
*/
-int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
- size_t size, int nid)
+int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+ size_t size, int nid, void *owner)
{
struct gen_pool_chunk *chunk;
int nbits = size >> pool->min_alloc_order;
@@ -195,6 +196,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
chunk->phys_addr = phys;
chunk->start_addr = virt;
chunk->end_addr = virt + size - 1;
+ chunk->owner = owner;
atomic_long_set(&chunk->avail, size);
spin_lock(&pool->lock);
@@ -203,7 +205,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
return 0;
}
-EXPORT_SYMBOL(gen_pool_add_virt);
+EXPORT_SYMBOL(gen_pool_add_owner);
/**
* gen_pool_virt_to_phys - return the physical address of memory
@@ -260,35 +262,20 @@ void gen_pool_destroy(struct gen_pool *pool)
EXPORT_SYMBOL(gen_pool_destroy);
/**
- * gen_pool_alloc - allocate special memory from the pool
- * @pool: pool to allocate from
- * @size: number of bytes to allocate from the pool
- *
- * Allocate the requested number of bytes from the specified pool.
- * Uses the pool allocation function (with first-fit algorithm by default).
- * Can not be used in NMI handler on architectures without
- * NMI-safe cmpxchg implementation.
- */
-unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
-{
- return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
-}
-EXPORT_SYMBOL(gen_pool_alloc);
-
-/**
- * gen_pool_alloc_algo - allocate special memory from the pool
+ * gen_pool_alloc_algo_owner - allocate special memory from the pool
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
* @algo: algorithm passed from caller
* @data: data passed to algorithm
+ * @owner: optionally retrieve the chunk owner
*
* Allocate the requested number of bytes from the specified pool.
* Uses the pool allocation function (with first-fit algorithm by default).
* Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
*/
-unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
- genpool_algo_t algo, void *data)
+unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
+ genpool_algo_t algo, void *data, void **owner)
{
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
@@ -299,6 +286,9 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
BUG_ON(in_nmi());
#endif
+ if (owner)
+ *owner = NULL;
+
if (size == 0)
return 0;
@@ -326,12 +316,14 @@ retry:
addr = chunk->start_addr + ((unsigned long)start_bit << order);
size = nbits << order;
atomic_long_sub(size, &chunk->avail);
+ if (owner)
+ *owner = chunk->owner;
break;
}
rcu_read_unlock();
return addr;
}
-EXPORT_SYMBOL(gen_pool_alloc_algo);
+EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
/**
* gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
@@ -367,12 +359,14 @@ EXPORT_SYMBOL(gen_pool_dma_alloc);
* @pool: pool to free to
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
+ * @owner: private data stashed at gen_pool_add() time
*
* Free previously allocated special memory back to the specified
* pool. Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
*/
-void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
+ void **owner)
{
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
@@ -382,6 +376,9 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
BUG_ON(in_nmi());
#endif
+ if (owner)
+ *owner = NULL;
+
nbits = (size + (1UL << order) - 1) >> order;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
@@ -392,6 +389,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
BUG_ON(remain);
size = nbits << order;
atomic_long_add(size, &chunk->avail);
+ if (owner)
+ *owner = chunk->owner;
rcu_read_unlock();
return;
}
@@ -399,7 +398,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
rcu_read_unlock();
BUG();
}
-EXPORT_SYMBOL(gen_pool_free);
+EXPORT_SYMBOL(gen_pool_free_owner);
/**
* gen_pool_for_each_chunk - call func for every chunk of generic memory pool
diff --git a/mm/hmm.c b/mm/hmm.c
index c5d840e34b28..f702a3895d05 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1354,9 +1354,8 @@ static void hmm_devmem_ref_release(struct percpu_ref *ref)
complete(&devmem->completion);
}
-static void hmm_devmem_ref_exit(void *data)
+static void hmm_devmem_ref_exit(struct percpu_ref *ref)
{
- struct percpu_ref *ref = data;
struct hmm_devmem *devmem;
devmem = container_of(ref, struct hmm_devmem, ref);
@@ -1433,10 +1432,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
- if (ret)
- return ERR_PTR(ret);
-
size = ALIGN(size, PA_SECTION_SIZE);
addr = min((unsigned long)iomem_resource.end,
(1UL << MAX_PHYSMEM_BITS) - 1);
@@ -1475,6 +1470,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
devmem->pagemap.ref = &devmem->ref;
devmem->pagemap.data = devmem;
devmem->pagemap.kill = hmm_devmem_ref_kill;
+ devmem->pagemap.cleanup = hmm_devmem_ref_exit;
result = devm_memremap_pages(devmem->device, &devmem->pagemap);
if (IS_ERR(result))
@@ -1512,11 +1508,6 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
- &devmem->ref);
- if (ret)
- return ERR_PTR(ret);
-
devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
devmem->pfn_last = devmem->pfn_first +
(resource_size(devmem->resource) >> PAGE_SHIFT);
@@ -1529,6 +1520,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
devmem->pagemap.ref = &devmem->ref;
devmem->pagemap.data = devmem;
devmem->pagemap.kill = hmm_devmem_ref_kill;
+ devmem->pagemap.cleanup = hmm_devmem_ref_exit;
result = devm_memremap_pages(devmem->device, &devmem->pagemap);
if (IS_ERR(result))
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a335f7c1fac4..0f7419938008 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1004,6 +1004,9 @@ static void collapse_huge_page(struct mm_struct *mm,
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
+ result = SCAN_ANY_PROCESS;
+ if (!mmget_still_valid(mm))
+ goto out;
result = hugepage_vma_revalidate(mm, address, &vma);
if (result)
goto out;
diff --git a/mm/list_lru.c b/mm/list_lru.c
index e4709fdaa8e6..927d85be32f6 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -354,7 +354,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
}
return 0;
fail:
- __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
+ __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
return -ENOMEM;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ca0bc6e6be13..ba9138a4a1de 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -691,11 +691,12 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
if (mem_cgroup_disabled())
return;
+ __this_cpu_add(memcg->vmstats_local->stat[idx], val);
+
x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
struct mem_cgroup *mi;
- atomic_long_add(x, &memcg->vmstats_local[idx]);
for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
atomic_long_add(x, &mi->vmstats[idx]);
x = 0;
@@ -745,11 +746,12 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
__mod_memcg_state(memcg, idx, val);
/* Update lruvec */
+ __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
+
x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
struct mem_cgroup_per_node *pi;
- atomic_long_add(x, &pn->lruvec_stat_local[idx]);
for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
atomic_long_add(x, &pi->lruvec_stat[idx]);
x = 0;
@@ -771,11 +773,12 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
if (mem_cgroup_disabled())
return;
+ __this_cpu_add(memcg->vmstats_local->events[idx], count);
+
x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
if (unlikely(x > MEMCG_CHARGE_BATCH)) {
struct mem_cgroup *mi;
- atomic_long_add(x, &memcg->vmevents_local[idx]);
for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
atomic_long_add(x, &mi->vmevents[idx]);
x = 0;
@@ -790,7 +793,12 @@ static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
- return atomic_long_read(&memcg->vmevents_local[event]);
+ long x = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ x += per_cpu(memcg->vmstats_local->events[event], cpu);
+ return x;
}
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
@@ -2191,11 +2199,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
long x;
x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
- if (x) {
- atomic_long_add(x, &memcg->vmstats_local[i]);
+ if (x)
for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
atomic_long_add(x, &memcg->vmstats[i]);
- }
if (i >= NR_VM_NODE_STAT_ITEMS)
continue;
@@ -2205,12 +2211,10 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
pn = mem_cgroup_nodeinfo(memcg, nid);
x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
- if (x) {
- atomic_long_add(x, &pn->lruvec_stat_local[i]);
+ if (x)
do {
atomic_long_add(x, &pn->lruvec_stat[i]);
} while ((pn = parent_nodeinfo(pn, nid)));
- }
}
}
@@ -2218,11 +2222,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
long x;
x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
- if (x) {
- atomic_long_add(x, &memcg->vmevents_local[i]);
+ if (x)
for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
atomic_long_add(x, &memcg->vmevents[i]);
- }
}
}
@@ -4483,8 +4485,15 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
if (!pn)
return 1;
+ pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
+ if (!pn->lruvec_stat_local) {
+ kfree(pn);
+ return 1;
+ }
+
pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
if (!pn->lruvec_stat_cpu) {
+ free_percpu(pn->lruvec_stat_local);
kfree(pn);
return 1;
}
@@ -4506,6 +4515,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
return;
free_percpu(pn->lruvec_stat_cpu);
+ free_percpu(pn->lruvec_stat_local);
kfree(pn);
}
@@ -4516,6 +4526,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
free_percpu(memcg->vmstats_percpu);
+ free_percpu(memcg->vmstats_local);
kfree(memcg);
}
@@ -4544,6 +4555,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (memcg->id.id < 0)
goto fail;
+ memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
+ if (!memcg->vmstats_local)
+ goto fail;
+
memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
if (!memcg->vmstats_percpu)
goto fail;
diff --git a/mm/mlock.c b/mm/mlock.c
index 080f3b36415b..a90099da4fb4 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -636,11 +636,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
* is also counted.
* Return value: previously mlocked page counts
*/
-static int count_mm_mlocked_page_nr(struct mm_struct *mm,
+static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
unsigned long start, size_t len)
{
struct vm_area_struct *vma;
- int count = 0;
+ unsigned long count = 0;
if (mm == NULL)
mm = current->mm;
@@ -797,7 +797,8 @@ SYSCALL_DEFINE1(mlockall, int, flags)
unsigned long lock_limit;
int ret;
- if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)))
+ if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
+ flags == MCL_ONFAULT)
return -EINVAL;
if (!can_do_mlock())
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 99740e1dd273..8c943a6e1696 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -245,14 +245,28 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
{
/*
* If there are parallel threads doing PTE changes on the same range
- * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
- * flush by batching, a thread has stable TLB entry can fail to flush
- * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
- * forcefully if we detect parallel PTE batching threads.
+ * under non-exclusive lock (e.g., mmap_sem read-side) but defer TLB
+ * flush by batching, one thread may end up seeing inconsistent PTEs
+ * and result in having stale TLB entries. So flush TLB forcefully
+ * if we detect parallel PTE batching threads.
+ *
+ * However, some syscalls, e.g. munmap(), may free page tables; this
+ * needs to force flush everything in the given range. Otherwise this
+ * may leave stale TLB entries on architectures, e.g. aarch64, that
+ * can flush at a specific page-table level.
*/
if (mm_tlb_flush_nested(tlb->mm)) {
+ /*
+ * aarch64 yields better performance with fullmm by
+ * avoiding multiple CPUs spamming TLBI messages at the
+ * same time.
+ *
+ * On x86, non-fullmm doesn't yield a significant
+ * difference from fullmm.
+ */
+ tlb->fullmm = 1;
__tlb_reset_range(tlb);
- __tlb_adjust_range(tlb, start, end - start);
+ tlb->freed_tables = 1;
}
tlb_flush_mmu(tlb);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7350a124524b..4c9e150e5ad3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2123,9 +2123,9 @@ static inline void set_area_direct_map(const struct vm_struct *area,
/* Handle removing and resetting vm mappings related to the vm_struct. */
static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
{
- unsigned long addr = (unsigned long)area->addr;
unsigned long start = ULONG_MAX, end = 0;
int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
+ int flush_dmap = 0;
int i;
/*
@@ -2135,8 +2135,8 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
* execute permissions, without leaving a RW+X window.
*/
if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
- set_memory_nx(addr, area->nr_pages);
- set_memory_rw(addr, area->nr_pages);
+ set_memory_nx((unsigned long)area->addr, area->nr_pages);
+ set_memory_rw((unsigned long)area->addr, area->nr_pages);
}
remove_vm_area(area->addr);
@@ -2160,9 +2160,11 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
* the vm_unmap_aliases() flush includes the direct map.
*/
for (i = 0; i < area->nr_pages; i++) {
- if (page_address(area->pages[i])) {
+ unsigned long addr = (unsigned long)page_address(area->pages[i]);
+ if (addr) {
start = min(addr, start);
- end = max(addr, end);
+ end = max(addr + PAGE_SIZE, end);
+ flush_dmap = 1;
}
}
@@ -2172,7 +2174,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
* reset the direct map permissions to the default.
*/
set_area_direct_map(area, set_direct_map_invalid_noflush);
- _vm_unmap_aliases(start, end, 1);
+ _vm_unmap_aliases(start, end, flush_dmap);
set_area_direct_map(area, set_direct_map_default_noflush);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7acd0afdfc2a..7889f583ced9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1505,7 +1505,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
list_for_each_entry_safe(page, next, page_list, lru) {
if (page_is_file_cache(page) && !PageDirty(page) &&
- !__PageMovable(page)) {
+ !__PageMovable(page) && !PageUnevictable(page)) {
ClearPageActive(page);
list_move(&page->lru, &clean_pages);
}
@@ -1953,8 +1953,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
if (global_reclaim(sc))
__count_vm_events(item, nr_reclaimed);
__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
- reclaim_stat->recent_rotated[0] = stat.nr_activate[0];
- reclaim_stat->recent_rotated[1] = stat.nr_activate[1];
+ reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
+ reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
move_pages_to_lru(lruvec, &page_list);
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index bcdd45df3f51..a7a36209a193 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -73,7 +73,7 @@ parse_symbol() {
if [[ "${cache[$module,$address]+isset}" == "isset" ]]; then
local code=${cache[$module,$address]}
else
- local code=$(addr2line -i -e "$objfile" "$address")
+ local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address")
cache[$module,$address]=$code
fi
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 8346a4f7c5d7..a99be508f93d 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -739,14 +739,20 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
rc = security_sid_to_context_inval(sad->state, sad->ssid, &scontext,
&scontext_len);
if (!rc && scontext) {
- audit_log_format(ab, " srawcon=%s", scontext);
+ if (scontext_len && scontext[scontext_len - 1] == '\0')
+ scontext_len--;
+ audit_log_format(ab, " srawcon=");
+ audit_log_n_untrustedstring(ab, scontext, scontext_len);
kfree(scontext);
}
rc = security_sid_to_context_inval(sad->state, sad->tsid, &scontext,
&scontext_len);
if (!rc && scontext) {
- audit_log_format(ab, " trawcon=%s", scontext);
+ if (scontext_len && scontext[scontext_len - 1] == '\0')
+ scontext_len--;
+ audit_log_format(ab, " trawcon=");
+ audit_log_n_untrustedstring(ab, scontext, scontext_len);
kfree(scontext);
}
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 3ec702cf46ca..fea66f6b31bf 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1052,15 +1052,24 @@ static int selinux_add_mnt_opt(const char *option, const char *val, int len,
if (token == Opt_error)
return -EINVAL;
- if (token != Opt_seclabel)
+ if (token != Opt_seclabel) {
val = kmemdup_nul(val, len, GFP_KERNEL);
+ if (!val) {
+ rc = -ENOMEM;
+ goto free_opt;
+ }
+ }
rc = selinux_add_opt(token, val, mnt_opts);
if (unlikely(rc)) {
kfree(val);
- if (*mnt_opts) {
- selinux_free_mnt_opts(*mnt_opts);
- *mnt_opts = NULL;
- }
+ goto free_opt;
+ }
+ return rc;
+
+free_opt:
+ if (*mnt_opts) {
+ selinux_free_mnt_opts(*mnt_opts);
+ *mnt_opts = NULL;
}
return rc;
}
@@ -2616,10 +2625,11 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
char *from = options;
char *to = options;
bool first = true;
+ int rc;
while (1) {
int len = opt_len(from);
- int token, rc;
+ int token;
char *arg = NULL;
token = match_opt_prefix(from, len, &arg);
@@ -2635,15 +2645,15 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
*q++ = c;
}
arg = kmemdup_nul(arg, q - arg, GFP_KERNEL);
+ if (!arg) {
+ rc = -ENOMEM;
+ goto free_opt;
+ }
}
rc = selinux_add_opt(token, arg, mnt_opts);
if (unlikely(rc)) {
kfree(arg);
- if (*mnt_opts) {
- selinux_free_mnt_opts(*mnt_opts);
- *mnt_opts = NULL;
- }
- return rc;
+ goto free_opt;
}
} else {
if (!first) { // copy with preceding comma
@@ -2661,6 +2671,13 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
}
*to = '\0';
return 0;
+
+free_opt:
+ if (*mnt_opts) {
+ selinux_free_mnt_opts(*mnt_opts);
+ *mnt_opts = NULL;
+ }
+ return rc;
}
static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 0de725f88bed..d99450b4f511 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -68,6 +68,7 @@ static struct {
int len;
int opt;
} smk_mount_opts[] = {
+ {"smackfsdef", sizeof("smackfsdef") - 1, Opt_fsdefault},
A(fsdefault), A(fsfloor), A(fshat), A(fsroot), A(fstransmute)
};
#undef A
@@ -682,11 +683,12 @@ static int smack_fs_context_dup(struct fs_context *fc,
}
static const struct fs_parameter_spec smack_param_specs[] = {
- fsparam_string("fsdefault", Opt_fsdefault),
- fsparam_string("fsfloor", Opt_fsfloor),
- fsparam_string("fshat", Opt_fshat),
- fsparam_string("fsroot", Opt_fsroot),
- fsparam_string("fstransmute", Opt_fstransmute),
+ fsparam_string("smackfsdef", Opt_fsdefault),
+ fsparam_string("smackfsdefault", Opt_fsdefault),
+ fsparam_string("smackfsfloor", Opt_fsfloor),
+ fsparam_string("smackfshat", Opt_fshat),
+ fsparam_string("smackfsroot", Opt_fsroot),
+ fsparam_string("smackfstransmute", Opt_fstransmute),
{}
};
diff --git a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c
index 37e47fa7b0e3..81f7edc560d0 100644
--- a/sound/firewire/motu/motu-stream.c
+++ b/sound/firewire/motu/motu-stream.c
@@ -344,7 +344,7 @@ static void destroy_stream(struct snd_motu *motu,
}
amdtp_stream_destroy(stream);
- fw_iso_resources_free(resources);
+ fw_iso_resources_destroy(resources);
}
int snd_motu_stream_init_duplex(struct snd_motu *motu)
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 5e31c460a90f..9fd145cc4b07 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -148,9 +148,6 @@ static int detect_quirks(struct snd_oxfw *oxfw)
oxfw->midi_input_ports = 0;
oxfw->midi_output_ports = 0;
- /* Output stream exists but no data channels are useful. */
- oxfw->has_output = false;
-
return snd_oxfw_scs1x_add(oxfw);
}
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index ad1b55a1f045..a3a113ef5d56 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -162,7 +162,6 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_init);
void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev)
{
snd_hdac_device_exit(hdev);
- kfree(hdev);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_exit);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index c5e46df9c548..6c51b8363f8b 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -826,7 +826,14 @@ static int snd_hda_codec_dev_free(struct snd_device *device)
if (codec->core.type == HDA_DEV_LEGACY)
snd_hdac_device_unregister(&codec->core);
codec_display_power(codec, false);
- put_device(hda_codec_dev(codec));
+
+ /*
+ * In the case of ASoC HD-audio bus, the device refcount is released in
+ * snd_hdac_ext_bus_device_remove() explicitly.
+ */
+ if (codec->core.type == HDA_DEV_LEGACY)
+ put_device(hda_codec_dev(codec));
+
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 974244978509..5b3c26991f26 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4120,18 +4120,19 @@ static struct coef_fw alc225_pre_hsmode[] = {
static void alc_headset_mode_unplugged(struct hda_codec *codec)
{
static struct coef_fw coef0255[] = {
+ WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
{}
};
- static struct coef_fw coef0255_1[] = {
- WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
- {}
- };
static struct coef_fw coef0256[] = {
WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+ WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
+ WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
+ WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
{}
};
static struct coef_fw coef0233[] = {
@@ -4194,13 +4195,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
switch (codec->core.vendor_id) {
case 0x10ec0255:
- alc_process_coef_fw(codec, coef0255_1);
alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
- alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0234:
case 0x10ec0274:
@@ -4253,6 +4252,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
{}
};
+ static struct coef_fw coef0256[] = {
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/
+ WRITE_COEFEX(0x57, 0x03, 0x09a3),
+ WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
+ {}
+ };
static struct coef_fw coef0233[] = {
UPDATE_COEF(0x35, 0, 1<<14),
WRITE_COEF(0x06, 0x2100),
@@ -4300,14 +4305,19 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
};
switch (codec->core.vendor_id) {
- case 0x10ec0236:
case 0x10ec0255:
- case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
alc_process_coef_fw(codec, coef0255);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0236:
+ case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x45, 0xc489);
+ snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+ alc_process_coef_fw(codec, coef0256);
+ snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+ break;
case 0x10ec0234:
case 0x10ec0274:
case 0x10ec0294:
@@ -4389,6 +4399,14 @@ static void alc_headset_mode_default(struct hda_codec *codec)
WRITE_COEF(0x49, 0x0049),
{}
};
+ static struct coef_fw coef0256[] = {
+ WRITE_COEF(0x45, 0xc489),
+ WRITE_COEFEX(0x57, 0x03, 0x0da3),
+ WRITE_COEF(0x49, 0x0049),
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
+ WRITE_COEF(0x06, 0x6100),
+ {}
+ };
static struct coef_fw coef0233[] = {
WRITE_COEF(0x06, 0x2100),
WRITE_COEF(0x32, 0x4ea3),
@@ -4439,11 +4457,16 @@ static void alc_headset_mode_default(struct hda_codec *codec)
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
break;
- case 0x10ec0236:
case 0x10ec0255:
- case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
+ case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+ alc_write_coef_idx(codec, 0x45, 0xc089);
+ msleep(50);
+ alc_process_coef_fw(codec, coef0256);
+ break;
case 0x10ec0234:
case 0x10ec0274:
case 0x10ec0294:
@@ -4487,8 +4510,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
};
static struct coef_fw coef0256[] = {
WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
- WRITE_COEF(0x1b, 0x0c6b),
- WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ WRITE_COEF(0x1b, 0x0e6b),
{}
};
static struct coef_fw coef0233[] = {
@@ -4606,8 +4628,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
};
static struct coef_fw coef0256[] = {
WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
- WRITE_COEF(0x1b, 0x0c6b),
- WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ WRITE_COEF(0x1b, 0x0e6b),
{}
};
static struct coef_fw coef0233[] = {
@@ -4739,13 +4760,37 @@ static void alc_determine_headset_type(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
- case 0x10ec0236:
case 0x10ec0255:
+ alc_process_coef_fw(codec, coef0255);
+ msleep(300);
+ val = alc_read_coef_idx(codec, 0x46);
+ is_ctia = (val & 0x0070) == 0x0070;
+ break;
+ case 0x10ec0236:
case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+ alc_write_coef_idx(codec, 0x06, 0x6104);
+ alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
+
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ msleep(80);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
alc_process_coef_fw(codec, coef0255);
msleep(300);
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x0070) == 0x0070;
+
+ alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
+ alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ msleep(80);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
break;
case 0x10ec0234:
case 0x10ec0274:
@@ -6210,15 +6255,13 @@ static const struct hda_fixup alc269_fixups[] = {
.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
},
[ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = (const struct hda_verb[]) {
- /* Enable the Mic */
- { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
- { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
- {}
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { }
},
.chained = true,
- .chain_id = ALC269_FIXUP_LIFEBOOK_EXTMIC
+ .chain_id = ALC255_FIXUP_HEADSET_MODE
},
[ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
@@ -7263,10 +7306,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x19, 0x0181303F},
{0x21, 0x0221102f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
- {0x12, 0x90a60140},
- {0x14, 0x90170120},
- {0x21, 0x02211030}),
- SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
{0x12, 0x90a601c0},
{0x14, 0x90171120},
{0x21, 0x02211030}),
diff --git a/sound/pci/ice1712/ews.c b/sound/pci/ice1712/ews.c
index 3729b132ad85..fe08dd9f1564 100644
--- a/sound/pci/ice1712/ews.c
+++ b/sound/pci/ice1712/ews.c
@@ -812,7 +812,7 @@ static int snd_ice1712_6fire_read_pca(struct snd_ice1712 *ice, unsigned char reg
snd_i2c_lock(ice->i2c);
byte = reg;
- if (snd_i2c_sendbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1)) {
+ if (snd_i2c_sendbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1) != 1) {
snd_i2c_unlock(ice->i2c);
dev_err(ice->card->dev, "cannot send pca\n");
return -EIO;
diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
index eab7c76cfcd9..71562154c0b1 100644
--- a/sound/soc/codecs/ak4458.c
+++ b/sound/soc/codecs/ak4458.c
@@ -304,7 +304,10 @@ static int ak4458_rstn_control(struct snd_soc_component *component, int bit)
AK4458_00_CONTROL1,
AK4458_RSTN_MASK,
0x0);
- return ret;
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
static int ak4458_hw_params(struct snd_pcm_substream *substream,
@@ -536,9 +539,10 @@ static void ak4458_power_on(struct ak4458_priv *ak4458)
}
}
-static void ak4458_init(struct snd_soc_component *component)
+static int ak4458_init(struct snd_soc_component *component)
{
struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
+ int ret;
/* External Mute ON */
if (ak4458->mute_gpiod)
@@ -546,21 +550,21 @@ static void ak4458_init(struct snd_soc_component *component)
ak4458_power_on(ak4458);
- snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
+ ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
0x80, 0x80); /* ACKS bit = 1; 10000000 */
+ if (ret < 0)
+ return ret;
- ak4458_rstn_control(component, 1);
+ return ak4458_rstn_control(component, 1);
}
static int ak4458_probe(struct snd_soc_component *component)
{
struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
- ak4458_init(component);
-
ak4458->fs = 48000;
- return 0;
+ return ak4458_init(component);
}
static void ak4458_remove(struct snd_soc_component *component)
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index ab27d2b94d02..c0190ec59e74 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -60,7 +60,7 @@ static const struct reg_default cs4265_reg_defaults[] = {
static bool cs4265_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
+ case CS4265_CHIP_ID ... CS4265_MAX_REGISTER:
return true;
default:
return false;
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index ebb9e0cf8364..28a4ac36c4f8 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -558,6 +558,7 @@ static int cs42xx8_runtime_resume(struct device *dev)
msleep(5);
regcache_cache_only(cs42xx8->regmap, false);
+ regcache_mark_dirty(cs42xx8->regmap);
ret = regcache_sync(cs42xx8->regmap);
if (ret) {
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 7619ea31ab50..ada8c25e643d 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1909,6 +1909,21 @@ static int max98090_configure_dmic(struct max98090_priv *max98090,
return 0;
}
+static int max98090_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct max98090_priv *max98090 = snd_soc_component_get_drvdata(component);
+ unsigned int fmt = max98090->dai_fmt;
+
+ /* Remove 24-bit format support if it is not in right justified mode. */
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_RIGHT_J) {
+ substream->runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(substream->runtime, 0, 16, 16);
+ }
+ return 0;
+}
+
static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -2316,6 +2331,7 @@ EXPORT_SYMBOL_GPL(max98090_mic_detect);
#define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops max98090_dai_ops = {
+ .startup = max98090_dai_startup,
.set_sysclk = max98090_dai_set_sysclk,
.set_fmt = max98090_dai_set_fmt,
.set_tdm_slot = max98090_set_tdm_slot,
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index adf59039a3b6..cdd312db3e78 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -405,6 +405,8 @@ static int rt274_mic_detect(struct snd_soc_component *component,
{
struct rt274_priv *rt274 = snd_soc_component_get_drvdata(component);
+ rt274->jack = jack;
+
if (jack == NULL) {
/* Disable jack detection */
regmap_update_bits(rt274->regmap, RT274_EAPD_GPIO_IRQ_CTRL,
@@ -412,7 +414,6 @@ static int rt274_mic_detect(struct snd_soc_component *component,
return 0;
}
- rt274->jack = jack;
regmap_update_bits(rt274->regmap, RT274_EAPD_GPIO_IRQ_CTRL,
RT274_IRQ_EN, RT274_IRQ_EN);
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 9a037108b1ae..a746e11ccfe3 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2882,6 +2882,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
RT5670_DEV_GPIO |
RT5670_JD_MODE3),
},
+ {
+ .callback = rt5670_quirk_cb,
+ .ident = "Aegex 10 tablet (RU2)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AEGEX"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "RU2"),
+ },
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC2_INR |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE3),
+ },
{}
};
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index 84b6bd8b50e1..a4dfa0345c6e 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -101,7 +101,7 @@ static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
u32 word_size = min_t(u32, dstlen, 8);
for (w = 0; w < dstlen; w += word_size) {
- for (i = 0; i < word_size; i++) {
+ for (i = 0; i < word_size && i + w < dstlen; i++) {
si = w + word_size - i - 1;
dst[w + i] = si < srclen ? src[si] : 0;
}
@@ -152,8 +152,9 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
status |= spi_sync(g_spi, &m);
mutex_unlock(&spi_mutex);
+
/* Copy data back to caller buffer */
- rt5677_spi_reverse(cb + offset, t[1].len, body, t[1].len);
+ rt5677_spi_reverse(cb + offset, len - offset, body, t[1].len);
}
return status;
}
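As an aside (not from the patch), a stand-alone sketch of the reversal helper with the new bound, rewritten for user space with assumed types: bytes are swapped within 8-byte SPI words, and the added `i + w < dstlen` test keeps the final partial word from writing past the destination now that the caller passes the real remaining length.

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int u32;

static void spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
{
	u32 w, i, si;
	u32 word_size = dstlen < 8 ? dstlen : 8;

	for (w = 0; w < dstlen; w += word_size)
		for (i = 0; i < word_size && i + w < dstlen; i++) {
			si = w + word_size - i - 1;
			dst[w + i] = si < srclen ? src[si] : 0;
		}
}

int main(void)
{
	static const u8 src[] = "ABCDEFGHIJ";	/* 10 data bytes + NUL */
	u8 dst[10];

	spi_reverse(dst, sizeof(dst), src, sizeof(src) - 1);
	printf("%.10s\n", dst);	/* "HGFEDCBA"; the partial tail word is zero-filled */
	return 0;
}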
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 0b937924d2e4..ea035c12a325 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -282,8 +282,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
return -EINVAL;
}
- if ((outrate > 8000 && outrate < 30000) &&
- (outrate/inrate > 24 || inrate/outrate > 8)) {
+ if ((outrate >= 8000 && outrate <= 30000) &&
+ (outrate > 24 * inrate || inrate > 8 * outrate)) {
pair_err("exceed supported ratio range [1/24, 8] for \
inrate/outrate: %d/%d\n", inrate, outrate);
return -EINVAL;
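A small worked example (illustrative only) of why the division-based ratio check let marginal cases through: integer division truncates, so a ratio just above 24 passed the old test but is caught by the multiplication form. The surrounding rate-range check is omitted here.

#include <stdio.h>

int main(void)
{
	int inrate = 1000, outrate = 24500;	/* real ratio 24.5, out of range */

	int old_reject = (outrate / inrate > 24) || (inrate / outrate > 8);
	int new_reject = (outrate > 24 * inrate) || (inrate > 8 * outrate);

	/* old_reject == 0 (24500/1000 truncates to 24), new_reject == 1 */
	printf("old rejects: %d, new rejects: %d\n", old_reject, new_reject);
	return 0;
}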
diff --git a/sound/soc/intel/atom/sst/sst_pvt.c b/sound/soc/intel/atom/sst/sst_pvt.c
index 1ec298dd0e4f..13db2854db3e 100644
--- a/sound/soc/intel/atom/sst/sst_pvt.c
+++ b/sound/soc/intel/atom/sst/sst_pvt.c
@@ -158,11 +158,11 @@ int sst_create_ipc_msg(struct ipc_post **arg, bool large)
{
struct ipc_post *msg;
- msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
if (!msg)
return -ENOMEM;
if (large) {
- msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
+ msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
if (!msg->mailbox_data) {
kfree(msg);
return -ENOMEM;
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 1f00d473793f..2fe1ce879123 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -487,6 +487,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
}
/* override platform name, if required */
+ byt_cht_es8316_card.dev = dev;
platform_name = mach->mach_params.platform;
ret = snd_soc_fixup_dai_links_platform_name(&byt_cht_es8316_card,
@@ -567,7 +568,6 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
(quirk & BYT_CHT_ES8316_MONO_SPEAKER) ? "mono" : "stereo",
mic_name[BYT_CHT_ES8316_MAP(quirk)]);
byt_cht_es8316_card.long_name = long_name;
- byt_cht_es8316_card.dev = dev;
snd_soc_card_set_drvdata(&byt_cht_es8316_card, priv);
ret = devm_snd_soc_register_card(dev, &byt_cht_es8316_card);
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index d72623f508b7..613b37172441 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -446,6 +446,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
/* override platform name, if required */
+ snd_soc_card_cht.dev = &pdev->dev;
mach = (&pdev->dev)->platform_data;
platform_name = mach->mach_params.platform;
@@ -455,7 +456,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
return ret_val;
/* register the soc card */
- snd_soc_card_cht.dev = &pdev->dev;
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
if (drv->quirks & QUIRK_PMC_PLT_CLK_0)
diff --git a/sound/soc/intel/boards/cht_bsw_nau8824.c b/sound/soc/intel/boards/cht_bsw_nau8824.c
index 1c17f82fc922..b0d658e3d3f7 100644
--- a/sound/soc/intel/boards/cht_bsw_nau8824.c
+++ b/sound/soc/intel/boards/cht_bsw_nau8824.c
@@ -249,6 +249,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
/* override platform name, if required */
+ snd_soc_card_cht.dev = &pdev->dev;
mach = (&pdev->dev)->platform_data;
platform_name = mach->mach_params.platform;
@@ -258,7 +259,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
return ret_val;
/* register the soc card */
- snd_soc_card_cht.dev = &pdev->dev;
ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
if (ret_val) {
dev_err(&pdev->dev,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 1731cc0fac25..028e571f6a77 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -418,6 +418,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
/* override platform name, if required */
+ snd_soc_card_cht.dev = &pdev->dev;
platform_name = mach->mach_params.platform;
ret_val = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cht,
@@ -435,7 +436,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
/* register the soc card */
- snd_soc_card_cht.dev = &pdev->dev;
ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
if (ret_val) {
dev_err(&pdev->dev,
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index f28fb98cc306..3343dbcd506f 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -29,9 +29,10 @@
#define SOF_RT5682_MCLK_EN BIT(3)
#define SOF_RT5682_MCLK_24MHZ BIT(4)
#define SOF_SPEAKER_AMP_PRESENT BIT(5)
-#define SOF_RT5682_SSP_AMP(quirk) ((quirk) & GENMASK(8, 6))
-#define SOF_RT5682_SSP_AMP_MASK (GENMASK(8, 6))
#define SOF_RT5682_SSP_AMP_SHIFT 6
+#define SOF_RT5682_SSP_AMP_MASK (GENMASK(8, 6))
+#define SOF_RT5682_SSP_AMP(quirk) \
+ (((quirk) << SOF_RT5682_SSP_AMP_SHIFT) & SOF_RT5682_SSP_AMP_MASK)
/* Default: MCLK on, MCLK 19.2M, SSP0 */
static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN |
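A tiny sketch (hypothetical names mirroring the macros above, not part of the diff) of what the reordered definition fixes: the old macro masked the raw quirk value without shifting it, so small SSP port numbers collapsed to zero; the new definition shifts the value into bits 8:6 first.

#include <stdio.h>

/* Kernel GENMASK(8, 6) covers bits 6..8, i.e. 0x1c0. */
#define SSP_AMP_SHIFT	6
#define SSP_AMP_MASK	0x1c0UL
#define SSP_AMP_OLD(q)	((q) & SSP_AMP_MASK)			/* old: no shift */
#define SSP_AMP_NEW(q)	(((q) << SSP_AMP_SHIFT) & SSP_AMP_MASK)	/* new: shift first */

int main(void)
{
	unsigned long old_q = SSP_AMP_OLD(1);	/* 1 & 0x1c0 == 0: SSP port lost */
	unsigned long new_q = SSP_AMP_NEW(1);	/* (1 << 6) == 0x40: port 1 kept */

	printf("old=0x%lx new=0x%lx decoded=%lu\n",
	       old_q, new_q, (new_q & SSP_AMP_MASK) >> SSP_AMP_SHIFT);
	return 0;
}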
@@ -144,9 +145,9 @@ static int sof_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
jack = &ctx->sof_headset;
snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
ret = snd_soc_component_set_jack(component, jack, NULL);
if (ret) {
diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
index abd34fa27749..55e80c3d2af0 100644
--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
@@ -13,6 +13,7 @@ static unsigned long byt_machine_id;
#define BYT_THINKPAD_10 1
#define BYT_POV_P1006W 2
+#define BYT_AEGEX_10 3
static int byt_thinkpad10_quirk_cb(const struct dmi_system_id *id)
{
@@ -26,6 +27,12 @@ static int byt_pov_p1006w_quirk_cb(const struct dmi_system_id *id)
return 1;
}
+static int byt_aegex10_quirk_cb(const struct dmi_system_id *id)
+{
+ byt_machine_id = BYT_AEGEX_10;
+ return 1;
+}
+
static const struct dmi_system_id byt_table[] = {
{
.callback = byt_thinkpad10_quirk_cb,
@@ -66,9 +73,18 @@ static const struct dmi_system_id byt_table[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
},
},
+ {
+ /* Aegex 10 tablet (RU2) */
+ .callback = byt_aegex10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AEGEX"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "RU2"),
+ },
+ },
{ }
};
+/* The Thinkpad 10 and Aegex 10 tablets have the same ID problem */
static struct snd_soc_acpi_mach byt_thinkpad_10 = {
.id = "10EC5640",
.drv_name = "cht-bsw-rt5672",
@@ -95,6 +111,7 @@ static struct snd_soc_acpi_mach *byt_quirk(void *arg)
switch (byt_machine_id) {
case BYT_THINKPAD_10:
+ case BYT_AEGEX_10:
return &byt_thinkpad_10;
case BYT_POV_P1006W:
return &byt_pov_p1006w;
diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
index df7c52cad5c3..c36c0aa4f683 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
@@ -29,17 +29,17 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = {
.sof_tplg_filename = "sof-cnl-rt274.tplg",
},
{
- .id = "10EC5682",
+ .id = "MX98357A",
.drv_name = "sof_rt5682",
+ .quirk_data = &cml_codecs,
.sof_fw_filename = "sof-cnl.ri",
- .sof_tplg_filename = "sof-cml-rt5682.tplg",
+ .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
},
{
- .id = "MX98357A",
+ .id = "10EC5682",
.drv_name = "sof_rt5682",
- .quirk_data = &cml_codecs,
.sof_fw_filename = "sof-cnl.ri",
- .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
+ .sof_tplg_filename = "sof-cml-rt5682.tplg",
},
{},
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 933ab51af05b..111e44b64b38 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -133,7 +133,7 @@ config SND_SOC_MT8183_MT6358_TS3A227E_MAX98357A
config SND_SOC_MT8183_DA7219_MAX98357A
tristate "ASoC Audio driver for MT8183 with DA7219 MAX98357A codec"
- depends on SND_SOC_MT8183
+ depends on SND_SOC_MT8183 && I2C
select SND_SOC_MT6358
select SND_SOC_MAX98357A
select SND_SOC_DA7219
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 2403bec2fccf..41c0cfaf2db5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -228,7 +228,10 @@ static void soc_init_card_debugfs(struct snd_soc_card *card)
static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
+ if (!card->debugfs_card_root)
+ return;
debugfs_remove_recursive(card->debugfs_card_root);
+ card->debugfs_card_root = NULL;
}
static void snd_soc_debugfs_init(void)
@@ -2037,8 +2040,10 @@ match:
static int soc_cleanup_card_resources(struct snd_soc_card *card)
{
/* free the ALSA card at first; this syncs with pending operations */
- if (card->snd_card)
+ if (card->snd_card) {
snd_card_free(card->snd_card);
+ card->snd_card = NULL;
+ }
/* remove and free each DAI */
soc_remove_dai_links(card);
@@ -2065,6 +2070,16 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
int ret, i, order;
mutex_lock(&client_mutex);
+ for_each_card_prelinks(card, i, dai_link) {
+ ret = soc_init_dai_link(card, dai_link);
+ if (ret) {
+ soc_cleanup_platform(card);
+ dev_err(card->dev, "ASoC: failed to init link %s: %d\n",
+ dai_link->name, ret);
+ mutex_unlock(&client_mutex);
+ return ret;
+ }
+ }
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
card->dapm.bias_level = SND_SOC_BIAS_OFF;
@@ -2789,26 +2804,9 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
*/
int snd_soc_register_card(struct snd_soc_card *card)
{
- int i, ret;
- struct snd_soc_dai_link *link;
-
if (!card->name || !card->dev)
return -EINVAL;
- mutex_lock(&client_mutex);
- for_each_card_prelinks(card, i, link) {
-
- ret = soc_init_dai_link(card, link);
- if (ret) {
- soc_cleanup_platform(card);
- dev_err(card->dev, "ASoC: failed to init link %s\n",
- link->name);
- mutex_unlock(&client_mutex);
- return ret;
- }
- }
- mutex_unlock(&client_mutex);
-
dev_set_drvdata(card->dev, card);
snd_soc_initialize_card_lists(card);
@@ -2839,12 +2837,14 @@ static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
snd_soc_dapm_shutdown(card);
snd_soc_flush_all_delayed_work(card);
+ mutex_lock(&client_mutex);
/* remove all components used by DAI links on this card */
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
soc_remove_link_components(card, rtd, order);
}
}
+ mutex_unlock(&client_mutex);
soc_cleanup_card_resources(card);
if (!unregister)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 81a7a12196ff..55f8278077f4 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2193,7 +2193,10 @@ static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
{
+ if (!dapm->debugfs_dapm)
+ return;
debugfs_remove_recursive(dapm->debugfs_dapm);
+ dapm->debugfs_dapm = NULL;
}
#else
@@ -3831,8 +3834,8 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
ret);
goto out;
}
- source->active++;
}
+ source->active++;
ret = soc_dai_hw_params(&substream, params, source);
if (ret < 0)
goto out;
@@ -3853,8 +3856,8 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
ret);
goto out;
}
- sink->active++;
}
+ sink->active++;
ret = soc_dai_hw_params(&substream, params, sink);
if (ret < 0)
goto out;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 0a4f60c7a188..c46ad0f66292 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2479,7 +2479,8 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
continue;
dev_dbg(be->dev, "ASoC: prepare BE %s\n",
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index 048c7b034d70..71d87a86f060 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -45,7 +45,10 @@ config SND_SOC_SOF_OPTIONS
if SND_SOC_SOF_OPTIONS
config SND_SOC_SOF_NOCODEC
- tristate "SOF nocodec mode Support"
+ tristate
+
+config SND_SOC_SOF_NOCODEC_SUPPORT
+ bool "SOF nocodec mode support"
help
This adds support for a dummy/nocodec machine driver fallback
option if no known codec is detected. This is typically only
@@ -81,7 +84,7 @@ if SND_SOC_SOF_DEBUG
config SND_SOC_SOF_FORCE_NOCODEC_MODE
bool "SOF force nocodec Mode"
- depends on SND_SOC_SOF_NOCODEC
+ depends on SND_SOC_SOF_NOCODEC_SUPPORT
help
This forces SOF to use dummy/nocodec as machine driver, even
though there is a codec detected on the real platform. This is
@@ -136,6 +139,7 @@ endif ## SND_SOC_SOF_OPTIONS
config SND_SOC_SOF
tristate
select SND_SOC_TOPOLOGY
+ select SND_SOC_SOF_NOCODEC if SND_SOC_SOF_NOCODEC_SUPPORT
help
This option is not user-selectable but automagically handled by
'select' statements at a higher level
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
index 11762c4580f1..84e2cbfbbcbb 100644
--- a/sound/soc/sof/control.c
+++ b/sound/soc/sof/control.c
@@ -349,6 +349,7 @@ int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
struct snd_sof_dev *sdev = scontrol->sdev;
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
struct sof_abi_hdr *data = cdata->data;
+ size_t size = data->size + sizeof(*data);
int ret, err;
if (be->max > sizeof(ucontrol->value.bytes.data)) {
@@ -358,10 +359,10 @@ int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
return -EINVAL;
}
- if (data->size > be->max) {
+ if (size > be->max) {
dev_err_ratelimited(sdev->dev,
- "error: size too big %d bytes max is %d\n",
- data->size, be->max);
+ "error: size too big %zu bytes max is %d\n",
+ size, be->max);
return -EINVAL;
}
@@ -375,7 +376,7 @@ int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
}
/* copy from kcontrol */
- memcpy(data, ucontrol->value.bytes.data, data->size);
+ memcpy(data, ucontrol->value.bytes.data, size);
/* notify DSP of byte control updates */
snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 32105e0fabe8..5beda47cdf9f 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -382,7 +382,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
if (IS_ERR(plat_data->pdev_mach)) {
ret = PTR_ERR(plat_data->pdev_mach);
- goto comp_err;
+ goto fw_run_err;
}
dev_dbg(sdev->dev, "created machine %s\n",
@@ -393,8 +393,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
return 0;
-comp_err:
- snd_soc_unregister_component(sdev->dev);
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
fw_run_err:
snd_sof_fw_unload(sdev);
fw_load_err:
@@ -403,6 +402,21 @@ ipc_err:
snd_sof_free_debug(sdev);
dbg_err:
snd_sof_remove(sdev);
+#else
+
+ /*
+ * when the probe_continue is handled in a work queue, the
+ * probe does not fail so we don't release resources here.
+ * They will be released with an explicit call to
+ * snd_sof_device_remove() when the PCI/ACPI device is removed
+ */
+
+fw_run_err:
+fw_load_err:
+ipc_err:
+dbg_err:
+
+#endif
return ret;
}
@@ -484,7 +498,6 @@ int snd_sof_device_remove(struct device *dev)
snd_sof_ipc_free(sdev);
snd_sof_free_debug(sdev);
snd_sof_free_trace(sdev);
- snd_sof_remove(sdev);
/*
* Unregister machine driver. This will unbind the snd_card which
@@ -494,6 +507,14 @@ int snd_sof_device_remove(struct device *dev)
if (!IS_ERR_OR_NULL(pdata->pdev_mach))
platform_device_unregister(pdata->pdev_mach);
+ /*
+ * Unregistering the machine driver results in unloading the topology.
+ * Some widgets, ex: scheduler, attempt to power down the core they are
+ * scheduled on, when they are unloaded. Therefore, the DSP must be
+ * removed only after the topology has been unloaded.
+ */
+ snd_sof_remove(sdev);
+
/* release firmware */
release_firmware(pdata->fw);
pdata->fw = NULL;
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index 065cb868bdfa..70d524ef9bc0 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -220,17 +220,20 @@ static void bdw_get_registers(struct snd_sof_dev *sdev,
struct sof_ipc_panic_info *panic_info,
u32 *stack, size_t stack_words)
{
- /* first read regsisters */
- sof_mailbox_read(sdev, sdev->dsp_oops_offset, xoops, sizeof(*xoops));
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
/* then get panic info */
- sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops),
- panic_info, sizeof(*panic_info));
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
/* then get the stack */
- sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops) +
- sizeof(*panic_info), stack,
- stack_words * sizeof(u32));
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
}
static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
@@ -283,6 +286,8 @@ static irqreturn_t bdw_irq_thread(int irq, void *context)
SHIM_IMRX, SHIM_IMRX_DONE,
SHIM_IMRX_DONE);
+ spin_lock_irq(&sdev->ipc_lock);
+
/*
* handle immediate reply from DSP core. If the msg is
* found, set done bit in cmd_done which is called at the
@@ -294,6 +299,8 @@ static irqreturn_t bdw_irq_thread(int irq, void *context)
snd_sof_ipc_reply(sdev, ipcx);
bdw_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
}
ipcd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
@@ -485,7 +492,6 @@ static void bdw_get_reply(struct snd_sof_dev *sdev)
{
struct snd_sof_ipc_msg *msg = sdev->msg;
struct sof_ipc_reply reply;
- unsigned long flags;
int ret = 0;
/*
@@ -501,8 +507,6 @@ static void bdw_get_reply(struct snd_sof_dev *sdev)
/* get reply */
sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
- spin_lock_irqsave(&sdev->ipc_lock, flags);
-
if (reply.error < 0) {
memcpy(msg->reply_data, &reply, sizeof(reply));
ret = reply.error;
@@ -521,8 +525,6 @@ static void bdw_get_reply(struct snd_sof_dev *sdev)
}
msg->reply_error = ret;
-
- spin_unlock_irqrestore(&sdev->ipc_lock, flags);
}
static void bdw_host_done(struct snd_sof_dev *sdev)
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index 7bf9143d3106..39d1ae01c45d 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -265,17 +265,20 @@ static void byt_get_registers(struct snd_sof_dev *sdev,
struct sof_ipc_panic_info *panic_info,
u32 *stack, size_t stack_words)
{
+ u32 offset = sdev->dsp_oops_offset;
+
/* first read registers */
- sof_mailbox_read(sdev, sdev->dsp_oops_offset, xoops, sizeof(*xoops));
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
/* then get panic info */
- sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops),
- panic_info, sizeof(*panic_info));
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
/* then get the stack */
- sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops) +
- sizeof(*panic_info), stack,
- stack_words * sizeof(u32));
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
}
static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
@@ -329,6 +332,9 @@ static irqreturn_t byt_irq_thread(int irq, void *context)
SHIM_IMRX,
SHIM_IMRX_DONE,
SHIM_IMRX_DONE);
+
+ spin_lock_irq(&sdev->ipc_lock);
+
/*
* handle immediate reply from DSP core. If the msg is
* found, set done bit in cmd_done which is called at the
@@ -340,6 +346,8 @@ static irqreturn_t byt_irq_thread(int irq, void *context)
snd_sof_ipc_reply(sdev, ipcx);
byt_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
}
/* new message from DSP */
@@ -383,7 +391,6 @@ static void byt_get_reply(struct snd_sof_dev *sdev)
{
struct snd_sof_ipc_msg *msg = sdev->msg;
struct sof_ipc_reply reply;
- unsigned long flags;
int ret = 0;
/*
@@ -399,8 +406,6 @@ static void byt_get_reply(struct snd_sof_dev *sdev)
/* get reply */
sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
- spin_lock_irqsave(&sdev->ipc_lock, flags);
-
if (reply.error < 0) {
memcpy(msg->reply_data, &reply, sizeof(reply));
ret = reply.error;
@@ -419,8 +424,6 @@ static void byt_get_reply(struct snd_sof_dev *sdev)
}
msg->reply_error = ret;
-
- spin_unlock_irqrestore(&sdev->ipc_lock, flags);
}
static void byt_host_done(struct snd_sof_dev *sdev)
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
index 08a1a3d3c08d..b2eba7adcad8 100644
--- a/sound/soc/sof/intel/cnl.c
+++ b/sound/soc/sof/intel/cnl.c
@@ -64,6 +64,8 @@ static irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
CNL_DSP_REG_HIPCCTL,
CNL_DSP_REG_HIPCCTL_DONE, 0);
+ spin_lock_irq(&sdev->ipc_lock);
+
/* handle immediate reply from DSP core */
hda_dsp_ipc_get_reply(sdev);
snd_sof_ipc_reply(sdev, msg);
@@ -75,6 +77,8 @@ static irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
cnl_ipc_dsp_done(sdev);
+ spin_unlock_irq(&sdev->ipc_lock);
+
ret = IRQ_HANDLED;
}
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
index 2c3645736e1f..07bc123112c9 100644
--- a/sound/soc/sof/intel/hda-ctrl.c
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -161,21 +161,105 @@ int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
return 0;
}
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-/*
- * While performing reset, controller may not come back properly and causing
- * issues, so recommendation is to set CGCTL.MISCBDCGE to 0 then do reset
- * (init chip) and then again set CGCTL.MISCBDCGE to 1
- */
int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
{
struct hdac_bus *bus = sof_to_bus(sdev);
- int ret;
+ struct hdac_stream *stream;
+ int sd_offset, ret = 0;
+
+ if (bus->chip_init)
+ return 0;
hda_dsp_ctrl_misc_clock_gating(sdev, false);
- ret = snd_hdac_bus_init_chip(bus, full_reset);
+
+ if (full_reset) {
+ /* clear WAKESTS */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+ /* reset HDA controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to reset HDA controller\n");
+ return ret;
+ }
+
+ usleep_range(500, 1000);
+
+ /* exit HDA controller reset */
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
+ return ret;
+ }
+
+ usleep_range(1000, 1200);
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* check to see if controller is ready */
+ if (!snd_hdac_chip_readb(bus, GCTL)) {
+ dev_dbg(bus->dev, "controller not ready!\n");
+ return -EBUSY;
+ }
+
+ /* Accept unsolicited responses */
+ snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
+
+ /* detect codecs */
+ if (!bus->codec_mask) {
+ bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
+ }
+#endif
+
+ /* clear stream status */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+ }
+
+ /* clear WAKESTS */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* clear rirb status */
+ snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+#endif
+
+ /* clear interrupt status register */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* initialize the codec command I/O */
+ snd_hdac_bus_init_cmd_io(bus);
+#endif
+
+ /* enable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* program the position buffer */
+ if (bus->use_posbuf && bus->posbuf.addr) {
+ snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
+ snd_hdac_chip_writel(bus, DPUBASE,
+ upper_32_bits(bus->posbuf.addr));
+ }
+#endif
+
+ bus->chip_init = true;
+
hda_dsp_ctrl_misc_clock_gating(sdev, true);
return ret;
}
-#endif
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
index 73ead7070cde..51b285103394 100644
--- a/sound/soc/sof/intel/hda-ipc.c
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -72,7 +72,6 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
struct snd_sof_ipc_msg *msg = sdev->msg;
struct sof_ipc_reply reply;
struct sof_ipc_cmd_hdr *hdr;
- unsigned long flags;
int ret = 0;
/*
@@ -84,7 +83,6 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
return;
}
- spin_lock_irqsave(&sdev->ipc_lock, flags);
hdr = msg->msg_data;
if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE)) {
@@ -123,7 +121,6 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
out:
msg->reply_error = ret;
- spin_unlock_irqrestore(&sdev->ipc_lock, flags);
}
static bool hda_dsp_ipc_is_sof(uint32_t msg)
@@ -172,6 +169,18 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
HDA_DSP_REG_HIPCCTL,
HDA_DSP_REG_HIPCCTL_DONE, 0);
+ /*
+ * Make sure the interrupt thread cannot be preempted between
+ * waking up the sender and re-enabling the interrupt. Also
+ * protect against a theoretical race with sof_ipc_tx_message():
+ * if the DSP is fast enough to receive an IPC message, reply to
+ * it, and the host interrupt processing calls this function on
+ * a different core from the one where the sending is taking
+ * place, the message might not yet be marked as expecting a
+ * reply.
+ */
+ spin_lock_irq(&sdev->ipc_lock);
+
/* handle immediate reply from DSP core - ignore ROM messages */
if (hda_dsp_ipc_is_sof(msg)) {
hda_dsp_ipc_get_reply(sdev);
@@ -187,6 +196,8 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
/* set the done bit */
hda_dsp_ipc_dsp_done(sdev);
+ spin_unlock_irq(&sdev->ipc_lock);
+
ret = IRQ_HANDLED;
}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 7e3980a2f7ba..faf1a8ada091 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -108,17 +108,21 @@ static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
struct sof_ipc_panic_info *panic_info,
u32 *stack, size_t stack_words)
{
+ u32 offset = sdev->dsp_oops_offset;
+
/* first read registers */
- sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset, xoops,
- sizeof(*xoops));
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
/* then get panic info */
- sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset +
- sizeof(*xoops), panic_info, sizeof(*panic_info));
+ offset += xoops->arch_hdr.totalsize;
+ sof_block_read(sdev, sdev->mmio_bar, offset,
+ panic_info, sizeof(*panic_info));
/* then get the stack */
- sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset +
- sizeof(*xoops) + sizeof(*panic_info), stack,
+ offset += sizeof(*panic_info);
+ sof_block_read(sdev, sdev->mmio_bar, offset, stack,
stack_words * sizeof(u32));
}
@@ -223,7 +227,9 @@ static int hda_init(struct snd_sof_dev *sdev)
/* initialise hdac bus */
bus->addr = pci_resource_start(pci, 0);
+#if IS_ENABLED(CONFIG_PCI)
bus->remap_addr = pci_ioremap_bar(pci, 0);
+#endif
if (!bus->remap_addr) {
dev_err(bus->dev, "error: ioremap error\n");
return -ENXIO;
@@ -264,9 +270,12 @@ static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
return tplg_filename;
}
+#endif
+
static int hda_init_caps(struct snd_sof_dev *sdev)
{
struct hdac_bus *bus = sof_to_bus(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
struct hdac_ext_link *hlink;
struct snd_soc_acpi_mach_params *mach_params;
struct snd_soc_acpi_mach *hda_mach;
@@ -274,8 +283,9 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
struct snd_soc_acpi_mach *mach;
const char *tplg_filename;
int codec_num = 0;
- int ret = 0;
int i;
+#endif
+ int ret = 0;
device_disable_async_suspend(bus->dev);
@@ -283,6 +293,14 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
if (bus->ppcap)
dev_dbg(sdev->dev, "PP capability, will probe DSP later.\n");
+ ret = hda_dsp_ctrl_init_chip(sdev, true);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: init chip failed with ret: %d\n",
+ ret);
+ return ret;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(bus);
@@ -293,12 +311,6 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
return ret;
}
- ret = hda_dsp_ctrl_init_chip(sdev, true);
- if (ret < 0) {
- dev_err(bus->dev, "error: init chip failed with ret: %d\n", ret);
- goto out;
- }
-
/* codec detection */
if (!bus->codec_mask) {
dev_info(bus->dev, "no hda codecs found!\n");
@@ -339,8 +351,10 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
/* use local variable for readability */
tplg_filename = pdata->tplg_filename;
tplg_filename = fixup_tplg_name(sdev, tplg_filename);
- if (!tplg_filename)
- goto out;
+ if (!tplg_filename) {
+ hda_codec_i915_exit(sdev);
+ return ret;
+ }
pdata->tplg_filename = tplg_filename;
}
}
@@ -364,35 +378,10 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
*/
list_for_each_entry(hlink, &bus->hlink_list, list)
snd_hdac_ext_bus_link_put(bus, hlink);
-
- return 0;
-
-out:
- hda_codec_i915_exit(sdev);
- return ret;
-}
-
-#else
-
-static int hda_init_caps(struct snd_sof_dev *sdev)
-{
- /*
- * set CGCTL.MISCBDCGE to 0 during reset and set back to 1
- * when reset finished.
- * TODO: maybe no need for init_caps?
- */
- hda_dsp_ctrl_misc_clock_gating(sdev, 0);
-
- /* clear WAKESTS */
- snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
- SOF_HDA_WAKESTS_INT_MASK,
- SOF_HDA_WAKESTS_INT_MASK);
-
+#endif
return 0;
}
-#endif
-
static const struct sof_intel_dsp_desc
*get_chip_info(struct snd_sof_pdata *pdata)
{
@@ -409,9 +398,8 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
struct pci_dev *pci = to_pci_dev(sdev->dev);
struct sof_intel_hda_dev *hdev;
struct hdac_bus *bus;
- struct hdac_stream *stream;
const struct sof_intel_dsp_desc *chip;
- int sd_offset, ret = 0;
+ int ret = 0;
/*
* detect DSP by checking class/subclass/prog-id information
@@ -468,7 +456,9 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
goto hdac_bus_unmap;
/* DSP base */
+#if IS_ENABLED(CONFIG_PCI)
sdev->bar[HDA_DSP_BAR] = pci_ioremap_bar(pci, HDA_DSP_BAR);
+#endif
if (!sdev->bar[HDA_DSP_BAR]) {
dev_err(sdev->dev, "error: ioremap error\n");
ret = -ENXIO;
@@ -558,56 +548,9 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
if (ret < 0)
goto free_ipc_irq;
- /* reset HDA controller */
- ret = hda_dsp_ctrl_link_reset(sdev, true);
- if (ret < 0) {
- dev_err(sdev->dev, "error: failed to reset HDA controller\n");
- goto free_ipc_irq;
- }
-
- /* exit HDA controller reset */
- ret = hda_dsp_ctrl_link_reset(sdev, false);
- if (ret < 0) {
- dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
- goto free_ipc_irq;
- }
-
- /* clear stream status */
- list_for_each_entry(stream, &bus->stream_list, list) {
- sd_offset = SOF_STREAM_SD_OFFSET(stream);
- snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
- sd_offset +
- SOF_HDA_ADSP_REG_CL_SD_STS,
- SOF_HDA_CL_DMA_SD_INT_MASK,
- SOF_HDA_CL_DMA_SD_INT_MASK);
- }
-
- /* clear WAKESTS */
- snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
- SOF_HDA_WAKESTS_INT_MASK,
- SOF_HDA_WAKESTS_INT_MASK);
-
- /* clear interrupt status register */
- snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
- SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
-
- /* enable CIE and GIE interrupts */
- snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
- SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
- SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
-
- /* re-enable CGCTL.MISCBDCGE after reset */
- hda_dsp_ctrl_misc_clock_gating(sdev, true);
-
- device_disable_async_suspend(&pci->dev);
-
- /* enable DSP features */
- snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
- SOF_HDA_PPCTL_GPROCEN, SOF_HDA_PPCTL_GPROCEN);
-
- /* enable DSP IRQ */
- snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
- SOF_HDA_PPCTL_PIE, SOF_HDA_PPCTL_PIE);
+ /* enable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, true);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, true);
/* initialize waitq for code loading */
init_waitqueue_head(&sdev->waitq);
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index f0b9d3c53f6f..2414640a32d1 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -115,7 +115,7 @@ static void ipc_log_header(struct device *dev, u8 *text, u32 cmd)
}
break;
case SOF_IPC_GLB_COMP_MSG:
- str = "GLB_COMP_MSG: SET_VALUE";
+ str = "GLB_COMP_MSG";
switch (type) {
case SOF_IPC_COMP_SET_VALUE:
str2 = "SET_VALUE"; break;
@@ -308,19 +308,8 @@ EXPORT_SYMBOL(sof_ipc_tx_message);
int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
{
struct snd_sof_ipc_msg *msg = &sdev->ipc->msg;
- unsigned long flags;
-
- /*
- * Protect against a theoretical race with sof_ipc_tx_message(): if the
- * DSP is fast enough to receive an IPC message, reply to it, and the
- * host interrupt processing calls this function on a different core
- * from the one, where the sending is taking place, the message might
- * not yet be marked as expecting a reply.
- */
- spin_lock_irqsave(&sdev->ipc_lock, flags);
if (msg->ipc_complete) {
- spin_unlock_irqrestore(&sdev->ipc_lock, flags);
dev_err(sdev->dev, "error: no reply expected, received 0x%x",
msg_id);
return -EINVAL;
@@ -330,8 +319,6 @@ int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
msg->ipc_complete = true;
wake_up(&msg->waitq);
- spin_unlock_irqrestore(&sdev->ipc_lock, flags);
-
return 0;
}
EXPORT_SYMBOL(snd_sof_ipc_reply);
@@ -776,16 +763,19 @@ int snd_sof_ipc_valid(struct snd_sof_dev *sdev)
}
}
- if (ready->debug.bits.build) {
+ if (ready->flags & SOF_IPC_INFO_BUILD) {
dev_info(sdev->dev,
"Firmware debug build %d on %s-%s - options:\n"
" GDB: %s\n"
" lock debug: %s\n"
" lock vdebug: %s\n",
v->build, v->date, v->time,
- ready->debug.bits.gdb ? "enabled" : "disabled",
- ready->debug.bits.locks ? "enabled" : "disabled",
- ready->debug.bits.locks_verbose ? "enabled" : "disabled");
+ ready->flags & SOF_IPC_INFO_GDB ?
+ "enabled" : "disabled",
+ ready->flags & SOF_IPC_INFO_LOCKS ?
+ "enabled" : "disabled",
+ ready->flags & SOF_IPC_INFO_LOCKSV ?
+ "enabled" : "disabled");
}
/* copy the fw_version into debugfs at first boot */
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
index 81c7452aae17..628fae552442 100644
--- a/sound/soc/sof/loader.c
+++ b/sound/soc/sof/loader.c
@@ -372,6 +372,8 @@ int snd_sof_run_firmware(struct snd_sof_dev *sdev)
msecs_to_jiffies(sdev->boot_timeout));
if (ret == 0) {
dev_err(sdev->dev, "error: firmware boot failure\n");
+ /* after this point FW_READY msg should be ignored */
+ sdev->boot_complete = true;
snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
SOF_DBG_TEXT | SOF_DBG_PCI);
return -EIO;
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index 649968841dad..dace6c4cd91e 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -211,8 +211,8 @@ static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
/* save pcm hw_params */
memcpy(&spcm->params[substream->stream], params, sizeof(*params));
- INIT_WORK(&spcm->stream[substream->stream].period_elapsed_work,
- sof_pcm_period_elapsed_work);
+ /* clear hw_params_upon_resume flag */
+ spcm->hw_params_upon_resume[substream->stream] = 0;
return ret;
}
@@ -429,8 +429,8 @@ static int sof_pcm_open(struct snd_pcm_substream *substream)
dev_dbg(sdev->dev, "pcm: open stream %d dir %d\n", spcm->pcm.pcm_id,
substream->stream);
- /* clear hw_params_upon_resume flag */
- spcm->hw_params_upon_resume[substream->stream] = 0;
+ INIT_WORK(&spcm->stream[substream->stream].period_elapsed_work,
+ sof_pcm_period_elapsed_work);
caps = &spcm->pcm.caps[substream->stream];
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
index c3ad23a85b99..46a4905a9dce 100644
--- a/sound/soc/sof/xtensa/core.c
+++ b/sound/soc/sof/xtensa/core.c
@@ -110,7 +110,7 @@ static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
u32 stack_words)
{
struct sof_ipc_dsp_oops_xtensa *xoops = oops;
- u32 stack_ptr = xoops->stack;
+ u32 stack_ptr = xoops->plat_hdr.stackptr;
/* 4 * 8chars + 3 ws + 1 terminating NUL */
unsigned char buf[4 * 8 + 3 + 1];
int i;
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index f2deffe026c5..9e1f00e8c32b 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -1320,6 +1320,15 @@ static int sun4i_codec_spk_event(struct snd_soc_dapm_widget *w,
gpiod_set_value_cansleep(scodec->gpio_pa,
!!SND_SOC_DAPM_EVENT_ON(event));
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ /*
+ * Need a delay to wait for the DAC to push out its data. 700 ms
+ * seems to be the best compromise: the delay is not noticeable
+ * while playing a sound.
+ */
+ msleep(700);
+ }
+
return 0;
}
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index c53bfed8d4c2..fd7c37596f21 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -106,7 +106,7 @@
#define SUN8I_I2S_TX_CHAN_MAP_REG 0x44
#define SUN8I_I2S_TX_CHAN_SEL_REG 0x34
-#define SUN8I_I2S_TX_CHAN_OFFSET_MASK GENMASK(13, 11)
+#define SUN8I_I2S_TX_CHAN_OFFSET_MASK GENMASK(13, 12)
#define SUN8I_I2S_TX_CHAN_OFFSET(offset) (offset << 12)
#define SUN8I_I2S_TX_CHAN_EN_MASK GENMASK(11, 4)
#define SUN8I_I2S_TX_CHAN_EN(num_chan) (((1 << num_chan) - 1) << 4)
@@ -456,6 +456,10 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
regmap_update_bits(i2s->regmap, SUN8I_I2S_TX_CHAN_SEL_REG,
SUN8I_I2S_TX_CHAN_OFFSET_MASK,
SUN8I_I2S_TX_CHAN_OFFSET(offset));
+
+ regmap_update_bits(i2s->regmap, SUN8I_I2S_RX_CHAN_SEL_REG,
+ SUN8I_I2S_TX_CHAN_OFFSET_MASK,
+ SUN8I_I2S_TX_CHAN_OFFSET(offset));
}
regmap_field_write(i2s->field_fmt_mode, val);
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 280015c22598..076df22e4bda 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -100,7 +100,9 @@ static void nfit_test_kill(void *_pgmap)
{
struct dev_pagemap *pgmap = _pgmap;
+ WARN_ON(!pgmap || !pgmap->ref || !pgmap->kill || !pgmap->cleanup);
pgmap->kill(pgmap->ref);
+ pgmap->cleanup(pgmap->ref);
}
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)