-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml | 6
-rw-r--r--  Documentation/driver-api/media/drivers/vidtv.rst | 120
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  arch/arc/include/asm/bitops.h | 4
-rw-r--r--  arch/arc/kernel/stacktrace.c | 56
-rw-r--r--  arch/arc/mm/tlb.c | 24
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 34
-rw-r--r--  arch/arm64/include/asm/probes.h | 2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp.lds.S | 5
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio-v3.c | 22
-rw-r--r--  arch/powerpc/Makefile | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/kup-radix.h | 2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 13
-rw-r--r--  arch/powerpc/kernel/head_book3s_32.S | 3
-rw-r--r--  arch/powerpc/kvm/book3s_xive_native.c | 7
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 10
-rw-r--r--  arch/s390/kernel/entry.S | 2
-rw-r--r--  arch/s390/kernel/uv.c | 9
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 4
-rw-r--r--  arch/s390/kvm/pv.c | 3
-rw-r--r--  arch/s390/mm/gmap.c | 2
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 1
-rw-r--r--  arch/x86/kernel/tboot.c | 5
-rw-r--r--  arch/x86/kvm/irq.c | 85
-rw-r--r--  arch/x86/kvm/lapic.c | 2
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 2
-rw-r--r--  arch/x86/kvm/svm/sev.c | 2
-rw-r--r--  arch/x86/kvm/svm/svm.c | 4
-rw-r--r--  arch/x86/kvm/x86.c | 18
-rw-r--r--  block/keyslot-manager.c | 7
-rw-r--r--  drivers/acpi/arm64/iort.c | 8
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 17
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 143
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 60
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 61
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 48
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h | 11
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 244
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c | 68
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.h | 16
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.h | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.c | 13
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.h | 7
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 9
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 37
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 10
-rw-r--r--  drivers/iommu/amd/init.c | 27
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 4
-rw-r--r--  drivers/iommu/intel/dmar.c | 3
-rw-r--r--  drivers/iommu/intel/iommu.c | 4
-rw-r--r--  drivers/iommu/iommu.c | 10
-rw-r--r--  drivers/media/platform/Kconfig | 28
-rw-r--r--  drivers/media/platform/marvell-ccic/mmp-driver.c | 2
-rw-r--r--  drivers/media/platform/mtk-vcodec/Makefile | 10
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c | 2
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c | 2
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c | 174
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h | 7
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h | 52
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c | 73
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c | 110
-rw-r--r--  drivers/media/platform/qcom/venus/core.h | 15
-rw-r--r--  drivers/media/platform/qcom/venus/pm_helpers.c | 4
-rw-r--r--  drivers/media/platform/qcom/venus/venc.c | 31
-rw-r--r--  drivers/media/platform/qcom/venus/venc_ctrls.c | 14
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_bridge.c | 116
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_bridge.h | 4
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_channel.c | 312
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_channel.h | 11
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_common.h | 1
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_demod.c | 2
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_demod.h | 11
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_encoder.h | 9
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_mux.c | 248
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_mux.h | 21
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_pes.c | 179
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_pes.h | 8
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_psi.c | 1521
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_psi.h | 282
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_s302m.c | 125
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_s302m.h | 9
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_ts.c | 5
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_ts.h | 5
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_tuner.c | 5
-rw-r--r--  drivers/media/test-drivers/vidtv/vidtv_tuner.h | 1
-rw-r--r--  drivers/mtd/nand/raw/ams-delta.c | 12
-rw-r--r--  drivers/mtd/nand/raw/au1550nd.c | 11
-rw-r--r--  drivers/mtd/nand/raw/cs553x_nand.c | 24
-rw-r--r--  drivers/mtd/nand/raw/davinci_nand.c | 8
-rw-r--r--  drivers/mtd/nand/raw/diskonchip.c | 29
-rw-r--r--  drivers/mtd/nand/raw/fsmc_nand.c | 30
-rw-r--r--  drivers/mtd/nand/raw/gpio.c | 11
-rw-r--r--  drivers/mtd/nand/raw/lpc32xx_mlc.c | 23
-rw-r--r--  drivers/mtd/nand/raw/lpc32xx_slc.c | 26
-rw-r--r--  drivers/mtd/nand/raw/mpc5121_nfc.c | 19
-rw-r--r--  drivers/mtd/nand/raw/orion_nand.c | 19
-rw-r--r--  drivers/mtd/nand/raw/pasemi_nand.c | 19
-rw-r--r--  drivers/mtd/nand/raw/plat_nand.c | 20
-rw-r--r--  drivers/mtd/nand/raw/r852.c | 40
-rw-r--r--  drivers/mtd/nand/raw/r852.h | 1
-rw-r--r--  drivers/mtd/nand/raw/sharpsl.c | 32
-rw-r--r--  drivers/mtd/nand/raw/socrates_nand.c | 21
-rw-r--r--  drivers/mtd/nand/raw/tmio_nand.c | 33
-rw-r--r--  drivers/mtd/nand/raw/txx9ndfmc.c | 14
-rw-r--r--  drivers/mtd/nand/raw/xway_nand.c | 18
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 1
-rw-r--r--  drivers/platform/x86/intel-vbtn.c | 6
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 13
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 3
-rw-r--r--  drivers/platform/x86/touchscreen_dmi.c | 50
-rw-r--r--  drivers/spi/spi-dw-core.c | 3
-rw-r--r--  drivers/spi/spi-imx.c | 1
-rw-r--r--  drivers/spi/spi-nxp-fspi.c | 7
-rw-r--r--  drivers/spi/spi.c | 5
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_h264.c | 2
-rw-r--r--  drivers/video/fbdev/hyperv_fb.c | 7
-rw-r--r--  fs/btrfs/ctree.h | 5
-rw-r--r--  fs/btrfs/file.c | 57
-rw-r--r--  fs/btrfs/inode.c | 58
-rw-r--r--  fs/btrfs/qgroup.c | 88
-rw-r--r--  fs/btrfs/tests/inode-tests.c | 12
-rw-r--r--  fs/btrfs/tree-checker.c | 3
-rw-r--r--  fs/btrfs/volumes.c | 8
-rw-r--r--  fs/cifs/cifsacl.c | 1
-rw-r--r--  fs/cifs/smb2ops.c | 88
-rw-r--r--  include/linux/memcontrol.h | 28
-rw-r--r--  include/trace/events/writeback.h | 8
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  kernel/printk/printk.c | 4
-rw-r--r--  kernel/printk/printk_ringbuffer.c | 2
-rw-r--r--  mm/filemap.c | 8
-rw-r--r--  mm/page-writeback.c | 6
154 files changed, 3971 insertions, 1684 deletions
diff --git a/.mailmap b/.mailmap
index 505b3d771964..d9fb83d67055 100644
--- a/.mailmap
+++ b/.mailmap
@@ -290,6 +290,7 @@ Santosh Shilimkar <ssantosh@kernel.org>
Sarangdhar Joshi <spjoshi@codeaurora.org>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
+Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
Sebastian Reichel <sre@kernel.org> <sre@debian.org>
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
index 03a76729d26c..7ce06f9f9f8e 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
@@ -76,6 +76,12 @@ properties:
resets:
maxItems: 1
+ wifi-2.4ghz-coexistence:
+ type: boolean
+ description: >
+ Should the pixel frequencies in the WiFi frequencies range be
+ avoided?
+
required:
- compatible
- reg
diff --git a/Documentation/driver-api/media/drivers/vidtv.rst b/Documentation/driver-api/media/drivers/vidtv.rst
index 65115448c52d..673bdff919ea 100644
--- a/Documentation/driver-api/media/drivers/vidtv.rst
+++ b/Documentation/driver-api/media/drivers/vidtv.rst
@@ -149,11 +149,11 @@ vidtv_psi.[ch]
Because the generator is implemented in a separate file, it can be
reused elsewhere in the media subsystem.
- Currently vidtv supports working with 3 PSI tables: PAT, PMT and
- SDT.
+ Currently vidtv supports working with 5 PSI tables: PAT, PMT,
+ SDT, NIT and EIT.
The specification for PAT and PMT can be found in *ISO 13818-1:
- Systems*, while the specification for the SDT can be found in *ETSI
+ Systems*, while the specification for the SDT, NIT, EIT can be found in *ETSI
EN 300 468: Specification for Service Information (SI) in DVB
systems*.
@@ -197,6 +197,8 @@ vidtv_channel.[ch]
#. Their programs will be concatenated to populate the PAT
+ #. Their events will be concatenated to populate the EIT
+
#. For each program in the PAT, a PMT section will be created
#. The PMT section for a channel will be assigned its streams.
@@ -256,6 +258,42 @@ Using dvb-fe-tool
The first step to check whether the demod loaded successfully is to run::
$ dvb-fe-tool
+ Device Dummy demod for DVB-T/T2/C/S/S2 (/dev/dvb/adapter0/frontend0) capabilities:
+ CAN_FEC_1_2
+ CAN_FEC_2_3
+ CAN_FEC_3_4
+ CAN_FEC_4_5
+ CAN_FEC_5_6
+ CAN_FEC_6_7
+ CAN_FEC_7_8
+ CAN_FEC_8_9
+ CAN_FEC_AUTO
+ CAN_GUARD_INTERVAL_AUTO
+ CAN_HIERARCHY_AUTO
+ CAN_INVERSION_AUTO
+ CAN_QAM_16
+ CAN_QAM_32
+ CAN_QAM_64
+ CAN_QAM_128
+ CAN_QAM_256
+ CAN_QAM_AUTO
+ CAN_QPSK
+ CAN_TRANSMISSION_MODE_AUTO
+ DVB API Version 5.11, Current v5 delivery system: DVBC/ANNEX_A
+ Supported delivery systems:
+ DVBT
+ DVBT2
+ [DVBC/ANNEX_A]
+ DVBS
+ DVBS2
+ Frequency range for the current standard:
+ From: 51.0 MHz
+ To: 2.15 GHz
+ Step: 62.5 kHz
+ Tolerance: 29.5 MHz
+ Symbol rate ranges for the current standard:
+ From: 1.00 MBauds
+ To: 45.0 MBauds
This should return what is currently set up at the demod struct, i.e.::
@@ -314,7 +352,7 @@ For this, one should provide a configuration file known as a 'scan file',
here's an example::
[Channel]
- FREQUENCY = 330000000
+ FREQUENCY = 474000000
MODULATION = QAM/AUTO
SYMBOL_RATE = 6940000
INNER_FEC = AUTO
@@ -335,6 +373,14 @@ You can browse scan tables online here: `dvb-scan-tables
Assuming this channel is named 'channel.conf', you can then run::
$ dvbv5-scan channel.conf
+ dvbv5-scan ~/vidtv.conf
+ ERROR command BANDWIDTH_HZ (5) not found during retrieve
+ Cannot calc frequency shift. Either bandwidth/symbol-rate is unavailable (yet).
+ Scanning frequency #1 330000000
+ (0x00) Signal= -68.00dBm
+ Scanning frequency #2 474000000
+ Lock (0x1f) Signal= -34.45dBm C/N= 33.74dB UCB= 0
+ Service Beethoven, provider LinuxTV.org: digital television
For more information on dvb-scan, check its documentation online here:
`dvb-scan Documentation <https://www.linuxtv.org/wiki/index.php/Dvbscan>`_.
@@ -344,23 +390,38 @@ Using dvb-zap
dvbv5-zap is a command line tool that can be used to record MPEG-TS to disk. The
typical use is to tune into a channel and put it into record mode. The example
-below - which is taken from the documentation - illustrates that::
+below - which is taken from the documentation - illustrates that\ [1]_::
- $ dvbv5-zap -c dvb_channel.conf "trilhas sonoras" -r
- using demux '/dev/dvb/adapter0/demux0'
+ $ dvbv5-zap -c dvb_channel.conf "beethoven" -o music.ts -P -t 10
+ using demux 'dvb0.demux0'
reading channels from file 'dvb_channel.conf'
- service has pid type 05: 204
- tuning to 573000000 Hz
- audio pid 104
- dvb_set_pesfilter 104
- Lock (0x1f) Quality= Good Signal= 100.00% C/N= -13.80dB UCB= 70 postBER= 3.14x10^-3 PER= 0
- DVR interface '/dev/dvb/adapter0/dvr0' can now be opened
+ tuning to 474000000 Hz
+ pass all PID's to TS
+ dvb_set_pesfilter 8192
+ dvb_dev_set_bufsize: buffer set to 6160384
+ Lock (0x1f) Quality= Good Signal= -34.66dBm C/N= 33.41dB UCB= 0 postBER= 0 preBER= 1.05x10^-3 PER= 0
+ Lock (0x1f) Quality= Good Signal= -34.57dBm C/N= 33.46dB UCB= 0 postBER= 0 preBER= 1.05x10^-3 PER= 0
+ Record to file 'music.ts' started
+ received 24587768 bytes (2401 Kbytes/sec)
+ Lock (0x1f) Quality= Good Signal= -34.42dBm C/N= 33.89dB UCB= 0 postBER= 0 preBER= 2.44x10^-3 PER= 0
+
+.. [1] In this example, it records 10 seconds with all program ID's stored
+ at the music.ts file.
+
-The channel can be watched by playing the contents of the DVR interface, with
-some player that recognizes the MPEG-TS format, such as *mplayer* or *vlc*.
+The channel can be watched by playing the contents of the stream with some
+player that recognizes the MPEG-TS format, such as ``mplayer`` or ``vlc``.
By playing the contents of the stream one can visually inspect the workings of
-vidtv, e.g.::
+vidtv, e.g., to play a recorded TS file with::
+
+ $ mplayer music.ts
+
+or, alternatively, running this command on one terminal::
+
+ $ dvbv5-zap -c dvb_channel.conf "beethoven" -P -r &
+
+And, on a second terminal, playing the contents from DVR interface with::
$ mplayer /dev/dvb/adapter0/dvr0
@@ -423,3 +484,30 @@ A nice addition is to simulate some noise when the signal quality is bad by:
- Updating the error statistics accordingly (e.g. BER, etc).
- Simulating some noise in the encoded data.
+
+Functions and structs used within vidtv
+---------------------------------------
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_bridge.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_channel.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_demod.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_encoder.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_mux.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_pes.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_psi.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_s302m.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_ts.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_tuner.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_common.c
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_tuner.c
diff --git a/MAINTAINERS b/MAINTAINERS
index a008b70f3c16..0d80c24c114e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9646,6 +9646,7 @@ F: Documentation/virt/kvm/s390*
F: arch/s390/include/asm/gmap.h
F: arch/s390/include/asm/kvm*
F: arch/s390/include/uapi/asm/kvm*
+F: arch/s390/kernel/uv.c
F: arch/s390/kvm/
F: arch/s390/mm/gmap.c
F: tools/testing/selftests/kvm/*/s390x/
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index c6606f4d20d6..fb98440c0bd4 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -243,10 +243,8 @@ static inline int constant_fls(unsigned int x)
x <<= 2;
r -= 2;
}
- if (!(x & 0x80000000u)) {
- x <<= 1;
+ if (!(x & 0x80000000u))
r -= 1;
- }
return r;
}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index b23986f98450..f73da203b170 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -38,15 +38,27 @@
#ifdef CONFIG_ARC_DW2_UNWIND
-static void seed_unwind_frame_info(struct task_struct *tsk,
- struct pt_regs *regs,
- struct unwind_frame_info *frame_info)
+static int
+seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
{
- /*
- * synchronous unwinding (e.g. dump_stack)
- * - uses current values of SP and friends
- */
- if (tsk == NULL && regs == NULL) {
+ if (regs) {
+ /*
+ * Asynchronous unwinding of intr/exception
+ * - Just uses the pt_regs passed
+ */
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ } else if (tsk == NULL || tsk == current) {
+ /*
+ * synchronous unwinding (e.g. dump_stack)
+ * - uses current values of SP and friends
+ */
unsigned long fp, sp, blink, ret;
frame_info->task = current;
@@ -63,13 +75,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
frame_info->regs.r31 = blink;
frame_info->regs.r63 = ret;
frame_info->call_frame = 0;
- } else if (regs == NULL) {
+ } else {
/*
- * Asynchronous unwinding of sleeping task
- * - Gets SP etc from task's pt_regs (saved bottom of kernel
- * mode stack of task)
+ * Asynchronous unwinding of a likely sleeping task
+ * - first ensure it is actually sleeping
+ * - if so, it will be in __switch_to, kernel mode SP of task
+ * is safe-kept and BLINK at a well known location in there
*/
+ if (tsk->state == TASK_RUNNING)
+ return -1;
+
frame_info->task = tsk;
frame_info->regs.r27 = TSK_K_FP(tsk);
@@ -90,19 +106,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
frame_info->regs.r28 += 60;
frame_info->call_frame = 0;
- } else {
- /*
- * Asynchronous unwinding of intr/exception
- * - Just uses the pt_regs passed
- */
- frame_info->task = tsk;
-
- frame_info->regs.r27 = regs->fp;
- frame_info->regs.r28 = regs->sp;
- frame_info->regs.r31 = regs->blink;
- frame_info->regs.r63 = regs->ret;
- frame_info->call_frame = 0;
}
+ return 0;
}
#endif
@@ -116,7 +121,8 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
unsigned int address;
struct unwind_frame_info frame_info;
- seed_unwind_frame_info(tsk, regs, &frame_info);
+ if (seed_unwind_frame_info(tsk, regs, &frame_info))
+ return 0;
while (1) {
address = UNW_PC(&frame_info);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index c340acd989a0..9bb3c24f3677 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -30,14 +30,14 @@
* -Changes related to MMU v2 (Rel 4.8)
*
* Vineetg: Aug 29th 2008
- * -In TLB Flush operations (Metal Fix MMU) there is a explict command to
+ * -In TLB Flush operations (Metal Fix MMU) there is a explicit command to
* flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
* it fails. Thus need to load it with ANY valid value before invoking
* TLBIVUTLB cmd
*
* Vineetg: Aug 21th 2008:
* -Reduced the duration of IRQ lockouts in TLB Flush routines
- * -Multiple copies of TLB erase code seperated into a "single" function
+ * -Multiple copies of TLB erase code separated into a "single" function
* -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
* in interrupt-safe region.
*
@@ -66,7 +66,7 @@
*
* Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
* much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
- * Given this, the thrasing problem should never happen because once the 3
+ * Given this, the thrashing problem should never happen because once the 3
* J-TLB entries are created (even though 3rd will knock out one of the prev
* two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
*
@@ -127,7 +127,7 @@ static void utlb_invalidate(void)
* There was however an obscure hardware bug, where uTLB flush would
* fail when a prior probe for J-TLB (both totally unrelated) would
* return lkup err - because the entry didn't exist in MMU.
- * The Workround was to set Index reg with some valid value, prior to
+ * The Workaround was to set Index reg with some valid value, prior to
* flush. This was fixed in MMU v3
*/
unsigned int idx;
@@ -272,7 +272,7 @@ noinline void local_flush_tlb_all(void)
}
/*
- * Flush the entrie MM for userland. The fastest way is to move to Next ASID
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
*/
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
@@ -303,7 +303,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
* Difference between this and Kernel Range Flush is
* -Here the fastest way (if range is too large) is to move to next ASID
* without doing any explicit Shootdown
- * -In case of kernel Flush, entry has to be shot down explictly
+ * -In case of kernel Flush, entry has to be shot down explicitly
*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
@@ -620,7 +620,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
* Super Page size is configurable in hardware (4K to 16M), but fixed once
* RTL builds.
*
- * The exact THP size a Linx configuration will support is a function of:
+ * The exact THP size a Linux configuration will support is a function of:
* - MMU page size (typical 8K, RTL fixed)
* - software page walker address split between PGD:PTE:PFN (typical
* 11:8:13, but can be changed with 1 line)
@@ -698,7 +698,7 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
#endif
-/* Read the Cache Build Confuration Registers, Decode them and save into
+/* Read the Cache Build Configuration Registers, Decode them and save into
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
@@ -803,13 +803,13 @@ void arc_mmu_init(void)
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
/*
- * Can't be done in processor.h due to header include depenedencies
+ * Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
/*
* stack top size sanity check,
- * Can't be done in processor.h due to header include depenedencies
+ * Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
@@ -881,7 +881,7 @@ void arc_mmu_init(void)
* the duplicate one.
* -Knob to be verbose abt it.(TODO: hook them up to debugfs)
*/
-volatile int dup_pd_silent; /* Be slient abt it or complain (default) */
+volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
@@ -948,7 +948,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
/***********************************************************************
* Diagnostic Routines
- * -Called from Low Level TLB Hanlders if things don;t look good
+ * -Called from Low Level TLB Handlers if things don;t look good
**********************************************************************/
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4ff12a7adcfd..5628289b9d5e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -115,8 +115,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_young(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
@@ -124,9 +122,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
* remapped as PROT_NONE but are yet to be flushed from the TLB.
+ * Note that we can't make any assumptions based on the state of the access
+ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
+ * TLB.
*/
#define pte_accessible(mm, pte) \
- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
* p??_access_permitted() is true for valid user mappings (subject to the
@@ -164,13 +165,6 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
return pmd;
}
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
- pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
- return pte;
-}
-
static inline pte_t pte_mkwrite(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
@@ -196,6 +190,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
return pte;
}
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ /*
+ * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+ * clear), set the PTE_DIRTY bit.
+ */
+ if (pte_hw_dirty(pte))
+ pte = pte_mkdirty(pte);
+
+ pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+ return pte;
+}
+
static inline pte_t pte_mkold(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(PTE_AF));
@@ -845,12 +853,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
pte = READ_ONCE(*ptep);
do {
old_pte = pte;
- /*
- * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
- * clear), set the PTE_DIRTY bit.
- */
- if (pte_hw_dirty(pte))
- pte = pte_mkdirty(pte);
pte = pte_wrprotect(pte);
pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
index 4266262101fe..006946745352 100644
--- a/arch/arm64/include/asm/probes.h
+++ b/arch/arm64/include/asm/probes.h
@@ -7,6 +7,8 @@
#ifndef _ARM_PROBES_H
#define _ARM_PROBES_H
+#include <asm/insn.h>
+
typedef u32 probe_opcode_t;
typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index bb2d986ff696..a797abace13f 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -13,6 +13,11 @@
SECTIONS {
HYP_SECTION(.text)
+ /*
+ * .hyp..data..percpu needs to be page aligned to maintain the same
+ * alignment for when linking into vmlinux.
+ */
+ . = ALIGN(PAGE_SIZE);
HYP_SECTION_NAME(.data..percpu) : {
PERCPU_INPUT(L1_CACHE_BYTES)
}
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 52d6f24f65dc..15a6c98ee92f 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -273,6 +273,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
return extract_bytes(value, addr & 7, len);
}
+static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+ int target_vcpu_id = vcpu->vcpu_id;
+ u64 value;
+
+ value = (u64)(mpidr & GENMASK(23, 0)) << 32;
+ value |= ((target_vcpu_id & 0xffff) << 8);
+
+ if (vgic_has_its(vcpu->kvm))
+ value |= GICR_TYPER_PLPIS;
+
+ /* reporting of the Last bit is not supported for userspace */
+ return extract_bytes(value, addr & 7, len);
+}
+
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
@@ -593,8 +610,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
VGIC_ACCESS_32bit),
- REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
- vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+ REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
+ vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
+ vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a4d56f0a41d9..16b8336f91dd 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -248,7 +248,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-string)
cpu-as-$(CONFIG_40x) += -Wa,-m405
cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
-cpu-as-$(CONFIG_E200) += -Wa,-me200
cpu-as-$(CONFIG_E500) += -Wa,-me500
# When using '-many -mpower4' gas will first try and find a matching power4
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index 28716e2f13e3..a39e2d193fdc 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -63,6 +63,8 @@
#else /* !__ASSEMBLY__ */
+#include <linux/jump_label.h>
+
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
#ifdef CONFIG_PPC_KUAP
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f63a3d3bca3d..4d01f09ecf80 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1000,8 +1000,6 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
* Vectors for the FWNMI option. Share common code.
*/
TRAMP_REAL_BEGIN(system_reset_fwnmi)
- /* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
- __IKVM_REAL(system_reset)=0
GEN_INT_ENTRY system_reset, virt=0
#endif /* CONFIG_PPC_PSERIES */
@@ -1412,6 +1410,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
* If none is found, do a Linux page fault. Linux page faults can happen in
* kernel mode due to user copy operations of course.
*
+ * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
+ * MMU context, which may cause a DSI in the host, which must go to the
+ * KVM handler. MSR[IR] is not enabled, so the real-mode handler will
+ * always be used regardless of AIL setting.
+ *
* - Radix MMU
* The hardware loads from the Linux page table directly, so a fault goes
* immediately to Linux page fault.
@@ -1422,10 +1425,8 @@ INT_DEFINE_BEGIN(data_access)
IVEC=0x300
IDAR=1
IDSISR=1
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_SKIP=1
IKVM_REAL=1
-#endif
INT_DEFINE_END(data_access)
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
@@ -1464,6 +1465,8 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
* ppc64_bolted_size (first segment). The kernel handler must avoid stomping
* on user-handler data structures.
*
+ * KVM: Same as 0x300, DSLB must test for KVM guest.
+ *
* A dedicated save area EXSLB is used (XXX: but it actually need not be
* these days, we could use EXGEN).
*/
@@ -1472,10 +1475,8 @@ INT_DEFINE_BEGIN(data_access_slb)
IAREA=PACA_EXSLB
IRECONCILE=0
IDAR=1
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_SKIP=1
IKVM_REAL=1
-#endif
INT_DEFINE_END(data_access_slb)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 2aa16d5368e1..a0dda2a1f2df 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -156,6 +156,7 @@ __after_mmu_off:
bl initial_bats
bl load_segment_registers
BEGIN_MMU_FTR_SECTION
+ bl reloc_offset
bl early_hash_table
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
@@ -920,7 +921,7 @@ early_hash_table:
ori r6, r6, 3 /* 256kB table */
mtspr SPRN_SDR1, r6
lis r6, early_hash@h
- lis r3, Hash@ha
+ addis r3, r3, Hash@ha
stw r6, Hash@l(r3)
blr
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index d0c2db0e07fa..a59a94f02733 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -251,6 +251,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
}
state = &sb->irq_state[src];
+
+ /* Some sanity checking */
+ if (!state->valid) {
+ pr_devel("%s: source %lx invalid !\n", __func__, irq);
+ return VM_FAULT_SIGBUS;
+ }
+
kvmppc_xive_select_irq(state, &hw_num, &xd);
arch_spin_lock(&sb->lock);
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2012c1cf0853..483051e10db3 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -53,11 +53,11 @@ int main(void)
/* stack_frame offsets */
OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
OFFSET(__SF_GPRS, stack_frame, gprs);
- OFFSET(__SF_EMPTY, stack_frame, empty1);
- OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
- OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
- OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
- OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]);
+ OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
+ OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
+ OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
+ OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
+ OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
BLANK();
OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
BLANK();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5346545b9860..26bb0603c5a1 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1068,6 +1068,7 @@ EXPORT_SYMBOL(save_fpu_regs)
* %r4
*/
load_fpu_regs:
+ stnsm __SF_EMPTY(%r15),0xfc
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
@@ -1099,6 +1100,7 @@ load_fpu_regs:
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
+ ssm __SF_EMPTY(%r15)
BR_EX %r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 14bd9d58edc9..883bfed9f5c2 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -129,8 +129,15 @@ int uv_destroy_page(unsigned long paddr)
.paddr = paddr
};
- if (uv_call(0, (u64)&uvcb))
+ if (uv_call(0, (u64)&uvcb)) {
+ /*
+ * Older firmware uses 107/d as an indication of a non secure
+ * page. Let us emulate the newer variant (no-op).
+ */
+ if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
+ return 0;
return -EINVAL;
+ }
return 0;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6b74b92c1a58..425d3d75320b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2312,7 +2312,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
struct kvm_s390_pv_unp unp = {};
r = -EINVAL;
- if (!kvm_s390_pv_is_protected(kvm))
+ if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
break;
r = -EFAULT;
@@ -3564,7 +3564,6 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->pp = 0;
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->todpr = 0;
- vcpu->arch.sie_block->cpnc = 0;
}
}
@@ -3582,7 +3581,6 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
regs->etoken = 0;
regs->etoken_extension = 0;
- regs->diag318 = 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index eb99e2f95ebe..f5847f9dec7c 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -208,7 +208,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
return -EIO;
}
kvm->arch.gmap->guest_handle = uvcb.guest_handle;
- atomic_set(&kvm->mm->context.is_protected, 1);
return 0;
}
@@ -228,6 +227,8 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
*rc, *rrc);
+ if (!cc)
+ atomic_set(&kvm->mm->context.is_protected, 1);
return cc ? -EINVAL : 0;
}
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index cfb0017f33a7..64795d034926 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2690,6 +2690,8 @@ static const struct mm_walk_ops reset_acc_walk_ops = {
#include <linux/sched/mm.h>
void s390_reset_acc(struct mm_struct *mm)
{
+ if (!mm_is_protected(mm))
+ return;
/*
* we might be called during
* reset: we walk the pages and clear
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 324ddd7fd0aa..7e5f33a0d0e2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1656,6 +1656,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 420be871d9d4..ae64f98ec2ab 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -514,13 +514,10 @@ int tboot_force_iommu(void)
if (!tboot_enabled())
return 0;
- if (no_iommu || swiotlb || dmar_disabled)
+ if (no_iommu || dmar_disabled)
pr_warn("Forcing Intel-IOMMU to enabled\n");
dmar_disabled = 0;
-#ifdef CONFIG_SWIOTLB
- swiotlb = 0;
-#endif
no_iommu = 0;
return 1;
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 99d118ffc67d..814698e5b152 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
* check if there is pending interrupt from
* non-APIC source without intack.
*/
-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
-{
- u8 accept = kvm_apic_accept_pic_intr(v);
-
- if (accept) {
- if (irqchip_split(v->kvm))
- return pending_userspace_extint(v);
- else
- return v->kvm->arch.vpic->output;
- } else
- return 0;
-}
-
-/*
- * check if there is injectable interrupt:
- * when virtual interrupt delivery enabled,
- * interrupt from apic will handled by hardware,
- * we don't need to check it here.
- */
-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
/*
- * FIXME: interrupt.injected represents an interrupt that it's
+ * FIXME: interrupt.injected represents an interrupt whose
* side-effects have already been applied (e.g. bit from IRR
* already moved to ISR). Therefore, it is incorrect to rely
* on interrupt.injected to know if there is a pending
@@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
if (!lapic_in_kernel(v))
return v->arch.interrupt.injected;
+ if (!kvm_apic_accept_pic_intr(v))
+ return 0;
+
+ if (irqchip_split(v->kvm))
+ return pending_userspace_extint(v);
+ else
+ return v->kvm->arch.vpic->output;
+}
+
+/*
+ * check if there is injectable interrupt:
+ * when virtual interrupt delivery enabled,
+ * interrupt from apic will handled by hardware,
+ * we don't need to check it here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
if (kvm_cpu_has_extint(v))
return 1;
@@ -91,20 +89,6 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
*/
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
- /*
- * FIXME: interrupt.injected represents an interrupt that it's
- * side-effects have already been applied (e.g. bit from IRR
- * already moved to ISR). Therefore, it is incorrect to rely
- * on interrupt.injected to know if there is a pending
- * interrupt in the user-mode LAPIC.
- * This leads to nVMX/nSVM not be able to distinguish
- * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
- * pending interrupt or should re-inject an injected
- * interrupt.
- */
- if (!lapic_in_kernel(v))
- return v->arch.interrupt.injected;
-
if (kvm_cpu_has_extint(v))
return 1;
@@ -118,16 +102,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
*/
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
- if (kvm_cpu_has_extint(v)) {
- if (irqchip_split(v->kvm)) {
- int vector = v->arch.pending_external_vector;
-
- v->arch.pending_external_vector = -1;
- return vector;
- } else
- return kvm_pic_read_irq(v->kvm); /* PIC */
- } else
+ if (!kvm_cpu_has_extint(v)) {
+ WARN_ON(!lapic_in_kernel(v));
return -1;
+ }
+
+ if (!lapic_in_kernel(v))
+ return v->arch.interrupt.nr;
+
+ if (irqchip_split(v->kvm)) {
+ int vector = v->arch.pending_external_vector;
+
+ v->arch.pending_external_vector = -1;
+ return vector;
+ } else
+ return kvm_pic_read_irq(v->kvm); /* PIC */
}
/*
@@ -135,13 +124,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
*/
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
- int vector;
-
- if (!lapic_in_kernel(v))
- return v->arch.interrupt.nr;
-
- vector = kvm_cpu_get_extint(v);
-
+ int vector = kvm_cpu_get_extint(v);
if (vector != -1)
return vector; /* PIC */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 105e7859d1f2..86c33d53c90a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2465,7 +2465,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic;
u32 ppr;
- if (!kvm_apic_hw_enabled(apic))
+ if (!kvm_apic_present(vcpu))
return -1;
__apic_update_ppr(apic, &ppr);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5bb1939b65d8..7a6ae9e90bd7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3517,7 +3517,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
u64 sptes[PT64_ROOT_MAX_LEVEL];
struct rsvd_bits_validate *rsvd_check;
- int root = vcpu->arch.mmu->root_level;
+ int root = vcpu->arch.mmu->shadow_root_level;
int leaf;
int level;
bool reserved = false;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index c0b14106258a..566f4d18185b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -642,8 +642,8 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
* Its safe to read more than we are asked, caller should ensure that
* destination has enough space.
*/
- src_paddr = round_down(src_paddr, 16);
offset = src_paddr & 15;
+ src_paddr = round_down(src_paddr, 16);
sz = round_up(sz + offset, 16);
return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1e81cfebd491..79b3a564f1c9 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1309,8 +1309,10 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm->avic_is_running = true;
svm->msrpm = svm_vcpu_alloc_msrpm();
- if (!svm->msrpm)
+ if (!svm->msrpm) {
+ err = -ENOMEM;
goto error_free_vmcb_page;
+ }
svm_vcpu_init_msrpm(vcpu, svm->msrpm);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 078a39d489fe..e545a8a613b1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4051,21 +4051,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
+ /*
+ * We can accept userspace's request for interrupt injection
+ * as long as we have a place to store the interrupt number.
+ * The actual injection will happen when the CPU is able to
+ * deliver the interrupt.
+ */
+ if (kvm_cpu_has_extint(vcpu))
+ return false;
+
+ /* Acknowledging ExtINT does not happen if LINT0 is masked. */
return (!lapic_in_kernel(vcpu) ||
kvm_apic_accept_pic_intr(vcpu));
}
-/*
- * if userspace requested an interrupt window, check that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
return kvm_arch_interrupt_allowed(vcpu) &&
- !kvm_cpu_has_interrupt(vcpu) &&
- !kvm_event_needs_reinjection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);
}
diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c
index 35abcb1ec051..86f8195d8039 100644
--- a/block/keyslot-manager.c
+++ b/block/keyslot-manager.c
@@ -103,6 +103,13 @@ int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots)
spin_lock_init(&ksm->idle_slots_lock);
slot_hashtable_size = roundup_pow_of_two(num_slots);
+ /*
+ * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
+ * buckets. This only makes a difference when there is only 1 keyslot.
+ */
+ if (slot_hashtable_size < 2)
+ slot_hashtable_size = 2;
+
ksm->log_slot_ht_size = ilog2(slot_hashtable_size);
ksm->slot_hashtable = kvmalloc_array(slot_hashtable_size,
sizeof(ksm->slot_hashtable[0]),
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9929ff50c0c0..770d84071a32 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(iort_fwnode_lock);
* iort_set_fwnode() - Create iort_fwnode and use it to register
* iommu data in the iort_fwnode_list
*
- * @node: IORT table node associated with the IOMMU
+ * @iort_node: IORT table node associated with the IOMMU
* @fwnode: fwnode associated with the IORT node
*
* Returns: 0 on success
@@ -673,7 +673,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 id,
/**
* iort_get_device_domain() - Find MSI domain related to a device
* @dev: The device.
- * @req_id: Requester ID for the device.
+ * @id: Requester ID for the device.
+ * @bus_token: irq domain bus token.
*
* Returns: the MSI domain for this device, NULL otherwise
*/
@@ -1136,7 +1137,7 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
*
* @dev: device to configure
* @dma_addr: device DMA address result pointer
- * @size: DMA range size result pointer
+ * @dma_size: DMA range size result pointer
*/
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
@@ -1526,6 +1527,7 @@ static __init const struct iort_dev_config *iort_get_dev_cfg(
/**
* iort_add_platform_device() - Allocate a platform device for IORT node
* @node: Pointer to device ACPI IORT node
+ * @ops: Pointer to IORT device config struct
*
* Returns: 0 on success, <0 failure
*/
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 78318508a6d6..8286205c7165 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -236,13 +236,15 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
if (!handle || !handle->perf_ops)
return -ENODEV;
+#ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */
if (of_find_property(dev->of_node, "#clock-cells", NULL))
devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+#endif
ret = cpufreq_register_driver(&scmi_cpufreq_driver);
if (ret) {
- dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
+ dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
__func__, ret);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e3783f5a459d..026789b466db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4852,7 +4852,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP;
- if (ras && ras->supported)
+ if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
return amdgpu_dpm_baco_enter(adev);
@@ -4871,7 +4871,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
if (ret)
return ret;
- if (ras && ras->supported)
+ if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8039d2399584..a0248d78190f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -69,10 +69,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
- uint64_t size)
+ uint64_t size_in_page)
{
return ttm_range_man_init(&adev->mman.bdev, type,
- false, size >> PAGE_SHIFT);
+ false, size_in_page);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 5eb63288d157..edbb8194ee81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -67,6 +67,7 @@ struct amdgpu_uvd {
unsigned harvest_config;
/* store image width to adjust nb memory state */
unsigned decode_image_width;
+ uint32_t keyselect;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 3579565e0eab..55f4b8c3b933 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3105,6 +3105,8 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 7cf4b11a65c5..41800fcad410 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -277,15 +277,8 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
*/
static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
{
- void *ptr;
- uint32_t ucode_len, i;
- uint32_t keysel;
-
- ptr = adev->uvd.inst[0].cpu_addr;
- ptr += 192 + 16;
- memcpy(&ucode_len, ptr, 4);
- ptr += ucode_len;
- memcpy(&keysel, ptr, 4);
+ int i;
+ uint32_t keysel = adev->uvd.keyselect;
WREG32(mmUVD_FW_START, keysel);
@@ -550,6 +543,8 @@ static int uvd_v3_1_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ void *ptr;
+ uint32_t ucode_len;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
@@ -571,6 +566,13 @@ static int uvd_v3_1_sw_init(void *handle)
if (r)
return r;
+ /* Retrieval firmware validate key */
+ ptr = adev->uvd.inst[0].cpu_addr;
+ ptr += 192 + 16;
+ memcpy(&ucode_len, ptr, 4);
+ ptr += ucode_len;
+ memcpy(&adev->uvd.keyselect, ptr, 4);
+
r = amdgpu_uvd_entity_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0e7118000919..9b6809f309f4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1041,7 +1041,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->asic_type >= CHIP_RAVEN) {
+ if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 834a156e3a75..0a1e1cf57e19 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -742,7 +742,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_SUSPEND:
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 1);
- ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_OFF:
if (ast->tx_chip_type == AST_TX_DP501)
@@ -778,6 +777,21 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
}
static void
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+{
+ struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
+ struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (old_ast_crtc_state->format != ast_crtc_state->format)
+ ast_crtc_load_lut(ast, crtc);
+}
+
+static void
ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -830,6 +844,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_check = ast_crtc_helper_atomic_check,
+ .atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
};
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6417f374b923..951d5f708e92 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC Exynos Series"
- depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+ depends on OF && DRM && COMMON_CLK
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d8b206e53660..cf6e05ea4d8f 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -30,18 +30,21 @@
#include "i915_trace.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
-static void irq_enable(struct intel_engine_cs *engine)
+static bool irq_enable(struct intel_engine_cs *engine)
{
if (!engine->irq_enable)
- return;
+ return false;
/* Caller disables interrupts */
spin_lock(&engine->gt->irq_lock);
engine->irq_enable(engine);
spin_unlock(&engine->gt->irq_lock);
+
+ return true;
}
static void irq_disable(struct intel_engine_cs *engine)
@@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine)
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
- lockdep_assert_held(&b->irq_lock);
-
- if (!b->irq_engine || b->irq_armed)
- return;
-
- if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
+ /*
+ * Since we are waiting on a request, the GPU should be busy
+ * and should have its own rpm reference.
+ */
+ if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
return;
/*
@@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
*/
WRITE_ONCE(b->irq_armed, true);
- /*
- * Since we are waiting on a request, the GPU should be busy
- * and should have its own rpm reference. This is tracked
- * by i915->gt.awake, we can forgo holding our own wakref
- * for the interrupt as before i915->gt.awake is released (when
- * the driver is idle) we disarm the breadcrumbs.
- */
-
- if (!b->irq_enabled++)
- irq_enable(b->irq_engine);
+ /* Requests may have completed before we could enable the interrupt. */
+ if (!b->irq_enabled++ && irq_enable(b->irq_engine))
+ irq_work_queue(&b->irq_work);
}
-static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
- lockdep_assert_held(&b->irq_lock);
-
- if (!b->irq_engine || !b->irq_armed)
+ if (!b->irq_engine)
return;
+ spin_lock(&b->irq_lock);
+ if (!b->irq_armed)
+ __intel_breadcrumbs_arm_irq(b);
+ spin_unlock(&b->irq_lock);
+}
+
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
irq_disable(b->irq_engine);
@@ -105,8 +106,6 @@ static void add_signaling_context(struct intel_breadcrumbs *b,
{
intel_context_get(ce);
list_add_tail(&ce->signal_link, &b->signalers);
- if (list_is_first(&ce->signal_link, &b->signalers))
- __intel_breadcrumbs_arm_irq(b);
}
static void remove_signaling_context(struct intel_breadcrumbs *b,
@@ -174,34 +173,65 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
{
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
if (!__dma_fence_signal(&rq->fence)) {
i915_request_put(rq);
return false;
}
- list_add_tail(&rq->signal_link, signals);
return true;
}
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+ node->next = head;
+ return node;
+}
+
static void signal_irq_work(struct irq_work *work)
{
struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
const ktime_t timestamp = ktime_get();
+ struct llist_node *signal, *sn;
struct intel_context *ce, *cn;
struct list_head *pos, *next;
- LIST_HEAD(signal);
+
+ signal = NULL;
+ if (unlikely(!llist_empty(&b->signaled_requests)))
+ signal = llist_del_all(&b->signaled_requests);
spin_lock(&b->irq_lock);
- if (list_empty(&b->signalers))
+ /*
+ * Keep the irq armed until the interrupt after all listeners are gone.
+ *
+ * Enabling/disabling the interrupt is rather costly, roughly a couple
+ * of hundred microseconds. If we are proactive and enable/disable
+ * the interrupt around every request that wants a breadcrumb, we
+ * quickly drown in the extra orders of magnitude of latency imposed
+ * on request submission.
+ *
+ * So we try to be lazy, and keep the interrupts enabled until no
+ * more listeners appear within a breadcrumb interrupt interval (that
+ * is until a request completes that no one cares about). The
+ * observation is that listeners come in batches, and will often
+ * listen to a bunch of requests in succession. Note that on icl+,
+ * interrupts are always enabled due to concerns with rc6 being
+ * dysfunctional with per-engine interrupt masking.
+ *
+ * We also try to avoid raising too many interrupts, as they may
+ * be generated by userspace batches and it is unfortunately rather
+ * too easy to drown the CPU under a flood of GPU interrupts. Thus
+ * whenever no one appears to be listening, we turn off the interrupts.
+ * Fewer interrupts should conserve power -- at the very least, fewer
+ * interrupts draw less ire from other users of the system and tools
+ * like powertop.
+ */
+ if (!signal && b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
- list_splice_init(&b->signaled_requests, &signal);
-
list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
GEM_BUG_ON(list_empty(&ce->signals));
@@ -218,7 +248,10 @@ static void signal_irq_work(struct irq_work *work)
* spinlock as the callback chain may end up adding
* more signalers to the same context or engine.
*/
- __signal_request(rq, &signal);
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ if (__signal_request(rq))
+ /* We own signal_node now, xfer to local list */
+ signal = slist_add(&rq->signal_node, signal);
}
/*
@@ -238,9 +271,9 @@ static void signal_irq_work(struct irq_work *work)
spin_unlock(&b->irq_lock);
- list_for_each_safe(pos, next, &signal) {
+ llist_for_each_safe(signal, sn, signal) {
struct i915_request *rq =
- list_entry(pos, typeof(*rq), signal_link);
+ llist_entry(signal, typeof(*rq), signal_node);
struct list_head cb_list;
spin_lock(&rq->lock);
@@ -251,6 +284,9 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
+
+ if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+ intel_breadcrumbs_arm_irq(b);
}
struct intel_breadcrumbs *
@@ -264,7 +300,7 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
spin_lock_init(&b->irq_lock);
INIT_LIST_HEAD(&b->signalers);
- INIT_LIST_HEAD(&b->signaled_requests);
+ init_llist_head(&b->signaled_requests);
init_irq_work(&b->irq_work, signal_irq_work);
@@ -292,21 +328,22 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
- unsigned long flags;
-
- if (!READ_ONCE(b->irq_armed))
- return;
-
- spin_lock_irqsave(&b->irq_lock, flags);
- __intel_breadcrumbs_disarm_irq(b);
- spin_unlock_irqrestore(&b->irq_lock, flags);
-
- if (!list_empty(&b->signalers))
- irq_work_queue(&b->irq_work);
+ /* Kick the work once more to drain the signalers */
+ irq_work_sync(&b->irq_work);
+ while (unlikely(READ_ONCE(b->irq_armed))) {
+ local_irq_disable();
+ signal_irq_work(&b->irq_work);
+ local_irq_enable();
+ cond_resched();
+ }
+ GEM_BUG_ON(!list_empty(&b->signalers));
}
void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
{
+ irq_work_sync(&b->irq_work);
+ GEM_BUG_ON(!list_empty(&b->signalers));
+ GEM_BUG_ON(b->irq_armed);
kfree(b);
}
@@ -327,7 +364,8 @@ static void insert_breadcrumb(struct i915_request *rq,
* its signal completion.
*/
if (__request_completed(rq)) {
- if (__signal_request(rq, &b->signaled_requests))
+ if (__signal_request(rq) &&
+ llist_add(&rq->signal_node, &b->signaled_requests))
irq_work_queue(&b->irq_work);
return;
}
@@ -362,9 +400,12 @@ static void insert_breadcrumb(struct i915_request *rq,
GEM_BUG_ON(!check_signal_order(ce, rq));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- /* Check after attaching to irq, interrupt may have already fired. */
- if (__request_completed(rq))
- irq_work_queue(&b->irq_work);
+ /*
+ * Defer enabling the interrupt to after HW submission and recheck
+ * the request, since it may have completed and raised the interrupt
+ * while we were attaching it to the lists.
+ */
+ irq_work_queue(&b->irq_work);
}
bool i915_request_enable_breadcrumb(struct i915_request *rq)
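
The signaled_requests handling above moves from a spinlock-protected list_head to a lock-free llist. As a rough standalone sketch of that pattern (the struct and function names here are invented, not part of the patch): producers publish completed items with llist_add(), whose return value reports whether the list was previously empty so the irq_work is queued exactly once per batch, and the consumer detaches the whole batch with llist_del_all() and walks it without holding the breadcrumbs irq_lock.

#include <linux/llist.h>
#include <linux/irq_work.h>
#include <linux/kernel.h>

struct batch_signaler {
	struct llist_head signaled;	/* lock-free producer side */
	struct irq_work work;
};

struct signaled_item {
	struct llist_node node;
};

static void batch_drain(struct irq_work *work)
{
	struct batch_signaler *b = container_of(work, typeof(*b), work);
	struct llist_node *pos, *next;

	/* Atomically take ownership of everything published so far. */
	llist_for_each_safe(pos, next, llist_del_all(&b->signaled)) {
		struct signaled_item *it = llist_entry(pos, typeof(*it), node);

		/* The item is now exclusively ours; no lock is held here. */
		(void)it;
	}
}

static void batch_init(struct batch_signaler *b)
{
	init_llist_head(&b->signaled);
	init_irq_work(&b->work, batch_drain);
}

static void batch_publish(struct batch_signaler *b, struct signaled_item *it)
{
	/* llist_add() returns true only if the list was empty, so the
	 * consumer is kicked once per batch rather than per item. */
	if (llist_add(&it->node, &b->signaled))
		irq_work_queue(&b->work);
}

The union of signal_link/signal_node added to i915_request further down appears to rely on the same property: a request sits on at most one of the two lists at any time.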
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 8e53b9942695..3fa19820b37a 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -35,7 +35,7 @@ struct intel_breadcrumbs {
struct intel_engine_cs *irq_engine;
struct list_head signalers;
- struct list_head signaled_requests;
+ struct llist_head signaled_requests;
struct irq_work irq_work; /* for use from inside irq_lock */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9bb16bdf93cf..0952bf157234 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -182,6 +182,7 @@
struct virtual_engine {
struct intel_engine_cs base;
struct intel_context context;
+ struct rcu_work rcu;
/*
* We allow only a single request through the virtual engine at a time
@@ -5425,33 +5426,57 @@ static struct list_head *virtual_queue(struct virtual_engine *ve)
return &ve->base.execlists.default_priolist.requests[0];
}
-static void virtual_context_destroy(struct kref *kref)
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
{
struct virtual_engine *ve =
- container_of(kref, typeof(*ve), context.ref);
+ container_of(wrk, typeof(*ve), rcu.work);
unsigned int n;
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
- GEM_BUG_ON(ve->request);
GEM_BUG_ON(ve->context.inflight);
+ /* Preempt-to-busy may leave a stale request behind. */
+ if (unlikely(ve->request)) {
+ struct i915_request *old;
+
+ spin_lock_irq(&ve->base.active.lock);
+
+ old = fetch_and_zero(&ve->request);
+ if (old) {
+ GEM_BUG_ON(!i915_request_completed(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ spin_unlock_irq(&ve->base.active.lock);
+ }
+
+ /*
+ * Flush the tasklet in case it is still running on another core.
+ *
+ * This needs to be done before we remove ourselves from the siblings'
+ * rbtrees because, if it is running in parallel, it may reinsert
+ * the rb_node into a sibling.
+ */
+ tasklet_kill(&ve->base.execlists.tasklet);
+
+ /* Decouple ourselves from the siblings, no more access allowed. */
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = ve->siblings[n];
struct rb_node *node = &ve->nodes[sibling->id].rb;
- unsigned long flags;
if (RB_EMPTY_NODE(node))
continue;
- spin_lock_irqsave(&sibling->active.lock, flags);
+ spin_lock_irq(&sibling->active.lock);
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
- spin_unlock_irqrestore(&sibling->active.lock, flags);
+ spin_unlock_irq(&sibling->active.lock);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
if (ve->context.state)
__execlists_context_fini(&ve->context);
@@ -5464,6 +5489,27 @@ static void virtual_context_destroy(struct kref *kref)
kfree(ve);
}
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+
+ GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+ /*
+ * When destroying the virtual engine, we have to be aware that
+ * it may still be in use from a hardirq/softirq context causing
+ * the resubmission of a completed request (background completion
+ * due to preempt-to-busy). Before we can free the engine, we need
+ * to flush the submission code and tasklets that are still potentially
+ * accessing the engine. Flushing the tasklets requires process context,
+ * and since we can guard the resubmit onto the engine with an RCU read
+ * lock, we can delegate the free of the engine to an RCU worker.
+ */
+ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+ queue_rcu_work(system_wq, &ve->rcu);
+}
+
static void virtual_engine_initial_hint(struct virtual_engine *ve)
{
int swp;
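
For the virtual engine teardown above, destruction is pushed out of the kref release (which may run in softirq context via preempt-to-busy) into an RCU-deferred worker. A minimal sketch of that idiom, with made-up names and assuming the object is only looked up under rcu_read_lock():

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct deferred_obj {
	struct rcu_work rcu;
	/* ... payload ... */
};

static void deferred_obj_destroy(struct work_struct *wrk)
{
	struct deferred_obj *obj = container_of(wrk, typeof(*obj), rcu.work);

	/*
	 * Runs in process context after an RCU grace period: readers that
	 * found the object under rcu_read_lock() have finished, and it is
	 * now safe to sleep (e.g. in tasklet_kill()) before freeing.
	 */
	kfree(obj);
}

static void deferred_obj_release(struct deferred_obj *obj)
{
	INIT_RCU_WORK(&obj->rcu, deferred_obj_destroy);
	queue_rcu_work(system_wq, &obj->rcu);
}

The trade-off is that the free becomes asynchronous, happening at least one grace period after the last reference is dropped.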
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 9831361f181e..a81cf0f01e78 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -255,7 +255,7 @@ struct intel_gvt_mmio {
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
-/* This reg has been accessed through GPU commands */
+/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
/* This reg is in GVT's mmio save-restor list and in hardware
* logical context image
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e94976976571..3640d0e229d2 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -909,8 +909,13 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- intel_uncore_write(uncore, oastatus_reg,
- oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+
+ intel_uncore_rmw(uncore, oastatus_reg,
+ GEN8_OASTATUS_COUNTER_OVERFLOW |
+ GEN8_OASTATUS_REPORT_LOST,
+ IS_GEN_RANGE(uncore->i915, 8, 10) ?
+ (GEN8_OASTATUS_HEAD_POINTER_WRAP |
+ GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}
return gen8_append_oa_reports(stream, buf, count, offset);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 664f3bf9af03..5cd83eac940c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -676,6 +676,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
#define GEN8_OASTATUS _MMIO(0x2b08)
+#define GEN8_OASTATUS_TAIL_POINTER_WRAP (1 << 17)
+#define GEN8_OASTATUS_HEAD_POINTER_WRAP (1 << 16)
#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3)
#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2)
#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 16b721080195..874af6db6103 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -176,7 +176,11 @@ struct i915_request {
struct intel_context *context;
struct intel_ring *ring;
struct intel_timeline __rcu *timeline;
- struct list_head signal_link;
+
+ union {
+ struct list_head signal_link;
+ struct llist_node signal_node;
+ };
/*
* The rcu epoch of when this request was allocated. Used to judiciously
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index cf11c4850b40..52f11a63a330 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -522,15 +522,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
return 0;
}
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
- .destroy = mtk_dpi_encoder_destroy,
-};
-
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 4a188a942c38..65fd99c528af 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -444,7 +444,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
u32 horizontal_frontporch_byte;
+ u32 horizontal_front_back_byte;
+ u32 data_phy_cycles_byte;
u32 dsi_tmp_buf_bpp, data_phy_cycles;
+ u32 delta;
struct mtk_phy_timing *timing = &dsi->phy_timing;
struct videomode *vm = &dsi->vm;
@@ -466,50 +469,30 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
+ horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
else
horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
- dsi_tmp_buf_bpp;
+ dsi_tmp_buf_bpp - 10;
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
- timing->da_hs_zero + timing->da_hs_exit;
-
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
- if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
- data_phy_cycles * dsi->lanes + 18) {
- horizontal_frontporch_byte =
- vm->hfront_porch * dsi_tmp_buf_bpp -
- (data_phy_cycles * dsi->lanes + 18) *
- vm->hfront_porch /
- (vm->hfront_porch + vm->hback_porch);
-
- horizontal_backporch_byte =
- horizontal_backporch_byte -
- (data_phy_cycles * dsi->lanes + 18) *
- vm->hback_porch /
- (vm->hfront_porch + vm->hback_porch);
- } else {
- DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp;
- }
+ timing->da_hs_zero + timing->da_hs_exit + 3;
+
+ delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
+
+ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+ horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
+ data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;
+
+ if (horizontal_front_back_byte > data_phy_cycles_byte) {
+ horizontal_frontporch_byte -= data_phy_cycles_byte *
+ horizontal_frontporch_byte /
+ horizontal_front_back_byte;
+
+ horizontal_backporch_byte -= data_phy_cycles_byte *
+ horizontal_backporch_byte /
+ horizontal_front_back_byte;
} else {
- if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
- data_phy_cycles * dsi->lanes + 12) {
- horizontal_frontporch_byte =
- vm->hfront_porch * dsi_tmp_buf_bpp -
- (data_phy_cycles * dsi->lanes + 12) *
- vm->hfront_porch /
- (vm->hfront_porch + vm->hback_porch);
- horizontal_backporch_byte = horizontal_backporch_byte -
- (data_phy_cycles * dsi->lanes + 12) *
- vm->hback_porch /
- (vm->hfront_porch + vm->hback_porch);
- } else {
- DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp;
- }
+ DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
}
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
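
The rewritten timing code above folds the two near-identical burst/non-burst branches into one proportional split of the D-PHY overhead across HFP and HBP. A small standalone check of that arithmetic, using made-up example values rather than a real panel timing:

#include <stdio.h>

int main(void)
{
	unsigned int hfp_byte = 220, hbp_byte = 160; /* porch width * bytes per pixel */
	unsigned int phy_byte = 30 * 4 + 12;         /* data_phy_cycles * lanes + delta */
	unsigned int total = hfp_byte + hbp_byte;

	if (total > phy_byte) {
		/* Each porch gives up a share proportional to its size. */
		hfp_byte -= phy_byte * hfp_byte / total;
		hbp_byte -= phy_byte * hbp_byte / total;
	} else {
		printf("HFP + HBP smaller than the D-PHY overhead\n");
	}

	printf("hfp=%u hbp=%u\n", hfp_byte, hbp_byte); /* prints hfp=144 hbp=105 */
	return 0;
}

Integer division means the two shares can undershoot phy_byte by a byte or two, as in the driver code; the key property is that both porches shrink instead of only the front porch.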
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 549bc67feabb..c2051380d18c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -558,8 +558,10 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
NV_PRINTK(err, cli, "validating bo list\n");
validate_fini(op, chan, NULL, NULL);
return ret;
+ } else if (ret > 0) {
+ *apply_relocs = true;
}
- *apply_relocs = ret;
+
return 0;
}
@@ -662,7 +664,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
- u_free(reloc);
return ret;
}
@@ -872,9 +873,10 @@ out:
break;
}
}
- u_free(reloc);
}
out_prevalid:
+ if (!IS_ERR(reloc))
+ u_free(reloc);
u_free(bo);
u_free(push);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 19b75bebd35f..c5f2944d5bc6 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -219,6 +219,7 @@ struct vc4_dev {
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
+ struct drm_private_obj hvs_channels;
struct drm_private_obj load_tracker;
/* List of vc4_debugfs_info_entry for adding to debugfs once
@@ -531,6 +532,9 @@ struct vc4_crtc_state {
unsigned int top;
unsigned int bottom;
} margins;
+
+ /* Transitional state below, only valid during atomic commits */
+ bool update_muxing;
};
#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 95779d50cca0..afc178b0d89f 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -760,12 +760,54 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
{
}
+#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL
+#define WIFI_2_4GHz_CH1_MAX_FREQ 2422000000ULL
+
+static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ unsigned long long pixel_rate = mode->clock * 1000;
+ unsigned long long tmds_rate;
+
+ if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+ (mode->hsync_end % 2) || (mode->htotal % 2)))
+ return -EINVAL;
+
+ /*
+ * The 1440p@60 pixel rate is in the same range as the first
+ * WiFi channel (between 2.4GHz and 2.422GHz with 22MHz
+ * bandwidth). Slightly lower the frequency to bring it out of
+ * the WiFi range.
+ */
+ tmds_rate = pixel_rate * 10;
+ if (vc4_hdmi->disable_wifi_frequencies &&
+ (tmds_rate >= WIFI_2_4GHz_CH1_MIN_FREQ &&
+ tmds_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) {
+ mode->clock = 238560;
+ pixel_rate = mode->clock * 1000;
+ }
+
+ if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
+ return -EINVAL;
+
+ return 0;
+}
+
static enum drm_mode_status
vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+ (mode->hsync_end % 2) || (mode->htotal % 2)))
+ return MODE_H_ILLEGAL;
+
if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
return MODE_CLOCK_HIGH;
@@ -773,6 +815,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
+ .atomic_check = vc4_hdmi_encoder_atomic_check,
.mode_valid = vc4_hdmi_encoder_mode_valid,
.disable = vc4_hdmi_encoder_disable,
.enable = vc4_hdmi_encoder_enable,
@@ -1694,6 +1737,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
}
+ vc4_hdmi->disable_wifi_frequencies =
+ of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+
pm_runtime_enable(dev);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1817,6 +1863,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
PHY_LANE_2,
PHY_LANE_CK,
},
+ .unsupported_odd_h_timings = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
@@ -1842,6 +1889,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
PHY_LANE_CK,
PHY_LANE_2,
},
+ .unsupported_odd_h_timings = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
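
As a quick sanity check of the coexistence comment above (illustrative numbers only: a 2560x1440@60 CVT-RB mode is commonly quoted at a 241.5 MHz pixel clock, and the TMDS bit rate for 8 bpc is ten times the pixel clock, matching the patch's pixel_rate * 10):

#include <stdio.h>

int main(void)
{
	unsigned long long khz[] = { 241500, 238560 }; /* typical 1440p@60, patched clock */

	for (int i = 0; i < 2; i++) {
		unsigned long long tmds = khz[i] * 1000ULL * 10; /* Hz */

		printf("%llu kHz -> TMDS %llu Hz, in 2.400-2.422 GHz band: %s\n",
		       khz[i], tmds,
		       (tmds >= 2400000000ULL && tmds <= 2422000000ULL) ? "yes" : "no");
	}
	return 0;
}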
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 63c6f8bddf1d..0526a9cf608a 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -62,6 +62,9 @@ struct vc4_hdmi_variant {
*/
enum vc4_hdmi_phy_channel phy_lane_mapping[4];
+ /* The BCM2711 cannot deal with odd horizontal pixel timings */
+ bool unsupported_odd_h_timings;
+
/* Callback to get the resources (memory region, interrupts,
* clocks, etc) for that variant.
*/
@@ -139,6 +142,14 @@ struct vc4_hdmi {
int hpd_gpio;
bool hpd_active_low;
+ /*
+ * On some systems (like the RPi4), some modes are in the same
+ * frequency range as the WiFi channels (1440p@60Hz for
+ * example). Should we take evasive action because that system
+ * has a WiFi adapter?
+ */
+ bool disable_wifi_frequencies;
+
struct cec_adapter *cec_adap;
struct cec_msg cec_rx_msg;
bool cec_tx_ok;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 2b951cae04ad..ba310c0ab5f6 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -24,6 +24,8 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
+#define HVS_NUM_CHANNELS 3
+
struct vc4_ctm_state {
struct drm_private_state base;
struct drm_color_ctm *ctm;
@@ -35,6 +37,17 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
return container_of(priv, struct vc4_ctm_state, base);
}
+struct vc4_hvs_state {
+ struct drm_private_state base;
+ unsigned int unassigned_channels;
+};
+
+static struct vc4_hvs_state *
+to_vc4_hvs_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct vc4_hvs_state, base);
+}
+
struct vc4_load_tracker_state {
struct drm_private_state base;
u64 hvs_load;
@@ -113,7 +126,7 @@ static int vc4_ctm_obj_init(struct vc4_dev *vc4)
drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
&vc4_ctm_state_funcs);
- return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL);
+ return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}
/* Converts a DRM S31.32 value to the HW S0.9 format. */
@@ -169,6 +182,19 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
+static struct vc4_hvs_state *
+vc4_hvs_get_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct drm_atomic_state *state)
{
@@ -213,10 +239,7 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- unsigned char dsp2_mux = 0;
- unsigned char dsp3_mux = 3;
- unsigned char dsp4_mux = 3;
- unsigned char dsp5_mux = 3;
+ unsigned char mux;
unsigned int i;
u32 reg;
@@ -224,50 +247,59 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- if (!crtc_state->active)
+ if (!vc4_state->update_muxing)
continue;
switch (vc4_crtc->data->hvs_output) {
case 2:
- dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ reg = HVS_READ(SCALER_DISPECTRL);
+ HVS_WRITE(SCALER_DISPECTRL,
+ (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
break;
case 3:
- dsp3_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ HVS_WRITE(SCALER_DISPCTRL,
+ (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
break;
case 4:
- dsp4_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ HVS_WRITE(SCALER_DISPEOLN,
+ (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
+
break;
case 5:
- dsp5_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ HVS_WRITE(SCALER_DISPDITHER,
+ (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
break;
default:
break;
}
}
-
- reg = HVS_READ(SCALER_DISPECTRL);
- HVS_WRITE(SCALER_DISPECTRL,
- (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
- VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
-
- reg = HVS_READ(SCALER_DISPCTRL);
- HVS_WRITE(SCALER_DISPCTRL,
- (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
- VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
-
- reg = HVS_READ(SCALER_DISPEOLN);
- HVS_WRITE(SCALER_DISPEOLN,
- (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
- VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
-
- reg = HVS_READ(SCALER_DISPDITHER);
- HVS_WRITE(SCALER_DISPDITHER,
- (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
- VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
}
static void
@@ -657,53 +689,123 @@ static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
&load_state->base,
&vc4_load_tracker_state_funcs);
- return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL);
+ return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}
-#define NUM_OUTPUTS 6
-#define NUM_CHANNELS 3
+static struct drm_private_state *
+vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
+{
+ struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
+ struct vc4_hvs_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
-static int
-vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ state->unassigned_channels = old_state->unassigned_channels;
+
+ return &state->base;
+}
+
+static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
{
- unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct drm_crtc *crtc;
- int i, ret;
+ struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
- /*
- * Since the HVS FIFOs are shared across all the pixelvalves and
- * the TXP (and thus all the CRTCs), we need to pull the current
- * state of all the enabled CRTCs so that an update to a single
- * CRTC still keeps the previous FIFOs enabled and assigned to
- * the same CRTCs, instead of evaluating only the CRTC being
- * modified.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_state *crtc_state;
+ kfree(hvs_state);
+}
- if (!crtc->state->enable)
- continue;
+static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
+ .atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
+ .atomic_destroy_state = vc4_hvs_channels_destroy_state,
+};
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
+static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ drm_atomic_private_obj_fini(&vc4->hvs_channels);
+}
+
+static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_hvs_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
+ drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
+ &state->base,
+ &vc4_hvs_state_funcs);
+
+ return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
+}
+
+/*
+ * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
+ * the TXP (and therefore all the CRTCs found on that platform).
+ *
+ * The naive (and our initial) implementation would just iterate over
+ * all the active CRTCs, try to find a suitable FIFO, and then remove it
+ * from the pool of available FIFOs. However, there are a few corner
+ * cases that need to be considered:
+ *
+ * - When running in a dual-display setup (so with two CRTCs involved),
+ * we can update the state of a single CRTC (for example by changing
+ * its mode using xrandr under X11) without affecting the other. In
+ * this case, the other CRTC wouldn't be in the state at all, so we
+ * need to consider all the running CRTCs in the DRM device to assign
+ * a FIFO, not just the one in the state.
+ *
+ * - To fix the above, we can't use drm_atomic_get_crtc_state on all
+ * enabled CRTCs to pull their CRTC state into the global state, since
+ * a page flip would start considering their vblank to complete. Since
+ * we don't have a guarantee that they are actually active, that
+ * vblank might never happen, and shouldn't even be considered if we
+ * want to do a page flip on a single CRTC. That can be tested by
+ * doing a modetest -v first on HDMI1 and then on HDMI0.
+ *
+ * - Since we need the pixelvalve to be disabled and enabled back when
+ * the FIFO is changed, we should keep the FIFO assigned for as long
+ * as the CRTC is enabled, only considering it free again once that
+ * CRTC has been disabled. This can be tested by booting X11 on a
+ * single display, and changing the resolution down and then back up.
+ */
+static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct vc4_hvs_state *hvs_new_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ hvs_new_state = vc4_hvs_get_global_state(state);
+ if (!hvs_new_state)
+ return -EINVAL;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct vc4_crtc_state *old_vc4_crtc_state =
+ to_vc4_crtc_state(old_crtc_state);
struct vc4_crtc_state *new_vc4_crtc_state =
to_vc4_crtc_state(new_crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
unsigned int matching_channels;
- if (old_crtc_state->enable && !new_crtc_state->enable)
- new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
-
- if (!new_crtc_state->enable)
+ /* Nothing to do here, let's skip it */
+ if (old_crtc_state->enable == new_crtc_state->enable)
continue;
- if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
- unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
+ /* Muxing will need to be modified, mark it as such */
+ new_vc4_crtc_state->update_muxing = true;
+
+ /* If we're disabling our CRTC, we put back our channel */
+ if (!new_crtc_state->enable) {
+ hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
+ new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
}
@@ -731,17 +833,29 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
* the future, we will need to have something smarter,
* but it works so far.
*/
- matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+ matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
if (matching_channels) {
unsigned int channel = ffs(matching_channels) - 1;
new_vc4_crtc_state->assigned_channel = channel;
- unassigned_channels &= ~BIT(channel);
+ hvs_new_state->unassigned_channels &= ~BIT(channel);
} else {
return -EINVAL;
}
}
+ return 0;
+}
+
+static int
+vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = vc4_pv_muxing_atomic_check(dev, state);
+ if (ret)
+ return ret;
+
ret = vc4_ctm_atomic_check(dev, state);
if (ret < 0)
return ret;
@@ -808,6 +922,10 @@ int vc4_kms_load(struct drm_device *dev)
if (ret)
return ret;
+ ret = vc4_hvs_channels_obj_init(vc4);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
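
The FIFO assignment in vc4_pv_muxing_atomic_check() boils down to bitmask bookkeeping over the three HVS channels. A standalone sketch of that allocation scheme (hypothetical helper names, with userspace ffs() standing in for the kernel's):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NUM_CHANNELS 3

/* Bit i set => HVS channel i is currently free. */
static unsigned int unassigned = (1u << NUM_CHANNELS) - 1;

/* 'usable' mirrors hvs_available_channels: the channels this CRTC can use. */
static int assign_channel(unsigned int usable)
{
	unsigned int matching = unassigned & usable;
	int channel;

	if (!matching)
		return -1;	/* no free channel this CRTC can be muxed to */

	channel = ffs(matching) - 1;	/* lowest free usable channel */
	unassigned &= ~(1u << channel);
	return channel;
}

static void release_channel(int channel)
{
	unassigned |= 1u << channel;
}

int main(void)
{
	printf("%d\n", assign_channel(0x7));	/* 0: any channel is fine */
	printf("%d\n", assign_channel(0x6));	/* 1: restricted to channels 1-2 */
	release_channel(0);
	printf("%d\n", assign_channel(0x1));	/* 0: freed channel is reusable */
	return 0;
}

Keeping the free mask in a global private-state object, rather than recomputing it from scratch per commit, is what lets an enabled CRTC hold on to its channel until it is actually disabled.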
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 8ca51e43cf53..329ee4f48d95 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
fd->rec_cpu_num = -1; /* no cpu affinity by default */
- fd->mm = current->mm;
- mmgrab(fd->mm);
fd->dd = dd;
fp->private_data = fd;
return 0;
@@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
deallocate_ctxt(uctxt);
done:
- mmdrop(fdata->mm);
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index b4c6bff60a4e..e09e8244a94c 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,6 +1,7 @@
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -1451,7 +1452,6 @@ struct hfi1_filedata {
u32 invalid_tid_idx;
/* protect invalid_tids array and invalid_tid_idx */
spinlock_t invalid_lock;
- struct mm_struct *mm;
};
extern struct xarray hfi1_dev_table;
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 24ca17b77b72..f3fb28e3d5d7 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -48,23 +49,11 @@
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
+#include <linux/sched/mm.h>
#include "mmu_rb.h"
#include "trace.h"
-struct mmu_rb_handler {
- struct mmu_notifier mn;
- struct rb_root_cached root;
- void *ops_arg;
- spinlock_t lock; /* protect the RB tree */
- struct mmu_rb_ops *ops;
- struct mm_struct *mm;
- struct list_head lru_list;
- struct work_struct del_work;
- struct list_head del_list;
- struct workqueue_struct *wq;
-};
-
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
@@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
return PAGE_ALIGN(node->addr + node->len) - 1;
}
-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+int hfi1_mmu_rb_register(void *ops_arg,
struct mmu_rb_ops *ops,
struct workqueue_struct *wq,
struct mmu_rb_handler **handler)
{
- struct mmu_rb_handler *handlr;
+ struct mmu_rb_handler *h;
int ret;
- handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
- if (!handlr)
+ h = kmalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
return -ENOMEM;
- handlr->root = RB_ROOT_CACHED;
- handlr->ops = ops;
- handlr->ops_arg = ops_arg;
- INIT_HLIST_NODE(&handlr->mn.hlist);
- spin_lock_init(&handlr->lock);
- handlr->mn.ops = &mn_opts;
- handlr->mm = mm;
- INIT_WORK(&handlr->del_work, handle_remove);
- INIT_LIST_HEAD(&handlr->del_list);
- INIT_LIST_HEAD(&handlr->lru_list);
- handlr->wq = wq;
-
- ret = mmu_notifier_register(&handlr->mn, handlr->mm);
+ h->root = RB_ROOT_CACHED;
+ h->ops = ops;
+ h->ops_arg = ops_arg;
+ INIT_HLIST_NODE(&h->mn.hlist);
+ spin_lock_init(&h->lock);
+ h->mn.ops = &mn_opts;
+ INIT_WORK(&h->del_work, handle_remove);
+ INIT_LIST_HEAD(&h->del_list);
+ INIT_LIST_HEAD(&h->lru_list);
+ h->wq = wq;
+
+ ret = mmu_notifier_register(&h->mn, current->mm);
if (ret) {
- kfree(handlr);
+ kfree(h);
return ret;
}
- *handler = handlr;
+ *handler = h;
return 0;
}
@@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
struct list_head del_list;
/* Unregister first so we don't get any more notifications. */
- mmu_notifier_unregister(&handler->mn, handler->mm);
+ mmu_notifier_unregister(&handler->mn, handler->mn.mm);
/*
* Make sure the wq delete handler is finished running. It will not
@@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
int ret = 0;
trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
+
+ if (current->mm != handler->mn.mm)
+ return -EPERM;
+
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, mnode->addr, mnode->len);
if (node) {
@@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
__mmu_int_rb_remove(mnode, &handler->root);
list_del(&mnode->list); /* remove from LRU list */
}
+ mnode->handler = handler;
unlock:
spin_unlock_irqrestore(&handler->lock, flags);
return ret;
@@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
unsigned long flags;
bool ret = false;
+ if (current->mm != handler->mn.mm)
+ return ret;
+
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, addr, len);
if (node) {
@@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
unsigned long flags;
bool stop = false;
+ if (current->mm != handler->mn.mm)
+ return;
+
INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags);
@@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
{
unsigned long flags;
+ if (current->mm != handler->mn.mm)
+ return;
+
/* Validity of handler and node pointers has been checked by caller. */
trace_hfi1_mmu_rb_remove(node->addr, node->len);
spin_lock_irqsave(&handler->lock, flags);
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index f04cec1e99d1..423aacc67e94 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -54,6 +55,7 @@ struct mmu_rb_node {
unsigned long len;
unsigned long __last;
struct rb_node node;
+ struct mmu_rb_handler *handler;
struct list_head list;
};
@@ -71,7 +73,19 @@ struct mmu_rb_ops {
void *evict_arg, bool *stop);
};
-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+struct mmu_rb_handler {
+ struct mmu_notifier mn;
+ struct rb_root_cached root;
+ void *ops_arg;
+ spinlock_t lock; /* protect the RB tree */
+ struct mmu_rb_ops *ops;
+ struct list_head lru_list;
+ struct work_struct del_work;
+ struct list_head del_list;
+ struct workqueue_struct *wq;
+};
+
+int hfi1_mmu_rb_register(void *ops_arg,
struct mmu_rb_ops *ops,
struct workqueue_struct *wq,
struct mmu_rb_handler **handler);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index f81ca20f4b69..b94fc7fd75a9 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
{
struct page **pages;
struct hfi1_devdata *dd = fd->uctxt->dd;
+ struct mm_struct *mm;
if (mapped) {
pci_unmap_single(dd->pcidev, node->dma_addr,
node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
pages = &node->pages[idx];
+ mm = mm_from_tid_node(node);
} else {
pages = &tidbuf->pages[idx];
+ mm = current->mm;
}
- hfi1_release_user_pages(fd->mm, pages, npages, mapped);
+ hfi1_release_user_pages(mm, pages, npages, mapped);
fd->tid_n_pinned -= npages;
}
@@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
* pages, accept the amount pinned so far and program only that.
* User space knows how to deal with partially programmed buffers.
*/
- if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
+ if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
kfree(pages);
return -ENOMEM;
}
- pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
+ pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
if (pinned <= 0) {
kfree(pages);
return pinned;
@@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
if (fd->use_mn) {
ret = mmu_interval_notifier_insert(
- &node->notifier, fd->mm,
+ &node->notifier, current->mm,
tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
&tid_mn_ops);
if (ret)
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 332abb446861..d45c7b6988d4 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -1,6 +1,7 @@
#ifndef _HFI1_USER_EXP_RCV_H
#define _HFI1_USER_EXP_RCV_H
/*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -95,4 +96,9 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
struct hfi1_tid_info *tinfo);
+static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
+{
+ return node->notifier.mm;
+}
+
#endif /* _HFI1_USER_EXP_RCV_H */
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a92346e88628..4a4956f96a7e 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -188,7 +189,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait);
atomic_set(&pq->n_locked, 0);
- pq->mm = fd->mm;
iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
activate_packet_queue, NULL, NULL);
@@ -230,7 +230,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
cq->nentries = hfi1_sdma_comp_ring_size;
- ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
+ ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
&pq->handler);
if (ret) {
dd_dev_err(dd, "Failed to register with MMU %d", ret);
@@ -980,13 +980,13 @@ static int pin_sdma_pages(struct user_sdma_request *req,
npages -= node->npages;
retry:
- if (!hfi1_can_pin_pages(pq->dd, pq->mm,
+ if (!hfi1_can_pin_pages(pq->dd, current->mm,
atomic_read(&pq->n_locked), npages)) {
cleared = sdma_cache_evict(pq, npages);
if (cleared >= npages)
goto retry;
}
- pinned = hfi1_acquire_user_pages(pq->mm,
+ pinned = hfi1_acquire_user_pages(current->mm,
((unsigned long)iovec->iov.iov_base +
(node->npages * PAGE_SIZE)), npages, 0,
pages + node->npages);
@@ -995,7 +995,7 @@ retry:
return pinned;
}
if (pinned != npages) {
- unpin_vector_pages(pq->mm, pages, node->npages, pinned);
+ unpin_vector_pages(current->mm, pages, node->npages, pinned);
return -EFAULT;
}
kfree(node->pages);
@@ -1008,7 +1008,8 @@ retry:
static void unpin_sdma_pages(struct sdma_mmu_node *node)
{
if (node->npages) {
- unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
+ unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+ node->npages);
atomic_sub(node->npages, &node->pq->n_locked);
}
}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 9972e0e6545e..1e8c02fe8ad1 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -1,6 +1,7 @@
#ifndef _HFI1_USER_SDMA_H
#define _HFI1_USER_SDMA_H
/*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -133,7 +134,6 @@ struct hfi1_user_sdma_pkt_q {
unsigned long unpinned;
struct mmu_rb_handler *handler;
atomic_t n_locked;
- struct mm_struct *mm;
};
struct hfi1_user_sdma_comp_q {
@@ -250,4 +250,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct iovec *iovec, unsigned long dim,
unsigned long *count);
+static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
+{
+ return node->rb.handler->mn.mm;
+}
+
#endif /* _HFI1_USER_SDMA_H */
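
Across the hfi1 changes above, the cached fd->mm / pq->mm pointers go away in favour of two sources of truth: current->mm at pinning time, and the mm recorded by the mmu_notifier for teardown paths. A kernel-style sketch of that guard, with invented struct and function names:

#include <linux/mmu_notifier.h>
#include <linux/sched.h>

struct pin_cache {
	struct mmu_notifier mn;	/* mn.mm is filled in by mmu_notifier_register() */
	/* ... rb tree, lru list, ... */
};

static int pin_cache_insert(struct pin_cache *pc /* , entry ... */)
{
	/*
	 * Only the process that registered the notifier may add entries;
	 * any other caller (e.g. a task that inherited the fd) is rejected
	 * rather than pinning pages against a foreign mm.
	 */
	if (current->mm != pc->mn.mm)
		return -EPERM;

	/* ... insert under pc's lock ... */
	return 0;
}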
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 6d30850696c5..0468028ffe39 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2936,6 +2936,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
@@ -4989,11 +4990,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S);
qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
- V2_QPC_BYTE_212_RETRY_CNT_M,
- V2_QPC_BYTE_212_RETRY_CNT_S);
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_CNT_M,
- V2_QPC_BYTE_244_RNR_CNT_S);
+ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_S);
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 29c9dd4bcbc6..be7f2fe1e883 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1661,7 +1661,7 @@ struct hns_roce_query_pf_caps_d {
__le32 rsv_uars_rsv_qps;
};
#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0
-#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(20, 0)
+#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0)
#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20
#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 2408b279e4c2..584932d3cc44 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -54,10 +54,6 @@
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
-static int push_mode;
-module_param(push_mode, int, 0644);
-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
-
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
@@ -1580,7 +1576,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
if (status)
goto exit;
iwdev->obj_next = iwdev->obj_mem;
- iwdev->push_mode = push_mode;
init_waitqueue_head(&iwdev->vchnl_waitq);
init_waitqueue_head(&dev->vf_reqs);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 581ecbadf586..533f3caecb7a 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -167,39 +167,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
*/
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
- struct i40iw_ucontext *ucontext;
- u64 db_addr_offset, push_offset, pfn;
-
- ucontext = to_ucontext(context);
- if (ucontext->iwdev->sc_dev.is_pf) {
- db_addr_offset = I40IW_DB_ADDR_OFFSET;
- push_offset = I40IW_PUSH_OFFSET;
- if (vma->vm_pgoff)
- vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
- } else {
- db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
- push_offset = I40IW_VF_PUSH_OFFSET;
- if (vma->vm_pgoff)
- vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
- }
+ struct i40iw_ucontext *ucontext = to_ucontext(context);
+ u64 dbaddr;
- vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
-
- if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- } else {
- if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- }
+ if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
- pfn = vma->vm_pgoff +
- (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
- PAGE_SHIFT);
+ dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
- return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
- vma->vm_page_prot, NULL);
+ return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot), NULL);
}
/**
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index c3cfea243af8..119b2573c9a0 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -803,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox))
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
goto err_out_arm;
+ }
cq_context = mailbox->buf;
@@ -846,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
spin_lock_irq(&dev->cq_table.lock);
- if (mthca_array_set(&dev->cq_table.cq,
- cq->cqn & (dev->limits.num_cqs - 1),
- cq)) {
+ err = mthca_array_set(&dev->cq_table.cq,
+ cq->cqn & (dev->limits.num_cqs - 1), cq);
+ if (err) {
spin_unlock_irq(&dev->cq_table.lock);
goto err_out_free_mr;
}
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 82e4af8f09bb..23a790f8f550 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -29,6 +29,7 @@
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
+#include <asm/set_memory.h>
#include <linux/crash_dump.h>
@@ -672,11 +673,27 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
+static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+ gfp_t gfp, size_t size)
+{
+ int order = get_order(size);
+ void *buf = (void *)__get_free_pages(gfp, order);
+
+ if (buf &&
+ iommu_feature(iommu, FEATURE_SNP) &&
+ set_memory_4k((unsigned long)buf, (1 << order))) {
+ free_pages((unsigned long)buf, order);
+ buf = NULL;
+ }
+
+ return buf;
+}
+
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
- iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(EVT_BUFFER_SIZE));
+ iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+ EVT_BUFFER_SIZE);
return iommu->evt_buf ? 0 : -ENOMEM;
}
@@ -715,8 +732,8 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
- iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(PPR_LOG_SIZE));
+ iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+ PPR_LOG_SIZE);
return iommu->ppr_log ? 0 : -ENOMEM;
}
@@ -838,7 +855,7 @@ static int iommu_init_ga(struct amd_iommu *iommu)
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
- iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+ iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
return iommu->cmd_sem ? 0 : -ENOMEM;
}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index be4318044f96..702fbaa6c9ad 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -69,6 +69,10 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
struct qcom_smmu *qsmmu;
+ /* Check to make sure qcom_scm has finished probing */
+ if (!qcom_scm_is_available())
+ return ERR_PTR(-EPROBE_DEFER);
+
qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
if (!qsmmu)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 11319e4dce4a..b46dbfa6d0ed 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -986,7 +986,8 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
warn_invalid_dmar(phys_addr, " returns all ones");
goto unmap;
}
- iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
+ if (ecap_vcs(iommu->ecap))
+ iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index c71a9c279a33..a49afa11673c 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1833,7 +1833,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
}
- if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
+ if (vccap_pasid(iommu->vccap))
ioasid_unregister_allocator(&iommu->pasid_allocator);
#endif
@@ -3212,7 +3212,7 @@ static void register_pasid_allocator(struct intel_iommu *iommu)
* is active. All vIOMMU allocators will eventually be calling the same
* host allocator.
*/
- if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
+ if (!vccap_pasid(iommu->vccap))
return;
pr_info("Register custom PASID allocator\n");
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b53446bb8c6b..0f4dc25d46c9 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -264,16 +264,18 @@ int iommu_probe_device(struct device *dev)
*/
iommu_alloc_default_domain(group, dev);
- if (group->default_domain)
+ if (group->default_domain) {
ret = __iommu_attach_device(group->default_domain, dev);
+ if (ret) {
+ iommu_group_put(group);
+ goto err_release;
+ }
+ }
iommu_create_device_direct_mappings(group, dev);
iommu_group_put(group);
- if (ret)
- goto err_release;
-
if (ops->probe_finalize)
ops->probe_finalize(dev);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index a3cb104956d5..7e152bbb4fa6 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -253,17 +253,31 @@ config VIDEO_MEDIATEK_VCODEC
depends on MTK_IOMMU || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on VIDEO_MEDIATEK_VPU || MTK_SCP
+ # The two following lines ensure we have the same state ("m" or "y") as
+ # our dependencies, to avoid missing symbols during link.
+ depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
+ depends on MTK_SCP || !MTK_SCP
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
- select VIDEO_MEDIATEK_VPU
- select MTK_SCP
+ select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
+ select VIDEO_MEDIATEK_VCODEC_SCP if MTK_SCP
help
- Mediatek video codec driver provides HW capability to
- encode and decode in a range of video formats
- This driver rely on VPU driver to communicate with VPU.
+ The Mediatek video codec driver provides HW capability to
+ encode and decode in a range of video formats on MT8173
+ and MT8183.
+
+ Note that support for MT8173 requires VIDEO_MEDIATEK_VPU to
+ also be selected. Support for MT8183 depends on MTK_SCP.
+
+ To compile this driver as modules, choose M here: the
+ modules will be called mtk-vcodec-dec and mtk-vcodec-enc.
+
+config VIDEO_MEDIATEK_VCODEC_VPU
+ bool
- To compile this driver as modules, choose M here: the
- modules will be called mtk-vcodec-dec and mtk-vcodec-enc.
+config VIDEO_MEDIATEK_VCODEC_SCP
+ bool
config VIDEO_MEM2MEM_DEINTERLACE
tristate "Deinterlace support"
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index cd902b180669..63fce1b85d26 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -307,6 +307,7 @@ static int mmpcam_platform_remove(struct platform_device *pdev)
* Suspend/resume support.
*/
+#ifdef CONFIG_PM
static int mmpcam_runtime_resume(struct device *dev)
{
struct mmp_camera *cam = dev_get_drvdata(dev);
@@ -352,6 +353,7 @@ static int __maybe_unused mmpcam_resume(struct device *dev)
return mccic_resume(&cam->mcam);
return 0;
}
+#endif
static const struct dev_pm_ops mmpcam_pm_ops = {
SET_RUNTIME_PM_OPS(mmpcam_runtime_suspend, mmpcam_runtime_resume, NULL)
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index f679c6e1a3e9..4618d43dbbc8 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -24,4 +24,12 @@ mtk-vcodec-enc-y := venc/venc_vp8_if.o \
mtk-vcodec-common-y := mtk_vcodec_intr.o \
mtk_vcodec_util.o \
- mtk_vcodec_fw.o
+ mtk_vcodec_fw.o \
+
+ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_VPU),)
+mtk-vcodec-common-y += mtk_vcodec_fw_vpu.o
+endif
+
+ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP),)
+mtk-vcodec-common-y += mtk_vcodec_fw_scp.o
+endif
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index d14bc208ea5e..145686d2c219 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -241,7 +241,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
- dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, VPU_RST_DEC);
+ dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, DECODER);
if (IS_ERR(dev->fw_handler))
return PTR_ERR(dev->fw_handler);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index dcfa2c2d4def..3be8a04c4c67 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -293,7 +293,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
- dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, VPU_RST_ENC);
+ dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, ENCODER);
if (IS_ERR(dev->fw_handler))
return PTR_ERR(dev->fw_handler);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
index 6c2a2568d844..94b39ae5c2e1 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
@@ -1,193 +1,29 @@
// SPDX-License-Identifier: GPL-2.0
#include "mtk_vcodec_fw.h"
+#include "mtk_vcodec_fw_priv.h"
#include "mtk_vcodec_util.h"
#include "mtk_vcodec_drv.h"
-struct mtk_vcodec_fw_ops {
- int (*load_firmware)(struct mtk_vcodec_fw *fw);
- unsigned int (*get_vdec_capa)(struct mtk_vcodec_fw *fw);
- unsigned int (*get_venc_capa)(struct mtk_vcodec_fw *fw);
- void * (*map_dm_addr)(struct mtk_vcodec_fw *fw, u32 dtcm_dmem_addr);
- int (*ipi_register)(struct mtk_vcodec_fw *fw, int id,
- mtk_vcodec_ipi_handler handler, const char *name, void *priv);
- int (*ipi_send)(struct mtk_vcodec_fw *fw, int id, void *buf,
- unsigned int len, unsigned int wait);
-};
-
-struct mtk_vcodec_fw {
- enum mtk_vcodec_fw_type type;
- const struct mtk_vcodec_fw_ops *ops;
- struct platform_device *pdev;
- struct mtk_scp *scp;
-};
-
-static int mtk_vcodec_vpu_load_firmware(struct mtk_vcodec_fw *fw)
-{
- return vpu_load_firmware(fw->pdev);
-}
-
-static unsigned int mtk_vcodec_vpu_get_vdec_capa(struct mtk_vcodec_fw *fw)
-{
- return vpu_get_vdec_hw_capa(fw->pdev);
-}
-
-static unsigned int mtk_vcodec_vpu_get_venc_capa(struct mtk_vcodec_fw *fw)
-{
- return vpu_get_venc_hw_capa(fw->pdev);
-}
-
-static void *mtk_vcodec_vpu_map_dm_addr(struct mtk_vcodec_fw *fw,
- u32 dtcm_dmem_addr)
-{
- return vpu_mapping_dm_addr(fw->pdev, dtcm_dmem_addr);
-}
-
-static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
- mtk_vcodec_ipi_handler handler,
- const char *name, void *priv)
-{
- /*
- * The handler we receive takes a void * as its first argument. We
- * cannot change this because it needs to be passed down to the rproc
- * subsystem when SCP is used. VPU takes a const argument, which is
- * more constrained, so the conversion below is safe.
- */
- ipi_handler_t handler_const = (ipi_handler_t)handler;
-
- return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
-}
-
-static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
- unsigned int len, unsigned int wait)
-{
- return vpu_ipi_send(fw->pdev, id, buf, len);
-}
-
-static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
- .load_firmware = mtk_vcodec_vpu_load_firmware,
- .get_vdec_capa = mtk_vcodec_vpu_get_vdec_capa,
- .get_venc_capa = mtk_vcodec_vpu_get_venc_capa,
- .map_dm_addr = mtk_vcodec_vpu_map_dm_addr,
- .ipi_register = mtk_vcodec_vpu_set_ipi_register,
- .ipi_send = mtk_vcodec_vpu_ipi_send,
-};
-
-static int mtk_vcodec_scp_load_firmware(struct mtk_vcodec_fw *fw)
-{
- return rproc_boot(scp_get_rproc(fw->scp));
-}
-
-static unsigned int mtk_vcodec_scp_get_vdec_capa(struct mtk_vcodec_fw *fw)
-{
- return scp_get_vdec_hw_capa(fw->scp);
-}
-
-static unsigned int mtk_vcodec_scp_get_venc_capa(struct mtk_vcodec_fw *fw)
-{
- return scp_get_venc_hw_capa(fw->scp);
-}
-
-static void *mtk_vcodec_vpu_scp_dm_addr(struct mtk_vcodec_fw *fw,
- u32 dtcm_dmem_addr)
-{
- return scp_mapping_dm_addr(fw->scp, dtcm_dmem_addr);
-}
-
-static int mtk_vcodec_scp_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
- mtk_vcodec_ipi_handler handler,
- const char *name, void *priv)
-{
- return scp_ipi_register(fw->scp, id, handler, priv);
-}
-
-static int mtk_vcodec_scp_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
- unsigned int len, unsigned int wait)
-{
- return scp_ipi_send(fw->scp, id, buf, len, wait);
-}
-
-static const struct mtk_vcodec_fw_ops mtk_vcodec_rproc_msg = {
- .load_firmware = mtk_vcodec_scp_load_firmware,
- .get_vdec_capa = mtk_vcodec_scp_get_vdec_capa,
- .get_venc_capa = mtk_vcodec_scp_get_venc_capa,
- .map_dm_addr = mtk_vcodec_vpu_scp_dm_addr,
- .ipi_register = mtk_vcodec_scp_set_ipi_register,
- .ipi_send = mtk_vcodec_scp_ipi_send,
-};
-
-static void mtk_vcodec_reset_handler(void *priv)
-{
- struct mtk_vcodec_dev *dev = priv;
- struct mtk_vcodec_ctx *ctx;
-
- mtk_v4l2_err("Watchdog timeout!!");
-
- mutex_lock(&dev->dev_mutex);
- list_for_each_entry(ctx, &dev->ctx_list, list) {
- ctx->state = MTK_STATE_ABORT;
- mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
- ctx->id);
- }
- mutex_unlock(&dev->dev_mutex);
-}
-
struct mtk_vcodec_fw *mtk_vcodec_fw_select(struct mtk_vcodec_dev *dev,
enum mtk_vcodec_fw_type type,
- enum rst_id rst_id)
+ enum mtk_vcodec_fw_use fw_use)
{
- const struct mtk_vcodec_fw_ops *ops;
- struct mtk_vcodec_fw *fw;
- struct platform_device *fw_pdev = NULL;
- struct mtk_scp *scp = NULL;
-
switch (type) {
case VPU:
- ops = &mtk_vcodec_vpu_msg;
- fw_pdev = vpu_get_plat_device(dev->plat_dev);
- if (!fw_pdev) {
- mtk_v4l2_err("firmware device is not ready");
- return ERR_PTR(-EINVAL);
- }
- vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_reset_handler,
- dev, rst_id);
- break;
+ return mtk_vcodec_fw_vpu_init(dev, fw_use);
case SCP:
- ops = &mtk_vcodec_rproc_msg;
- scp = scp_get(dev->plat_dev);
- if (!scp) {
- mtk_v4l2_err("could not get vdec scp handle");
- return ERR_PTR(-EPROBE_DEFER);
- }
- break;
+ return mtk_vcodec_fw_scp_init(dev);
default:
mtk_v4l2_err("invalid vcodec fw type");
return ERR_PTR(-EINVAL);
}
-
- fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
- if (!fw)
- return ERR_PTR(-EINVAL);
-
- fw->type = type;
- fw->ops = ops;
- fw->pdev = fw_pdev;
- fw->scp = scp;
-
- return fw;
}
EXPORT_SYMBOL_GPL(mtk_vcodec_fw_select);
void mtk_vcodec_fw_release(struct mtk_vcodec_fw *fw)
{
- switch (fw->type) {
- case VPU:
- put_device(&fw->pdev->dev);
- break;
- case SCP:
- scp_put(fw->scp);
- break;
- }
+ fw->ops->release(fw);
}
EXPORT_SYMBOL_GPL(mtk_vcodec_fw_release);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
index fadbbe6ba6cd..539bb626772c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
@@ -15,6 +15,11 @@ enum mtk_vcodec_fw_type {
SCP,
};
+enum mtk_vcodec_fw_use {
+ DECODER,
+ ENCODER,
+};
+
struct mtk_vcodec_fw;
typedef void (*mtk_vcodec_ipi_handler) (void *data,
@@ -22,7 +27,7 @@ typedef void (*mtk_vcodec_ipi_handler) (void *data,
struct mtk_vcodec_fw *mtk_vcodec_fw_select(struct mtk_vcodec_dev *dev,
enum mtk_vcodec_fw_type type,
- enum rst_id rst_id);
+ enum mtk_vcodec_fw_use fw_use);
void mtk_vcodec_fw_release(struct mtk_vcodec_fw *fw);
int mtk_vcodec_fw_load_firmware(struct mtk_vcodec_fw *fw);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h
new file mode 100644
index 000000000000..b41e66185cec
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _MTK_VCODEC_FW_PRIV_H_
+#define _MTK_VCODEC_FW_PRIV_H_
+
+#include "mtk_vcodec_fw.h"
+
+struct mtk_vcodec_dev;
+
+struct mtk_vcodec_fw {
+ enum mtk_vcodec_fw_type type;
+ const struct mtk_vcodec_fw_ops *ops;
+ struct platform_device *pdev;
+ struct mtk_scp *scp;
+};
+
+struct mtk_vcodec_fw_ops {
+ int (*load_firmware)(struct mtk_vcodec_fw *fw);
+ unsigned int (*get_vdec_capa)(struct mtk_vcodec_fw *fw);
+ unsigned int (*get_venc_capa)(struct mtk_vcodec_fw *fw);
+ void *(*map_dm_addr)(struct mtk_vcodec_fw *fw, u32 dtcm_dmem_addr);
+ int (*ipi_register)(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler, const char *name,
+ void *priv);
+ int (*ipi_send)(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait);
+ void (*release)(struct mtk_vcodec_fw *fw);
+};
+
+#if IS_ENABLED(CONFIG_VIDEO_MEDIATEK_VCODEC_VPU)
+struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
+ enum mtk_vcodec_fw_use fw_use);
+#else
+static inline struct mtk_vcodec_fw *
+mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
+ enum mtk_vcodec_fw_use fw_use)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_VIDEO_MEDIATEK_VCODEC_VPU */
+
+#if IS_ENABLED(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP)
+struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev);
+#else
+static inline struct mtk_vcodec_fw *
+mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_VIDEO_MEDIATEK_VCODEC_SCP */
+
+#endif /* _MTK_VCODEC_FW_PRIV_H_ */
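Editor's note: mtk_vcodec_fw_priv.h above turns the firmware interface into an ops table plus one init function per backend, with inline ERR_PTR(-ENODEV) stubs when a backend is compiled out. The public wrappers declared in mtk_vcodec_fw.h (for example mtk_vcodec_fw_load_firmware()) are then expected to be thin dispatchers through that table, in the same way mtk_vcodec_fw_release() above now calls fw->ops->release(fw). A sketch of what such a wrapper looks like; the wrapper bodies themselves are not part of the hunks shown here:

    /* Illustrative only: dispatch through the ops table chosen at select time. */
    int mtk_vcodec_fw_load_firmware(struct mtk_vcodec_fw *fw)
    {
    	return fw->ops->load_firmware(fw);
    }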
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c
new file mode 100644
index 000000000000..d8e66b645bd8
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "mtk_vcodec_fw_priv.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_drv.h"
+
+static int mtk_vcodec_scp_load_firmware(struct mtk_vcodec_fw *fw)
+{
+ return rproc_boot(scp_get_rproc(fw->scp));
+}
+
+static unsigned int mtk_vcodec_scp_get_vdec_capa(struct mtk_vcodec_fw *fw)
+{
+ return scp_get_vdec_hw_capa(fw->scp);
+}
+
+static unsigned int mtk_vcodec_scp_get_venc_capa(struct mtk_vcodec_fw *fw)
+{
+ return scp_get_venc_hw_capa(fw->scp);
+}
+
+static void *mtk_vcodec_vpu_scp_dm_addr(struct mtk_vcodec_fw *fw,
+ u32 dtcm_dmem_addr)
+{
+ return scp_mapping_dm_addr(fw->scp, dtcm_dmem_addr);
+}
+
+static int mtk_vcodec_scp_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv)
+{
+ return scp_ipi_register(fw->scp, id, handler, priv);
+}
+
+static int mtk_vcodec_scp_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ return scp_ipi_send(fw->scp, id, buf, len, wait);
+}
+
+static void mtk_vcodec_scp_release(struct mtk_vcodec_fw *fw)
+{
+ scp_put(fw->scp);
+}
+
+static const struct mtk_vcodec_fw_ops mtk_vcodec_rproc_msg = {
+ .load_firmware = mtk_vcodec_scp_load_firmware,
+ .get_vdec_capa = mtk_vcodec_scp_get_vdec_capa,
+ .get_venc_capa = mtk_vcodec_scp_get_venc_capa,
+ .map_dm_addr = mtk_vcodec_vpu_scp_dm_addr,
+ .ipi_register = mtk_vcodec_scp_set_ipi_register,
+ .ipi_send = mtk_vcodec_scp_ipi_send,
+ .release = mtk_vcodec_scp_release,
+};
+
+struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev)
+{
+ struct mtk_vcodec_fw *fw;
+ struct mtk_scp *scp;
+
+ scp = scp_get(dev->plat_dev);
+ if (!scp) {
+ mtk_v4l2_err("could not get vdec scp handle");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+	fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+	if (!fw) {
+		scp_put(scp);
+		return ERR_PTR(-ENOMEM);
+	}
+	fw->type = SCP;
+ fw->ops = &mtk_vcodec_rproc_msg;
+ fw->scp = scp;
+
+ return fw;
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
new file mode 100644
index 000000000000..cd27f637dbe7
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "mtk_vcodec_fw_priv.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_drv.h"
+
+static int mtk_vcodec_vpu_load_firmware(struct mtk_vcodec_fw *fw)
+{
+ return vpu_load_firmware(fw->pdev);
+}
+
+static unsigned int mtk_vcodec_vpu_get_vdec_capa(struct mtk_vcodec_fw *fw)
+{
+ return vpu_get_vdec_hw_capa(fw->pdev);
+}
+
+static unsigned int mtk_vcodec_vpu_get_venc_capa(struct mtk_vcodec_fw *fw)
+{
+ return vpu_get_venc_hw_capa(fw->pdev);
+}
+
+static void *mtk_vcodec_vpu_map_dm_addr(struct mtk_vcodec_fw *fw,
+ u32 dtcm_dmem_addr)
+{
+ return vpu_mapping_dm_addr(fw->pdev, dtcm_dmem_addr);
+}
+
+static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv)
+{
+ /*
+ * The handler we receive takes a void * as its first argument. We
+ * cannot change this because it needs to be passed down to the rproc
+ * subsystem when SCP is used. VPU takes a const argument, which is
+ * more constrained, so the conversion below is safe.
+ */
+ ipi_handler_t handler_const = (ipi_handler_t)handler;
+
+ return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
+}
+
+static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ return vpu_ipi_send(fw->pdev, id, buf, len);
+}
+
+static void mtk_vcodec_vpu_release(struct mtk_vcodec_fw *fw)
+{
+ put_device(&fw->pdev->dev);
+}
+
+static void mtk_vcodec_vpu_reset_handler(void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+
+ mtk_v4l2_err("Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
+ ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
+ .load_firmware = mtk_vcodec_vpu_load_firmware,
+ .get_vdec_capa = mtk_vcodec_vpu_get_vdec_capa,
+ .get_venc_capa = mtk_vcodec_vpu_get_venc_capa,
+ .map_dm_addr = mtk_vcodec_vpu_map_dm_addr,
+ .ipi_register = mtk_vcodec_vpu_set_ipi_register,
+ .ipi_send = mtk_vcodec_vpu_ipi_send,
+ .release = mtk_vcodec_vpu_release,
+};
+
+struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
+ enum mtk_vcodec_fw_use fw_use)
+{
+ struct platform_device *fw_pdev;
+ struct mtk_vcodec_fw *fw;
+ enum rst_id rst_id;
+
+ switch (fw_use) {
+ case ENCODER:
+ rst_id = VPU_RST_ENC;
+ break;
+ case DECODER:
+ default:
+ rst_id = VPU_RST_DEC;
+ break;
+ }
+
+ fw_pdev = vpu_get_plat_device(dev->plat_dev);
+ if (!fw_pdev) {
+ mtk_v4l2_err("firmware device is not ready");
+ return ERR_PTR(-EINVAL);
+ }
+ vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id);
+
+	fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+	if (!fw) {
+		put_device(&fw_pdev->dev);
+		return ERR_PTR(-ENOMEM);
+	}
+	fw->type = VPU;
+ fw->ops = &mtk_vcodec_vpu_msg;
+ fw->pdev = fw_pdev;
+
+ return fw;
+}
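Editor's note: together with the dec/enc probe hunks earlier in this patch, the expected call sequence for a codec driver is: select a backend, boot its firmware, and release the handler on teardown. A condensed sketch based only on the calls visible in this series (example_setup() is hypothetical and error handling is abbreviated):

    /* Hypothetical helper showing the select/load/release life cycle. */
    static int example_setup(struct mtk_vcodec_dev *dev,
    			 enum mtk_vcodec_fw_type fw_type)
    {
    	int ret;

    	dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, DECODER);
    	if (IS_ERR(dev->fw_handler))
    		/* may be -EPROBE_DEFER when the SCP is not up yet */
    		return PTR_ERR(dev->fw_handler);

    	ret = mtk_vcodec_fw_load_firmware(dev->fw_handler);
    	if (ret) {
    		mtk_vcodec_fw_release(dev->fw_handler);
    		return ret;
    	}

    	return 0;
    }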
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 7b79a33dc9d6..05c9fbd51f0c 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -243,8 +243,19 @@ struct venc_controls {
u32 header_mode;
- u32 profile;
- u32 level;
+ struct {
+ u32 h264;
+ u32 mpeg4;
+ u32 hevc;
+ u32 vp8;
+ u32 vp9;
+ } profile;
+ struct {
+ u32 h264;
+ u32 mpeg4;
+ u32 hevc;
+ u32 vp9;
+ } level;
};
struct venus_buffer {
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 57877eacecf0..a9538c2cc3c9 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -794,7 +794,7 @@ skip_pmdomains:
return 0;
opp_dl_add_err:
- dev_pm_domain_detach(core->opp_pmdomain, true);
+ dev_pm_opp_detach_genpd(core->opp_table);
opp_attach_err:
if (core->pd_dl_venus) {
device_link_del(core->pd_dl_venus);
@@ -832,7 +832,7 @@ skip_pmdomains:
if (core->opp_dl_venus)
device_link_del(core->opp_dl_venus);
- dev_pm_domain_detach(core->opp_pmdomain, true);
+ dev_pm_opp_detach_genpd(core->opp_table);
}
static int core_get_v4(struct device *dev)
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index f8b1484e7dcd..47246528ac7e 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -537,6 +537,7 @@ static int venc_set_properties(struct venus_inst *inst)
struct hfi_quantization quant;
struct hfi_quantization_range quant_range;
u32 ptype, rate_control, bitrate;
+ u32 profile, level;
int ret;
ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
@@ -684,7 +685,35 @@ static int venc_set_properties(struct venus_inst *inst)
if (ret)
return ret;
- ret = venus_helper_set_profile_level(inst, ctr->profile, ctr->level);
+ switch (inst->hfi_codec) {
+ case HFI_VIDEO_CODEC_H264:
+ profile = ctr->profile.h264;
+ level = ctr->level.h264;
+ break;
+ case HFI_VIDEO_CODEC_MPEG4:
+ profile = ctr->profile.mpeg4;
+ level = ctr->level.mpeg4;
+ break;
+ case HFI_VIDEO_CODEC_VP8:
+ profile = ctr->profile.vp8;
+ level = 0;
+ break;
+ case HFI_VIDEO_CODEC_VP9:
+ profile = ctr->profile.vp9;
+ level = ctr->level.vp9;
+ break;
+ case HFI_VIDEO_CODEC_HEVC:
+ profile = ctr->profile.hevc;
+ level = ctr->level.hevc;
+ break;
+ case HFI_VIDEO_CODEC_MPEG2:
+ default:
+ profile = 0;
+ level = 0;
+ break;
+ }
+
+ ret = venus_helper_set_profile_level(inst, profile, level);
if (ret)
return ret;
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 0708b3b89d0c..cf860e6446c0 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -103,15 +103,25 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
ctr->h264_entropy_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ ctr->profile.mpeg4 = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ ctr->profile.h264 = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ ctr->profile.hevc = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
- ctr->profile = ctrl->val;
+ ctr->profile.vp8 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ ctr->level.mpeg4 = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ ctr->level.h264 = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
- ctr->level = ctrl->val;
+ ctr->level.hevc = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
ctr->h264_i_qp = ctrl->val;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index 74b054947bbe..fc64d0c8492a 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -4,36 +4,43 @@
* validate the existing APIs in the media subsystem. It can also aid
* developers working on userspace applications.
*
- * When this module is loaded, it will attempt to modprobe 'dvb_vidtv_tuner' and 'dvb_vidtv_demod'.
+ * When this module is loaded, it will attempt to modprobe 'dvb_vidtv_tuner'
+ * and 'dvb_vidtv_demod'.
*
* Copyright (C) 2020 Daniel W. S. Almeida
*/
+#include <linux/dev_printk.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/dev_printk.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "vidtv_bridge.h"
+#include "vidtv_common.h"
#include "vidtv_demod.h"
-#include "vidtv_tuner.h"
-#include "vidtv_ts.h"
#include "vidtv_mux.h"
-#include "vidtv_common.h"
+#include "vidtv_ts.h"
+#include "vidtv_tuner.h"
-//#define MUX_BUF_MAX_SZ
-//#define MUX_BUF_MIN_SZ
+#define MUX_BUF_MIN_SZ 90164
+#define MUX_BUF_MAX_SZ (MUX_BUF_MIN_SZ * 10)
#define TUNER_DEFAULT_ADDR 0x68
#define DEMOD_DEFAULT_ADDR 0x60
+#define VIDTV_DEFAULT_NETWORK_ID 0xff44
+#define VIDTV_DEFAULT_NETWORK_NAME "LinuxTV.org"
+#define VIDTV_DEFAULT_TS_ID 0x4081
-/* LNBf fake parameters: ranges used by an Universal (extended) European LNBf */
-#define LNB_CUT_FREQUENCY 11700000
-#define LNB_LOW_FREQ 9750000
-#define LNB_HIGH_FREQ 10600000
-
+/*
+ * The LNBf fake parameters here are the ranges used by a
+ * Universal (extended) European LNBf, which is likely the most common LNBf
+ * found on satellite digital TV systems nowadays.
+ */
+#define LNB_CUT_FREQUENCY	11700000	/* transition between low and high band */
+#define LNB_LOW_FREQ		9750000		/* low-band local oscillator frequency */
+#define LNB_HIGH_FREQ		10600000	/* high-band local oscillator frequency */
static unsigned int drop_tslock_prob_on_low_snr;
module_param(drop_tslock_prob_on_low_snr, uint, 0);
@@ -92,7 +99,8 @@ MODULE_PARM_DESC(si_period_msec, "How often to send SI packets. Default: 40ms");
static unsigned int pcr_period_msec = 40;
module_param(pcr_period_msec, uint, 0);
-MODULE_PARM_DESC(pcr_period_msec, "How often to send PCR packets. Default: 40ms");
+MODULE_PARM_DESC(pcr_period_msec,
+ "How often to send PCR packets. Default: 40ms");
static unsigned int mux_rate_kbytes_sec = 4096;
module_param(mux_rate_kbytes_sec, uint, 0);
@@ -104,16 +112,14 @@ MODULE_PARM_DESC(pcr_pid, "PCR PID for all channels: defaults to 0x200");
static unsigned int mux_buf_sz_pkts;
module_param(mux_buf_sz_pkts, uint, 0);
-MODULE_PARM_DESC(mux_buf_sz_pkts, "Size for the internal mux buffer in multiples of 188 bytes");
-
-#define MUX_BUF_MIN_SZ 90164
-#define MUX_BUF_MAX_SZ (MUX_BUF_MIN_SZ * 10)
+MODULE_PARM_DESC(mux_buf_sz_pkts,
+ "Size for the internal mux buffer in multiples of 188 bytes");
static u32 vidtv_bridge_mux_buf_sz_for_mux_rate(void)
{
u32 max_elapsed_time_msecs = VIDTV_MAX_SLEEP_USECS / USEC_PER_MSEC;
- u32 nbytes_expected;
u32 mux_buf_sz = mux_buf_sz_pkts * TS_PACKET_LEN;
+ u32 nbytes_expected;
nbytes_expected = mux_rate_kbytes_sec;
nbytes_expected *= max_elapsed_time_msecs;
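Editor's note: with the defaults visible in this patch (mux_rate_kbytes_sec = 4096 and VIDTV_MAX_SLEEP_USECS = 2 * 10000 us, so max_elapsed_time_msecs = 20), the two statements above leave nbytes_expected at 4096 * 20 = 81920, i.e. roughly 20 ms worth of data at 4096 kbytes/s. Since that is below MUX_BUF_MIN_SZ (90164 bytes), the remainder of this helper, which is not shown in this hunk, is presumably what rounds the value up to whole 188-byte TS packets and clamps it to the MUX_BUF_MIN_SZ/MUX_BUF_MAX_SZ bounds defined at the top of the file.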
@@ -143,14 +149,12 @@ static bool vidtv_bridge_check_demod_lock(struct vidtv_dvb *dvb, u32 n)
FE_HAS_LOCK);
}
-static void
-vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
+/*
+ * called on a separate thread by the mux when new packets become available
+ */
+static void vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
{
- /*
- * called on a separate thread by the mux when new packets become
- * available
- */
- struct vidtv_dvb *dvb = (struct vidtv_dvb *)priv;
+ struct vidtv_dvb *dvb = priv;
/* drop packets if we lose the lock */
if (vidtv_bridge_check_demod_lock(dvb, 0))
@@ -159,7 +163,17 @@ vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
static int vidtv_start_streaming(struct vidtv_dvb *dvb)
{
- struct vidtv_mux_init_args mux_args = {0};
+ struct vidtv_mux_init_args mux_args = {
+ .mux_rate_kbytes_sec = mux_rate_kbytes_sec,
+ .on_new_packets_available_cb = vidtv_bridge_on_new_pkts_avail,
+ .pcr_period_usecs = pcr_period_msec * USEC_PER_MSEC,
+ .si_period_usecs = si_period_msec * USEC_PER_MSEC,
+ .pcr_pid = pcr_pid,
+ .transport_stream_id = VIDTV_DEFAULT_TS_ID,
+ .network_id = VIDTV_DEFAULT_NETWORK_ID,
+ .network_name = VIDTV_DEFAULT_NETWORK_NAME,
+ .priv = dvb,
+ };
struct device *dev = &dvb->pdev->dev;
u32 mux_buf_sz;
@@ -168,19 +182,17 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb)
return 0;
}
- mux_buf_sz = (mux_buf_sz_pkts) ? mux_buf_sz_pkts : vidtv_bridge_mux_buf_sz_for_mux_rate();
+ if (mux_buf_sz_pkts)
+ mux_buf_sz = mux_buf_sz_pkts;
+ else
+ mux_buf_sz = vidtv_bridge_mux_buf_sz_for_mux_rate();
- mux_args.mux_rate_kbytes_sec = mux_rate_kbytes_sec;
- mux_args.on_new_packets_available_cb = vidtv_bridge_on_new_pkts_avail;
- mux_args.mux_buf_sz = mux_buf_sz;
- mux_args.pcr_period_usecs = pcr_period_msec * 1000;
- mux_args.si_period_usecs = si_period_msec * 1000;
- mux_args.pcr_pid = pcr_pid;
- mux_args.transport_stream_id = VIDTV_DEFAULT_TS_ID;
- mux_args.priv = dvb;
+ mux_args.mux_buf_sz = mux_buf_sz;
dvb->streaming = true;
- dvb->mux = vidtv_mux_init(dvb->fe[0], dev, mux_args);
+ dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
+ if (!dvb->mux)
+ return -ENOMEM;
vidtv_mux_start_thread(dvb->mux);
dev_dbg_ratelimited(dev, "Started streaming\n");
@@ -204,8 +216,8 @@ static int vidtv_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct vidtv_dvb *dvb = demux->priv;
- int rc;
int ret;
+ int rc;
if (!demux->dmx.frontend)
return -EINVAL;
@@ -243,9 +255,9 @@ static int vidtv_stop_feed(struct dvb_demux_feed *feed)
static struct dvb_frontend *vidtv_get_frontend_ptr(struct i2c_client *c)
{
- /* the demod will set this when its probe function runs */
struct vidtv_demod_state *state = i2c_get_clientdata(c);
+ /* the demod will set this when its probe function runs */
return &state->frontend;
}
@@ -253,6 +265,11 @@ static int vidtv_master_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[],
int num)
{
+	/*
+	 * Right now, this virtual driver doesn't really send or receive
+	 * messages over I2C. A real driver would need an implementation
+	 * here.
+	 */
return 0;
}
@@ -320,11 +337,10 @@ static int vidtv_bridge_dmxdev_init(struct vidtv_dvb *dvb)
static int vidtv_bridge_probe_demod(struct vidtv_dvb *dvb, u32 n)
{
- struct vidtv_demod_config cfg = {};
-
- cfg.drop_tslock_prob_on_low_snr = drop_tslock_prob_on_low_snr;
- cfg.recover_tslock_prob_on_good_snr = recover_tslock_prob_on_good_snr;
-
+ struct vidtv_demod_config cfg = {
+ .drop_tslock_prob_on_low_snr = drop_tslock_prob_on_low_snr,
+ .recover_tslock_prob_on_good_snr = recover_tslock_prob_on_good_snr,
+ };
dvb->i2c_client_demod[n] = dvb_module_probe("dvb_vidtv_demod",
NULL,
&dvb->i2c_adapter,
@@ -343,14 +359,14 @@ static int vidtv_bridge_probe_demod(struct vidtv_dvb *dvb, u32 n)
static int vidtv_bridge_probe_tuner(struct vidtv_dvb *dvb, u32 n)
{
- struct vidtv_tuner_config cfg = {};
+ struct vidtv_tuner_config cfg = {
+ .fe = dvb->fe[n],
+ .mock_power_up_delay_msec = mock_power_up_delay_msec,
+ .mock_tune_delay_msec = mock_tune_delay_msec,
+ };
u32 freq;
int i;
- cfg.fe = dvb->fe[n];
- cfg.mock_power_up_delay_msec = mock_power_up_delay_msec;
- cfg.mock_tune_delay_msec = mock_tune_delay_msec;
-
/* TODO: check if the frequencies are at a valid range */
memcpy(cfg.vidtv_valid_dvb_t_freqs,
@@ -389,9 +405,7 @@ static int vidtv_bridge_probe_tuner(struct vidtv_dvb *dvb, u32 n)
static int vidtv_bridge_dvb_init(struct vidtv_dvb *dvb)
{
- int ret;
- int i;
- int j;
+ int ret, i, j;
ret = vidtv_bridge_i2c_register_adap(dvb);
if (ret < 0)
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.h b/drivers/media/test-drivers/vidtv/vidtv_bridge.h
index 78fe8472fa37..2528adaee27d 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.h
@@ -20,9 +20,11 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+
#include <media/dmxdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
+
#include "vidtv_mux.h"
/**
@@ -32,7 +34,7 @@
* @adapter: Represents a DTV adapter. See 'dvb_register_adapter'.
* @demux: The demux used by the dvb_dmx_swfilter_packets() call.
* @dmx_dev: Represents a demux device.
- * @dmx_frontend: The frontends associated with the demux.
+ * @dmx_fe: The frontends associated with the demux.
* @i2c_adapter: The i2c_adapter associated with the bridge driver.
* @i2c_client_demod: The i2c_clients associated with the demodulator modules.
* @i2c_client_tuner: The i2c_clients associated with the tuner modules.
diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c
index f2b97cf08e87..8ad6c0744d36 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_channel.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c
@@ -9,6 +9,7 @@
* When vidtv boots, it will create some hardcoded channels.
* Their services will be concatenated to populate the SDT.
* Their programs will be concatenated to populate the PAT
+ * Their events will be concatenated to populate the EIT
* For each program in the PAT, a PMT section will be created
* The PMT section for a channel will be assigned its streams.
* Every stream will have its corresponding encoder polled to produce TS packets
@@ -18,22 +19,22 @@
* Copyright (C) 2020 Daniel W. S. Almeida
*/
-#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/dev_printk.h>
#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include "vidtv_channel.h"
-#include "vidtv_psi.h"
+#include "vidtv_common.h"
#include "vidtv_encoder.h"
#include "vidtv_mux.h"
-#include "vidtv_common.h"
+#include "vidtv_psi.h"
#include "vidtv_s302m.h"
static void vidtv_channel_encoder_destroy(struct vidtv_encoder *e)
{
- struct vidtv_encoder *curr = e;
struct vidtv_encoder *tmp = NULL;
+ struct vidtv_encoder *curr = e;
while (curr) {
/* forward the call to the derived type */
@@ -44,55 +45,88 @@ static void vidtv_channel_encoder_destroy(struct vidtv_encoder *e)
}
#define ENCODING_ISO8859_15 "\x0b"
+#define TS_NIT_PID 0x10
+/*
+ * init an audio only channel with a s302m encoder
+ */
struct vidtv_channel
*vidtv_channel_s302m_init(struct vidtv_channel *head, u16 transport_stream_id)
{
- /*
- * init an audio only channel with a s302m encoder
- */
- const u16 s302m_service_id = 0x880;
- const u16 s302m_program_num = 0x880;
- const u16 s302m_program_pid = 0x101; /* packet id for PMT*/
- const u16 s302m_es_pid = 0x111; /* packet id for the ES */
const __be32 s302m_fid = cpu_to_be32(VIDTV_S302M_FORMAT_IDENTIFIER);
-
- char *name = ENCODING_ISO8859_15 "Beethoven";
+ char *event_text = ENCODING_ISO8859_15 "Bagatelle No. 25 in A minor for solo piano, also known as F\xfcr Elise, composed by Ludwig van Beethoven";
+ char *event_name = ENCODING_ISO8859_15 "Ludwig van Beethoven: F\xfcr Elise";
+ struct vidtv_s302m_encoder_init_args encoder_args = {};
+ char *iso_language_code = ENCODING_ISO8859_15 "eng";
char *provider = ENCODING_ISO8859_15 "LinuxTV.org";
+ char *name = ENCODING_ISO8859_15 "Beethoven";
+ const u16 s302m_es_pid = 0x111; /* packet id for the ES */
+	const u16 s302m_program_pid = 0x101; /* packet id for PMT */
+ const u16 s302m_service_id = 0x880;
+ const u16 s302m_program_num = 0x880;
+ const u16 s302m_beethoven_event_id = 1;
+ struct vidtv_channel *s302m;
- struct vidtv_channel *s302m = kzalloc(sizeof(*s302m), GFP_KERNEL);
- struct vidtv_s302m_encoder_init_args encoder_args = {};
+ s302m = kzalloc(sizeof(*s302m), GFP_KERNEL);
+ if (!s302m)
+ return NULL;
s302m->name = kstrdup(name, GFP_KERNEL);
+ if (!s302m->name)
+ goto free_s302m;
- s302m->service = vidtv_psi_sdt_service_init(NULL, s302m_service_id);
+ s302m->service = vidtv_psi_sdt_service_init(NULL, s302m_service_id, false, true);
+ if (!s302m->service)
+ goto free_name;
s302m->service->descriptor = (struct vidtv_psi_desc *)
vidtv_psi_service_desc_init(NULL,
- DIGITAL_TELEVISION_SERVICE,
+ DIGITAL_RADIO_SOUND_SERVICE,
name,
provider);
+ if (!s302m->service->descriptor)
+ goto free_service;
s302m->transport_stream_id = transport_stream_id;
s302m->program = vidtv_psi_pat_program_init(NULL,
s302m_service_id,
s302m_program_pid);
+ if (!s302m->program)
+ goto free_service;
s302m->program_num = s302m_program_num;
s302m->streams = vidtv_psi_pmt_stream_init(NULL,
STREAM_PRIVATE_DATA,
s302m_es_pid);
+ if (!s302m->streams)
+ goto free_program;
s302m->streams->descriptor = (struct vidtv_psi_desc *)
vidtv_psi_registration_desc_init(NULL,
s302m_fid,
NULL,
0);
+ if (!s302m->streams->descriptor)
+ goto free_streams;
+
encoder_args.es_pid = s302m_es_pid;
s302m->encoders = vidtv_s302m_encoder_init(encoder_args);
+ if (!s302m->encoders)
+ goto free_streams;
+
+ s302m->events = vidtv_psi_eit_event_init(NULL, s302m_beethoven_event_id);
+ if (!s302m->events)
+ goto free_encoders;
+ s302m->events->descriptor = (struct vidtv_psi_desc *)
+ vidtv_psi_short_event_desc_init(NULL,
+ iso_language_code,
+ event_name,
+ event_text);
+ if (!s302m->events->descriptor)
+ goto free_events;
if (head) {
while (head->next)
@@ -102,6 +136,68 @@ struct vidtv_channel
}
return s302m;
+
+free_events:
+ vidtv_psi_eit_event_destroy(s302m->events);
+free_encoders:
+ vidtv_s302m_encoder_destroy(s302m->encoders);
+free_streams:
+ vidtv_psi_pmt_stream_destroy(s302m->streams);
+free_program:
+ vidtv_psi_pat_program_destroy(s302m->program);
+free_service:
+ vidtv_psi_sdt_service_destroy(s302m->service);
+free_name:
+ kfree(s302m->name);
+free_s302m:
+ kfree(s302m);
+
+ return NULL;
+}
+
+static struct vidtv_psi_table_eit_event
+*vidtv_channel_eit_event_cat_into_new(struct vidtv_mux *m)
+{
+ /* Concatenate the events */
+ const struct vidtv_channel *cur_chnl = m->channels;
+ struct vidtv_psi_table_eit_event *curr = NULL;
+ struct vidtv_psi_table_eit_event *head = NULL;
+ struct vidtv_psi_table_eit_event *tail = NULL;
+ struct vidtv_psi_desc *desc = NULL;
+ u16 event_id;
+
+ if (!cur_chnl)
+ return NULL;
+
+ while (cur_chnl) {
+ curr = cur_chnl->events;
+
+ if (!curr)
+ dev_warn_ratelimited(m->dev,
+ "No events found for channel %s\n",
+ cur_chnl->name);
+
+ while (curr) {
+ event_id = be16_to_cpu(curr->event_id);
+ tail = vidtv_psi_eit_event_init(tail, event_id);
+ if (!tail) {
+ vidtv_psi_eit_event_destroy(head);
+ return NULL;
+ }
+
+ desc = vidtv_psi_desc_clone(curr->descriptor);
+ vidtv_psi_desc_assign(&tail->descriptor, desc);
+
+ if (!head)
+ head = tail;
+
+ curr = curr->next;
+ }
+
+ cur_chnl = cur_chnl->next;
+ }
+
+ return head;
}
static struct vidtv_psi_table_sdt_service
@@ -125,13 +221,21 @@ static struct vidtv_psi_table_sdt_service
if (!curr)
dev_warn_ratelimited(m->dev,
- "No services found for channel %s\n", cur_chnl->name);
+ "No services found for channel %s\n",
+ cur_chnl->name);
while (curr) {
service_id = be16_to_cpu(curr->service_id);
- tail = vidtv_psi_sdt_service_init(tail, service_id);
+ tail = vidtv_psi_sdt_service_init(tail,
+ service_id,
+ curr->EIT_schedule,
+ curr->EIT_present_following);
+ if (!tail)
+ goto free;
desc = vidtv_psi_desc_clone(curr->descriptor);
+ if (!desc)
+ goto free_tail;
vidtv_psi_desc_assign(&tail->descriptor, desc);
if (!head)
@@ -144,6 +248,12 @@ static struct vidtv_psi_table_sdt_service
}
return head;
+
+free_tail:
+ vidtv_psi_sdt_service_destroy(tail);
+free:
+ vidtv_psi_sdt_service_destroy(head);
+ return NULL;
}
static struct vidtv_psi_table_pat_program*
@@ -174,6 +284,10 @@ vidtv_channel_pat_prog_cat_into_new(struct vidtv_mux *m)
tail = vidtv_psi_pat_program_init(tail,
serv_id,
pid);
+ if (!tail) {
+ vidtv_psi_pat_program_destroy(head);
+ return NULL;
+ }
if (!head)
head = tail;
@@ -183,30 +297,30 @@ vidtv_channel_pat_prog_cat_into_new(struct vidtv_mux *m)
cur_chnl = cur_chnl->next;
}
+	/* Add a PAT entry for the NIT (program_number 0 maps to the network PID) */
+ vidtv_psi_pat_program_init(tail, 0, TS_NIT_PID);
return head;
}
+/*
+ * Match channels to their respective PMT sections, then assign the
+ * streams
+ */
static void
vidtv_channel_pmt_match_sections(struct vidtv_channel *channels,
struct vidtv_psi_table_pmt **sections,
u32 nsections)
{
- /*
- * Match channels to their respective PMT sections, then assign the
- * streams
- */
struct vidtv_psi_table_pmt *curr_section = NULL;
- struct vidtv_channel *cur_chnl = channels;
-
- struct vidtv_psi_table_pmt_stream *s = NULL;
struct vidtv_psi_table_pmt_stream *head = NULL;
struct vidtv_psi_table_pmt_stream *tail = NULL;
-
+ struct vidtv_psi_table_pmt_stream *s = NULL;
+ struct vidtv_channel *cur_chnl = channels;
struct vidtv_psi_desc *desc = NULL;
- u32 j;
- u16 curr_id;
u16 e_pid; /* elementary stream pid */
+ u16 curr_id;
+ u32 j;
while (cur_chnl) {
for (j = 0; j < nsections; ++j) {
@@ -232,7 +346,8 @@ vidtv_channel_pmt_match_sections(struct vidtv_channel *channels,
head = tail;
desc = vidtv_psi_desc_clone(s->descriptor);
- vidtv_psi_desc_assign(&tail->descriptor, desc);
+ vidtv_psi_desc_assign(&tail->descriptor,
+ desc);
s = s->next;
}
@@ -246,17 +361,103 @@ vidtv_channel_pmt_match_sections(struct vidtv_channel *channels,
}
}
-void vidtv_channel_si_init(struct vidtv_mux *m)
+static void
+vidtv_channel_destroy_service_list(struct vidtv_psi_desc_service_list_entry *e)
+{
+ struct vidtv_psi_desc_service_list_entry *tmp;
+
+ while (e) {
+ tmp = e;
+ e = e->next;
+ kfree(tmp);
+ }
+}
+
+static struct vidtv_psi_desc_service_list_entry
+*vidtv_channel_build_service_list(struct vidtv_psi_table_sdt_service *s)
+{
+ struct vidtv_psi_desc_service_list_entry *curr_e = NULL;
+ struct vidtv_psi_desc_service_list_entry *head_e = NULL;
+ struct vidtv_psi_desc_service_list_entry *prev_e = NULL;
+	struct vidtv_psi_desc *desc;
+ struct vidtv_psi_desc_service *s_desc;
+
+	while (s) {
+		desc = s->descriptor;
+		while (desc) {
+			if (desc->type != SERVICE_DESCRIPTOR)
+ goto next_desc;
+
+ s_desc = (struct vidtv_psi_desc_service *)desc;
+
+ curr_e = kzalloc(sizeof(*curr_e), GFP_KERNEL);
+ if (!curr_e) {
+ vidtv_channel_destroy_service_list(head_e);
+ return NULL;
+ }
+
+ curr_e->service_id = s->service_id;
+ curr_e->service_type = s_desc->service_type;
+
+ if (!head_e)
+ head_e = curr_e;
+ if (prev_e)
+ prev_e->next = curr_e;
+
+ prev_e = curr_e;
+
+next_desc:
+ desc = desc->next;
+ }
+ s = s->next;
+ }
+ return head_e;
+}
+
+int vidtv_channel_si_init(struct vidtv_mux *m)
{
+ struct vidtv_psi_desc_service_list_entry *service_list = NULL;
struct vidtv_psi_table_pat_program *programs = NULL;
struct vidtv_psi_table_sdt_service *services = NULL;
+ struct vidtv_psi_table_eit_event *events = NULL;
m->si.pat = vidtv_psi_pat_table_init(m->transport_stream_id);
+ if (!m->si.pat)
+ return -ENOMEM;
- m->si.sdt = vidtv_psi_sdt_table_init(m->transport_stream_id);
+ m->si.sdt = vidtv_psi_sdt_table_init(m->network_id,
+ m->transport_stream_id);
+ if (!m->si.sdt)
+ goto free_pat;
programs = vidtv_channel_pat_prog_cat_into_new(m);
+ if (!programs)
+ goto free_sdt;
services = vidtv_channel_sdt_serv_cat_into_new(m);
+ if (!services)
+ goto free_programs;
+
+ events = vidtv_channel_eit_event_cat_into_new(m);
+ if (!events)
+ goto free_services;
+
+ /* look for a service descriptor for every service */
+ service_list = vidtv_channel_build_service_list(services);
+ if (!service_list)
+ goto free_events;
+
+ /* use these descriptors to build the NIT */
+ m->si.nit = vidtv_psi_nit_table_init(m->network_id,
+ m->transport_stream_id,
+ m->network_name,
+ service_list);
+ if (!m->si.nit)
+ goto free_service_list;
+
+ m->si.eit = vidtv_psi_eit_table_init(m->network_id,
+ m->transport_stream_id,
+ programs->service_id);
+ if (!m->si.eit)
+ goto free_nit;
/* assemble all programs and assign to PAT */
vidtv_psi_pat_program_assign(m->si.pat, programs);
@@ -264,31 +465,65 @@ void vidtv_channel_si_init(struct vidtv_mux *m)
/* assemble all services and assign to SDT */
vidtv_psi_sdt_service_assign(m->si.sdt, services);
- m->si.pmt_secs = vidtv_psi_pmt_create_sec_for_each_pat_entry(m->si.pat, m->pcr_pid);
+ /* assemble all events and assign to EIT */
+ vidtv_psi_eit_event_assign(m->si.eit, events);
+
+ m->si.pmt_secs = vidtv_psi_pmt_create_sec_for_each_pat_entry(m->si.pat,
+ m->pcr_pid);
+ if (!m->si.pmt_secs)
+ goto free_eit;
vidtv_channel_pmt_match_sections(m->channels,
m->si.pmt_secs,
- m->si.pat->programs);
+ m->si.pat->num_pmt);
+
+ vidtv_channel_destroy_service_list(service_list);
+
+ return 0;
+
+free_eit:
+ vidtv_psi_eit_table_destroy(m->si.eit);
+free_nit:
+ vidtv_psi_nit_table_destroy(m->si.nit);
+free_service_list:
+ vidtv_channel_destroy_service_list(service_list);
+free_events:
+ vidtv_psi_eit_event_destroy(events);
+free_services:
+ vidtv_psi_sdt_service_destroy(services);
+free_programs:
+ vidtv_psi_pat_program_destroy(programs);
+free_sdt:
+ vidtv_psi_sdt_table_destroy(m->si.sdt);
+free_pat:
+ vidtv_psi_pat_table_destroy(m->si.pat);
+	return -ENOMEM;
}
void vidtv_channel_si_destroy(struct vidtv_mux *m)
{
u32 i;
-	u16 num_programs = m->si.pat->programs;
+	u16 num_programs = m->si.pat->num_pmt;
	vidtv_psi_pat_table_destroy(m->si.pat);
	for (i = 0; i < num_programs; ++i)
vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
kfree(m->si.pmt_secs);
vidtv_psi_sdt_table_destroy(m->si.sdt);
+ vidtv_psi_nit_table_destroy(m->si.nit);
+ vidtv_psi_eit_table_destroy(m->si.eit);
}
-void vidtv_channels_init(struct vidtv_mux *m)
+int vidtv_channels_init(struct vidtv_mux *m)
{
/* this is the place to add new 'channels' for vidtv */
m->channels = vidtv_channel_s302m_init(NULL, m->transport_stream_id);
+
+ if (!m->channels)
+ return -ENOMEM;
+
+ return 0;
}
void vidtv_channels_destroy(struct vidtv_mux *m)
@@ -302,6 +537,7 @@ void vidtv_channels_destroy(struct vidtv_mux *m)
vidtv_psi_pat_program_destroy(curr->program);
vidtv_psi_pmt_stream_destroy(curr->streams);
vidtv_channel_encoder_destroy(curr->encoders);
+ vidtv_psi_eit_event_destroy(curr->events);
tmp = curr;
curr = curr->next;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.h b/drivers/media/test-drivers/vidtv/vidtv_channel.h
index 2c3cba4313b0..fff2e501d375 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_channel.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_channel.h
@@ -9,6 +9,7 @@
* When vidtv boots, it will create some hardcoded channels.
* Their services will be concatenated to populate the SDT.
* Their programs will be concatenated to populate the PAT
+ * Their events will be concatenated to populate the EIT
* For each program in the PAT, a PMT section will be created
* The PMT section for a channel will be assigned its streams.
* Every stream will have its corresponding encoder polled to produce TS packets
@@ -22,9 +23,10 @@
#define VIDTV_CHANNEL_H
#include <linux/types.h>
-#include "vidtv_psi.h"
+
#include "vidtv_encoder.h"
#include "vidtv_mux.h"
+#include "vidtv_psi.h"
/**
* struct vidtv_channel - A 'channel' abstraction
@@ -37,6 +39,7 @@
* Every stream will have its corresponding encoder polled to produce TS packets
* These packets may be interleaved by the mux and then delivered to the bridge
*
+ * @name: name of the channel
* @transport_stream_id: a number to identify the TS, chosen at will.
* @service: A _single_ service. Will be concatenated into the SDT.
* @program_num: The link between PAT, PMT and SDT.
@@ -44,6 +47,7 @@
* Will be concatenated into the PAT.
* @streams: A stream loop used to populate the PMT section for 'program'
* @encoders: A encoder loop. There must be one encoder for each stream.
+ * @events: Optional event information. This will feed into the EIT.
* @next: Optionally chain this channel.
*/
struct vidtv_channel {
@@ -54,6 +58,7 @@ struct vidtv_channel {
struct vidtv_psi_table_pat_program *program;
struct vidtv_psi_table_pmt_stream *streams;
struct vidtv_encoder *encoders;
+ struct vidtv_psi_table_eit_event *events;
struct vidtv_channel *next;
};
@@ -61,14 +66,14 @@ struct vidtv_channel {
* vidtv_channel_si_init - Init the PSI tables from the channels in the mux
* @m: The mux containing the channels.
*/
-void vidtv_channel_si_init(struct vidtv_mux *m);
+int vidtv_channel_si_init(struct vidtv_mux *m);
void vidtv_channel_si_destroy(struct vidtv_mux *m);
/**
* vidtv_channels_init - Init hardcoded, fake 'channels'.
* @m: The mux to store the channels into.
*/
-void vidtv_channels_init(struct vidtv_mux *m);
+int vidtv_channels_init(struct vidtv_mux *m);
struct vidtv_channel
*vidtv_channel_s302m_init(struct vidtv_channel *head, u16 transport_stream_id);
void vidtv_channels_destroy(struct vidtv_mux *m);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_common.h b/drivers/media/test-drivers/vidtv/vidtv_common.h
index 818e7f2b9ec5..42f63fdee681 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_common.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_common.h
@@ -16,7 +16,6 @@
#define CLOCK_UNIT_27MHZ 27000000
#define VIDTV_SLEEP_USECS 10000
#define VIDTV_MAX_SLEEP_USECS (2 * VIDTV_SLEEP_USECS)
-#define VIDTV_DEFAULT_TS_ID 0x744
u32 vidtv_memcpy(void *to,
size_t to_offset,
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c
index eba7fe1a1b48..b7823d97b30d 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
+
#include <media/dvb_frontend.h>
#include "vidtv_demod.h"
@@ -192,7 +193,6 @@ static void vidtv_demod_update_stats(struct dvb_frontend *fe)
c->cnr.stat[0].svalue = state->tuner_cnr;
c->cnr.stat[0].svalue -= prandom_u32_max(state->tuner_cnr / 50);
-
}
static int vidtv_demod_read_status(struct dvb_frontend *fe,
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.h b/drivers/media/test-drivers/vidtv/vidtv_demod.h
index 87651b0193e6..2b8404661348 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.h
@@ -12,6 +12,7 @@
#define VIDTV_DEMOD_H
#include <linux/dvb/frontend.h>
+
#include <media/dvb_frontend.h>
/**
@@ -19,6 +20,9 @@
* modulation and fec_inner
* @modulation: see enum fe_modulation
* @fec: see enum fe_fec_rate
+ * @cnr_ok: S/N threshold to consider the signal as OK. Below that, there's
+ * a chance of losing sync.
+ * @cnr_good: S/N threshold to consider the signal strong.
*
* This struct matches values for 'good' and 'ok' CNRs given the combination
* of modulation and fec_inner in use. We might simulate some noise if the
@@ -52,13 +56,8 @@ struct vidtv_demod_config {
* struct vidtv_demod_state - The demodulator state
* @frontend: The frontend structure allocated by the demod.
* @config: The config used to init the demod.
- * @poll_snr: The task responsible for periodically checking the simulated
- * signal quality, eventually dropping or reacquiring the TS lock.
* @status: the demod status.
- * @cold_start: Whether the demod has not been init yet.
- * @poll_snr_thread_running: Whether the task responsible for periodically
- * checking the simulated signal quality is running.
- * @poll_snr_thread_restart: Whether we should restart the poll_snr task.
+ * @tuner_cnr: current S/N ratio for the signal carrier
*/
struct vidtv_demod_state {
struct dvb_frontend frontend;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_encoder.h b/drivers/media/test-drivers/vidtv/vidtv_encoder.h
index 65d81daef4c3..50e3cf4eb4eb 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_encoder.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_encoder.h
@@ -28,7 +28,7 @@ struct vidtv_access_unit {
struct vidtv_access_unit *next;
};
-/* Some musical notes, used by a tone generator */
+/* Some musical notes, used by a tone generator. Values are in Hz */
enum musical_notes {
NOTE_SILENT = 0,
@@ -103,14 +103,16 @@ enum musical_notes {
* @encoder_buf_sz: The encoder buffer size, in bytes
* @encoder_buf_offset: Our byte position in the encoder buffer.
* @sample_count: How many samples we have encoded in total.
+ * @access_units: encoder payload units, used for clock references
* @src_buf: The source of raw data to be encoded, encoder might set a
* default if null.
+ * @src_buf_sz: size of @src_buf.
* @src_buf_offset: Our position in the source buffer.
* @is_video_encoder: Whether this a video encoder (as opposed to audio)
* @ctx: Encoder-specific state.
* @stream_id: Examples: Audio streams (0xc0-0xdf), Video streams
* (0xe0-0xef).
- * @es_id: The TS PID to use for the elementary stream in this encoder.
+ * @es_pid: The TS PID to use for the elementary stream in this encoder.
* @encode: Prepare enough AUs for the given amount of time.
* @clear: Clear the encoder output.
* @sync: Attempt to synchronize with this encoder.
@@ -131,9 +133,6 @@ struct vidtv_encoder {
u32 encoder_buf_offset;
u64 sample_count;
- int last_duration;
- int note_offset;
- enum musical_notes last_tone;
struct vidtv_access_unit *access_units;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
index 082740ae9d44..b51e6a3b8cbe 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
@@ -12,23 +12,23 @@
* Copyright (C) 2020 Daniel W. S. Almeida
*/
-#include <linux/types.h>
-#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
-#include <linux/dev_printk.h>
+#include <linux/math64.h>
#include <linux/ratelimit.h>
-#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/vmalloc.h>
-#include <linux/math64.h>
-#include "vidtv_mux.h"
-#include "vidtv_ts.h"
-#include "vidtv_pes.h"
-#include "vidtv_encoder.h"
#include "vidtv_channel.h"
#include "vidtv_common.h"
+#include "vidtv_encoder.h"
+#include "vidtv_mux.h"
+#include "vidtv_pes.h"
#include "vidtv_psi.h"
+#include "vidtv_ts.h"
static struct vidtv_mux_pid_ctx
*vidtv_mux_get_pid_ctx(struct vidtv_mux *m, u16 pid)
@@ -47,33 +47,56 @@ static struct vidtv_mux_pid_ctx
struct vidtv_mux_pid_ctx *ctx;
ctx = vidtv_mux_get_pid_ctx(m, pid);
-
if (ctx)
- goto end;
+ return ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
ctx->pid = pid;
ctx->cc = 0;
hash_add(m->pid_ctx, &ctx->h, pid);
-end:
return ctx;
}
-static void vidtv_mux_pid_ctx_init(struct vidtv_mux *m)
+static void vidtv_mux_pid_ctx_destroy(struct vidtv_mux *m)
+{
+ struct vidtv_mux_pid_ctx *ctx;
+ struct hlist_node *tmp;
+ int bkt;
+
+ hash_for_each_safe(m->pid_ctx, bkt, tmp, ctx, h) {
+ hash_del(&ctx->h);
+ kfree(ctx);
+ }
+}
+
+static int vidtv_mux_pid_ctx_init(struct vidtv_mux *m)
{
struct vidtv_psi_table_pat_program *p = m->si.pat->program;
u16 pid;
hash_init(m->pid_ctx);
/* push the pcr pid ctx */
- vidtv_mux_create_pid_ctx_once(m, m->pcr_pid);
- /* push the null packet pid ctx */
- vidtv_mux_create_pid_ctx_once(m, TS_NULL_PACKET_PID);
+ if (!vidtv_mux_create_pid_ctx_once(m, m->pcr_pid))
+ return -ENOMEM;
+ /* push the NULL packet pid ctx */
+ if (!vidtv_mux_create_pid_ctx_once(m, TS_NULL_PACKET_PID))
+ goto free;
/* push the PAT pid ctx */
- vidtv_mux_create_pid_ctx_once(m, VIDTV_PAT_PID);
+ if (!vidtv_mux_create_pid_ctx_once(m, VIDTV_PAT_PID))
+ goto free;
/* push the SDT pid ctx */
- vidtv_mux_create_pid_ctx_once(m, VIDTV_SDT_PID);
+ if (!vidtv_mux_create_pid_ctx_once(m, VIDTV_SDT_PID))
+ goto free;
+ /* push the NIT pid ctx */
+ if (!vidtv_mux_create_pid_ctx_once(m, VIDTV_NIT_PID))
+ goto free;
+ /* push the EIT pid ctx */
+ if (!vidtv_mux_create_pid_ctx_once(m, VIDTV_EIT_PID))
+ goto free;
/* add a ctx for all PMT sections */
while (p) {
@@ -81,18 +104,12 @@ static void vidtv_mux_pid_ctx_init(struct vidtv_mux *m)
vidtv_mux_create_pid_ctx_once(m, pid);
p = p->next;
}
-}
-static void vidtv_mux_pid_ctx_destroy(struct vidtv_mux *m)
-{
- int bkt;
- struct vidtv_mux_pid_ctx *ctx;
- struct hlist_node *tmp;
+ return 0;
- hash_for_each_safe(m->pid_ctx, bkt, tmp, ctx, h) {
- hash_del(&ctx->h);
- kfree(ctx);
- }
+free:
+ vidtv_mux_pid_ctx_destroy(m);
+ return -ENOMEM;
}
static void vidtv_mux_update_clk(struct vidtv_mux *m)
@@ -112,32 +129,53 @@ static void vidtv_mux_update_clk(struct vidtv_mux *m)
static u32 vidtv_mux_push_si(struct vidtv_mux *m)
{
+ struct vidtv_psi_pat_write_args pat_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .pat = m->si.pat,
+ };
+ struct vidtv_psi_pmt_write_args pmt_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .pcr_pid = m->pcr_pid,
+ };
+ struct vidtv_psi_sdt_write_args sdt_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .sdt = m->si.sdt,
+ };
+ struct vidtv_psi_nit_write_args nit_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .nit = m->si.nit,
+ };
+ struct vidtv_psi_eit_write_args eit_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .eit = m->si.eit,
+ };
u32 initial_offset = m->mux_buf_offset;
-
struct vidtv_mux_pid_ctx *pat_ctx;
struct vidtv_mux_pid_ctx *pmt_ctx;
struct vidtv_mux_pid_ctx *sdt_ctx;
-
- struct vidtv_psi_pat_write_args pat_args = {};
- struct vidtv_psi_pmt_write_args pmt_args = {};
- struct vidtv_psi_sdt_write_args sdt_args = {};
-
- u32 nbytes; /* the number of bytes written by this function */
+ struct vidtv_mux_pid_ctx *nit_ctx;
+ struct vidtv_mux_pid_ctx *eit_ctx;
+ u32 nbytes;
u16 pmt_pid;
u32 i;
pat_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_PAT_PID);
sdt_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_SDT_PID);
+ nit_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_NIT_PID);
+ eit_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_EIT_PID);
- pat_args.buf = m->mux_buf;
pat_args.offset = m->mux_buf_offset;
- pat_args.pat = m->si.pat;
- pat_args.buf_sz = m->mux_buf_sz;
pat_args.continuity_counter = &pat_ctx->cc;
- m->mux_buf_offset += vidtv_psi_pat_write_into(pat_args);
+ m->mux_buf_offset += vidtv_psi_pat_write_into(&pat_args);
- for (i = 0; i < m->si.pat->programs; ++i) {
+ for (i = 0; i < m->si.pat->num_pmt; ++i) {
pmt_pid = vidtv_psi_pmt_get_pid(m->si.pmt_secs[i],
m->si.pat);
@@ -149,25 +187,29 @@ static u32 vidtv_mux_push_si(struct vidtv_mux *m)
pmt_ctx = vidtv_mux_get_pid_ctx(m, pmt_pid);
- pmt_args.buf = m->mux_buf;
pmt_args.offset = m->mux_buf_offset;
pmt_args.pmt = m->si.pmt_secs[i];
pmt_args.pid = pmt_pid;
- pmt_args.buf_sz = m->mux_buf_sz;
pmt_args.continuity_counter = &pmt_ctx->cc;
- pmt_args.pcr_pid = m->pcr_pid;
/* write each section into buffer */
- m->mux_buf_offset += vidtv_psi_pmt_write_into(pmt_args);
+ m->mux_buf_offset += vidtv_psi_pmt_write_into(&pmt_args);
}
- sdt_args.buf = m->mux_buf;
sdt_args.offset = m->mux_buf_offset;
- sdt_args.sdt = m->si.sdt;
- sdt_args.buf_sz = m->mux_buf_sz;
sdt_args.continuity_counter = &sdt_ctx->cc;
- m->mux_buf_offset += vidtv_psi_sdt_write_into(sdt_args);
+ m->mux_buf_offset += vidtv_psi_sdt_write_into(&sdt_args);
+
+ nit_args.offset = m->mux_buf_offset;
+ nit_args.continuity_counter = &nit_ctx->cc;
+
+ m->mux_buf_offset += vidtv_psi_nit_write_into(&nit_args);
+
+ eit_args.offset = m->mux_buf_offset;
+ eit_args.continuity_counter = &eit_ctx->cc;
+
+ m->mux_buf_offset += vidtv_psi_eit_write_into(&eit_args);
nbytes = m->mux_buf_offset - initial_offset;
@@ -230,23 +272,29 @@ static bool vidtv_mux_should_push_si(struct vidtv_mux *m)
static u32 vidtv_mux_packetize_access_units(struct vidtv_mux *m,
struct vidtv_encoder *e)
{
- u32 nbytes = 0;
-
- struct pes_write_args args = {};
- u32 initial_offset = m->mux_buf_offset;
+ struct pes_write_args args = {
+ .dest_buf = m->mux_buf,
+ .dest_buf_sz = m->mux_buf_sz,
+ .pid = be16_to_cpu(e->es_pid),
+ .encoder_id = e->id,
+ .stream_id = be16_to_cpu(e->stream_id),
+		.send_pts = true,  /* PTS_DTS flags '10': PTS only, */
+		.send_dts = false, /* since '01' is a forbidden value */
+ };
struct vidtv_access_unit *au = e->access_units;
-
+ u32 initial_offset = m->mux_buf_offset;
+ struct vidtv_mux_pid_ctx *pid_ctx;
+ u32 nbytes = 0;
u8 *buf = NULL;
- struct vidtv_mux_pid_ctx *pid_ctx = vidtv_mux_create_pid_ctx_once(m,
- be16_to_cpu(e->es_pid));
- args.dest_buf = m->mux_buf;
- args.dest_buf_sz = m->mux_buf_sz;
- args.pid = be16_to_cpu(e->es_pid);
- args.encoder_id = e->id;
+ /* see SMPTE 302M clause 6.4 */
+ if (args.encoder_id == S302M) {
+ args.send_dts = false;
+ args.send_pts = true;
+ }
+
+ pid_ctx = vidtv_mux_create_pid_ctx_once(m, be16_to_cpu(e->es_pid));
args.continuity_counter = &pid_ctx->cc;
- args.stream_id = be16_to_cpu(e->stream_id);
- args.send_pts = true;
while (au) {
buf = e->encoder_buf + au->offset;
@@ -256,7 +304,7 @@ static u32 vidtv_mux_packetize_access_units(struct vidtv_mux *m,
args.pts = au->pts;
args.pcr = m->timing.clk;
- m->mux_buf_offset += vidtv_pes_write_into(args);
+ m->mux_buf_offset += vidtv_pes_write_into(&args);
au = au->next;
}
@@ -273,10 +321,10 @@ static u32 vidtv_mux_packetize_access_units(struct vidtv_mux *m,
static u32 vidtv_mux_poll_encoders(struct vidtv_mux *m)
{
- u32 nbytes = 0;
- u32 au_nbytes;
struct vidtv_channel *cur_chnl = m->channels;
struct vidtv_encoder *e = NULL;
+ u32 nbytes = 0;
+ u32 au_nbytes;
while (cur_chnl) {
e = cur_chnl->encoders;
@@ -300,18 +348,19 @@ static u32 vidtv_mux_poll_encoders(struct vidtv_mux *m)
static u32 vidtv_mux_pad_with_nulls(struct vidtv_mux *m, u32 npkts)
{
- struct null_packet_write_args args = {};
+ struct null_packet_write_args args = {
+ .dest_buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .dest_offset = m->mux_buf_offset,
+ };
u32 initial_offset = m->mux_buf_offset;
- u32 nbytes; /* the number of bytes written by this function */
- u32 i;
struct vidtv_mux_pid_ctx *ctx;
+ u32 nbytes;
+ u32 i;
ctx = vidtv_mux_get_pid_ctx(m, TS_NULL_PACKET_PID);
- args.dest_buf = m->mux_buf;
- args.buf_sz = m->mux_buf_sz;
args.continuity_counter = &ctx->cc;
- args.dest_offset = m->mux_buf_offset;
for (i = 0; i < npkts; ++i) {
m->mux_buf_offset += vidtv_ts_null_write_into(args);
@@ -343,9 +392,9 @@ static void vidtv_mux_tick(struct work_struct *work)
struct vidtv_mux,
mpeg_thread);
struct dtv_frontend_properties *c = &m->fe->dtv_property_cache;
+ u32 tot_bits = 0;
u32 nbytes;
u32 npkts;
- u32 tot_bits = 0;
while (m->streaming) {
nbytes = 0;
@@ -427,40 +476,62 @@ void vidtv_mux_stop_thread(struct vidtv_mux *m)
struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
struct device *dev,
- struct vidtv_mux_init_args args)
+ struct vidtv_mux_init_args *args)
{
- struct vidtv_mux *m = kzalloc(sizeof(*m), GFP_KERNEL);
+ struct vidtv_mux *m;
+
+ m = kzalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return NULL;
m->dev = dev;
m->fe = fe;
- m->timing.pcr_period_usecs = args.pcr_period_usecs;
- m->timing.si_period_usecs = args.si_period_usecs;
+ m->timing.pcr_period_usecs = args->pcr_period_usecs;
+ m->timing.si_period_usecs = args->si_period_usecs;
+
+ m->mux_rate_kbytes_sec = args->mux_rate_kbytes_sec;
- m->mux_rate_kbytes_sec = args.mux_rate_kbytes_sec;
+ m->on_new_packets_available_cb = args->on_new_packets_available_cb;
- m->on_new_packets_available_cb = args.on_new_packets_available_cb;
+ m->mux_buf = vzalloc(args->mux_buf_sz);
+ if (!m->mux_buf)
+ goto free_mux;
- m->mux_buf = vzalloc(args.mux_buf_sz);
- m->mux_buf_sz = args.mux_buf_sz;
+ m->mux_buf_sz = args->mux_buf_sz;
- m->pcr_pid = args.pcr_pid;
- m->transport_stream_id = args.transport_stream_id;
- m->priv = args.priv;
+ m->pcr_pid = args->pcr_pid;
+ m->transport_stream_id = args->transport_stream_id;
+ m->priv = args->priv;
+ m->network_id = args->network_id;
+ m->network_name = kstrdup(args->network_name, GFP_KERNEL);
m->timing.current_jiffies = get_jiffies_64();
- if (args.channels)
- m->channels = args.channels;
+ if (args->channels)
+ m->channels = args->channels;
else
- vidtv_channels_init(m);
+ if (vidtv_channels_init(m) < 0)
+ goto free_mux_buf;
/* will alloc data for pmt_sections after initializing pat */
- vidtv_channel_si_init(m);
+ if (vidtv_channel_si_init(m) < 0)
+ goto free_channels;
INIT_WORK(&m->mpeg_thread, vidtv_mux_tick);
- vidtv_mux_pid_ctx_init(m);
+ if (vidtv_mux_pid_ctx_init(m) < 0)
+ goto free_channel_si;
return m;
+
+free_channel_si:
+ vidtv_channel_si_destroy(m);
+free_channels:
+ vidtv_channels_destroy(m);
+free_mux_buf:
+ vfree(m->mux_buf);
+free_mux:
+ kfree(m);
+ return NULL;
}
void vidtv_mux_destroy(struct vidtv_mux *m)
@@ -469,6 +540,7 @@ void vidtv_mux_destroy(struct vidtv_mux *m)
vidtv_mux_pid_ctx_destroy(m);
vidtv_channel_si_destroy(m);
vidtv_channels_destroy(m);
+ kfree(m->network_name);
vfree(m->mux_buf);
kfree(m);
}
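
The unwind path added to vidtv_mux_init() above follows the usual kernel goto-cleanup idiom: one label per acquired resource, executed in reverse order of acquisition, so a failure at any step releases exactly what was set up before it. A condensed sketch of the same shape, where struct foo, FOO_BUF_SZ and foo_setup_tables() are made-up placeholders rather than vidtv symbols:

	struct foo {
		u8 *buf;
		/* ... */
	};

	struct foo *foo_create(void)
	{
		struct foo *f;

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return NULL;

		f->buf = vzalloc(FOO_BUF_SZ);		/* FOO_BUF_SZ: placeholder */
		if (!f->buf)
			goto free_foo;

		if (foo_setup_tables(f) < 0)		/* placeholder init step */
			goto free_buf;

		return f;

	free_buf:
		vfree(f->buf);
	free_foo:
		kfree(f);
		return NULL;
	}

The important property is that each label cleans up only the steps that already succeeded, which is why the labels appear at the bottom in reverse order.
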
diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.h b/drivers/media/test-drivers/vidtv/vidtv_mux.h
index 2caa60623e97..ad82eb72b841 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_mux.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_mux.h
@@ -15,9 +15,10 @@
#ifndef VIDTV_MUX_H
#define VIDTV_MUX_H
-#include <linux/types.h>
#include <linux/hashtable.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
+
#include <media/dvb_frontend.h>
#include "vidtv_psi.h"
@@ -58,12 +59,16 @@ struct vidtv_mux_timing {
* @pat: The PAT in use by the muxer.
* @pmt_secs: The PMT sections in use by the muxer. One for each program in the PAT.
* @sdt: The SDT in use by the muxer.
+ * @nit: The NIT in use by the muxer.
+ * @eit: The EIT in use by the muxer.
*/
struct vidtv_mux_si {
/* the SI tables */
struct vidtv_psi_table_pat *pat;
struct vidtv_psi_table_pmt **pmt_secs; /* the PMT sections */
struct vidtv_psi_table_sdt *sdt;
+ struct vidtv_psi_table_nit *nit;
+ struct vidtv_psi_table_eit *eit;
};
/**
@@ -82,8 +87,10 @@ struct vidtv_mux_pid_ctx {
/**
* struct vidtv_mux - A muxer abstraction loosely based in libavcodec/mpegtsenc.c
- * @mux_rate_kbytes_sec: The bit rate for the TS, in kbytes.
+ * @fe: The DVB frontend in use by the muxer.
+ * @dev: Pointer to the struct device.
 * @timing: Keeps track of timing related information.
+ * @mux_rate_kbytes_sec: The TS data rate, in kbytes per second.
* @pid_ctx: A hash table to keep track of per-PID metadata.
* @on_new_packets_available_cb: A callback to inform of new TS packets ready.
* @mux_buf: A pointer to a buffer for this muxer. TS packets are stored there
@@ -99,6 +106,8 @@ struct vidtv_mux_pid_ctx {
* @pcr_pid: The TS PID used for the PSI packets. All channels will share the
* same PCR.
* @transport_stream_id: The transport stream ID
+ * @network_id: The network ID
+ * @network_name: The network name
* @priv: Private data.
*/
struct vidtv_mux {
@@ -128,6 +137,8 @@ struct vidtv_mux {
u16 pcr_pid;
u16 transport_stream_id;
+ u16 network_id;
+ char *network_name;
void *priv;
};
@@ -142,6 +153,8 @@ struct vidtv_mux {
* same PCR.
* @transport_stream_id: The transport stream ID
* @channels: an optional list of channels to use
+ * @network_id: The network ID
+ * @network_name: The network name
* @priv: Private data.
*/
struct vidtv_mux_init_args {
@@ -153,12 +166,14 @@ struct vidtv_mux_init_args {
u16 pcr_pid;
u16 transport_stream_id;
struct vidtv_channel *channels;
+ u16 network_id;
+ char *network_name;
void *priv;
};
struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
struct device *dev,
- struct vidtv_mux_init_args args);
+ struct vidtv_mux_init_args *args);
void vidtv_mux_destroy(struct vidtv_mux *m);
void vidtv_mux_start_thread(struct vidtv_mux *m);
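
Since vidtv_mux_init() now takes its configuration by pointer and can fail, a caller builds struct vidtv_mux_init_args on the stack, passes its address and checks the result for NULL. The bridge code that actually does this is not part of this diff, so the sketch below is only illustrative; the function name and the numeric values are placeholders:

	static int hypothetical_bridge_setup(struct dvb_frontend *fe,
					     struct device *dev, void *priv)
	{
		struct vidtv_mux_init_args args = {
			.mux_rate_kbytes_sec = 4096,		/* illustrative values */
			.mux_buf_sz          = 1024 * 1024,
			.pcr_period_usecs    = 40000,
			.si_period_usecs     = 40000,
			.pcr_pid             = 0x200,
			.transport_stream_id = 0x744,
			.network_id          = 0xff44,
			.network_name        = "A network name",
			.priv                = priv,
			/* a real caller also sets .on_new_packets_available_cb */
		};
		struct vidtv_mux *mux;

		/* args is only read during init; it can safely live on the stack */
		mux = vidtv_mux_init(fe, dev, &args);
		if (!mux)
			return -ENOMEM;

		vidtv_mux_start_thread(mux);
		return 0;
	}
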
diff --git a/drivers/media/test-drivers/vidtv/vidtv_pes.c b/drivers/media/test-drivers/vidtv/vidtv_pes.c
index 1c75f88070e9..782e5e7fbb02 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_pes.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_pes.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
-#include <asm/byteorder.h>
#include "vidtv_pes.h"
#include "vidtv_common.h"
@@ -57,7 +56,7 @@ static u32 vidtv_pes_h_get_len(bool send_pts, bool send_dts)
return len;
}
-static u32 vidtv_pes_write_header_stuffing(struct pes_header_write_args args)
+static u32 vidtv_pes_write_header_stuffing(struct pes_header_write_args *args)
{
/*
* This is a fixed 8-bit value equal to '0xFF' that can be inserted
@@ -65,20 +64,20 @@ static u32 vidtv_pes_write_header_stuffing(struct pes_header_write_args args)
* It is discarded by the decoder. No more than 32 stuffing bytes shall
* be present in one PES packet header.
*/
- if (args.n_pes_h_s_bytes > PES_HEADER_MAX_STUFFING_BYTES) {
+ if (args->n_pes_h_s_bytes > PES_HEADER_MAX_STUFFING_BYTES) {
pr_warn_ratelimited("More than %d stuffing bytes in PES packet header\n",
PES_HEADER_MAX_STUFFING_BYTES);
- args.n_pes_h_s_bytes = PES_HEADER_MAX_STUFFING_BYTES;
+ args->n_pes_h_s_bytes = PES_HEADER_MAX_STUFFING_BYTES;
}
- return vidtv_memset(args.dest_buf,
- args.dest_offset,
- args.dest_buf_sz,
+ return vidtv_memset(args->dest_buf,
+ args->dest_offset,
+ args->dest_buf_sz,
TS_FILL_BYTE,
- args.n_pes_h_s_bytes);
+ args->n_pes_h_s_bytes);
}
-static u32 vidtv_pes_write_pts_dts(struct pes_header_write_args args)
+static u32 vidtv_pes_write_pts_dts(struct pes_header_write_args *args)
{
u32 nbytes = 0; /* the number of bytes written by this function */
@@ -90,7 +89,7 @@ static u32 vidtv_pes_write_pts_dts(struct pes_header_write_args args)
u64 mask2;
u64 mask3;
- if (!args.send_pts && args.send_dts)
+ if (!args->send_pts && args->send_dts)
return 0;
mask1 = GENMASK_ULL(32, 30);
@@ -98,80 +97,81 @@ static u32 vidtv_pes_write_pts_dts(struct pes_header_write_args args)
mask3 = GENMASK_ULL(14, 0);
/* see ISO/IEC 13818-1 : 2000 p. 32 */
- if (args.send_pts && args.send_dts) {
- pts_dts.pts1 = (0x3 << 4) | ((args.pts & mask1) >> 29) | 0x1;
- pts_dts.pts2 = cpu_to_be16(((args.pts & mask2) >> 14) | 0x1);
- pts_dts.pts3 = cpu_to_be16(((args.pts & mask3) << 1) | 0x1);
+ if (args->send_pts && args->send_dts) {
+ pts_dts.pts1 = (0x3 << 4) | ((args->pts & mask1) >> 29) | 0x1;
+ pts_dts.pts2 = cpu_to_be16(((args->pts & mask2) >> 14) | 0x1);
+ pts_dts.pts3 = cpu_to_be16(((args->pts & mask3) << 1) | 0x1);
- pts_dts.dts1 = (0x1 << 4) | ((args.dts & mask1) >> 29) | 0x1;
- pts_dts.dts2 = cpu_to_be16(((args.dts & mask2) >> 14) | 0x1);
- pts_dts.dts3 = cpu_to_be16(((args.dts & mask3) << 1) | 0x1);
+ pts_dts.dts1 = (0x1 << 4) | ((args->dts & mask1) >> 29) | 0x1;
+ pts_dts.dts2 = cpu_to_be16(((args->dts & mask2) >> 14) | 0x1);
+ pts_dts.dts3 = cpu_to_be16(((args->dts & mask3) << 1) | 0x1);
op = &pts_dts;
op_sz = sizeof(pts_dts);
- } else if (args.send_pts) {
- pts.pts1 = (0x1 << 5) | ((args.pts & mask1) >> 29) | 0x1;
- pts.pts2 = cpu_to_be16(((args.pts & mask2) >> 14) | 0x1);
- pts.pts3 = cpu_to_be16(((args.pts & mask3) << 1) | 0x1);
+ } else if (args->send_pts) {
+ pts.pts1 = (0x1 << 5) | ((args->pts & mask1) >> 29) | 0x1;
+ pts.pts2 = cpu_to_be16(((args->pts & mask2) >> 14) | 0x1);
+ pts.pts3 = cpu_to_be16(((args->pts & mask3) << 1) | 0x1);
op = &pts;
op_sz = sizeof(pts);
}
/* copy PTS/DTS optional */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
op,
op_sz);
return nbytes;
}
-static u32 vidtv_pes_write_h(struct pes_header_write_args args)
+static u32 vidtv_pes_write_h(struct pes_header_write_args *args)
{
u32 nbytes = 0; /* the number of bytes written by this function */
struct vidtv_mpeg_pes pes_header = {};
struct vidtv_pes_optional pes_optional = {};
- struct pes_header_write_args pts_dts_args = args;
- u32 stream_id = (args.encoder_id == S302M) ? PRIVATE_STREAM_1_ID : args.stream_id;
+ struct pes_header_write_args pts_dts_args;
+ u32 stream_id = (args->encoder_id == S302M) ? PRIVATE_STREAM_1_ID : args->stream_id;
u16 pes_opt_bitfield = 0x01 << 15;
pes_header.bitfield = cpu_to_be32((PES_START_CODE_PREFIX << 8) | stream_id);
- pes_header.length = cpu_to_be16(vidtv_pes_op_get_len(args.send_pts,
- args.send_dts) +
- args.access_unit_len);
+ pes_header.length = cpu_to_be16(vidtv_pes_op_get_len(args->send_pts,
+ args->send_dts) +
+ args->access_unit_len);
- if (args.send_pts && args.send_dts)
+ if (args->send_pts && args->send_dts)
pes_opt_bitfield |= (0x3 << 6);
- else if (args.send_pts)
+ else if (args->send_pts)
pes_opt_bitfield |= (0x1 << 7);
pes_optional.bitfield = cpu_to_be16(pes_opt_bitfield);
- pes_optional.length = vidtv_pes_op_get_len(args.send_pts, args.send_dts) +
- args.n_pes_h_s_bytes -
+ pes_optional.length = vidtv_pes_op_get_len(args->send_pts, args->send_dts) +
+ args->n_pes_h_s_bytes -
sizeof(struct vidtv_pes_optional);
/* copy header */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
&pes_header,
sizeof(pes_header));
/* copy optional header bits */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
&pes_optional,
sizeof(pes_optional));
/* copy the timing information */
- pts_dts_args.dest_offset = args.dest_offset + nbytes;
- nbytes += vidtv_pes_write_pts_dts(pts_dts_args);
+ pts_dts_args = *args;
+ pts_dts_args.dest_offset = args->dest_offset + nbytes;
+ nbytes += vidtv_pes_write_pts_dts(&pts_dts_args);
/* write any PES header stuffing */
nbytes += vidtv_pes_write_header_stuffing(args);
@@ -300,14 +300,31 @@ static u32 vidtv_pes_write_ts_h(struct pes_ts_header_write_args args,
return nbytes;
}
-u32 vidtv_pes_write_into(struct pes_write_args args)
+u32 vidtv_pes_write_into(struct pes_write_args *args)
{
- u32 unaligned_bytes = (args.dest_offset % TS_PACKET_LEN);
- struct pes_ts_header_write_args ts_header_args = {};
- struct pes_header_write_args pes_header_args = {};
- u32 remaining_len = args.access_unit_len;
+ u32 unaligned_bytes = (args->dest_offset % TS_PACKET_LEN);
+ struct pes_ts_header_write_args ts_header_args = {
+ .dest_buf = args->dest_buf,
+ .dest_buf_sz = args->dest_buf_sz,
+ .pid = args->pid,
+ .pcr = args->pcr,
+ .continuity_counter = args->continuity_counter,
+ };
+ struct pes_header_write_args pes_header_args = {
+ .dest_buf = args->dest_buf,
+ .dest_buf_sz = args->dest_buf_sz,
+ .encoder_id = args->encoder_id,
+ .send_pts = args->send_pts,
+ .pts = args->pts,
+ .send_dts = args->send_dts,
+ .dts = args->dts,
+ .stream_id = args->stream_id,
+ .n_pes_h_s_bytes = args->n_pes_h_s_bytes,
+ .access_unit_len = args->access_unit_len,
+ };
+ u32 remaining_len = args->access_unit_len;
bool wrote_pes_header = false;
- u64 last_pcr = args.pcr;
+ u64 last_pcr = args->pcr;
bool need_pcr = true;
u32 available_space;
u32 payload_size;
@@ -318,25 +335,13 @@ u32 vidtv_pes_write_into(struct pes_write_args args)
pr_warn_ratelimited("buffer is misaligned, while starting PES\n");
/* forcibly align and hope for the best */
- nbytes += vidtv_memset(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memset(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
TS_FILL_BYTE,
TS_PACKET_LEN - unaligned_bytes);
}
- if (args.send_dts && !args.send_pts) {
- pr_warn_ratelimited("forbidden value '01' for PTS_DTS flags\n");
- args.send_pts = true;
- args.pts = args.dts;
- }
-
- /* see SMPTE 302M clause 6.4 */
- if (args.encoder_id == S302M) {
- args.send_dts = false;
- args.send_pts = true;
- }
-
while (remaining_len) {
available_space = TS_PAYLOAD_LEN;
/*
@@ -345,14 +350,14 @@ u32 vidtv_pes_write_into(struct pes_write_args args)
* the space needed for the TS header _and_ for the PES header
*/
if (!wrote_pes_header)
- available_space -= vidtv_pes_h_get_len(args.send_pts,
- args.send_dts);
+ available_space -= vidtv_pes_h_get_len(args->send_pts,
+ args->send_dts);
/*
* if the encoder has inserted stuffing bytes in the PES
* header, account for them.
*/
- available_space -= args.n_pes_h_s_bytes;
+ available_space -= args->n_pes_h_s_bytes;
/* Take the extra adaptation into account if need to send PCR */
if (need_pcr) {
@@ -387,14 +392,9 @@ u32 vidtv_pes_write_into(struct pes_write_args args)
}
/* write ts header */
- ts_header_args.dest_buf = args.dest_buf;
- ts_header_args.dest_offset = args.dest_offset + nbytes;
- ts_header_args.dest_buf_sz = args.dest_buf_sz;
- ts_header_args.pid = args.pid;
- ts_header_args.pcr = args.pcr;
- ts_header_args.continuity_counter = args.continuity_counter;
- ts_header_args.wrote_pes_header = wrote_pes_header;
- ts_header_args.n_stuffing_bytes = stuff_bytes;
+ ts_header_args.dest_offset = args->dest_offset + nbytes;
+ ts_header_args.wrote_pes_header = wrote_pes_header;
+ ts_header_args.n_stuffing_bytes = stuff_bytes;
nbytes += vidtv_pes_write_ts_h(ts_header_args, need_pcr,
&last_pcr);
@@ -403,33 +403,20 @@ u32 vidtv_pes_write_into(struct pes_write_args args)
if (!wrote_pes_header) {
/* write the PES header only once */
- pes_header_args.dest_buf = args.dest_buf;
-
- pes_header_args.dest_offset = args.dest_offset +
- nbytes;
-
- pes_header_args.dest_buf_sz = args.dest_buf_sz;
- pes_header_args.encoder_id = args.encoder_id;
- pes_header_args.send_pts = args.send_pts;
- pes_header_args.pts = args.pts;
- pes_header_args.send_dts = args.send_dts;
- pes_header_args.dts = args.dts;
- pes_header_args.stream_id = args.stream_id;
- pes_header_args.n_pes_h_s_bytes = args.n_pes_h_s_bytes;
- pes_header_args.access_unit_len = args.access_unit_len;
-
- nbytes += vidtv_pes_write_h(pes_header_args);
- wrote_pes_header = true;
+ pes_header_args.dest_offset = args->dest_offset +
+ nbytes;
+ nbytes += vidtv_pes_write_h(&pes_header_args);
+ wrote_pes_header = true;
}
/* write as much of the payload as we possibly can */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
- args.from,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
+ args->from,
payload_size);
- args.from += payload_size;
+ args->from += payload_size;
remaining_len -= payload_size;
}
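
The loop above slices one access unit across consecutive 188-byte TS packets: every packet gives up 4 bytes to the TS header, and the first one additionally gives up the PES header (plus the PCR adaptation field and any stuffing). Ignoring PCR and stuffing, a rough lower bound on the packet count can be sketched as below; the 14-byte figure assumes the PTS-only PES header written above (6 fixed + 3 optional + 5 PTS bytes):

	/* Rough lower bound on TS packets for one access unit; the real
	 * writer uses more once PCR adaptation fields and stuffing bytes
	 * are taken into account.
	 */
	static u32 pes_ts_packet_estimate(u32 access_unit_len)
	{
		const u32 ts_payload = 184;	/* 188 - 4 byte TS header */
		const u32 pes_header = 14;	/* PTS only: 6 + 3 + 5 bytes */
		u32 first = ts_payload - pes_header;

		if (access_unit_len <= first)
			return 1;
		return 1 + DIV_ROUND_UP(access_unit_len - first, ts_payload);
	}
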
diff --git a/drivers/media/test-drivers/vidtv/vidtv_pes.h b/drivers/media/test-drivers/vidtv/vidtv_pes.h
index 0ea9e863024d..963c59155e72 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_pes.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_pes.h
@@ -14,7 +14,6 @@
#ifndef VIDTV_PES_H
#define VIDTV_PES_H
-#include <asm/byteorder.h>
#include <linux/types.h>
#include "vidtv_common.h"
@@ -114,8 +113,10 @@ struct pes_header_write_args {
* @dest_buf_sz: The size of the dest_buffer
* @pid: The PID to use for the TS packets.
* @continuity_counter: Incremented on every new TS packet.
- * @n_pes_h_s_bytes: Padding bytes. Might be used by an encoder if needed, gets
+ * @wrote_pes_header: Flag to indicate that the PES header was written
+ * @n_stuffing_bytes: Padding bytes. Might be used by an encoder if needed, gets
* discarded by the decoder.
+ * @pcr: Counter driven by a 27 MHz clock.
*/
struct pes_ts_header_write_args {
void *dest_buf;
@@ -146,6 +147,7 @@ struct pes_ts_header_write_args {
* @dts: DTS value to send.
* @n_pes_h_s_bytes: Padding bytes. Might be used by an encoder if needed, gets
* discarded by the decoder.
+ * @pcr: Counter driven by a 27 MHz clock.
*/
struct pes_write_args {
void *dest_buf;
@@ -186,6 +188,6 @@ struct pes_write_args {
* equal to the size of the access unit, since we need space for PES headers, TS headers
* and padding bytes, if any.
*/
-u32 vidtv_pes_write_into(struct pes_write_args args);
+u32 vidtv_pes_write_into(struct pes_write_args *args);
#endif // VIDTV_PES_H
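
For reference, the PTS/DTS encoding behind pes_write_args is the ISO/IEC 13818-1 5-byte form: a 33-bit timestamp split into 3 + 15 + 15 bits, each chunk terminated by a marker bit, with a 4-bit prefix in the top nibble (0x2 for a lone PTS, 0x3/0x1 for the PTS/DTS pair). A standalone sketch of that packing, equivalent to the GENMASK-based code in vidtv_pes.c:

	/*
	 * Pack a 33-bit PTS into the 5-byte on-wire layout used above
	 * (0x2 prefix = PTS only).  Byte 0: prefix, PTS[32:30], marker;
	 * bytes 1-2: PTS[29:15], marker; bytes 3-4: PTS[14:0], marker.
	 */
	static void pack_pts(u8 out[5], u64 pts)
	{
		out[0] = (0x2 << 4) | (((pts >> 30) & 0x7) << 1) | 0x1;
		out[1] = (pts >> 22) & 0xff;
		out[2] = (((pts >> 15) & 0x7f) << 1) | 0x1;
		out[3] = (pts >> 7) & 0xff;
		out[4] = ((pts & 0x7f) << 1) | 0x1;
	}

Running the inverse shifts over the five bytes recovers the original 33-bit value, which is an easy way to unit-test the writer.
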
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
index 82cf67dd27c0..4511a2a98405 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
@@ -6,31 +6,31 @@
* technically be broken into one or more sections, we do not do this here,
* hence 'table' and 'section' are interchangeable for vidtv.
*
- * This code currently supports three tables: PAT, PMT and SDT. These are the
- * bare minimum to get userspace to recognize our MPEG transport stream. It can
- * be extended to support more PSI tables in the future.
- *
* Copyright (C) 2020 Daniel W. S. Almeida
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
+#include <linux/bcd.h>
#include <linux/crc32.h>
-#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
+#include <linux/slab.h>
#include <linux/string.h>
-#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/types.h>
-#include "vidtv_psi.h"
#include "vidtv_common.h"
+#include "vidtv_psi.h"
#include "vidtv_ts.h"
#define CRC_SIZE_IN_BYTES 4
#define MAX_VERSION_NUM 32
+#define INITIAL_CRC 0xffffffff
+#define ISO_LANGUAGE_CODE_LEN 3
static const u32 CRC_LUT[256] = {
/* from libdvbv5 */
@@ -79,7 +79,7 @@ static const u32 CRC_LUT[256] = {
0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
};
-static inline u32 dvb_crc32(u32 crc, u8 *data, u32 len)
+static u32 dvb_crc32(u32 crc, u8 *data, u32 len)
{
/* from libdvbv5 */
while (len--)
@@ -92,40 +92,7 @@ static void vidtv_psi_update_version_num(struct vidtv_psi_table_header *h)
h->version++;
}
-static inline u16 vidtv_psi_sdt_serv_get_desc_loop_len(struct vidtv_psi_table_sdt_service *s)
-{
- u16 mask;
- u16 ret;
-
- mask = GENMASK(11, 0);
-
- ret = be16_to_cpu(s->bitfield) & mask;
- return ret;
-}
-
-static inline u16 vidtv_psi_pmt_stream_get_desc_loop_len(struct vidtv_psi_table_pmt_stream *s)
-{
- u16 mask;
- u16 ret;
-
- mask = GENMASK(9, 0);
-
- ret = be16_to_cpu(s->bitfield2) & mask;
- return ret;
-}
-
-static inline u16 vidtv_psi_pmt_get_desc_loop_len(struct vidtv_psi_table_pmt *p)
-{
- u16 mask;
- u16 ret;
-
- mask = GENMASK(9, 0);
-
- ret = be16_to_cpu(p->bitfield2) & mask;
- return ret;
-}
-
-static inline u16 vidtv_psi_get_sec_len(struct vidtv_psi_table_header *h)
+static u16 vidtv_psi_get_sec_len(struct vidtv_psi_table_header *h)
{
u16 mask;
u16 ret;
@@ -136,7 +103,7 @@ static inline u16 vidtv_psi_get_sec_len(struct vidtv_psi_table_header *h)
return ret;
}
-inline u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p)
+u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p)
{
u16 mask;
u16 ret;
@@ -147,7 +114,7 @@ inline u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p)
return ret;
}
-inline u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *s)
+u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *s)
{
u16 mask;
u16 ret;
@@ -158,10 +125,11 @@ inline u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *
return ret;
}
-static inline void vidtv_psi_set_desc_loop_len(__be16 *bitfield, u16 new_len, u8 desc_len_nbits)
+static void vidtv_psi_set_desc_loop_len(__be16 *bitfield, u16 new_len,
+ u8 desc_len_nbits)
{
- u16 mask;
__be16 new;
+ u16 mask;
mask = GENMASK(15, desc_len_nbits);
@@ -188,90 +156,81 @@ static void vidtv_psi_set_sec_len(struct vidtv_psi_table_header *h, u16 new_len)
h->bitfield = new;
}
-static u32 vidtv_psi_ts_psi_write_into(struct psi_write_args args)
+/*
+ * Packetize PSI sections into TS packets:
+ * push a TS header (4 bytes) every 184 bytes
+ * manage the continuity_counter
+ * add stuffing (i.e. padding bytes) after the CRC
+ */
+static u32 vidtv_psi_ts_psi_write_into(struct psi_write_args *args)
{
- /*
- * Packetize PSI sections into TS packets:
- * push a TS header (4bytes) every 184 bytes
- * manage the continuity_counter
- * add stuffing (i.e. padding bytes) after the CRC
- */
-
- u32 nbytes_past_boundary = (args.dest_offset % TS_PACKET_LEN);
+ struct vidtv_mpeg_ts ts_header = {
+ .sync_byte = TS_SYNC_BYTE,
+ .bitfield = cpu_to_be16((args->new_psi_section << 14) | args->pid),
+ .scrambling = 0,
+ .payload = 1,
+ .adaptation_field = 0, /* no adaptation field */
+ };
+ u32 nbytes_past_boundary = (args->dest_offset % TS_PACKET_LEN);
bool aligned = (nbytes_past_boundary == 0);
- struct vidtv_mpeg_ts ts_header = {};
-
- /* number of bytes written by this function */
- u32 nbytes = 0;
- /* how much there is left to write */
- u32 remaining_len = args.len;
- /* how much we can be written in this packet */
+ u32 remaining_len = args->len;
u32 payload_write_len = 0;
- /* where we are in the source */
u32 payload_offset = 0;
+ u32 nbytes = 0;
- const u16 PAYLOAD_START = args.new_psi_section;
-
- if (!args.crc && !args.is_crc)
+ if (!args->crc && !args->is_crc)
pr_warn_ratelimited("Missing CRC for chunk\n");
- if (args.crc)
- *args.crc = dvb_crc32(*args.crc, args.from, args.len);
+ if (args->crc)
+ *args->crc = dvb_crc32(*args->crc, args->from, args->len);
- if (args.new_psi_section && !aligned) {
+ if (args->new_psi_section && !aligned) {
pr_warn_ratelimited("Cannot write a new PSI section in a misaligned buffer\n");
/* forcibly align and hope for the best */
- nbytes += vidtv_memset(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memset(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
TS_FILL_BYTE,
TS_PACKET_LEN - nbytes_past_boundary);
}
while (remaining_len) {
- nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+ nbytes_past_boundary = (args->dest_offset + nbytes) % TS_PACKET_LEN;
aligned = (nbytes_past_boundary == 0);
if (aligned) {
/* if at a packet boundary, write a new TS header */
- ts_header.sync_byte = TS_SYNC_BYTE;
- ts_header.bitfield = cpu_to_be16((PAYLOAD_START << 14) | args.pid);
- ts_header.scrambling = 0;
- ts_header.continuity_counter = *args.continuity_counter;
- ts_header.payload = 1;
- /* no adaptation field */
- ts_header.adaptation_field = 0;
-
- /* copy the header */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ ts_header.continuity_counter = *args->continuity_counter;
+
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
&ts_header,
sizeof(ts_header));
/*
* This will trigger a discontinuity if the buffer is full,
* effectively dropping the packet.
*/
- vidtv_ts_inc_cc(args.continuity_counter);
+ vidtv_ts_inc_cc(args->continuity_counter);
}
/* write the pointer_field in the first byte of the payload */
- if (args.new_psi_section)
- nbytes += vidtv_memset(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ if (args->new_psi_section)
+ nbytes += vidtv_memset(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
0x0,
1);
/* write as much of the payload as possible */
- nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+ nbytes_past_boundary = (args->dest_offset + nbytes) % TS_PACKET_LEN;
payload_write_len = min(TS_PACKET_LEN - nbytes_past_boundary, remaining_len);
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
- args.from + payload_offset,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
+ args->from + payload_offset,
payload_write_len);
/* 'payload_write_len' written from a total of 'len' requested*/
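
To sanity-check the arithmetic in vidtv_psi_ts_psi_write_into(): a new section starts on a 188-byte packet boundary, pays a 4-byte TS header plus a 1-byte pointer_field, and the tail of the last packet is padded with TS_FILL_BYTE after the CRC. A section whose header, body and CRC add up to 120 bytes therefore uses 4 + 1 + 120 = 125 bytes plus 63 fill bytes, i.e. exactly one packet. A small sketch of the packet count, assuming the standard 188/184-byte TS geometry the code above relies on:

	/*
	 * TS packets consumed by one PSI section (header + body + CRC).
	 * The first packet loses 4 bytes to the TS header and 1 byte to
	 * the pointer_field; every later packet only loses the 4-byte TS
	 * header.  The tail of the last packet is stuffed with TS_FILL_BYTE.
	 */
	static u32 psi_ts_packet_count(u32 section_len)
	{
		if (section_len <= 183)
			return 1;
		return 1 + DIV_ROUND_UP(section_len - 183, 184);
	}
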
@@ -283,37 +242,45 @@ static u32 vidtv_psi_ts_psi_write_into(struct psi_write_args args)
* fill the rest of the packet if there is any remaining space unused
*/
- nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+ nbytes_past_boundary = (args->dest_offset + nbytes) % TS_PACKET_LEN;
- if (args.is_crc)
- nbytes += vidtv_memset(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ if (args->is_crc)
+ nbytes += vidtv_memset(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
TS_FILL_BYTE,
TS_PACKET_LEN - nbytes_past_boundary);
return nbytes;
}
-static u32 table_section_crc32_write_into(struct crc32_write_args args)
+static u32 table_section_crc32_write_into(struct crc32_write_args *args)
{
+ struct psi_write_args psi_args = {
+ .dest_buf = args->dest_buf,
+ .from = &args->crc,
+ .len = CRC_SIZE_IN_BYTES,
+ .dest_offset = args->dest_offset,
+ .pid = args->pid,
+ .new_psi_section = false,
+ .continuity_counter = args->continuity_counter,
+ .is_crc = true,
+ .dest_buf_sz = args->dest_buf_sz,
+ };
+
/* the CRC is the last entry in the section */
- u32 nbytes = 0;
- struct psi_write_args psi_args = {};
- psi_args.dest_buf = args.dest_buf;
- psi_args.from = &args.crc;
- psi_args.len = CRC_SIZE_IN_BYTES;
- psi_args.dest_offset = args.dest_offset;
- psi_args.pid = args.pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = true;
- psi_args.dest_buf_sz = args.dest_buf_sz;
+ return vidtv_psi_ts_psi_write_into(&psi_args);
+}
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+static void vidtv_psi_desc_chain(struct vidtv_psi_desc *head, struct vidtv_psi_desc *desc)
+{
+ if (head) {
+ while (head->next)
+ head = head->next;
- return nbytes;
+ head->next = desc;
+ }
}
struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc *head,
@@ -326,6 +293,8 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
u32 provider_name_len = provider_name ? strlen(provider_name) : 0;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
desc->type = SERVICE_DESCRIPTOR;
@@ -347,12 +316,7 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
if (provider_name && provider_name_len)
desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
- if (head) {
- while (head->next)
- head = head->next;
-
- head->next = (struct vidtv_psi_desc *)desc;
- }
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
return desc;
}
@@ -365,6 +329,8 @@ struct vidtv_psi_desc_registration
struct vidtv_psi_desc_registration *desc;
desc = kzalloc(sizeof(*desc) + sizeof(format_id) + additional_info_len, GFP_KERNEL);
+ if (!desc)
+ return NULL;
desc->type = REGISTRATION_DESCRIPTOR;
@@ -378,44 +344,178 @@ struct vidtv_psi_desc_registration
additional_ident_info,
additional_info_len);
- if (head) {
- while (head->next)
- head = head->next;
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+}
- head->next = (struct vidtv_psi_desc *)desc;
+struct vidtv_psi_desc_network_name
+*vidtv_psi_network_name_desc_init(struct vidtv_psi_desc *head, char *network_name)
+{
+ u32 network_name_len = network_name ? strlen(network_name) : 0;
+ struct vidtv_psi_desc_network_name *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->type = NETWORK_NAME_DESCRIPTOR;
+
+ desc->length = network_name_len;
+
+ if (network_name && network_name_len)
+ desc->network_name = kstrdup(network_name, GFP_KERNEL);
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+}
+
+struct vidtv_psi_desc_service_list
+*vidtv_psi_service_list_desc_init(struct vidtv_psi_desc *head,
+ struct vidtv_psi_desc_service_list_entry *entry)
+{
+ struct vidtv_psi_desc_service_list_entry *curr_e = NULL;
+ struct vidtv_psi_desc_service_list_entry *head_e = NULL;
+ struct vidtv_psi_desc_service_list_entry *prev_e = NULL;
+ struct vidtv_psi_desc_service_list *desc;
+ u16 length = 0;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->type = SERVICE_LIST_DESCRIPTOR;
+
+ while (entry) {
+ curr_e = kzalloc(sizeof(*curr_e), GFP_KERNEL);
+ if (!curr_e) {
+ while (head_e) {
+ curr_e = head_e;
+ head_e = head_e->next;
+ kfree(curr_e);
+ }
+ kfree(desc);
+ return NULL;
+ }
+
+ curr_e->service_id = entry->service_id;
+ curr_e->service_type = entry->service_type;
+
+ length += sizeof(struct vidtv_psi_desc_service_list_entry) -
+ sizeof(struct vidtv_psi_desc_service_list_entry *);
+
+ if (!head_e)
+ head_e = curr_e;
+ if (prev_e)
+ prev_e->next = curr_e;
+
+ prev_e = curr_e;
+ entry = entry->next;
}
+ desc->length = length;
+ desc->service_list = head_e;
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+}
+
+struct vidtv_psi_desc_short_event
+*vidtv_psi_short_event_desc_init(struct vidtv_psi_desc *head,
+ char *iso_language_code,
+ char *event_name,
+ char *text)
+{
+ u32 iso_len = iso_language_code ? strlen(iso_language_code) : 0;
+ u32 event_name_len = event_name ? strlen(event_name) : 0;
+ struct vidtv_psi_desc_short_event *desc;
+ u32 text_len = text ? strlen(text) : 0;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->type = SHORT_EVENT_DESCRIPTOR;
+
+ desc->length = ISO_LANGUAGE_CODE_LEN +
+ sizeof_field(struct vidtv_psi_desc_short_event, event_name_len) +
+ event_name_len +
+ sizeof_field(struct vidtv_psi_desc_short_event, text_len) +
+ text_len;
+
+ desc->event_name_len = event_name_len;
+ desc->text_len = text_len;
+
+ if (iso_len != ISO_LANGUAGE_CODE_LEN)
+ iso_language_code = "eng";
+
+ desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
+
+ if (event_name && event_name_len)
+ desc->event_name = kstrdup(event_name, GFP_KERNEL);
+
+ if (text && text_len)
+ desc->text = kstrdup(text, GFP_KERNEL);
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
return desc;
}
struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
{
+ struct vidtv_psi_desc_network_name *desc_network_name;
+ struct vidtv_psi_desc_service_list *desc_service_list;
+ struct vidtv_psi_desc_short_event *desc_short_event;
+ struct vidtv_psi_desc_service *service;
struct vidtv_psi_desc *head = NULL;
struct vidtv_psi_desc *prev = NULL;
struct vidtv_psi_desc *curr = NULL;
- struct vidtv_psi_desc_service *service;
-
while (desc) {
switch (desc->type) {
case SERVICE_DESCRIPTOR:
service = (struct vidtv_psi_desc_service *)desc;
curr = (struct vidtv_psi_desc *)
- vidtv_psi_service_desc_init(head,
- service->service_type,
- service->service_name,
- service->provider_name);
+ vidtv_psi_service_desc_init(head,
+ service->service_type,
+ service->service_name,
+ service->provider_name);
+ break;
+
+ case NETWORK_NAME_DESCRIPTOR:
+ desc_network_name = (struct vidtv_psi_desc_network_name *)desc;
+ curr = (struct vidtv_psi_desc *)
+ vidtv_psi_network_name_desc_init(head,
+ desc_network_name->network_name);
+ break;
+
+ case SERVICE_LIST_DESCRIPTOR:
+ desc_service_list = (struct vidtv_psi_desc_service_list *)desc;
+ curr = (struct vidtv_psi_desc *)
+ vidtv_psi_service_list_desc_init(head,
+ desc_service_list->service_list);
+ break;
+
+ case SHORT_EVENT_DESCRIPTOR:
+ desc_short_event = (struct vidtv_psi_desc_short_event *)desc;
+ curr = (struct vidtv_psi_desc *)
+ vidtv_psi_short_event_desc_init(head,
+ desc_short_event->iso_language_code,
+ desc_short_event->event_name,
+ desc_short_event->text);
break;
case REGISTRATION_DESCRIPTOR:
default:
curr = kzalloc(sizeof(*desc) + desc->length, GFP_KERNEL);
+ if (!curr)
+ return NULL;
memcpy(curr, desc, sizeof(*desc) + desc->length);
- break;
- }
+ }
- if (curr)
- curr->next = NULL;
+ if (!curr)
+ return NULL;
+
+ curr->next = NULL;
if (!head)
head = curr;
if (prev)
@@ -430,6 +530,8 @@ struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
void vidtv_psi_desc_destroy(struct vidtv_psi_desc *desc)
{
+ struct vidtv_psi_desc_service_list_entry *sl_entry_tmp = NULL;
+ struct vidtv_psi_desc_service_list_entry *sl_entry = NULL;
struct vidtv_psi_desc *curr = desc;
struct vidtv_psi_desc *tmp = NULL;
@@ -447,6 +549,25 @@ void vidtv_psi_desc_destroy(struct vidtv_psi_desc *desc)
/* nothing to do */
break;
+ case NETWORK_NAME_DESCRIPTOR:
+ kfree(((struct vidtv_psi_desc_network_name *)tmp)->network_name);
+ break;
+
+ case SERVICE_LIST_DESCRIPTOR:
+ sl_entry = ((struct vidtv_psi_desc_service_list *)tmp)->service_list;
+ while (sl_entry) {
+ sl_entry_tmp = sl_entry;
+ sl_entry = sl_entry->next;
+ kfree(sl_entry_tmp);
+ }
+ break;
+
+ case SHORT_EVENT_DESCRIPTOR:
+ kfree(((struct vidtv_psi_desc_short_event *)tmp)->iso_language_code);
+ kfree(((struct vidtv_psi_desc_short_event *)tmp)->event_name);
+ kfree(((struct vidtv_psi_desc_short_event *)tmp)->text);
+ break;
+
default:
pr_warn_ratelimited("Possible leak: not handling descriptor type %d\n",
tmp->type);
@@ -513,63 +634,119 @@ void vidtv_sdt_desc_assign(struct vidtv_psi_table_sdt *sdt,
vidtv_psi_update_version_num(&sdt->header);
}
-static u32 vidtv_psi_desc_write_into(struct desc_write_args args)
+static u32 vidtv_psi_desc_write_into(struct desc_write_args *args)
{
- /* the number of bytes written by this function */
+ struct psi_write_args psi_args = {
+ .dest_buf = args->dest_buf,
+ .from = &args->desc->type,
+ .pid = args->pid,
+ .new_psi_section = false,
+ .continuity_counter = args->continuity_counter,
+ .is_crc = false,
+ .dest_buf_sz = args->dest_buf_sz,
+ .crc = args->crc,
+ .len = sizeof_field(struct vidtv_psi_desc, type) +
+ sizeof_field(struct vidtv_psi_desc, length),
+ };
+ struct vidtv_psi_desc_service_list_entry *serv_list_entry = NULL;
u32 nbytes = 0;
- struct psi_write_args psi_args = {};
-
- psi_args.dest_buf = args.dest_buf;
- psi_args.from = &args.desc->type;
- psi_args.len = sizeof_field(struct vidtv_psi_desc, type) +
- sizeof_field(struct vidtv_psi_desc, length);
+ psi_args.dest_offset = args->dest_offset + nbytes;
- psi_args.dest_offset = args.dest_offset + nbytes;
- psi_args.pid = args.pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.dest_buf_sz;
- psi_args.crc = args.crc;
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
-
- switch (args.desc->type) {
+ switch (args->desc->type) {
case SERVICE_DESCRIPTOR:
- psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.dest_offset = args->dest_offset + nbytes;
psi_args.len = sizeof_field(struct vidtv_psi_desc_service, service_type) +
sizeof_field(struct vidtv_psi_desc_service, provider_name_len);
- psi_args.from = &((struct vidtv_psi_desc_service *)args.desc)->service_type;
+ psi_args.from = &((struct vidtv_psi_desc_service *)args->desc)->service_type;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- psi_args.dest_offset = args.dest_offset + nbytes;
- psi_args.len = ((struct vidtv_psi_desc_service *)args.desc)->provider_name_len;
- psi_args.from = ((struct vidtv_psi_desc_service *)args.desc)->provider_name;
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_service *)args->desc)->provider_name_len;
+ psi_args.from = ((struct vidtv_psi_desc_service *)args->desc)->provider_name;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.dest_offset = args->dest_offset + nbytes;
psi_args.len = sizeof_field(struct vidtv_psi_desc_service, service_name_len);
- psi_args.from = &((struct vidtv_psi_desc_service *)args.desc)->service_name_len;
+ psi_args.from = &((struct vidtv_psi_desc_service *)args->desc)->service_name_len;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- psi_args.dest_offset = args.dest_offset + nbytes;
- psi_args.len = ((struct vidtv_psi_desc_service *)args.desc)->service_name_len;
- psi_args.from = ((struct vidtv_psi_desc_service *)args.desc)->service_name;
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_service *)args->desc)->service_name_len;
+ psi_args.from = ((struct vidtv_psi_desc_service *)args->desc)->service_name;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+ break;
+
+ case NETWORK_NAME_DESCRIPTOR:
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = args->desc->length;
+ psi_args.from = ((struct vidtv_psi_desc_network_name *)args->desc)->network_name;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+ break;
+
+ case SERVICE_LIST_DESCRIPTOR:
+ serv_list_entry = ((struct vidtv_psi_desc_service_list *)args->desc)->service_list;
+ while (serv_list_entry) {
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = sizeof(struct vidtv_psi_desc_service_list_entry) -
+ sizeof(struct vidtv_psi_desc_service_list_entry *);
+ psi_args.from = serv_list_entry;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ serv_list_entry = serv_list_entry->next;
+ }
+ break;
+
+ case SHORT_EVENT_DESCRIPTOR:
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ISO_LANGUAGE_CODE_LEN;
+ psi_args.from = ((struct vidtv_psi_desc_short_event *)
+ args->desc)->iso_language_code;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = sizeof_field(struct vidtv_psi_desc_short_event, event_name_len);
+ psi_args.from = &((struct vidtv_psi_desc_short_event *)
+ args->desc)->event_name_len;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_short_event *)args->desc)->event_name_len;
+ psi_args.from = ((struct vidtv_psi_desc_short_event *)args->desc)->event_name;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = sizeof_field(struct vidtv_psi_desc_short_event, text_len);
+ psi_args.from = &((struct vidtv_psi_desc_short_event *)args->desc)->text_len;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_short_event *)args->desc)->text_len;
+ psi_args.from = ((struct vidtv_psi_desc_short_event *)args->desc)->text;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
break;
case REGISTRATION_DESCRIPTOR:
default:
- psi_args.dest_offset = args.dest_offset + nbytes;
- psi_args.len = args.desc->length;
- psi_args.from = &args.desc->data;
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = args->desc->length;
+ psi_args.from = &args->desc->data;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
break;
}
@@ -577,40 +754,37 @@ static u32 vidtv_psi_desc_write_into(struct desc_write_args args)
}
static u32
-vidtv_psi_table_header_write_into(struct header_write_args args)
+vidtv_psi_table_header_write_into(struct header_write_args *args)
{
- /* the number of bytes written by this function */
- u32 nbytes = 0;
- struct psi_write_args psi_args = {};
-
- psi_args.dest_buf = args.dest_buf;
- psi_args.from = args.h;
- psi_args.len = sizeof(struct vidtv_psi_table_header);
- psi_args.dest_offset = args.dest_offset;
- psi_args.pid = args.pid;
- psi_args.new_psi_section = true;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.dest_buf_sz;
- psi_args.crc = args.crc;
-
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
-
- return nbytes;
+ struct psi_write_args psi_args = {
+ .dest_buf = args->dest_buf,
+ .from = args->h,
+ .len = sizeof(struct vidtv_psi_table_header),
+ .dest_offset = args->dest_offset,
+ .pid = args->pid,
+ .new_psi_section = true,
+ .continuity_counter = args->continuity_counter,
+ .is_crc = false,
+ .dest_buf_sz = args->dest_buf_sz,
+ .crc = args->crc,
+ };
+
+ return vidtv_psi_ts_psi_write_into(&psi_args);
}
void
vidtv_psi_pat_table_update_sec_len(struct vidtv_psi_table_pat *pat)
{
- /* see ISO/IEC 13818-1 : 2000 p.43 */
u16 length = 0;
u32 i;
+ /* see ISO/IEC 13818-1 : 2000 p.43 */
+
/* from immediately after 'section_length' until 'last_section_number'*/
length += PAT_LEN_UNTIL_LAST_SECTION_NUMBER;
/* do not count the pointer */
- for (i = 0; i < pat->programs; ++i)
+ for (i = 0; i < pat->num_pat; ++i)
length += sizeof(struct vidtv_psi_table_pat_program) -
sizeof(struct vidtv_psi_table_pat_program *);
@@ -621,10 +795,11 @@ vidtv_psi_pat_table_update_sec_len(struct vidtv_psi_table_pat *pat)
void vidtv_psi_pmt_table_update_sec_len(struct vidtv_psi_table_pmt *pmt)
{
- /* see ISO/IEC 13818-1 : 2000 p.46 */
- u16 length = 0;
struct vidtv_psi_table_pmt_stream *s = pmt->stream;
u16 desc_loop_len;
+ u16 length = 0;
+
+ /* see ISO/IEC 13818-1 : 2000 p.46 */
/* from immediately after 'section_length' until 'program_info_length'*/
length += PMT_LEN_UNTIL_PROGRAM_INFO_LENGTH;
@@ -655,10 +830,11 @@ void vidtv_psi_pmt_table_update_sec_len(struct vidtv_psi_table_pmt *pmt)
void vidtv_psi_sdt_table_update_sec_len(struct vidtv_psi_table_sdt *sdt)
{
- /* see ETSI EN 300 468 V 1.10.1 p.24 */
- u16 length = 0;
struct vidtv_psi_table_sdt_service *s = sdt->service;
u16 desc_loop_len;
+ u16 length = 0;
+
+ /* see ETSI EN 300 468 V 1.10.1 p.24 */
/*
* from immediately after 'section_length' until
@@ -681,7 +857,6 @@ void vidtv_psi_sdt_table_update_sec_len(struct vidtv_psi_table_sdt *sdt)
}
length += CRC_SIZE_IN_BYTES;
-
vidtv_psi_set_sec_len(&sdt->header, length);
}
@@ -694,6 +869,8 @@ vidtv_psi_pat_program_init(struct vidtv_psi_table_pat_program *head,
const u16 RESERVED = 0x07;
program = kzalloc(sizeof(*program), GFP_KERNEL);
+ if (!program)
+ return NULL;
program->service_id = cpu_to_be16(service_id);
@@ -714,8 +891,8 @@ vidtv_psi_pat_program_init(struct vidtv_psi_table_pat_program *head,
void
vidtv_psi_pat_program_destroy(struct vidtv_psi_table_pat_program *p)
{
- struct vidtv_psi_table_pat_program *curr = p;
struct vidtv_psi_table_pat_program *tmp = NULL;
+ struct vidtv_psi_table_pat_program *curr = p;
while (curr) {
tmp = curr;
@@ -724,42 +901,49 @@ vidtv_psi_pat_program_destroy(struct vidtv_psi_table_pat_program *p)
}
}
+/* This function transfers ownership of p to the table */
void
vidtv_psi_pat_program_assign(struct vidtv_psi_table_pat *pat,
struct vidtv_psi_table_pat_program *p)
{
- /* This function transfers ownership of p to the table */
+ struct vidtv_psi_table_pat_program *program;
+ u16 program_count;
- u16 program_count = 0;
- struct vidtv_psi_table_pat_program *program = p;
+ do {
+ program_count = 0;
+ program = p;
- if (p == pat->program)
- return;
+ if (p == pat->program)
+ return;
- while (program) {
- ++program_count;
- program = program->next;
- }
+ while (program) {
+ ++program_count;
+ program = program->next;
+ }
- pat->programs = program_count;
- pat->program = p;
+ pat->num_pat = program_count;
+ pat->program = p;
- /* Recompute section length */
- vidtv_psi_pat_table_update_sec_len(pat);
+ /* Recompute section length */
+ vidtv_psi_pat_table_update_sec_len(pat);
- if (vidtv_psi_get_sec_len(&pat->header) > MAX_SECTION_LEN)
- vidtv_psi_pat_program_assign(pat, NULL);
+ p = NULL;
+ } while (vidtv_psi_get_sec_len(&pat->header) > MAX_SECTION_LEN);
vidtv_psi_update_version_num(&pat->header);
}
struct vidtv_psi_table_pat *vidtv_psi_pat_table_init(u16 transport_stream_id)
{
- struct vidtv_psi_table_pat *pat = kzalloc(sizeof(*pat), GFP_KERNEL);
+ struct vidtv_psi_table_pat *pat;
const u16 SYNTAX = 0x1;
const u16 ZERO = 0x0;
const u16 ONES = 0x03;
+ pat = kzalloc(sizeof(*pat), GFP_KERNEL);
+ if (!pat)
+ return NULL;
+
pat->header.table_id = 0x0;
pat->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ZERO << 14) | (ONES << 12));
@@ -772,70 +956,68 @@ struct vidtv_psi_table_pat *vidtv_psi_pat_table_init(u16 transport_stream_id)
pat->header.section_id = 0x0;
pat->header.last_section = 0x0;
- pat->programs = 0;
-
vidtv_psi_pat_table_update_sec_len(pat);
return pat;
}
-u32 vidtv_psi_pat_write_into(struct vidtv_psi_pat_write_args args)
+u32 vidtv_psi_pat_write_into(struct vidtv_psi_pat_write_args *args)
{
- /* the number of bytes written by this function */
+ struct vidtv_psi_table_pat_program *p = args->pat->program;
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .pid = VIDTV_PAT_PID,
+ .h = &args->pat->header,
+ .continuity_counter = args->continuity_counter,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_PAT_PID,
+ .new_psi_section = false,
+ .continuity_counter = args->continuity_counter,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_PAT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ u32 crc = INITIAL_CRC;
u32 nbytes = 0;
- const u16 pat_pid = VIDTV_PAT_PID;
- u32 crc = 0xffffffff;
-
- struct vidtv_psi_table_pat_program *p = args.pat->program;
- struct header_write_args h_args = {};
- struct psi_write_args psi_args = {};
- struct crc32_write_args c_args = {};
+ vidtv_psi_pat_table_update_sec_len(args->pat);
- vidtv_psi_pat_table_update_sec_len(args.pat);
-
- h_args.dest_buf = args.buf;
- h_args.dest_offset = args.offset;
- h_args.h = &args.pat->header;
- h_args.pid = pat_pid;
- h_args.continuity_counter = args.continuity_counter;
- h_args.dest_buf_sz = args.buf_sz;
h_args.crc = &crc;
- nbytes += vidtv_psi_table_header_write_into(h_args);
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
-	/* note that the field 'u16 programs' is not really part of the PAT */
+	/* note that the field 'num_pat' is not really part of the PAT */
- psi_args.dest_buf = args.buf;
- psi_args.pid = pat_pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.buf_sz;
- psi_args.crc = &crc;
+ psi_args.crc = &crc;
while (p) {
/* copy the PAT programs */
psi_args.from = p;
/* skip the pointer */
psi_args.len = sizeof(*p) -
- sizeof(struct vidtv_psi_table_pat_program *);
- psi_args.dest_offset = args.offset + nbytes;
+ sizeof(struct vidtv_psi_table_pat_program *);
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
p = p->next;
}
- c_args.dest_buf = args.buf;
- c_args.dest_offset = args.offset + nbytes;
+ c_args.dest_offset = args->offset + nbytes;
+ c_args.continuity_counter = args->continuity_counter;
c_args.crc = cpu_to_be32(crc);
- c_args.pid = pat_pid;
- c_args.continuity_counter = args.continuity_counter;
- c_args.dest_buf_sz = args.buf_sz;
/* Write the CRC32 at the end */
- nbytes += table_section_crc32_write_into(c_args);
+ nbytes += table_section_crc32_write_into(&c_args);
return nbytes;
}
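
The PAT writer above relies on dvb_crc32() being incremental: crc starts at INITIAL_CRC, every chunk pushed through vidtv_psi_ts_psi_write_into() is also folded into the CRC, and the final value is appended big-endian by table_section_crc32_write_into(). Assuming dvb_crc32() implements the usual non-reflected MPEG-2/DVB section CRC (polynomial 0x04C11DB7, initial value 0xFFFFFFFF, no final XOR), a bitwise equivalent looks like this; feeding the chunks one by one gives the same result as a single pass over the whole section:

	/* Bit-at-a-time equivalent of the table-driven dvb_crc32() above. */
	static u32 crc32_mpeg2(u32 crc, const u8 *data, u32 len)
	{
		int i;

		while (len--) {
			crc ^= (u32)*data++ << 24;
			for (i = 0; i < 8; i++)
				crc = (crc & 0x80000000) ? (crc << 1) ^ 0x04c11db7
							 : crc << 1;
		}
		return crc;
	}
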
@@ -859,6 +1041,8 @@ vidtv_psi_pmt_stream_init(struct vidtv_psi_table_pmt_stream *head,
u16 desc_loop_len;
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return NULL;
stream->type = stream_type;
@@ -883,8 +1067,8 @@ vidtv_psi_pmt_stream_init(struct vidtv_psi_table_pmt_stream *head,
void vidtv_psi_pmt_stream_destroy(struct vidtv_psi_table_pmt_stream *s)
{
- struct vidtv_psi_table_pmt_stream *curr_stream = s;
struct vidtv_psi_table_pmt_stream *tmp_stream = NULL;
+ struct vidtv_psi_table_pmt_stream *curr_stream = s;
while (curr_stream) {
tmp_stream = curr_stream;
@@ -897,15 +1081,16 @@ void vidtv_psi_pmt_stream_destroy(struct vidtv_psi_table_pmt_stream *s)
void vidtv_psi_pmt_stream_assign(struct vidtv_psi_table_pmt *pmt,
struct vidtv_psi_table_pmt_stream *s)
{
- /* This function transfers ownership of s to the table */
- if (s == pmt->stream)
- return;
+ do {
+ /* This function transfers ownership of s to the table */
+ if (s == pmt->stream)
+ return;
- pmt->stream = s;
- vidtv_psi_pmt_table_update_sec_len(pmt);
+ pmt->stream = s;
+ vidtv_psi_pmt_table_update_sec_len(pmt);
- if (vidtv_psi_get_sec_len(&pmt->header) > MAX_SECTION_LEN)
- vidtv_psi_pmt_stream_assign(pmt, NULL);
+ s = NULL;
+ } while (vidtv_psi_get_sec_len(&pmt->header) > MAX_SECTION_LEN);
vidtv_psi_update_version_num(&pmt->header);
}
@@ -933,14 +1118,18 @@ u16 vidtv_psi_pmt_get_pid(struct vidtv_psi_table_pmt *section,
struct vidtv_psi_table_pmt *vidtv_psi_pmt_table_init(u16 program_number,
u16 pcr_pid)
{
- struct vidtv_psi_table_pmt *pmt = kzalloc(sizeof(*pmt), GFP_KERNEL);
- const u16 SYNTAX = 0x1;
- const u16 ZERO = 0x0;
- const u16 ONES = 0x03;
+ struct vidtv_psi_table_pmt *pmt;
const u16 RESERVED1 = 0x07;
const u16 RESERVED2 = 0x0f;
+ const u16 SYNTAX = 0x1;
+ const u16 ONES = 0x03;
+ const u16 ZERO = 0x0;
u16 desc_loop_len;
+ pmt = kzalloc(sizeof(*pmt), GFP_KERNEL);
+ if (!pmt)
+ return NULL;
+
if (!pcr_pid)
pcr_pid = 0x1fff;
@@ -970,87 +1159,84 @@ struct vidtv_psi_table_pmt *vidtv_psi_pmt_table_init(u16 program_number,
return pmt;
}
-u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args args)
+u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
{
- /* the number of bytes written by this function */
+ struct vidtv_psi_desc *table_descriptor = args->pmt->descriptor;
+ struct vidtv_psi_table_pmt_stream *stream = args->pmt->stream;
+ struct vidtv_psi_desc *stream_descriptor;
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .h = &args->pmt->header,
+ .pid = args->pid,
+ .continuity_counter = args->continuity_counter,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .from = &args->pmt->bitfield,
+ .len = sizeof_field(struct vidtv_psi_table_pmt, bitfield) +
+ sizeof_field(struct vidtv_psi_table_pmt, bitfield2),
+ .pid = args->pid,
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+ .desc = table_descriptor,
+ .pid = args->pid,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = args->pid,
+ .dest_buf_sz = args->buf_sz,
+ };
+ u32 crc = INITIAL_CRC;
u32 nbytes = 0;
- u32 crc = 0xffffffff;
-
- struct vidtv_psi_desc *table_descriptor = args.pmt->descriptor;
- struct vidtv_psi_table_pmt_stream *stream = args.pmt->stream;
- struct vidtv_psi_desc *stream_descriptor = (stream) ?
- args.pmt->stream->descriptor :
- NULL;
-
- struct header_write_args h_args = {};
- struct psi_write_args psi_args = {};
- struct desc_write_args d_args = {};
- struct crc32_write_args c_args = {};
-
- vidtv_psi_pmt_table_update_sec_len(args.pmt);
-
- h_args.dest_buf = args.buf;
- h_args.dest_offset = args.offset;
- h_args.h = &args.pmt->header;
- h_args.pid = args.pid;
- h_args.continuity_counter = args.continuity_counter;
- h_args.dest_buf_sz = args.buf_sz;
+
+ vidtv_psi_pmt_table_update_sec_len(args->pmt);
+
h_args.crc = &crc;
- nbytes += vidtv_psi_table_header_write_into(h_args);
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
/* write the two bitfields */
- psi_args.dest_buf = args.buf;
- psi_args.from = &args.pmt->bitfield;
- psi_args.len = sizeof_field(struct vidtv_psi_table_pmt, bitfield) +
- sizeof_field(struct vidtv_psi_table_pmt, bitfield2);
-
- psi_args.dest_offset = args.offset + nbytes;
- psi_args.pid = args.pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.buf_sz;
- psi_args.crc = &crc;
-
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
while (table_descriptor) {
/* write the descriptors, if any */
- d_args.dest_buf = args.buf;
- d_args.dest_offset = args.offset + nbytes;
- d_args.desc = table_descriptor;
- d_args.pid = args.pid;
- d_args.continuity_counter = args.continuity_counter;
- d_args.dest_buf_sz = args.buf_sz;
+ d_args.dest_offset = args->offset + nbytes;
+ d_args.continuity_counter = args->continuity_counter;
d_args.crc = &crc;
- nbytes += vidtv_psi_desc_write_into(d_args);
+ nbytes += vidtv_psi_desc_write_into(&d_args);
table_descriptor = table_descriptor->next;
}
+ psi_args.len += sizeof_field(struct vidtv_psi_table_pmt_stream, type);
while (stream) {
/* write the streams, if any */
psi_args.from = stream;
- psi_args.len = sizeof_field(struct vidtv_psi_table_pmt_stream, type) +
- sizeof_field(struct vidtv_psi_table_pmt_stream, bitfield) +
- sizeof_field(struct vidtv_psi_table_pmt_stream, bitfield2);
- psi_args.dest_offset = args.offset + nbytes;
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ stream_descriptor = stream->descriptor;
while (stream_descriptor) {
/* write the stream descriptors, if any */
- d_args.dest_buf = args.buf;
- d_args.dest_offset = args.offset + nbytes;
+ d_args.dest_offset = args->offset + nbytes;
d_args.desc = stream_descriptor;
- d_args.pid = args.pid;
- d_args.continuity_counter = args.continuity_counter;
- d_args.dest_buf_sz = args.buf_sz;
+ d_args.continuity_counter = args->continuity_counter;
d_args.crc = &crc;
- nbytes += vidtv_psi_desc_write_into(d_args);
+ nbytes += vidtv_psi_desc_write_into(&d_args);
stream_descriptor = stream_descriptor->next;
}
@@ -1058,15 +1244,12 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args args)
stream = stream->next;
}
- c_args.dest_buf = args.buf;
- c_args.dest_offset = args.offset + nbytes;
+ c_args.dest_offset = args->offset + nbytes;
c_args.crc = cpu_to_be32(crc);
- c_args.pid = args.pid;
- c_args.continuity_counter = args.continuity_counter;
- c_args.dest_buf_sz = args.buf_sz;
+ c_args.continuity_counter = args->continuity_counter;
/* Write the CRC32 at the end */
- nbytes += table_section_crc32_write_into(c_args);
+ nbytes += table_section_crc32_write_into(&c_args);
return nbytes;
}
@@ -1078,16 +1261,20 @@ void vidtv_psi_pmt_table_destroy(struct vidtv_psi_table_pmt *pmt)
kfree(pmt);
}
-struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 transport_stream_id)
+struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 network_id,
+ u16 transport_stream_id)
{
- struct vidtv_psi_table_sdt *sdt = kzalloc(sizeof(*sdt), GFP_KERNEL);
+ struct vidtv_psi_table_sdt *sdt;
+ const u16 RESERVED = 0xff;
const u16 SYNTAX = 0x1;
- const u16 ONE = 0x1;
const u16 ONES = 0x03;
- const u16 RESERVED = 0xff;
+ const u16 ONE = 0x1;
- sdt->header.table_id = 0x42;
+ sdt = kzalloc(sizeof(*sdt), GFP_KERNEL);
+ if (!sdt)
+ return NULL;
+ sdt->header.table_id = 0x42;
sdt->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ONE << 14) | (ONES << 12));
/*
@@ -1111,7 +1298,7 @@ struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 transport_stream_id)
* This can be changed to something more useful, when support for
* NIT gets added
*/
- sdt->network_id = cpu_to_be16(0xff01);
+ sdt->network_id = cpu_to_be16(network_id);
sdt->reserved = RESERVED;
vidtv_psi_sdt_table_update_sec_len(sdt);
@@ -1119,74 +1306,79 @@ struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 transport_stream_id)
return sdt;
}
-u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args args)
+u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args *args)
{
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .h = &args->sdt->header,
+ .pid = VIDTV_SDT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .len = sizeof_field(struct vidtv_psi_table_sdt, network_id) +
+ sizeof_field(struct vidtv_psi_table_sdt, reserved),
+ .pid = VIDTV_SDT_PID,
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_SDT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_SDT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct vidtv_psi_table_sdt_service *service = args->sdt->service;
+ struct vidtv_psi_desc *service_desc;
u32 nbytes = 0;
- u16 sdt_pid = VIDTV_SDT_PID; /* see ETSI EN 300 468 v1.15.1 p. 11 */
+ u32 crc = INITIAL_CRC;
- u32 crc = 0xffffffff;
+ /* see ETSI EN 300 468 v1.15.1 p. 11 */
- struct vidtv_psi_table_sdt_service *service = args.sdt->service;
- struct vidtv_psi_desc *service_desc = (args.sdt->service) ?
- args.sdt->service->descriptor :
- NULL;
+ vidtv_psi_sdt_table_update_sec_len(args->sdt);
- struct header_write_args h_args = {};
- struct psi_write_args psi_args = {};
- struct desc_write_args d_args = {};
- struct crc32_write_args c_args = {};
-
- vidtv_psi_sdt_table_update_sec_len(args.sdt);
-
- h_args.dest_buf = args.buf;
- h_args.dest_offset = args.offset;
- h_args.h = &args.sdt->header;
- h_args.pid = sdt_pid;
- h_args.continuity_counter = args.continuity_counter;
- h_args.dest_buf_sz = args.buf_sz;
+ h_args.continuity_counter = args->continuity_counter;
h_args.crc = &crc;
- nbytes += vidtv_psi_table_header_write_into(h_args);
-
- psi_args.dest_buf = args.buf;
- psi_args.from = &args.sdt->network_id;
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
- psi_args.len = sizeof_field(struct vidtv_psi_table_sdt, network_id) +
- sizeof_field(struct vidtv_psi_table_sdt, reserved);
-
- psi_args.dest_offset = args.offset + nbytes;
- psi_args.pid = sdt_pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.buf_sz;
+ psi_args.from = &args->sdt->network_id;
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
psi_args.crc = &crc;
/* copy u16 network_id + u8 reserved */
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ /* skip both pointers at the end */
+ psi_args.len = sizeof(struct vidtv_psi_table_sdt_service) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_sdt_service *);
while (service) {
/* copy the services, if any */
psi_args.from = service;
- /* skip both pointers at the end */
- psi_args.len = sizeof(struct vidtv_psi_table_sdt_service) -
- sizeof(struct vidtv_psi_desc *) -
- sizeof(struct vidtv_psi_table_sdt_service *);
- psi_args.dest_offset = args.offset + nbytes;
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ service_desc = service->descriptor;
while (service_desc) {
/* copy the service descriptors, if any */
- d_args.dest_buf = args.buf;
- d_args.dest_offset = args.offset + nbytes;
+ d_args.dest_offset = args->offset + nbytes;
d_args.desc = service_desc;
- d_args.pid = sdt_pid;
- d_args.continuity_counter = args.continuity_counter;
- d_args.dest_buf_sz = args.buf_sz;
+ d_args.continuity_counter = args->continuity_counter;
d_args.crc = &crc;
- nbytes += vidtv_psi_desc_write_into(d_args);
+ nbytes += vidtv_psi_desc_write_into(&d_args);
service_desc = service_desc->next;
}
@@ -1194,15 +1386,12 @@ u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args args)
service = service->next;
}
- c_args.dest_buf = args.buf;
- c_args.dest_offset = args.offset + nbytes;
+ c_args.dest_offset = args->offset + nbytes;
c_args.crc = cpu_to_be32(crc);
- c_args.pid = sdt_pid;
- c_args.continuity_counter = args.continuity_counter;
- c_args.dest_buf_sz = args.buf_sz;
+ c_args.continuity_counter = args->continuity_counter;
/* Write the CRC at the end */
- nbytes += table_section_crc32_write_into(c_args);
+ nbytes += table_section_crc32_write_into(&c_args);
return nbytes;
}
@@ -1215,11 +1404,15 @@ void vidtv_psi_sdt_table_destroy(struct vidtv_psi_table_sdt *sdt)
struct vidtv_psi_table_sdt_service
*vidtv_psi_sdt_service_init(struct vidtv_psi_table_sdt_service *head,
- u16 service_id)
+ u16 service_id,
+ bool eit_schedule,
+ bool eit_present_following)
{
struct vidtv_psi_table_sdt_service *service;
service = kzalloc(sizeof(*service), GFP_KERNEL);
+ if (!service)
+ return NULL;
/*
* ETSI 300 468: this is a 16bit field which serves as a label to
@@ -1228,8 +1421,8 @@ struct vidtv_psi_table_sdt_service
* corresponding program_map_section
*/
service->service_id = cpu_to_be16(service_id);
- service->EIT_schedule = 0x0;
- service->EIT_present_following = 0x0;
+ service->EIT_schedule = eit_schedule;
+ service->EIT_present_following = eit_present_following;
service->reserved = 0x3f;
service->bitfield = cpu_to_be16(RUNNING << 13);
@@ -1262,53 +1455,78 @@ void
vidtv_psi_sdt_service_assign(struct vidtv_psi_table_sdt *sdt,
struct vidtv_psi_table_sdt_service *service)
{
- if (service == sdt->service)
- return;
+ do {
+ if (service == sdt->service)
+ return;
- sdt->service = service;
+ sdt->service = service;
- /* recompute section length */
- vidtv_psi_sdt_table_update_sec_len(sdt);
+ /* recompute section length */
+ vidtv_psi_sdt_table_update_sec_len(sdt);
- if (vidtv_psi_get_sec_len(&sdt->header) > MAX_SECTION_LEN)
- vidtv_psi_sdt_service_assign(sdt, NULL);
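+ /* if the section became too long, loop again with an empty service loop */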
+ service = NULL;
+ } while (vidtv_psi_get_sec_len(&sdt->header) > MAX_SECTION_LEN);
vidtv_psi_update_version_num(&sdt->header);
}
+/*
+ * PMTs contain information about programs. For each program,
+ * there is one PMT section. This function will create a section
+ * for each program found in the PAT
+ */
struct vidtv_psi_table_pmt**
-vidtv_psi_pmt_create_sec_for_each_pat_entry(struct vidtv_psi_table_pat *pat, u16 pcr_pid)
+vidtv_psi_pmt_create_sec_for_each_pat_entry(struct vidtv_psi_table_pat *pat,
+ u16 pcr_pid)
{
+ struct vidtv_psi_table_pat_program *program;
+ struct vidtv_psi_table_pmt **pmt_secs;
+ u32 i = 0, num_pmt = 0;
+
/*
- * PMTs contain information about programs. For each program,
- * there is one PMT section. This function will create a section
- * for each program found in the PAT
+ * The number of PMT entries is the number of PAT entries
+ * that contain a service_id. That excludes special tables, like the NIT
*/
- struct vidtv_psi_table_pat_program *program = pat->program;
- struct vidtv_psi_table_pmt **pmt_secs;
- u32 i = 0;
+ program = pat->program;
+ while (program) {
+ if (program->service_id)
+ num_pmt++;
+ program = program->next;
+ }
- /* a section for each program_id */
- pmt_secs = kcalloc(pat->programs,
+ pmt_secs = kcalloc(num_pmt,
sizeof(struct vidtv_psi_table_pmt *),
GFP_KERNEL);
-
- while (program) {
- pmt_secs[i] = vidtv_psi_pmt_table_init(be16_to_cpu(program->service_id), pcr_pid);
- ++i;
- program = program->next;
+ if (!pmt_secs)
+ return NULL;
+
+ for (program = pat->program; program; program = program->next) {
+ if (!program->service_id)
+ continue;
+ pmt_secs[i] = vidtv_psi_pmt_table_init(be16_to_cpu(program->service_id),
+ pcr_pid);
+
+ if (!pmt_secs[i]) {
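+ /* undo the PMT sections already allocated before bailing out */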
+ while (i > 0) {
+ i--;
+ vidtv_psi_pmt_table_destroy(pmt_secs[i]);
+ }
+ return NULL;
+ }
+ i++;
}
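+ /* num_pmt tells users of this PAT how many PMT sections were created */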
+ pat->num_pmt = num_pmt;
return pmt_secs;
}
+/* find the PMT section associated with 'program_num' */
struct vidtv_psi_table_pmt
*vidtv_psi_find_pmt_sec(struct vidtv_psi_table_pmt **pmt_sections,
u16 nsections,
u16 program_num)
{
- /* find the PMT section associated with 'program_num' */
struct vidtv_psi_table_pmt *sec = NULL;
u32 i;
@@ -1320,3 +1538,488 @@ struct vidtv_psi_table_pmt
return NULL; /* not found */
}
+
+static void vidtv_psi_nit_table_update_sec_len(struct vidtv_psi_table_nit *nit)
+{
+ u16 length = 0;
+ struct vidtv_psi_table_transport *t = nit->transport;
+ u16 desc_loop_len;
+ u16 transport_loop_len = 0;
+
+ /*
+ * from immediately after 'section_length' until
+ * 'network_descriptor_length'
+ */
+ length += NIT_LEN_UNTIL_NETWORK_DESCRIPTOR_LEN;
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(nit->descriptor);
+ vidtv_psi_set_desc_loop_len(&nit->bitfield, desc_loop_len, 12);
+
+ length += desc_loop_len;
+
+ length += sizeof_field(struct vidtv_psi_table_nit, bitfield2);
+
+ while (t) {
+ /* skip both pointers at the end */
+ transport_loop_len += sizeof(struct vidtv_psi_table_transport) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_transport *);
+
+ length += transport_loop_len;
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(t->descriptor);
+ vidtv_psi_set_desc_loop_len(&t->bitfield, desc_loop_len, 12);
+
+ length += desc_loop_len;
+
+ t = t->next;
+ }
+
+ /* actually sets the transport stream loop length; TODO: rename the helper */
+ vidtv_psi_set_desc_loop_len(&nit->bitfield2, transport_loop_len, 12);
+ length += CRC_SIZE_IN_BYTES;
+
+ vidtv_psi_set_sec_len(&nit->header, length);
+}
+
+struct vidtv_psi_table_nit
+*vidtv_psi_nit_table_init(u16 network_id,
+ u16 transport_stream_id,
+ char *network_name,
+ struct vidtv_psi_desc_service_list_entry *service_list)
+{
+ struct vidtv_psi_table_transport *transport;
+ struct vidtv_psi_table_nit *nit;
+ const u16 SYNTAX = 0x1;
+ const u16 ONES = 0x03;
+ const u16 ONE = 0x1;
+
+ nit = kzalloc(sizeof(*nit), GFP_KERNEL);
+ if (!nit)
+ return NULL;
+
+ transport = kzalloc(sizeof(*transport), GFP_KERNEL);
+ if (!transport)
+ goto free_nit;
+
+ nit->header.table_id = 0x40; // ACTUAL_NETWORK
+
+ nit->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ONE << 14) | (ONES << 12));
+
+ nit->header.id = cpu_to_be16(network_id);
+ nit->header.current_next = ONE;
+
+ nit->header.version = 0x1f;
+
+ nit->header.one2 = ONES;
+ nit->header.section_id = 0;
+ nit->header.last_section = 0;
+
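+ /* the 12-bit loop length fields are filled in by vidtv_psi_nit_table_update_sec_len() */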
+ nit->bitfield = cpu_to_be16(0xf);
+ nit->bitfield2 = cpu_to_be16(0xf);
+
+ nit->descriptor = (struct vidtv_psi_desc *)
+ vidtv_psi_network_name_desc_init(NULL, network_name);
+ if (!nit->descriptor)
+ goto free_transport;
+
+ transport->transport_id = cpu_to_be16(transport_stream_id);
+ transport->network_id = cpu_to_be16(network_id);
+ transport->bitfield = cpu_to_be16(0xf);
+ transport->descriptor = (struct vidtv_psi_desc *)
+ vidtv_psi_service_list_desc_init(NULL, service_list);
+ if (!transport->descriptor)
+ goto free_nit_desc;
+
+ nit->transport = transport;
+
+ vidtv_psi_nit_table_update_sec_len(nit);
+
+ return nit;
+
+free_nit_desc:
+ vidtv_psi_desc_destroy((struct vidtv_psi_desc *)nit->descriptor);
+
+free_transport:
+ kfree(transport);
+free_nit:
+ kfree(nit);
+ return NULL;
+}
+
+u32 vidtv_psi_nit_write_into(struct vidtv_psi_nit_write_args *args)
+{
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .h = &args->nit->header,
+ .pid = VIDTV_NIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .from = &args->nit->bitfield,
+ .len = sizeof_field(struct vidtv_psi_table_nit, bitfield),
+ .pid = VIDTV_NIT_PID,
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_NIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_NIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct vidtv_psi_desc *table_descriptor = args->nit->descriptor;
+ struct vidtv_psi_table_transport *transport = args->nit->transport;
+ struct vidtv_psi_desc *transport_descriptor;
+ u32 crc = INITIAL_CRC;
+ u32 nbytes = 0;
+
+ vidtv_psi_nit_table_update_sec_len(args->nit);
+
+ h_args.continuity_counter = args->continuity_counter;
+ h_args.crc = &crc;
+
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
+
+ /* write the bitfield */
+
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
+ psi_args.crc = &crc;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ while (table_descriptor) {
+ /* write the descriptors, if any */
+ d_args.dest_offset = args->offset + nbytes;
+ d_args.desc = table_descriptor;
+ d_args.continuity_counter = args->continuity_counter;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(&d_args);
+
+ table_descriptor = table_descriptor->next;
+ }
+
+ /* write the second bitfield */
+ psi_args.from = &args->nit->bitfield2;
+ psi_args.len = sizeof_field(struct vidtv_psi_table_nit, bitfield2);
+ psi_args.dest_offset = args->offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
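+ /* for each transport entry, copy only the fixed-size fields; descriptors are written below */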
+ psi_args.len = sizeof_field(struct vidtv_psi_table_transport, transport_id) +
+ sizeof_field(struct vidtv_psi_table_transport, network_id) +
+ sizeof_field(struct vidtv_psi_table_transport, bitfield);
+ while (transport) {
+ /* write the transport sections, if any */
+ psi_args.from = transport;
+ psi_args.dest_offset = args->offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ transport_descriptor = transport->descriptor;
+
+ while (transport_descriptor) {
+ /* write the transport descriptors, if any */
+ d_args.dest_offset = args->offset + nbytes;
+ d_args.desc = transport_descriptor;
+ d_args.continuity_counter = args->continuity_counter;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(&d_args);
+
+ transport_descriptor = transport_descriptor->next;
+ }
+
+ transport = transport->next;
+ }
+
+ c_args.dest_offset = args->offset + nbytes;
+ c_args.crc = cpu_to_be32(crc);
+ c_args.continuity_counter = args->continuity_counter;
+
+ /* Write the CRC32 at the end */
+ nbytes += table_section_crc32_write_into(&c_args);
+
+ return nbytes;
+}
+
+static void vidtv_psi_transport_destroy(struct vidtv_psi_table_transport *t)
+{
+ struct vidtv_psi_table_transport *tmp_t = NULL;
+ struct vidtv_psi_table_transport *curr_t = t;
+
+ while (curr_t) {
+ tmp_t = curr_t;
+ curr_t = curr_t->next;
+ vidtv_psi_desc_destroy(tmp_t->descriptor);
+ kfree(tmp_t);
+ }
+}
+
+void vidtv_psi_nit_table_destroy(struct vidtv_psi_table_nit *nit)
+{
+ vidtv_psi_desc_destroy(nit->descriptor);
+ vidtv_psi_transport_destroy(nit->transport);
+ kfree(nit);
+}
+
+void vidtv_psi_eit_table_update_sec_len(struct vidtv_psi_table_eit *eit)
+{
+ struct vidtv_psi_table_eit_event *e = eit->event;
+ u16 desc_loop_len;
+ u16 length = 0;
+
+ /*
+ * from immediately after 'section_length' until
+ * 'last_table_id'
+ */
+ length += EIT_LEN_UNTIL_LAST_TABLE_ID;
+
+ while (e) {
+ /* skip both pointers at the end */
+ length += sizeof(struct vidtv_psi_table_eit_event) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_eit_event *);
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(e->descriptor);
+ vidtv_psi_set_desc_loop_len(&e->bitfield, desc_loop_len, 12);
+
+ length += desc_loop_len;
+
+ e = e->next;
+ }
+
+ length += CRC_SIZE_IN_BYTES;
+
+ vidtv_psi_set_sec_len(&eit->header, length);
+}
+
+void vidtv_psi_eit_event_assign(struct vidtv_psi_table_eit *eit,
+ struct vidtv_psi_table_eit_event *e)
+{
+ do {
+ if (e == eit->event)
+ return;
+
+ eit->event = e;
+ vidtv_psi_eit_table_update_sec_len(eit);
+
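+ /* if the section became too long, loop again with an empty event loop */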
+ e = NULL;
+ } while (vidtv_psi_get_sec_len(&eit->header) > EIT_MAX_SECTION_LEN);
+
+ vidtv_psi_update_version_num(&eit->header);
+}
+
+struct vidtv_psi_table_eit
+*vidtv_psi_eit_table_init(u16 network_id,
+ u16 transport_stream_id,
+ __be16 service_id)
+{
+ struct vidtv_psi_table_eit *eit;
+ const u16 SYNTAX = 0x1;
+ const u16 ONE = 0x1;
+ const u16 ONES = 0x03;
+
+ eit = kzalloc(sizeof(*eit), GFP_KERNEL);
+ if (!eit)
+ return NULL;
+
+ eit->header.table_id = 0x4e; /* actual_transport_stream: present/following */
+
+ eit->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ONE << 14) | (ONES << 12));
+
+ eit->header.id = service_id;
+ eit->header.current_next = ONE;
+
+ eit->header.version = 0x1f;
+
+ eit->header.one2 = ONES;
+ eit->header.section_id = 0;
+ eit->header.last_section = 0;
+
+ eit->transport_id = cpu_to_be16(transport_stream_id);
+ eit->network_id = cpu_to_be16(network_id);
+
+ eit->last_segment = eit->header.last_section; /* not implemented */
+ eit->last_table_id = eit->header.table_id; /* not implemented */
+
+ vidtv_psi_eit_table_update_sec_len(eit);
+
+ return eit;
+}
+
+u32 vidtv_psi_eit_write_into(struct vidtv_psi_eit_write_args *args)
+{
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .h = &args->eit->header,
+ .pid = VIDTV_EIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .len = sizeof_field(struct vidtv_psi_table_eit, transport_id) +
+ sizeof_field(struct vidtv_psi_table_eit, network_id) +
+ sizeof_field(struct vidtv_psi_table_eit, last_segment) +
+ sizeof_field(struct vidtv_psi_table_eit, last_table_id),
+ .pid = VIDTV_EIT_PID,
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_EIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_EIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct vidtv_psi_table_eit_event *event = args->eit->event;
+ struct vidtv_psi_desc *event_descriptor;
+ u32 crc = INITIAL_CRC;
+ u32 nbytes = 0;
+
+ vidtv_psi_eit_table_update_sec_len(args->eit);
+
+ h_args.continuity_counter = args->continuity_counter;
+ h_args.crc = &crc;
+
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
+
+ psi_args.from = &args->eit->transport_id;
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
+ psi_args.crc = &crc;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ /* skip both pointers at the end */
+ psi_args.len = sizeof(struct vidtv_psi_table_eit_event) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_eit_event *);
+ while (event) {
+ /* copy the events, if any */
+ psi_args.from = event;
+ psi_args.dest_offset = args->offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ event_descriptor = event->descriptor;
+
+ while (event_descriptor) {
+ /* copy the event descriptors, if any */
+ d_args.dest_offset = args->offset + nbytes;
+ d_args.desc = event_descriptor;
+ d_args.continuity_counter = args->continuity_counter;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(&d_args);
+
+ event_descriptor = event_descriptor->next;
+ }
+
+ event = event->next;
+ }
+
+ c_args.dest_offset = args->offset + nbytes;
+ c_args.crc = cpu_to_be32(crc);
+ c_args.continuity_counter = args->continuity_counter;
+
+ /* Write the CRC at the end */
+ nbytes += table_section_crc32_write_into(&c_args);
+
+ return nbytes;
+}
+
+struct vidtv_psi_table_eit_event
+*vidtv_psi_eit_event_init(struct vidtv_psi_table_eit_event *head, u16 event_id)
+{
+ const u8 DURATION[] = {0x23, 0x59, 0x59}; /* BCD encoded: 23:59:59 (hh:mm:ss) */
+ struct vidtv_psi_table_eit_event *e;
+ struct timespec64 ts;
+ struct tm time;
+ int mjd, l;
+ __be16 mjd_be;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ e->event_id = cpu_to_be16(event_id);
+
+ ts = ktime_to_timespec64(ktime_get_real());
+ time64_to_tm(ts.tv_sec, 0, &time);
+
+ /* Convert date to Modified Julian Date - per EN 300 468 Annex C */
+ if (time.tm_mon < 2)
+ l = 1;
+ else
+ l = 0;
+
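+ /*
+ * MJD = 14956 + D + int((Y - L) * 365.25) + int((M + 1 + L * 12) * 30.6001),
+ * with Y = years since 1900, M = 1-based month, D = day of month and
+ * L = 1 for January/February, 0 otherwise; tm_mon is 0-based, hence the "+ 2".
+ */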
+ mjd = 14956 + time.tm_mday;
+ mjd += (time.tm_year - l) * 36525 / 100;
+ mjd += (time.tm_mon + 2 + l * 12) * 306001 / 10000;
+ mjd_be = cpu_to_be16(mjd);
+
+ /*
+ * Store the MJD and hour/min/sec in the event.
+ *
+ * Let's make the event start on a full hour
+ */
+ memcpy(e->start_time, &mjd_be, sizeof(mjd_be));
+ e->start_time[2] = bin2bcd(time.tm_hour);
+ e->start_time[3] = 0;
+ e->start_time[4] = 0;
+
+ /*
+ * TODO: for now, the event will last for a day. Should be
+ * enough for testing purposes, but if one runs the driver
+ * for more than that, the current event will become invalid.
+ * So, we need better code here in order to change the start
+ * time once the event expires.
+ */
+ memcpy(e->duration, DURATION, sizeof(e->duration));
+
+ e->bitfield = cpu_to_be16(RUNNING << 13);
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = e;
+ }
+
+ return e;
+}
+
+void vidtv_psi_eit_event_destroy(struct vidtv_psi_table_eit_event *e)
+{
+ struct vidtv_psi_table_eit_event *tmp_e = NULL;
+ struct vidtv_psi_table_eit_event *curr_e = e;
+
+ while (curr_e) {
+ tmp_e = curr_e;
+ curr_e = curr_e->next;
+ vidtv_psi_desc_destroy(tmp_e->descriptor);
+ kfree(tmp_e);
+ }
+}
+
+void vidtv_psi_eit_table_destroy(struct vidtv_psi_table_eit *eit)
+{
+ vidtv_psi_eit_event_destroy(eit->event);
+ kfree(eit);
+}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.h b/drivers/media/test-drivers/vidtv/vidtv_psi.h
index 3f962cc78278..340c9fb8d583 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_psi.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.h
@@ -6,10 +6,6 @@
* technically be broken into one or more sections, we do not do this here,
* hence 'table' and 'section' are interchangeable for vidtv.
*
- * This code currently supports three tables: PAT, PMT and SDT. These are the
- * bare minimum to get userspace to recognize our MPEG transport stream. It can
- * be extended to support more PSI tables in the future.
- *
* Copyright (C) 2020 Daniel W. S. Almeida
*/
@@ -17,7 +13,6 @@
#define VIDTV_PSI_H
#include <linux/types.h>
-#include <asm/byteorder.h>
/*
* all section lengths start immediately after the 'section_length' field
@@ -27,20 +22,28 @@
#define PAT_LEN_UNTIL_LAST_SECTION_NUMBER 5
#define PMT_LEN_UNTIL_PROGRAM_INFO_LENGTH 9
#define SDT_LEN_UNTIL_RESERVED_FOR_FUTURE_USE 8
+#define NIT_LEN_UNTIL_NETWORK_DESCRIPTOR_LEN 7
+#define EIT_LEN_UNTIL_LAST_TABLE_ID 11
#define MAX_SECTION_LEN 1021
+#define EIT_MAX_SECTION_LEN 4093 /* see ETSI 300 468 v.1.10.1 p. 26 */
#define VIDTV_PAT_PID 0 /* mandated by the specs */
#define VIDTV_SDT_PID 0x0011 /* mandated by the specs */
+#define VIDTV_NIT_PID 0x0010 /* mandated by the specs */
+#define VIDTV_EIT_PID 0x0012 /* mandated by the specs */
enum vidtv_psi_descriptors {
REGISTRATION_DESCRIPTOR = 0x05, /* See ISO/IEC 13818-1 section 2.6.8 */
+ NETWORK_NAME_DESCRIPTOR = 0x40, /* See ETSI EN 300 468 section 6.2.27 */
+ SERVICE_LIST_DESCRIPTOR = 0x41, /* See ETSI EN 300 468 section 6.2.35 */
SERVICE_DESCRIPTOR = 0x48, /* See ETSI EN 300 468 section 6.2.33 */
+ SHORT_EVENT_DESCRIPTOR = 0x4d, /* See ETSI EN 300 468 section 6.2.37 */
};
enum vidtv_psi_stream_types {
STREAM_PRIVATE_DATA = 0x06, /* see ISO/IEC 13818-1 2000 p. 48 */
};
-/**
+/*
* struct vidtv_psi_desc - A generic PSI descriptor type.
* The descriptor length is an 8-bit field specifying the total number of bytes of the data portion
* of the descriptor following the byte defining the value of this field.
@@ -52,7 +55,7 @@ struct vidtv_psi_desc {
u8 data[];
} __packed;
-/**
+/*
* struct vidtv_psi_desc_service - Service descriptor.
* See ETSI EN 300 468 section 6.2.33.
*/
@@ -68,7 +71,7 @@ struct vidtv_psi_desc_service {
char *service_name;
} __packed;
-/**
+/*
* struct vidtv_psi_desc_registration - A registration descriptor.
* See ISO/IEC 13818-1 section 2.6.8
*/
@@ -90,7 +93,56 @@ struct vidtv_psi_desc_registration {
u8 additional_identification_info[];
} __packed;
-/**
+/*
+ * struct vidtv_psi_desc_network_name - A network name descriptor
+ * see ETSI EN 300 468 v1.15.1 section 6.2.27
+ */
+struct vidtv_psi_desc_network_name {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+ char *network_name;
+} __packed;
+
+struct vidtv_psi_desc_service_list_entry {
+ __be16 service_id;
+ u8 service_type;
+ struct vidtv_psi_desc_service_list_entry *next;
+} __packed;
+
+/*
+ * struct vidtv_psi_desc_service_list - A service list descriptor
+ * see ETSI EN 300 468 v1.15.1 section 6.2.35
+ */
+struct vidtv_psi_desc_service_list {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+ struct vidtv_psi_desc_service_list_entry *service_list;
+} __packed;
+
+/*
+ * struct vidtv_psi_desc_short_event - A short event descriptor
+ * see ETSI EN 300 468 v1.15.1 section 6.2.37
+ */
+struct vidtv_psi_desc_short_event {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+ char *iso_language_code;
+ u8 event_name_len;
+ char *event_name;
+ u8 text_len;
+ char *text;
+} __packed;
+
+struct vidtv_psi_desc_short_event
+*vidtv_psi_short_event_desc_init(struct vidtv_psi_desc *head,
+ char *iso_language_code,
+ char *event_name,
+ char *text);
+
+/*
* struct vidtv_psi_table_header - A header that is present for all PSI tables.
*/
struct vidtv_psi_table_header {
@@ -106,7 +158,7 @@ struct vidtv_psi_table_header {
u8 last_section; /* last_section_number */
} __packed;
-/**
+/*
* struct vidtv_psi_table_pat_program - A single program in the PAT
* See ISO/IEC 13818-1 : 2000 p.43
*/
@@ -116,17 +168,18 @@ struct vidtv_psi_table_pat_program {
struct vidtv_psi_table_pat_program *next;
} __packed;
-/**
+/*
* struct vidtv_psi_table_pat - The Program Association Table (PAT)
* See ISO/IEC 13818-1 : 2000 p.43
*/
struct vidtv_psi_table_pat {
struct vidtv_psi_table_header header;
- u16 programs; /* Included by libdvbv5, not part of the table and not actually serialized */
+ u16 num_pat; /* number of PAT entries; not part of the serialized table */
+ u16 num_pmt; /* number of PMT sections; not part of the serialized table */
struct vidtv_psi_table_pat_program *program;
} __packed;
-/**
+/*
* struct vidtv_psi_table_sdt_service - Represents a service in the SDT.
* see ETSI EN 300 468 v1.15.1 section 5.2.3.
*/
@@ -140,7 +193,7 @@ struct vidtv_psi_table_sdt_service {
struct vidtv_psi_table_sdt_service *next;
} __packed;
-/**
+/*
* struct vidtv_psi_table_sdt - Represents the Service Description Table
* see ETSI EN 300 468 v1.15.1 section 5.2.3.
*/
@@ -152,7 +205,7 @@ struct vidtv_psi_table_sdt {
struct vidtv_psi_table_sdt_service *service;
} __packed;
-/**
+/*
* enum service_running_status - Status of a SDT service.
* see ETSI EN 300 468 v1.15.1 section 5.2.3 table 6.
*/
@@ -160,16 +213,17 @@ enum service_running_status {
RUNNING = 0x4,
};
-/**
+/*
* enum service_type - The type of a SDT service.
* see ETSI EN 300 468 v1.15.1 section 6.2.33, table 81.
*/
enum service_type {
/* see ETSI EN 300 468 v1.15.1 p. 77 */
DIGITAL_TELEVISION_SERVICE = 0x1,
+ DIGITAL_RADIO_SOUND_SERVICE = 0x2,
};
-/**
+/*
* struct vidtv_psi_table_pmt_stream - A single stream in the PMT.
* See ISO/IEC 13818-1 : 2000 p.46.
*/
@@ -181,7 +235,7 @@ struct vidtv_psi_table_pmt_stream {
struct vidtv_psi_table_pmt_stream *next;
} __packed;
-/**
+/*
* struct vidtv_psi_table_pmt - The Program Map Table (PMT).
* See ISO/IEC 13818-1 : 2000 p.46.
*/
@@ -290,6 +344,13 @@ struct vidtv_psi_desc_registration
u8 *additional_ident_info,
u32 additional_info_len);
+struct vidtv_psi_desc_network_name
+*vidtv_psi_network_name_desc_init(struct vidtv_psi_desc *head, char *network_name);
+
+struct vidtv_psi_desc_service_list
+*vidtv_psi_service_list_desc_init(struct vidtv_psi_desc *head,
+ struct vidtv_psi_desc_service_list_entry *entry);
+
struct vidtv_psi_table_pat_program
*vidtv_psi_pat_program_init(struct vidtv_psi_table_pat_program *head,
u16 service_id,
@@ -305,11 +366,14 @@ struct vidtv_psi_table_pat *vidtv_psi_pat_table_init(u16 transport_stream_id);
struct vidtv_psi_table_pmt *vidtv_psi_pmt_table_init(u16 program_number,
u16 pcr_pid);
-struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 transport_stream_id);
+struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 network_id,
+ u16 transport_stream_id);
struct vidtv_psi_table_sdt_service*
vidtv_psi_sdt_service_init(struct vidtv_psi_table_sdt_service *head,
- u16 service_id);
+ u16 service_id,
+ bool eit_schedule,
+ bool eit_present_following);
void
vidtv_psi_desc_destroy(struct vidtv_psi_desc *desc);
@@ -413,7 +477,6 @@ struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc);
* vidtv_psi_pmt_create_sec_for_each_pat_entry - Create a PMT section for each
* program found in the PAT
* @pat: The PAT to look for programs.
- * @s: The stream loop (one or more streams)
* @pcr_pid: packet ID for the PCR to be used for the program described in this
* PMT section
*/
@@ -492,7 +555,7 @@ struct vidtv_psi_pat_write_args {
* equal to the size of the PAT, since more space is needed for TS headers during TS
* encapsulation.
*/
-u32 vidtv_psi_pat_write_into(struct vidtv_psi_pat_write_args args);
+u32 vidtv_psi_pat_write_into(struct vidtv_psi_pat_write_args *args);
/**
* struct vidtv_psi_sdt_write_args - Arguments for writing a SDT table
@@ -524,16 +587,18 @@ struct vidtv_psi_sdt_write_args {
* equal to the size of the SDT, since more space is needed for TS headers during TS
* encapsulation.
*/
-u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args args);
+u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args *args);
/**
* struct vidtv_psi_pmt_write_args - Arguments for writing a PMT section
* @buf: The destination buffer.
* @offset: The offset into the destination buffer.
* @pmt: A pointer to the PMT.
+ * @pid: The TS PID for the PMT section packets
* @buf_sz: The size of the destination buffer.
* @continuity_counter: A pointer to the CC. Incremented on every new packet.
- *
+ * @pcr_pid: The TS PID that carries the PCR. All channels will share the
+ * same PCR.
*/
struct vidtv_psi_pmt_write_args {
char *buf;
@@ -557,7 +622,7 @@ struct vidtv_psi_pmt_write_args {
* equal to the size of the PMT section, since more space is needed for TS headers
* during TS encapsulation.
*/
-u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args args);
+u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args);
/**
* vidtv_psi_find_pmt_sec - Finds the PMT section for 'program_num'
@@ -574,4 +639,171 @@ struct vidtv_psi_table_pmt *vidtv_psi_find_pmt_sec(struct vidtv_psi_table_pmt **
u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p);
u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *s);
+/**
+ * struct vidtv_psi_table_transport - An entry in the TS loop for the NIT and/or other tables.
+ * See ETSI 300 468 section 5.2.1
+ * @transport_id: The TS ID being described
+ * @network_id: The network_id that contains the TS ID
+ * @bitfield: Contains the descriptor loop length
+ * @descriptor: A descriptor loop
+ * @next: Pointer to the next entry
+ *
+ */
+struct vidtv_psi_table_transport {
+ __be16 transport_id;
+ __be16 network_id;
+ __be16 bitfield; /* desc_len: 12, reserved: 4 */
+ struct vidtv_psi_desc *descriptor;
+ struct vidtv_psi_table_transport *next;
+} __packed;
+
+/**
+ * struct vidtv_psi_table_nit - A Network Information Table (NIT). See ETSI 300
+ * 468 section 5.2.1
+ * @header: A PSI table header
+ * @bitfield: Contains the network descriptor length
+ * @descriptor: A descriptor loop describing the network
+ * @bitfield2: Contains the transport stream loop length
+ * @transport: The transport stream loop
+ *
+ */
+struct vidtv_psi_table_nit {
+ struct vidtv_psi_table_header header;
+ __be16 bitfield; /* network_desc_len: 12, reserved: 4 */
+ struct vidtv_psi_desc *descriptor;
+ __be16 bitfield2; /* ts_loop_len: 12, reserved: 4 */
+ struct vidtv_psi_table_transport *transport;
+} __packed;
+
+struct vidtv_psi_table_nit
+*vidtv_psi_nit_table_init(u16 network_id,
+ u16 transport_stream_id,
+ char *network_name,
+ struct vidtv_psi_desc_service_list_entry *service_list);
+
+/**
+ * struct vidtv_psi_nit_write_args - Arguments for writing a NIT section
+ * @buf: The destination buffer.
+ * @offset: The offset into the destination buffer.
+ * @nit: A pointer to the NIT
+ * @buf_sz: The size of the destination buffer.
+ * @continuity_counter: A pointer to the CC. Incremented on every new packet.
+ *
+ */
+struct vidtv_psi_nit_write_args {
+ char *buf;
+ u32 offset;
+ struct vidtv_psi_table_nit *nit;
+ u32 buf_sz;
+ u8 *continuity_counter;
+};
+
+/**
+ * vidtv_psi_nit_write_into - Write NIT as MPEG-TS packets into a buffer.
+ * @args: an instance of struct vidtv_psi_nit_write_args
+ *
+ * This function writes the MPEG TS packets for a NIT table into a buffer.
+ * Calling code will usually generate the NIT via a call to its init function
+ * and thus is responsible for freeing it.
+ *
+ * Return: The number of bytes written into the buffer. This is NOT
+ * equal to the size of the NIT, since more space is needed for TS headers during TS
+ * encapsulation.
+ */
+u32 vidtv_psi_nit_write_into(struct vidtv_psi_nit_write_args *args);
+
+void vidtv_psi_nit_table_destroy(struct vidtv_psi_table_nit *nit);
+
+/*
+ * struct vidtv_psi_desc_short_event - A short event descriptor
+ * see ETSI EN 300 468 v1.15.1 section 6.2.37
+ */
+struct vidtv_psi_table_eit_event {
+ __be16 event_id;
+ u8 start_time[5];
+ u8 duration[3];
+ __be16 bitfield; /* desc_length: 12, free_CA_mode: 1, running_status: 3 */
+ struct vidtv_psi_desc *descriptor;
+ struct vidtv_psi_table_eit_event *next;
+} __packed;
+
+/*
+ * struct vidtv_psi_table_eit - An Event Information Table (EIT)
+ * See ETSI 300 468 section 5.2.4
+ */
+struct vidtv_psi_table_eit {
+ struct vidtv_psi_table_header header;
+ __be16 transport_id;
+ __be16 network_id;
+ u8 last_segment;
+ u8 last_table_id;
+ struct vidtv_psi_table_eit_event *event;
+} __packed;
+
+struct vidtv_psi_table_eit
+*vidtv_psi_eit_table_init(u16 network_id,
+ u16 transport_stream_id,
+ __be16 service_id);
+
+/**
+ * struct vidtv_psi_eit_write_args - Arguments for writing an EIT section
+ * @buf: The destination buffer.
+ * @offset: The offset into the destination buffer.
+ * @eit: A pointer to the EIT
+ * @buf_sz: The size of the destination buffer.
+ * @continuity_counter: A pointer to the CC. Incremented on every new packet.
+ *
+ */
+struct vidtv_psi_eit_write_args {
+ char *buf;
+ u32 offset;
+ struct vidtv_psi_table_eit *eit;
+ u32 buf_sz;
+ u8 *continuity_counter;
+};
+
+/**
+ * vidtv_psi_eit_write_into - Write EIT as MPEG-TS packets into a buffer.
+ * @args: an instance of struct vidtv_psi_eit_write_args
+ *
+ * This function writes the MPEG TS packets for an EIT table into a buffer.
+ * Calling code will usually generate the EIT via a call to its init function
+ * and thus is responsible for freeing it.
+ *
+ * Return: The number of bytes written into the buffer. This is NOT
+ * equal to the size of the EIT, since more space is needed for TS headers during TS
+ * encapsulation.
+ */
+u32 vidtv_psi_eit_write_into(struct vidtv_psi_eit_write_args *args);
+
+void vidtv_psi_eit_table_destroy(struct vidtv_psi_table_eit *eit);
+
+/**
+ * vidtv_psi_eit_table_update_sec_len - Recompute and update the EIT section length.
+ * @eit: The EIT whose length is to be updated.
+ *
+ * This will traverse the table and accumulate the length of its components,
+ * which is then used to replace the 'section_length' field.
+ *
+ * If section_length > EIT_MAX_SECTION_LEN, the operation fails.
+ */
+void vidtv_psi_eit_table_update_sec_len(struct vidtv_psi_table_eit *eit);
+
+/**
+ * vidtv_psi_eit_event_assign - Assigns the event loop to the EIT.
+ * @eit: The EIT to assign to.
+ * @e: The event loop
+ *
+ * This replaces the current event loop in the table.
+ * This will assign ownership of the event loop to the table, i.e. the table
+ * will free this event loop when a call to its destroy function is made.
+ */
+void vidtv_psi_eit_event_assign(struct vidtv_psi_table_eit *eit,
+ struct vidtv_psi_table_eit_event *e);
+
+struct vidtv_psi_table_eit_event
+*vidtv_psi_eit_event_init(struct vidtv_psi_table_eit_event *head, u16 event_id);
+
+void vidtv_psi_eit_event_destroy(struct vidtv_psi_table_eit_event *e);
+
#endif // VIDTV_PSI_H
diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
index a447ccbd68d5..ce7dd6cafc8b 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
@@ -17,23 +17,22 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
-#include <linux/types.h>
-#include <linux/slab.h>
+#include <linux/bug.h>
#include <linux/crc32.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
+#include <linux/fixp-arith.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
-#include <linux/fixp-arith.h>
-
-#include <linux/math64.h>
-#include <asm/byteorder.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
-#include "vidtv_s302m.h"
-#include "vidtv_encoder.h"
#include "vidtv_common.h"
+#include "vidtv_encoder.h"
+#include "vidtv_s302m.h"
#define S302M_SAMPLING_RATE_HZ 48000
#define PES_PRIVATE_STREAM_1 0xbd /* PES: private_stream_1 */
@@ -79,8 +78,9 @@ struct tone_duration {
int duration;
};
-#define COMPASS 120 /* beats per minute (Allegro) */
-static const struct tone_duration beethoven_5th_symphony[] = {
+#define COMPASS 100 /* beats per minute */
+static const struct tone_duration beethoven_fur_elise[] = {
+ { NOTE_SILENT, 512},
{ NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
{ NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_B_5, 128},
{ NOTE_D_6, 128}, { NOTE_C_6, 128}, { NOTE_A_3, 128},
@@ -121,31 +121,36 @@ static const struct tone_duration beethoven_5th_symphony[] = {
{ NOTE_E_5, 128}, { NOTE_D_5, 128}, { NOTE_A_3, 128},
{ NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_E_4, 128},
{ NOTE_D_5, 128}, { NOTE_C_5, 128}, { NOTE_E_3, 128},
- { NOTE_E_4, 128}, { NOTE_E_5, 255}, { NOTE_E_6, 128},
- { NOTE_E_5, 128}, { NOTE_E_6, 128}, { NOTE_E_5, 255},
+ { NOTE_E_4, 128}, { NOTE_E_5, 128}, { NOTE_E_5, 128},
+ { NOTE_E_6, 128}, { NOTE_E_5, 128}, { NOTE_E_6, 128},
+ { NOTE_E_5, 128}, { NOTE_E_5, 128}, { NOTE_DS_5, 128},
+ { NOTE_E_5, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
{ NOTE_DS_5, 128}, { NOTE_E_5, 128}, { NOTE_DS_6, 128},
- { NOTE_E_6, 128}, { NOTE_DS_5, 128}, { NOTE_E_5, 128},
- { NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_DS_6, 128},
{ NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
- { NOTE_B_5, 128}, { NOTE_D_6, 128}, { NOTE_C_6, 128},
- { NOTE_A_3, 128}, { NOTE_E_4, 128}, { NOTE_A_4, 128},
- { NOTE_C_5, 128}, { NOTE_E_5, 128}, { NOTE_A_5, 128},
- { NOTE_E_3, 128}, { NOTE_E_4, 128}, { NOTE_GS_4, 128},
- { NOTE_E_5, 128}, { NOTE_GS_5, 128}, { NOTE_B_5, 128},
- { NOTE_A_3, 128}, { NOTE_E_4, 128}, { NOTE_A_4, 128},
- { NOTE_E_5, 128}, { NOTE_E_6, 128}, { NOTE_DS_6, 128},
+ { NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_B_5, 128},
+ { NOTE_D_6, 128}, { NOTE_C_6, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_C_5, 128},
+ { NOTE_E_5, 128}, { NOTE_A_5, 128}, { NOTE_E_3, 128},
+ { NOTE_E_4, 128}, { NOTE_GS_4, 128}, { NOTE_E_5, 128},
+ { NOTE_GS_5, 128}, { NOTE_B_5, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_E_5, 128},
{ NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
- { NOTE_B_5, 128}, { NOTE_D_6, 128}, { NOTE_C_6, 128},
- { NOTE_A_3, 128}, { NOTE_E_4, 128}, { NOTE_A_4, 128},
- { NOTE_C_5, 128}, { NOTE_E_5, 128}, { NOTE_A_5, 128},
- { NOTE_E_3, 128}, { NOTE_E_4, 128}, { NOTE_GS_4, 128},
- { NOTE_E_5, 128}, { NOTE_C_6, 128}, { NOTE_B_5, 128},
- { NOTE_C_5, 255}, { NOTE_C_5, 255}, { NOTE_SILENT, 512},
+ { NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_B_5, 128},
+ { NOTE_D_6, 128}, { NOTE_C_6, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_C_5, 128},
+ { NOTE_E_5, 128}, { NOTE_A_5, 128}, { NOTE_E_3, 128},
+ { NOTE_E_4, 128}, { NOTE_GS_4, 128}, { NOTE_E_5, 128},
+ { NOTE_C_6, 128}, { NOTE_B_5, 128}, { NOTE_A_5, 512},
+ { NOTE_SILENT, 256},
};
static struct vidtv_access_unit *vidtv_s302m_access_unit_init(struct vidtv_access_unit *head)
{
- struct vidtv_access_unit *au = kzalloc(sizeof(*au), GFP_KERNEL);
+ struct vidtv_access_unit *au;
+
+ au = kzalloc(sizeof(*au), GFP_KERNEL);
+ if (!au)
+ return NULL;
if (head) {
while (head->next)
@@ -196,10 +201,10 @@ static void vidtv_s302m_alloc_au(struct vidtv_encoder *e)
static void
vidtv_s302m_compute_sample_count_from_video(struct vidtv_encoder *e)
{
- struct vidtv_access_unit *au = e->access_units;
struct vidtv_access_unit *sync_au = e->sync->access_units;
- u32 vau_duration_usecs;
+ struct vidtv_access_unit *au = e->access_units;
u32 sample_duration_usecs;
+ u32 vau_duration_usecs;
u32 s;
vau_duration_usecs = USEC_PER_SEC / e->sync->sampling_rate_hz;
@@ -230,36 +235,32 @@ static u16 vidtv_s302m_get_sample(struct vidtv_encoder *e)
{
u16 sample;
int pos;
+ struct vidtv_s302m_ctx *ctx = e->ctx;
if (!e->src_buf) {
/*
* Simple tone generator: play the tones at the
- * beethoven_5th_symphony array.
+ * beethoven_fur_elise array.
*/
- if (e->last_duration <= 0) {
- if (e->src_buf_offset >= ARRAY_SIZE(beethoven_5th_symphony))
+ if (ctx->last_duration <= 0) {
+ if (e->src_buf_offset >= ARRAY_SIZE(beethoven_fur_elise))
e->src_buf_offset = 0;
- e->last_tone = beethoven_5th_symphony[e->src_buf_offset].note;
- e->last_duration = beethoven_5th_symphony[e->src_buf_offset].duration * S302M_SAMPLING_RATE_HZ / COMPASS / 5;
+ ctx->last_tone = beethoven_fur_elise[e->src_buf_offset].note;
+ ctx->last_duration = beethoven_fur_elise[e->src_buf_offset].duration *
+ S302M_SAMPLING_RATE_HZ / COMPASS / 5;
e->src_buf_offset++;
- e->note_offset = 0;
+ ctx->note_offset = 0;
} else {
- e->last_duration--;
+ ctx->last_duration--;
}
- /* Handle silent */
- if (!e->last_tone) {
- e->src_buf_offset = 0;
+ /* Handle pause notes */
+ if (!ctx->last_tone)
return 0x8000;
- }
- pos = (2 * PI * e->note_offset * e->last_tone / S302M_SAMPLING_RATE_HZ);
-
- if (pos == 360)
- e->note_offset = 0;
- else
- e->note_offset++;
+ pos = (2 * PI * ctx->note_offset * ctx->last_tone) / S302M_SAMPLING_RATE_HZ;
+ ctx->note_offset++;
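+ /* fixp_sin32() is signed 32-bit; scale to 16 bits and bias to mid-scale (0x8000) */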
return (fixp_sin32(pos % (2 * PI)) >> 16) + 0x8000;
}
@@ -289,9 +290,9 @@ static u16 vidtv_s302m_get_sample(struct vidtv_encoder *e)
static u32 vidtv_s302m_write_frame(struct vidtv_encoder *e,
u16 sample)
{
- u32 nbytes = 0;
- struct vidtv_s302m_frame_16 f = {};
struct vidtv_s302m_ctx *ctx = e->ctx;
+ struct vidtv_s302m_frame_16 f = {};
+ u32 nbytes = 0;
/* from ffmpeg: see s302enc.c */
@@ -388,6 +389,8 @@ static void vidtv_s302m_write_frames(struct vidtv_encoder *e)
static void *vidtv_s302m_encode(struct vidtv_encoder *e)
{
+ struct vidtv_s302m_ctx *ctx = e->ctx;
+
/*
* According to SMPTE 302M, an audio access unit is specified as those
* AES3 words that are associated with a corresponding video frame.
@@ -401,8 +404,6 @@ static void *vidtv_s302m_encode(struct vidtv_encoder *e)
* ffmpeg
*/
- struct vidtv_s302m_ctx *ctx = e->ctx;
-
vidtv_s302m_access_unit_destroy(e);
vidtv_s302m_alloc_au(e);
@@ -440,8 +441,13 @@ static u32 vidtv_s302m_clear(struct vidtv_encoder *e)
struct vidtv_encoder
*vidtv_s302m_encoder_init(struct vidtv_s302m_encoder_init_args args)
{
- struct vidtv_encoder *e = kzalloc(sizeof(*e), GFP_KERNEL);
u32 priv_sz = sizeof(struct vidtv_s302m_ctx);
+ struct vidtv_s302m_ctx *ctx;
+ struct vidtv_encoder *e;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return NULL;
e->id = S302M;
@@ -453,14 +459,19 @@ struct vidtv_encoder
e->encoder_buf_offset = 0;
e->sample_count = 0;
- e->last_duration = 0;
e->src_buf = (args.src_buf) ? args.src_buf : NULL;
e->src_buf_sz = (args.src_buf) ? args.src_buf_sz : 0;
e->src_buf_offset = 0;
e->is_video_encoder = false;
- e->ctx = kzalloc(priv_sz, GFP_KERNEL);
+
+ ctx = kzalloc(priv_sz, GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ e->ctx = ctx;
+ ctx->last_duration = 0;
e->encode = vidtv_s302m_encode;
e->clear = vidtv_s302m_clear;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.h b/drivers/media/test-drivers/vidtv/vidtv_s302m.h
index eca5e3150ede..9cc94e4a8924 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_s302m.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.h
@@ -19,7 +19,6 @@
#define VIDTV_S302M_H
#include <linux/types.h>
-#include <asm/byteorder.h>
#include "vidtv_encoder.h"
@@ -34,14 +33,20 @@
* @enc: A pointer to the containing encoder structure.
* @frame_index: The current frame in a block
* @au_count: The total number of access units encoded up to now
+ * @last_duration: Remaining duration, in samples, of the tone currently being played
+ * @note_offset: Sample offset within the current tone, used to compute the sine phase
+ * @last_tone: Tone currently being played
*/
struct vidtv_s302m_ctx {
struct vidtv_encoder *enc;
u32 frame_index;
u32 au_count;
+ int last_duration;
+ unsigned int note_offset;
+ enum musical_notes last_tone;
};
-/**
+/*
* struct vidtv_smpte_s302m_es - s302m MPEG Elementary Stream header.
*
* See SMPTE 302M 2007 table 1.
diff --git a/drivers/media/test-drivers/vidtv/vidtv_ts.c b/drivers/media/test-drivers/vidtv/vidtv_ts.c
index 190b9e4438dc..ca4bb9c40b78 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_ts.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_ts.c
@@ -9,14 +9,13 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
+#include <linux/math64.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
-#include <linux/math64.h>
-#include <asm/byteorder.h>
-#include "vidtv_ts.h"
#include "vidtv_common.h"
+#include "vidtv_ts.h"
static u32 vidtv_ts_write_pcr_bits(u8 *to, u32 to_offset, u64 pcr)
{
diff --git a/drivers/media/test-drivers/vidtv/vidtv_ts.h b/drivers/media/test-drivers/vidtv/vidtv_ts.h
index 83dcc9183b45..10838a2b8389 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_ts.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_ts.h
@@ -11,7 +11,6 @@
#define VIDTV_TS_H
#include <linux/types.h>
-#include <asm/byteorder.h>
#define TS_SYNC_BYTE 0x47
#define TS_PACKET_LEN 188
@@ -54,7 +53,7 @@ struct vidtv_mpeg_ts {
* @dest_offset: The byte offset into the buffer.
* @pid: The TS PID for the PCR packets.
* @buf_sz: The size of the buffer in bytes.
- * @countinuity_counter: The TS continuity_counter.
+ * @continuity_counter: The TS continuity_counter.
* @pcr: A sample from the system clock.
*/
struct pcr_write_args {
@@ -71,7 +70,7 @@ struct pcr_write_args {
* @dest_buf: The buffer to write into.
* @dest_offset: The byte offset into the buffer.
* @buf_sz: The size of the buffer in bytes.
- * @countinuity_counter: The TS continuity_counter.
+ * @continuity_counter: The TS continuity_counter.
*/
struct null_packet_write_args {
void *dest_buf;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.c b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
index 9bc49e099f65..14b6bc902ee1 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_tuner.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
@@ -13,11 +13,12 @@
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/types.h>
+
#include <media/dvb_frontend.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
#include "vidtv_tuner.h"
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.h b/drivers/media/test-drivers/vidtv/vidtv_tuner.h
index 8455b2d564b3..fd55346a5c87 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_tuner.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.h
@@ -11,6 +11,7 @@
#define VIDTV_TUNER_H
#include <linux/types.h>
+
#include <media/dvb_frontend.h>
#define NUM_VALID_TUNER_FREQS 8
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index d3c5cc513c8f..0c352b39ad4b 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -215,8 +215,17 @@ static int gpio_nand_setup_interface(struct nand_chip *this, int csline,
return 0;
}
+static int gpio_nand_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
static const struct nand_controller_ops gpio_nand_ops = {
.exec_op = gpio_nand_exec_op,
+ .attach_chip = gpio_nand_attach_chip,
.setup_interface = gpio_nand_setup_interface,
};
@@ -260,9 +269,6 @@ static int gpio_nand_probe(struct platform_device *pdev)
return err;
}
- this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- this->ecc.algo = NAND_ECC_ALGO_HAMMING;
-
platform_set_drvdata(pdev, priv);
/* Set chip enabled but write protected */
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 79b057400fe9..7892022bd6dd 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -236,8 +236,17 @@ static int au1550nd_exec_op(struct nand_chip *this,
return ret;
}
+static int au1550nd_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
static const struct nand_controller_ops au1550nd_ops = {
.exec_op = au1550nd_exec_op,
+ .attach_chip = au1550nd_attach_chip,
};
static int au1550nd_probe(struct platform_device *pdev)
@@ -294,8 +303,6 @@ static int au1550nd_probe(struct platform_device *pdev)
nand_controller_init(&ctx->controller);
ctx->controller.ops = &au1550nd_ops;
this->controller = &ctx->controller;
- this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- this->ecc.algo = NAND_ECC_ALGO_HAMMING;
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index b7f3f6347761..282203debd0c 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -243,8 +243,24 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
static struct cs553x_nand_controller *controllers[4];
+static int cs553x_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.hwctl = cs_enable_hwecc;
+ chip->ecc.calculate = cs_calculate_ecc;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.strength = 1;
+
+ return 0;
+}
+
static const struct nand_controller_ops cs553x_nand_controller_ops = {
.exec_op = cs553x_exec_op,
+ .attach_chip = cs553x_attach_chip,
};
static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
@@ -286,14 +302,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
goto out_mtd;
}
- this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- this->ecc.size = 256;
- this->ecc.bytes = 3;
- this->ecc.hwctl = cs_enable_hwecc;
- this->ecc.calculate = cs_calculate_ecc;
- this->ecc.correct = nand_correct_data;
- this->ecc.strength = 1;
-
/* Enable the following for a flash based bad block table */
this->bbt_options = NAND_BBT_USE_FLASH;
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 427f320fb79b..f8c36d19ab47 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -585,6 +585,10 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
if (IS_ERR(pdata))
return PTR_ERR(pdata);
+ /* Use board-specific ECC config */
+ info->chip.ecc.engine_type = pdata->engine_type;
+ info->chip.ecc.placement = pdata->ecc_placement;
+
switch (info->chip.ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
pdata->ecc_bits = 0;
@@ -850,10 +854,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->mask_ale = pdata->mask_ale ? : MASK_ALE;
info->mask_cle = pdata->mask_cle ? : MASK_CLE;
- /* Use board-specific ECC config */
- info->chip.ecc.engine_type = pdata->engine_type;
- info->chip.ecc.placement = pdata->ecc_placement;
-
spin_lock_irq(&davinci_nand_lock);
/* put CSxNAND into NAND mode */
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 94432a453e5e..26b265e4384a 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1269,12 +1269,31 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
return 1;
}
+static int doc200x_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
+ chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ chip->ecc.hwctl = doc200x_enable_hwecc;
+ chip->ecc.calculate = doc200x_calculate_ecc;
+ chip->ecc.correct = doc200x_correct_data;
+
+ return 0;
+}
+
static const struct nand_controller_ops doc200x_ops = {
.exec_op = doc200x_exec_op,
+ .attach_chip = doc200x_attach_chip,
};
static const struct nand_controller_ops doc2001plus_ops = {
.exec_op = doc2001plus_exec_op,
+ .attach_chip = doc200x_attach_chip,
};
static int __init doc_probe(unsigned long physadr)
@@ -1452,16 +1471,6 @@ static int __init doc_probe(unsigned long physadr)
nand->controller = &doc->base;
nand_set_controller_data(nand, doc);
- nand->ecc.hwctl = doc200x_enable_hwecc;
- nand->ecc.calculate = doc200x_calculate_ecc;
- nand->ecc.correct = doc200x_correct_data;
-
- nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- nand->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
- nand->ecc.size = 512;
- nand->ecc.bytes = 6;
- nand->ecc.strength = 2;
- nand->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
nand->bbt_options = NAND_BBT_USE_FLASH;
/* Skip the automatic BBT scan so we can run it manually */
nand->options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 4191831df182..c88421a1c078 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -880,6 +880,20 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
struct mtd_info *mtd = nand_to_mtd(nand);
struct fsmc_nand_data *host = nand_to_fsmc(nand);
+ if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ if (!nand->ecc.size)
+ nand->ecc.size = 512;
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ nand->ecc.read_page = fsmc_read_page_hwecc;
+ nand->ecc.calculate = fsmc_read_hwecc_ecc4;
+ nand->ecc.correct = fsmc_bch8_correct_data;
+ nand->ecc.bytes = 13;
+ nand->ecc.strength = 8;
+ }
+
if (AMBA_REV_BITS(host->pid) >= 8) {
switch (mtd->oobsize) {
case 16:
@@ -905,6 +919,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
nand->ecc.correct = nand_correct_data;
+ nand->ecc.hwctl = fsmc_enable_hwecc;
nand->ecc.bytes = 3;
nand->ecc.strength = 1;
nand->ecc.options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
@@ -1055,13 +1070,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
- /*
- * Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
- * can overwrite this value if the DT provides a different value.
- */
- nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- nand->ecc.hwctl = fsmc_enable_hwecc;
- nand->ecc.size = 512;
nand->badblockbits = 7;
if (host->mode == USE_DMA_ACCESS) {
@@ -1084,14 +1092,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->options |= NAND_KEEP_TIMINGS;
}
- if (AMBA_REV_BITS(host->pid) >= 8) {
- nand->ecc.read_page = fsmc_read_page_hwecc;
- nand->ecc.calculate = fsmc_read_hwecc_ecc4;
- nand->ecc.correct = fsmc_bch8_correct_data;
- nand->ecc.bytes = 13;
- nand->ecc.strength = 8;
- }
-
nand_controller_init(&host->base);
host->base.ops = &fsmc_nand_controller_ops;
nand->controller = &host->base;
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
index 4ec0a1e10867..eb03b8cea1cb 100644
--- a/drivers/mtd/nand/raw/gpio.c
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -161,8 +161,17 @@ static int gpio_nand_exec_op(struct nand_chip *chip,
return ret;
}
+static int gpio_nand_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
static const struct nand_controller_ops gpio_nand_ops = {
.exec_op = gpio_nand_exec_op,
+ .attach_chip = gpio_nand_attach_chip,
};
#ifdef CONFIG_OF
@@ -342,8 +351,6 @@ static int gpio_nand_probe(struct platform_device *pdev)
gpiomtd->base.ops = &gpio_nand_ops;
nand_set_flash_node(chip, pdev->dev.of_node);
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
chip->options = gpiomtd->plat.options;
chip->controller = &gpiomtd->base;
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 4940bb2e3c07..9e728c731795 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -648,6 +648,9 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
struct device *dev = &host->pdev->dev;
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
if (!host->dma_buf)
return -ENOMEM;
@@ -656,8 +659,17 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
if (!host->dummy_buf)
return -ENOMEM;
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
+ chip->ecc.hwctl = lpc32xx_ecc_enable;
+ chip->ecc.read_page_raw = lpc32xx_read_page;
+ chip->ecc.read_page = lpc32xx_read_page;
+ chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+ chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+ chip->ecc.write_oob = lpc32xx_write_oob;
+ chip->ecc.read_oob = lpc32xx_read_oob;
+ chip->ecc.strength = 4;
+ chip->ecc.bytes = 10;
+
mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
host->mlcsubpages = mtd->writesize / 512;
@@ -741,15 +753,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
/* Initialize function pointers */
- nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
- nand_chip->ecc.read_page_raw = lpc32xx_read_page;
- nand_chip->ecc.read_page = lpc32xx_read_page;
- nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
- nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
- nand_chip->ecc.write_oob = lpc32xx_write_oob;
- nand_chip->ecc.read_oob = lpc32xx_read_oob;
- nand_chip->ecc.strength = 4;
- nand_chip->ecc.bytes = 10;
nand_chip->legacy.waitfunc = lpc32xx_waitfunc;
nand_chip->options = NAND_NO_SUBPAGE_WRITE;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 6db9d2ed6881..dc7785e30d2f 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -775,6 +775,9 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
/* OOB and ECC CPU and DMA work areas */
host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
@@ -786,11 +789,22 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
if (mtd->writesize <= 512)
mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
/* These sizes remain the same regardless of page size */
chip->ecc.size = 256;
+ chip->ecc.strength = 1;
chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
chip->ecc.prepad = 0;
chip->ecc.postpad = 0;
+ chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+ chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+ chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+ chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+ chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+ chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+ chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
/*
* Use a custom BBT marker setup for small page FLASH that
@@ -881,21 +895,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
/* NAND callbacks for LPC32xx SLC hardware */
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
chip->legacy.read_byte = lpc32xx_nand_read_byte;
chip->legacy.read_buf = lpc32xx_nand_read_buf;
chip->legacy.write_buf = lpc32xx_nand_write_buf;
- chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
- chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
- chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
- chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
- chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
- chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
- chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
- chip->ecc.correct = nand_correct_data;
- chip->ecc.strength = 1;
- chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
/*
* Allocate a large enough buffer for a single huge page plus
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index dfd0d3ed5ed0..fb4c0b11689f 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -104,6 +104,7 @@
#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
struct mpc5121_nfc_prv {
+ struct nand_controller controller;
struct nand_chip chip;
int irq;
void __iomem *regs;
@@ -602,6 +603,18 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
iounmap(prv->csreg);
}
+static int mpc5121_nfc_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops mpc5121_nfc_ops = {
+ .attach_chip = mpc5121_nfc_attach_chip,
+};
+
static int mpc5121_nfc_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
@@ -634,6 +647,10 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip = &prv->chip;
mtd = nand_to_mtd(chip);
+ nand_controller_init(&prv->controller);
+ prv->controller.ops = &mpc5121_nfc_ops;
+ chip->controller = &prv->controller;
+
mtd->dev.parent = dev;
nand_set_controller_data(chip, prv);
nand_set_flash_node(chip, dn);
@@ -688,8 +705,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip->legacy.set_features = nand_get_set_features_notsupp;
chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_options = NAND_BBT_USE_FLASH;
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* Support external chip-select logic on ADS5121 board */
if (of_machine_is_compatible("fsl,mpc5121ads")) {
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index df9c0f8e4b4e..e3bb65fd3ab2 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -22,6 +22,7 @@
#include <linux/platform_data/mtd-orion_nand.h>
struct orion_nand_info {
+ struct nand_controller controller;
struct nand_chip chip;
struct clk *clk;
};
@@ -82,6 +83,18 @@ static void orion_nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
buf[i++] = readb(io_base);
}
+static int orion_nand_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops orion_nand_ops = {
+ .attach_chip = orion_nand_attach_chip,
+};
+
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct orion_nand_info *info;
@@ -101,6 +114,10 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc = &info->chip;
mtd = nand_to_mtd(nc);
+ nand_controller_init(&info->controller);
+ info->controller.ops = &orion_nand_ops;
+ nc->controller = &info->controller;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
io_base = devm_ioremap_resource(&pdev->dev, res);
@@ -139,8 +156,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc->legacy.IO_ADDR_R = nc->legacy.IO_ADDR_W = io_base;
nc->legacy.cmd_ctrl = orion_nand_cmd_ctrl;
nc->legacy.read_buf = orion_nand_read_buf;
- nc->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- nc->ecc.algo = NAND_ECC_ALGO_HAMMING;
if (board->chip_delay)
nc->legacy.chip_delay = board->chip_delay;
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index 2b8f155cc0c5..4dfff34800f4 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -29,6 +29,7 @@
static unsigned int lpcctl;
static struct mtd_info *pasemi_nand_mtd;
+static struct nand_controller controller;
static const char driver_name[] = "pasemi-nand";
static void pasemi_read_buf(struct nand_chip *chip, u_char *buf, int len)
@@ -73,6 +74,18 @@ static int pasemi_device_ready(struct nand_chip *chip)
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
+static int pasemi_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops pasemi_ops = {
+ .attach_chip = pasemi_attach_chip,
+};
+
static int pasemi_nand_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
@@ -100,6 +113,10 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
goto out;
}
+ controller.ops = &pasemi_ops;
+ nand_controller_init(&controller);
+ chip->controller = &controller;
+
pasemi_nand_mtd = nand_to_mtd(chip);
/* Link the private data with the MTD structure */
@@ -132,8 +149,6 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
chip->legacy.read_buf = pasemi_read_buf;
chip->legacy.write_buf = pasemi_write_buf;
chip->legacy.chip_delay = 0;
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* Enable the following for a flash based bad block table */
chip->bbt_options = NAND_BBT_USE_FLASH;
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index b98c0d5c413f..93d9f1694dc1 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -14,10 +14,23 @@
#include <linux/mtd/platnand.h>
struct plat_nand_data {
+ struct nand_controller controller;
struct nand_chip chip;
void __iomem *io_base;
};
+static int plat_nand_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops plat_nand_ops = {
+ .attach_chip = plat_nand_attach_chip,
+};
+
/*
* Probe for the NAND device.
*/
@@ -46,6 +59,10 @@ static int plat_nand_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ data->controller.ops = &plat_nand_ops;
+ nand_controller_init(&data->controller);
+ data->chip.controller = &data->controller;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(data->io_base))
@@ -66,9 +83,6 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.options |= pdata->chip.options;
data->chip.bbt_options |= pdata->chip.bbt_options;
- data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- data->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
-
platform_set_drvdata(pdev, data);
/* Handle any platform specific setup */
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index 6b7addd2c420..c742354c1b0b 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -817,6 +817,29 @@ out:
return ret;
}
+static int r852_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.strength = 2;
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ return 0;
+}
+
+static const struct nand_controller_ops r852_ops = {
+ .attach_chip = r852_attach_chip,
+};
+
static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
int error;
@@ -858,19 +881,6 @@ static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
chip->legacy.read_buf = r852_read_buf;
chip->legacy.write_buf = r852_write_buf;
- /* ecc */
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
- chip->ecc.size = R852_DMA_LEN;
- chip->ecc.bytes = SM_OOB_SIZE;
- chip->ecc.strength = 2;
- chip->ecc.hwctl = r852_ecc_hwctl;
- chip->ecc.calculate = r852_ecc_calculate;
- chip->ecc.correct = r852_ecc_correct;
-
- /* TODO: hack */
- chip->ecc.read_oob = r852_read_oob;
-
/* init our device structure */
dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
@@ -882,6 +892,10 @@ static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
dev->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, dev);
+ nand_controller_init(&dev->controller);
+ dev->controller.ops = &r852_ops;
+ chip->controller = &dev->controller;
+
dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
&dev->phys_bounce_buffer, GFP_KERNEL);
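The host-ECC drivers converted here (lpc32xx_mlc/slc, r852, sharpsl, tmio, txx9ndfmc) use the same hook but bail out early when a different ECC engine was requested, so a software-ECC choice made elsewhere (for example via device tree) is not overridden. Roughly, with hypothetical mydrv_* callbacks and placeholder ECC geometry:

/* Driver-specific ECC callbacks; implementations omitted in this sketch. */
static void mydrv_ecc_hwctl(struct nand_chip *chip, int mode);
static int mydrv_ecc_calculate(struct nand_chip *chip, const u8 *dat, u8 *ecc_code);
static int mydrv_ecc_correct(struct nand_chip *chip, u8 *dat, u8 *read_ecc, u8 *calc_ecc);

static int mydrv_attach_chip(struct nand_chip *chip)
{
	/* Only configure the on-host engine if it was actually selected. */
	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
		return 0;

	chip->ecc.size = 512;	/* placeholder geometry */
	chip->ecc.bytes = 6;
	chip->ecc.strength = 2;
	chip->ecc.hwctl = mydrv_ecc_hwctl;
	chip->ecc.calculate = mydrv_ecc_calculate;
	chip->ecc.correct = mydrv_ecc_correct;

	return 0;
}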
diff --git a/drivers/mtd/nand/raw/r852.h b/drivers/mtd/nand/raw/r852.h
index e9ce299c499d..96fe301d15da 100644
--- a/drivers/mtd/nand/raw/r852.h
+++ b/drivers/mtd/nand/raw/r852.h
@@ -104,6 +104,7 @@
#define DMA_MEMORY 1
struct r852_device {
+ struct nand_controller controller;
void __iomem *mmio; /* mmio */
struct nand_chip *chip; /* nand chip backpointer */
struct pci_dev *pci_dev; /* pci backpointer */
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 1327bfb3d5d3..af98bcc9d689 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
struct sharpsl_nand {
+ struct nand_controller controller;
struct nand_chip chip;
void __iomem *io;
@@ -96,6 +97,25 @@ static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
return readb(sharpsl->io + ECCCNTR) != 0;
}
+static int sharpsl_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
+ chip->ecc.calculate = sharpsl_nand_calculate_ecc;
+ chip->ecc.correct = nand_correct_data;
+
+ return 0;
+}
+
+static const struct nand_controller_ops sharpsl_ops = {
+ .attach_chip = sharpsl_attach_chip,
+};
+
/*
* Main initialization routine
*/
@@ -136,6 +156,10 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
/* Get pointer to private data */
this = (struct nand_chip *)(&sharpsl->chip);
+ nand_controller_init(&sharpsl->controller);
+ sharpsl->controller.ops = &sharpsl_ops;
+ this->controller = &sharpsl->controller;
+
/* Link the private data with the MTD structure */
mtd = nand_to_mtd(this);
mtd->dev.parent = &pdev->dev;
@@ -156,15 +180,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
this->legacy.dev_ready = sharpsl_nand_dev_ready;
/* 15 us command delay time */
this->legacy.chip_delay = 15;
- /* set eccmode using hardware ECC */
- this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- this->ecc.size = 256;
- this->ecc.bytes = 3;
- this->ecc.strength = 1;
this->badblock_pattern = data->badblock_pattern;
- this->ecc.hwctl = sharpsl_nand_enable_hwecc;
- this->ecc.calculate = sharpsl_nand_calculate_ecc;
- this->ecc.correct = nand_correct_data;
/* Scan to find existence of the device */
err = nand_scan(this, 1);
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index 0f63ff6f7fe7..107208311987 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -22,6 +22,7 @@
#define FPGA_NAND_DATA_SHIFT 16
struct socrates_nand_host {
+ struct nand_controller controller;
struct nand_chip nand_chip;
void __iomem *io_base;
struct device *dev;
@@ -116,6 +117,18 @@ static int socrates_nand_device_ready(struct nand_chip *nand_chip)
return 1;
}
+static int socrates_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops socrates_ops = {
+ .attach_chip = socrates_attach_chip,
+};
+
/*
* Probe for the NAND device.
*/
@@ -141,6 +154,10 @@ static int socrates_nand_probe(struct platform_device *ofdev)
mtd = nand_to_mtd(nand_chip);
host->dev = &ofdev->dev;
+ nand_controller_init(&host->controller);
+ host->controller.ops = &socrates_ops;
+ nand_chip->controller = &host->controller;
+
/* link the private data structures */
nand_set_controller_data(nand_chip, host);
nand_set_flash_node(nand_chip, ofdev->dev.of_node);
@@ -153,10 +170,6 @@ static int socrates_nand_probe(struct platform_device *ofdev)
nand_chip->legacy.read_buf = socrates_nand_read_buf;
nand_chip->legacy.dev_ready = socrates_nand_device_ready;
- /* enable ECC */
- nand_chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- nand_chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
-
/* TODO: I have no idea what real delay is. */
nand_chip->legacy.chip_delay = 20; /* 20us command delay time */
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index 235a2f7b1bad..aa6c7e7bbf1b 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -103,6 +103,7 @@
/*--------------------------------------------------------------------------*/
struct tmio_nand {
+ struct nand_controller controller;
struct nand_chip chip;
struct completion comp;
@@ -355,6 +356,25 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
cell->disable(dev);
}
+static int tmio_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
+ chip->ecc.hwctl = tmio_nand_enable_hwecc;
+ chip->ecc.calculate = tmio_nand_calculate_ecc;
+ chip->ecc.correct = tmio_nand_correct_data;
+
+ return 0;
+}
+
+static const struct nand_controller_ops tmio_ops = {
+ .attach_chip = tmio_attach_chip,
+};
+
static int tmio_probe(struct platform_device *dev)
{
struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
@@ -385,6 +405,10 @@ static int tmio_probe(struct platform_device *dev)
mtd->name = "tmio-nand";
mtd->dev.parent = &dev->dev;
+ nand_controller_init(&tmio->controller);
+ tmio->controller.ops = &tmio_ops;
+ nand_chip->controller = &tmio->controller;
+
tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
if (!tmio->ccr)
return -EIO;
@@ -409,15 +433,6 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->legacy.write_buf = tmio_nand_write_buf;
nand_chip->legacy.read_buf = tmio_nand_read_buf;
- /* set eccmode using hardware ECC */
- nand_chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.bytes = 6;
- nand_chip->ecc.strength = 2;
- nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
- nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
- nand_chip->ecc.correct = tmio_nand_correct_data;
-
if (data)
nand_chip->badblock_pattern = data->badblock_pattern;
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index ef81dce6b5c4..fe8ed2441588 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -253,6 +253,11 @@ static int txx9ndfmc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.strength = 1;
+
if (mtd->writesize >= 512) {
chip->ecc.size = 512;
chip->ecc.bytes = 6;
@@ -261,6 +266,10 @@ static int txx9ndfmc_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = 3;
}
+ chip->ecc.calculate = txx9ndfmc_calculate_ecc;
+ chip->ecc.correct = txx9ndfmc_correct_data;
+ chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
+
return 0;
}
@@ -326,11 +335,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
chip->legacy.write_buf = txx9ndfmc_write_buf;
chip->legacy.cmd_ctrl = txx9ndfmc_cmd_ctrl;
chip->legacy.dev_ready = txx9ndfmc_dev_ready;
- chip->ecc.calculate = txx9ndfmc_calculate_ecc;
- chip->ecc.correct = txx9ndfmc_correct_data;
- chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- chip->ecc.strength = 1;
chip->legacy.chip_delay = 100;
chip->controller = &drvdata->controller;
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index f2dbd63a5c1f..efc5bf5434e0 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -62,6 +62,7 @@
#define NAND_CON_NANDM 1
struct xway_nand_data {
+ struct nand_controller controller;
struct nand_chip chip;
unsigned long csflags;
void __iomem *nandaddr;
@@ -145,6 +146,18 @@ static void xway_write_buf(struct nand_chip *chip, const u_char *buf, int len)
xway_writeb(nand_to_mtd(chip), NAND_WRITE_DATA, buf[i]);
}
+static int xway_attach_chip(struct nand_chip *chip)
+{
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops xway_nand_ops = {
+ .attach_chip = xway_attach_chip,
+};
+
/*
* Probe for the NAND device.
*/
@@ -180,8 +193,9 @@ static int xway_nand_probe(struct platform_device *pdev)
data->chip.legacy.read_byte = xway_read_byte;
data->chip.legacy.chip_delay = 30;
- data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- data->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
+ nand_controller_init(&data->controller);
+ data->controller.ops = &xway_nand_ops;
+ data->chip.controller = &data->controller;
platform_set_drvdata(pdev, data);
nand_set_controller_data(&data->chip, data);
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 49f4b73be513..5592a929b593 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -111,6 +111,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
{KE_IGNORE, 0x81, {KEY_SLEEP} },
{KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */
+ {KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */
{KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
{KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
{KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
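The new 0x84 entry is KE_IGNORE, so the WMI event is consumed without generating an input event (the firmware toggles the keyboard backlight by itself). As a hedged illustration of how such a sparse keymap is consumed, with a hypothetical table and notify handler:

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static const struct key_entry example_keymap[] = {
	{KE_KEY,    0x82, {KEY_TOUCHPAD_TOGGLE} },	/* forwarded to userspace */
	{KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} },	/* firmware handles it, swallow */
	{KE_END, 0}
};

static int example_input_setup(struct input_dev *input)
{
	int err = sparse_keymap_setup(input, example_keymap, NULL);

	if (err)
		return err;

	return input_register_device(input);
}

/* Reports KE_KEY entries as press/release; KE_IGNORE entries are dropped. */
static void example_notify(struct input_dev *input, unsigned int scancode)
{
	sparse_keymap_report_event(input, scancode, 1, true);
}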
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index f5901b0b07cd..0419c8001fe3 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -206,6 +206,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ },
+ },
{} /* Array terminator */
};
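dmi_switches_allow_list is a plain DMI match table: the new entry lets intel-vbtn trust the tablet-mode switch on the HP Pavilion 13 x360. The lookup itself is the stock DMI helper pattern, sketched here with a hypothetical table name:

#include <linux/dmi.h>

static const struct dmi_system_id example_allow_list[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
		},
	},
	{} /* Array terminator */
};

static bool example_switches_allowed(void)
{
	/* dmi_check_system() returns the number of entries that matched. */
	return dmi_check_system(example_allow_list) > 0;
}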
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e3810675090a..c404706379d9 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3218,7 +3218,14 @@ static int hotkey_init_tablet_mode(void)
in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
&has_tablet_mode);
- if (has_tablet_mode)
+ /*
+ * The Yoga 11e series has 2 accelerometers described by a
+ * BOSC0200 ACPI node. This setup relies on a Windows service
+ * which calls special ACPI methods on this node to report
+ * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
+ * does not support this, so skip the hotkey on these models.
+ */
+ if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
type = "GMMS";
} else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
@@ -4228,6 +4235,7 @@ static void hotkey_resume(void)
pr_err("error while attempting to reset the event firmware interface\n");
tpacpi_send_radiosw_update();
+ tpacpi_input_send_tabletsw();
hotkey_tablet_mode_notify_change();
hotkey_wakeup_reason_notify_change();
hotkey_wakeup_hotunplug_complete_notify_change();
@@ -8776,6 +8784,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
+ TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3rd gen) */
+ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -9703,6 +9713,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
TPACPI_Q_LNV3('R', '0', 'C', true), /* Thinkpad 13 */
TPACPI_Q_LNV3('R', '0', 'J', true), /* Thinkpad 13 gen 2 */
+ TPACPI_Q_LNV3('R', '0', 'K', true), /* Thinkpad 11e gen 4 celeron BIOS */
};
static int __init tpacpi_battery_init(struct ibm_init_struct *ibm)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index e557d757c647..fa7232ad8c39 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1478,7 +1478,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char *buffer;
char *cmd;
- int lcd_out, crt_out, tv_out;
+ int lcd_out = -1, crt_out = -1, tv_out = -1;
int remain = count;
int value;
int ret;
@@ -1510,7 +1510,6 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
kfree(cmd);
- lcd_out = crt_out = tv_out = -1;
ret = get_video_status(dev, &video_out);
if (!ret) {
unsigned int new_video_out = video_out;
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index dda60f89c951..5783139d0a11 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -295,6 +295,21 @@ static const struct ts_dmi_data irbis_tw90_data = {
.properties = irbis_tw90_props,
};
+static const struct property_entry irbis_tw118_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 30),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1960),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1510),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data irbis_tw118_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = irbis_tw118_props,
+};
+
static const struct property_entry itworks_tw891_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
@@ -623,6 +638,23 @@ static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = {
.properties = pov_mobii_wintab_p1006w_v10_props,
};
+static const struct property_entry predia_basic_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 3),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1144),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data predia_basic_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = predia_basic_props,
+};
+
static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -937,6 +969,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Irbis TW118 */
+ .driver_data = (void *)&irbis_tw118_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW118"),
+ },
+ },
+ {
/* I.T.Works TW891 */
.driver_data = (void *)&itworks_tw891_data,
.matches = {
@@ -1110,6 +1150,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Predia Basic tablet */
+ .driver_data = (void *)&predia_basic_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
+ /* Above matches are too generic, add bios-version match */
+ DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"),
+ },
+ },
+ {
/* Point of View mobii wintab p800w (v2.1) */
.driver_data = (void *)&pov_mobii_wintab_p800w_v21_data,
.matches = {
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index 0b2236ade412..c33866f747db 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -875,7 +875,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->set_cs = dw_spi_set_cs;
master->transfer_one = dw_spi_transfer_one;
master->handle_err = dw_spi_handle_err;
- master->mem_ops = &dws->mem_ops;
+ if (dws->mem_ops.exec_op)
+ master->mem_ops = &dws->mem_ops;
master->max_speed_hz = dws->max_freq;
master->dev.of_node = dev->of_node;
master->dev.fwnode = dev->fwnode;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 4b80e27ecdbf..0b597905ee72 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1686,6 +1686,7 @@ static int spi_imx_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
pm_runtime_use_autosuspend(spi_imx->dev);
+ pm_runtime_get_noresume(spi_imx->dev);
pm_runtime_set_active(spi_imx->dev);
pm_runtime_enable(spi_imx->dev);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 0d41406c036d..ab9035662717 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1001,6 +1001,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
struct resource *res;
struct nxp_fspi *f;
int ret;
+ u32 reg;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*f));
if (!ctlr)
@@ -1032,6 +1033,12 @@ static int nxp_fspi_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
+ /* Clear potential interrupts */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ if (reg)
+ fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
/* find the resources - controller memory mapped space */
if (is_acpi_node(f->dev->fwnode))
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 05c75f890ace..fc9a59788d2e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -3372,12 +3372,15 @@ int spi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
spi->max_speed_hz = spi->controller->max_speed_hz;
+ mutex_lock(&spi->controller->io_mutex);
+
if (spi->controller->setup)
status = spi->controller->setup(spi);
if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
status = pm_runtime_get_sync(spi->controller->dev.parent);
if (status < 0) {
+ mutex_unlock(&spi->controller->io_mutex);
pm_runtime_put_noidle(spi->controller->dev.parent);
dev_err(&spi->controller->dev, "Failed to power device: %d\n",
status);
@@ -3399,6 +3402,8 @@ int spi_setup(struct spi_device *spi)
spi_set_cs(spi, false);
}
+ mutex_unlock(&spi->controller->io_mutex);
+
if (spi->rt && !spi->controller->rt) {
spi->controller->rt = true;
spi_set_thread_rt(spi->controller);
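The spi_setup() change serializes controller->setup() and the chip-select poke against in-flight messages by taking the controller's io_mutex, and the runtime-PM error path now has to drop it before returning. A minimal sketch of that locking discipline, as an illustrative helper rather than the actual spi.c flow:

#include <linux/mutex.h>
#include <linux/spi/spi.h>

static int example_setup_locked(struct spi_device *spi)
{
	int status = 0;

	mutex_lock(&spi->controller->io_mutex);

	if (spi->controller->setup)
		status = spi->controller->setup(spi);

	/* Every exit path, including the error ones, must release io_mutex. */
	mutex_unlock(&spi->controller->io_mutex);

	return status;
}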
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index 28319351e909..781c84a9b1b7 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -446,7 +446,7 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
reg |= (pps->second_chroma_qp_index_offset & 0x3f) << 16;
reg |= (pps->chroma_qp_index_offset & 0x3f) << 8;
reg |= (pps->pic_init_qp_minus26 + 26 + slice->slice_qp_delta) & 0x3f;
- if (pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT)
+ if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
reg |= VE_H264_SHS_QP_SCALING_MATRIX_DEFAULT;
cedrus_write(dev, VE_H264_SHS_QP, reg);
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 5bc86f481a78..c8b0ae676809 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1093,7 +1093,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto err1;
}
- fb_virt = ioremap(par->mem->start, screen_fb_size);
+ /*
+ * Map the VRAM cacheable for performance. This is also required for
+ * VM Connect to display properly for an ARM64 Linux VM, as the host also
+ * maps the VRAM cacheable.
+ */
+ fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
if (!fb_virt)
goto err2;
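ioremap() produces an uncached mapping, which makes framebuffer-sized copies slow and, per the comment, mismatches the host's own cacheable mapping; ioremap_cache() requests a cacheable mapping instead. A tiny sketch of the pairing, with hypothetical helpers:

#include <linux/io.h>

static void __iomem *example_map_vram(phys_addr_t start, size_t size)
{
	/* Cacheable MMIO mapping; returns NULL on failure, like ioremap(). */
	return ioremap_cache(start, size);
}

static void example_unmap_vram(void __iomem *vram)
{
	iounmap(vram);
}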
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0378933d163c..0b29bdb25105 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -878,7 +878,10 @@ struct btrfs_fs_info {
*/
struct ulist *qgroup_ulist;
- /* protect user change for quota operations */
+ /*
+ * Protect user-initiated changes for quota operations. If a transaction
+ * is needed, it must be started before taking this lock.
+ */
struct mutex qgroup_ioctl_lock;
/* list of dirty qgroups to be written at next commit */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 87355a38a654..4373da7bcc0d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -452,46 +452,6 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
}
}
-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
- const u64 start,
- const u64 len,
- struct extent_state **cached_state)
-{
- u64 search_start = start;
- const u64 end = start + len - 1;
-
- while (search_start < end) {
- const u64 search_len = end - search_start + 1;
- struct extent_map *em;
- u64 em_len;
- int ret = 0;
-
- em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
- if (IS_ERR(em))
- return PTR_ERR(em);
-
- if (em->block_start != EXTENT_MAP_HOLE)
- goto next;
-
- em_len = em->len;
- if (em->start < search_start)
- em_len -= search_start - em->start;
- if (em_len > search_len)
- em_len = search_len;
-
- ret = set_extent_bit(&inode->io_tree, search_start,
- search_start + em_len - 1,
- EXTENT_DELALLOC_NEW,
- NULL, cached_state, GFP_NOFS);
-next:
- search_start = extent_map_end(em);
- free_extent_map(em);
- if (ret)
- return ret;
- }
- return 0;
-}
-
/*
* after copy_from_user, pages need to be dirtied and we need to make
* sure holes are created between the current EOF and the start of
@@ -528,23 +488,6 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, cached);
- if (!btrfs_is_free_space_inode(inode)) {
- if (start_pos >= isize &&
- !(inode->flags & BTRFS_INODE_PREALLOC)) {
- /*
- * There can't be any extents following eof in this case
- * so just set the delalloc new bit for the range
- * directly.
- */
- extra_bits |= EXTENT_DELALLOC_NEW;
- } else {
- err = btrfs_find_new_delalloc_bytes(inode, start_pos,
- num_bytes, cached);
- if (err)
- return err;
- }
- }
-
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
extra_bits, cached);
if (err)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index da58c58ef9aa..7e8d8169779d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2253,11 +2253,69 @@ static int add_pending_csums(struct btrfs_trans_handle *trans,
return 0;
}
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+ const u64 start,
+ const u64 len,
+ struct extent_state **cached_state)
+{
+ u64 search_start = start;
+ const u64 end = start + len - 1;
+
+ while (search_start < end) {
+ const u64 search_len = end - search_start + 1;
+ struct extent_map *em;
+ u64 em_len;
+ int ret = 0;
+
+ em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ if (em->block_start != EXTENT_MAP_HOLE)
+ goto next;
+
+ em_len = em->len;
+ if (em->start < search_start)
+ em_len -= search_start - em->start;
+ if (em_len > search_len)
+ em_len = search_len;
+
+ ret = set_extent_bit(&inode->io_tree, search_start,
+ search_start + em_len - 1,
+ EXTENT_DELALLOC_NEW,
+ NULL, cached_state, GFP_NOFS);
+next:
+ search_start = extent_map_end(em);
+ free_extent_map(em);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
unsigned int extra_bits,
struct extent_state **cached_state)
{
WARN_ON(PAGE_ALIGNED(end));
+
+ if (start >= i_size_read(&inode->vfs_inode) &&
+ !(inode->flags & BTRFS_INODE_PREALLOC)) {
+ /*
+ * There can't be any extents following eof in this case so just
+ * set the delalloc new bit for the range directly.
+ */
+ extra_bits |= EXTENT_DELALLOC_NEW;
+ } else {
+ int ret;
+
+ ret = btrfs_find_new_delalloc_bytes(inode, start,
+ end + 1 - start,
+ cached_state);
+ if (ret)
+ return ret;
+ }
+
return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
cached_state);
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 77c54749f432..87bd37b70738 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "transaction.h"
@@ -497,13 +498,13 @@ next2:
break;
}
out:
+ btrfs_free_path(path);
fs_info->qgroup_flags |= flags;
if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
ret >= 0)
ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
- btrfs_free_path(path);
if (ret < 0) {
ulist_free(fs_info->qgroup_ulist);
@@ -936,6 +937,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
struct btrfs_key found_key;
struct btrfs_qgroup *qgroup = NULL;
struct btrfs_trans_handle *trans = NULL;
+ struct ulist *ulist = NULL;
int ret = 0;
int slot;
@@ -943,8 +945,8 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
if (fs_info->quota_root)
goto out;
- fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
- if (!fs_info->qgroup_ulist) {
+ ulist = ulist_alloc(GFP_KERNEL);
+ if (!ulist) {
ret = -ENOMEM;
goto out;
}
@@ -952,6 +954,22 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
ret = btrfs_sysfs_add_qgroups(fs_info);
if (ret < 0)
goto out;
+
+ /*
+ * Unlock qgroup_ioctl_lock before starting the transaction. This is to
+ * avoid lock acquisition inversion problems (reported by lockdep) between
+ * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
+ * start a transaction.
+ * After starting the transaction, lock qgroup_ioctl_lock again and check
+ * whether someone else created the quota root in the meantime. If so, just
+ * return success and release the transaction handle.
+ *
+ * Also we don't need to worry about someone else calling
+ * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
+ * that function returns 0 (success) when the sysfs entries already exist.
+ */
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
+
/*
* 1 for quota root item
* 1 for BTRFS_QGROUP_STATUS item
@@ -961,12 +979,20 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
* would be a lot of overkill.
*/
trans = btrfs_start_transaction(tree_root, 2);
+
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out;
}
+ if (fs_info->quota_root)
+ goto out;
+
+ fs_info->qgroup_ulist = ulist;
+ ulist = NULL;
+
/*
* initially create the quota tree
*/
@@ -1124,11 +1150,14 @@ out:
if (ret) {
ulist_free(fs_info->qgroup_ulist);
fs_info->qgroup_ulist = NULL;
- if (trans)
- btrfs_end_transaction(trans);
btrfs_sysfs_del_qgroups(fs_info);
}
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ if (ret && trans)
+ btrfs_end_transaction(trans);
+ else if (trans)
+ ret = btrfs_end_transaction(trans);
+ ulist_free(ulist);
return ret;
}
@@ -1141,19 +1170,29 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
/*
* 1 For the root item
*
* We should also reserve enough items for the quota tree deletion in
* btrfs_clean_quota_tree but this is not done.
+ *
+ * Also, we must always start a transaction without holding the mutex
+ * qgroup_ioctl_lock, see btrfs_quota_enable().
*/
trans = btrfs_start_transaction(fs_info->tree_root, 1);
+
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ trans = NULL;
goto out;
}
+ if (!fs_info->quota_root)
+ goto out;
+
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
@@ -1167,13 +1206,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
ret = btrfs_clean_quota_tree(trans, quota_root);
if (ret) {
btrfs_abort_transaction(trans, ret);
- goto end_trans;
+ goto out;
}
ret = btrfs_del_root(trans, &quota_root->root_key);
if (ret) {
btrfs_abort_transaction(trans, ret);
- goto end_trans;
+ goto out;
}
list_del(&quota_root->dirty_list);
@@ -1185,10 +1224,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
btrfs_put_root(quota_root);
-end_trans:
- ret = btrfs_end_transaction(trans);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ if (ret && trans)
+ btrfs_end_transaction(trans);
+ else if (trans)
+ ret = btrfs_end_transaction(trans);
+
return ret;
}
@@ -1324,13 +1366,17 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
struct ulist *tmp;
+ unsigned int nofs_flag;
int ret = 0;
/* Check the level of src and dst first */
if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
return -EINVAL;
+ /* We hold a transaction handle open, must do a NOFS allocation. */
+ nofs_flag = memalloc_nofs_save();
tmp = ulist_alloc(GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
if (!tmp)
return -ENOMEM;
@@ -1387,10 +1433,14 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
struct btrfs_qgroup_list *list;
struct ulist *tmp;
bool found = false;
+ unsigned int nofs_flag;
int ret = 0;
int ret2;
+ /* We hold a transaction handle open, must do a NOFS allocation. */
+ nofs_flag = memalloc_nofs_save();
tmp = ulist_alloc(GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
if (!tmp)
return -ENOMEM;
@@ -3512,6 +3562,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
{
struct btrfs_trans_handle *trans;
int ret;
+ bool can_commit = true;
/*
* We don't want to run flush again and again, so if there is a running
@@ -3523,6 +3574,20 @@ static int try_flush_qgroup(struct btrfs_root *root)
return 0;
}
+ /*
+ * If the current process holds a transaction, we shouldn't flush, as we
+ * assume all space reservation happens before a transaction handle is
+ * held.
+ *
+ * But there are cases like btrfs_delayed_item_reserve_metadata() where
+ * we try to reserve space with one transaction handle already held.
+ * In that case we can't commit the transaction, but at least try to end
+ * it and hope the started data writes can free some space.
+ */
+ if (current->journal_info &&
+ current->journal_info != BTRFS_SEND_TRANS_STUB)
+ can_commit = false;
+
ret = btrfs_start_delalloc_snapshot(root);
if (ret < 0)
goto out;
@@ -3534,7 +3599,10 @@ static int try_flush_qgroup(struct btrfs_root *root)
goto out;
}
- ret = btrfs_commit_transaction(trans);
+ if (can_commit)
+ ret = btrfs_commit_transaction(trans);
+ else
+ ret = btrfs_end_transaction(trans);
out:
clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
wake_up(&root->qgroup_flush_wait);
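The ulist_alloc(GFP_KERNEL) calls in btrfs_add_qgroup_relation() and __del_qgroup_relation() now run inside a memalloc_nofs_save()/memalloc_nofs_restore() pair because a transaction handle is held, so the allocator must not recurse into the filesystem. The general pattern, shown as a hypothetical helper:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *example_alloc_while_in_transaction(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	/* Within this scope, GFP_KERNEL allocations behave like GFP_NOFS. */
	nofs_flag = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	return p;
}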
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index e6719f7db386..04022069761d 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -983,7 +983,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
- EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1050,7 +1051,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
- EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1082,7 +1084,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* Empty */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_err("clear_extent_bit returned %d", ret);
goto out;
@@ -1097,7 +1100,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
out:
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_UPTODATE, 0, 0, NULL);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 8784b74f5232..ea2bb4cb5890 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1068,6 +1068,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
"invalid root item size, have %u expect %zu or %u",
btrfs_item_size_nr(leaf, slot), sizeof(ri),
btrfs_legacy_root_item_size());
+ return -EUCLEAN;
}
/*
@@ -1423,6 +1424,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
"invalid item size, have %u expect aligned to %zu for key type %u",
btrfs_item_size_nr(leaf, slot),
sizeof(*dref), key->type);
+ return -EUCLEAN;
}
if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
generic_err(leaf, slot,
@@ -1451,6 +1453,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
extent_err(leaf, slot,
"invalid extent data backref offset, have %llu expect aligned to %u",
offset, leaf->fs_info->sectorsize);
+ return -EUCLEAN;
}
}
return 0;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a6406b3b8c2b..78637665166e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -940,7 +940,13 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (device->bdev != path_bdev) {
bdput(path_bdev);
mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_warn_in_rcu(device->fs_info,
+ /*
+ * device->fs_info may not be reliable here, so
+ * pass in a NULL instead. This avoids a
+ * possible use-after-free when the fs_info and
+ * fs_info->sb are already torn down.
+ */
+ btrfs_warn_in_rcu(NULL,
"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
path, devid, found_transid,
current->comm,
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 23b21e943652..ef4784e72b1d 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1266,6 +1266,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
} else if (mode_from_special_sid) {
rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true);
+ kfree(pntsd);
} else {
/* get approximated mode from ACL */
rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 504766cb6c19..dab94f67c988 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -264,7 +264,7 @@ smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
}
static struct mid_q_entry *
-smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
{
struct mid_q_entry *mid;
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
@@ -281,6 +281,10 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
(mid->command == shdr->Command)) {
kref_get(&mid->refcount);
+ if (dequeue) {
+ list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
+ }
spin_unlock(&GlobalMid_Lock);
return mid;
}
@@ -289,6 +293,18 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
return NULL;
}
+static struct mid_q_entry *
+smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+{
+ return __smb2_find_mid(server, buf, false);
+}
+
+static struct mid_q_entry *
+smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
+{
+ return __smb2_find_mid(server, buf, true);
+}
+
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
@@ -4356,7 +4372,8 @@ init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
char *buf, unsigned int buf_len, struct page **pages,
- unsigned int npages, unsigned int page_data_size)
+ unsigned int npages, unsigned int page_data_size,
+ bool is_offloaded)
{
unsigned int data_offset;
unsigned int data_len;
@@ -4378,7 +4395,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
if (server->ops->is_session_expired &&
server->ops->is_session_expired(buf)) {
- cifs_reconnect(server);
+ if (!is_offloaded)
+ cifs_reconnect(server);
return -1;
}
@@ -4402,7 +4420,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
cifs_dbg(FYI, "%s: server returned error %d\n",
__func__, rdata->result);
/* normal error on read response */
- dequeue_mid(mid, false);
+ if (is_offloaded)
+ mid->mid_state = MID_RESPONSE_RECEIVED;
+ else
+ dequeue_mid(mid, false);
return 0;
}
@@ -4426,7 +4447,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
__func__, data_offset);
rdata->result = -EIO;
- dequeue_mid(mid, rdata->result);
+ if (is_offloaded)
+ mid->mid_state = MID_RESPONSE_MALFORMED;
+ else
+ dequeue_mid(mid, rdata->result);
return 0;
}
@@ -4442,21 +4466,30 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
__func__, data_offset);
rdata->result = -EIO;
- dequeue_mid(mid, rdata->result);
+ if (is_offloaded)
+ mid->mid_state = MID_RESPONSE_MALFORMED;
+ else
+ dequeue_mid(mid, rdata->result);
return 0;
}
if (data_len > page_data_size - pad_len) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
- dequeue_mid(mid, rdata->result);
+ if (is_offloaded)
+ mid->mid_state = MID_RESPONSE_MALFORMED;
+ else
+ dequeue_mid(mid, rdata->result);
return 0;
}
rdata->result = init_read_bvec(pages, npages, page_data_size,
cur_off, &bvec);
if (rdata->result != 0) {
- dequeue_mid(mid, rdata->result);
+ if (is_offloaded)
+ mid->mid_state = MID_RESPONSE_MALFORMED;
+ else
+ dequeue_mid(mid, rdata->result);
return 0;
}
@@ -4471,7 +4504,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
/* read response payload cannot be in both buf and pages */
WARN_ONCE(1, "buf can not contain only a part of read data");
rdata->result = -EIO;
- dequeue_mid(mid, rdata->result);
+ if (is_offloaded)
+ mid->mid_state = MID_RESPONSE_MALFORMED;
+ else
+ dequeue_mid(mid, rdata->result);
return 0;
}
@@ -4482,7 +4518,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
if (length < 0)
return length;
- dequeue_mid(mid, false);
+ if (is_offloaded)
+ mid->mid_state = MID_RESPONSE_RECEIVED;
+ else
+ dequeue_mid(mid, false);
return length;
}
@@ -4511,15 +4550,34 @@ static void smb2_decrypt_offload(struct work_struct *work)
}
dw->server->lstrp = jiffies;
- mid = smb2_find_mid(dw->server, dw->buf);
+ mid = smb2_find_dequeue_mid(dw->server, dw->buf);
if (mid == NULL)
cifs_dbg(FYI, "mid not found\n");
else {
mid->decrypted = true;
rc = handle_read_data(dw->server, mid, dw->buf,
dw->server->vals->read_rsp_size,
- dw->ppages, dw->npages, dw->len);
- mid->callback(mid);
+ dw->ppages, dw->npages, dw->len,
+ true);
+ if (rc >= 0) {
+#ifdef CONFIG_CIFS_STATS2
+ mid->when_received = jiffies;
+#endif
+ mid->callback(mid);
+ } else {
+ spin_lock(&GlobalMid_Lock);
+ if (dw->server->tcpStatus == CifsNeedReconnect) {
+ mid->mid_state = MID_RETRY_NEEDED;
+ spin_unlock(&GlobalMid_Lock);
+ mid->callback(mid);
+ } else {
+ mid->mid_state = MID_REQUEST_SUBMITTED;
+ mid->mid_flags &= ~(MID_DELETED);
+ list_add_tail(&mid->qhead,
+ &dw->server->pending_mid_q);
+ spin_unlock(&GlobalMid_Lock);
+ }
+ }
cifs_mid_q_entry_release(mid);
}
@@ -4622,7 +4680,7 @@ non_offloaded_decrypt:
(*mid)->decrypted = true;
rc = handle_read_data(server, *mid, buf,
server->vals->read_rsp_size,
- pages, npages, len);
+ pages, npages, len, false);
}
free_pages:
@@ -4765,7 +4823,7 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
return handle_read_data(server, mid, buf, server->pdu_size,
- NULL, 0, 0);
+ NULL, 0, 0, false);
}
static int
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index a80c59af2c60..922a7f600465 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -282,20 +282,6 @@ struct mem_cgroup {
MEMCG_PADDING(_pad1_);
- /*
- * set > 0 if pages under this cgroup are moving to other cgroup.
- */
- atomic_t moving_account;
- struct task_struct *move_lock_task;
-
- /* Legacy local VM stats and events */
- struct memcg_vmstats_percpu __percpu *vmstats_local;
-
- /* Subtree VM stats and events (batched updates) */
- struct memcg_vmstats_percpu __percpu *vmstats_percpu;
-
- MEMCG_PADDING(_pad2_);
-
atomic_long_t vmstats[MEMCG_NR_STAT];
atomic_long_t vmevents[NR_VM_EVENT_ITEMS];
@@ -317,6 +303,20 @@ struct mem_cgroup {
struct list_head objcg_list; /* list of inherited objcgs */
#endif
+ MEMCG_PADDING(_pad2_);
+
+ /*
+ * set > 0 if pages under this cgroup are moving to other cgroup.
+ */
+ atomic_t moving_account;
+ struct task_struct *move_lock_task;
+
+ /* Legacy local VM stats and events */
+ struct memcg_vmstats_percpu __percpu *vmstats_local;
+
+ /* Subtree VM stats and events (batched updates) */
+ struct memcg_vmstats_percpu __percpu *vmstats_percpu;
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
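The struct reshuffle moves the write-mostly members (moving_account, move_lock_task, the per-cpu stat pointers) behind the second padding slot so they stop sharing cache lines with the read-mostly counters above them. The same false-sharing remedy in generic form, on a hypothetical structure:

#include <linux/atomic.h>
#include <linux/cache.h>

struct example_counters {
	/* Read-mostly: sampled on every fast-path lookup. */
	atomic_long_t stats[16];
	atomic_long_t events[16];

	/* Write-mostly fields start on their own cache line. */
	atomic_t moving ____cacheline_aligned_in_smp;
	unsigned long last_move;
};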
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index e7cbccc7c14c..57d795365987 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -190,7 +190,7 @@ TRACE_EVENT(inode_foreign_history,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
__entry->history = history;
@@ -219,7 +219,7 @@ TRACE_EVENT(inode_switch_wbs,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
__entry->ino = inode->i_ino;
__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
@@ -252,7 +252,7 @@ TRACE_EVENT(track_foreign_dirty,
struct address_space *mapping = page_mapping(page);
struct inode *inode = mapping ? mapping->host : NULL;
- strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->bdi_id = wb->bdi->id;
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
@@ -285,7 +285,7 @@ TRACE_EVENT(flush_foreign,
),
TP_fast_assign(
- strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
__entry->frn_bdi_id = frn_bdi_id;
__entry->frn_memcg_id = frn_memcg_id;
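strncpy() does not guarantee NUL termination when the source fills the buffer, which these fixed-size trace-event string fields can hit; strscpy_pad() always terminates and zero-fills the rest of the destination. For example, with a hypothetical helper:

#include <linux/string.h>

static void example_copy_name(char dst[32], const char *src)
{
	/*
	 * Copies at most 31 characters, always NUL-terminates, zero-pads the
	 * remainder of dst, and returns -E2BIG if src had to be truncated.
	 */
	strscpy_pad(dst, src, 32);
}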
diff --git a/init/Kconfig b/init/Kconfig
index c9446911cf41..02d13ae27abb 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -719,7 +719,7 @@ config LOG_CPU_MAX_BUF_SHIFT
with more CPUs. Therefore this value is used only when the sum of
contributions is greater than the half of the default kernel ring
buffer as defined by LOG_BUF_SHIFT. The default values are set
- so that more than 64 CPUs are needed to trigger the allocation.
+ so that more than 16 CPUs are needed to trigger the allocation.
Also this option is ignored when "log_buf_len" kernel parameter is
used as it forces an exact (power of two) size of the ring buffer.
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index fe64a49344bf..bc1e3b5a97bd 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -528,8 +528,8 @@ static int log_store(u32 caller_id, int facility, int level,
if (dev_info)
memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
- /* insert message */
- if ((flags & LOG_CONT) || !(flags & LOG_NEWLINE))
+ /* A message without a trailing newline can be continued. */
+ if (!(flags & LOG_NEWLINE))
prb_commit(&e);
else
prb_final_commit(&e);
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 6b1525685277..74e25a1704f2 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -882,8 +882,6 @@ static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
do {
- desc = to_desc(desc_ring, head_id);
-
id = DESC_ID(head_id + 1);
id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ebbe64a0106..0b2067b3c328 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1484,11 +1484,19 @@ void end_page_writeback(struct page *page)
rotate_reclaimable_page(page);
}
+ /*
+ * Writeback does not hold a page reference of its own, relying
+ * on truncation to wait for the clearing of PG_writeback.
+ * But here we must make sure that the page is not freed and
+ * reused before the wake_up_page().
+ */
+ get_page(page);
if (!test_clear_page_writeback(page))
BUG();
smp_mb__after_atomic();
wake_up_page(page, PG_writeback);
+ put_page(page);
}
EXPORT_SYMBOL(end_page_writeback);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7709f0e223f5..586042472ac9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2754,12 +2754,6 @@ int test_clear_page_writeback(struct page *page)
} else {
ret = TestClearPageWriteback(page);
}
- /*
- * NOTE: Page might be free now! Writeback doesn't hold a page
- * reference on its own, it relies on truncation to wait for
- * the clearing of PG_writeback. The below can only access
- * page state that is static across allocation cycles.
- */
if (ret) {
dec_lruvec_state(lruvec, NR_WRITEBACK);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);