author    David S. Miller <davem@davemloft.net>  2019-01-21 14:41:32 -0800
committer David S. Miller <davem@davemloft.net>  2019-01-21 14:41:32 -0800
commit    fa7f3a8d56b38a3ed1880a3780afba82387da277 (patch)
tree      a4628ee966f21963e5e97a6d1a227a3e8138183e
parent    28f9d1a3d4fecdb2352d3984ddeec88146385885 (diff)
parent    49a57857aeea06ca831043acbb0fa5e0f50602fd (diff)
download  linux-fa7f3a8d56b38a3ed1880a3780afba82387da277.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Completely minor snmp doc conflict.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--.clang-format43
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-capacity.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/sp810.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/topology.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,mmp2.txt2
-rw-r--r--Documentation/devicetree/bindings/display/arm,pl11x.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt4
-rw-r--r--Documentation/fb/fbcon.txt8
-rw-r--r--Documentation/networking/index.rst26
-rw-r--r--Documentation/networking/rxrpc.txt45
-rw-r--r--Documentation/networking/snmp_counter.rst130
-rw-r--r--Documentation/networking/timestamping.txt4
-rw-r--r--MAINTAINERS7
-rw-r--r--Makefile10
-rw-r--r--arch/arm64/include/asm/cache.h2
-rw-r--r--arch/arm64/include/asm/mmu.h5
-rw-r--r--arch/arm64/kernel/kaslr.c8
-rw-r--r--arch/h8300/Makefile2
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/bcm47xx/setup.c31
-rw-r--r--arch/mips/cavium-octeon/setup.c2
-rw-r--r--arch/mips/configs/ath79_defconfig1
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h2
-rw-r--r--arch/mips/jazz/jazzdma.c5
-rw-r--r--arch/mips/lantiq/irq.c77
-rw-r--r--arch/mips/pci/msi-octeon.c4
-rw-r--r--arch/nds32/Makefile8
-rw-r--r--arch/openrisc/Makefile3
-rw-r--r--arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--arch/powerpc/kernel/head_8xx.S3
-rw-r--r--arch/powerpc/kernel/signal_64.c7
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c7
-rw-r--r--arch/powerpc/perf/perf_regs.c6
-rw-r--r--arch/powerpc/platforms/4xx/ocm.c6
-rw-r--r--arch/powerpc/platforms/chrp/setup.c3
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c3
-rw-r--r--arch/powerpc/platforms/pseries/pci.c2
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/include/asm/uaccess.h2
-rw-r--r--arch/x86/xen/enlighten_pv.c5
-rw-r--r--arch/x86/xen/time.c12
-rw-r--r--block/bfq-wf2q.c11
-rw-r--r--block/blk-mq-debugfs-zoned.c2
-rw-r--r--block/blk-mq.c3
-rw-r--r--crypto/adiantum.c4
-rw-r--r--crypto/authenc.c14
-rw-r--r--crypto/authencesn.c2
-rw-r--r--crypto/sm3_generic.c2
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/bus.c24
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/nfit/core.c20
-rw-r--r--drivers/acpi/nfit/intel.c8
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/base/regmap/regmap-irq.c8
-rw-r--r--drivers/block/nbd.c5
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/bcm/cipher.c44
-rw-r--r--drivers/crypto/caam/caamalg.c2
-rw-r--r--drivers/crypto/caam/caamhash.c15
-rw-r--r--drivers/crypto/caam/desc.h1
-rw-r--r--drivers/crypto/caam/error.h9
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c2
-rw-r--r--drivers/crypto/ccree/cc_aead.c40
-rw-r--r--drivers/crypto/talitos.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c19
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c30
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h2
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c23
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c33
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c14
-rw-r--r--drivers/gpu/vga/Kconfig1
-rw-r--r--drivers/hwmon/lm80.c4
-rw-r--r--drivers/hwmon/nct6775.c12
-rw-r--r--drivers/hwmon/occ/common.c24
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c7
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c4
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/rdma_core.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c11
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c62
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h35
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c6
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/media/platform/vim2m.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c24
-rw-r--r--drivers/misc/mic/vop/vop_main.c9
-rw-r--r--drivers/mtd/nand/raw/denali.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c21
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c2
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/dsa/realtek-smi.c18
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c22
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c12
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/bcm87xx.c2
-rw-r--r--drivers/net/phy/cortina.c1
-rw-r--r--drivers/net/phy/marvell.c37
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/meson-gxl.c1
-rw-r--r--drivers/net/phy/micrel.c2
-rw-r--r--drivers/net/phy/phy.c19
-rw-r--r--drivers/net/phy/phy_device.c17
-rw-r--r--drivers/net/phy/teranetics.c1
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/nvdimm/nd-core.h4
-rw-r--r--drivers/nvme/host/pci.c21
-rw-r--r--drivers/nvme/target/tcp.c2
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c4
-rw-r--r--drivers/of/overlay.c3
-rw-r--r--drivers/of/pdt.c1
-rw-r--r--drivers/of/property.c1
-rw-r--r--drivers/pci/Kconfig22
-rw-r--r--drivers/pci/msi.c22
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/platform/x86/Kconfig6
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c9
-rw-r--r--drivers/s390/virtio/virtio_ccw.c12
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c9
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c28
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c12
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c3
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h1
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_pm.c26
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c2
-rw-r--r--drivers/scsi/ufs/ufs.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/target_core_user.c88
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/scsi.c20
-rw-r--r--drivers/vhost/vhost.c105
-rw-r--r--drivers/vhost/vhost.h3
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbmem.c19
-rw-r--r--drivers/video/fbdev/offb.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/logo/Kconfig9
-rw-r--r--drivers/virtio/virtio_balloon.c98
-rw-r--r--drivers/virtio/virtio_mmio.c9
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/watchdog/mt7621_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/tqmx86_wdt.c8
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/pvcalls-back.c9
-rw-r--r--drivers/xen/pvcalls-front.c104
-rw-r--r--fs/afs/flock.c4
-rw-r--r--fs/afs/inode.c3
-rw-r--r--fs/afs/protocol_yfs.h11
-rw-r--r--fs/afs/rxrpc.c53
-rw-r--r--fs/afs/server_list.c4
-rw-r--r--fs/afs/yfsclient.c2
-rw-r--r--fs/block_dev.c28
-rw-r--r--fs/btrfs/ctree.h7
-rw-r--r--fs/btrfs/disk-io.c12
-rw-r--r--fs/btrfs/extent-tree.c21
-rw-r--r--fs/btrfs/inode.c5
-rw-r--r--fs/nfs/nfs4file.c8
-rw-r--r--fs/pstore/ram.c12
-rw-r--r--include/dt-bindings/reset/amlogic,meson-axg-reset.h3
-rw-r--r--include/linux/bcma/bcma_soc.h1
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler-gcc.h4
-rw-r--r--include/linux/compiler-intel.h4
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/libnvdimm.h1
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/phy.h6
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/virtio_config.h13
-rw-r--r--include/net/af_rxrpc.h16
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/netfilter/nf_flow_table.h1
-rw-r--r--include/trace/events/afs.h2
-rw-r--r--include/uapi/linux/in.h2
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h1
-rw-r--r--init/Kconfig1
-rw-r--r--kernel/bpf/btf.c2
-rw-r--r--kernel/bpf/cgroup.c1
-rw-r--r--kernel/bpf/map_in_map.c17
-rw-r--r--kernel/bpf/stackmap.c9
-rw-r--r--kernel/dma/swiotlb.c2
-rw-r--r--kernel/seccomp.c4
-rw-r--r--lib/int_sqrt.c2
-rw-r--r--net/bpfilter/bpfilter_umh_blob.S2
-rw-r--r--net/bridge/br_fdb.c5
-rw-r--r--net/bridge/br_forward.c9
-rw-r--r--net/bridge/br_netfilter_ipv6.c1
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c1
-rw-r--r--net/core/filter.c32
-rw-r--r--net/core/lwt_bpf.c1
-rw-r--r--net/core/neighbour.c4
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_trie.c15
-rw-r--r--net/ipv4/fou.c9
-rw-r--r--net/ipv4/ip_gre.c23
-rw-r--r--net/ipv4/ip_input.c1
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/udp.c18
-rw-r--r--net/ipv4/udp_impl.h1
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv6/fou6.c9
-rw-r--r--net/ipv6/ip6_gre.c14
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/ipv6/udp.c18
-rw-r--r--net/ipv6/udp_impl.h1
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/netfilter/nf_flow_table_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c14
-rw-r--r--net/netfilter/nft_flow_offload.c13
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/packet/af_packet.c3
-rw-r--r--net/rxrpc/af_rxrpc.c70
-rw-r--r--net/rxrpc/ar-internal.h19
-rw-r--r--net/rxrpc/call_object.c97
-rw-r--r--net/rxrpc/conn_client.c5
-rw-r--r--net/rxrpc/sendmsg.c24
-rw-r--r--net/sched/act_tunnel_key.c19
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sched/cls_flower.c19
-rw-r--r--net/sched/sch_cake.c5
-rw-r--r--net/sched/sch_cbs.c3
-rw-r--r--net/sched/sch_drr.c7
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_hfsc.c9
-rw-r--r--net/sched/sch_htb.c3
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_qfq.c20
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sctp/ipv6.c5
-rw-r--r--net/sctp/protocol.c4
-rw-r--r--net/sunrpc/auth.c3
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c12
-rw-r--r--net/sunrpc/clnt.c20
-rw-r--r--net/sunrpc/xprt.c3
-rw-r--r--net/sunrpc/xprtrdma/verbs.c10
-rw-r--r--net/sunrpc/xprtsock.c22
-rw-r--r--net/tipc/netlink_compat.c50
-rw-r--r--net/tipc/topsrv.c2
-rw-r--r--net/xdp/xdp_umem.c16
-rw-r--r--samples/bpf/Makefile1
-rw-r--r--samples/bpf/asm_goto_workaround.h16
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/gcc-plugins/arm_ssp_per_task_plugin.c23
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--security/security.c7
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/yama/yama_lsm.c4
-rw-r--r--tools/arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--tools/bpf/bpftool/Makefile9
-rw-r--r--tools/bpf/bpftool/json_writer.c7
-rw-r--r--tools/bpf/bpftool/json_writer.h5
-rw-r--r--tools/include/uapi/linux/pkt_sched.h1163
-rw-r--r--tools/lib/bpf/bpf.c19
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h3
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c1
-rw-r--r--tools/testing/selftests/bpf/Makefile1
-rw-r--r--tools/testing/selftests/bpf/test_progs.c30
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh20
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_aware.sh34
-rw-r--r--tools/testing/selftests/networking/timestamping/txtimestamp.c2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json88
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json31
331 files changed, 3353 insertions, 1443 deletions
diff --git a/.clang-format b/.clang-format
index e6080f5834a3..bc2ffb2a0b53 100644
--- a/.clang-format
+++ b/.clang-format
@@ -72,6 +72,10 @@ ForEachMacros:
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
+ - '__ata_qc_for_each'
+ - 'ata_qc_for_each'
+ - 'ata_qc_for_each_raw'
+ - 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- 'bio_for_each_integrity_vec'
@@ -85,6 +89,7 @@ ForEachMacros:
- 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
+ - 'bpf_for_each_spilled_reg'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
@@ -103,6 +108,8 @@ ForEachMacros:
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
+ - 'drm_atomic_for_each_plane_damage'
+ - 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_encoder'
@@ -121,11 +128,21 @@ ForEachMacros:
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
+ - 'for_each_card_components'
+ - 'for_each_card_links'
+ - 'for_each_card_links_safe'
+ - 'for_each_card_prelinks'
+ - 'for_each_card_rtds'
+ - 'for_each_card_rtds_safe'
+ - 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_cmsghdr'
- 'for_each_compatible_node'
+ - 'for_each_component_dais'
+ - 'for_each_component_dais_safe'
+ - 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
@@ -133,6 +150,10 @@ ForEachMacros:
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
- 'for_each_dma_cap_mask'
+ - 'for_each_dpcm_be'
+ - 'for_each_dpcm_be_rollback'
+ - 'for_each_dpcm_be_safe'
+ - 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
@@ -149,6 +170,7 @@ ForEachMacros:
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
+ - 'for_each_link_codecs'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
@@ -160,6 +182,7 @@ ForEachMacros:
- 'for_each_mem_range_rev'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
+ - 'for_each_msi_entry_safe'
- 'for_each_net'
- 'for_each_netdev'
- 'for_each_netdev_continue'
@@ -183,12 +206,14 @@ ForEachMacros:
- 'for_each_node_with_property'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
+ - 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_plane_in_state'
+ - 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
@@ -206,14 +231,17 @@ ForEachMacros:
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_property_of_node'
+ - 'for_each_registered_fb'
- 'for_each_reserved_mem_region'
- - 'for_each_resv_unavail_range'
+ - 'for_each_rtd_codec_dai'
+ - 'for_each_rtd_codec_dai_rollback'
- 'for_each_rtdcom'
- 'for_each_rtdcom_safe'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_sg'
- 'for_each_sg_page'
+ - 'for_each_sibling_event'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_zone'
@@ -251,6 +279,8 @@ ForEachMacros:
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
+ - 'i3c_bus_for_each_i2cdev'
+ - 'i3c_bus_for_each_i3cdev'
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
@@ -267,11 +297,14 @@ ForEachMacros:
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
+ - 'list_for_each_codec'
+ - 'list_for_each_codec_safe'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
+ - 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
@@ -291,6 +324,7 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
+ - 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
@@ -357,12 +391,14 @@ ForEachMacros:
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
+ - 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'tb_property_for_each'
+ - 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@@ -371,6 +407,11 @@ ForEachMacros:
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
+ - 'virtio_device_for_each_vq'
+ - 'xa_for_each'
+ - 'xas_for_each'
+ - 'xas_for_each_conflict'
+ - 'xas_for_each_marked'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 84262cdb8d29..96fa46cb133c 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -235,4 +235,4 @@ cpus {
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 8f0937db55c5..45730ba60af5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -684,7 +684,7 @@ cpus {
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
[2] ARM Linux Kernel documentation - PSCI bindings
Documentation/devicetree/bindings/arm/psci.txt
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 1b2ab1ff5587..46652bf65147 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -4,7 +4,7 @@ SP810 System Controller
Required properties:
- compatible: standard compatible string for a Primecell peripheral,
- see Documentation/devicetree/bindings/arm/primecell.txt
+ see Documentation/devicetree/bindings/arm/primecell.yaml
for more details
should be: "arm,sp810", "arm,primecell"
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index de9eb0486630..b0d80c0fb265 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -472,4 +472,4 @@ cpus {
===============================================================================
[1] ARM Linux kernel documentation
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
index af376a01f2b7..23b52dc02266 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
@@ -18,4 +18,4 @@ Required Properties:
Each clock is assigned an identifier and client nodes use this identifier
to specify the clock which they consume.
-All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>.
+All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt
index ef89ab46b2c9..572fa2773ec4 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.txt
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt
@@ -1,6 +1,6 @@
* ARM PrimeCell Color LCD Controller PL110/PL111
-See also Documentation/devicetree/bindings/arm/primecell.txt
+See also Documentation/devicetree/bindings/arm/primecell.yaml
Required properties:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 38ca2201e8ae..2e097b57f170 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -14,8 +14,6 @@ Required properties:
"marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
SoCs (either from AP or CP), see
- Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
- and
Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
for specific details about the offset property.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index b83bb8249074..a3be5298a5eb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -78,7 +78,7 @@ Sub-nodes:
PPI affinity can be expressed as a single "ppi-partitions" node,
containing a set of sub-nodes, each with the following property:
- affinity: Should be a list of phandles to CPU nodes (as described in
-Documentation/devicetree/bindings/arm/cpus.txt).
+ Documentation/devicetree/bindings/arm/cpus.yaml).
GICv3 has one or more Interrupt Translation Services (ITS) that are
used to route Message Signalled Interrupts (MSI) to the CPUs.
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 0b8cc533ca83..cf759e5f9b10 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function
= EXAMPLE
The following example represents the GLINK RPM node on a MSM8996 device, with
the function for the "rpm_request" channel defined, which is used for
-regualtors and root clocks.
+regulators and root clocks.
apcs_glb: mailbox@9820000 {
compatible = "qcom,msm8996-apcs-hmss-global";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index a35af2dafdad..49e1d72d3648 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
@@ -41,12 +41,12 @@ processor ID) and a string identifier.
- qcom,local-pid:
Usage: required
Value type: <u32>
- Definition: specifies the identfier of the local endpoint of this edge
+ Definition: specifies the identifier of the local endpoint of this edge
- qcom,remote-pid:
Usage: required
Value type: <u32>
- Definition: specifies the identfier of the remote endpoint of this edge
+ Definition: specifies the identifier of the remote endpoint of this edge
= SUBNODES
Each SMP2P pair contain a set of inbound and outbound entries, these are
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 62af30511a95..60a5ec04e8f0 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -163,6 +163,14 @@ C. Boot options
be preserved until there actually is some text is output to the console.
This option causes fbcon to bind immediately to the fbdev device.
+7. fbcon=logo-pos:<location>
+
+ The only possible 'location' is 'center' (without quotes), and when
+ given, the bootup logo is moved from the default top-left corner
+ location to the center of the framebuffer. If more than one logo is
+ displayed due to multiple CPUs, the collected line of logos is moved
+ as a whole.
+
C. Attaching, Detaching and Unloading
Before going on to how to attach, detach and unload the framebuffer console, an
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 6a47629ef8ed..59e86de662cd 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -11,19 +11,19 @@ Contents:
batman-adv
can
can_ucan_protocol
- dpaa2/index
- e100
- e1000
- e1000e
- fm10k
- igb
- igbvf
- ixgb
- ixgbe
- ixgbevf
- i40e
- iavf
- ice
+ device_drivers/freescale/dpaa2/index
+ device_drivers/intel/e100
+ device_drivers/intel/e1000
+ device_drivers/intel/e1000e
+ device_drivers/intel/fm10k
+ device_drivers/intel/igb
+ device_drivers/intel/igbvf
+ device_drivers/intel/ixgb
+ device_drivers/intel/ixgbe
+ device_drivers/intel/ixgbevf
+ device_drivers/intel/i40e
+ device_drivers/intel/iavf
+ device_drivers/intel/ice
kapi
z8530book
msg_zerocopy
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index c9d052e0cf51..2df5894353d6 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1000,51 +1000,6 @@ The kernel interface functions are as follows:
size should be set when the call is begun. tx_total_len may not be less
than zero.
- (*) Check to see the completion state of a call so that the caller can assess
- whether it needs to be retried.
-
- enum rxrpc_call_completion {
- RXRPC_CALL_SUCCEEDED,
- RXRPC_CALL_REMOTELY_ABORTED,
- RXRPC_CALL_LOCALLY_ABORTED,
- RXRPC_CALL_LOCAL_ERROR,
- RXRPC_CALL_NETWORK_ERROR,
- };
-
- int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
- enum rxrpc_call_completion *_compl,
- u32 *_abort_code);
-
- On return, -EINPROGRESS will be returned if the call is still ongoing; if
- it is finished, *_compl will be set to indicate the manner of completion,
- *_abort_code will be set to any abort code that occurred. 0 will be
- returned on a successful completion, -ECONNABORTED will be returned if the
- client failed due to a remote abort and anything else will return an
- appropriate error code.
-
- The caller should look at this information to decide if it's worth
- retrying the call.
-
- (*) Retry a client call.
-
- int rxrpc_kernel_retry_call(struct socket *sock,
- struct rxrpc_call *call,
- struct sockaddr_rxrpc *srx,
- struct key *key);
-
- This attempts to partially reinitialise a call and submit it again while
- reusing the original call's Tx queue to avoid the need to repackage and
- re-encrypt the data to be sent. call indicates the call to retry, srx the
- new address to send it to and key the encryption key to use for signing or
- encrypting the packets.
-
- For this to work, the first Tx data packet must still be in the transmit
- queue, and currently this is only permitted for local and network errors
- and the call must not have been aborted. Any partially constructed Tx
- packet is left as is and can continue being filled afterwards.
-
- It returns 0 if the call was requeued and an error otherwise.
-
(*) Get call RTT.
u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index 486ab33acc3a..c5642f430d2e 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -366,6 +366,27 @@ to the accept queue.
TCP Fast Open
=============
+* TcpEstabResets
+Defined in `RFC1213 tcpEstabResets`_.
+
+.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48
+
+* TcpAttemptFails
+Defined in `RFC1213 tcpAttemptFails`_.
+
+.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48
+
+* TcpOutRsts
+Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
+the 'segments sent containing the RST flag', but in the Linux kernel, this
+counter indicates the segments the kernel tried to send. The sending
+process might fail due to some errors (e.g. memory alloc failed).
+
+.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
+
+
+TCP Fast Path
+=============
When kernel receives a TCP packet, it has two paths to handler the
packet, one is fast path, another is slow path. The comment in kernel
code provides a good explanation of them, I pasted them below::
@@ -413,7 +434,6 @@ increase 1.
TCP abort
=========
-
* TcpExtTCPAbortOnData
It means TCP layer has data in flight, but need to close the
@@ -589,7 +609,6 @@ packet yet, the sender would know packet 4 is out of order. The TCP
stack of kernel will increase TcpExtTCPSACKReorder for both of the
above scenarios.
-
DSACK
=====
The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report
@@ -612,8 +631,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
DSACK to the sender.
* TcpExtTCPDSACKRecv
-
-The TCP stack receives a DSACK, which indicate an acknowledged
+The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
* TcpExtTCPDSACKOfoRecv
@@ -621,6 +639,56 @@ duplicate packet is received.
The TCP stack receives a DSACK, which indicate an out of order
duplicate packet is received.
+invalid SACK and DSACK
+======================
+When a SACK (or DSACK) block is invalid, a corresponding counter would
+be updated. The validation method is based on the start/end sequence
+number of the SACK block. For more details, please refer to the comment
+of the function tcp_is_sackblock_valid in the kernel source code. A
+SACK option could have up to 4 blocks; they are checked
+individually. E.g., if 3 blocks of a SACK are invalid, the
+corresponding counter would be updated 3 times. The comment of the
+`Add counters for discarded SACK blocks`_ patch has additional
+explanation:
+
+.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
+
+* TcpExtTCPSACKDiscard
+This counter indicates how many SACK blocks are invalid. If the invalid
+SACK block is caused by ACK recording, the TCP stack will only ignore
+it and won't update this counter.
+
+* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
+When a DSACK block is invalid, one of these two counters would be
+updated. Which counter will be updated depends on the undo_marker flag
+of the TCP socket. If the undo_marker is not set, the TCP stack isn't
+likely to re-transmit any packets; if we still receive an invalid
+DSACK block, the reason might be that the packet was duplicated in the
+middle of the network. In such a scenario, TcpExtTCPDSACKIgnoredNoUndo
+will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
+will be updated. As implied in its name, it might be an old packet.
+
+SACK shift
+==========
+The Linux networking stack stores data in the sk_buff struct (skb for
+short). If a SACK block crosses multiple skbs, the TCP stack will try
+to re-arrange data in these skbs. E.g. if a SACK block acknowledges seq
+10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and
+15 in skb2 would be moved to skb1. This operation is 'shift'. If a
+SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has
+seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
+discarded; this operation is 'merge'.
+
+* TcpExtTCPSackShifted
+A skb is shifted
+
+* TcpExtTCPSackMerged
+A skb is merged
+
+* TcpExtTCPSackShiftFallback
+A skb should be shifted or merged, but the TCP stack doesn't do it for
+some reason.
+
TCP out of order
================
* TcpExtTCPOFOQueue
@@ -721,6 +789,60 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_).
.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9
.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11
+TCP receive window
+==================
+* TcpExtTCPWantZeroWindowAdv
+Depending on current memory usage, the TCP stack tries to set receive
+window to zero. But the receive window might still be a non-zero
+value. For example, if the previous window size is 10, and the TCP
+stack receives 3 bytes, the current window size would be 7 even if the
+window size calculated by the memory usage is zero.
+
+* TcpExtTCPToZeroWindowAdv
+The TCP receive window is set to zero from a non-zero value.
+
+* TcpExtTCPFromZeroWindowAdv
+The TCP receive window is set to a non-zero value from zero.
+
+
+Delayed ACK
+===========
+The TCP Delayed ACK is a technique which is used for reducing the
+packet count in the network. For more details, please refer to the
+`Delayed ACK wiki`_
+
+.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment
+
+* TcpExtDelayedACKs
+A delayed ACK timer expires. The TCP stack will send a pure ACK packet
+and exit the delayed ACK mode.
+
+* TcpExtDelayedACKLocked
+A delayed ACK timer expires, but the TCP stack can't send an ACK
+immediately because the socket is locked by a userspace program. The
+TCP stack will send a pure ACK later (after the userspace program
+unlocks the socket). When the TCP stack sends the pure ACK later, the
+TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK
+mode.
+
+* TcpExtDelayedACKLost
+It will be updated when the TCP stack receives a packet which has been
+ACKed. A Delayed ACK loss might cause this issue, but it would also be
+triggered by other reasons, such as a packet being duplicated in the
+network.
+
+Tail Loss Probe (TLP)
+=====================
+TLP is an algorithm which is used to detect TCP packet loss. For more
+details, please refer to the `TLP paper`_.
+
+.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01
+
+* TcpExtTCPLossProbes
+A TLP probe packet is sent.
+
+* TcpExtTCPLossProbeRecovery
+A packet loss is detected and recovered by TLP.
examples
========
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 1be0b6f9e0cb..9d1432e0aaa8 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -417,7 +417,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set.
Hardware time stamping must also be initialized for each device driver
that is expected to do hardware time stamping. The parameter is defined in
-/include/linux/net_tstamp.h as:
+include/uapi/linux/net_tstamp.h as:
struct hwtstamp_config {
int flags; /* no flags defined right now, must be zero */
@@ -487,7 +487,7 @@ enum {
HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
/* for the complete list of values, please check
- * the include file /include/linux/net_tstamp.h
+ * the include file include/uapi/linux/net_tstamp.h
*/
};
diff --git a/MAINTAINERS b/MAINTAINERS
index a592b9992b46..8e2c82f4c72f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3471,10 +3471,9 @@ F: drivers/i2c/busses/i2c-octeon*
F: drivers/i2c/busses/i2c-thunderx*
CAVIUM LIQUIDIO NETWORK DRIVER
-M: Derek Chickles <derek.chickles@caviumnetworks.com>
-M: Satanand Burla <satananda.burla@caviumnetworks.com>
-M: Felix Manlunas <felix.manlunas@caviumnetworks.com>
-M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
+M: Derek Chickles <dchickles@marvell.com>
+M: Satanand Burla <sburla@marvell.com>
+M: Felix Manlunas <fmanlunas@marvell.com>
L: netdev@vger.kernel.org
W: http://www.cavium.com
S: Supported
diff --git a/Makefile b/Makefile
index 499b96810995..f5b1d0d168e0 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Shy Crocodile
# *DOCUMENTATION*
@@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION
endif
endif
+PHONY += prepare0
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc
# archprepare is used in arch Makefiles and when processed asm symlink,
# version.h and scripts_basic is processed / created.
-# Listed in dependency order
-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
@@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
mrproper-dirs := $(addprefix _mrproper_,scripts)
-PHONY += $(mrproper-dirs) mrproper archmrproper
+PHONY += $(mrproper-dirs) mrproper
$(mrproper-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
-mrproper: clean archmrproper $(mrproper-dirs)
+mrproper: clean $(mrproper-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index eb43e09c1980..926434f413fa 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -60,8 +60,6 @@
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
-#else
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index ac352accb3d9..3e8063f4f9d3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -60,8 +60,11 @@ static inline bool arm64_kernel_use_ng_mappings(void)
* later determine that kpti is required, then
* kpti_install_ng_mappings() will make them non-global.
*/
+ if (arm64_kernel_unmapped_at_el0())
+ return true;
+
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return arm64_kernel_unmapped_at_el0();
+ return false;
/*
* KASLR is enabled so we're going to be enabling kpti on non-broken
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8abe9c..ba6b41790fcd 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/types.h>
+#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
return ret;
}
-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
{
static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case.
*/
- cmdline = get_cmdline(fdt);
+ cmdline = kaslr_get_cmdline(fdt);
str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
return 0;
@@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
return offset;
}
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 4003ddc616e1..f801f3708a89 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/
boot := arch/h8300/boot
-archmrproper:
-
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 320d86f192ee..171290f9f1de 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig
NM := $(CROSS_COMPILE)nm -B
READELF := $(CROSS_COMPILE)readelf
-export AWK
-
CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
OBJCOPYFLAGS := --strip-all
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 787290781b8c..0d14f51d0002 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3155,6 +3155,7 @@ config MIPS32_O32
config MIPS32_N32
bool "Kernel support for n32 binaries"
depends on 64BIT
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49e608e..fe3773539eff 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
pm_power_off = bcm47xx_machine_halt;
}
+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+ struct device *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ err = dev_set_name(dev, "bcm47xx_soc");
+ if (err) {
+ pr_err("Failed to set SoC device name: %d\n", err);
+ kfree(dev);
+ return NULL;
+ }
+
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err)
+ pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+ return dev;
+}
+#endif
+
/*
* This finishes bus initialization doing things that were not possible without
* kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
int err;
+ bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+ if (!bcm47xx_bus.bcma.dev)
+ panic("Failed to setup SoC device\n");
+
err = bcma_host_soc_init(&bcm47xx_bus.bcma);
if (err)
panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
+ if (device_register(bcm47xx_bus.bcma.dev))
+ pr_err("Failed to register SoC device\n");
bcma_bus_register(&bcm47xx_bus.bcma.bus);
break;
#endif
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 2c79ab52977a..8bf43c5a7bc7 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored)
" sync \n"
" synci ($0) \n");
- relocated_kexec_smp_wait(NULL);
+ kexec_reboot();
}
#endif
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 4e4ec779f182..6f981af67826 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
index c6b63a409641..6dd8ad2409dc 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -18,8 +18,6 @@
#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
-#define MIPS_CPU_TIMER_IRQ 7
-
#define MAX_IM 5
#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 141076325307..0b424214a5e9 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -19,8 +19,6 @@
#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
-#define MIPS_CPU_TIMER_IRQ 7
-
#define MAX_IM 5
#endif
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 6256d35dbf4d..bedb5047aff3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+ CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc3312ed11..6549499eb202 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
.irq_set_type = ltq_eiu_settype,
};
-static void ltq_hw_irqdispatch(int module)
+static void ltq_hw_irq_handler(struct irq_desc *desc)
{
+ int module = irq_desc_get_irq(desc) - 2;
u32 irq;
+ int hwirq;
irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
if (irq == 0)
@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
* other bits might be bogus
*/
irq = __fls(irq);
- do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
+ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is a EBU irq, we need to ack it or get a deadlock */
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
LTQ_EBU_PCC_ISTAT);
}
-#define DEFINE_HWx_IRQDISPATCH(x) \
- static void ltq_hw ## x ## _irqdispatch(void) \
- { \
- ltq_hw_irqdispatch(x); \
- }
-DEFINE_HWx_IRQDISPATCH(0)
-DEFINE_HWx_IRQDISPATCH(1)
-DEFINE_HWx_IRQDISPATCH(2)
-DEFINE_HWx_IRQDISPATCH(3)
-DEFINE_HWx_IRQDISPATCH(4)
-
-#if MIPS_CPU_TIMER_IRQ == 7
-static void ltq_hw5_irqdispatch(void)
-{
- do_IRQ(MIPS_CPU_TIMER_IRQ);
-}
-#else
-DEFINE_HWx_IRQDISPATCH(5)
-#endif
-
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
- int irq;
-
- if (!pending) {
- spurious_interrupt();
- return;
- }
-
- pending >>= CAUSEB_IP;
- while (pending) {
- irq = fls(pending) - 1;
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
- pending &= ~BIT(irq);
- }
-}
-
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct irq_chip *chip = &ltq_irq_type;
@@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
for (i = 0; i < MAX_IM; i++)
irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
- if (cpu_has_vint) {
- pr_info("Setting up vectored interrupts\n");
- set_vi_handler(2, ltq_hw0_irqdispatch);
- set_vi_handler(3, ltq_hw1_irqdispatch);
- set_vi_handler(4, ltq_hw2_irqdispatch);
- set_vi_handler(5, ltq_hw3_irqdispatch);
- set_vi_handler(6, ltq_hw4_irqdispatch);
- set_vi_handler(7, ltq_hw5_irqdispatch);
- }
-
ltq_domain = irq_domain_add_linear(node,
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
-#ifndef CONFIG_MIPS_MT_SMP
- set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
- IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#else
- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
- IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#endif
-
/* tell oprofile which irq to use */
ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
- /*
- * if the timer irq is not one of the mips irqs we need to
- * create a mapping
- */
- if (MIPS_CPU_TIMER_IRQ != 7)
- irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
-
/* the external interrupts are optional and xway only */
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
@@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
- return MIPS_CPU_TIMER_IRQ;
+ return CP0_LEGACY_COMPARE_IRQ;
}
static struct of_device_id __initdata of_irq_ids[] = {
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10e..288b58b00dc8 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
int irq;
struct irq_chip *msi;
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+ return 0;
+ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 0a935c136ec2..ac3482882cf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
KBUILD_DEFCONFIG := defconfig
-comma = ,
-
-
ifdef CONFIG_FUNCTION_TRACER
arch-y += -malways-save-lp -mno-relax
endif
@@ -54,8 +51,6 @@ endif
boot := arch/nds32/boot
core-y += $(boot)/dts/
-.PHONY: FORCE
-
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@@ -68,9 +63,6 @@ prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
-CLEAN_FILES += include/asm-nds32/constants.h*
-
-# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 70e06d34006c..bf10141c7426 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -20,7 +20,6 @@
KBUILD_DEFCONFIG := or1ksim_defconfig
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
-LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
@@ -50,5 +49,3 @@ else
BUILTIN_DTB := n
endif
core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
-
-all: vmlinux
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
+ PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 57deb1e9ffea..20cc816b3508 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -852,11 +852,12 @@ start_here:
/* set up the PTE pointers for the Abatron bdiGDB.
*/
- tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
+ lis r6, swapper_pg_dir@h
+ ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5)
/* Now turn on the MMU for real! */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index bd5e6834ca69..6794466f6420 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
- }
+ } else
#endif
- /* Fall through, for non-TM restore */
- if (!MSR_TM_ACTIVE(msr)) {
+ {
/*
+ * Fall through, for non-TM restore
+ *
* Unset MSR[TS] on the thread regs since MSR from user
* context does not have MSR active, and recheckpoint was
* not called since restore_tm_sigcontexts() was not called
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 29746dc28df5..517662a56bdc 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -967,13 +967,6 @@ out:
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
-unsigned long __init arch_syscall_addr(int nr)
-{
- return sys_call_table[nr*2];
-}
-#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
-
#ifdef PPC64_ELF_ABI_v1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 5c36b3a8d47a..3349f3f8fe84 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
+ PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
u64 perf_reg_value(struct pt_regs *regs, int idx)
@@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
!is_sier_available()))
return 0;
+ if (idx == PERF_REG_POWERPC_MMCRA &&
+ (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
+ IS_ENABLED(CONFIG_PPC32)))
+ return 0;
+
return regs_get_register(regs, pt_regs_offset[idx]);
}
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index a1aaa1569d7c..f0e488d97567 100644
--- a/arch/powerpc/platforms/4xx/ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
@@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
continue;
seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
- seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys));
+ seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys));
seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal);
- seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys));
+ seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys));
seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
@@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
blk->size, blk->owner);
}
- seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys));
+ seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys));
seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index e66644e0fb40..9438fa0fc355 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void)
/* see if there is a keyboard in the device tree
with a parent of type "adb" */
for_each_node_by_name(kbd, "keyboard")
- if (kbd->parent && kbd->parent->type
- && strcmp(kbd->parent->type, "adb") == 0)
+ if (of_node_is_type(kbd->parent, "adb"))
break;
of_node_put(kbd);
if (kbd)
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index d7f742ed48ba..3f58c7dbd581 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
}
} else {
/* Create a group for 1 GPU and attached NPUs for POWER8 */
- pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL);
+ pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
table_group = &pe->npucomp->table_group;
table_group->ops = &pnv_npu_peers_ops;
iommu_register_group(table_group, hose->global_number,
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1d6406a051f1..7db3119f8a5b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void)
list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
- if (phb->type == PNV_PHB_NPU_NVLINK)
+ if (phb->type == PNV_PHB_NPU_NVLINK ||
+ phb->type == PNV_PHB_NPU_OCAPI)
continue;
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 7725825d887d..37a77e57893e 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void)
if (!of_device_is_compatible(nvdn->parent,
"ibm,power9-npu"))
continue;
+#ifdef CONFIG_PPC_POWERNV
WARN_ON_ONCE(pnv_npu2_init(hose));
+#endif
break;
}
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 15af091611e2..4b4a7f32b68e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -617,7 +617,7 @@ config X86_INTEL_QUARK
config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
- depends on X86 && ACPI
+ depends on X86 && ACPI && PCI
select COMMON_CLK
select PINCTRL
select IOSF_MBI
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a77445d1b034..780f2b42c8ef 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
{
if (unlikely(!access_ok(ptr,len)))
return 0;
- __uaccess_begin();
+ __uaccess_begin_nospec();
return 1;
}
#define user_access_begin(a,b) user_access_begin(a,b)
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 2f6787fc7106..c54a493e139a 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
-#ifdef CONFIG_X86_X2APIC
- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
-#endif
- val &= ~X2APIC_ENABLE;
+ val &= ~X2APIC_ENABLE;
break;
}
return val;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 72bf446c3fee..6e29794573b7 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -361,8 +361,6 @@ void xen_timer_resume(void)
{
int cpu;
- pvclock_resume();
-
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
};
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
+static u64 xen_clock_value_saved;
void xen_save_time_memory_area(void)
{
struct vcpu_register_time_memory_area t;
int ret;
+ xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
+
if (!xen_clock)
return;
@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
int ret;
if (!xen_clock)
- return;
+ goto out;
t.addr.v = &xen_clock->pvti;
@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
if (ret != 0)
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
ret);
+
+out:
+ /* Need pvclock_resume() before using xen_clocksource_read(). */
+ pvclock_resume();
+ xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
}
static void xen_setup_vsyscall_time_info(void)
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 63e0f12be7c9..72adbbe975d5 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
}
/**
- * __bfq_deactivate_entity - deactivate an entity from its service tree.
- * @entity: the entity to deactivate.
+ * __bfq_deactivate_entity - update sched_data and service trees for
+ * entity, so as to represent entity as inactive
+ * @entity: the entity being deactivated.
* @ins_into_idle_tree: if false, the entity will not be put into the
* idle tree.
*
- * Deactivates an entity, independently of its previous state. Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it into the idle
- * tree.
+ * If necessary and allowed, puts entity into the idle tree. NOTE:
+ * entity may be on no tree if in service.
*/
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
index fb2c82c351e4..038cb627c868 100644
--- a/block/blk-mq-debugfs-zoned.c
+++ b/block/blk-mq-debugfs-zoned.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
- *
- * This file is released under the GPL.
*/
#include <linux/blkdev.h>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ba37b9e15e9..8f5b533764ca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
- struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
+ struct blk_mq_alloc_data data = { .flags = 0};
struct request *rq;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
@@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq_qos_throttle(q, bio);
+ data.cmd_flags = bio->bi_opf;
rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 6651e713c45d..5564e73266a6 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
ictx = skcipher_instance_ctx(inst);
/* Stream cipher, e.g. "xchacha12" */
+ crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
+ skcipher_crypto_instance(inst));
err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
0, crypto_requires_sync(algt->type,
algt->mask));
@@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
+ crypto_set_spawn(&ictx->blockcipher_spawn,
+ skcipher_crypto_instance(inst));
err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
if (err)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 37f54d1b2f66..4be293a4b5f0 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+ /*
+ * RTA_OK() didn't align the rtattr's payload when validating that it
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
+ * aligned boundary. To avoid confusion, require that the rtattr
+ * payload be exactly the param struct, which has a 4-byte aligned size.
+ */
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
param = RTA_DATA(rta);
keys->enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
if (keylen < keys->enckeylen)
return -EINVAL;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 80a25cc04aec..4741fe89ba2c 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
struct aead_request *req = areq->data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
- aead_request_complete(req, err);
+ authenc_esn_request_complete(req, err);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9a5c60f08aad..c0cf87ae7ef6 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
for (i = 0; i <= 63; i++) {
- ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
ss2 = ss1 ^ rol32(a, 12);
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7c6afc111d76..bb857421c2e8 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -41,7 +41,8 @@ acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
-acpi-y += acpi_lpss.o acpi_apd.o
+acpi-$(CONFIG_PCI) += acpi_lpss.o
+acpi-y += acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 99d820a693a8..5c093ce01bcd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
goto error0;
}
- /*
- * ACPI 2.0 requires the EC driver to be loaded and work before
- * the EC device is found in the namespace (i.e. before
- * acpi_load_tables() is called).
- *
- * This is accomplished by looking for the ECDT table, and getting
- * the EC parameters out of that.
- *
- * Ignore the result. Not having an ECDT is not fatal.
- */
- status = acpi_ec_ecdt_probe();
-
#ifdef CONFIG_X86
if (!acpi_ioapic) {
/* compatible (0) means level (3) */
@@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /*
+ * ACPI 2.0 requires the EC driver to be loaded and work before the EC
+ * device is found in the namespace.
+ *
+ * This is accomplished by looking for the ECDT table and getting the EC
+ * parameters out of that.
+ *
+ * Do that before calling acpi_initialize_objects() which may trigger EC
+ * address space accesses.
+ */
+ acpi_ec_ecdt_probe();
+
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7e6952edb5b0..6a9e1fb8913a 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -81,7 +81,11 @@ void acpi_debugfs_init(void);
#else
static inline void acpi_debugfs_init(void) { return; }
#endif
+#ifdef CONFIG_PCI
void acpi_lpss_init(void);
+#else
+static inline void acpi_lpss_init(void) {}
+#endif
void acpi_apd_init(void);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 011d3db19c80..5143e11e3b0f 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -26,7 +26,6 @@
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"
-#include "intel.h"
/*
* For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
}
EXPORT_SYMBOL(to_nfit_uuid);
-static struct acpi_nfit_desc *to_acpi_nfit_desc(
- struct nvdimm_bus_descriptor *nd_desc)
-{
- return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
-}
-
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -419,7 +412,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
union acpi_object in_obj, in_buf, *out_obj;
const struct nd_cmd_desc *desc = NULL;
@@ -721,6 +714,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_desc *acpi_desc;
struct nfit_mem *nfit_mem;
+ u16 physical_id;
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -728,10 +722,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
memdev = __to_nfit_memdev(nfit_mem);
if (memdev->device_handle == device_handle) {
+ *flags = memdev->flags;
+ physical_id = memdev->physical_id;
mutex_unlock(&acpi_desc->init_mutex);
mutex_unlock(&acpi_desc_lock);
- *flags = memdev->flags;
- return memdev->physical_id;
+ return physical_id;
}
}
mutex_unlock(&acpi_desc->init_mutex);
@@ -2231,7 +2226,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
if (!nd_set)
return -ENOMEM;
- ndr_desc->nd_set = nd_set;
guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
@@ -3367,7 +3361,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init);
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct device *dev = acpi_desc->dev;
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
@@ -3384,7 +3378,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
if (nvdimm)
return 0;
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 850b2927b4e7..f70de71f79d6 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
static void nvdimm_invalidate_cache(void);
-static int intel_security_unlock(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm,
return 0;
}
-static int intel_security_erase(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key,
enum nvdimm_passphrase_type ptype)
{
@@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm,
return 0;
}
-static int intel_security_query_overwrite(struct nvdimm *nvdimm)
+static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm)
return 0;
}
-static int intel_security_overwrite(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
const struct nvdimm_key_data *nkey)
{
int rc;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4ca7a6b4eaae..8218db17ebdb 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers"
config PATA_ACPI
tristate "ACPI firmware driver for PATA"
- depends on ATA_ACPI && ATA_BMDMA
+ depends on ATA_ACPI && ATA_BMDMA && PCI
help
This option enables an ACPI method driver which drives
motherboard PATA controller interfaces through the ACPI
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 2e9d1cfe3aeb..211607986134 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -718,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
+ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1bd1145ad8b5..330c1f7e9665 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* suppress pointless writes.
*/
for (i = 0; i < d->chip->num_regs; i++) {
+ if (!d->chip->mask_base)
+ continue;
+
reg = d->chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
if (d->chip->mask_invert) {
@@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
const struct regmap_irq_type *t = &irq_data->type;
if ((t->types_supported & type) != type)
- return -ENOTSUPP;
+ return 0;
reg = t->type_reg_offset / map->reg_stride;
@@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
/* Mask all the interrupts by default */
for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i];
+ if (!chip->mask_base)
+ continue;
+
reg = chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
if (chip->mask_invert)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08696f5f00bb..7c9a949e876b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
set_capacity(nbd->disk, config->bytesize >> 9);
if (bdev) {
- if (bdev->bd_disk)
+ if (bdev->bd_disk) {
bd_set_size(bdev, config->bytesize);
- else
+ set_blocksize(bdev, config->blksize);
+ } else
bdev->bd_invalidated = 1;
bdput(bdev);
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5a90075f719d..0be55fcc19ba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
depends on ARCH_BCM_IPROC
depends on MAILBOX
default m
+ select CRYPTO_AUTHENC
select CRYPTO_DES
select CRYPTO_MD5
select CRYPTO_SHA1
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c9393ffb70ed..5567cbda2798 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
struct spu_hw *spu = &iproc_priv.spu;
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- const u8 *origkey = key;
- const unsigned int origkeylen = keylen;
-
- int ret = 0;
+ struct crypto_authenc_keys keys;
+ int ret;
flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
keylen);
flow_dump(" key: ", key, keylen);
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ ret = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (ret)
goto badkey;
- param = RTA_DATA(rta);
- ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < ctx->enckeylen)
- goto badkey;
- if (ctx->enckeylen > MAX_KEY_SIZE)
+ if (keys.enckeylen > MAX_KEY_SIZE ||
+ keys.authkeylen > MAX_KEY_SIZE)
goto badkey;
- ctx->authkeylen = keylen - ctx->enckeylen;
-
- if (ctx->authkeylen > MAX_KEY_SIZE)
- goto badkey;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
- memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
/* May end up padding auth key. So make sure it's zeroed. */
memset(ctx->authkey, 0, sizeof(ctx->authkey));
- memcpy(ctx->authkey, key, ctx->authkeylen);
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
switch (ctx->alg->cipher_info.alg) {
case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
u32 tmp[DES_EXPKEY_WORDS];
u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
- if (des_ekey(tmp, key) == 0) {
+ if (des_ekey(tmp, keys.enckey) == 0) {
if (crypto_aead_get_flags(cipher) &
CRYPTO_TFM_REQ_WEAK_KEY) {
crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
break;
case CIPHER_ALG_3DES:
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)key;
+ const u32 *K = (const u32 *)keys.enckey;
u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
- ret =
- crypto_aead_setkey(ctx->fallback_cipher, origkey,
- origkeylen);
+ ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
if (ret) {
flow_log(" fallback setkey() returned:%d\n", ret);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 92e593e2069a..80ae69f906fb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void)
* Skip algorithms requiring message digests
* if MD or MD size is not supported by device.
*/
- if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ if (is_mdha(c2_alg_sel) &&
(!md_inst || t_alg->aead.maxauthsize > md_limit))
continue;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 81712aa5d0f2..bb1a2cdf1951 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
desc = edesc->hw_desc;
- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, state->buf_dma)) {
- dev_err(jrdev, "unable to map src\n");
- goto unmap;
- }
+ if (buflen) {
+ state->buf_dma = dma_map_single(jrdev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map src\n");
+ goto unmap;
+ }
- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+ }
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index ec10230178c5..4b6854bf896a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,6 +1155,7 @@
#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 67ea94079837..8c6b83e02a70 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,6 +7,9 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
+
+#include "desc.h"
+
#define CAAM_ERROR_STR_MAX 302
void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii);
+
+static inline bool is_mdha(u32 algtype)
+{
+ return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
+ OP_ALG_CHA_MDHA;
+}
#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index e34e4df8fd24..fe070d75c842 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
/* ORH error code */
err = READ_ONCE(*sr->resp.orh) & 0xff;
- softreq_destroy(sr);
if (sr->callback)
sr->callback(sr->cb_arg, err);
+ softreq_destroy(sr);
req_completed++;
}
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index f2643cda45db..a3527c00b29a 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
struct cc_crypto_req cc_req = {};
- struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
- int rc = -EINVAL;
unsigned int seq_len = 0;
struct device *dev = drvdata_to_dev(ctx->drvdata);
+ const u8 *enckey, *authkey;
+ int rc;
dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
- param = RTA_DATA(rta);
- ctx->enc_keylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < ctx->enc_keylen)
+ struct crypto_authenc_keys keys;
+
+ rc = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (rc)
goto badkey;
- ctx->auth_keylen = keylen - ctx->enc_keylen;
+ enckey = keys.enckey;
+ authkey = keys.authkey;
+ ctx->enc_keylen = keys.enckeylen;
+ ctx->auth_keylen = keys.authkeylen;
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
+ rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
- ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
- CTR_RFC3686_NONCE_SIZE);
+ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
/* Set CTR key size */
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
}
} else { /* non-authenc - has just one key */
+ enckey = key;
+ authkey = NULL;
ctx->enc_keylen = keylen;
ctx->auth_keylen = 0;
}
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
/* STAT_PHASE_1: Copy key to ctx */
/* Get key material */
- memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+ memcpy(ctx->enckey, enckey, ctx->enc_keylen);
if (ctx->enc_keylen == 24)
memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
- memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+ ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
- rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
goto badkey;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 45e20707cef8..f8e2c5c3f4eb 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
- void *err;
if (cryptlen + authsize > max_len) {
dev_err(dev, "length exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
- if (ivsize)
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
if (!dst || dst == src) {
src_len = assoclen + cryptlen + authsize;
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
dst_nents = sg_nents_for_len(dst, dst_len);
if (dst_nents < 0) {
dev_err(dev, "Invalid number of dst SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
/* if its a ahash, add space for a second desc next to the first one */
if (is_sec1 && !dst)
alloc_len += sizeof(struct talitos_desc);
+ alloc_len += ivsize;
edesc = kmalloc(alloc_len, GFP_DMA | flags);
- if (!edesc) {
- err = ERR_PTR(-ENOMEM);
- goto error_sg;
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+ if (ivsize) {
+ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
}
memset(&edesc->desc, 0, sizeof(edesc->desc));
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
DMA_BIDIRECTIONAL);
}
return edesc;
-error_sg:
- if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
- return err;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index dafc645b2e4e..b083b219b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -531,17 +531,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
int ret;
- int height;
- struct amdgpu_device *adev = dev->dev_private;
- int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
- int pitch = mode_cmd->pitches[0] / cpp;
-
- pitch = amdgpu_align_pitch(adev, pitch, cpp, false);
- if (mode_cmd->pitches[0] != pitch) {
- DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n",
- pitch, mode_cmd->pitches[0]);
- return ERR_PTR(-EINVAL);
- }
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
@@ -556,13 +545,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- height = ALIGN(mode_cmd->height, 8);
- if (obj->size < pitch * height) {
- DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n",
- pitch * height, obj->size);
- return ERR_PTR(-EINVAL);
- }
-
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_put_unlocked(obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index fbf0ee5201c3..c3613604a4f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,8 +4,8 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && X86_64
- imply AMD_IOMMU_V2
+ depends on DRM_AMDGPU && (X86_64 || ARM64)
+ imply AMD_IOMMU_V2 if X86_64
select MMU_NOTIFIER
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index b7bc7d7d048f..5d85ff341385 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
return 0;
}
+#if CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
uint32_t *num_entries,
struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
return 0;
}
+#endif
/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
*
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
struct crat_subtype_generic *sub_type_hdr;
int avail_size = *size;
int numa_node_id;
+#ifdef CONFIG_X86_64
uint32_t entries = 0;
+#endif
int ret = 0;
if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
sub_type_hdr->length);
/* Fill in Subtype: IO Link */
+#ifdef CONFIG_X86_64
ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
&entries,
(struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length * entries);
+#else
+ pr_info("IO link not available for non x86 platforms\n");
+#endif
crat_table->num_domains++;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5f5b2acedbac..09da91644f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- * gpu attached, it will better to have more stringent check.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *out_dev = NULL;
down_write(&topology_lock);
- list_for_each_entry(dev, &topology_device_list, list)
+ list_for_each_entry(dev, &topology_device_list, list) {
+ /* Discrete GPUs need their own topology device list
+ * entries. Don't assign them to CPU/APU nodes.
+ */
+ if (!gpu->device_info->needs_iommu_device &&
+ dev->node_props.cpu_cores_count)
+ continue;
+
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
+ }
up_write(&topology_lock);
return out_dev;
}
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
{
- const struct cpuinfo_x86 *cpuinfo;
int first_cpu_of_numa_node;
if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
first_cpu_of_numa_node = cpumask_first(cpumask);
if (first_cpu_of_numa_node >= nr_cpu_ids)
return -1;
- cpuinfo = &cpu_data(first_cpu_of_numa_node);
-
- return cpuinfo->apicid;
+#ifdef CONFIG_X86_64
+ return cpu_data(first_cpu_of_numa_node).apicid;
+#else
+ return first_cpu_of_numa_node;
+#endif
}
/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 34f35e9a3c46..f4fa40c387d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1772,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+ caps.min_input_signal * 0x101;
if (dc_link_set_backlight_level(dm->backlight_link,
- brightness, 0, 0))
+ brightness, 0))
return 0;
else
return 1;
@@ -5933,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
- !new_crtc_state->vrr_enabled)
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
continue;
if (!new_crtc_state->enable)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 52deacf39841..b0265dbebd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
bool dc_link_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp,
- const struct dc_stream_state *stream)
+ uint32_t frame_ramp)
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
(abm->funcs->set_backlight_level_pwm == NULL))
return false;
- if (stream)
- ((struct dc_stream_state *)stream)->bl_pwm_level =
- backlight_pwm_u16_16;
-
use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2637,11 +2632,6 @@ void core_link_enable_stream(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
-
- dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
- pipe_ctx->stream->bl_pwm_level,
- 0,
- pipe_ctx->stream);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 29f19d57ff7a..b2243e0dad1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
*/
bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp,
- const struct dc_stream_state *stream);
+ uint32_t frame_ramp);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index be34d638e15d..d70c9e1cda3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -91,7 +91,6 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
- unsigned int bl_pwm_level;
/* from core_stream struct */
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4bf24758217f..8f09b8625c5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
if (option != KEEP_ACQUIRED_RESOURCE ||
!dc->debug.az_endpoint_mute_only) {
/*only disalbe az_endpoint if power down or free*/
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
+ if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index dcb3c5530236..cd1ebe57ed59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset < 0)
+ if (src_y_offset + (int)height <= 0)
cur_en = 0; /* not visible beyond top edge*/
REG_UPDATE(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 345af015d061..d1acd7165bc8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position(
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
+ if (src_y_offset + (int)hubp->curs_attr.height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 91e015e14355..58a12ddf12f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface(
top_pipe_to_program->plane_state->update_flags.bits.full_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
+ tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
if (!pipe_ctx->stream || pipe_ctx->stream == stream
- || !pipe_ctx->plane_state)
+ || !pipe_ctx->plane_state
+ || !tg->funcs->is_tg_enabled(tg))
continue;
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ tg->funcs->lock(tg);
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->stream || pipe_ctx->stream == stream
- || !pipe_ctx->plane_state)
- continue;
-
- dcn10_pipe_control_lock(dc, pipe_ctx, false);
- }
+ tg->funcs->unlock(tg);
+ }
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 00f63b7dd32f..c11a443dcbc8 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
#define NUM_POWER_FN_SEGS 8
#define NUM_BL_CURVE_SEGS 16
+#pragma pack(push, 1)
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
@@ -100,6 +101,7 @@ struct iram_table_v_2 {
uint8_t dummy8; /* 0xfe */
uint8_t dummy9; /* 0xff */
};
+#pragma pack(pop)
static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 54364444ecd1..0c8212902275 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
+{
+ uint32_t result;
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+ "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
+ return -EINVAL);
+
+ result = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(result == 1,
+ "Failed to run ACG BTC!", return -EINVAL);
+
+ return 0;
+}
+
static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to initialize SMC table!",
result = tmp_result);
+ tmp_result = vega12_run_acg_btc(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to run ACG BTC!",
+ result = tmp_result);
+
result = vega12_enable_all_smu_features(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to enable all smu features!",
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b5475c91e2ef..e9f343b124b0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 5af11cf1b482..e1675a00df12 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -41,7 +41,7 @@ struct intel_gvt_mpt {
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
- void (*detach_vgpu)(unsigned long handle);
+ void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*enable_page_track)(unsigned long handle, u64 gfn);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c1072143da1d..dd3dfd00f4e6 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
unsigned int index;
u64 virtaddr;
- unsigned long req_size, pgoff = 0;
+ unsigned long req_size, pgoff, req_start;
pgprot_t pg_prot;
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
pg_prot = vma->vm_page_prot;
virtaddr = vma->vm_start;
req_size = vma->vm_end - vma->vm_start;
- pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
+ if (!intel_vgpu_in_aperture(vgpu, req_start))
+ return -EINVAL;
+ if (req_start + req_size >
+ vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+ return -EINVAL;
+
+ pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}
@@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
return 0;
}
-static void kvmgt_detach_vgpu(unsigned long handle)
+static void kvmgt_detach_vgpu(void *p_vgpu)
{
- /* nothing to do here */
+ int i;
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ if (!vgpu->vdev.region)
+ return;
+
+ for (i = 0; i < vgpu->vdev.num_regions; i++)
+ if (vgpu->vdev.region[i].ops->release)
+ vgpu->vdev.region[i].ops->release(vgpu,
+ &vgpu->vdev.region[i]);
+ vgpu->vdev.num_regions = 0;
+ kfree(vgpu->vdev.region);
+ vgpu->vdev.region = NULL;
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 67f19992b226..3ed34123d8d1 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
if (!intel_gvt_host.mpt->detach_vgpu)
return;
- intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+ intel_gvt_host.mpt->detach_vgpu(vgpu);
}
#define MSI_CAP_CONTROL(offset) (offset + 2)
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 75d97f1b2e8f..4f5c67f70c4d 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -46,7 +46,6 @@ struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
- bool enabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
-static void meson_crtc_enable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
- meson_crtc->enabled = true;
-}
-
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
-{
- struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
- struct meson_drm *priv = meson_crtc->priv;
-
- DRM_DEBUG_DRIVER("\n");
-
- if (!meson_crtc->enabled)
- meson_crtc_enable(crtc);
-
priv->viu.osd1_enabled = true;
}
@@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
-
- meson_crtc->enabled = false;
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
- if (crtc->state->enable && !meson_crtc->enabled)
- meson_crtc_enable(crtc);
-
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 3ee4d4a4ecba..12ff47b13668 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
};
+static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
static irqreturn_t meson_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
+ drm->mode_config.helper_private = &meson_mode_config_helpers;
/* Hardware Initialization */
@@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev,
remote_node = of_graph_get_remote_port_parent(ep);
if (!remote_node ||
remote_node == parent || /* Ignore parent endpoint */
- !of_device_is_available(remote_node))
+ !of_device_is_available(remote_node)) {
+ of_node_put(remote_node);
continue;
+ }
count += meson_probe_remote(pdev, match, remote, remote_node);
@@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev)
for_each_endpoint_of_node(np, ep) {
remote = of_graph_get_remote_port_parent(ep);
- if (!remote || !of_device_is_available(remote))
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
continue;
+ }
count += meson_probe_remote(pdev, &match, np, remote);
+ of_node_put(remote);
}
if (count && !match)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index bfbc9341e0c2..d9edb5785813 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2435,6 +2435,38 @@ nv140_chipset = {
};
static const struct nvkm_device_chip
+nv162_chipset = {
+ .name = "TU102",
+ .bar = tu104_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu104_devinit_new,
+ .fault = tu104_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu104_mc_new,
+ .mmu = tu104_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu104_ce_new,
+ .ce[1] = tu104_ce_new,
+ .ce[2] = tu104_ce_new,
+ .ce[3] = tu104_ce_new,
+ .ce[4] = tu104_ce_new,
+ .disp = tu104_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu104_fifo_new,
+};
+
+static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
.bar = tu104_bar_new,
@@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
case 0x140: device->chip = &nv140_chipset; break;
+ case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
default:
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 13c8a662f9b4..ccb090f3ab30 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = qxl_gem_prime_pin,
.gem_prime_unpin = qxl_gem_prime_unpin,
- .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index a55dece118b2..df65d3c1a7b8 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
-struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
-}
-
-struct drm_gem_object *qxl_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
-}
-
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 96ac1458a59c..37f93022a106 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
child_count++;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
&panel, &bridge);
- if (!ret)
+ if (!ret) {
+ of_node_put(endpoint);
break;
+ }
}
of_node_put(port);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 9e9255ee59cd..a021bab11a4f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
remote = of_graph_get_remote_port_parent(ep);
if (!remote)
continue;
+ of_node_put(remote);
/* does this node match any registered engines? */
list_for_each_entry(frontend, &drv->frontend_list, list) {
if (remote == frontend->node) {
- of_node_put(remote);
of_node_put(port);
+ of_node_put(ep);
return frontend;
}
}
}
-
+ of_node_put(port);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index f7f32a885af7..2d1aaca49105 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -127,14 +127,10 @@ static struct drm_driver driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = virtgpu_gem_prime_pin,
.gem_prime_unpin = virtgpu_gem_prime_unpin,
- .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1deb41d42ea4..0c15000f926e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 86ce0ae93f59..c59ec34c80a5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index b677e5d524e6..d5f1d8e1c6f8 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
depends on ACPI
+ depends on PCI
select VGA_ARB
help
Many laptops released in 2008/9/10 have two GPUs with a multiplexer
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 0e30fa00204c..f9b8e3e23a8e 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
}
rv = lm80_read_value(client, LM80_REG_FANDIV);
- if (rv < 0)
+ if (rv < 0) {
+ mutex_unlock(&data->update_lock);
return rv;
+ }
reg = (rv & ~(3 << (2 * (nr + 1))))
| (data->fan_div[nr] << (2 * (nr + 1)));
lm80_write_value(client, LM80_REG_FANDIV, reg);
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c3040079b1cb..4adec4ab7d06 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -44,8 +44,8 @@
* nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
* nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
* (0xd451)
- * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3
- * (0xd459)
+ * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
+ * (0xd429)
*
* #temp lists the number of monitored temperature sources (first value) plus
* the number of directly connectable temperature sensors (second value).
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_NCT6795_ID 0xd350
#define SIO_NCT6796_ID 0xd420
#define SIO_NCT6797_ID 0xd450
-#define SIO_NCT6798_ID 0xd458
+#define SIO_NCT6798_ID 0xd428
#define SIO_ID_MASK 0xFFF8
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
if (data->kind == nct6791 || data->kind == nct6792 ||
data->kind == nct6793 || data->kind == nct6795 ||
- data->kind == nct6796)
+ data->kind == nct6796 || data->kind == nct6797 ||
+ data->kind == nct6798)
nct6791_enable_io_mapping(sioreg);
superio_exit(sioreg);
@@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
- sio_data->kind == nct6796)
+ sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
+ sio_data->kind == nct6798)
nct6791_enable_io_mapping(sioaddr);
superio_exit(sioaddr);
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 423903f87955..391118c8aae8 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev,
val *= 1000000ULL;
break;
case 2:
- val = get_unaligned_be32(&power->update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev,
&power->update_tag);
break;
case 2:
- val = get_unaligned_be32(&power->update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
&power->system.update_tag);
break;
case 2:
- val = get_unaligned_be32(&power->system.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->system.update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->system.value) * 1000000ULL;
@@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
&power->proc.update_tag);
break;
case 6:
- val = get_unaligned_be32(&power->proc.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->proc.update_tag) *
+ occ->powr_sample_time_us;
break;
case 7:
val = get_unaligned_be16(&power->proc.value) * 1000000ULL;
@@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
&power->vdd.update_tag);
break;
case 10:
- val = get_unaligned_be32(&power->vdd.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
+ occ->powr_sample_time_us;
break;
case 11:
val = get_unaligned_be16(&power->vdd.value) * 1000000ULL;
@@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
&power->vdn.update_tag);
break;
case 14:
- val = get_unaligned_be32(&power->vdn.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
+ occ->powr_sample_time_us;
break;
case 15:
val = get_unaligned_be16(&power->vdn.value) * 1000000ULL;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 8844c9565d2a..7053be59ad2e 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
.data = (void *)2
},
{
- .compatible = "ti,tmp422",
+ .compatible = "ti,tmp442",
.data = (void *)3
},
{ },
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index b532e2c9cf5c..f8c00b94817f 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -901,9 +901,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
- if (!old_dyn_addr)
- return 0;
-
master->addrs[data->index] = dev->info.dyn_addr;
return 0;
@@ -925,11 +922,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
return -ENOMEM;
data->index = pos;
- master->addrs[pos] = dev->info.dyn_addr;
+ master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
master->free_pos &= ~BIT(pos);
i3c_dev_set_master_data(dev, data);
- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index bbd79b8b1a80..8889a4fdb454 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
return PTR_ERR(master->pclk);
master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ if (IS_ERR(master->sysclk))
+ return PTR_ERR(master->sysclk);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 63a7cc00bae0..84f077b2b90a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
- rdma_restrack_kadd(&id_priv->res);
+ if (id_priv->res.kern_name)
+ rdma_restrack_kadd(&id_priv->res);
+ else
+ rdma_restrack_uadd(&id_priv->res);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e600fc23ae62..3c97a8b6bf1e 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
goto err;
- if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
- pd->unsafe_global_rkey))
- goto err;
if (fill_res_name_pid(msg, res))
goto err;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index be6b8e1257d0..69f8db66925e 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
enum uverbs_obj_access access,
bool commit);
+int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
+
void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6b12cc5f97b2..3317300ab036 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
{
int ret;
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
+ return uverbs_copy_to_struct_or_zero(
+ attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
+
if (copy_to_user(attrs->ucore.outbuf, resp,
min(attrs->ucore.outlen, resp_len)))
return -EFAULT;
@@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
goto out_put;
}
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
+ ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
+
ret = 0;
out_put:
@@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
return -ENOMEM;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp)
+ if (!qp) {
+ ret = -EINVAL;
goto out;
+ }
is_ud = qp->qp_type == IB_QPT_UD;
sg_ind = 0;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 8c81ff698052..0ca04d224015 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
0, uattr->len - len);
}
+static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
+ const struct uverbs_attr *attr)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ u16 flags;
+
+ flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
+ UVERBS_ATTR_F_VALID_OUTPUT;
+ if (put_user(flags,
+ &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
+ return -EFAULT;
+ return 0;
+}
+
static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
const struct uverbs_api_attr *attr_uapi,
struct uverbs_objs_arr_attr *attr,
@@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
}
/*
+ * Until the drivers are revised to use the bundle directly we have to
+ * assume that the driver wrote to its UHW_OUT and flag userspace
+ * appropriately.
+ */
+ if (!ret && pbundle->method_elm->has_udata) {
+ const struct uverbs_attr *attr =
+ uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
+
+ if (!IS_ERR(attr))
+ ret = uverbs_set_output(&pbundle->bundle, attr);
+ }
+
+ /*
* EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
* not invoke the method because the request is not supported. No
* other cases should return this code.
@@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
const void *from, size_t size)
{
- struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
- u16 flags;
size_t min_size;
if (IS_ERR(attr))
@@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
return -EFAULT;
- flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
- UVERBS_ATTR_F_VALID_OUTPUT;
- if (put_user(flags,
- &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
- return -EFAULT;
-
- return 0;
+ return uverbs_set_output(bundle, attr);
}
EXPORT_SYMBOL(uverbs_copy_to);
+
+/*
+ * This is only used if the caller has directly used copy_to_user to write the
+ * data. It signals to user space that the buffer is filled in.
+ */
+int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ return uverbs_set_output(bundle, attr);
+}
+
int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, s64 lower_bound, u64 upper_bound,
s64 *def_val)
@@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
{
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
- if (clear_user(u64_to_user_ptr(attr->ptr_attr.data),
- attr->ptr_attr.len))
- return -EFAULT;
+ if (size < attr->ptr_attr.len) {
+ if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
+ attr->ptr_attr.len - size))
+ return -EFAULT;
+ }
return uverbs_copy_to(bundle, idx, from, size);
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb0007aa0c27..2890a77339e1 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -690,6 +690,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
buf += sizeof(hdr);
+ memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
bundle.ufile = file;
if (!method_elm->is_ex) {
size_t in_len = hdr.in_words * 4 - sizeof(hdr);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 82cb6b71ac7c..e3e9dd54caa2 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
{
struct mthca_ucontext *context;
- qp = kmalloc(sizeof *qp, GFP_KERNEL);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
@@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
if (udata)
return ERR_PTR(-EINVAL);
- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
+ qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 42b8685c997e..3c633ab58052 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
- return (enum pvrdma_wr_opcode)op;
+ switch (op) {
+ case IB_WR_RDMA_WRITE:
+ return PVRDMA_WR_RDMA_WRITE;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
+ case IB_WR_SEND:
+ return PVRDMA_WR_SEND;
+ case IB_WR_SEND_WITH_IMM:
+ return PVRDMA_WR_SEND_WITH_IMM;
+ case IB_WR_RDMA_READ:
+ return PVRDMA_WR_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
+ case IB_WR_LSO:
+ return PVRDMA_WR_LSO;
+ case IB_WR_SEND_WITH_INV:
+ return PVRDMA_WR_SEND_WITH_INV;
+ case IB_WR_RDMA_READ_WITH_INV:
+ return PVRDMA_WR_RDMA_READ_WITH_INV;
+ case IB_WR_LOCAL_INV:
+ return PVRDMA_WR_LOCAL_INV;
+ case IB_WR_REG_MR:
+ return PVRDMA_WR_FAST_REG_MR;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+ case IB_WR_REG_SIG_MR:
+ return PVRDMA_WR_REG_SIG_MR;
+ default:
+ return PVRDMA_WR_ERROR;
+ }
}
static inline enum ib_wc_status pvrdma_wc_status_to_ib(
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 3acf74cbe266..1ec3646087ba 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
wqe_hdr->ex.imm_data = wr->ex.imm_data;
+ if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (qp->ibqp.qp_type) {
case IB_QPT_GSI:
case IB_QPT_UD:
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4ac378e48902..40ca1e8fa09f 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
int i, j;
for (j = 0; j < AVM_MAXVERSION; j++)
- cinfo->version[j] = "\0\0" + 1;
+ cinfo->version[j] = "";
for (i = 0, j = 0;
j < AVM_MAXVERSION && i < cinfo->versionlen;
j++, i += cinfo->versionbuf[i] + 1)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index a2e74feee2b2..fd64df5a57a5 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
	/* Let the programs run for a couple of ms and check the engine status */
usleep_range(3000, 6000);
- lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ if (ret)
+ return ret;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fd4af4de03b4..05ffffb8b769 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -207,15 +207,10 @@ static bool create_on_open = true;
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
- struct bio *b;
-
if (!mddev || !bioset_initialized(&mddev->bio_set))
return bio_alloc(gfp_mask, nr_iovecs);
- b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
- if (!b)
- return NULL;
- return b;
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d01821a6906a..89d9c4c21037 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
- cancel_delayed_work_sync(&dev->work_run);
+ if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
+ cancel_delayed_work_sync(&dev->work_run);
+
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 1441a73ce64c..90aad465f9ed 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only)
const struct v4l2_window *win;
const struct v4l2_sdr_format *sdr;
const struct v4l2_meta_format *meta;
+ u32 planes;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only)
prt_names(mp->field, v4l2_field_names),
mp->colorspace, mp->num_planes, mp->flags,
mp->ycbcr_enc, mp->quantization, mp->xfer_func);
- for (i = 0; i < mp->num_planes; i++)
+ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
+ for (i = 0; i < planes; i++)
printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
mp->plane_fmt[i].bytesperline,
mp->plane_fmt[i].sizeimage);
@@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
@@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
@@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
@@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 6b212c8b78e7..2bfa3a903bf9 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
struct _vop_vdev *vdev = to_vopvdev(dev);
struct vop_device *vpdev = vdev->vpdev;
struct mic_device_ctrl __iomem *dc = vdev->dc;
- int i, err, retry;
+ int i, err, retry, queue_idx = 0;
/* We must have this many virtqueues. */
if (nvqs > ioread8(&vdev->desc->num_vq))
return -ENOENT;
for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
__func__, i, names[i]);
- vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
+ vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index eebac35304c6..6e8edc9375dd 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali)
}
/* clk rate info is needed for setup_data_interface */
- if (denali->clk_rate && denali->clk_x_rate)
+ if (!denali->clk_rate || !denali->clk_x_rate)
chip->options |= NAND_KEEP_TIMINGS;
chip->legacy.dummy_controller.ops = &denali_controller_ops;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 325b4414dccc..c9149a37f8f0 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
-/* fsmc_select_chip - assert or deassert nCE */
-static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert)
-{
- u32 pc = readl(host->regs_va + FSMC_PC);
-
- if (!assert)
- writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
- else
- writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
-
- /*
- * nCE line changes must be applied before returning from this
- * function.
- */
- mb();
-}
-
/*
* fsmc_exec_op - hook called by the core to execute NAND operations
*
@@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
- fsmc_ce_ctrl(host, true);
-
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
@@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
}
}
- fsmc_ce_ctrl(host, false);
-
return ret;
}
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index f92ae5aa2a54..9526d5b23c80 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat,
}
static int jz_nand_ioremap_resource(struct platform_device *pdev,
- const char *name, struct resource **res, void *__iomem *base)
+ const char *name, struct resource **res, void __iomem **base)
{
int ret;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6371958dd170..edb1c023a753 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -519,7 +519,7 @@ config NET_FAILOVER
and destroy a failover master netdev and manages a primary and
standby slave netdevs that get registered via the generic failover
infrastructure. This can be used by paravirtual drivers to enable
- an alternate low latency datapath. It alsoenables live migration of
+ an alternate low latency datapath. It also enables live migration of
a VM with direct attached VF by failing over to the paravirtual
datapath when the VF is unplugged.
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
index b4b839a1d095..ad41ec63cc9f 100644
--- a/drivers/net/dsa/realtek-smi.c
+++ b/drivers/net/dsa/realtek-smi.c
@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
struct device_node *mdio_np;
int ret;
- mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
- "realtek,smi-mdio");
+ mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
if (!mdio_np) {
dev_err(smi->dev, "no MDIO bus node\n");
return -ENODEV;
}
smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
- if (!smi->slave_mii_bus)
- return -ENOMEM;
+ if (!smi->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
smi->slave_mii_bus->priv = smi;
smi->slave_mii_bus->name = "SMI slave MII";
smi->slave_mii_bus->read = realtek_smi_mdio_read;
@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
if (ret) {
dev_err(smi->dev, "unable to register MDIO bus %s\n",
smi->slave_mii_bus->id);
- of_node_put(mdio_np);
+ goto err_put_node;
}
return 0;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
}
static int realtek_smi_probe(struct platform_device *pdev)
@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
dsa_unregister_switch(smi->ds);
+ if (smi->slave_mii_bus)
+ of_node_put(smi->slave_mii_bus->dev.of_node);
gpiod_set_value(smi->reset, 1);
return 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index d272dc6984ac..b40d4377cc71 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -431,8 +431,6 @@
#define MAC_MDIOSCAR_PA_WIDTH 5
#define MAC_MDIOSCAR_RA_INDEX 0
#define MAC_MDIOSCAR_RA_WIDTH 16
-#define MAC_MDIOSCAR_REG_INDEX 0
-#define MAC_MDIOSCAR_REG_WIDTH 21
#define MAC_MDIOSCCDR_BUSY_INDEX 22
#define MAC_MDIOSCCDR_BUSY_WIDTH 1
#define MAC_MDIOSCCDR_CMD_INDEX 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1e929a1e4ca7..4666084eda16 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
}
}
+static unsigned int xgbe_create_mdio_sca(int port, int reg)
+{
+ unsigned int mdio_sca, da;
+
+ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
+
+ return mdio_sca;
+}
+
static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
int reg, u16 val)
{
@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
reinit_completion(&pdata->mdio_complete);
- mdio_sca = 0;
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
reinit_completion(&pdata->mdio_complete);
- mdio_sca = 0;
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f53090cde041..dfebc30c4841 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
+ struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
int offset = 0;
@@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ txq = netdev_get_tx_queue(net_dev, queue_mapping);
+
+	/* LLTX requires us to update trans_start ourselves */
+ txq->trans_start = jiffies;
+
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index fe1592ae8769..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -515,7 +515,7 @@ struct igb_adapter {
/* OS defined structs */
struct pci_dev *pdev;
- struct mutex stats64_lock;
+ spinlock_t stats64_lock;
struct rtnl_link_stats64 stats64;
/* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7426060b678f..c57671068245 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
int i, j;
char *p;
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
}
static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 3615e2e52399..dfa357b1a9d6 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3841,7 +3841,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
- mutex_init(&adapter->stats64_lock);
+ spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -5407,9 +5407,9 @@ no_wait:
}
}
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -6236,10 +6236,10 @@ static void igb_get_stats64(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
memcpy(stats, &adapter->stats64, sizeof(*stats));
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c9df08133718..3bbccead2f63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -844,9 +844,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
- if (get_fec_supported_advertised(mdev, link_ksettings))
+ err = get_fec_supported_advertised(mdev, link_ksettings);
+ if (err) {
netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
__func__, err);
+ err = 0; /* don't fail caps query because of FEC error */
+ }
if (!an_disable_admin)
ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 96cc0c6a4014..04736212a21c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -58,7 +58,8 @@ struct mlx5e_rep_indr_block_priv {
struct list_head list;
};
-static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);
+static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev);
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
@@ -179,6 +180,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
+ s->tx_queue_dropped += sq_stats->dropped;
}
}
}
@@ -663,7 +665,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
list_for_each_entry_safe(cb_priv, temp, head, list) {
- mlx5e_rep_indr_unregister_block(cb_priv->netdev);
+ mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
kfree(cb_priv);
}
}
@@ -735,7 +737,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
err = tcf_block_cb_register(f->block,
mlx5e_rep_indr_setup_block_cb,
- netdev, indr_priv, f->extack);
+ indr_priv, indr_priv, f->extack);
if (err) {
list_del(&indr_priv->list);
kfree(indr_priv);
@@ -743,14 +745,15 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
return err;
case TC_BLOCK_UNBIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (!indr_priv)
+ return -ENOENT;
+
tcf_block_cb_unregister(f->block,
mlx5e_rep_indr_setup_block_cb,
- netdev);
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
- if (indr_priv) {
- list_del(&indr_priv->list);
- kfree(indr_priv);
- }
+ indr_priv);
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
return 0;
default:
@@ -779,7 +782,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
err = __tc_indr_block_cb_register(netdev, rpriv,
mlx5e_rep_indr_setup_tc_cb,
- netdev);
+ rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
@@ -789,10 +792,11 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
return err;
}
-static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
+static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
{
__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
- netdev);
+ rpriv);
}
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
@@ -811,7 +815,7 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
mlx5e_rep_indr_register_block(rpriv, netdev);
break;
case NETDEV_UNREGISTER:
- mlx5e_rep_indr_unregister_block(netdev);
+ mlx5e_rep_indr_unregister_block(rpriv, netdev);
break;
}
return NOTIFY_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1d0bb5ff8c26..f86e4804e83e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -732,6 +732,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
((struct ipv6hdr *)ip_p)->nexthdr;
}
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -754,6 +756,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
goto csum_unnecessary;
+ /* CQE csum doesn't cover padding octets in short ethernet
+ * frames. And the pad field is appended prior to calculating
+ * and appending the FCS field.
+ *
+	 * Detecting these padded frames requires verifying and parsing
+	 * IP headers, so we simply force all those small frames to be
+ * CHECKSUM_UNNECESSARY even if they are not padded.
+ */
+ if (short_frame(skb->len))
+ goto csum_unnecessary;
+
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
goto csum_unnecessary;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 66b8098c6fd2..a2321fe8d6a0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+ char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+ memcpy(ncqe, cqe, q->elem_size);
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
if (sendq) {
struct mlxsw_pci_queue *sdq;
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, cqe);
+ wqe_counter, ncqe);
q->u.cq.comp_sdq_count++;
} else {
struct mlxsw_pci_queue *rdq;
rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
- wqe_counter, q->u.cq.v, cqe);
+ wqe_counter, q->u.cq.v, ncqe);
q->u.cq.comp_rdq_count++;
}
if (++items == credits)
break;
}
- if (items) {
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ if (items)
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- }
}
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
@@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
- break;
+ return 0;
cond_resched();
} while (time_before(jiffies, end));
- return 0;
+ return -EBUSY;
}
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index bb99f6d41fe0..ffee38e36ce8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
@@ -53,6 +53,7 @@
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 055cc6943b34..9d9aa28684af 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -997,8 +997,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = MLXSW_SP_RFID_BASE - 1,
- .end_index = MLXSW_SP_RFID_BASE - 1,
+ .start_index = VLAN_N_VID - 1,
+ .end_index = VLAN_N_VID - 1,
.ops = &mlxsw_sp_fid_dummy_ops,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 8ab796e0c0d7..0f4e68d31cc3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1233,7 +1233,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
- MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
}
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
@@ -1290,7 +1290,7 @@ out:
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
- bool dynamic)
+ enum mlxsw_reg_sfd_rec_policy policy)
{
char *sfd_pl;
u8 num_rec;
@@ -1301,8 +1301,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1321,7 +1320,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+ MLXSW_REG_SFD_REC_ACTION_NOP,
+ mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
@@ -1329,7 +1329,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
- false);
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 084a1b3fbc80..cf22a79af66b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -337,7 +337,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
if (src)
dev_put(src->dev);
- kfree_skb(skb);
+ consume_skb(skb);
}
}
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 1b350183bffb..a271239748f2 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -197,6 +197,7 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
+ .features = PHY_10GBIT_FEC_FEATURES,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
@@ -208,6 +209,7 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
+ .features = PHY_10GBIT_FEC_FEATURES,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 8022cd317f62..1a4d04afb7f0 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = {
.phy_id = PHY_ID_CS4340,
.phy_id_mask = 0xffffffff,
.name = "Cortina CS4340",
+ .features = PHY_10GBIT_FEATURES,
.config_init = gen10g_config_init,
.config_aneg = gen10g_config_aneg,
.read_status = cortina_read_status,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a9c7c7f41b0c..2e12f982534f 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1046,6 +1046,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
return 0;
}
+/* The VOD can be out of specification on link up. Poke an
+ * undocumented register, in an undocumented page, with a magic value
+ * to fix this.
+ */
+static int m88e6390_errata(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_BMCR,
+ BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (err)
+ return err;
+
+ usleep_range(300, 400);
+
+ err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
+ if (err)
+ return err;
+
+ return genphy_soft_reset(phydev);
+}
+
+static int m88e6390_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ err = m88e6390_errata(phydev);
+ if (err)
+ return err;
+
+ return m88e1510_config_aneg(phydev);
+}
+
/**
* fiber_lpa_mod_linkmode_lpa_t
* @advertising: the linkmode advertisement settings
@@ -1402,7 +1435,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
* before enabling it if !phy_interrupt_is_valid()
*/
if (!phy_interrupt_is_valid(phydev))
- phy_read(phydev, MII_M1011_IEVENT);
+ __phy_read(phydev, MII_M1011_IEVENT);
/* Enable the WOL interrupt */
err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
@@ -2283,7 +2316,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
- .config_aneg = &m88e1510_config_aneg,
+ .config_aneg = &m88e6390_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 741f27228088..2e53ba3fa2e7 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -388,6 +388,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
if (IS_ERR(gpiod)) {
dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
bus->id);
+ device_del(&bus->dev);
return PTR_ERR(gpiod);
} else if (gpiod) {
bus->reset_gpiod = gpiod;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index b03bcf2c388a..3ddaf9595697 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = {
.name = "Meson GXL Internal PHY",
.features = PHY_BASIC_FEATURES,
.flags = PHY_IS_INTERNAL,
+ .soft_reset = genphy_soft_reset,
.config_init = meson_gxl_config_init,
.aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 670698b7ef0e..0d62b548ab39 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1049,6 +1049,7 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
+ .soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1077,6 +1078,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
+ .features = PHY_BASIC_FEATURES,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f7a92e7edff7..745a705a505a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -538,13 +538,6 @@ int phy_start_aneg(struct phy_device *phydev)
mutex_lock(&phydev->lock);
- if (!__phy_is_started(phydev)) {
- WARN(1, "called from state %s\n",
- phy_state_to_str(phydev->state));
- err = -EBUSY;
- goto out_unlock;
- }
-
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
@@ -555,11 +548,13 @@ int phy_start_aneg(struct phy_device *phydev)
if (err < 0)
goto out_unlock;
- if (phydev->autoneg == AUTONEG_ENABLE) {
- err = phy_check_link_status(phydev);
- } else {
- phydev->state = PHY_FORCING;
- phydev->link_timeout = PHY_FORCE_TIMEOUT;
+ if (__phy_is_started(phydev)) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ err = phy_check_link_status(phydev);
+ } else {
+ phydev->state = PHY_FORCING;
+ phydev->link_timeout = PHY_FORCE_TIMEOUT;
+ }
}
out_unlock:
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7b3164174251..b61db0a5ba3a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -59,6 +59,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_features);
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
+
static const int phy_basic_ports_array[] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
@@ -107,6 +110,11 @@ const int phy_10gbit_features_array[1] = {
};
EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
+const int phy_10gbit_fec_features_array[1] = {
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+};
+EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
@@ -189,6 +197,10 @@ static void features_init(void)
linkmode_set_bit_array(phy_10gbit_full_features_array,
ARRAY_SIZE(phy_10gbit_full_features_array),
phy_10gbit_full_features);
+ /* 10G FEC only */
+ linkmode_set_bit_array(phy_10gbit_fec_features_array,
+ ARRAY_SIZE(phy_10gbit_fec_features_array),
+ phy_10gbit_fec_features);
}
void phy_device_free(struct phy_device *phydev)
@@ -2290,6 +2302,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
{
int retval;
+ if (WARN_ON(!new_driver->features)) {
+ pr_err("%s: Driver features are missing\n", new_driver->name);
+ return -EINVAL;
+ }
+
new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
new_driver->mdiodrv.driver.name = new_driver->name;
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 22f3bdd8206c..91247182bc52 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = {
.phy_id = PHY_ID_TN2020,
.phy_id_mask = 0xffffffff,
.name = "Teranetics TN2020",
+ .features = PHY_10GBIT_FEATURES,
.soft_reset = gen10g_no_soft_reset,
.aneg_done = teranetics_aneg_done,
.config_init = gen10g_config_init,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 62dc564b251d..f22639f0116a 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
if (pskb_trim_rcsum(skb, len))
goto drop;
+ ph = pppoe_hdr(skb);
pn = pppoe_pernet(dev_net(dev));
/* Note that get_item does a sock_hold(), so sk_pppox(po)
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 57f1c94fca0b..820a2fe7d027 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1287,6 +1287,20 @@ static const struct driver_info asix112_info = {
#undef ASIX112_DESC
+static const struct driver_info trendnet_info = {
+ .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter",
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1440,6 +1454,7 @@ static const struct usb_device_id products[] = {
{AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
+ {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
{ },/* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3305f23793c7..5512a1038721 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -843,6 +843,14 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 236ba5d5fb4b..2a0edd4653e3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
return stats.packets;
}
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
struct sk_buff *skb;
unsigned int len;
@@ -1344,7 +1344,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
bytes += skb->len;
packets++;
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, in_napi);
}
/* Avoid overhead when no packets have been processed
@@ -1370,7 +1370,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
}
@@ -1446,7 +1446,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
__netif_tx_lock(txq, raw_smp_processor_id());
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1515,7 +1515,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, false);
if (use_napi && kick)
virtqueue_enable_cb_delayed(sq->vq);
@@ -1558,7 +1558,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!use_napi &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 2b2cf4e554d3..e5ffd5733540 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -54,12 +54,12 @@ struct nvdimm {
};
static inline enum nvdimm_security_state nvdimm_security_state(
- struct nvdimm *nvdimm, bool master)
+ struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
if (!nvdimm->sec.ops)
return -ENXIO;
- return nvdimm->sec.ops->state(nvdimm, master);
+ return nvdimm->sec.ops->state(nvdimm, ptype);
}
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index deb1a66bf117..9bc585415d9b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2041,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
return ret;
}
+/* irq_queues covers admin queue */
static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
{
unsigned int this_w_queues = write_queues;
+ WARN_ON(!irq_queues);
+
/*
- * Setup read/write queue split
+ * Setup read/write queue split, assign admin queue one independent
+ * irq vector if irq_queues is > 1.
*/
- if (irq_queues == 1) {
+ if (irq_queues <= 2) {
dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
dev->io_queues[HCTX_TYPE_READ] = 0;
return;
@@ -2056,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
/*
* If 'write_queues' is set, ensure it leaves room for at least
- * one read queue
+ * one read queue and one admin queue
*/
if (this_w_queues >= irq_queues)
- this_w_queues = irq_queues - 1;
+ this_w_queues = irq_queues - 2;
/*
* If 'write_queues' is set to zero, reads and writes will share
* a queue set.
*/
if (!this_w_queues) {
- dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues;
+ dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
dev->io_queues[HCTX_TYPE_READ] = 0;
} else {
dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
- dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues;
+ dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
}
}
@@ -2095,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
this_p_queues = nr_io_queues - 1;
irq_queues = 1;
} else {
- irq_queues = nr_io_queues - this_p_queues;
+ irq_queues = nr_io_queues - this_p_queues + 1;
}
dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
@@ -2115,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
* If we got a failure and we're down to asking for just
* 1 + 1 queues, just ask for a single vector. We'll share
* that between the single IO queue and the admin queue.
+ * Otherwise, we assign one independent vector to admin queue.
*/
- if (result >= 0 && irq_queues > 1)
+ if (irq_queues > 1)
irq_queues = irq_sets[0] + irq_sets[1] + 1;
result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
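
Editor's note: the nvme/host/pci.c hunks change how nvme_calc_io_queues() divides interrupt vectors once the admin queue gets its own vector whenever more than one is available. A minimal user-space sketch of the same arithmetic; the function and variable names are illustrative, not the driver's API:

#include <stdio.h>

/* One vector is reserved for the admin queue whenever more than one is
 * available; the remainder is split between default (write) and read
 * queue sets, mirroring the fixed nvme_calc_io_queues() logic. */
static void calc_io_queues(unsigned int irq_queues, unsigned int write_queues,
			   unsigned int *ndefault, unsigned int *nread)
{
	if (irq_queues <= 2) {			/* admin shares, single IO queue */
		*ndefault = 1;
		*nread = 0;
		return;
	}

	if (write_queues >= irq_queues)		/* leave room for read + admin */
		write_queues = irq_queues - 2;

	if (!write_queues) {			/* reads and writes share a set */
		*ndefault = irq_queues - 1;	/* minus the admin vector */
		*nread = 0;
	} else {
		*ndefault = write_queues;
		*nread = irq_queues - write_queues - 1;
	}
}

int main(void)
{
	unsigned int d, r;

	calc_io_queues(8, 3, &d, &r);
	printf("8 vectors, write_queues=3 -> default=%u read=%u (admin=1)\n", d, r);
	calc_io_queues(2, 0, &d, &r);
	printf("2 vectors, write_queues=0 -> default=%u read=%u\n", d, r);
	return 0;
}
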
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 44b37b202e39..ad0df786fe93 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1089,7 +1089,7 @@ out:
static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
- int result;
+ int result = 0;
if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
return 0;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index a09c1c3cf831..49b16f76d78e 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np)
if (!of_node_check_flag(np, OF_OVERLAY)) {
np->name = __of_get_property(np, "name", NULL);
- np->type = __of_get_property(np, "device_type", NULL);
if (!np->name)
np->name = "<NULL>";
- if (!np->type)
- np->type = "<NULL>";
phandle = __of_get_property(np, "phandle", &sz);
if (!phandle)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 7099c652c6a5..9cc1461aac7d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,12 +314,8 @@ static bool populate_node(const void *blob,
populate_properties(blob, offset, mem, np, pathp, dryrun);
if (!dryrun) {
np->name = of_get_property(np, "name", NULL);
- np->type = of_get_property(np, "device_type", NULL);
-
if (!np->name)
np->name = "<NULL>";
- if (!np->type)
- np->type = "<NULL>";
}
*pnp = np;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 2b5ac43a5690..c423e94baf0f 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
tchild->parent = target->np;
tchild->name = __of_get_property(node, "name", NULL);
- tchild->type = __of_get_property(node, "device_type", NULL);
if (!tchild->name)
tchild->name = "<NULL>";
- if (!tchild->type)
- tchild->type = "<NULL>";
/* ignore obsolete "linux,phandle" */
phandle = __of_get_property(node, "phandle", &size);
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index d3185063d369..7eda43c66c91 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
dp->parent = parent;
dp->name = of_pdt_get_one_property(node, "name");
- dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
dp->properties = of_pdt_build_prop_list(node);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 08430031bd28..8631efa1daa1 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
if (!of_device_is_available(remote)) {
pr_debug("not available for remote node\n");
+ of_node_put(remote);
return NULL;
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 4310c7a4212e..2ab92409210a 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,13 +21,14 @@ menuconfig PCI
support for PCI-X and the foundations for PCI Express support.
Say 'Y' here unless you know what you are doing.
+if PCI
+
config PCI_DOMAINS
bool
depends on PCI
config PCI_DOMAINS_GENERIC
bool
- depends on PCI
select PCI_DOMAINS
config PCI_SYSCALL
@@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig"
config PCI_MSI
bool "Message Signaled Interrupts (MSI and MSI-X)"
- depends on PCI
select GENERIC_MSI_IRQ
help
This allows device drivers to enable MSI (Message Signaled
@@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN
config PCI_QUIRKS
default y
bool "Enable PCI quirk workarounds" if EXPERT
- depends on PCI
help
This enables workarounds for various PCI chipset bugs/quirks.
Disable this only if your target machine is unaffected by PCI
@@ -67,7 +66,7 @@ config PCI_QUIRKS
config PCI_DEBUG
bool "PCI Debugging"
- depends on PCI && DEBUG_KERNEL
+ depends on DEBUG_KERNEL
help
Say Y here if you want the PCI core to produce a bunch of debug
messages to the system log. Select this if you are having a
@@ -77,7 +76,6 @@ config PCI_DEBUG
config PCI_REALLOC_ENABLE_AUTO
bool "Enable PCI resource re-allocation detection"
- depends on PCI
depends on PCI_IOV
help
Say Y here if you want the PCI core to detect if PCI resource
@@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO
config PCI_STUB
tristate "PCI Stub driver"
- depends on PCI
help
Say Y or M here if you want be able to reserve a PCI device
when it is going to be assigned to a guest operating system.
@@ -99,7 +96,6 @@ config PCI_STUB
config PCI_PF_STUB
tristate "PCI PF Stub driver"
- depends on PCI
depends on PCI_IOV
help
Say Y or M here if you want to enable support for devices that
@@ -111,7 +107,7 @@ config PCI_PF_STUB
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
- depends on PCI && X86 && XEN
+ depends on X86 && XEN
select PCI_XEN
select XEN_XENBUS_FRONTEND
default y
@@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL
config PCI_IOV
bool "PCI IOV support"
- depends on PCI
select PCI_ATS
help
I/O Virtualization is a PCI feature supported by some devices
@@ -144,7 +139,6 @@ config PCI_IOV
config PCI_PRI
bool "PCI PRI support"
- depends on PCI
select PCI_ATS
help
PRI is the PCI Page Request Interface. It allows PCI devices that are
@@ -154,7 +148,6 @@ config PCI_PRI
config PCI_PASID
bool "PCI PASID support"
- depends on PCI
select PCI_ATS
help
Process Address Space Identifiers (PASIDs) can be used by PCI devices
@@ -167,7 +160,7 @@ config PCI_PASID
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
- depends on PCI && ZONE_DEVICE
+ depends on ZONE_DEVICE
select GENERIC_ALLOCATOR
help
Enables drivers to do PCI peer-to-peer transactions to and from
Enables drivers to do PCI peer-to-peer transactions to and from
@@ -184,12 +177,11 @@ config PCI_P2PDMA
config PCI_LABEL
def_bool y if (DMI || ACPI)
- depends on PCI
select NLS
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
- depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+ depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
help
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
@@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
source "drivers/pci/switch/Kconfig"
+
+endif
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a1c8a09efa5..4c0b47867258 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
const struct irq_affinity *affd)
{
static const struct irq_affinity msi_default_affd;
- int vecs = -ENOSPC;
+ int msix_vecs = -ENOSPC;
+ int msi_vecs = -ENOSPC;
if (flags & PCI_IRQ_AFFINITY) {
if (!affd)
@@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
}
if (flags & PCI_IRQ_MSIX) {
- vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
- affd);
- if (vecs > 0)
- return vecs;
+ msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
+ max_vecs, affd);
+ if (msix_vecs > 0)
+ return msix_vecs;
}
if (flags & PCI_IRQ_MSI) {
- vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
- if (vecs > 0)
- return vecs;
+ msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
+ affd);
+ if (msi_vecs > 0)
+ return msi_vecs;
}
/* use legacy irq if allowed */
@@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
}
}
- return vecs;
+ if (msix_vecs == -ENOSPC)
+ return -ENOSPC;
+ return msi_vecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
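
Editor's note: the msi.c hunk splits the single vecs variable so that a later MSI failure cannot mask an earlier MSI-X -ENOSPC. A hedged stand-alone model of that fallback chain; the allocator helpers and the limits they apply are made up for illustration:

#include <stdio.h>
#include <errno.h>

/* Stand-in allocators: a negative return is an error code. */
static int try_msix(int want) { return want > 4 ? -ENOSPC : want; }
static int try_msi(int want)  { (void)want; return -EINVAL; }

/* Mirrors the fixed logic: remember the MSI-X result separately so that
 * an -ENOSPC from MSI-X is reported even if the MSI path fails differently. */
static int alloc_irq_vectors(int want)
{
	int msix_vecs = -ENOSPC;
	int msi_vecs = -ENOSPC;

	msix_vecs = try_msix(want);
	if (msix_vecs > 0)
		return msix_vecs;

	msi_vecs = try_msi(want);
	if (msi_vecs > 0)
		return msi_vecs;

	if (msix_vecs == -ENOSPC)
		return -ENOSPC;
	return msi_vecs;
}

int main(void)
{
	printf("want 8 -> %d (expected %d)\n", alloc_irq_vectors(8), -ENOSPC);
	printf("want 2 -> %d\n", alloc_irq_vectors(2));
	return 0;
}
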
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c9d8e3c837de..c25acace7d91 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str)
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
- disable_acs_redir_param = str + 18;
+ disable_acs_redir_param =
+ kstrdup(str + 18, GFP_KERNEL);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e3b62c2ee8d1..5e2109c54c7c 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1009,7 +1009,7 @@ config INTEL_MFLD_THERMAL
config INTEL_IPS
tristate "Intel Intelligent Power Sharing"
- depends on ACPI
+ depends on ACPI && PCI
---help---
Intel Calpella platforms support dynamic power sharing between the
CPU and GPU, maximizing performance in a given TDP. This driver,
@@ -1135,7 +1135,7 @@ config SAMSUNG_Q10
config APPLE_GMUX
tristate "Apple Gmux Driver"
- depends on ACPI
+ depends on ACPI && PCI
depends on PNP
depends on BACKLIGHT_CLASS_DEVICE
depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
@@ -1174,7 +1174,7 @@ config INTEL_SMARTCONNECT
config INTEL_PMC_IPC
tristate "Intel PMC IPC Driver"
- depends on ACPI
+ depends on ACPI && PCI
---help---
This driver provides support for PMC control on some Intel platforms.
The PMC is an ARC processor which defines IPC commands for communication
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 183fc42a510a..2d7cd344f3bf 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
const bool * ctx,
struct irq_affinity *desc)
{
- int i, ret;
+ int i, ret, queue_idx = 0;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index fc9dbad476c0..ae1d56da671d 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
unsigned long *indicatorp = NULL;
- int ret, i;
+ int ret, i, queue_idx = 0;
struct ccw1 *ccw;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return -ENOMEM;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
- ctx ? ctx[i] : false, ccw);
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
+ names[i], ctx ? ctx[i] : false,
+ ccw);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
vqs[i] = NULL;
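
Editor's note: both the remoteproc and virtio_ccw find_vqs hunks above skip entries whose name is NULL and let only real queues consume a hardware queue index, keeping the indexes contiguous. A trivial runnable illustration of that pattern; the name table is invented for the example:

#include <stdio.h>

int main(void)
{
	const char *names[] = { "rx", NULL, "tx", NULL, "ctrl" };
	int queue_idx = 0;

	for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (!names[i]) {
			printf("vq[%u]: skipped (no name)\n", i);
			continue;
		}
		printf("vq[%u]: \"%s\" -> hardware queue %d\n",
		       i, names[i], queue_idx++);
	}
	return 0;
}
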
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 8a20411699d9..75e1273a44b3 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
- unsigned int tid, int pg_idx, bool reply)
+ unsigned int tid, int pg_idx)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
* @tid: connection id
* @hcrc: header digest enabled
* @dcrc: data digest enabled
- * @reply: request reply from h/w
* set up the iscsi digest settings for a connection identified by tid
*/
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0x0F000000);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 49f8028ac524..d26f50af00ea 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cxgbi_sock *csk;
csk = lookup_tid(t, tid);
- if (!csk)
+ if (!csk) {
pr_err("can't find conn. for tid %u.\n", tid);
+ return;
+ }
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
csk, csk->state, csk->flags, csk->tid, rpl->status);
- if (rpl->status != CPL_ERR_NONE)
+ if (rpl->status != CPL_ERR_NONE) {
pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
csk, tid, rpl->status);
+ csk->err = -EINVAL;
+ }
+
+ complete(&csk->cmpl);
__kfree_skb(skb);
}
@@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
- int pg_idx, bool reply)
+ int pg_idx)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
@@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 75f876409fb9..245742557c03 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
skb_queue_head_init(&csk->receive_queue);
skb_queue_head_init(&csk->write_queue);
timer_setup(&csk->retry_timer, NULL, 0);
+ init_completion(&csk->cmpl);
rwlock_init(&csk->callback_lock);
csk->cdev = cdev;
csk->flags = 0;
@@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
if (!err && conn->hdrdgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ conn->datadgst_en);
break;
case ISCSI_PARAM_DATADGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && conn->datadgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ conn->datadgst_en);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
ppm = csk->cdev->cdev2ppm(csk->cdev);
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
- ppm->tformat.pgsz_idx_dflt, 0);
+ ppm->tformat.pgsz_idx_dflt);
if (err < 0)
return err;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 5d5d8b50d842..1917ff57651d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -149,6 +149,7 @@ struct cxgbi_sock {
struct sk_buff_head receive_queue;
struct sk_buff_head write_queue;
struct timer_list retry_timer;
+ struct completion cmpl;
int err;
rwlock_t callback_lock;
void *user_data;
@@ -490,9 +491,9 @@ struct cxgbi_device {
struct cxgbi_ppm *,
struct cxgbi_task_tag_info *);
int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
- unsigned int, int, int, int);
+ unsigned int, int, int);
int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
- unsigned int, int, bool);
+ unsigned int, int);
void (*csk_release_offload_resources)(struct cxgbi_sock *);
int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index e2420a810e99..c92b3822c408 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ if (hisi_hba->prot_mask) {
+ dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
+ prot_mask);
+ scsi_host_set_prot(hisi_hba->shost, prot_mask);
+ }
+
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_ha;
@@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
goto err_out_register_ha;
- if (hisi_hba->prot_mask) {
- dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
- prot_mask);
- scsi_host_set_prot(hisi_hba->shost, prot_mask);
- }
-
scsi_scan_host(shost);
return 0;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 68b90c4f79a3..1727d0c71b12 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
shost->max_lun = ~0;
shost->max_cmd_len = MAX_COMMAND_SIZE;
+ /* turn on DIF support */
+ scsi_host_set_prot(shost,
+ SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
err = scsi_add_host(shost, &pdev->dev);
if (err)
goto err_shost;
@@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_host_alloc;
}
pci_info->hosts[i] = h;
-
- /* turn on DIF support */
- scsi_host_set_prot(to_shost(h),
- SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIF_TYPE2_PROTECTION |
- SHOST_DIF_TYPE3_PROTECTION);
- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 12fd74761ae0..2242e9b3ca12 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9407,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
cmnd = CMD_XMIT_SEQUENCE64_CR;
if (phba->link_flag & LS_LOOPBACK_MODE)
bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+ /* fall through */
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=reserved */
wqe->xmit_sequence.rsvd3 = 0;
@@ -13528,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
+ /* fall through */
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
@@ -13937,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6126 Receive Frame Truncated!!\n");
- /* Drop thru */
+ /* fall through */
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
@@ -14849,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
eq->entry_count);
if (eq->entry_count < 256)
return -EINVAL;
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 256:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_256);
@@ -14980,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
LPFC_CQ_CNT_WORD7);
break;
}
- /* Fall Thru */
+ /* fall through */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
@@ -14991,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 256:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_256);
@@ -15151,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
LPFC_CQ_CNT_WORD7);
break;
}
- /* Fall Thru */
+ /* fall through */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
@@ -15160,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest (drop thru) */
+ /* fall through - otherwise default to smallest */
case 256:
bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
&cq_set->u.request, LPFC_CQ_CNT_256);
@@ -15432,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 16:
bf_set(lpfc_mq_context_ring_size,
&mq_create_ext->u.request.context,
@@ -15851,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
@@ -15988,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7eaa400f6328..fcbff83c0097 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
instance->consistent_mask_64bit = true;
dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
- ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"),
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
(instance->consistent_mask_64bit ? "63" : "32"));
return 0;
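
Editor's note: the one-line megaraid fix compares the stored mask against DMA_BIT_MASK(63) because that is the mask the driver actually sets; a 63-bit mask can never equal the 64-bit constant, so the old message always reported "32". A small arithmetic check, with DMA_BIT_MASK reproduced from its kernel definition:

#include <stdio.h>
#include <inttypes.h>

/* Same definition as the kernel's DMA_BIT_MASK(). */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(63);	/* what the driver sets */

	printf("63-bit mask: 0x%" PRIx64 "\n", mask);
	printf("matches DMA_BIT_MASK(64)? %s\n",
	       mask == DMA_BIT_MASK(64) ? "yes" : "no");	/* no */
	printf("matches DMA_BIT_MASK(63)? %s\n",
	       mask == DMA_BIT_MASK(63) ? "yes" : "no");	/* yes */
	return 0;
}
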
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index a9a25f0eaf6f..647f48a28f85 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance)
/*
* Check if it is our interrupt
*/
- status = readl(&regs->outbound_intr_status);
+ status = megasas_readl(instance,
+ &regs->outbound_intr_status);
if (status & 1) {
writel(status, &regs->outbound_intr_status);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 4c5a3d23e010..084f2fcced0a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
if (dev->dev_type == SAS_SATA_DEV) {
pm8001_device->attached_phy =
dev->rphy->identify.phy_identifier;
- flag = 1; /* directly sata*/
+ flag = 1; /* directly sata */
}
} /*register this device to HBA*/
PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 4da660c1c431..6d6d6013e35b 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qedi_ep = ep->dd_data;
if (qedi_ep->state == EP_STATE_IDLE ||
+ qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
return -1;
@@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
switch (qedi_ep->state) {
case EP_STATE_OFLDCONN_START:
+ case EP_STATE_OFLDCONN_NONE:
goto ep_release_conn;
case EP_STATE_OFLDCONN_FAILED:
break;
@@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+ qedi_ep->state = EP_STATE_OFLDCONN_NONE;
ret = -EIO;
goto set_path_exit;
}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 11260776212f..892d70d54553 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -59,6 +59,7 @@ enum {
EP_STATE_OFLDCONN_FAILED = 0x2000,
EP_STATE_CONNECT_FAILED = 0x4000,
EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+ EP_STATE_OFLDCONN_NONE = 0x10000,
};
struct qedi_conn;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index a414f51302b7..6856dfdfa473 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->devnum = devnum; /* specifies microcode load address */
#ifdef QLA_64BIT_PTR
- if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 26b93c563f92..d1fc4958222a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host {
uint16_t n2n_id;
struct list_head gpnid_list;
struct fab_scan scan;
+
+ unsigned int irq_offset;
} scsi_qla_host_t;
struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 30d3090842f8..8507c43b918c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
"Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
}
}
+ vha->irq_offset = desc.pre_vectors;
ha->msix_entries = kcalloc(ha->msix_count,
sizeof(struct qla_msix_entry),
GFP_KERNEL);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ea69dafc9774..c6ef83d0d99b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
if (USER_CTRL_IRQ(vha->hw))
rc = blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
+ rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
return rc;
}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index cfdfcda28072..a77bfb224248 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
fw_ddb_entry);
+ if (rc)
+ goto free_sess;
ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
__func__, fnode_sess->dev.kobj.name);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index a2b4179bfdf7..7639df91b110 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
if (err == 0) {
pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
+ err = pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ /*
+ * Forcibly set runtime PM status of request queue to "active"
+ * to make sure we can again get requests from the queue
+ * (see also blk_pm_peek_request()).
+ *
+ * The resume hook will correct runtime PM status of the disk.
+ */
+ if (!err && scsi_is_sdev_device(dev)) {
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (sdev->request_queue->dev)
+ blk_set_runtime_active(sdev->request_queue);
+ }
}
return err;
@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev,
else
fn = NULL;
- /*
- * Forcibly set runtime PM status of request queue to "active" to
- * make sure we can again get requests from the queue (see also
- * blk_pm_peek_request()).
- *
- * The resume hook will correct runtime PM status of the disk.
- */
- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
- blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
if (fn) {
async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a1a44f52e0e8..b2da8a00ec33 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
sp = buffer_data[0] & 0x80 ? 1 : 0;
buffer_data[0] &= ~0x80;
+ /*
+ * Ensure WP, DPOFUA, and RESERVED fields are cleared in
+ * received mode parameter buffer before doing MODE SELECT.
+ */
+ data.device_specific = 0;
+
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
SD_MAX_RETRIES, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 7bde6c809442..f564af8949e8 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
- return device->in_remove & !ctrl_info->in_shutdown;
+ return device->in_remove && !ctrl_info->in_shutdown;
}
static inline void pqi_schedule_rescan_worker_with_delay(
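
Editor's note: the smartpqi hunk replaces a bitwise & with the intended logical &&. The difference only bites when the left operand is truthy but its low bit happens to be clear; a short illustration with values chosen to expose the hazard, not taken from the driver:

#include <stdio.h>

int main(void)
{
	int in_remove = 2;	/* "true", but bit 0 happens to be clear */
	int in_shutdown = 0;

	/* !in_shutdown is 1, so the bitwise form tests only bit 0 of in_remove. */
	printf("in_remove &  !in_shutdown = %d\n", in_remove & !in_shutdown);	/* 0 */
	printf("in_remove && !in_shutdown = %d\n", in_remove && !in_shutdown);	/* 1 */
	return 0;
}
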
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index dd65fea07687..6d176815e6ce 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
QUERY_DESC_UNIT_DEF_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9ba7671b84f8..71334aaf1447 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8001,6 +8001,8 @@ out:
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 984941e036c8..bd15a564fe24 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void)
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
- pr_err("nable to kmem_cache_create() for"
+ pr_err("Unable to kmem_cache_create() for"
" lio_qr_cache\n");
goto bitmap_out;
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 1e6d24943565..c34c88ef3319 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -148,7 +148,7 @@ struct tcmu_dev {
size_t ring_size;
struct mutex cmdr_lock;
- struct list_head cmdr_queue;
+ struct list_head qfull_queue;
uint32_t dbi_max;
uint32_t dbi_thresh;
@@ -159,6 +159,7 @@ struct tcmu_dev {
struct timer_list cmd_timer;
unsigned int cmd_time_out;
+ struct list_head inflight_queue;
struct timer_list qfull_timer;
int qfull_time_out;
@@ -179,7 +180,7 @@ struct tcmu_dev {
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
- struct list_head cmdr_queue_entry;
+ struct list_head queue_entry;
uint16_t cmd_id;
@@ -192,6 +193,7 @@ struct tcmu_cmd {
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_INFLIGHT 1
unsigned long flags;
};
/*
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
if (!tcmu_cmd)
return NULL;
- INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
+ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
@@ -915,11 +917,13 @@ setup_timer:
return 0;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
- mod_timer(timer, tcmu_cmd->deadline);
+ if (!timer_pending(timer))
+ mod_timer(timer, tcmu_cmd->deadline);
+
return 0;
}
-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
if (ret)
return ret;
- list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
tcmu_cmd->cmd_id, udev->name);
return 0;
@@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- if (!list_empty(&udev->cmdr_queue))
+ if (!list_empty(&udev->qfull_queue))
goto queue;
mb = udev->mb_addr;
@@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
+ set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
+
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
return 0;
queue:
- if (add_to_cmdr_queue(tcmu_cmd)) {
+ if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
goto out;
+ list_del_init(&cmd->queue_entry);
+
tcmu_cmd_reset_dbi_cur(cmd);
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
@@ -1194,9 +1203,29 @@ out:
tcmu_free_cmd(cmd);
}
+static void tcmu_set_next_deadline(struct list_head *queue,
+ struct timer_list *timer)
+{
+ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
+ unsigned long deadline = 0;
+
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
+ if (!time_after(jiffies, tcmu_cmd->deadline)) {
+ deadline = tcmu_cmd->deadline;
+ break;
+ }
+ }
+
+ if (deadline)
+ mod_timer(timer, deadline);
+ else
+ del_timer(timer);
+}
+
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
+ struct tcmu_cmd *cmd;
int handled = 0;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
- struct tcmu_cmd *cmd;
tcmu_flush_dcache_range(entry, sizeof(*entry));
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
/* no more pending commands */
del_timer(&udev->cmd_timer);
- if (list_empty(&udev->cmdr_queue)) {
+ if (list_empty(&udev->qfull_queue)) {
/*
* no more pending or waiting commands so try to
* reclaim blocks if needed.
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_global_max_blocks)
schedule_delayed_work(&tcmu_unmap_work, 0);
}
+ } else if (udev->cmd_time_out) {
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
}
return handled;
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
if (!time_after(jiffies, cmd->deadline))
return 0;
- is_running = list_empty(&cmd->cmdr_queue_entry);
+ is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
se_cmd = cmd->se_cmd;
if (is_running) {
@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
} else {
- list_del_init(&cmd->cmdr_queue_entry);
-
idr_remove(&udev->commands, id);
tcmu_free_cmd(cmd);
scsi_status = SAM_STAT_TASK_SET_FULL;
}
+ list_del_init(&cmd->queue_entry);
pr_debug("Timing out cmd %u on dev %s that is %s.\n",
id, udev->name, is_running ? "inflight" : "queued");
@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
- INIT_LIST_HEAD(&udev->cmdr_queue);
+ INIT_LIST_HEAD(&udev->qfull_queue);
+ INIT_LIST_HEAD(&udev->inflight_queue);
idr_init(&udev->commands);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
+static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
sense_reason_t scsi_ret;
int ret;
- if (list_empty(&udev->cmdr_queue))
+ if (list_empty(&udev->qfull_queue))
return true;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
- list_splice_init(&udev->cmdr_queue, &cmds);
+ list_splice_init(&udev->qfull_queue, &cmds);
- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
- list_del_init(&tcmu_cmd->cmdr_queue_entry);
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
+ list_del_init(&tcmu_cmd->queue_entry);
pr_debug("removing cmd %u on dev %s from queue\n",
tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
* cmd was requeued, so just put all cmds back in
* the queue
*/
- list_splice_tail(&cmds, &udev->cmdr_queue);
+ list_splice_tail(&cmds, &udev->qfull_queue);
drained = false;
- goto done;
+ break;
}
}
- if (list_empty(&udev->cmdr_queue))
- del_timer(&udev->qfull_timer);
-done:
+
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
return drained;
}
@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
mutex_lock(&udev->cmdr_lock);
tcmu_handle_completions(udev);
- run_cmdr_queue(udev, false);
+ run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
return 0;
@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
/* complete IO that has executed successfully */
tcmu_handle_completions(udev);
/* fail IO waiting to be queued */
- run_cmdr_queue(udev, true);
+ run_qfull_queue(udev, true);
unlock:
mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!list_empty(&cmd->cmdr_queue_entry))
+ if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
continue;
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
* Userspace was not able to start the
@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
mutex_lock(&udev->cmdr_lock);
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
mutex_unlock(&udev->cmdr_lock);
spin_lock_bh(&timed_out_udevs_lock);
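
Editor's note: tcmu_set_next_deadline() in the hunks above re-arms a timer with the earliest deadline still in the future and cancels it when nothing is pending. A user-space model of that scan; the array, the time handling, and the oldest-first ordering assumption are simplified stand-ins for the kernel's list and jiffies comparison:

#include <stdio.h>

/* Returns the first deadline later than `now`, or 0 if none is pending,
 * mirroring how tcmu_set_next_deadline() chooses between mod_timer()
 * and del_timer(). Assumes the queue is ordered oldest-first. */
static unsigned long next_deadline(const unsigned long *deadlines, int n,
				   unsigned long now)
{
	for (int i = 0; i < n; i++) {
		if (deadlines[i] > now)
			return deadlines[i];
	}
	return 0;	/* nothing pending: the timer would be deleted */
}

int main(void)
{
	unsigned long q[] = { 100, 150, 400 };
	unsigned long now = 220;
	unsigned long d = next_deadline(q, 3, now);

	if (d)
		printf("re-arm timer for %lu\n", d);	/* 400 */
	else
		printf("delete timer\n");
	return 0;
}
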
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 0582bd12a239..0ca908d12750 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -4,7 +4,7 @@
config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
- depends on X86 && ACPI
+ depends on X86 && ACPI && PCI
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 36f3d0f49e60..bca86bf7189f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1236,7 +1236,8 @@ static void handle_rx(struct vhost_net *net)
if (nvq->done_idx > VHOST_NET_BATCH)
vhost_net_signal_used(nvq);
if (unlikely(vq_log))
- vhost_log_write(vq, vq_log, log, vhost_len);
+ vhost_log_write(vq, vq_log, log, vhost_len,
+ vq->iov, in);
total_len += vhost_len;
if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
vhost_poll_queue(&vq->poll);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8e10ab436d1f..344684f3e2e4 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
- struct virtio_scsi_ctrl_tmf_resp __user *resp;
struct virtio_scsi_ctrl_tmf_resp rsp;
+ struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp));
rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
- resp = vq->iov[vc->out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
+
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
@@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
- struct virtio_scsi_ctrl_an_resp __user *resp;
struct virtio_scsi_ctrl_an_resp rsp;
+ struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
rsp.response = VIRTIO_SCSI_S_OK;
- resp = vq->iov[vc->out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
+
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9f7942cbcbb2..15a216cdd507 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
int type, ret;
ret = copy_from_iter(&type, sizeof(type), from);
- if (ret != sizeof(type))
+ if (ret != sizeof(type)) {
+ ret = -EINVAL;
goto done;
+ }
switch (type) {
case VHOST_IOTLB_MSG:
@@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
iov_iter_advance(from, offset);
ret = copy_from_iter(&msg, sizeof(msg), from);
- if (ret != sizeof(msg))
+ if (ret != sizeof(msg)) {
+ ret = -EINVAL;
goto done;
+ }
if (vhost_process_iotlb_msg(dev, &msg)) {
ret = -EFAULT;
goto done;
@@ -1733,13 +1737,87 @@ static int log_write(void __user *log_base,
return r;
}
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
+{
+ struct vhost_umem *umem = vq->umem;
+ struct vhost_umem_node *u;
+ u64 start, end, l, min;
+ int r;
+ bool hit = false;
+
+ while (len) {
+ min = len;
+ /* More than one GPAs can be mapped into a single HVA. So
+ * iterate all possible umems here to be safe.
+ */
+ list_for_each_entry(u, &umem->umem_list, link) {
+ if (u->userspace_addr > hva - 1 + len ||
+ u->userspace_addr - 1 + u->size < hva)
+ continue;
+ start = max(u->userspace_addr, hva);
+ end = min(u->userspace_addr - 1 + u->size,
+ hva - 1 + len);
+ l = end - start + 1;
+ r = log_write(vq->log_base,
+ u->start + start - u->userspace_addr,
+ l);
+ if (r < 0)
+ return r;
+ hit = true;
+ min = min(l, min);
+ }
+
+ if (!hit)
+ return -EFAULT;
+
+ len -= min;
+ hva += min;
+ }
+
+ return 0;
+}
+
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+{
+ struct iovec iov[64];
+ int i, ret;
+
+ if (!vq->iotlb)
+ return log_write(vq->log_base, vq->log_addr + used_offset, len);
+
+ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+ len, iov, 64, VHOST_ACCESS_WO);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ret; i++) {
+ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
- unsigned int log_num, u64 len)
+ unsigned int log_num, u64 len, struct iovec *iov, int count)
{
int i, r;
/* Make sure data written is seen before log. */
smp_wmb();
+
+ if (vq->iotlb) {
+ for (i = 0; i < count; i++) {
+ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+ }
+
for (i = 0; i < log_num; ++i) {
u64 l = min(log[i].len, len);
r = log_write(vq->log_base, log[i].addr, l);
@@ -1769,9 +1847,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
smp_wmb();
/* Log used flag write. */
used = &vq->used->flags;
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof vq->used->flags);
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof vq->used->flags);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
@@ -1789,9 +1866,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
smp_wmb();
/* Log avail event write */
used = vhost_avail_event(vq);
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof *vhost_avail_event(vq));
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof *vhost_avail_event(vq));
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
@@ -2191,10 +2267,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
/* Make sure data is seen before log. */
smp_wmb();
/* Log used ring entry write. */
- log_write(vq->log_base,
- vq->log_addr +
- ((void __user *)used - (void __user *)vq->used),
- count * sizeof *used);
+ log_used(vq, ((void __user *)used - (void __user *)vq->used),
+ count * sizeof *used);
}
old = vq->last_used_idx;
new = (vq->last_used_idx += count);
@@ -2236,9 +2310,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
/* Make sure used idx is seen before log. */
smp_wmb();
/* Log used index update. */
- log_write(vq->log_base,
- vq->log_addr + offsetof(struct vring_used, idx),
- sizeof vq->used->idx);
+ log_used(vq, offsetof(struct vring_used, idx),
+ sizeof vq->used->idx);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
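
Editor's note: log_write_hva() added above walks every memory region because a single host-virtual range can overlap several guest-physical regions; each overlap is clipped and translated back to a guest-physical offset before logging. A stand-alone sketch of that clipping arithmetic; the region layout and addresses are invented for the example:

#include <stdio.h>
#include <stdint.h>

struct region {
	uint64_t gpa;			/* guest-physical start */
	uint64_t userspace_addr;	/* host-virtual start */
	uint64_t size;
};

/* Clip [hva, hva+len) against one region and print the guest-physical
 * range that would be logged; returns the number of bytes covered. */
static uint64_t log_overlap(const struct region *r, uint64_t hva, uint64_t len)
{
	if (r->userspace_addr > hva - 1 + len ||
	    r->userspace_addr - 1 + r->size < hva)
		return 0;				/* no overlap */

	uint64_t start = hva > r->userspace_addr ? hva : r->userspace_addr;
	uint64_t end_a = hva - 1 + len;
	uint64_t end_b = r->userspace_addr - 1 + r->size;
	uint64_t end = end_a < end_b ? end_a : end_b;
	uint64_t l = end - start + 1;

	printf("log gpa 0x%llx len %llu\n",
	       (unsigned long long)(r->gpa + start - r->userspace_addr),
	       (unsigned long long)l);
	return l;
}

int main(void)
{
	struct region r = {
		.gpa = 0x100000,
		.userspace_addr = 0x7f0000001000,
		.size = 0x2000,
	};

	/* A 0x3000-byte write starting 0x1000 before the region: only the
	 * overlapping 0x2000 bytes get logged. */
	log_overlap(&r, 0x7f0000000000, 0x3000);
	return 0;
}
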
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 466ef7542291..1b675dad5e05 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
- unsigned int log_num, u64 len);
+ unsigned int log_num, u64 len,
+ struct iovec *iov, int count);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 8976190b6c1f..bfa1360ec750 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt)
continue;
}
#endif
+
+ if (!strncmp(options, "logo-pos:", 9)) {
+ options += 9;
+ if (!strcmp(options, "center"))
+ fb_center_logo = true;
+ continue;
+ }
}
return 1;
}
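
Editor's note: fb_console_setup() above gains a "logo-pos:" option whose only recognised value is "center". A trivial parser written the same way, runnable on its own; the surrounding option handling is omitted and the flag is a local stand-in:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool fb_center_logo;

/* Parses one option token the way the fbcon hunk does. */
static void parse_option(const char *options)
{
	if (!strncmp(options, "logo-pos:", 9)) {
		options += 9;
		if (!strcmp(options, "center"))
			fb_center_logo = true;
	}
}

int main(void)
{
	parse_option("logo-pos:center");
	printf("fb_center_logo = %d\n", fb_center_logo);	/* 1 */
	return 0;
}
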
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 558ed2ed3124..cb43a2258c51 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb);
int num_registered_fb __read_mostly;
EXPORT_SYMBOL(num_registered_fb);
+bool fb_center_logo __read_mostly;
+EXPORT_SYMBOL(fb_center_logo);
+
static struct fb_info *get_fb_info(unsigned int idx)
{
struct fb_info *fb_info;
@@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
fb_set_logo(info, logo, logo_new, fb_logo.depth);
}
-#ifdef CONFIG_FB_LOGO_CENTER
- {
+ if (fb_center_logo) {
int xres = info->var.xres;
int yres = info->var.yres;
@@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
--n;
image.dx = (xres - n * (logo->width + 8) - 8) / 2;
image.dy = y ?: (yres - logo->height) / 2;
+ } else {
+ image.dx = 0;
+ image.dy = y;
}
-#else
- image.dx = 0;
- image.dy = y;
-#endif
+
image.width = logo->width;
image.height = logo->height;
@@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
}
height = fb_logo.logo->height;
-#ifdef CONFIG_FB_LOGO_CENTER
- height += (yres - fb_logo.logo->height) / 2;
-#endif
+ if (fb_center_logo)
+ height += (yres - fb_logo.logo->height) / 2;
return fb_prepare_extra_logos(info, height, yres);
}
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 31f769d67195..057d3cdef92e 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
}
static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp,
- const char *name, unsigned long address)
+ unsigned long address)
{
struct offb_par *par = (struct offb_par *) info->par;
- if (dp && !strncmp(name, "ATY,Rage128", 11)) {
+ if (of_node_name_prefix(dp, "ATY,Rage128")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_r128;
- } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12)
- || !strncmp(name, "ATY,RageM3p12A", 14))) {
+ } else if (of_node_name_prefix(dp, "ATY,RageM3pA") ||
+ of_node_name_prefix(dp, "ATY,RageM3p12A")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_M3A;
- } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) {
+ } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_M3B;
- } else if (dp && !strncmp(name, "ATY,Rage6", 9)) {
+ } else if (of_node_name_prefix(dp, "ATY,Rage6")) {
par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_radeon;
- } else if (!strncmp(name, "ATY,", 4)) {
+ } else if (of_node_name_prefix(dp, "ATY,")) {
unsigned long base = address & 0xff000000UL;
par->cmap_adr =
ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
@@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
if (par->cmap_adr)
par->cmap_type = cmap_gxt2000;
- } else if (dp && !strncmp(name, "vga,Display-", 12)) {
+ } else if (of_node_name_prefix(dp, "vga,Display-")) {
/* Look for AVIVO initialized by SLOF */
struct device_node *pciparent = of_get_parent(dp);
const u32 *vid, *did;
@@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name,
par->cmap_type = cmap_unknown;
if (depth == 8)
- offb_init_palette_hacks(info, dp, name, address);
+ offb_init_palette_hacks(info, dp, address);
else
fix->visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 53f93616c671..8e23160ec59f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
int r = 0;
+ memset(&p, 0, sizeof(p));
+
switch (cmd) {
case OMAPFB_SYNC_GFX:
DBG("ioctl SYNC_GFX\n");
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 1e972c4e88b1..d1f6196c8b9a 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,15 +10,6 @@ menuconfig LOGO
if LOGO
-config FB_LOGO_CENTER
- bool "Center the logo"
- depends on FB=y
- help
- When this option is selected, the bootup logo is centered both
- horizontally and vertically. If more than one logo is displayed
- due to multiple CPUs, the collected line of logos is centered
- as a whole.
-
config FB_LOGO_EXTRA
bool
depends on FB=y
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 728ecd1eea30..fb12fe205f86 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -61,6 +61,10 @@ enum virtio_balloon_vq {
VIRTIO_BALLOON_VQ_MAX
};
+enum virtio_balloon_config_read {
+ VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
+};
+
struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
@@ -77,14 +81,20 @@ struct virtio_balloon {
/* Prevent updating balloon when it is being canceled. */
spinlock_t stop_update_lock;
bool stop_update;
+ /* Bitmap to indicate if reading the related config fields are needed */
+ unsigned long config_read_bitmap;
/* The list of allocated free pages, waiting to be given back to mm */
struct list_head free_page_list;
spinlock_t free_page_list_lock;
/* The number of free page blocks on the above list */
unsigned long num_free_page_blocks;
- /* The cmd id received from host */
- u32 cmd_id_received;
+ /*
+ * The cmd id received from host.
+ * Read it via virtio_balloon_cmd_id_received to get the latest value
+ * sent from host.
+ */
+ u32 cmd_id_received_cache;
/* The cmd id that is actively in use */
__virtio32 cmd_id_active;
/* Buffer to store the stop sign */
@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
return num_returned;
}
+static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
+{
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ return;
+
+ /* No need to queue the work if the bit was already set. */
+ if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+ &vb->config_read_bitmap))
+ return;
+
+ queue_work(vb->balloon_wq, &vb->report_free_page_work);
+}
+
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
unsigned long flags;
- s64 diff = towards_target(vb);
-
- if (diff) {
- spin_lock_irqsave(&vb->stop_update_lock, flags);
- if (!vb->stop_update)
- queue_work(system_freezable_wq,
- &vb->update_balloon_size_work);
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
- }
- if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
- virtio_cread(vdev, struct virtio_balloon_config,
- free_page_report_cmd_id, &vb->cmd_id_received);
- if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
- /* Pass ULONG_MAX to give back all the free pages */
- return_free_pages_to_mm(vb, ULONG_MAX);
- } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
- vb->cmd_id_received !=
- virtio32_to_cpu(vdev, vb->cmd_id_active)) {
- spin_lock_irqsave(&vb->stop_update_lock, flags);
- if (!vb->stop_update) {
- queue_work(vb->balloon_wq,
- &vb->report_free_page_work);
- }
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
- }
+ spin_lock_irqsave(&vb->stop_update_lock, flags);
+ if (!vb->stop_update) {
+ queue_work(system_freezable_wq,
+ &vb->update_balloon_size_work);
+ virtio_balloon_queue_free_page_work(vb);
}
+ spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}
static void update_balloon_size(struct virtio_balloon *vb)
@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb)
return 0;
}
+static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
+{
+ if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+ &vb->config_read_bitmap))
+ virtio_cread(vb->vdev, struct virtio_balloon_config,
+ free_page_report_cmd_id,
+ &vb->cmd_id_received_cache);
+
+ return vb->cmd_id_received_cache;
+}
+
static int send_cmd_id_start(struct virtio_balloon *vb)
{
struct scatterlist sg;
@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
while (virtqueue_get_buf(vq, &unused))
;
- vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
+ vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
+ virtio_balloon_cmd_id_received(vb));
sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
if (!err)
@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb)
* stop the reporting.
*/
cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
- if (cmd_id_active != vb->cmd_id_received)
+ if (unlikely(cmd_id_active !=
+ virtio_balloon_cmd_id_received(vb)))
break;
/*
@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb)
return 0;
}
-static void report_free_page_func(struct work_struct *work)
+static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
int err;
- struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
- report_free_page_work);
struct device *dev = &vb->vdev->dev;
/* Start by sending the received cmd id to host with an outbuf. */
@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work)
dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}
+static void report_free_page_func(struct work_struct *work)
+{
+ struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
+ report_free_page_work);
+ u32 cmd_id_received;
+
+ cmd_id_received = virtio_balloon_cmd_id_received(vb);
+ if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
+ /* Pass ULONG_MAX to give back all the free pages */
+ return_free_pages_to_mm(vb, ULONG_MAX);
+ } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
+ cmd_id_received !=
+ virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
+ virtio_balloon_report_free_page(vb);
+ }
+}
+
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_del_vqs;
}
INIT_WORK(&vb->report_free_page_work, report_free_page_func);
- vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
+ vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
VIRTIO_BALLOON_CMD_ID_STOP);
vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
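The balloon hunks above defer the free_page_report_cmd_id read out of the config-changed path and into the workqueue, since a config read can sleep on some transports (the virtio_config.h documentation change later in this merge spells this out). Condensed side by side, the two halves of that deferral look like this; this is only a restatement sketch of the helpers added above, with the feature check dropped for brevity:

static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
{
        /* First caller sets the bit and queues the work; later ones are no-ops. */
        if (!test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
                              &vb->config_read_bitmap))
                queue_work(vb->balloon_wq, &vb->report_free_page_work);
}

static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
        /* Worker context: do the (possibly sleeping) read once per set bit. */
        if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
                               &vb->config_read_bitmap))
                virtio_cread(vb->vdev, struct virtio_balloon_config,
                             free_page_report_cmd_id,
                             &vb->cmd_id_received_cache);
        return vb->cmd_id_received_cache;
}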
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 4cd9ea5c75be..d9dd0f789279 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
- int i, err;
+ int i, err, queue_idx = 0;
err = request_irq(irq, vm_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vm_dev);
@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return err;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i],
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
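The queue_idx change above (mirrored for the PCI transport in the next file) lets a driver leave holes in its names[] array for virtqueues it does not need, while the remaining queues still get contiguous hardware queue indexes. A hypothetical caller fragment, assuming the usual virtio_find_vqs() wrapper; the names and the four-queue layout are made up for illustration:

/*
 * The hole at index 2 yields vqs[2] == NULL, and "event" lands on the
 * device's queue index 2 rather than 3, matching a device that only
 * exposes three queues when the optional feature is not negotiated.
 */
struct virtqueue *vqs[4];
vq_callback_t *cbs[4] = { NULL, NULL, NULL, NULL };
const char *names[4] = { "rx", "tx", NULL, "event" };
int err;

err = virtio_find_vqs(vdev, 4, vqs, cbs, names, NULL);
if (err)
        return err;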
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 465a6f5142cc..d0584c040c60 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u16 msix_vec;
- int i, err, nvectors, allocated_vectors;
+ int i, err, nvectors, allocated_vectors, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
@@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
msix_vec = allocated_vectors++;
else
msix_vec = VP_MSIX_VQ_VECTOR;
- vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false,
msix_vec);
if (IS_ERR(vqs[i])) {
@@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
const char * const names[], const bool *ctx)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- int i, err;
+ int i, err, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
@@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
vqs[i] = NULL;
continue;
}
- vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false,
VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 5c4a764717c4..81208cd3f4ec 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -17,6 +17,7 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 98967f0a7d10..db7c57d82cfd 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -18,6 +18,7 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c
index 0d3a0fbbd7a5..52941207a12a 100644
--- a/drivers/watchdog/tqmx86_wdt.c
+++ b/drivers/watchdog/tqmx86_wdt.c
@@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (!res)
+ return -ENODEV;
priv->io_base = devm_ioport_map(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(priv->io_base))
- return PTR_ERR(priv->io_base);
+ if (!priv->io_base)
+ return -ENOMEM;
watchdog_set_drvdata(&priv->wdd, priv);
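The watchdog fix above corrects a mixed-up error convention: platform_get_resource() and devm_ioport_map() signal failure with a NULL return, so IS_ERR()/PTR_ERR() on them never fires. For contrast, helpers such as devm_ioremap_resource() do use the ERR_PTR convention. A rough sketch of both styles, assuming an ordinary platform probe context:

struct resource *res;
void __iomem *base;

/* NULL-returning helpers: check the pointer directly. */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
        return -ENODEV;
priv->io_base = devm_ioport_map(&pdev->dev, res->start, resource_size(res));
if (!priv->io_base)
        return -ENOMEM;

/* ERR_PTR-returning helpers: IS_ERR()/PTR_ERR() is the right check here. */
base = devm_ioremap_resource(&pdev->dev,
                             platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (IS_ERR(base))
        return PTR_ERR(base);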
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 93194f3e7540..117e76b2f939 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
xen_have_vector_callback = 0;
return;
}
- pr_info("Xen HVM callback vector for event delivery is enabled\n");
+ pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
xen_hvm_callback_vector);
}
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 2e5d845b5091..7aa64d1b119c 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
/* write the data, then modify the indexes */
virt_wmb();
- if (ret < 0)
+ if (ret < 0) {
+ atomic_set(&map->read, 0);
intf->in_error = ret;
- else
+ } else
intf->in_prod = prod + ret;
/* update the indexes, then notify the other end */
virt_wmb();
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
static void pvcalls_sk_state_change(struct sock *sock)
{
struct sock_mapping *map = sock->sk_user_data;
- struct pvcalls_data_intf *intf;
if (map == NULL)
return;
- intf = map->ring;
- intf->in_error = -ENOTCONN;
+ atomic_inc(&map->read);
notify_remote_via_irq(map->irq);
}
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8f3e6f..8a249c95c193 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -31,6 +31,12 @@
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
#define PVCALLS_FRONT_MAX_SPIN 5000
+static struct proto pvcalls_proto = {
+ .name = "PVCalls",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct sock),
+};
+
struct pvcalls_bedata {
struct xen_pvcalls_front_ring ring;
grant_ref_t ref;
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
return ret;
}
+static void free_active_ring(struct sock_mapping *map)
+{
+ if (!map->active.ring)
+ return;
+
+ free_pages((unsigned long)map->active.data.in,
+ map->active.ring->ring_order);
+ free_page((unsigned long)map->active.ring);
+}
+
+static int alloc_active_ring(struct sock_mapping *map)
+{
+ void *bytes;
+
+ map->active.ring = (struct pvcalls_data_intf *)
+ get_zeroed_page(GFP_KERNEL);
+ if (!map->active.ring)
+ goto out;
+
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PVCALLS_RING_ORDER);
+ if (!bytes)
+ goto out;
+
+ map->active.data.in = bytes;
+ map->active.data.out = bytes +
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ return 0;
+
+out:
+ free_active_ring(map);
+ return -ENOMEM;
+}
+
static int create_active(struct sock_mapping *map, int *evtchn)
{
void *bytes;
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
*evtchn = -1;
init_waitqueue_head(&map->active.inflight_conn_req);
- map->active.ring = (struct pvcalls_data_intf *)
- __get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (map->active.ring == NULL)
- goto out_error;
- map->active.ring->ring_order = PVCALLS_RING_ORDER;
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- PVCALLS_RING_ORDER);
- if (bytes == NULL)
- goto out_error;
+ bytes = map->active.data.in;
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
map->active.ring->ref[i] = gnttab_grant_foreign_access(
pvcalls_front_dev->otherend_id,
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
pvcalls_front_dev->otherend_id,
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
- map->active.data.in = bytes;
- map->active.data.out = bytes +
- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
if (ret)
goto out_error;
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
out_error:
if (*evtchn >= 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
- free_page((unsigned long)map->active.ring);
return ret;
}
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+ ret = alloc_active_ring(map);
+ if (ret < 0) {
+ pvcalls_exit_sock(sock);
+ return ret;
+ }
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
ret = create_active(map, &evtchn);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
@@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf,
virt_mb();
size = pvcalls_queued(prod, cons, array_size);
- if (size >= array_size)
+ if (size > array_size)
return -EINVAL;
+ if (size == array_size)
+ return 0;
if (len > array_size - size)
len = array_size - size;
@@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
error = intf->in_error;
/* get pointers before reading from the ring */
virt_rmb();
- if (error < 0)
- return error;
size = pvcalls_queued(prod, cons, array_size);
masked_prod = pvcalls_mask(prod, array_size);
masked_cons = pvcalls_mask(cons, array_size);
if (size == 0)
- return 0;
+ return error ?: size;
if (len > size)
len = size;
@@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
}
}
- spin_lock(&bedata->socket_lock);
- ret = get_request(bedata, &req_id);
- if (ret < 0) {
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
- spin_unlock(&bedata->socket_lock);
+ pvcalls_exit_sock(sock);
+ return -ENOMEM;
+ }
+ ret = alloc_active_ring(map2);
+ if (ret < 0) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ kfree(map2);
pvcalls_exit_sock(sock);
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
- if (map2 == NULL) {
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map2);
+ kfree(map2);
pvcalls_exit_sock(sock);
- return -ENOMEM;
+ return ret;
}
+
ret = create_active(map2, &evtchn);
if (ret < 0) {
+ free_active_ring(map2);
kfree(map2);
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
@@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
received:
map2->sock = newsock;
- newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+ newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
if (!newsock->sk) {
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
@@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock)
spin_lock(&bedata->socket_lock);
list_del(&map->list);
spin_unlock(&bedata->socket_lock);
- if (READ_ONCE(map->passive.inflight_req_id) !=
- PVCALLS_INVALID_ID) {
+ if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
+ READ_ONCE(map->passive.inflight_req_id) != 0) {
pvcalls_front_free_map(bedata,
map->passive.accept_map);
}
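The pvcalls-front changes above pull the ring-page allocations out from under bedata->socket_lock into the new alloc_active_ring()/free_active_ring() helpers, so they can use GFP_KERNEL, and the accept path no longer needs a GFP_ATOMIC kzalloc under the spinlock. A condensed sketch of the ordering the connect/accept paths now follow (names from the patch, error labels trimmed):

map2 = kzalloc(sizeof(*map2), GFP_KERNEL);      /* may sleep */
if (!map2)
        return -ENOMEM;
if (alloc_active_ring(map2) < 0) {              /* may sleep */
        kfree(map2);
        return -ENOMEM;
}

spin_lock(&bedata->socket_lock);                /* no sleeping past this point */
ret = get_request(bedata, &req_id);
if (ret < 0) {
        spin_unlock(&bedata->socket_lock);
        free_active_ring(map2);
        kfree(map2);
        return ret;
}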
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 0568fd986821..e432bd27a2e7 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -208,7 +208,7 @@ again:
/* The new front of the queue now owns the state variables. */
next = list_entry(vnode->pending_locks.next,
struct file_lock, fl_u.afs.link);
- vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
goto again;
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
/* The new front of the queue now owns the state variables. */
next = list_entry(vnode->pending_locks.next,
struct file_lock, fl_u.afs.link);
- vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
afs_lock_may_be_available(vnode);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 6b17d3620414..1a4ce07fb406 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
valid = true;
} else {
- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
vnode->cb_v_break = vnode->volume->cb_v_break;
valid = false;
}
@@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode)
#endif
afs_put_permits(rcu_access_pointer(vnode->permit_cache));
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
_leave("");
}
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index 07bc10f076aa..d443e2bfa094 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus {
struct yfs_xdr_u64 max_quota;
struct yfs_xdr_u64 file_quota;
} __packed;
+
+enum yfs_lock_type {
+ yfs_LockNone = -1,
+ yfs_LockRead = 0,
+ yfs_LockWrite = 1,
+ yfs_LockExtend = 2,
+ yfs_LockRelease = 3,
+ yfs_LockMandatoryRead = 0x100,
+ yfs_LockMandatoryWrite = 0x101,
+ yfs_LockMandatoryExtend = 0x102,
+};
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a7b44863d502..2c588f9bbbda 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls;
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
+static void afs_delete_async_call(struct work_struct *);
static void afs_process_async_call(struct work_struct *);
static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
@@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call)
}
}
+static struct afs_call *afs_get_call(struct afs_call *call,
+ enum afs_call_trace why)
+{
+ int u = atomic_inc_return(&call->usage);
+
+ trace_afs_call(call, why, u,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+ return call;
+}
+
/*
* Queue the call for actual work.
*/
static void afs_queue_call_work(struct afs_call *call)
{
if (call->type->work) {
- int u = atomic_inc_return(&call->usage);
-
- trace_afs_call(call, afs_call_trace_work, u,
- atomic_read(&call->net->nr_outstanding_calls),
- __builtin_return_address(0));
-
INIT_WORK(&call->work, call->type->work);
+ afs_get_call(call, afs_call_trace_work);
if (!queue_work(afs_wq, &call->work))
afs_put_call(call);
}
@@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
}
}
+ /* If the call is going to be asynchronous, we need an extra ref for
+ * the call to hold itself so the caller need not hang on to its ref.
+ */
+ if (call->async)
+ afs_get_call(call, afs_call_trace_get);
+
/* create a call */
rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
(unsigned long)call,
@@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
goto error_do_abort;
}
- /* at this point, an async call may no longer exist as it may have
- * already completed */
- if (call->async)
+ /* Note that at this point, we may have received the reply or an abort
+ * - and an asynchronous call may already have completed.
+ */
+ if (call->async) {
+ afs_put_call(call);
return -EINPROGRESS;
+ }
return afs_wait_for_call_to_complete(call, ac);
error_do_abort:
- call->state = AFS_CALL_COMPLETE;
if (ret != -ECONNABORTED) {
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret, "KSD");
@@ -463,8 +478,24 @@ error_do_abort:
error_kill_call:
if (call->type->done)
call->type->done(call);
- afs_put_call(call);
+
+ /* We need to dispose of the extra ref we grabbed for an async call.
+ * The call, however, might be queued on afs_async_calls and we need to
+ * make sure we don't get any more notifications that might requeue it.
+ */
+ if (call->rxcall) {
+ rxrpc_kernel_end_call(call->net->socket, call->rxcall);
+ call->rxcall = NULL;
+ }
+ if (call->async) {
+ if (cancel_work_sync(&call->async_work))
+ afs_put_call(call);
+ afs_put_call(call);
+ }
+
ac->error = ret;
+ call->state = AFS_CALL_COMPLETE;
+ afs_put_call(call);
_leave(" = %d", ret);
return ret;
}
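The rxrpc.c changes above make an asynchronous AFS call take an extra reference on itself with the new afs_get_call(), so the caller can drop its own reference once afs_make_call() returns -EINPROGRESS, and the error path now cancels the queued work and puts that extra reference. The underlying "object holds a ref on itself while async work is pending" pattern, sketched with hypothetical foo_* names in a kernel-module context:

int foo_submit(struct foo *f)
{
        foo_get(f);                     /* reference owned by the queued work */
        if (!queue_work(foo_wq, &f->work)) {
                foo_put(f);             /* already queued: hand the ref back */
                return -EBUSY;
        }
        return 0;
}

static void foo_work_fn(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, work);

        foo_process(f);
        foo_put(f);                     /* drop the ref taken in foo_submit() */
}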
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 95d0761cdb34..155dc14caef9 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
if (vldb->fs_mask[i] & type_mask)
nr_servers++;
- slist = kzalloc(sizeof(struct afs_server_list) +
- sizeof(struct afs_server_entry) * nr_servers,
- GFP_KERNEL);
+ slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
if (!slist)
goto error;
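struct_size() from <linux/overflow.h>, used above, computes sizeof(*slist) plus nr_servers trailing array elements and saturates on overflow, so an absurdly large count makes the allocation fail cleanly instead of silently wrapping to a short buffer. A generic sketch of the idiom; the structures here are hypothetical:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
        u64 key;
};

struct item_list {
        unsigned int nr;
        struct item items[];            /* flexible array member */
};

static struct item_list *alloc_items(unsigned int nr)
{
        struct item_list *list;

        /* sizeof(*list) + nr * sizeof(list->items[0]), overflow-checked */
        list = kzalloc(struct_size(list, items, nr), GFP_KERNEL);
        if (!list)
                return NULL;
        list->nr = nr;
        return list;
}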
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 12658c1363ae..5aa57929e8c2 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
bp = xdr_encode_YFSFid(bp, &vnode->fid);
bp = xdr_encode_string(bp, name, namesz);
bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
- bp = xdr_encode_u32(bp, 0); /* ViceLockType */
+ bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
yfs_check_req(call, bp);
afs_use_fs_server(call, fc->cbi);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c546cdce77e6..58a4c1217fa8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
}
EXPORT_SYMBOL(invalidate_bdev);
+static void set_init_blocksize(struct block_device *bdev)
+{
+ unsigned bsize = bdev_logical_block_size(bdev);
+ loff_t size = i_size_read(bdev->bd_inode);
+
+ while (bsize < PAGE_SIZE) {
+ if (size & bsize)
+ break;
+ bsize <<= 1;
+ }
+ bdev->bd_block_size = bsize;
+ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+}
+
int set_blocksize(struct block_device *bdev, int size)
{
/* Size must be a power of two, and between 512 and PAGE_SIZE */
@@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change);
void bd_set_size(struct block_device *bdev, loff_t size)
{
- unsigned bsize = bdev_logical_block_size(bdev);
-
inode_lock(bdev->bd_inode);
i_size_write(bdev->bd_inode, size);
inode_unlock(bdev->bd_inode);
- while (bsize < PAGE_SIZE) {
- if (size & bsize)
- break;
- bsize <<= 1;
- }
- bdev->bd_block_size = bsize;
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
@@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
}
}
- if (!ret)
+ if (!ret) {
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+ set_init_blocksize(bdev);
+ }
/*
* If the device is invalidated, rescan partition
@@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
goto out_clear;
}
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
+ set_init_blocksize(bdev);
}
if (bdev->bd_bdi == &noop_backing_dev_info)
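The loop hoisted into set_init_blocksize() above picks the largest power-of-two block size, capped at PAGE_SIZE, that still divides the device size evenly; any set bit at or below the candidate size stops the doubling. The same arithmetic as a small stand-alone program, assuming 512-byte logical blocks and a 4 KiB page:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Same doubling loop as set_init_blocksize(), lifted out for illustration. */
static unsigned int init_blocksize(unsigned long long size,
                                   unsigned int logical_block_size)
{
        unsigned int bsize = logical_block_size;

        while (bsize < PAGE_SIZE) {
                if (size & bsize)       /* size is not a multiple of 2 * bsize */
                        break;
                bsize <<= 1;
        }
        return bsize;
}

int main(void)
{
        /* 1 MiB device: divisible by 4096, so the block size tops out at a page. */
        printf("%u\n", init_blocksize(1 << 20, 512));   /* prints 4096 */
        /* 7 sectors: bit 9 is set, so we stay at the 512-byte logical size. */
        printf("%u\n", init_blocksize(7 * 512, 512));   /* prints 512 */
        return 0;
}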
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0a68cf7032f5..7a2a2621f0d9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
+struct btrfs_delayed_ref_root;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
@@ -786,6 +787,9 @@ enum {
* main phase. The fs_info::balance_ctl is initialized.
*/
BTRFS_FS_BALANCE_RUNNING,
+
+ /* Indicate that the cleaner thread is awake and doing something. */
+ BTRFS_FS_CLEANER_RUNNING,
};
struct btrfs_fs_info {
@@ -2661,6 +2665,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait);
+void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8da2f380d3c0..6a2a2a951705 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1682,6 +1682,8 @@ static int cleaner_kthread(void *arg)
while (1) {
again = 0;
+ set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
+
/* Make the cleaner go to sleep early. */
if (btrfs_need_cleaner_sleep(fs_info))
goto sleep;
@@ -1728,6 +1730,7 @@ static int cleaner_kthread(void *arg)
*/
btrfs_delete_unused_bgs(fs_info);
sleep:
+ clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
if (kthread_should_park())
kthread_parkme();
if (kthread_should_stop())
@@ -4201,6 +4204,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
spin_lock(&fs_info->ordered_root_lock);
}
spin_unlock(&fs_info->ordered_root_lock);
+
+ /*
+ * We need this here because if we've been flipped read-only we won't
+ * get sync() from the umount, so we need to make sure any ordered
+ * extents whose dirty pages haven't started writeout yet actually get
+ * run and error out properly.
+ */
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
@@ -4265,6 +4276,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
if (pin_bytes)
btrfs_pin_extent(fs_info, head->bytenr,
head->num_bytes, 1);
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
btrfs_put_delayed_ref_head(head);
cond_resched();
spin_lock(&delayed_refs->lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b15afeae16df..d81035b7ea7d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
return ret ? ret : 1;
}
-static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head)
+void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_delayed_ref_root *delayed_refs =
- &trans->transaction->delayed_refs;
int nr_items = 1; /* Dropping this ref head update. */
if (head->total_ref_mod < 0) {
@@ -2544,7 +2542,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
}
}
- cleanup_ref_head_accounting(trans, head);
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
trace_run_delayed_ref_head(fs_info, head, 0);
btrfs_delayed_ref_unlock(head);
@@ -4954,6 +4952,15 @@ static void flush_space(struct btrfs_fs_info *fs_info,
ret = 0;
break;
case COMMIT_TRANS:
+ /*
+ * If we have pending delayed iputs then we could free up a
+ * bunch of pinned space, so make sure we run the iputs before
+ * we do our pinned bytes check below.
+ */
+ mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+ btrfs_run_delayed_iputs(fs_info);
+ mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
+
ret = may_commit_transaction(fs_info, space_info);
break;
default:
@@ -7188,7 +7195,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (head->must_insert_reserved)
ret = 1;
- cleanup_ref_head_accounting(trans, head);
+ btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 43eb4535319d..5c349667c761 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3129,9 +3129,6 @@ out:
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
- /* Try to release some metadata so we don't get an OOM but don't wait */
- btrfs_btree_balance_dirty_nodelay(fs_info);
-
return ret;
}
@@ -3254,6 +3251,8 @@ void btrfs_add_delayed_iput(struct inode *inode)
ASSERT(list_empty(&binode->delayed_iput));
list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
+ if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
+ wake_up_process(fs_info->cleaner_kthread);
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 46d691ba04bc..45b2322e092d 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t count, unsigned int flags)
{
- ssize_t ret;
-
if (file_inode(file_in) == file_inode(file_out))
return -EINVAL;
-retry:
- ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
- if (ret == -EAGAIN)
- goto retry;
- return ret;
+ return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
}
static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 96f7d32cd184..898c8321b343 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -128,7 +128,6 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
struct pstore_record *record)
{
struct persistent_ram_zone *prz;
- bool update = (record->type == PSTORE_TYPE_DMESG);
/* Give up if we never existed or have hit the end. */
if (!przs)
@@ -139,7 +138,7 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
return NULL;
/* Update old/shadowed buffer. */
- if (update)
+ if (prz->type == PSTORE_TYPE_DMESG)
persistent_ram_save_old(prz);
if (!persistent_ram_old_size(prz))
@@ -711,18 +710,15 @@ static int ramoops_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ramoops_platform_data *pdata = dev->platform_data;
+ struct ramoops_platform_data pdata_local;
struct ramoops_context *cxt = &oops_cxt;
size_t dump_mem_sz;
phys_addr_t paddr;
int err = -EINVAL;
if (dev_of_node(dev) && !pdata) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- pr_err("cannot allocate platform data buffer\n");
- err = -ENOMEM;
- goto fail_out;
- }
+ pdata = &pdata_local;
+ memset(pdata, 0, sizeof(*pdata));
err = ramoops_parse_dt(pdev, pdata);
if (err < 0)
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
index ad6f55dabd6d..0f2e0fe45ca4 100644
--- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
@@ -1,12 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* Copyright (c) 2017 Amlogic, inc.
* Author: Yixun Lan <yixun.lan@amlogic.com>
*
- * SPDX-License-Identifier: (GPL-2.0+ OR BSD)
*/
#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 7cca5f859a90..f3c43519baa7 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -6,6 +6,7 @@
struct bcma_soc {
struct bcma_bus bus;
+ struct device *dev;
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 39f668d5066b..333a6695a918 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -3,9 +3,8 @@
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
-/* Some compiler specific definitions are overwritten here
- * for Clang compiler
- */
+/* Compiler specific definitions for Clang compiler */
+
#define uninitialized_var(x) x = *(&(x))
/* same as gcc, this was present in clang-2.6 so we can assume it works
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index dd8268f5f5f0..e8579412ad21 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -58,10 +58,6 @@
(typeof(ptr)) (__ptr + (off)); \
})
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) \
- __asm__ ("" : "=r" (var) : "0" (var))
-
/*
* A trick to suppress uninitialized variable warning without generating any
* code
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 517bd14e1222..b17f3cd18334 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -5,9 +5,7 @@
#ifdef __ECC
-/* Some compiler specific definitions are overwritten here
- * for Intel ECC compiler
- */
+/* Compiler specific definitions for Intel ECC compiler */
#include <asm/intrinsics.h>
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index fc5004a4b07d..445348facea9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
#ifndef OPTIMIZER_HIDE_VAR
-#define OPTIMIZER_HIDE_VAR(var) barrier()
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#endif
/* Not-quite-unique ID. */
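With this change every compiler gets the empty-asm definition of OPTIMIZER_HIDE_VAR(): the asm claims to rewrite the variable in a register, so the optimizer has to forget whatever it knew about the value at that point, whereas the old barrier() fallback only ordered memory accesses and hid nothing. A stand-alone illustration using the same construct:

#include <stdio.h>

/* Same construct as the patched OPTIMIZER_HIDE_VAR() definition. */
#define OPTIMIZER_HIDE_VAR(var) __asm__("" : "=r"(var) : "0"(var))

int main(void)
{
        int x = 2;

        /*
         * Without the hide, the compiler may fold x * 21 into the constant 42
         * at build time; with it, x is treated as unknown and the multiply
         * stays in the generated code (useful where folding would defeat a
         * measurement or a deliberate dummy computation).
         */
        OPTIMIZER_HIDE_VAR(x);
        printf("%d\n", x * 21);
        return 0;
}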
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 7cdd31a69719..f52ef0ad6781 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info);
extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
+extern bool fb_center_logo;
extern struct class *fb_class;
#define for_each_registered_fb(i) \
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 5440f11b0907..7315977b64da 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
enum nvdimm_security_state {
+ NVDIMM_SECURITY_ERROR = -1,
NVDIMM_SECURITY_DISABLED,
NVDIMM_SECURITY_UNLOCKED,
NVDIMM_SECURITY_LOCKED,
diff --git a/include/linux/of.h b/include/linux/of.h
index fe472e5195a9..e240992e5cb6 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -50,7 +50,6 @@ struct of_irq_controller;
struct device_node {
const char *name;
- const char *type;
phandle phandle;
const char *full_name;
struct fwnode_handle fwnode;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 0990f913d649..1f3873a2ff29 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
@@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
+#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
extern const int phy_10_100_features_array[4];
@@ -461,8 +463,8 @@ struct phy_device {
* only works for PHYs with IDs which match this field
* name: The friendly name of this PHY type
* phy_id_mask: Defines the important bits of the phy_id
- * features: A list of features (speed, duplex, etc) supported
- * by this PHY
+ * features: A mandatory list of features (speed, duplex, etc)
+ * supported by this PHY
* flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
*
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 93f56fddd92a..95d25b010a25 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3218,6 +3218,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
*
* This is exactly the same as pskb_trim except that it ensures the
* checksum of received packets are still valid after the operation.
+ * It can change skb pointers.
*/
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
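The sentence added above is the important caveat: pskb_trim_rcsum() may reallocate the skb head, so header pointers taken earlier go stale, which is exactly what the nft_reject_bridge hunk later in this merge fixes by re-reading ipv6_hdr(). A rough sketch of the safe pattern; the helper itself is hypothetical and assumes kernel context:

#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

static bool trim_to_ipv6_payload(struct sk_buff *skb)
{
        struct ipv6hdr *ip6h = ipv6_hdr(skb);

        if (pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
                return false;

        ip6h = ipv6_hdr(skb);   /* refresh: the old pointer may be stale */
        return ip6h->nexthdr != NEXTHDR_NONE;
}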
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 32baf8e26735..987b6491b946 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -12,6 +12,11 @@ struct irq_affinity;
/**
* virtio_config_ops - operations for configuring a virtio device
+ * Note: Do not assume that a transport implements all of the get/set
+ * operations as simple reads and writes! Generally speaking,
+ * any of @get/@set, @get_status/@set_status, or @get_features/
+ * @finalize_features are NOT safe to be called from an atomic
+ * context.
* @get: read the value of a configuration field
* vdev: the virtio_device
* offset: the offset of the configuration field
@@ -22,7 +27,7 @@ struct irq_affinity;
* offset: the offset of the configuration field
* buf: the buffer to read the field value from.
* len: the length of the buffer
- * @generation: config generation counter
+ * @generation: config generation counter (optional)
* vdev: the virtio_device
* Returns the config generation counter
* @get_status: read the status byte
@@ -48,17 +53,17 @@ struct irq_affinity;
* @del_vqs: free virtqueues found by find_vqs().
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
- * Returns the first 32 feature bits (all we currently need).
+ * Returns the first 64 feature bits (all we currently need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
* Returns 0 on success or error status
- * @bus_name: return the bus name associated with the device
+ * @bus_name: return the bus name associated with the device (optional)
* vdev: the virtio_device
* This returns a pointer to the bus name a la pci_name from which
* the caller can then copy.
- * @set_vq_affinity: set the affinity for a virtqueue.
+ * @set_vq_affinity: set the affinity for a virtqueue (optional).
* @get_vq_affinity: get the affinity for a virtqueue (optional).
*/
typedef void vq_callback_t(struct virtqueue *);
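The annotations above make it explicit that @generation, @bus_name and @set_vq_affinity may be left NULL by a transport, so anything reaching into vdev->config has to check before calling. A hedged sketch of that style of wrapper; the core provides similar helpers, but these names are made up:

static const char *cfg_bus_name(struct virtio_device *vdev)
{
        if (vdev->config->bus_name)
                return vdev->config->bus_name(vdev);
        return "virtio";                /* fallback when the op is absent */
}

static u32 cfg_generation(struct virtio_device *vdev)
{
        /* No generation counter means the config never changes mid-read. */
        return vdev->config->generation ? vdev->config->generation(vdev) : 0;
}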
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 1adefe42c0a6..2bfb87eb98ce 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -21,18 +21,6 @@ struct socket;
struct rxrpc_call;
/*
- * Call completion condition (state == RXRPC_CALL_COMPLETE).
- */
-enum rxrpc_call_completion {
- RXRPC_CALL_SUCCEEDED, /* - Normal termination */
- RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
- RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
- RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
- RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
- NR__RXRPC_CALL_COMPLETIONS
-};
-
-/*
* Debug ID counter for tracing.
*/
extern atomic_t rxrpc_debug_id;
@@ -73,10 +61,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
- struct sockaddr_rxrpc *, struct key *);
-int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
- enum rxrpc_call_completion *, u32 *);
u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index c5969762a8f4..9c8214d2116d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb, struct fib_dump_filter *filter);
-int fib_table_flush(struct net *net, struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 7d5cda7ce32a..3e370cb36263 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -84,7 +84,6 @@ struct flow_offload {
struct nf_flow_route {
struct {
struct dst_entry *dst;
- int ifindex;
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 33d291888ba9..e3f005eae1f7 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -25,6 +25,7 @@
enum afs_call_trace {
afs_call_trace_alloc,
afs_call_trace_free,
+ afs_call_trace_get,
afs_call_trace_put,
afs_call_trace_wake,
afs_call_trace_work,
@@ -159,6 +160,7 @@ enum afs_file_error {
#define afs_call_traces \
EM(afs_call_trace_alloc, "ALLOC") \
EM(afs_call_trace_free, "FREE ") \
+ EM(afs_call_trace_get, "GET ") \
EM(afs_call_trace_put, "PUT ") \
EM(afs_call_trace_wake, "WAKE ") \
E_(afs_call_trace_work, "WORK ")
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index f6052e70bf40..a55cb8b10165 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -268,7 +268,7 @@ struct sockaddr_in {
#define IN_MULTICAST(a) IN_CLASSD(a)
#define IN_MULTICAST_NET 0xe0000000
-#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff)
+#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
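The old IN_BADCLASS() was missing two closing parentheses, so any userspace build that actually expanded the macro failed to compile; the rewrite also casts the 0xffffffff constant so both sides of the comparison have the same type on 32-bit and 64-bit longs. A tiny check against the patched uapi header, assuming <linux/in.h> is included on its own (not together with <netinet/in.h>):

#include <stdio.h>
#include <linux/in.h>

int main(void)
{
        unsigned int limited_broadcast = 0xffffffff;    /* 255.255.255.255 */
        unsigned int ordinary = 0xc0a80001;             /* 192.168.0.1 */

        printf("%d\n", IN_BADCLASS(limited_broadcast)); /* prints 1 */
        printf("%d\n", IN_BADCLASS(ordinary));          /* prints 0 */
        return 0;
}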
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index d13fd490b66d..6e73f0274e41 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
PVRDMA_WR_BIND_MW,
PVRDMA_WR_REG_SIG_MR,
+ PVRDMA_WR_ERROR,
};
enum pvrdma_wc_status {
diff --git a/init/Kconfig b/init/Kconfig
index d47cb77a220e..513fa544a134 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1124,6 +1124,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
bool "Dead code and data elimination (EXPERIMENTAL)"
depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
depends on EXPERT
+ depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
depends on $(cc-option,-ffunction-sections -fdata-sections)
depends on $(ld-option,--gc-sections)
help
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index a2f53642592b..befe570be5ba 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
return kind_ops[BTF_INFO_KIND(t->info)];
}
-bool btf_name_offset_valid(const struct btf *btf, u32 offset)
+static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
return BTF_STR_OFFSET_VALID(offset) &&
offset < btf->hdr.str_len;
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 9425c2fb872f..ab612fe9862f 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
+ /* fall through */
default:
return NULL;
}
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 99d243e1ad6e..52378d3e34b3 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -12,6 +12,7 @@
struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
+ u32 inner_map_meta_size;
struct fd f;
f = fdget(inner_map_ufd);
@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
return ERR_PTR(-EINVAL);
}
- inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+ inner_map_meta_size = sizeof(*inner_map_meta);
+ /* In some cases the verifier needs to access beyond just the base map. */
+ if (inner_map->ops == &array_map_ops)
+ inner_map_meta_size = sizeof(struct bpf_array);
+
+ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
if (!inner_map_meta) {
fdput(f);
return ERR_PTR(-ENOMEM);
@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta->key_size = inner_map->key_size;
inner_map_meta->value_size = inner_map->value_size;
inner_map_meta->map_flags = inner_map->map_flags;
- inner_map_meta->ops = inner_map->ops;
inner_map_meta->max_entries = inner_map->max_entries;
+ /* Misc members not needed in bpf_map_meta_equal() check. */
+ inner_map_meta->ops = inner_map->ops;
+ if (inner_map->ops == &array_map_ops) {
+ inner_map_meta->unpriv_array = inner_map->unpriv_array;
+ container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+ container_of(inner_map, struct bpf_array, map)->index_mask;
+ }
+
fdput(f);
return inner_map_meta;
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d9e2483669d0..d43b14535827 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
if (nhdr->n_type == BPF_BUILD_ID &&
nhdr->n_namesz == sizeof("GNU") &&
- nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
+ nhdr->n_descsz > 0 &&
+ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
memcpy(build_id,
note_start + note_offs +
ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
- BPF_BUILD_ID_SIZE);
+ nhdr->n_descsz);
+ memset(build_id + nhdr->n_descsz, 0,
+ BPF_BUILD_ID_SIZE - nhdr->n_descsz);
return 0;
}
new_offs = note_offs + sizeof(Elf32_Nhdr) +
@@ -311,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
}
return;
}
@@ -321,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
/* per entry fall back to ips */
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
continue;
}
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d6361776dc5c..1fb6fd68b9c7 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -378,6 +378,8 @@ void __init swiotlb_exit(void)
memblock_free_late(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
+ io_tlb_start = 0;
+ io_tlb_end = 0;
io_tlb_nslabs = 0;
max_segment = 0;
}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d7f538847b84..e815781ed751 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -976,6 +976,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file)
struct seccomp_filter *filter = file->private_data;
struct seccomp_knotif *knotif;
+ if (!filter)
+ return 0;
+
mutex_lock(&filter->notify_lock);
/*
@@ -1300,6 +1303,7 @@ out:
out_put_fd:
if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
if (ret < 0) {
+ listener_f->private_data = NULL;
fput(listener_f);
put_unused_fd(listener);
} else {
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 14436f4ca6bd..30e0f9770f88 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
if (x <= ULONG_MAX)
return int_sqrt((unsigned long) x);
- m = 1ULL << (fls64(x) & ~1ULL);
+ m = 1ULL << ((fls64(x) - 1) & ~1ULL);
while (m != 0) {
b = y + m;
y >>= 1;
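fls64() returns the 1-based index of the highest set bit, so the starting mask for the digit-by-digit square root has to be built from fls64(x) - 1 rounded down to an even bit position; without the subtraction, an input with bit 63 set asked for a shift by 64, which is undefined for a 64-bit type. A stand-alone rendering of the corrected routine, re-implemented here purely for illustration (the in-kernel version additionally defers small inputs to int_sqrt()):

#include <stdio.h>
#include <stdint.h>

/* 1-based index of the most significant set bit, 0 for x == 0 (like fls64). */
static int fls64_demo(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

/* Digit-by-digit square root, using the corrected starting bit. */
static uint32_t int_sqrt64_demo(uint64_t x)
{
        uint64_t b, m, y = 0;

        if (x <= 1)
                return x;

        m = 1ULL << ((fls64_demo(x) - 1) & ~1);
        while (m != 0) {
                b = y + m;
                y >>= 1;
                if (x >= b) {
                        x -= b;
                        y += m;
                }
                m >>= 2;
        }
        return y;
}

int main(void)
{
        printf("%u\n", int_sqrt64_demo(1000000ULL));    /* prints 1000 */
        printf("%u\n", int_sqrt64_demo(1ULL << 63));    /* prints 3037000499 */
        return 0;
}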
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
index 7f1c521dcc2f..9ea6100dca87 100644
--- a/net/bpfilter/bpfilter_umh_blob.S
+++ b/net/bpfilter/bpfilter_umh_blob.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
- .section .bpfilter_umh, "a"
+ .section .rodata, "a"
.global bpfilter_umh_start
bpfilter_umh_start:
.incbin "net/bpfilter/bpfilter_umh"
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 6664cb8590f8..00573cc46c98 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -1129,6 +1129,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
err = -ENOMEM;
goto err_unlock;
}
+ if (swdev_notify)
+ fdb->added_by_user = 1;
fdb->added_by_external_learn = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
@@ -1148,6 +1150,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}
+ if (swdev_notify)
+ fdb->added_by_user = 1;
+
if (modified)
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 2cb8da465b98..48ddc60b4fbd 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
goto drop;
- skb_push(skb, ETH_HLEN);
br_drop_fake_rtable(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
@@ -98,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
net = dev_net(indev);
} else {
if (unlikely(netpoll_tx_running(to->br->dev))) {
- if (!is_skb_forwardable(skb->dev, skb)) {
+ skb_push(skb, ETH_HLEN);
+ if (!is_skb_forwardable(skb->dev, skb))
kfree_skb(skb);
- } else {
- skb_push(skb, ETH_HLEN);
+ else
br_netpoll_send_skb(to, skb);
- }
return;
}
br_hook = NF_BR_LOCAL_OUT;
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 94039f588f1d..564710f88f93 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
IPSTATS_MIB_INDISCARDS);
goto drop;
}
+ hdr = ipv6_hdr(skb);
}
if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
goto drop;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 491828713e0b..5e55cef0cec3 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
tmp.name[sizeof(tmp.name) - 1] = 0;
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
- newinfo = vmalloc(sizeof(*newinfo) + countersize);
+ newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
+ PAGE_KERNEL);
if (!newinfo)
return -ENOMEM;
if (countersize)
memset(newinfo->counters, 0, countersize);
- newinfo->entries = vmalloc(tmp.entries_size);
+ newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
+ PAGE_KERNEL);
if (!newinfo->entries) {
ret = -ENOMEM;
goto free_newinfo;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 08cbed7d940e..419e8edf23ba 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
return false;
+ ip6h = ipv6_hdr(skb);
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b3b436ef545..7559d6835ecb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
- /* skb->mac_len is not set on normal egress */
- unsigned int mlen = skb->network_header - skb->mac_header;
+ unsigned int mlen = skb_network_offset(skb);
- __skb_pull(skb, mlen);
+ if (mlen) {
+ __skb_pull(skb, mlen);
- /* At ingress, the mac header has already been pulled once.
- * At egress, skb_pospull_rcsum has to be done in case that
- * the skb is originated from ingress (i.e. a forwarded skb)
- * to ensure that rcsum starts at net header.
- */
- if (!skb_at_tc_ingress(skb))
- skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ /* At ingress, the mac header has already been pulled once.
+ * At egress, skb_postpull_rcsum has to be done in case that
+ * the skb is originated from ingress (i.e. a forwarded skb)
+ * to ensure that rcsum starts at net header.
+ */
+ if (!skb_at_tc_ingress(skb))
+ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ }
skb_pop_mac_header(skb);
skb_reset_mac_len(skb);
return flags & BPF_F_INGRESS ?
@@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;
case SO_MAX_PACING_RATE: /* 32bit version */
+ if (val != ~0U)
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
@@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_rcvlowat = val ? : 1;
break;
case SO_MARK:
- sk->sk_mark = val;
+ if (sk->sk_mark != val) {
+ sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
break;
default:
ret = -EINVAL;
@@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
- /* else: fall through */
+ /* else, fall through */
default:
return NULL;
}
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437f7106..a648568c5e8f 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
lwt->name ? : "<unknown>");
ret = BPF_OK;
} else {
+ skb_reset_mac_header(skb);
ret = skb_do_redirect(skb);
if (ret == 0)
ret = BPF_REDIRECT;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 3e27a779f288..4230400b9a30 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -450,7 +450,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
- kmemleak_alloc(buckets, size, 0, GFP_ATOMIC);
+ kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
}
if (!buckets) {
kfree(ret);
@@ -1007,7 +1007,7 @@ static void neigh_probe(struct neighbour *neigh)
if (neigh->ops->solicit)
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
- kfree_skb(skb);
+ consume_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6df95be96311..fe4f6a624238 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
- flushed += fib_table_flush(net, tb);
+ flushed += fib_table_flush(net, tb, false);
}
if (flushed)
@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net)
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
hlist_del(&tb->tb_hlist);
- fib_table_flush(net, tb);
+ fib_table_flush(net, tb, true);
fib_free_table(tb);
}
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 237c9f72b265..a573e37e0615 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
}
/* Caller must hold RTNL. */
-int fib_table_flush(struct net *net, struct fib_table *tb)
+int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
- if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
- tb->tb_id != fa->tb_id) {
+ if (!fi || tb->tb_id != fa->tb_id ||
+ (!(fi->fib_flags & RTNH_F_DEAD) &&
+ !fib_props[fa->fa_type].error)) {
+ slen = fa->fa_slen;
+ continue;
+ }
+
+ /* Do not flush error routes if network namespace is
+ * not being dismantled
+ */
+ if (!flush_all && fib_props[fa->fa_type].error) {
slen = fa->fa_slen;
continue;
}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 632863541082..437070d1ffb1 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info)
{
int transport_offset = skb_transport_offset(skb);
struct guehdr *guehdr;
- size_t optlen;
+ size_t len, optlen;
int ret;
- if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ if (!pskb_may_pull(skb, len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info)
optlen = guehdr->hlen << 2;
+ if (!pskb_may_pull(skb, len + optlen))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
if (validate_gue_flags(guehdr, optlen))
return -EINVAL;
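The gue_err() change above (and the matching gue6_err() change later in this series) replaces a plain skb->len comparison with two pskb_may_pull() calls: one for the fixed UDP+GUE header, and a second once the option length is known, re-reading the header pointer after each pull because pulling may move the data. A small userspace sketch of the same two-stage bounds check; the buffer layout and names are illustrative only, not taken from this patch.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct hdr {
    uint8_t hlen;    /* option length in 4-byte words */
    uint8_t flags;
};

/* Return 0 if buf holds a complete header plus its options, -1 otherwise. */
static int parse(const uint8_t *buf, size_t len)
{
    const struct hdr *h;
    size_t optlen;

    if (len < sizeof(*h))              /* stage 1: fixed header present? */
        return -1;
    h = (const struct hdr *)buf;

    optlen = (size_t)h->hlen << 2;     /* stage 2: variable part present? */
    if (len < sizeof(*h) + optlen)
        return -1;
    return 0;
}

int main(void)
{
    uint8_t buf[8] = { 1, 0 };         /* hlen = 1 word: needs 2 + 4 bytes */

    printf("%d %d\n", parse(buf, sizeof(buf)), parse(buf, 3));
    return 0;
}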
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d1d09f3e5f9e..b1a74d80d868 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -569,8 +569,7 @@ err_free_skb:
dev->stats.tx_dropped++;
}
-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
- __be16 proto)
+static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_info *tun_info;
@@ -578,10 +577,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
struct erspan_metadata *md;
struct rtable *rt = NULL;
bool truncate = false;
+ __be16 df, proto;
struct flowi4 fl;
int tunnel_hlen;
int version;
- __be16 df;
int nhoff;
int thoff;
@@ -626,18 +625,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
if (version == 1) {
erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
ntohl(md->u.index), truncate, true);
+ proto = htons(ETH_P_ERSPAN);
} else if (version == 2) {
erspan_build_header_v2(skb,
ntohl(tunnel_id_to_key32(key->tun_id)),
md->u.md2.dir,
get_hwid(&md->u.md2),
truncate, true);
+ proto = htons(ETH_P_ERSPAN2);
} else {
goto err_free_rt;
}
gre_build_header(skb, 8, TUNNEL_SEQ,
- htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
+ proto, 0, htonl(tunnel->o_seqno++));
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
@@ -721,12 +722,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
{
struct ip_tunnel *tunnel = netdev_priv(dev);
bool truncate = false;
+ __be16 proto;
if (!pskb_inet_may_pull(skb))
goto free_skb;
if (tunnel->collect_md) {
- erspan_fb_xmit(skb, dev, skb->protocol);
+ erspan_fb_xmit(skb, dev);
return NETDEV_TX_OK;
}
@@ -742,19 +744,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
}
/* Push ERSPAN header */
- if (tunnel->erspan_ver == 1)
+ if (tunnel->erspan_ver == 1) {
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
tunnel->index,
truncate, true);
- else if (tunnel->erspan_ver == 2)
+ proto = htons(ETH_P_ERSPAN);
+ } else if (tunnel->erspan_ver == 2) {
erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
tunnel->dir, tunnel->hwid,
truncate, true);
- else
+ proto = htons(ETH_P_ERSPAN2);
+ } else {
goto free_skb;
+ }
tunnel->parms.o_flags &= ~TUNNEL_KEY;
- __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
+ __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
return NETDEV_TX_OK;
free_skb:
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26921f6b3b92..51d8efba6de2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -488,6 +488,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
goto drop;
}
+ iph = ip_hdr(skb);
skb->transport_header = skb->network_header + iph->ihl*4;
/* Remove any debris in the socket control block */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 541bdb9f81d7..5f099c9d04e5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
flags = msg->msg_flags;
if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
- if (sk->sk_state != TCP_ESTABLISHED) {
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
err = -EINVAL;
goto out_err;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3fb0ed5e4789..5c3cd5d84a6f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -847,15 +847,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize)
+ if (hlen + cork->gso_size > cork->fragsize) {
+ kfree_skb(skb);
return -EINVAL;
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+ }
+ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
return -EINVAL;
- if (sk->sk_no_check_tx)
+ }
+ if (sk->sk_no_check_tx) {
+ kfree_skb(skb);
return -EINVAL;
+ }
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb)))
+ dst_xfrm(skb_dst(skb))) {
+ kfree_skb(skb);
return -EIO;
+ }
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1918,7 +1926,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash)
}
EXPORT_SYMBOL(udp_lib_rehash);
-static void udp_v4_rehash(struct sock *sk)
+void udp_v4_rehash(struct sock *sk)
{
u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
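In the udp_send_skb() hunk above, each early error return now frees the skb it was handed, so rejecting an over-sized or unsupported GSO request no longer leaks the buffer. The underlying rule is about ownership: a function that takes ownership of an allocation must release it on every exit path that does not pass it on. A compact sketch with made-up names, not a kernel API:

#include <stdlib.h>
#include <stdio.h>

/* consume() takes ownership of buf: it either hands it to the (stubbed)
 * transmit path or frees it itself before reporting an error. */
static int consume(char *buf, size_t len, size_t mtu)
{
    if (len > mtu) {                   /* error path: still our buffer */
        free(buf);
        return -1;
    }
    printf("sent %zu bytes\n", len);   /* stand-in for the real transmit */
    free(buf);                         /* transmit stub releases it too */
    return 0;
}

int main(void)
{
    consume(malloc(64), 5, 16);        /* fits: transmitted */
    consume(malloc(64), 21, 16);       /* too big: rejected but not leaked */
    return 0;
}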
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 322672655419..6b2fa77eeb1c 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
int udp_v4_get_port(struct sock *sk, unsigned short snum);
+void udp_v4_rehash(struct sock *sk);
int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 39c7f17d916f..3c94b8f0ff27 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -53,6 +53,7 @@ struct proto udplite_prot = {
.sendpage = udp_sendpage,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index 7da7bf3b7fe3..b858bd5280bf 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -90,10 +90,11 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
{
int transport_offset = skb_transport_offset(skb);
struct guehdr *guehdr;
- size_t optlen;
+ size_t len, optlen;
int ret;
- if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ if (!pskb_may_pull(skb, len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -128,6 +129,10 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
optlen = guehdr->hlen << 2;
+ if (!pskb_may_pull(skb, len + optlen))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
if (validate_gue_flags(guehdr, optlen))
return -EINVAL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4a1a86e9c0e9..c465d8a102f2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -922,6 +922,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
__u8 dsfield = false;
struct flowi6 fl6;
int err = -EINVAL;
+ __be16 proto;
__u32 mtu;
int nhoff;
int thoff;
@@ -1035,8 +1036,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
}
/* Push GRE header. */
- gre_build_header(skb, 8, TUNNEL_SEQ,
- htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
+ proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
+ : htons(ETH_P_ERSPAN2);
+ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@@ -1169,6 +1171,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
t->parms.i_flags = p->i_flags;
t->parms.o_flags = p->o_flags;
t->parms.fwmark = p->fwmark;
+ t->parms.erspan_ver = p->erspan_ver;
+ t->parms.index = p->index;
+ t->parms.dir = p->dir;
+ t->parms.hwid = p->hwid;
dst_cache_reset(&t->dst_cache);
}
@@ -2025,9 +2031,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
struct __ip6_tnl_parm p;
- struct ip6_tnl *t;
t = ip6gre_changelink_common(dev, tb, data, &p, extack);
if (IS_ERR(t))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8e11f9a557b1..dc066fdf7e46 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4251,17 +4251,6 @@ struct rt6_nh {
struct list_head next;
};
-static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
-{
- struct rt6_nh *nh;
-
- list_for_each_entry(nh, rt6_nh_list, next) {
- pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
- &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
- nh->r_cfg.fc_ifindex);
- }
-}
-
static int ip6_route_info_append(struct net *net,
struct list_head *rt6_nh_list,
struct fib6_info *rt,
@@ -4407,7 +4396,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
nh->fib6_info = NULL;
if (err) {
if (replace && nhn)
- ip6_print_replace_route_err(&rt6_nh_list);
+ NL_SET_ERR_MSG_MOD(extack,
+ "multipath route replace failed (check consistency of installed routes)");
err_nh = nh;
goto add_errout;
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 7c3505006f8e..2596ffdeebea 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -102,7 +102,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
-static void udp_v6_rehash(struct sock *sk)
+void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
@@ -1132,15 +1132,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize)
+ if (hlen + cork->gso_size > cork->fragsize) {
+ kfree_skb(skb);
return -EINVAL;
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+ }
+ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
return -EINVAL;
- if (udp_sk(sk)->no_check6_tx)
+ }
+ if (udp_sk(sk)->no_check6_tx) {
+ kfree_skb(skb);
return -EINVAL;
+ }
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb)))
+ dst_xfrm(skb_dst(skb))) {
+ kfree_skb(skb);
return -EIO;
+ }
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 5730e6503cb4..20e324b6f358 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -13,6 +13,7 @@ int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
__be32, struct udp_table *);
int udp_v6_get_port(struct sock *sk, unsigned short snum);
+void udp_v6_rehash(struct sock *sk);
int udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index a125aebc29e5..f35907836444 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -49,6 +49,7 @@ struct proto udplitev6_prot = {
.recvmsg = udpv6_recvmsg,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index fa0844e2a68d..c0c72ae9df42 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
+ struct dst_entry *other_dst = route->tuple[!dir].dst;
struct dst_entry *dst = route->tuple[dir].dst;
ft->dir = dir;
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;
- ft->iifidx = route->tuple[dir].ifindex;
- ft->oifidx = route->tuple[!dir].ifindex;
+ ft->iifidx = other_dst->dev->ifindex;
+ ft->oifidx = dst->dev->ifindex;
ft->dst_cache = dst;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2b0a93300dd7..fb07f6cfc719 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2304,7 +2304,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
unsigned int s_idx = cb->args[0];
const struct nft_rule *rule;
- int rc = 1;
list_for_each_entry_rcu(rule, &chain->rules, list) {
if (!nft_is_active(net, rule))
@@ -2321,16 +2320,13 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
NLM_F_MULTI | NLM_F_APPEND,
table->family,
table, chain, rule) < 0)
- goto out_unfinished;
+ return 1;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
(*idx)++;
}
- rc = 0;
-out_unfinished:
- cb->args[0] = *idx;
- return rc;
+ return 0;
}
static int nf_tables_dump_rules(struct sk_buff *skb,
@@ -2354,7 +2350,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
continue;
- if (ctx && ctx->chain) {
+ if (ctx && ctx->table && ctx->chain) {
struct rhlist_head *list, *tmp;
list = rhltable_lookup(&table->chains_ht, ctx->chain,
@@ -2382,6 +2378,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
}
done:
rcu_read_unlock();
+
+ cb->args[0] = idx;
return skb->len;
}
@@ -4508,6 +4506,8 @@ err6:
err5:
kfree(trans);
err4:
+ if (obj)
+ obj->use--;
kfree(elem.priv);
err3:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 974525eb92df..6e6b9adf7d38 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -12,6 +12,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_helper.h>
struct nft_flow_offload {
struct nft_flowtable *flowtable;
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
memset(&fl, 0, sizeof(fl));
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
- fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
break;
case NFPROTO_IPV6:
- fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
break;
}
@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
return -ENOENT;
route->tuple[dir].dst = this_dst;
- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
route->tuple[!dir].dst = other_dst;
- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
return 0;
}
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
struct nf_flowtable *flowtable = &priv->flowtable->data;
+ const struct nf_conn_help *help;
enum ip_conntrack_info ctinfo;
struct nf_flow_route route;
struct flow_offload *flow;
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
goto out;
}
- if (test_bit(IPS_HELPER_BIT, &ct->status))
+ help = nfct_help(ct);
+ if (help)
goto out;
if (ctinfo == IP_CT_NEW ||
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 435a4bdf8f89..691da853bef5 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
return -EINVAL;
}
- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
attrs |= 1 << type;
a[type] = nla;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d0945253f43b..3b1a78906bc0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
goto out_free;
} else if (reserve) {
skb_reserve(skb, -reserve);
- if (len < reserve)
+ if (len < reserve + sizeof(struct ipv6hdr) &&
+ dev->min_header_len != dev->hard_header_len)
skb_reset_network_header(skb);
}
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index a2522f9d71e2..96f2952bbdfd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -419,76 +419,6 @@ u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
/**
- * rxrpc_kernel_check_call - Check a call's state
- * @sock: The socket the call is on
- * @call: The call to check
- * @_compl: Where to store the completion state
- * @_abort_code: Where to store any abort code
- *
- * Allow a kernel service to query the state of a call and find out the manner
- * of its termination if it has completed. Returns -EINPROGRESS if the call is
- * still going, 0 if the call finished successfully, -ECONNABORTED if the call
- * was aborted and an appropriate error if the call failed in some other way.
- */
-int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
- enum rxrpc_call_completion *_compl, u32 *_abort_code)
-{
- if (call->state != RXRPC_CALL_COMPLETE)
- return -EINPROGRESS;
- smp_rmb();
- *_compl = call->completion;
- *_abort_code = call->abort_code;
- return call->error;
-}
-EXPORT_SYMBOL(rxrpc_kernel_check_call);
-
-/**
- * rxrpc_kernel_retry_call - Allow a kernel service to retry a call
- * @sock: The socket the call is on
- * @call: The call to retry
- * @srx: The address of the peer to contact
- * @key: The security context to use (defaults to socket setting)
- *
- * Allow a kernel service to try resending a client call that failed due to a
- * network error to a new address. The Tx queue is maintained intact, thereby
- * relieving the need to re-encrypt any request data that has already been
- * buffered.
- */
-int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
- struct sockaddr_rxrpc *srx, struct key *key)
-{
- struct rxrpc_conn_parameters cp;
- struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- int ret;
-
- _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
-
- if (!key)
- key = rx->key;
- if (key && !key->payload.data[0])
- key = NULL; /* a no-security key */
-
- memset(&cp, 0, sizeof(cp));
- cp.local = rx->local;
- cp.key = key;
- cp.security_level = 0;
- cp.exclusive = false;
- cp.service_id = srx->srx_service;
-
- mutex_lock(&call->user_mutex);
-
- ret = rxrpc_prepare_call_for_retry(rx, call);
- if (ret == 0)
- ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);
-
- mutex_unlock(&call->user_mutex);
- rxrpc_put_peer(cp.peer);
- _leave(" = %d", ret);
- return ret;
-}
-EXPORT_SYMBOL(rxrpc_kernel_retry_call);
-
-/**
* rxrpc_kernel_new_call_notification - Get notifications of new calls
* @sock: The socket to intercept received messages on
* @notify_new_call: Function to be called when new calls appear
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index bc628acf4f4f..4b1a534d290a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -476,7 +476,6 @@ enum rxrpc_call_flag {
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
- RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
RXRPC_CALL_PINGING, /* Ping in process */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
@@ -518,6 +517,18 @@ enum rxrpc_call_state {
};
/*
+ * Call completion condition (state == RXRPC_CALL_COMPLETE).
+ */
+enum rxrpc_call_completion {
+ RXRPC_CALL_SUCCEEDED, /* - Normal termination */
+ RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
+ RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
+ RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
+ RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
+ NR__RXRPC_CALL_COMPLETIONS
+};
+
+/*
* Call Tx congestion management modes.
*/
enum rxrpc_congest_mode {
@@ -761,15 +772,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct sockaddr_rxrpc *,
struct rxrpc_call_params *, gfp_t,
unsigned int);
-int rxrpc_retry_client_call(struct rxrpc_sock *,
- struct rxrpc_call *,
- struct rxrpc_conn_parameters *,
- struct sockaddr_rxrpc *,
- gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
-int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8f1a8f85b1f9..8aa2937b069f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -325,48 +325,6 @@ error:
}
/*
- * Retry a call to a new address. It is expected that the Tx queue of the call
- * will contain data previously packaged for an old call.
- */
-int rxrpc_retry_client_call(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
-{
- const void *here = __builtin_return_address(0);
- int ret;
-
- /* Set up or get a connection record and set the protocol parameters,
- * including channel number and call ID.
- */
- ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
- if (ret < 0)
- goto error;
-
- trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
- here, NULL);
-
- rxrpc_start_call_timer(call);
-
- _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
-
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
- rxrpc_queue_call(call);
-
- _leave(" = 0");
- return 0;
-
-error:
- rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
- RX_CALL_DEAD, ret);
- trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
- here, ERR_PTR(ret));
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* Set up an incoming call. call->conn points to the connection.
* This is called in BH context and isn't allowed to fail.
*/
@@ -534,61 +492,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
}
/*
- * Prepare a kernel service call for retry.
- */
-int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
-{
- const void *here = __builtin_return_address(0);
- int i;
- u8 last = 0;
-
- _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
-
- trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
- here, (const void *)call->flags);
-
- ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
- ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
- ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
- ASSERT(list_empty(&call->recvmsg_link));
-
- del_timer_sync(&call->timer);
-
- _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);
-
- if (call->conn)
- rxrpc_disconnect_call(call);
-
- if (rxrpc_is_service_call(call) ||
- !call->tx_phase ||
- call->tx_hard_ack != 0 ||
- call->rx_hard_ack != 0 ||
- call->rx_top != 0)
- return -EINVAL;
-
- call->state = RXRPC_CALL_UNINITIALISED;
- call->completion = RXRPC_CALL_SUCCEEDED;
- call->call_id = 0;
- call->cid = 0;
- call->cong_cwnd = 0;
- call->cong_extra = 0;
- call->cong_ssthresh = 0;
- call->cong_mode = 0;
- call->cong_dup_acks = 0;
- call->cong_cumul_acks = 0;
- call->acks_lowest_nak = 0;
-
- for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
- last |= call->rxtx_annotations[i];
- call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
- call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
- }
-
- _leave(" = 0");
- return 0;
-}
-
-/*
* release all the calls associated with a socket
*/
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 521189f4b666..b2adfa825363 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -562,10 +562,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
write_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
- call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
- else
- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
write_unlock_bh(&call->state_lock);
rxrpc_see_call(call);
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index be01f9c5d963..46c9312085b1 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -169,10 +169,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
ASSERTCMP(seq, ==, call->tx_top + 1);
- if (last) {
+ if (last)
annotation |= RXRPC_TX_ANNO_LAST;
- set_bit(RXRPC_CALL_TX_LASTQ, &call->flags);
- }
/* We have to set the timestamp before queueing as the retransmit
* algorithm can see the packet as soon as we queue it.
@@ -386,6 +384,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
call->tx_total_len -= copy;
}
+ /* check for the far side aborting the call or a network error
+ * occurring */
+ if (call->state == RXRPC_CALL_COMPLETE)
+ goto call_terminated;
+
/* add the packet to the send queue if it's now full */
if (sp->remain <= 0 ||
(msg_data_left(msg) == 0 && !more)) {
@@ -425,16 +428,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
notify_end_tx);
skb = NULL;
}
-
- /* Check for the far side aborting the call or a network error
- * occurring. If this happens, save any packet that was under
- * construction so that in the case of a network error, the
- * call can be retried or redirected.
- */
- if (call->state == RXRPC_CALL_COMPLETE) {
- ret = call->error;
- goto out;
- }
} while (msg_data_left(msg) > 0);
success:
@@ -444,6 +437,11 @@ out:
_leave(" = %d", ret);
return ret;
+call_terminated:
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ _leave(" = %d", call->error);
+ return call->error;
+
maybe_error:
if (copied)
goto success;
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index c3b90fadaff6..8b43fe0130f7 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};
+static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
+{
+ if (!p)
+ return;
+ if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+ dst_release(&p->tcft_enc_metadata->dst);
+ kfree_rcu(p, rcu);
+}
+
static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
rcu_swap_protected(t->params, params_new,
lockdep_is_held(&t->tcf_lock));
spin_unlock_bh(&t->tcf_lock);
- if (params_new)
- kfree_rcu(params_new, rcu);
+ tunnel_key_release_params(params_new);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
struct tcf_tunnel_key_params *params;
params = rcu_dereference_protected(t->params, 1);
- if (params) {
- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
- dst_release(&params->tcft_enc_metadata->dst);
-
- kfree_rcu(params, rcu);
- }
+ tunnel_key_release_params(params);
}
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
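The act_tunnel_key change above folds the two open-coded cleanup sites into a single tunnel_key_release_params() helper, so the dst reference taken for TCA_TUNNEL_KEY_ACT_SET is dropped on the update path as well as on destroy. The general shape is a release helper that tolerates NULL and owns all of the conditional teardown; here is a generic sketch with hypothetical types, not the tc API itself.

#include <stdlib.h>

struct params {
    int   holds_ref;    /* whether an extra resource was taken */
    void *resource;
};

/* Single place that knows how to undo params allocation; safe on NULL. */
static void params_release(struct params *p)
{
    if (!p)
        return;
    if (p->holds_ref)
        free(p->resource);
    free(p);
}

int main(void)
{
    struct params *p = calloc(1, sizeof(*p));

    if (p) {
        p->holds_ref = 1;
        p->resource = malloc(32);
    }
    params_release(p);      /* every caller uses the same teardown */
    params_release(NULL);   /* and NULL is harmless */
    return 0;
}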
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8ce2a0507970..e2b5cb2eb34e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1277,7 +1277,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
- __be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
const int max_reclassify_loop = 4;
const struct tcf_proto *orig_tp = tp;
@@ -1287,6 +1286,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
+ __be16 protocol = tc_skb_protocol(skb);
int err;
if (tp->protocol != protocol &&
@@ -1319,7 +1319,6 @@ reset:
}
tp = first_tp;
- protocol = tc_skb_protocol(skb);
goto reclassify;
#endif
}
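In tcf_classify() above, the protocol is now read from the skb at the top of each pass over the filter chain instead of being cached once on entry, presumably because an action run during classification can rewrite skb->protocol. The rule it illustrates: recompute any value derived from state that the loop body may mutate. A toy illustration with invented names follows.

#include <stdio.h>

struct packet { int proto; };

/* First "filter": rewrites the packet's protocol as a side effect. */
static int retag(struct packet *p) { p->proto = 42; return 0; }

/* Second "filter": only matches the rewritten protocol. */
static int match42(struct packet *p) { return p->proto == 42; }

int main(void)
{
    struct packet pkt = { .proto = 7 };
    int (*chain[2])(struct packet *) = { retag, match42 };

    for (unsigned i = 0; i < 2; i++) {
        int proto = pkt.proto;    /* derived value re-read on every pass */

        printf("step %u sees proto %d\n", i, proto);
        if (chain[i](&pkt))
            printf("matched at step %u\n", i);
    }
    return 0;
}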
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index dad04e710493..f6aa57fbbbaf 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1290,17 +1290,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *fold = *arg;
struct cls_fl_filter *fnew;
+ struct fl_flow_mask *mask;
struct nlattr **tb;
- struct fl_flow_mask mask = {};
int err;
if (!tca[TCA_OPTIONS])
return -EINVAL;
- tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
- if (!tb)
+ mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
+ if (!mask)
return -ENOBUFS;
+ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+ if (!tb) {
+ err = -ENOBUFS;
+ goto errout_mask_alloc;
+ }
+
err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
fl_policy, NULL);
if (err < 0)
@@ -1343,12 +1349,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
}
- err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
+ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
tp->chain->tmplt_priv, extack);
if (err)
goto errout_idr;
- err = fl_check_assign_mask(head, fnew, fold, &mask);
+ err = fl_check_assign_mask(head, fnew, fold, mask);
if (err)
goto errout_idr;
@@ -1392,6 +1398,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
kfree(tb);
+ kfree(mask);
return 0;
errout_mask:
@@ -1405,6 +1412,8 @@ errout:
kfree(fnew);
errout_tb:
kfree(tb);
+errout_mask_alloc:
+ kfree(mask);
return err;
}
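fl_change() above stops declaring struct fl_flow_mask on the stack and kzalloc()s it instead, with a matching kfree() on the success path and a new errout_mask_alloc label for the failure paths; this is the usual cure when a local structure grows too large for a kernel stack frame. A generic before/after sketch, with an invented size and names:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct big_scratch {
    char bytes[16384];    /* too big to live comfortably on the stack */
};

static int do_work(void)
{
    struct big_scratch *s = calloc(1, sizeof(*s));  /* heap, not stack */
    int err = 0;

    if (!s)
        return -1;

    memset(s->bytes, 0xAB, sizeof(s->bytes));       /* stand-in for real work */
    if (s->bytes[0] != (char)0xAB)
        err = -1;

    free(s);                                        /* freed on every path */
    return err;
}

int main(void)
{
    printf("do_work: %d\n", do_work());
    return 0;
}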
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index b910cd5c56f7..73940293700d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1667,7 +1667,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
- unsigned int slen = 0;
+ unsigned int slen = 0, numsegs = 0;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
@@ -1683,6 +1683,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
flow_queue_add(flow, segs);
sch->q.qlen++;
+ numsegs++;
slen += segs->len;
q->buffer_used += segs->truesize;
b->packets++;
@@ -1696,7 +1697,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch->qstats.backlog += slen;
q->avg_window_bytes += slen;
- qdisc_tree_reduce_backlog(sch, 1, len);
+ qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
consume_skb(skb);
} else {
/* not splitting */
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index e689e11b6d0f..c6a502933fe7 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct Qdisc *child,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
int err;
err = child->ops->enqueue(skb, child, to_free);
if (err != NET_XMIT_SUCCESS)
return err;
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index cdebaed0f8cf..09b800991065 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
int err = 0;
+ bool first;
cl = drr_classify(skb, sch, &err);
if (cl == NULL) {
@@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- if (cl->qdisc->q.qlen == 1) {
+ if (first) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return err;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index f6f480784bc6..42471464ded3 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct dsmark_qdisc_data *p = qdisc_priv(sch);
int err;
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b18ec1f6de60..24cc220a3218 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct hfsc_class *cl;
int uninitialized_var(err);
+ bool first;
cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) {
@@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err;
}
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err;
}
- if (cl->qdisc->q.qlen == 1) {
- unsigned int len = qdisc_pkt_len(skb);
-
+ if (first) {
if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len);
if (cl->cl_flags & HFSC_FSC)
@@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 58b449490757..30f9da7e1076 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
int uninitialized_var(ret);
+ unsigned int len = qdisc_pkt_len(skb);
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_classify(skb, sch, &ret);
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
htb_activate(q, cl);
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index cdf68706e40f..847141cd900f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct Qdisc *qdisc;
int ret;
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
ret = qdisc_enqueue(skb, qdisc, to_free);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index dc37c4ead439..29f5c4a24688 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb), gso_segs;
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
struct qfq_aggregate *agg;
int err = 0;
+ bool first;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
@@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
- if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
+ if (unlikely(cl->agg->lmax < len)) {
pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
- cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
- err = qfq_change_agg(sch, cl, cl->agg->class_weight,
- qdisc_pkt_len(skb));
+ cl->agg->lmax, len, cl->common.classid);
+ err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
if (err) {
cl->qstats.drops++;
return qdisc_drop(skb, sch, to_free);
}
}
+ gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- bstats_update(&cl->bstats, skb);
- qdisc_qstats_backlog_inc(sch, skb);
+ cl->bstats.bytes += len;
+ cl->bstats.packets += gso_segs;
+ sch->qstats.backlog += len;
++sch->q.qlen;
agg = cl->agg;
/* if the queue was not empty, then done here */
- if (cl->qdisc->q.qlen != 1) {
+ if (!first) {
if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
list_first_entry(&agg->active, struct qfq_class, alist)
- == cl && cl->deficit < qdisc_pkt_len(skb))
+ == cl && cl->deficit < len)
list_move_tail(&cl->alist, &agg->active);
return err;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 942dcca09cf2..7f272a9070c5 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct tbf_sched_data *q = qdisc_priv(sch);
+ unsigned int len = qdisc_pkt_len(skb);
int ret;
if (qdisc_pkt_len(skb) > q->max_size) {
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return ret;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
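The sch_cake/cbs/drr/dsmark/hfsc/htb/prio/qfq/tbf hunks above all apply one pattern: read qdisc_pkt_len(skb) (and, where needed, whether the child queue was empty) before calling the child's enqueue, then update the backlog from the cached length, because the child enqueue may free or segment the skb and it cannot be dereferenced afterwards. A tiny sketch of caching what you need before transferring ownership; all names are illustrative.

#include <stdlib.h>
#include <stdio.h>

struct pkt { size_t len; };

static size_t backlog;

/* child_enqueue() takes ownership of p and may free it immediately. */
static int child_enqueue(struct pkt *p)
{
    free(p);
    return 0;
}

static int enqueue(struct pkt *p)
{
    size_t len = p->len;           /* cache before ownership transfer */

    if (child_enqueue(p) != 0)
        return -1;
    backlog += len;                /* p must not be dereferenced here */
    return 0;
}

int main(void)
{
    struct pkt *p = malloc(sizeof(*p));

    if (!p)
        return 1;
    p->len = 1500;
    enqueue(p);
    printf("backlog %zu\n", backlog);
    return 0;
}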
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index b9ed271b7ef7..ed8e006dae85 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
- addr->a.v6.sin6_flowinfo = 0;
addr->a.v6.sin6_addr = ifa->addr;
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
addr->valid = 1;
@@ -434,7 +432,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifp->addr;
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d5878ae55840..4e0eeb113ef5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
@@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
spin_lock_bh(&net->sctp.local_addr_lock);
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 1ff9768f5456..f3023bbc0b7f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -41,6 +41,9 @@ static unsigned long number_cred_unused;
static struct cred machine_cred = {
.usage = ATOMIC_INIT(1),
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ .magic = CRED_MAGIC,
+#endif
};
/*
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dc86713b32b6..1531b0219344 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p)
cred_len = p++;
spin_lock(&ctx->gc_seq_lock);
- req->rq_seqno = ctx->gc_seq++;
+ req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
spin_unlock(&ctx->gc_seq_lock);
+ if (req->rq_seqno == MAXSEQ)
+ goto out_expired;
*p++ = htonl((u32) RPC_GSS_VERSION);
*p++ = htonl((u32) ctx->gc_proc);
@@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p)
mic.data = (u8 *)(p + 1);
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
- clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ goto out_expired;
} else if (maj_stat != 0) {
- printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
+ pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
+ task->tk_status = -EIO;
goto out_put_ctx;
}
p = xdr_encode_opaque(p, NULL, mic.len);
gss_put_ctx(ctx);
return p;
+out_expired:
+ clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ task->tk_status = -EKEYEXPIRED;
out_put_ctx:
gss_put_ctx(ctx);
return NULL;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 71d9599b5816..d7ec6132c046 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task)
xdr_buf_init(&req->rq_rcv_buf,
req->rq_rbuffer,
req->rq_rcvsize);
- req->rq_bytes_sent = 0;
p = rpc_encode_header(task);
- if (p == NULL) {
- printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
- rpc_exit(task, -EIO);
+ if (p == NULL)
return;
- }
encode = task->tk_msg.rpc_proc->p_encode;
if (encode == NULL)
@@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task)
/* Did the encode result in an error condition? */
if (task->tk_status != 0) {
/* Was the error nonfatal? */
- if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM)
+ switch (task->tk_status) {
+ case -EAGAIN:
+ case -ENOMEM:
rpc_delay(task, HZ >> 4);
- else
+ break;
+ case -EKEYEXPIRED:
+ task->tk_action = call_refresh;
+ break;
+ default:
rpc_exit(task, task->tk_status);
+ }
return;
}
@@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task)
*p++ = htonl(clnt->cl_vers); /* program version */
*p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
p = rpcauth_marshcred(task, p);
- req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
+ if (p)
+ req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
return p;
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 73547d17d3c6..f1ec2110efeb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
struct rpc_xprt *xprt = req->rq_xprt;
if (xprt_request_need_enqueue_transmit(task, req)) {
+ req->rq_bytes_sent = 0;
spin_lock(&xprt->queue_lock);
/*
* Requests that carry congestion control credits are added
@@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
INIT_LIST_HEAD(&req->rq_xmit2);
goto out;
}
- } else {
+ } else if (!req->rq_seqno) {
list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
if (pos->rq_task->tk_owner != task->tk_owner)
continue;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 7749a2bf6887..4994e75945b8 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -845,17 +845,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
for (i = 0; i <= buf->rb_sc_last; i++) {
sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
if (!sc)
- goto out_destroy;
+ return -ENOMEM;
sc->sc_xprt = r_xprt;
buf->rb_sc_ctxs[i] = sc;
}
return 0;
-
-out_destroy:
- rpcrdma_sendctxs_destroy(buf);
- return -ENOMEM;
}
/* The sendctx queue is not guaranteed to have a size that is a
@@ -1113,8 +1109,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
WQ_MEM_RECLAIM | WQ_HIGHPRI,
0,
r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
- if (!buf->rb_completion_wq)
+ if (!buf->rb_completion_wq) {
+ rc = -ENOMEM;
goto out;
+ }
return 0;
out:
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 13559e6a460b..7754aa3e434f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -48,6 +48,7 @@
#include <net/udp.h>
#include <net/tcp.h>
#include <linux/bvec.h>
+#include <linux/highmem.h>
#include <linux/uio.h>
#include <trace/events/sunrpc.h>
@@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
return sock_recvmsg(sock, msg, flags);
}
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static void
+xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
+{
+ struct bvec_iter bi = {
+ .bi_size = count,
+ };
+ struct bio_vec bv;
+
+ bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
+ for_each_bvec(bv, bvec, bi, bi)
+ flush_dcache_page(bv.bv_page);
+}
+#else
+static inline void
+xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
+{
+}
+#endif
+
static ssize_t
xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
@@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
seek + buf->page_base);
if (ret <= 0)
goto sock_err;
+ xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
offset += ret - buf->page_base;
if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
goto out;
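The xs_flush_bvec() helper added above is only compiled when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is set; otherwise an empty inline stub keeps the call site unconditional. The same keep-the-caller-simple idiom is sketched below with a made-up feature macro and function, not the kernel's flush_dcache_page() API.

#include <stdio.h>
#include <stddef.h>

#ifdef HAVE_CACHE_FLUSH                 /* hypothetical feature switch */
static void maybe_flush(const void *p, size_t n)
{
    /* real per-architecture cache maintenance would go here */
    printf("flushing %zu bytes at %p\n", n, p);
}
#else
static inline void maybe_flush(const void *p, size_t n)
{
    (void)p;                            /* no-op where explicit */
    (void)n;                            /* flushing is not needed */
}
#endif

int main(void)
{
    char buf[64];

    maybe_flush(buf, sizeof(buf));      /* caller never checks the macro */
    return 0;
}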
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 77e4b2418f30..4ad3586da8f0 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
return limit;
}
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
{
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
return buf;
}
+static inline bool string_is_valid(char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_bearer_config *b;
+ int len;
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(b->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
return -EMSGSIZE;
@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *bearer;
+ int len;
name = (char *)TLV_DATA(msg->req);
@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
return -EMSGSIZE;
@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
int err;
+ int len;
if (!attrs[TIPC_NLA_LINK])
return -EINVAL;
@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
return err;
name = (char *)TLV_DATA(msg->req);
+
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
return 0;
@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
struct nlattr *prop;
struct nlattr *media;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
if (!media)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
return -EMSGSIZE;
@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
return -EMSGSIZE;
@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
struct tipc_link_config *lc;
struct tipc_bearer *bearer;
struct tipc_media *media;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
media = tipc_media_find(lc->name);
if (media) {
cmd->doit = &__tipc_nl_media_set;
@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *link;
+ int len;
name = (char *)TLV_DATA(msg->req);
@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
if (!link)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
return -EMSGSIZE;
@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
};
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+ return -EINVAL;
depth = ntohl(ntq->depth);
@@ -1208,7 +1256,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
}
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (len && !TLV_OK(msg.req, len)) {
+ if (!len || !TLV_OK(msg.req, len)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
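The tipc_nl_compat_* hunks above all add the same guard: compute how much of the TLV payload is really present, then use string_is_valid() — a bounded memchr() for the terminating NUL — before handing the buffer to nla_put_string() or strcmp(). A small standalone demonstration of checking that a possibly-unterminated buffer is a proper C string; the buffer contents are illustrative only.

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

/* True if buf contains a NUL terminator within the first len bytes. */
static bool string_is_valid(const char *buf, size_t len)
{
    return memchr(buf, '\0', len) != NULL;
}

int main(void)
{
    char ok[8]  = "eth0";                       /* terminated within 8 bytes */
    char bad[4] = { 'e', 't', 'h', '0' };       /* no terminator at all */

    printf("%d %d\n", string_is_valid(ok, sizeof(ok)),
                      string_is_valid(bad, sizeof(bad)));
    return 0;
}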
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index efb16f69bd2c..a457c0fbbef1 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK)
return -EWOULDBLOCK;
- if (ret > 0) {
+ if (ret == sizeof(s)) {
read_lock_bh(&sk->sk_callback_lock);
ret = tipc_conn_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a264cf2accd0..d4de871e7d4d 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
* not know if the device has more tx queues than rx, or the opposite.
* This might also change during run time.
*/
-static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
- u16 queue_id)
+static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
+ u16 queue_id)
{
+ if (queue_id >= max_t(unsigned int,
+ dev->real_num_rx_queues,
+ dev->real_num_tx_queues))
+ return -EINVAL;
+
if (queue_id < dev->real_num_rx_queues)
dev->_rx[queue_id].umem = umem;
if (queue_id < dev->real_num_tx_queues)
dev->_tx[queue_id].umem = umem;
+
+ return 0;
}
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
goto out_rtnl_unlock;
}
- xdp_reg_umem_at_qid(dev, umem, queue_id);
+ err = xdp_reg_umem_at_qid(dev, umem, queue_id);
+ if (err)
+ goto out_rtnl_unlock;
+
umem->dev = dev;
umem->queue_id = queue_id;
if (force_copy)
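xdp_reg_umem_at_qid() above now rejects a queue_id that falls outside both the RX and TX queue ranges instead of silently registering nothing, and the caller propagates the error; the main shape of the change is turning a void helper into one that can fail. A minimal sketch of that refactor with a hypothetical device struct (not the kernel's net_device):

#include <stdio.h>

struct dev {
    unsigned int num_rx, num_tx;
    void *rx_umem[4], *tx_umem[4];
};

/* Returns 0 on success, -1 if queue_id maps to neither an RX nor a TX queue. */
static int reg_umem_at_qid(struct dev *d, void *umem, unsigned int queue_id)
{
    unsigned int max = d->num_rx > d->num_tx ? d->num_rx : d->num_tx;

    if (queue_id >= max)
        return -1;
    if (queue_id < d->num_rx)
        d->rx_umem[queue_id] = umem;
    if (queue_id < d->num_tx)
        d->tx_umem[queue_id] = umem;
    return 0;
}

int main(void)
{
    struct dev d = { .num_rx = 2, .num_tx = 4 };
    int dummy;

    printf("%d %d\n", reg_umem_at_qid(&d, &dummy, 1),
                      reg_umem_at_qid(&d, &dummy, 7));  /* second is rejected */
    return 0;
}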
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 66ae15f27c70..db1a91dfa702 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
new file mode 100644
index 000000000000..5cd7c1d1a5d5
--- /dev/null
+++ b/samples/bpf/asm_goto_workaround.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+#ifndef __ASM_GOTO_WORKAROUND_H
+#define __ASM_GOTO_WORKAROUND_H
+
+/* this will bring in asm_volatile_goto macro definition
+ * if enabled by compiler and config options.
+ */
+#include <linux/types.h>
+
+#ifdef asm_volatile_goto
+#undef asm_volatile_goto
+#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
+#endif
+
+#endif
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 525bff667a52..30816037036e 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d)
basetarget = $(basename $(notdir $@))
###
-# filename of first prerequisite with directory and extension stripped
-baseprereq = $(basename $(notdir $<))
-
-###
# Escape single quote for use in echo statements
escsq = $(subst $(squote),'\$(squote)',$1)
diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
index de70b8470971..89c47f57d1ce 100644
--- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
+++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
@@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
const char *sym;
rtx body;
- rtx masked_sp;
+ rtx mask, masked_sp;
/*
* Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
@@ -33,12 +33,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
* produces the address of the copy of the stack canary value
* stored in struct thread_info
*/
+ mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
masked_sp = gen_reg_rtx(Pmode);
emit_insn_before(gen_rtx_SET(masked_sp,
gen_rtx_AND(Pmode,
stack_pointer_rtx,
- GEN_INT(sp_mask))),
+ mask)),
insn);
SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
@@ -52,6 +53,19 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
#define NO_GATE
#include "gcc-generate-rtl-pass.h"
+#if BUILDING_GCC_VERSION >= 9000
+static bool no(void)
+{
+ return false;
+}
+
+static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data)
+{
+ targetm.have_stack_protect_combined_set = no;
+ targetm.have_stack_protect_combined_test = no;
+}
+#endif
+
__visible int plugin_init(struct plugin_name_args *plugin_info,
struct plugin_gcc_version *version)
{
@@ -99,5 +113,10 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
NULL, &arm_pertask_ssp_rtl_pass_info);
+#if BUILDING_GCC_VERSION >= 9000
+ register_callback(plugin_info->base_name, PLUGIN_START_UNIT,
+ arm_pertask_ssp_start_unit, NULL);
+#endif
+
return 0;
}
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index c05ab001b54c..181973509a05 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -206,4 +206,4 @@ filechk_conf_cfg = $(CONFIG_SHELL) $<
$(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE
$(call filechk,conf_cfg)
-clean-files += conf-cfg
+clean-files += *conf-cfg
diff --git a/security/security.c b/security/security.c
index f1b8d2587639..55bc49027ba9 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1027,6 +1027,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
void security_cred_free(struct cred *cred)
{
+ /*
+ * There is a failure case in prepare_creds() that
+ * may result in a call here with ->security being NULL.
+ */
+ if (unlikely(cred->security == NULL))
+ return;
+
call_void_hook(cred_free, cred);
}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index a50d625e7946..c1c31e33657a 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
kfree(key);
if (datum) {
levdatum = datum;
- ebitmap_destroy(&levdatum->level->cat);
+ if (levdatum->level)
+ ebitmap_destroy(&levdatum->level->cat);
kfree(levdatum->level);
}
kfree(datum);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index ffda91a4a1aa..02514fe558b4 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
break;
case YAMA_SCOPE_RELATIONAL:
rcu_read_lock();
- if (!task_is_descendant(current, child) &&
+ if (!pid_alive(child))
+ rc = -EPERM;
+ if (!rc && !task_is_descendant(current, child) &&
!ptracer_exception_found(current, child) &&
!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
+ PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 492f0f24e2d3..4ad1f0894d53 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c
SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
ifeq ($(feature-libbfd),1)
+ LIBS += -lbfd -ldl -lopcodes
+else ifeq ($(feature-libbfd-liberty),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty
+else ifeq ($(feature-libbfd-liberty-z),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty -lz
+endif
+
+ifneq ($(filter -lbfd,$(LIBS)),)
CFLAGS += -DHAVE_LIBBFD_SUPPORT
SRCS += $(BFD_SRCS)
-LIBS += -lbfd -lopcodes
endif
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index bff7ee026680..6046dcab51cc 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -1,15 +1,10 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
/*
* Simple streaming JSON writer
*
* This takes care of the annoying bits of JSON syntax like the commas
* after elements
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Stephen Hemminger <stephen@networkplumber.org>
*/
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index c1ab51aed99c..cb9a1993681c 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -5,11 +5,6 @@
* This takes care of the annoying bits of JSON syntax like the commas
* after elements
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Stephen Hemminger <stephen@networkplumber.org>
*/
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
new file mode 100644
index 000000000000..0d18b1d1fbbc
--- /dev/null
+++ b/tools/include/uapi/linux/pkt_sched.h
@@ -0,0 +1,1163 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LINUX_PKT_SCHED_H
+#define __LINUX_PKT_SCHED_H
+
+#include <linux/types.h>
+
+/* Logical priority bands not depending on specific packet scheduler.
+ Every scheduler will map them to real traffic classes, if it has
+ no more precise mechanism to classify packets.
+
+ These numbers have no special meaning, though their coincidence
+ with obsolete IPv6 values is not accidental :-). New IPv6 drafts
+ preferred full anarchy inspired by the diffserv group.
+
+ Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
+ class; actually, as a rule it will be handled with more care than
+ filler or even bulk.
+ */
+
+#define TC_PRIO_BESTEFFORT 0
+#define TC_PRIO_FILLER 1
+#define TC_PRIO_BULK 2
+#define TC_PRIO_INTERACTIVE_BULK 4
+#define TC_PRIO_INTERACTIVE 6
+#define TC_PRIO_CONTROL 7
+
+#define TC_PRIO_MAX 15
+
+/* Generic queue statistics, available for all the elements.
+ Particular schedulers may have also their private records.
+ */
+
+struct tc_stats {
+ __u64 bytes; /* Number of enqueued bytes */
+ __u32 packets; /* Number of enqueued packets */
+ __u32 drops; /* Packets dropped because of lack of resources */
+ __u32 overlimits; /* Number of throttle events when this
+ * flow goes out of allocated bandwidth */
+ __u32 bps; /* Current flow byte rate */
+ __u32 pps; /* Current flow packet rate */
+ __u32 qlen;
+ __u32 backlog;
+};
+
+struct tc_estimator {
+ signed char interval;
+ unsigned char ewma_log;
+};
+
+/* "Handles"
+ ---------
+
+ All the traffic control objects have 32bit identifiers, or "handles".
+
+ They can be considered opaque numbers from the user API viewpoint,
+ but actually they always consist of two fields: major and
+ minor numbers, which are interpreted specially by the kernel and
+ may be used by applications, though this is not recommended.
+
+ E.g. qdisc handles always have a minor number equal to zero;
+ classes (or flows) have a major equal to the parent qdisc's major and
+ a minor uniquely identifying the class inside the qdisc.
+
+ Macros to manipulate handles:
+ */
+
+#define TC_H_MAJ_MASK (0xFFFF0000U)
+#define TC_H_MIN_MASK (0x0000FFFFU)
+#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
+#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
+#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
+
+#define TC_H_UNSPEC (0U)
+#define TC_H_ROOT (0xFFFFFFFFU)
+#define TC_H_INGRESS (0xFFFFFFF1U)
+#define TC_H_CLSACT TC_H_INGRESS
+
+#define TC_H_MIN_PRIORITY 0xFFE0U
+#define TC_H_MIN_INGRESS 0xFFF2U
+#define TC_H_MIN_EGRESS 0xFFF3U
+
+/* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+ TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+ TC_LINKLAYER_ETHERNET,
+ TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
+struct tc_ratespec {
+ unsigned char cell_log;
+ __u8 linklayer; /* lower 4 bits */
+ unsigned short overhead;
+ short cell_align;
+ unsigned short mpu;
+ __u32 rate;
+};
+
+#define TC_RTAB_SIZE 1024
+
+struct tc_sizespec {
+ unsigned char cell_log;
+ unsigned char size_log;
+ short cell_align;
+ int overhead;
+ unsigned int linklayer;
+ unsigned int mpu;
+ unsigned int mtu;
+ unsigned int tsize;
+};
+
+enum {
+ TCA_STAB_UNSPEC,
+ TCA_STAB_BASE,
+ TCA_STAB_DATA,
+ __TCA_STAB_MAX
+};
+
+#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
+
+/* FIFO section */
+
+struct tc_fifo_qopt {
+ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
+};
+
+/* SKBPRIO section */
+
+/*
+ * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
+ * SKBPRIO_MAX_PRIORITY should be at least 64 so that skbprio can map
+ * the DS field of IPv4 and IPv6 headers one-to-one.
+ * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
+ */
+
+#define SKBPRIO_MAX_PRIORITY 64
+
+struct tc_skbprio_qopt {
+ __u32 limit; /* Queue length in packets. */
+};
+
+/* PRIO section */
+
+#define TCQ_PRIO_BANDS 16
+#define TCQ_MIN_PRIO_BANDS 2
+
+struct tc_prio_qopt {
+ int bands; /* Number of bands */
+ __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
+};
+
+/* MULTIQ section */
+
+struct tc_multiq_qopt {
+ __u16 bands; /* Number of bands */
+ __u16 max_bands; /* Maximum number of queues */
+};
+
+/* PLUG section */
+
+#define TCQ_PLUG_BUFFER 0
+#define TCQ_PLUG_RELEASE_ONE 1
+#define TCQ_PLUG_RELEASE_INDEFINITE 2
+#define TCQ_PLUG_LIMIT 3
+
+struct tc_plug_qopt {
+ /* TCQ_PLUG_BUFFER: Insert a plug into the queue and
+ * buffer any incoming packets
+ * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
+ * to beginning of the next plug.
+ * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
+ * Stop buffering packets until the next TCQ_PLUG_BUFFER
+ * command is received (just act as a pass-thru queue).
+ * TCQ_PLUG_LIMIT: Increase/decrease queue size
+ */
+ int action;
+ __u32 limit;
+};
+
+/* TBF section */
+
+struct tc_tbf_qopt {
+ struct tc_ratespec rate;
+ struct tc_ratespec peakrate;
+ __u32 limit;
+ __u32 buffer;
+ __u32 mtu;
+};
+
+enum {
+ TCA_TBF_UNSPEC,
+ TCA_TBF_PARMS,
+ TCA_TBF_RTAB,
+ TCA_TBF_PTAB,
+ TCA_TBF_RATE64,
+ TCA_TBF_PRATE64,
+ TCA_TBF_BURST,
+ TCA_TBF_PBURST,
+ TCA_TBF_PAD,
+ __TCA_TBF_MAX,
+};
+
+#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
+
+
+/* TEQL section */
+
+/* TEQL does not require any parameters */
+
+/* SFQ section */
+
+struct tc_sfq_qopt {
+ unsigned quantum; /* Bytes per round allocated to flow */
+ int perturb_period; /* Period of hash perturbation */
+ __u32 limit; /* Maximal packets in queue */
+ unsigned divisor; /* Hash divisor */
+ unsigned flows; /* Maximal number of flows */
+};
+
+struct tc_sfqred_stats {
+ __u32 prob_drop; /* Early drops, below max threshold */
+ __u32 forced_drop; /* Early drops, after max threshold */
+ __u32 prob_mark; /* Marked packets, below max threshold */
+ __u32 forced_mark; /* Marked packets, after max threshold */
+ __u32 prob_mark_head; /* Marked packets, below max threshold */
+ __u32 forced_mark_head;/* Marked packets, after max threshold */
+};
+
+struct tc_sfq_qopt_v1 {
+ struct tc_sfq_qopt v0;
+ unsigned int depth; /* max number of packets per flow */
+ unsigned int headdrop;
+/* SFQRED parameters */
+ __u32 limit; /* HARD maximal flow queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags;
+ __u32 max_P; /* probability, high resolution */
+/* SFQRED stats */
+ struct tc_sfqred_stats stats;
+};
+
+
+struct tc_sfq_xstats {
+ __s32 allot;
+};
+
+/* RED section */
+
+enum {
+ TCA_RED_UNSPEC,
+ TCA_RED_PARMS,
+ TCA_RED_STAB,
+ TCA_RED_MAX_P,
+ __TCA_RED_MAX,
+};
+
+#define TCA_RED_MAX (__TCA_RED_MAX - 1)
+
+struct tc_red_qopt {
+ __u32 limit; /* HARD maximal queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags;
+#define TC_RED_ECN 1
+#define TC_RED_HARDDROP 2
+#define TC_RED_ADAPTATIVE 4
+};
+
+struct tc_red_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+};
+
+/* GRED section */
+
+#define MAX_DPs 16
+
+enum {
+ TCA_GRED_UNSPEC,
+ TCA_GRED_PARMS,
+ TCA_GRED_STAB,
+ TCA_GRED_DPS,
+ TCA_GRED_MAX_P,
+ TCA_GRED_LIMIT,
+ TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
+ __TCA_GRED_MAX,
+};
+
+#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_ENTRY_UNSPEC,
+ TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
+ __TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_UNSPEC,
+ TCA_GRED_VQ_PAD,
+ TCA_GRED_VQ_DP, /* u32 */
+ TCA_GRED_VQ_STAT_BYTES, /* u64 */
+ TCA_GRED_VQ_STAT_PACKETS, /* u32 */
+ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_PDROP, /* u32 */
+ TCA_GRED_VQ_STAT_OTHER, /* u32 */
+ TCA_GRED_VQ_FLAGS, /* u32 */
+ __TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
+
+struct tc_gred_qopt {
+ __u32 limit; /* HARD maximal queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ __u32 DP; /* up to 2^32 DPs */
+ __u32 backlog;
+ __u32 qave;
+ __u32 forced;
+ __u32 early;
+ __u32 other;
+ __u32 pdrop;
+ __u8 Wlog; /* log(W) */
+ __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
+ __u8 Scell_log; /* cell size for idle damping */
+ __u8 prio; /* prio of this VQ */
+ __u32 packets;
+ __u32 bytesin;
+};
+
+/* gred setup */
+struct tc_gred_sopt {
+ __u32 DPs;
+ __u32 def_DP;
+ __u8 grio;
+ __u8 flags;
+ __u16 pad1;
+};
+
+/* CHOKe section */
+
+enum {
+ TCA_CHOKE_UNSPEC,
+ TCA_CHOKE_PARMS,
+ TCA_CHOKE_STAB,
+ TCA_CHOKE_MAX_P,
+ __TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+ __u32 limit; /* Hard queue length (packets) */
+ __u32 qth_min; /* Min average threshold (packets) */
+ __u32 qth_max; /* Max average threshold (packets) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags; /* see RED flags */
+};
+
+struct tc_choke_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+ __u32 matched; /* Drops due to flow match */
+};
+
+/* HTB section */
+#define TC_HTB_NUMPRIO 8
+#define TC_HTB_MAXDEPTH 8
+#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */
+
+struct tc_htb_opt {
+ struct tc_ratespec rate;
+ struct tc_ratespec ceil;
+ __u32 buffer;
+ __u32 cbuffer;
+ __u32 quantum;
+ __u32 level; /* out only */
+ __u32 prio;
+};
+struct tc_htb_glob {
+ __u32 version; /* to match HTB/TC */
+ __u32 rate2quantum; /* bps->quantum divisor */
+ __u32 defcls; /* default class number */
+ __u32 debug; /* debug flags */
+
+ /* stats */
+ __u32 direct_pkts; /* count of non shaped packets */
+};
+enum {
+ TCA_HTB_UNSPEC,
+ TCA_HTB_PARMS,
+ TCA_HTB_INIT,
+ TCA_HTB_CTAB,
+ TCA_HTB_RTAB,
+ TCA_HTB_DIRECT_QLEN,
+ TCA_HTB_RATE64,
+ TCA_HTB_CEIL64,
+ TCA_HTB_PAD,
+ __TCA_HTB_MAX,
+};
+
+#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
+
+struct tc_htb_xstats {
+ __u32 lends;
+ __u32 borrows;
+ __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
+ __s32 tokens;
+ __s32 ctokens;
+};
+
+/* HFSC section */
+
+struct tc_hfsc_qopt {
+ __u16 defcls; /* default class */
+};
+
+struct tc_service_curve {
+ __u32 m1; /* slope of the first segment in bps */
+ __u32 d; /* x-projection of the first segment in us */
+ __u32 m2; /* slope of the second segment in bps */
+};
+
+struct tc_hfsc_stats {
+ __u64 work; /* total work done */
+ __u64 rtwork; /* work done by real-time criteria */
+ __u32 period; /* current period */
+ __u32 level; /* class level in hierarchy */
+};
+
+enum {
+ TCA_HFSC_UNSPEC,
+ TCA_HFSC_RSC,
+ TCA_HFSC_FSC,
+ TCA_HFSC_USC,
+ __TCA_HFSC_MAX,
+};
+
+#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
+
+
+/* CBQ section */
+
+#define TC_CBQ_MAXPRIO 8
+#define TC_CBQ_MAXLEVEL 8
+#define TC_CBQ_DEF_EWMA 5
+
+struct tc_cbq_lssopt {
+ unsigned char change;
+ unsigned char flags;
+#define TCF_CBQ_LSS_BOUNDED 1
+#define TCF_CBQ_LSS_ISOLATED 2
+ unsigned char ewma_log;
+ unsigned char level;
+#define TCF_CBQ_LSS_FLAGS 1
+#define TCF_CBQ_LSS_EWMA 2
+#define TCF_CBQ_LSS_MAXIDLE 4
+#define TCF_CBQ_LSS_MINIDLE 8
+#define TCF_CBQ_LSS_OFFTIME 0x10
+#define TCF_CBQ_LSS_AVPKT 0x20
+ __u32 maxidle;
+ __u32 minidle;
+ __u32 offtime;
+ __u32 avpkt;
+};
+
+struct tc_cbq_wrropt {
+ unsigned char flags;
+ unsigned char priority;
+ unsigned char cpriority;
+ unsigned char __reserved;
+ __u32 allot;
+ __u32 weight;
+};
+
+struct tc_cbq_ovl {
+ unsigned char strategy;
+#define TC_CBQ_OVL_CLASSIC 0
+#define TC_CBQ_OVL_DELAY 1
+#define TC_CBQ_OVL_LOWPRIO 2
+#define TC_CBQ_OVL_DROP 3
+#define TC_CBQ_OVL_RCLASSIC 4
+ unsigned char priority2;
+ __u16 pad;
+ __u32 penalty;
+};
+
+struct tc_cbq_police {
+ unsigned char police;
+ unsigned char __res1;
+ unsigned short __res2;
+};
+
+struct tc_cbq_fopt {
+ __u32 split;
+ __u32 defmap;
+ __u32 defchange;
+};
+
+struct tc_cbq_xstats {
+ __u32 borrows;
+ __u32 overactions;
+ __s32 avgidle;
+ __s32 undertime;
+};
+
+enum {
+ TCA_CBQ_UNSPEC,
+ TCA_CBQ_LSSOPT,
+ TCA_CBQ_WRROPT,
+ TCA_CBQ_FOPT,
+ TCA_CBQ_OVL_STRATEGY,
+ TCA_CBQ_RATE,
+ TCA_CBQ_RTAB,
+ TCA_CBQ_POLICE,
+ __TCA_CBQ_MAX,
+};
+
+#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
+
+/* dsmark section */
+
+enum {
+ TCA_DSMARK_UNSPEC,
+ TCA_DSMARK_INDICES,
+ TCA_DSMARK_DEFAULT_INDEX,
+ TCA_DSMARK_SET_TC_INDEX,
+ TCA_DSMARK_MASK,
+ TCA_DSMARK_VALUE,
+ __TCA_DSMARK_MAX,
+};
+
+#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
+
+/* ATM section */
+
+enum {
+ TCA_ATM_UNSPEC,
+ TCA_ATM_FD, /* file/socket descriptor */
+ TCA_ATM_PTR, /* pointer to descriptor - later */
+ TCA_ATM_HDR, /* LL header */
+ TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
+ TCA_ATM_ADDR, /* PVC address (for output only) */
+ TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
+ __TCA_ATM_MAX,
+};
+
+#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
+
+/* Network emulator */
+
+enum {
+ TCA_NETEM_UNSPEC,
+ TCA_NETEM_CORR,
+ TCA_NETEM_DELAY_DIST,
+ TCA_NETEM_REORDER,
+ TCA_NETEM_CORRUPT,
+ TCA_NETEM_LOSS,
+ TCA_NETEM_RATE,
+ TCA_NETEM_ECN,
+ TCA_NETEM_RATE64,
+ TCA_NETEM_PAD,
+ TCA_NETEM_LATENCY64,
+ TCA_NETEM_JITTER64,
+ TCA_NETEM_SLOT,
+ TCA_NETEM_SLOT_DIST,
+ __TCA_NETEM_MAX,
+};
+
+#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
+
+struct tc_netem_qopt {
+ __u32 latency; /* added delay (us) */
+ __u32 limit; /* fifo limit (packets) */
+ __u32 loss; /* random packet loss (0=none ~0=100%) */
+ __u32 gap; /* re-ordering gap (0 for none) */
+ __u32 duplicate; /* random packet dup (0=none ~0=100%) */
+ __u32 jitter; /* random jitter in latency (us) */
+};
+
+struct tc_netem_corr {
+ __u32 delay_corr; /* delay correlation */
+ __u32 loss_corr; /* packet loss correlation */
+ __u32 dup_corr; /* duplicate correlation */
+};
+
+struct tc_netem_reorder {
+ __u32 probability;
+ __u32 correlation;
+};
+
+struct tc_netem_corrupt {
+ __u32 probability;
+ __u32 correlation;
+};
+
+struct tc_netem_rate {
+ __u32 rate; /* byte/s */
+ __s32 packet_overhead;
+ __u32 cell_size;
+ __s32 cell_overhead;
+};
+
+struct tc_netem_slot {
+ __s64 min_delay; /* nsec */
+ __s64 max_delay;
+ __s32 max_packets;
+ __s32 max_bytes;
+ __s64 dist_delay; /* nsec */
+ __s64 dist_jitter; /* nsec */
+};
+
+enum {
+ NETEM_LOSS_UNSPEC,
+ NETEM_LOSS_GI, /* General Intuitive - 4 state model */
+ NETEM_LOSS_GE, /* Gilbert Elliot models */
+ __NETEM_LOSS_MAX
+};
+#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
+
+/* State transition probabilities for 4 state model */
+struct tc_netem_gimodel {
+ __u32 p13;
+ __u32 p31;
+ __u32 p32;
+ __u32 p14;
+ __u32 p23;
+};
+
+/* Gilbert-Elliot models */
+struct tc_netem_gemodel {
+ __u32 p;
+ __u32 r;
+ __u32 h;
+ __u32 k1;
+};
+
+#define NETEM_DIST_SCALE 8192
+#define NETEM_DIST_MAX 16384
+
+/* DRR */
+
+enum {
+ TCA_DRR_UNSPEC,
+ TCA_DRR_QUANTUM,
+ __TCA_DRR_MAX
+};
+
+#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
+
+struct tc_drr_stats {
+ __u32 deficit;
+};
+
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+enum {
+ TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
+ TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
+ __TC_MQPRIO_HW_OFFLOAD_MAX
+};
+
+#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
+
+enum {
+ TC_MQPRIO_MODE_DCB,
+ TC_MQPRIO_MODE_CHANNEL,
+ __TC_MQPRIO_MODE_MAX
+};
+
+#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
+
+enum {
+ TC_MQPRIO_SHAPER_DCB,
+ TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
+ __TC_MQPRIO_SHAPER_MAX
+};
+
+#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
+
+struct tc_mqprio_qopt {
+ __u8 num_tc;
+ __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
+ __u8 hw;
+ __u16 count[TC_QOPT_MAX_QUEUE];
+ __u16 offset[TC_QOPT_MAX_QUEUE];
+};
+
+#define TC_MQPRIO_F_MODE 0x1
+#define TC_MQPRIO_F_SHAPER 0x2
+#define TC_MQPRIO_F_MIN_RATE 0x4
+#define TC_MQPRIO_F_MAX_RATE 0x8
+
+enum {
+ TCA_MQPRIO_UNSPEC,
+ TCA_MQPRIO_MODE,
+ TCA_MQPRIO_SHAPER,
+ TCA_MQPRIO_MIN_RATE64,
+ TCA_MQPRIO_MAX_RATE64,
+ __TCA_MQPRIO_MAX,
+};
+
+#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
+
+/* SFB */
+
+enum {
+ TCA_SFB_UNSPEC,
+ TCA_SFB_PARMS,
+ __TCA_SFB_MAX,
+};
+
+#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
+
+/*
+ * Note: increment, decrement are Q0.16 fixed-point values.
+ */
+struct tc_sfb_qopt {
+ __u32 rehash_interval; /* delay between hash move, in ms */
+ __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */
+ __u32 max; /* max len of qlen_min */
+ __u32 bin_size; /* maximum queue length per bin */
+ __u32 increment; /* probability increment, (d1 in Blue) */
+ __u32 decrement; /* probability decrement, (d2 in Blue) */
+ __u32 limit; /* max SFB queue length */
+ __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
+ __u32 penalty_burst;
+};
+
+struct tc_sfb_xstats {
+ __u32 earlydrop;
+ __u32 penaltydrop;
+ __u32 bucketdrop;
+ __u32 queuedrop;
+ __u32 childdrop; /* drops in child qdisc */
+ __u32 marked;
+ __u32 maxqlen;
+ __u32 maxprob;
+ __u32 avgprob;
+};
+
+#define SFB_MAX_PROB 0xFFFF
+
+/* QFQ */
+enum {
+ TCA_QFQ_UNSPEC,
+ TCA_QFQ_WEIGHT,
+ TCA_QFQ_LMAX,
+ __TCA_QFQ_MAX
+};
+
+#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1)
+
+struct tc_qfq_stats {
+ __u32 weight;
+ __u32 lmax;
+};
+
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_INTERVAL,
+ TCA_CODEL_ECN,
+ TCA_CODEL_CE_THRESHOLD,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 count; /* how many drops we've done since the last time we
+ * entered dropping state
+ */
+ __u32 lastcount; /* count at entry to dropping state */
+ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
+ __s32 drop_next; /* time to drop next packet */
+ __u32 drop_overlimit; /* number of times the max qdisc packet limit was hit */
+ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
+ __u32 dropping; /* are we in dropping state ? */
+ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
+};
+
+/* FQ_CODEL */
+
+enum {
+ TCA_FQ_CODEL_UNSPEC,
+ TCA_FQ_CODEL_TARGET,
+ TCA_FQ_CODEL_LIMIT,
+ TCA_FQ_CODEL_INTERVAL,
+ TCA_FQ_CODEL_ECN,
+ TCA_FQ_CODEL_FLOWS,
+ TCA_FQ_CODEL_QUANTUM,
+ TCA_FQ_CODEL_CE_THRESHOLD,
+ TCA_FQ_CODEL_DROP_BATCH_SIZE,
+ TCA_FQ_CODEL_MEMORY_LIMIT,
+ __TCA_FQ_CODEL_MAX
+};
+
+#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
+
+enum {
+ TCA_FQ_CODEL_XSTATS_QDISC,
+ TCA_FQ_CODEL_XSTATS_CLASS,
+};
+
+struct tc_fq_codel_qd_stats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 drop_overlimit; /* number of times the max qdisc
+ * packet limit was hit
+ */
+ __u32 ecn_mark; /* number of packets we ECN marked
+ * instead of being dropped
+ */
+ __u32 new_flow_count; /* number of times packets
+ * created a 'new flow'
+ */
+ __u32 new_flows_len; /* count of flows in new list */
+ __u32 old_flows_len; /* count of flows in old list */
+ __u32 ce_mark; /* packets above ce_threshold */
+ __u32 memory_usage; /* in bytes */
+ __u32 drop_overmemory;
+};
+
+struct tc_fq_codel_cl_stats {
+ __s32 deficit;
+ __u32 ldelay; /* in-queue delay seen by most recently
+ * dequeued packet
+ */
+ __u32 count;
+ __u32 lastcount;
+ __u32 dropping;
+ __s32 drop_next;
+};
+
+struct tc_fq_codel_xstats {
+ __u32 type;
+ union {
+ struct tc_fq_codel_qd_stats qdisc_stats;
+ struct tc_fq_codel_cl_stats class_stats;
+ };
+};
+
+/* FQ */
+
+enum {
+ TCA_FQ_UNSPEC,
+
+ TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
+
+ TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
+
+ TCA_FQ_QUANTUM, /* RR quantum */
+
+ TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
+
+ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
+
+ TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
+
+ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
+
+ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
+
+ TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
+
+ TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+
+ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
+
+ __TCA_FQ_MAX
+};
+
+#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
+
+struct tc_fq_qd_stats {
+ __u64 gc_flows;
+ __u64 highprio_packets;
+ __u64 tcp_retrans;
+ __u64 throttled;
+ __u64 flows_plimit;
+ __u64 pkts_too_long;
+ __u64 allocation_errors;
+ __s64 time_next_delayed_flow;
+ __u32 flows;
+ __u32 inactive_flows;
+ __u32 throttled_flows;
+ __u32 unthrottle_latency_ns;
+ __u64 ce_mark; /* packets above ce_threshold */
+};
+
+/* Heavy-Hitter Filter */
+
+enum {
+ TCA_HHF_UNSPEC,
+ TCA_HHF_BACKLOG_LIMIT,
+ TCA_HHF_QUANTUM,
+ TCA_HHF_HH_FLOWS_LIMIT,
+ TCA_HHF_RESET_TIMEOUT,
+ TCA_HHF_ADMIT_BYTES,
+ TCA_HHF_EVICT_TIMEOUT,
+ TCA_HHF_NON_HH_WEIGHT,
+ __TCA_HHF_MAX
+};
+
+#define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
+
+struct tc_hhf_xstats {
+ __u32 drop_overlimit; /* number of times max qdisc packet limit
+ * was hit
+ */
+ __u32 hh_overlimit; /* number of times max heavy-hitters was hit */
+ __u32 hh_tot_count; /* number of captured heavy-hitters so far */
+ __u32 hh_cur_count; /* number of current heavy-hitters */
+};
+
+/* PIE */
+enum {
+ TCA_PIE_UNSPEC,
+ TCA_PIE_TARGET,
+ TCA_PIE_LIMIT,
+ TCA_PIE_TUPDATE,
+ TCA_PIE_ALPHA,
+ TCA_PIE_BETA,
+ TCA_PIE_ECN,
+ TCA_PIE_BYTEMODE,
+ __TCA_PIE_MAX
+};
+#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
+
+struct tc_pie_xstats {
+ __u32 prob; /* current probability */
+ __u32 delay; /* current delay in ms */
+ __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to pie_action */
+ __u32 overlimit; /* dropped due to lack of space in queue */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ecn */
+};
+
+/* CBS */
+struct tc_cbs_qopt {
+ __u8 offload;
+ __u8 _pad[3];
+ __s32 hicredit;
+ __s32 locredit;
+ __s32 idleslope;
+ __s32 sendslope;
+};
+
+enum {
+ TCA_CBS_UNSPEC,
+ TCA_CBS_PARMS,
+ __TCA_CBS_MAX,
+};
+
+#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
+
+
+/* ETF */
+struct tc_etf_qopt {
+ __s32 delta;
+ __s32 clockid;
+ __u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON BIT(0)
+#define TC_ETF_OFFLOAD_ON BIT(1)
+};
+
+enum {
+ TCA_ETF_UNSPEC,
+ TCA_ETF_PARMS,
+ __TCA_ETF_MAX,
+};
+
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
+
+
+/* CAKE */
+enum {
+ TCA_CAKE_UNSPEC,
+ TCA_CAKE_PAD,
+ TCA_CAKE_BASE_RATE64,
+ TCA_CAKE_DIFFSERV_MODE,
+ TCA_CAKE_ATM,
+ TCA_CAKE_FLOW_MODE,
+ TCA_CAKE_OVERHEAD,
+ TCA_CAKE_RTT,
+ TCA_CAKE_TARGET,
+ TCA_CAKE_AUTORATE,
+ TCA_CAKE_MEMORY,
+ TCA_CAKE_NAT,
+ TCA_CAKE_RAW,
+ TCA_CAKE_WASH,
+ TCA_CAKE_MPU,
+ TCA_CAKE_INGRESS,
+ TCA_CAKE_ACK_FILTER,
+ TCA_CAKE_SPLIT_GSO,
+ __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
+
+enum {
+ __TCA_CAKE_STATS_INVALID,
+ TCA_CAKE_STATS_PAD,
+ TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+ TCA_CAKE_STATS_MEMORY_LIMIT,
+ TCA_CAKE_STATS_MEMORY_USED,
+ TCA_CAKE_STATS_AVG_NETOFF,
+ TCA_CAKE_STATS_MIN_NETLEN,
+ TCA_CAKE_STATS_MAX_NETLEN,
+ TCA_CAKE_STATS_MIN_ADJLEN,
+ TCA_CAKE_STATS_MAX_ADJLEN,
+ TCA_CAKE_STATS_TIN_STATS,
+ TCA_CAKE_STATS_DEFICIT,
+ TCA_CAKE_STATS_COBALT_COUNT,
+ TCA_CAKE_STATS_DROPPING,
+ TCA_CAKE_STATS_DROP_NEXT_US,
+ TCA_CAKE_STATS_P_DROP,
+ TCA_CAKE_STATS_BLUE_TIMER_US,
+ __TCA_CAKE_STATS_MAX
+};
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+
+enum {
+ __TCA_CAKE_TIN_STATS_INVALID,
+ TCA_CAKE_TIN_STATS_PAD,
+ TCA_CAKE_TIN_STATS_SENT_PACKETS,
+ TCA_CAKE_TIN_STATS_SENT_BYTES64,
+ TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+ TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+ TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+ TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+ TCA_CAKE_TIN_STATS_TARGET_US,
+ TCA_CAKE_TIN_STATS_INTERVAL_US,
+ TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+ TCA_CAKE_TIN_STATS_WAY_MISSES,
+ TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+ TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+ TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+ TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+ TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+ TCA_CAKE_TIN_STATS_BULK_FLOWS,
+ TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+ TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+ TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+ __TCA_CAKE_TIN_STATS_MAX
+};
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
+#define TC_CAKE_MAX_TINS (8)
+
+enum {
+ CAKE_FLOW_NONE = 0,
+ CAKE_FLOW_SRC_IP,
+ CAKE_FLOW_DST_IP,
+ CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+ CAKE_FLOW_FLOWS,
+ CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_MAX,
+};
+
+enum {
+ CAKE_DIFFSERV_DIFFSERV3 = 0,
+ CAKE_DIFFSERV_DIFFSERV4,
+ CAKE_DIFFSERV_DIFFSERV8,
+ CAKE_DIFFSERV_BESTEFFORT,
+ CAKE_DIFFSERV_PRECEDENCE,
+ CAKE_DIFFSERV_MAX
+};
+
+enum {
+ CAKE_ACK_NONE = 0,
+ CAKE_ACK_FILTER,
+ CAKE_ACK_AGGRESSIVE,
+ CAKE_ACK_MAX
+};
+
+enum {
+ CAKE_ATM_NONE = 0,
+ CAKE_ATM_ATM,
+ CAKE_ATM_PTM,
+ CAKE_ATM_MAX
+};
+
+
+/* TAPRIO */
+enum {
+ TC_TAPRIO_CMD_SET_GATES = 0x00,
+ TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+ TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+};
+
+enum {
+ TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
+ TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
+ __TCA_TAPRIO_SCHED_ENTRY_MAX,
+};
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
+
+/* The format for schedule entry list is:
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
+ * [TCA_TAPRIO_SCHED_ENTRY]
+ * [TCA_TAPRIO_SCHED_ENTRY_CMD]
+ * [TCA_TAPRIO_SCHED_ENTRY_GATES]
+ * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
+ */
+enum {
+ TCA_TAPRIO_SCHED_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY,
+ __TCA_TAPRIO_SCHED_MAX,
+};
+
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+
+enum {
+ TCA_TAPRIO_ATTR_UNSPEC,
+ TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
+ TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
+ TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
+ TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
+ TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
+ TCA_TAPRIO_PAD,
+ __TCA_TAPRIO_ATTR_MAX,
+};
+
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+
+#endif
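
The handle macros near the top of this header pack a 16-bit major number into the upper half of the 32-bit handle and a 16-bit minor into the lower half; a qdisc handle such as "1:" is major 1, minor 0, and a class "1:10" shares its parent qdisc's major. A small stand-alone sketch (the handle values are arbitrary examples) of composing and decomposing handles with these macros:

#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))

int main(void)
{
	/* "1:" qdisc handle: major 1, minor 0 (qdisc handles always have minor 0) */
	uint32_t qdisc = TC_H_MAKE(1U << 16, 0);
	/* "1:10" class handle: same major as the parent qdisc, minor 0x10 */
	uint32_t cls = TC_H_MAKE(qdisc, 0x10);

	printf("qdisc %x:, class %x:%x\n",
	       TC_H_MAJ(qdisc) >> 16, TC_H_MAJ(cls) >> 16, TC_H_MIN(cls));
	return 0;
}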
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 3caaa3428774..88cbd110ae58 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
return syscall(__NR_bpf, cmd, attr, size);
}
+static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+{
+ int fd;
+
+ do {
+ fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+ } while (fd < 0 && errno == EAGAIN);
+
+ return fd;
+}
+
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
@@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
memcpy(attr.prog_name, load_attr->name,
min(name_len, BPF_OBJ_NAME_LEN - 1));
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
return fd;
@@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
break;
}
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
goto done;
@@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
attr.log_size = log_buf_sz;
attr.log_level = 1;
log_buf[0] = 0;
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
free(finfo);
free(linfo);
@@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.kern_version = kern_version;
attr.prog_flags = prog_flags;
- return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ return sys_bpf_prog_load(&attr, sizeof(attr));
}
int bpf_map_update_elem(int fd, const void *key, const void *value,
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index 1076393e6f43..e18a3556f5e3 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -63,7 +63,8 @@ static const char *reg_names[] = {
[PERF_REG_POWERPC_TRAP] = "trap",
[PERF_REG_POWERPC_DAR] = "dar",
[PERF_REG_POWERPC_DSISR] = "dsisr",
- [PERF_REG_POWERPC_SIER] = "sier"
+ [PERF_REG_POWERPC_SIER] = "sier",
+ [PERF_REG_POWERPC_MMCRA] = "mmcra"
};
static inline const char *perf_reg_name(int id)
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index 07fcd977d93e..34d5134681d9 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(dar, PERF_REG_POWERPC_DAR),
SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
SMPL_REG(sier, PERF_REG_POWERPC_SIER),
+ SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA),
SMPL_REG_END
};
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 70229de510f5..41ab7a3668b3 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -56,6 +56,7 @@ TEST_PROGS := test_kmod.sh \
test_xdp_vlan.sh
TEST_PROGS_EXTENDED := with_addr.sh \
+ with_tunnels.sh \
tcp_client.py \
tcp_server.py
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 126fc624290d..25f0083a9b2e 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void)
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
+ int retry = 1;
+retry:
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
goto out;
@@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void)
previous_key = key;
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto disable_pmu;
@@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void)
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
+ int retry = 1;
+retry:
err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
return;
@@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void)
previous_key = key;
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto disable_pmu;
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index 94fdbf215c14..c4cf6e6d800e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -25,6 +25,7 @@ ALL_TESTS="
lag_unlink_slaves_test
lag_dev_deletion_test
vlan_interface_uppers_test
+ bridge_extern_learn_test
devlink_reload_test
"
NUM_NETIFS=2
@@ -541,6 +542,25 @@ vlan_interface_uppers_test()
ip link del dev br0
}
+bridge_extern_learn_test()
+{
+ # Test that externally learned entries added from user space are
+ # marked as offloaded
+ RET=0
+
+ ip link add name br0 type bridge
+ ip link set dev $swp1 master br0
+
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn
+
+ bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload
+ check_err $? "fdb entry not marked as offloaded when should"
+
+ log_test "externally learned fdb entry"
+
+ ip link del dev br0
+}
+
devlink_reload_test()
{
# Test that after executing all the above configuration tests, a
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
index 04c6431b2bd8..b90dff8d3a94 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion"
+ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
NUM_NETIFS=4
CHECK_TC="yes"
source lib.sh
@@ -109,6 +109,38 @@ vlan_deletion()
ping_ipv6
}
+extern_learn()
+{
+ local mac=de:ad:be:ef:13:37
+ local ageing_time
+
+ # Test that externally learned FDB entries can roam, but not age out
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
+
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+ check_err $? "Did not find FDB entry when should"
+
+ # Wait for 10 seconds after the ageing time to make sure the FDB entry
+ # was not aged out
+ ageing_time=$(bridge_ageing_time_get br0)
+ sleep $((ageing_time + 10))
+
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+ check_err $? "FDB entry was aged out when should not"
+
+ $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
+
+ bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
+ check_err $? "FDB entry did not roam when should"
+
+ log_test "Externally learned FDB entry - ageing & roaming"
+
+ bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
+ bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
index 2e563d17cf0c..d1bbafb16f47 100644
--- a/tools/testing/selftests/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c
@@ -240,7 +240,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
cm->cmsg_type == IP_RECVERR) ||
(cm->cmsg_level == SOL_IPV6 &&
cm->cmsg_type == IPV6_RECVERR) ||
- (cm->cmsg_level = SOL_PACKET &&
+ (cm->cmsg_level == SOL_PACKET &&
cm->cmsg_type == PACKET_TX_TIMESTAMP)) {
serr = (void *) CMSG_DATA(cm);
if (serr->ee_errno != ENOMSG ||
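
The one-character fix above matters because '&&' and '==' bind tighter than '=', so the original condition assigned the result of the type comparison to cm->cmsg_level instead of comparing the level at all, and that assigned value then drove the branch. A stand-alone illustration with a stripped-down structure and made-up constant values (not the real SOL_PACKET / PACKET_TX_TIMESTAMP handling):

#include <stdio.h>

#define DEMO_SOL_PACKET		 263	/* stand-in value, for illustration only */
#define DEMO_PACKET_TX_TIMESTAMP 16	/* stand-in value, for illustration only */

struct demo_cmsg {
	int cmsg_level;
	int cmsg_type;
};

int main(void)
{
	struct demo_cmsg cm = { .cmsg_level = 1, .cmsg_type = DEMO_PACKET_TX_TIMESTAMP };

	/* Buggy form: parses as
	 *   cm.cmsg_level = (DEMO_SOL_PACKET && cm.cmsg_type == DEMO_PACKET_TX_TIMESTAMP)
	 * so the level is never compared and is clobbered with 0 or 1.
	 */
	if (cm.cmsg_level = DEMO_SOL_PACKET &&
	    cm.cmsg_type == DEMO_PACKET_TX_TIMESTAMP)
		printf("buggy test matched; cmsg_level clobbered to %d\n", cm.cmsg_level);

	cm.cmsg_level = 1;
	/* Fixed form compares the level and leaves it intact. */
	if (cm.cmsg_level == DEMO_SOL_PACKET &&
	    cm.cmsg_type == DEMO_PACKET_TX_TIMESTAMP)
		printf("fixed test matched\n");
	else
		printf("fixed test correctly rejected; cmsg_level still %d\n", cm.cmsg_level);

	return 0;
}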
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 637ea0219617..0da3545cabdb 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -17,7 +17,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -41,7 +41,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -65,7 +65,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -89,7 +89,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -113,7 +113,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -137,7 +137,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -161,7 +161,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 90",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -185,7 +185,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 90",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
"matchCount": "0",
"teardown": []
},
@@ -207,7 +207,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -231,7 +231,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -255,7 +255,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -279,7 +279,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -303,7 +303,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -327,7 +327,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -351,7 +351,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 99",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -375,7 +375,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 99",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
"matchCount": "0",
"teardown": []
},
@@ -397,7 +397,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -421,7 +421,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -445,7 +445,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -469,7 +469,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -493,7 +493,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -517,7 +517,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -541,7 +541,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -565,7 +565,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -589,7 +589,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
"matchCount": "0",
"teardown": []
},
@@ -611,7 +611,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -635,7 +635,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -659,7 +659,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 11",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -683,7 +683,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -707,7 +707,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 21",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -731,7 +731,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 21",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -739,7 +739,7 @@
},
{
"id": "fac3",
- "name": "Create valid ife encode action with index at 32-bit maximnum",
+ "name": "Create valid ife encode action with index at 32-bit maximum",
"category": [
"actions",
"ife"
@@ -755,7 +755,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4294967295",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -779,7 +779,7 @@
"cmdUnderTest": "$TC actions add action ife decode pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -803,7 +803,7 @@
"cmdUnderTest": "$TC actions add action ife decode pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -827,7 +827,7 @@
"cmdUnderTest": "$TC actions add action ife decode continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -851,7 +851,7 @@
"cmdUnderTest": "$TC actions add action ife decode drop index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -875,7 +875,7 @@
"cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -899,7 +899,7 @@
"cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -923,7 +923,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4294967295999",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
"matchCount": "0",
"teardown": []
},
@@ -945,7 +945,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
"matchCount": "0",
"teardown": []
},
@@ -967,7 +967,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -991,7 +991,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
"matchCount": "0",
"teardown": []
},
@@ -1013,7 +1013,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
"matchCount": "0",
"teardown": []
},
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index 10b2d894e436..e7e15a7336b6 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -82,35 +82,6 @@
]
},
{
- "id": "ba4e",
- "name": "Add tunnel_key set action with missing mandatory id parameter",
- "category": [
- "actions",
- "tunnel_key"
- ],
- "setup": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ],
- "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
- "expExitCode": "255",
- "verifyCmd": "$TC actions list action tunnel_key",
- "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
- "matchCount": "0",
- "teardown": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ]
- },
- {
"id": "a5e0",
"name": "Add tunnel_key set action with invalid src_ip parameter",
"category": [
@@ -634,7 +605,7 @@
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action tunnel_key index 4",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"matchCount": "1",
"teardown": [
"$TC actions flush action tunnel_key"