author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-12-21 16:46:08 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-12-21 16:46:08 +0100
commit		cd6a22310ec2a70092e136d0cd65bb77c1502521 (patch)
tree		c01fa788b27b240c7b426d7f329d92bd58c7b8f5 /drivers
parent		1e12a521d6917004f8b95a3b5864b92edc2694c8 (diff)
parent		177c459b08a34dcf004aa9a4c1f1d8be682ff3af (diff)
download	linux-cd6a22310ec2a70092e136d0cd65bb77c1502521.tar.bz2
Merge USB 4.20-rc8 mergepoint into usb-next

We need the USB changes in here for additional patches to be able to apply cleanly.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clk/qcom/gcc-qcs404.c | 2
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h | 7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c | 78
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c | 20
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 591
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.h | 26
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 37
-rw-r--r--  drivers/hid/hid-ids.h | 7
-rw-r--r--  drivers/hid/hid-ite.c | 1
-rw-r--r--  drivers/hid/hid-quirks.c | 2
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 9
-rw-r--r--  drivers/md/dm-cache-metadata.c | 4
-rw-r--r--  drivers/md/dm-thin.c | 72
-rw-r--r--  drivers/md/dm-zoned-target.c | 122
-rw-r--r--  drivers/md/dm.c | 2
-rw-r--r--  drivers/media/Kconfig | 13
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c | 44
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-v4l2.c | 13
-rw-r--r--  drivers/media/media-device.c | 4
-rw-r--r--  drivers/media/platform/vicodec/vicodec-core.c | 13
-rw-r--r--  drivers/media/platform/vivid/vivid-sdr-cap.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vbi-cap.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vbi-out.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-cap.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-out.c | 2
-rw-r--r--  drivers/media/platform/vsp1/vsp1_lif.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c | 4
-rw-r--r--  drivers/mmc/core/block.c | 15
-rw-r--r--  drivers/mmc/core/mmc.c | 24
-rw-r--r--  drivers/mmc/host/omap.c | 11
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-omap.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 8
-rw-r--r--  drivers/mmc/host/sdhci.c | 22
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 70
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 48
-rw-r--r--  drivers/net/ethernet/cadence/macb_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 503
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 43
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 18
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 1
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 11
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 28
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 4
-rw-r--r--  drivers/net/ieee802154/mac802154_hwsim.c | 4
-rw-r--r--  drivers/net/phy/phy_device.c | 7
-rw-r--r--  drivers/net/usb/hso.c | 18
-rw-r--r--  drivers/net/usb/lan78xx.c | 4
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/r8152.c | 33
-rw-r--r--  drivers/net/vxlan.c | 21
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 28
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/thermal.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 9
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n.c | 5
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 96
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 1
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/pci/pcie/aer.c | 2
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.c | 3
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sdm660.c | 28
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r--  drivers/staging/media/sunxi/cedrus/Kconfig | 1
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_hw.c | 4
-rw-r--r--  drivers/thermal/hisi_thermal.c | 4
-rw-r--r--  drivers/thermal/st/stm_thermal.c | 12
-rw-r--r--  drivers/usb/host/xhci-hub.c | 3
-rw-r--r--  drivers/usb/host/xhci.h | 4
-rw-r--r--  drivers/usb/serial/option.c | 16
-rw-r--r--  drivers/vhost/net.c | 8
-rw-r--r--  drivers/vhost/vhost.c | 23
-rw-r--r--  drivers/video/backlight/pwm_bl.c | 41
150 files changed, 2075 insertions, 1074 deletions
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index ef1b267cb058..64da032bb9ed 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -297,7 +297,7 @@ static struct clk_alpha_pll gpll0_out_main = {
.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_names = (const char *[])
- { "gpll0_sleep_clk_src" },
+ { "cxo" },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index 7725b6ee14ef..59bb67d5a7ce 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -153,6 +153,11 @@ struct chtls_dev {
unsigned int cdev_state;
};
+struct chtls_listen {
+ struct chtls_dev *cdev;
+ struct sock *sk;
+};
+
struct chtls_hws {
struct sk_buff_head sk_recv_queue;
u8 txqid;
@@ -215,6 +220,8 @@ struct chtls_sock {
u16 resv2;
u32 delack_mode;
u32 delack_seq;
+ u32 snd_win;
+ u32 rcv_win;
void *passive_reap_next; /* placeholder for passive */
struct chtls_hws tlshws;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 20209e29f814..931b96c220af 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
+#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
@@ -887,24 +888,6 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
return mtu_idx;
}
-static unsigned int select_rcv_wnd(struct chtls_sock *csk)
-{
- unsigned int rcvwnd;
- unsigned int wnd;
- struct sock *sk;
-
- sk = csk->sk;
- wnd = tcp_full_space(sk);
-
- if (wnd < MIN_RCV_WND)
- wnd = MIN_RCV_WND;
-
- rcvwnd = MAX_RCV_WND;
-
- csk_set_flag(csk, CSK_UPDATE_RCV_WND);
- return min(wnd, rcvwnd);
-}
-
static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
int wscale = 0;
@@ -951,7 +934,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
req);
opt0 = TCAM_BYPASS_F |
- WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
+ WND_SCALE_V(RCV_WSCALE(tp)) |
MSS_IDX_V(csk->mtu_idx) |
L2T_IDX_V(csk->l2t_entry->idx) |
NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
@@ -1005,6 +988,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
+static void chtls_set_tcp_window(struct chtls_sock *csk)
+{
+ struct net_device *ndev = csk->egress_dev;
+ struct port_info *pi = netdev_priv(ndev);
+ unsigned int linkspeed;
+ u8 scale;
+
+ linkspeed = pi->link_cfg.speed;
+ scale = linkspeed / SPEED_10000;
+#define CHTLS_10G_RCVWIN (256 * 1024)
+ csk->rcv_win = CHTLS_10G_RCVWIN;
+ if (scale)
+ csk->rcv_win *= scale;
+#define CHTLS_10G_SNDWIN (256 * 1024)
+ csk->snd_win = CHTLS_10G_SNDWIN;
+ if (scale)
+ csk->snd_win *= scale;
+}
+
static struct sock *chtls_recv_sock(struct sock *lsk,
struct request_sock *oreq,
void *network_hdr,
@@ -1067,6 +1069,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->port_id = port_id;
csk->egress_dev = ndev;
csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ chtls_set_tcp_window(csk);
+ tp->rcv_wnd = csk->rcv_win;
+ csk->sndbuf = csk->snd_win;
csk->ulp_mode = ULP_MODE_TLS;
step = cdev->lldi->nrxq / cdev->lldi->nchan;
csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
@@ -1076,9 +1081,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
cxgb4_port_viid(ndev));
- tp->rcv_wnd = select_rcv_wnd(csk);
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- WSCALE_OK(tp),
+ sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling,
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1130,6 +1135,7 @@ static void chtls_pass_accept_request(struct sock *sk,
struct cpl_t5_pass_accept_rpl *rpl;
struct cpl_pass_accept_req *req;
struct listen_ctx *listen_ctx;
+ struct vlan_ethhdr *vlan_eh;
struct request_sock *oreq;
struct sk_buff *reply_skb;
struct chtls_sock *csk;
@@ -1142,6 +1148,10 @@ static void chtls_pass_accept_request(struct sock *sk,
unsigned int stid;
unsigned int len;
unsigned int tid;
+ bool th_ecn, ect;
+ __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+ u16 eth_hdr_len;
+ bool ecn_ok;
req = cplhdr(skb) + RSS_HDR;
tid = GET_TID(req);
@@ -1180,24 +1190,40 @@ static void chtls_pass_accept_request(struct sock *sk,
oreq->mss = 0;
oreq->ts_recent = 0;
- eh = (struct ethhdr *)(req + 1);
- iph = (struct iphdr *)(eh + 1);
+ eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
+ if (eth_hdr_len == ETH_HLEN) {
+ eh = (struct ethhdr *)(req + 1);
+ iph = (struct iphdr *)(eh + 1);
+ network_hdr = (void *)(eh + 1);
+ } else {
+ vlan_eh = (struct vlan_ethhdr *)(req + 1);
+ iph = (struct iphdr *)(vlan_eh + 1);
+ network_hdr = (void *)(vlan_eh + 1);
+ }
if (iph->version != 0x4)
goto free_oreq;
- network_hdr = (void *)(eh + 1);
tcph = (struct tcphdr *)(iph + 1);
+ skb_set_network_header(skb, (void *)iph - (void *)req);
tcp_rsk(oreq)->tfo_listener = false;
tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
chtls_set_req_port(oreq, tcph->source, tcph->dest);
- inet_rsk(oreq)->ecn_ok = 0;
chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
- if (req->tcpopt.wsf <= 14) {
+ ip_dsfield = ipv4_get_dsfield(iph);
+ if (req->tcpopt.wsf <= 14 &&
+ sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
+ th_ecn = tcph->ece && tcph->cwr;
+ if (th_ecn) {
+ ect = !INET_ECN_is_not_ect(ip_dsfield);
+ ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+ if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
+ inet_rsk(oreq)->ecn_ok = 1;
+ }
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index afebbd87c4aa..18f553fcc167 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
req_wr->lsodisable_to_flags =
htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
- FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
+ TX_URG_V(skb_urgent(skb)) |
T6_TX_FORCE_F | wr_ulp_mode_force |
TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
skb_queue_empty(&csk->txq)));
@@ -534,10 +534,9 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
FW_OFLD_TX_DATA_WR_SHOVE_F);
req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
- FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
- FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
- (sk, CSK_TX_MORE_DATA)) &&
- skb_queue_empty(&csk->txq)));
+ TX_URG_V(skb_urgent(skb)) |
+ TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
+ skb_queue_empty(&csk->txq)));
req->plen = htonl(len);
}
@@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
int mss, flags, err;
int recordsz = 0;
int copied = 0;
- int hdrlen = 0;
long timeo;
lock_sock(sk);
@@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
recordsz = tls_header_read(&hdr, &msg->msg_iter);
size -= TLS_HEADER_LENGTH;
- hdrlen += TLS_HEADER_LENGTH;
+ copied += TLS_HEADER_LENGTH;
csk->tlshws.txleft = recordsz;
csk->tlshws.type = hdr.type;
if (skb)
@@ -1083,10 +1081,8 @@ new_buf:
int off = TCP_OFF(sk);
bool merge;
- if (!page)
- goto wait_for_memory;
-
- pg_size <<= compound_order(page);
+ if (page)
+ pg_size <<= compound_order(page);
if (off < pg_size &&
skb_can_coalesce(skb, i, page, off)) {
merge = 1;
@@ -1187,7 +1183,7 @@ out:
chtls_tcp_push(sk, flags);
done:
release_sock(sk);
- return copied + hdrlen;
+ return copied;
do_fault:
if (!skb->len) {
__skb_unlink(skb, &csk->txq);
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index f472c51abe56..563f8fe7686a 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb)
static int listen_notify_handler(struct notifier_block *this,
unsigned long event, void *data)
{
- struct chtls_dev *cdev;
- struct sock *sk;
- int ret;
+ struct chtls_listen *clisten;
+ int ret = NOTIFY_DONE;
- sk = data;
- ret = NOTIFY_DONE;
+ clisten = (struct chtls_listen *)data;
switch (event) {
case CHTLS_LISTEN_START:
+ ret = chtls_listen_start(clisten->cdev, clisten->sk);
+ kfree(clisten);
+ break;
case CHTLS_LISTEN_STOP:
- mutex_lock(&cdev_list_lock);
- list_for_each_entry(cdev, &cdev_list, list) {
- if (event == CHTLS_LISTEN_START)
- ret = chtls_listen_start(cdev, sk);
- else
- chtls_listen_stop(cdev, sk);
- }
- mutex_unlock(&cdev_list_lock);
+ chtls_listen_stop(clisten->cdev, clisten->sk);
+ kfree(clisten);
break;
}
return ret;
@@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
-static int chtls_start_listen(struct sock *sk)
+static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
+ struct chtls_listen *clisten;
int err;
if (sk->sk_protocol != IPPROTO_TCP)
@@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk)
return -EADDRNOTAVAIL;
sk->sk_backlog_rcv = listen_backlog_rcv;
+ clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+ if (!clisten)
+ return -ENOMEM;
+ clisten->cdev = cdev;
+ clisten->sk = sk;
mutex_lock(&notify_mutex);
err = raw_notifier_call_chain(&listen_notify_list,
- CHTLS_LISTEN_START, sk);
+ CHTLS_LISTEN_START, clisten);
mutex_unlock(&notify_mutex);
return err;
}
-static void chtls_stop_listen(struct sock *sk)
+static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
+ struct chtls_listen *clisten;
+
if (sk->sk_protocol != IPPROTO_TCP)
return;
+ clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+ if (!clisten)
+ return;
+ clisten->cdev = cdev;
+ clisten->sk = sk;
mutex_lock(&notify_mutex);
raw_notifier_call_chain(&listen_notify_list,
- CHTLS_LISTEN_STOP, sk);
+ CHTLS_LISTEN_STOP, clisten);
mutex_unlock(&notify_mutex);
}
@@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev)
static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
+ struct chtls_dev *cdev = to_chtls_dev(dev);
+
if (sk->sk_state == TCP_LISTEN)
- return chtls_start_listen(sk);
+ return chtls_start_listen(cdev, sk);
return 0;
}
static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
+ struct chtls_dev *cdev = to_chtls_dev(dev);
+
if (sk->sk_state == TCP_LISTEN)
- chtls_stop_listen(sk);
+ chtls_stop_listen(cdev, sk);
+}
+
+static void chtls_free_uld(struct chtls_dev *cdev)
+{
+ int i;
+
+ tls_unregister_device(&cdev->tlsdev);
+ kvfree(cdev->kmap.addr);
+ idr_destroy(&cdev->hwtid_idr);
+ for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
+ kfree_skb(cdev->rspq_skb_cache[i]);
+ kfree(cdev->lldi);
+ kfree_skb(cdev->askb);
+ kfree(cdev);
+}
+
+static inline void chtls_dev_release(struct kref *kref)
+{
+ struct chtls_dev *cdev;
+ struct tls_device *dev;
+
+ dev = container_of(kref, struct tls_device, kref);
+ cdev = to_chtls_dev(dev);
+ chtls_free_uld(cdev);
}
static void chtls_register_dev(struct chtls_dev *cdev)
@@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev)
tlsdev->feature = chtls_inline_feature;
tlsdev->hash = chtls_create_hash;
tlsdev->unhash = chtls_destroy_hash;
- tls_register_device(&cdev->tlsdev);
+ tlsdev->release = chtls_dev_release;
+ kref_init(&tlsdev->kref);
+ tls_register_device(tlsdev);
cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
-static void chtls_unregister_dev(struct chtls_dev *cdev)
-{
- tls_unregister_device(&cdev->tlsdev);
-}
-
static void process_deferq(struct work_struct *task_param)
{
struct chtls_dev *cdev = container_of(task_param,
@@ -262,28 +295,16 @@ out:
return NULL;
}
-static void chtls_free_uld(struct chtls_dev *cdev)
-{
- int i;
-
- chtls_unregister_dev(cdev);
- kvfree(cdev->kmap.addr);
- idr_destroy(&cdev->hwtid_idr);
- for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
- kfree_skb(cdev->rspq_skb_cache[i]);
- kfree(cdev->lldi);
- kfree_skb(cdev->askb);
- kfree(cdev);
-}
-
static void chtls_free_all_uld(void)
{
struct chtls_dev *cdev, *tmp;
mutex_lock(&cdev_mutex);
list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
- if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
- chtls_free_uld(cdev);
+ if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
+ list_del(&cdev->list);
+ kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
+ }
}
mutex_unlock(&cdev_mutex);
}
@@ -304,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
mutex_lock(&cdev_mutex);
list_del(&cdev->list);
mutex_unlock(&cdev_mutex);
- chtls_free_uld(cdev);
+ kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
break;
default:
break;
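
The chtls_main.c changes above make two related fixes. First, listen start/stop events now carry a heap-allocated struct chtls_listen holding both the target cdev and the socket, so the notifier handler acts on exactly one device and kfree()s the context, instead of walking cdev_list on every event. Second, teardown moves to reference counting: the embedded tls_device gains a kref and a release callback, so chtls_free_uld() runs exactly once, on the final kref_put(), whichever unregister path gets there last. A minimal sketch of that release pattern, using made-up inner/outer names rather than the actual chtls structures (the real code embeds the kref in tls_device and recovers chtls_dev via to_chtls_dev()):

#include <linux/kref.h>
#include <linux/slab.h>

/* Illustrative stand-ins for tls_device / chtls_dev. */
struct inner_dev {
	struct kref kref;
	void (*release)(struct kref *kref);
};

struct outer_dev {
	struct inner_dev inner;
	/* driver state that must outlive all users */
};

static void outer_release(struct kref *kref)
{
	/* Walk back from the embedded kref to the outer object and free it. */
	struct inner_dev *inner = container_of(kref, struct inner_dev, kref);
	struct outer_dev *outer = container_of(inner, struct outer_dev, inner);

	kfree(outer);
}

static struct outer_dev *outer_create(void)
{
	struct outer_dev *outer = kzalloc(sizeof(*outer), GFP_KERNEL);

	if (!outer)
		return NULL;
	kref_init(&outer->inner.kref);		/* refcount starts at 1 */
	outer->inner.release = outer_release;
	return outer;
}

static void outer_put(struct outer_dev *outer)
{
	/* The last put, from whichever teardown path runs last, frees it. */
	kref_put(&outer->inner.kref, outer->inner.release);
}

With both chtls_free_all_uld() and the ULD state-change path reduced to list_del() plus kref_put(), a device that is unregistered twice can no longer be freed twice.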
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8816c697b205..387f1cf1dc20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
- ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (type == CGS_UCODE_ID_SMU) {
if (((adev->pdev->device == 0x67ef) &&
((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe2) ||
(adev->pdev->revision == 0xe5))) ||
((adev->pdev->device == 0x67ff) &&
((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
- } else
+ } else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ } else {
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xe7) ||
(adev->pdev->revision == 0xef))) ||
((adev->pdev->device == 0x6fdf) &&
- (adev->pdev->revision == 0xef))) {
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
- } else
+ } else if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ } else {
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ } else {
+ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ }
break;
case CHIP_VEGAM:
strcpy(fw_name, "amdgpu/vegam_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 663043c8f0f5..0acc8dee2cb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -124,14 +124,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8de55f7f1a3a..74b611e8a1b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -872,7 +872,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Vega 12 */
{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +891,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
/* Raven */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a9f18ea7e354..e4ded890b1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -337,12 +337,19 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6864, &vega10_device_info }, /* Vega10 */
{ 0x6867, &vega10_device_info }, /* Vega10 */
{ 0x6868, &vega10_device_info }, /* Vega10 */
+ { 0x6869, &vega10_device_info }, /* Vega10 */
+ { 0x686A, &vega10_device_info }, /* Vega10 */
+ { 0x686B, &vega10_device_info }, /* Vega10 */
{ 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+ { 0x686D, &vega10_device_info }, /* Vega10 */
+ { 0x686E, &vega10_device_info }, /* Vega10 */
+ { 0x686F, &vega10_device_info }, /* Vega10 */
{ 0x687F, &vega10_device_info }, /* Vega10 */
{ 0x66a0, &vega20_device_info }, /* Vega20 */
{ 0x66a1, &vega20_device_info }, /* Vega20 */
{ 0x66a2, &vega20_device_info }, /* Vega20 */
{ 0x66a3, &vega20_device_info }, /* Vega20 */
+ { 0x66a4, &vega20_device_info }, /* Vega20 */
{ 0x66a7, &vega20_device_info }, /* Vega20 */
{ 0x66af, &vega20_device_info } /* Vega20 */
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3367dd30cdd0..3b7fce5d7258 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
- data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+ data->registry_data.fclk_gfxclk_ratio = 0;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index 62f36ba2435b..c1a99dfe4913 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
+
#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 872d3824337b..a1e0ac9ae248 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1985,6 +1985,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ /* Apply avfs cks-off voltages to avoid the overshoot
+ * when switching to the highest sclk frequency
+ */
+ if (data->apply_avfs_cks_off_voltage)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 99d5e4f98f49..a6edd5df33b0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 481896fb712a..85e6736f0a32 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->bpp = skl_pixel_formats[fmt].bpp;
plane->drm_format = skl_pixel_formats[fmt].drm_format;
} else {
- plane->tiled = !!(val & DISPPLANE_TILED);
+ plane->tiled = val & DISPPLANE_TILED;
fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
plane->bpp = bdw_pixel_formats[fmt].bpp;
plane->drm_format = bdw_pixel_formats[fmt].drm_format;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ffdbbac4400e..47062ee979cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
+ intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9102571e9692..872a2e159a5f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -67,6 +67,7 @@
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
+#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
@@ -1805,6 +1806,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
struct i915_workarounds workarounds;
+ struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@@ -2148,6 +2150,8 @@ struct drm_i915_private {
struct delayed_work idle_work;
ktime_t last_init_time;
+
+ struct i915_vma *scratch;
} gt;
/* perform PHY state sanity checks? */
@@ -3870,4 +3874,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+ return i915_ggtt_offset(i915->gt.scratch);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..6ae9a6080cc8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
}
}
- intel_gt_workarounds_apply(dev_priv);
+ intel_gt_apply_workarounds(dev_priv);
i915_gem_init_swizzling(dev_priv);
@@ -5500,6 +5500,44 @@ err_active:
goto out_ctx;
}
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ i915->gt.scratch = vma;
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+ i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -5546,12 +5584,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_scratch(dev_priv,
+ IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
+ ret = i915_gem_contexts_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_scratch;
+ }
+
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5669,8 @@ err_pm:
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
+err_scratch:
+ i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5722,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_wa_list_free(&dev_priv->gt_wa_list);
+
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d4fac09095f8..1aaccbe7e1de 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
- len = 6;
+ len = 3;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
-
- /* And again for good measure (blb/pnv) */
- *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *batch++ = addr;
- *batch++ = target_offset;
}
goto out;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3eb33e000d6f..db4128d6c09b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1495,7 +1495,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
i915_error_object_create(i915,
- engine->scratch);
+ i915->gt.scratch);
request_record_user_bo(request, ee);
ee->ctx =
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..76b5f94ea6cb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -490,46 +490,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- WARN_ON(engine->scratch);
-
- obj = i915_gem_object_create_stolen(engine->i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- engine->scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
- i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
static void cleanup_status_page(struct intel_engine_cs *engine)
{
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +664,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- intel_engine_cleanup_scratch(engine);
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
__intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
+
+ intel_wa_list_free(&engine->wa_list);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 37c94a54efcb..58d1d3d47dd3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
+ *
+ * Furthermore, Braswell, at least, wants a full mb to be sure that
+ * the writes are coherent in memory (visible to the GPU) prior to
+ * execution, and not just visible to other CPUs (as is the result of
+ * wmb).
*/
- wmb();
+ mb();
return ce->lrc_desc;
}
@@ -1443,9 +1448,10 @@ static int execlists_request_alloc(struct i915_request *request)
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
+ /* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1459,7 +1465,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
return batch;
@@ -1496,7 +1502,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch) +
+ i915_scratch_offset(engine->i915) +
2 * CACHELINE_BYTES);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1573,7 +1579,7 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch)
+ i915_scratch_offset(engine->i915)
+ 2 * CACHELINE_BYTES);
}
@@ -1793,6 +1799,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
+ intel_engine_apply_workarounds(engine);
+
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -2139,7 +2147,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2476,10 +2484,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- goto err_cleanup_common;
-
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2491,11 +2495,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- return 0;
+ intel_engine_init_workarounds(engine);
-err_cleanup_common:
- intel_engine_cleanup_common(engine);
- return ret;
+ return 0;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 187bb0ceb4ac..1f8d2a66c791 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
+ unsigned int num_store_dw;
u32 cmd, *cs;
cmd = MI_FLUSH;
-
+ num_store_dw = 0;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
+ if (mode & EMIT_FLUSH)
+ num_store_dw = 4;
- cs = intel_ring_begin(rq, 2);
+ cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
- *cs++ = MI_NOOP;
+ while (num_store_dw--) {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = 0;
+ }
+ *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+
intel_ring_advance(rq, cs);
return 0;
@@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
@@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -971,7 +975,7 @@ i965_emit_bb_start(struct i915_request *rq,
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
@@ -979,7 +983,9 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+ u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+ GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1437,7 +1443,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
- unsigned int size;
int err;
intel_engine_setup_common(engine);
@@ -1462,21 +1467,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
- size = PAGE_SIZE;
- if (HAS_BROKEN_CS_TLB(engine->i915))
- size = I830_WA_SIZE;
- err = intel_engine_create_scratch(engine, size);
- if (err)
- goto err_unpin;
-
err = intel_engine_init_common(engine);
if (err)
- goto err_scratch;
+ goto err_unpin;
return 0;
-err_scratch:
- intel_engine_cleanup_scratch(engine);
err_unpin:
intel_ring_unpin(ring);
err_ring:
@@ -1550,7 +1546,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1659,7 +1655,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..767a7192c969 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -15,6 +15,7 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
struct drm_printer;
struct i915_sched_attr;
@@ -440,7 +441,7 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- struct i915_vma *scratch;
+ struct i915_wa_list wa_list;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -898,10 +899,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
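
The intel_workarounds.c rework below stops applying workarounds with immediate I915_WRITE()s and instead records them into an i915_wa_list (one per GT in dev_priv->gt_wa_list, one per engine in engine->wa_list) that is replayed later by intel_gt_apply_workarounds() and, from gen8_init_common_ring(), intel_engine_apply_workarounds(), so the same registers can be rewritten after initialization or reset without re-deriving them. The list grows in power-of-two chunks: wal_add() treats a count that is a multiple of the chunk size as "either uninitialized or full" and reallocates. A standalone sketch of that growth strategy, with illustrative names rather than the kernel API:

#include <stdlib.h>
#include <string.h>

struct wa { unsigned int reg, mask, val; };

struct wa_list {
	struct wa *list;
	unsigned int count;
};

/* Append one workaround, growing the array in chunks of 16. When count
 * is a multiple of the chunk size the array is either empty or exactly
 * full, so allocate the next aligned capacity and copy the old entries. */
static int wa_list_add(struct wa_list *wal, const struct wa *wa)
{
	const unsigned int grow = 1 << 4;

	if ((wal->count & (grow - 1)) == 0) {	/* uninitialized or full */
		unsigned int cap = (wal->count + grow) & ~(grow - 1);
		struct wa *list = malloc(cap * sizeof(*list));

		if (!list)
			return -1;
		if (wal->list) {
			memcpy(list, wal->list, wal->count * sizeof(*list));
			free(wal->list);
		}
		wal->list = list;
	}

	wal->list[wal->count++] = *wa;
	return 0;
}

Amortizing the reallocation over chunks of 16 keeps the common wa_write_or()/wa_masked_en() append path to a single array store, which matters since the lists are built register by register across the per-platform init functions that follow.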
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..6e580891db96 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,6 +48,20 @@
* - Public functions to init or apply the given workaround type.
*/
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
+{
+ wal->name = name;
+}
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+ if (!wal->count)
+ return;
+
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+ wal->count, wal->name);
+}
+
static void wa_add(struct drm_i915_private *i915,
i915_reg_t reg, const u32 mask, const u32 val)
{
@@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0;
}
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+ const unsigned int grow = 1 << 4;
+
+ GEM_BUG_ON(!is_power_of_2(grow));
+
+ if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+ struct i915_wa *list;
+
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ GFP_KERNEL);
+ if (!list) {
+ DRM_ERROR("No space for workaround init!\n");
+ return;
+ }
+
+ if (wal->list)
+ memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+ wal->list = list;
+ }
+
+ wal->list[wal->count++] = *wa;
+}
+
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = val,
+ .val = _MASKED_BIT_ENABLE(val)
+ };
+
+ wal_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ wal_add(wal, &wa);
}
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ wa_write_masked_or(wal, reg, ~0, val);
}
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+ wa_write_masked_or(wal, reg, val, val);
+}
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ if (!IS_COFFEELAKE(i915))
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ ECOCHK_DIS_TLB);
- if (HAS_LLC(dev_priv)) {
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- I915_WRITE(MMCD_MISC_CTRL,
- I915_READ(MMCD_MISC_CTRL) |
- MMCD_PCLA |
- MMCD_HOTSPOT_EN);
+ wa_write_or(wal,
+ MMCD_MISC_CTRL,
+ MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
- if (IS_GEN9_LP(dev_priv)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
-
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:skl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+ gen9_gt_workarounds_init(i915);
/* WaInPlaceDecompressionHang:bxt */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- for_each_engine(engine, dev_priv, tmp) {
- if (engine->id == RCS)
- continue;
-
- I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
- }
- }
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ gen9_gt_workarounds_init(i915);
}
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:cfl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:cfl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- u32 mcr;
+ struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- mcr = I915_READ(GEN8_MCR_SELECTOR);
-
if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
@@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
- I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+ wa_write_masked_or(wal,
+ GEN8_MCR_SELECTOR,
+ mcr_slice_subslice_mask,
+ intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* This is not an Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+ wa_init_mcr(i915);
/* WaInPlaceDecompressionHang:icl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaPipelineFlushCoherentLines:icl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* Wa_1405543622:icl
- * Formerly known as WaGAPZPriorityScheme
- */
- I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
- GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
- /* Wa_1604223664:icl
- * Formerly known as WaL3BankAddressHashing
- */
- I915_WRITE(GEN8_GARBCNTL,
- (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
- GEN11_HASH_CTRL_EXCL_BIT0);
- I915_WRITE(GEN11_GLBLINVL,
- (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- I915_WRITE(GEN11_GACB_PERF_CTRL,
- (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
- /* Wa_1405733216:icl
- * Formerly known as WaDisableCleanEvicts
- */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_masked_or(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
- I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
- GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
- GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+ wa_write_or(wal,
+ GEN11_LSN_UNSLCVC,
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
- I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
- GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+ wa_write_or(wal,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- MSCUNIT_CLKGATE_DIS);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
- GWUNIT_CLKGATE_DIS);
-
- /* Wa_1604302699:icl */
- I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
- I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
- GEN11_I2M_WRITE_DISABLE);
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
/* Wa_1406838659:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
- I915_READ(INF_UNIT_LEVEL_CLKGATE) |
- CGPSF_CLKGATE_DIS);
-
- /* WaForwardProgressSoftReset:icl */
- I915_WRITE(GEN10_SCRATCH_LNCF2,
- I915_READ(GEN10_SCRATCH_LNCF2) |
- PMFLUSHDONE_LNICRSDROP |
- PMFLUSH_GAPL3UNBLOCK |
- PMFLUSHDONE_LNEBLK);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ INF_UNIT_LEVEL_CLKGATE,
+ CGPSF_CLKGATE_DIS);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
- if (INTEL_GEN(dev_priv) < 8)
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+
+ if (INTEL_GEN(i915) < 8)
return;
- else if (IS_BROADWELL(dev_priv))
- bdw_gt_workarounds_apply(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_gt_workarounds_apply(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- skl_gt_workarounds_apply(dev_priv);
- else if (IS_BROXTON(dev_priv))
- bxt_gt_workarounds_apply(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- kbl_gt_workarounds_apply(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- glk_gt_workarounds_apply(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- cfl_gt_workarounds_apply(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_gt_workarounds_apply(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- icl_gt_workarounds_apply(dev_priv);
+ else if (IS_BROADWELL(i915))
+ return;
+ else if (IS_CHERRYVIEW(i915))
+ return;
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_gt_workarounds_init(i915);
+ else if (IS_ICELAKE(i915))
+ icl_gt_workarounds_init(i915);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
+
+ wa_init_finish(wal);
+}
+
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw;
+ unsigned long flags;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
+ return;
+
+ fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val = I915_READ_FW(wa->reg);
+
+ val &= ~wa->mask;
+ val |= wa->val;
+
+ I915_WRITE_FW(wa->reg, val);
+ }
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+ wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
}
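
The refactor above converts immediate I915_READ()/I915_WRITE() read-modify-writes into entries recorded on an i915_wa_list that wa_list_apply() later replays in one pass under a single forcewake grab. Below is a minimal standalone model of that record-then-replay pattern; it is plain userspace C, the regs[] array stands in for the uncore mmio space, and the wa_write_or() semantics are inferred from the call sites above rather than taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct wa { uint32_t reg, mask, val; };
struct wa_list { struct wa *list; unsigned int count; };

/* hypothetical 16-register file standing in for the uncore mmio space */
static uint32_t regs[16];

/* record a workaround instead of writing the register immediately */
static void wa_add(struct wa_list *wal, uint32_t reg, uint32_t mask, uint32_t val)
{
	wal->list = realloc(wal->list, (wal->count + 1) * sizeof(*wal->list));
	wal->list[wal->count++] = (struct wa){ reg, mask, val };
}

/* wa_write_or(reg, set) == reg |= set, as inferred from the call sites */
static void wa_write_or(struct wa_list *wal, uint32_t reg, uint32_t set)
{
	wa_add(wal, reg, set, set);
}

/* replay every entry as one read-modify-write pass */
static void wa_list_apply(const struct wa_list *wal)
{
	unsigned int i;

	for (i = 0; i < wal->count; i++) {
		uint32_t v = regs[wal->list[i].reg];

		v &= ~wal->list[i].mask;
		v |= wal->list[i].val;
		regs[wal->list[i].reg] = v;
	}
}

int main(void)
{
	struct wa_list wal = { 0 };

	regs[3] = 0x00f0;
	wa_write_or(&wal, 3, 0x0001);		/* like WaDisableGafsUnitClkGating */
	wa_list_apply(&wal);
	printf("reg3 = %#x\n", regs[3]);	/* 0xf1: old bits kept, new bit set */
	free(wal.list);
	return 0;
}

Recording first and applying later is what lets the same list be replayed on resume or reset without re-deriving the per-platform conditions.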
struct whitelist {
@@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
whitelist_apply(engine, whitelist_build(engine, &w));
}
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (IS_ICELAKE(i915)) {
+ /* This is not a Wa. Enable for better image quality */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+ /* WaPipelineFlushCoherentLines:icl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /*
+ * Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ wa_write_masked_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_masked_or(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+ /*
+ * Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* Wa_1604302699:icl */
+ wa_write_or(wal,
+ GEN10_L3_CHICKEN_MODE_REGISTER,
+ GEN11_I2M_WRITE_DISABLE);
+
+ /* WaForwardProgressSoftReset:icl */
+ wa_write_or(wal,
+ GEN10_SCRATCH_LNCF2,
+ PMFLUSHDONE_LNICRSDROP |
+ PMFLUSH_GAPL3UNBLOCK |
+ PMFLUSHDONE_LNEBLK);
+ }
+
+ if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ wa_masked_en(wal,
+ GEN7_FF_SLICE_CS_CHICKEN1,
+ GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN9_GAPS_TSV_CREDIT_DISABLE);
+ }
+
+ if (IS_BROXTON(i915)) {
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ wa_masked_en(wal,
+ FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ if (IS_GEN9(i915)) {
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal,
+ GEN9_CSFE_CHICKEN1_RCS,
+ GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ wa_write_or(wal,
+ BDW_SCRATCH1,
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_write_masked_or(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+ }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ wa_write(wal,
+ RING_SEMA_WAIT_POLL(engine->mmio_base),
+ 1);
+ }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ wa_init_start(wal, engine->name);
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine);
+ else
+ xcs_engine_wa_init(engine);
+
+ wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+ wa_list_apply(engine->i915, &engine->wa_list);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..979695a53964 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,35 @@
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
+#include <linux/slab.h>
+
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
+
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+ kfree(wal->list);
+ memset(wal, 0, sizeof(*wal));
+}
+
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
int intel_ctx_workarounds_emit(struct i915_request *rq);
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+
#endif
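
intel_wa_list_free() above relies on kfree(NULL) being a no-op and on the memset() returning the struct to its zeroed initial state. A small userspace sketch of the same idiom, assuming free() mirrors kfree() here:

#include <stdlib.h>
#include <string.h>

struct i915_wa { unsigned int reg, mask, val; };
struct i915_wa_list { const char *name; struct i915_wa *list; unsigned int count; };

/* free(NULL) is a no-op, matching kfree(), so freeing a list that was
 * never populated is safe, and the memset() leaves the struct ready
 * for a fresh wa_init_start()/wa_init_finish() cycle. */
static void wa_list_free(struct i915_wa_list *wal)
{
	free(wal->list);
	memset(wal, 0, sizeof(*wal));
}

int main(void)
{
	struct i915_wa_list wal = { 0 };

	wa_list_free(&wal);	/* safe even though nothing was ever added */
	return 0;
}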
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 66df1b177959..27b507eb4a99 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
dsi->encoder.possible_crtcs = 1;
/* If there's a bridge, attach to it and let it create the connector */
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
- if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
-
+ if (dsi->bridge) {
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+ goto err_encoder_cleanup;
+ }
+ } else {
/* Otherwise create our own connector and attach to a panel */
ret = mtk_dsi_create_connector(drm, dsi);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6cbbae3f438b..db1bf7f88c1f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
+static void
+evo_flush(struct nv50_dmac *dmac)
+{
+ /* Push buffer fetches are not coherent with BAR1, so we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+}
+
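
evo_flush() kicks the flush by writing register 0x070000 and then polls bit 1 of the same register until it clears, bounded by the 2-second nvif_msec() timeout. A standalone sketch of that bounded-poll idiom; the wr32()/rd32() fake-register model below is purely illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t flush_reg;
static int reads;

/* fake hardware: a write starts the flush and raises the busy bit */
static void wr32(uint32_t v)
{
	flush_reg = v | 0x00000002;
	reads = 0;
}

/* fake hardware: the busy bit clears after a few polls */
static uint32_t rd32(void)
{
	if (++reads >= 3)
		flush_reg &= ~0x00000002u;
	return flush_reg;
}

/* bounded poll standing in for nvif_msec(device, 2000, ...) */
static bool flush_push_buffer(void)
{
	wr32(0x00000001);
	for (int spins = 0; spins < 2000; spins++)
		if (!(rd32() & 0x00000002))
			return true;	/* flush reached VRAM */
	return false;			/* timed out */
}

int main(void)
{
	printf("flushed: %d\n", flush_push_buffer());
	return 0;
}

Factoring this into a helper lets evo_wait() flush before wrapping the push buffer, not just evo_kick() before updating PUT.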
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
- /* Push buffer fetches are not coherent with BAR1, we need to ensure
- * writes have been flushed right through to VRAM before writing PUT.
- */
- if (dmac->push.type & NVIF_MEM_VRAM) {
- struct nvif_device *device = dmac->base.device;
- nvif_wr32(&device->object, 0x070000, 0x00000001);
- nvif_msec(device, 2000,
- if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
- break;
- );
- }
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
@@ -1264,6 +1271,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
+ drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
kfree(*pmstm);
*pmstm = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2b2baf6e0e0d..d2928d43f29a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1171,10 +1171,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
goto err_free;
}
+ err = nouveau_drm_device_init(drm);
+ if (err)
+ goto err_put;
+
platform_set_drvdata(pdev, drm);
return drm;
+err_put:
+ drm_dev_put(drm);
err_free:
nvkm_device_del(pdevice);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 941f35233b1f..5864cb452c5c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
- rockchip_drm_platform_remove(pdev);
-}
-
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
- .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 61a84b958d67..d7a2dfb8ee9b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -49,6 +49,8 @@
#define VMWGFX_REPO "In Tree"
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
@@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_unlock(&dev_priv->cap_lock);
}
-
+ vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 59f614225bcd..aca974b14b55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -606,6 +606,9 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+ /* Validation memory reservation */
+ struct vmw_validation_mem vvm;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+ size_t gran);
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5a6b70ba137a..f2d13a72c05d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
void *buf)
{
struct vmw_buffer_object *vmw_bo;
- int ret;
struct {
uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
- return ret;
}
@@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+ vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 7b1e5a5cbd2c..f88247046721 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
drm_global_item_unref(&dev_priv->bo_global_ref.ref);
drm_global_item_unref(&dev_priv->mem_global_ref);
}
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+ static struct ttm_operation_ctx ctx = {.interruptible = false,
+ .no_wait_gpu = false};
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
+ * rather than a struct vmw_validation_mem is to make sure the assumption in the
+ * callbacks that struct vmw_private derives from struct vmw_validation_mem
+ * holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+ struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+ vvm->reserve_mem = vmw_vmt_reserve;
+ vvm->unreserve_mem = vmw_vmt_unreserve;
+ vvm->gran = gran;
+}
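
The reservation interface above tops up TTM memory a granule (16 pages) at a time as validation pages are consumed, then releases the whole amount in one bulk unreserve at teardown, matching the accounting added to vmw_validation_mem_alloc() and vmw_validation_mem_free() further down. A standalone model of that batching, with reserve_mem()/unreserve_mem() standing in for the ttm_mem_global calls:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define GRAN (16 * PAGE_SIZE)	/* like VMWGFX_VALIDATION_MEM_GRAN */

static size_t globally_reserved;	/* stands in for ttm_mem_global state */

static int reserve_mem(size_t size)
{
	globally_reserved += size;
	return 0;
}

static void unreserve_mem(size_t size)
{
	globally_reserved -= size;
}

struct ctx {
	size_t vm_size_left;	/* reserved but not yet handed out as pages */
	size_t total_mem;	/* everything reserved for this context */
};

/* consume one page, topping the reservation up a granule at a time */
static int get_page(struct ctx *ctx)
{
	if (ctx->vm_size_left < PAGE_SIZE) {
		if (reserve_mem(GRAN))
			return -1;
		ctx->vm_size_left += GRAN;
		ctx->total_mem += GRAN;
	}
	ctx->vm_size_left -= PAGE_SIZE;
	return 0;
}

/* teardown: one bulk unreserve of everything this context took */
static void put_all(struct ctx *ctx)
{
	if (ctx->total_mem) {
		unreserve_mem(ctx->total_mem);
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

int main(void)
{
	struct ctx ctx = { 0 };
	int i;

	for (i = 0; i < 20; i++)
		get_page(&ctx);
	printf("reserved %zu bytes for 20 pages\n", ctx.total_mem);	/* 2 granules */
	put_all(&ctx);
	return 0;
}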
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 184025fa938e..f116f092e00b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page;
+ if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+ int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+ if (ret)
+ return NULL;
+
+ ctx->vm_size_left += ctx->vm->gran;
+ ctx->total_mem += ctx->vm->gran;
+ }
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
+ if (ctx->vm)
+ ctx->vm_size_left -= PAGE_SIZE;
+
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
+ if (ctx->vm && ctx->total_mem) {
+ ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+ ctx->total_mem = 0;
+ ctx->vm_size_left = 0;
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index b57e3292c386..3b396fea40d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -34,6 +34,21 @@
#include <drm/ttm/ttm_execbuf_util.h>
/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint on how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservation to be done in large batches.
+ */
+struct vmw_validation_mem {
+ int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+ void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+ size_t gran;
+};
+
+/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -47,6 +62,10 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that so far has not been allocated.
+ * @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
+ struct vmw_validation_mem *vm;
+ size_t vm_size_left;
+ size_t total_mem;
};
struct vmw_buffer_object;
@@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+ struct vmw_validation_mem *vm)
+{
+ ctx->vm = vm;
+}
+
+/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ed35c9a9a110..27519eb8ee63 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -17,6 +17,9 @@
#ifndef HID_IDS_H_FILE
#define HID_IDS_H_FILE
+#define USB_VENDOR_ID_258A 0x258a
+#define USB_DEVICE_ID_258A_6A88 0x6a88
+
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
#define USB_DEVICE_ID_3M2256 0x0502
@@ -941,6 +944,10 @@
#define USB_VENDOR_ID_REALTEK 0x0bda
#define USB_DEVICE_ID_REALTEK_READER 0x0152
+#define USB_VENDOR_ID_RETROUSB 0xf000
+#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003
+#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1
+
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 1882a4ab0f29..98b059d79bc8 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index c85a79986b6a..94088c0ed68a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -137,6 +137,8 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 25d43c8f1c2a..558de0b9895c 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
struct net_device *cookie_ndev = cookie;
bool match = false;
+ if (!rdma_ndev)
+ return false;
+
rcu_read_lock();
if (netif_is_bond_master(cookie_ndev) &&
rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9b20479dc710..7e6d70936c63 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12500,7 +12500,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
}
/* allocate space for the counter values */
- dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
+ dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
+ GFP_KERNEL);
if (!dd->cntrs)
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 1401b6ea4a28..2b882347d0c2 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -155,6 +155,8 @@ struct hfi1_ib_stats {
extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;
+extern int num_driver_cntrs;
+
/*
* First-cut criterion for "device is active" is
* two thousand dwords combined Tx, Rx traffic per
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 6f3bc4dab858..1a016248039f 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -340,6 +340,13 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
default:
break;
}
+
+ /*
+ * System latency between send and schedule is large enough that
+ * forcing call_send to true for piothreshold packets is necessary.
+ */
+ if (wqe->length <= piothreshold)
+ *call_send = true;
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 48e11e510358..a365089a9305 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1479,7 +1479,7 @@ static const char * const driver_cntr_names[] = {
static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
static const char **dev_cntr_names;
static const char **port_cntr_names;
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
static int cntr_names_initialized;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 61aab7c0c513..45c421c87100 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1066,7 +1066,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
err = uverbs_get_flags32(&access, attrs,
MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
- IB_ACCESS_SUPPORTED);
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
if (err)
return err;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 2cc3d69ab6f6..4dc6cc640ce0 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -506,14 +506,13 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
+ int npages = 0, current_seq, page_shift, ret, np;
+ bool implicit = false;
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 access_mask = ODP_READ_ALLOWED_BIT;
- int npages = 0, page_shift, np;
u64 start_idx, page_mask;
struct ib_umem_odp *odp;
- int current_seq;
size_t size;
- int ret;
if (!odp_mr->page_list) {
odp = implicit_mr_get_data(mr, io_virt, bcnt);
@@ -521,7 +520,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
if (IS_ERR(odp))
return PTR_ERR(odp);
mr = odp->private;
-
+ implicit = true;
} else {
odp = odp_mr;
}
@@ -600,7 +599,7 @@ next_mr:
out:
if (ret == -EAGAIN) {
- if (mr->parent || !odp->dying) {
+ if (implicit || !odp->dying) {
unsigned long timeout =
msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 5936de71883f..6fc93834da44 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
bool dirty_flag;
*result = true;
+ if (from_cblock(cmd->cache_blocks) == 0)
+ /* Nothing to do */
+ return 0;
+
r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
if (r) {
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 0bd8d498b3b9..dadd9696340c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
struct dm_thin_new_mapping;
/*
- * The pool runs in 4 modes. Ordered in degraded order for comparisons.
+ * The pool runs in various modes. Ordered by increasing degradation for comparisons.
*/
enum pool_mode {
PM_WRITE, /* metadata may be changed */
@@ -282,9 +282,38 @@ struct pool {
mempool_t mapping_pool;
};
-static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
+static enum pool_mode get_pool_mode(struct pool *pool)
+{
+ return pool->pf.mode;
+}
+
+static void notify_of_pool_mode_change(struct pool *pool)
+{
+ const char *descs[] = {
+ "write",
+ "out-of-data-space",
+ "read-only",
+ "read-only",
+ "fail"
+ };
+ const char *extra_desc = NULL;
+ enum pool_mode mode = get_pool_mode(pool);
+
+ if (mode == PM_OUT_OF_DATA_SPACE) {
+ if (!pool->pf.error_if_no_space)
+ extra_desc = " (queue IO)";
+ else
+ extra_desc = " (error IO)";
+ }
+
+ dm_table_event(pool->ti->table);
+ DMINFO("%s: switching pool to %s%s mode",
+ dm_device_name(pool->pool_md),
+ descs[(int)mode], extra_desc ? : "");
+}
+
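
The consolidated notify_of_pool_mode_change() indexes a string table by enum pool_mode and uses the GNU "?:" (elvis) extension for the optional suffix. A plain-C sketch of the same table-driven message; the exact enum ordering between the two read-only modes is assumed from the descs[] array above.

#include <stdio.h>

enum pool_mode {
	PM_WRITE,
	PM_OUT_OF_DATA_SPACE,
	PM_OUT_OF_METADATA_SPACE,
	PM_READ_ONLY,
	PM_FAIL,
};

int main(void)
{
	/* one entry per enum value, in declaration order; both metadata-space
	 * exhaustion and explicit read-only report as "read-only" */
	static const char *const descs[] = {
		"write", "out-of-data-space", "read-only", "read-only", "fail",
	};
	enum pool_mode mode = PM_OUT_OF_DATA_SPACE;
	int error_if_no_space = 0;
	const char *extra = NULL;

	if (mode == PM_OUT_OF_DATA_SPACE)
		extra = error_if_no_space ? " (error IO)" : " (queue IO)";

	/* the driver spells the last operand with GNU "?:"; plain C here */
	printf("switching pool to %s%s mode\n", descs[mode], extra ? extra : "");
	return 0;
}

Keeping one notifier fed by the table, called once at the end of set_pool_mode(), replaces the per-case notify calls scattered through the switch below.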
/*
* Target context for a pool.
*/
@@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws)
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
-
/*
* We're holding onto IO to allow userland time to react. After the
* timeout either the pool will have been resized (and thus back in
@@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws)
if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
pool->pf.error_if_no_space = true;
- notify_of_pool_mode_change_to_oods(pool);
+ notify_of_pool_mode_change(pool);
error_retry_list_with_code(pool, BLK_STS_NOSPC);
}
}
@@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
/*----------------------------------------------------------------*/
-static enum pool_mode get_pool_mode(struct pool *pool)
-{
- return pool->pf.mode;
-}
-
-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
-{
- dm_table_event(pool->ti->table);
- DMINFO("%s: switching pool to %s mode",
- dm_device_name(pool->pool_md), new_mode);
-}
-
-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
-{
- if (!pool->pf.error_if_no_space)
- notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
- else
- notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
-}
-
static bool passdown_enabled(struct pool_c *pt)
{
return pt->adjusted_pf.discard_passdown;
@@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
switch (new_mode) {
case PM_FAIL:
- if (old_mode != new_mode)
- notify_of_pool_mode_change(pool, "failure");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
@@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
- if (!is_read_only_pool_mode(old_mode))
- notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
pool->process_discard = process_bio_success;
@@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* alarming rate. Adjust your low water mark if you're
* frequently seeing this mode.
*/
- if (old_mode != new_mode)
- notify_of_pool_mode_change_to_oods(pool);
pool->out_of_data_space = true;
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard_bio;
@@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
break;
case PM_WRITE:
- if (old_mode != new_mode)
- notify_of_pool_mode_change(pool, "write");
if (old_mode == PM_OUT_OF_DATA_SPACE)
cancel_delayed_work_sync(&pool->no_space_timeout);
pool->out_of_data_space = false;
@@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* doesn't cause an unexpected mode transition on resume.
*/
pt->adjusted_pf.mode = new_mode;
+
+ if (old_mode != new_mode)
+ notify_of_pool_mode_change(pool);
}
static void abort_transaction(struct pool *pool)
@@ -4023,7 +4025,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 20, 0},
+ .version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4397,7 +4399,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 20, 0},
+ .version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 981154e59461..6af5babe6837 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -20,7 +20,6 @@ struct dmz_bioctx {
struct dm_zone *zone;
struct bio *bio;
refcount_t ref;
- blk_status_t status;
};
/*
@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
- if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
- bioctx->status = status;
- bio_endio(bio);
+ if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
+ bio->bi_status = status;
+
+ if (refcount_dec_and_test(&bioctx->ref)) {
+ struct dm_zone *zone = bioctx->zone;
+
+ if (zone) {
+ if (bio->bi_status != BLK_STS_OK &&
+ bio_op(bio) == REQ_OP_WRITE &&
+ dmz_is_seq(zone))
+ set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+ dmz_deactivate_zone(zone);
+ }
+ bio_endio(bio);
+ }
}
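
The rewritten dmz_bio_endio() completes the parent BIO from whichever clone drops the last reference, recording the first error seen, which is what lets the separate dmz_end_io() target callback be removed below. A standalone model of that refcounted completion (C11 atomics; the non-atomic status field is a simplification, the real code stores errors in bio->bi_status):

#include <stdatomic.h>
#include <stdio.h>

/* stands in for struct dmz_bioctx: one context shared by all clones */
struct bioctx {
	atomic_int ref;
	int status;	/* first error wins; the real code uses bio->bi_status */
};

/* every clone completion drops one reference; whichever drop reaches
 * zero ends the parent exactly once, regardless of completion order */
static void bio_endio_ref(struct bioctx *ctx, int status)
{
	if (status && !ctx->status)
		ctx->status = status;
	if (atomic_fetch_sub(&ctx->ref, 1) == 1)
		printf("parent bio done, status=%d\n", ctx->status);
}

int main(void)
{
	struct bioctx ctx = { .status = 0 };

	atomic_init(&ctx.ref, 1);	/* dmz_map() starts with one ref */
	atomic_fetch_add(&ctx.ref, 1);	/* clone 1 submitted */
	atomic_fetch_add(&ctx.ref, 1);	/* clone 2 submitted */
	bio_endio_ref(&ctx, 0);		/* clone 1 completes */
	bio_endio_ref(&ctx, -5);	/* clone 2 fails (-EIO) */
	bio_endio_ref(&ctx, 0);		/* initial ref dropped: parent ends */
	return 0;
}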
/*
- * Partial clone read BIO completion callback. This terminates the
+ * Completion callback for an internally cloned target BIO. This terminates the
* target BIO when there are no more references to its context.
*/
-static void dmz_read_bio_end_io(struct bio *bio)
+static void dmz_clone_endio(struct bio *clone)
{
- struct dmz_bioctx *bioctx = bio->bi_private;
- blk_status_t status = bio->bi_status;
+ struct dmz_bioctx *bioctx = clone->bi_private;
+ blk_status_t status = clone->bi_status;
- bio_put(bio);
+ bio_put(clone);
dmz_bio_endio(bioctx->bio, status);
}
/*
- * Issue a BIO to a zone. The BIO may only partially process the
+ * Issue a clone of a target BIO. The clone may only partially process the
* original target BIO.
*/
-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
- struct bio *bio, sector_t chunk_block,
- unsigned int nr_blocks)
+static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+ struct bio *bio, sector_t chunk_block,
+ unsigned int nr_blocks)
{
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
- sector_t sector;
struct bio *clone;
- /* BIO remap sector */
- sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-
- /* If the read is not partial, there is no need to clone the BIO */
- if (nr_blocks == dmz_bio_blocks(bio)) {
- /* Setup and submit the BIO */
- bio->bi_iter.bi_sector = sector;
- refcount_inc(&bioctx->ref);
- generic_make_request(bio);
- return 0;
- }
-
- /* Partial BIO: we need to clone the BIO */
clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
- /* Setup the clone */
- clone->bi_iter.bi_sector = sector;
+ bio_set_dev(clone, dmz->dev->bdev);
+ clone->bi_iter.bi_sector =
+ dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
- clone->bi_end_io = dmz_read_bio_end_io;
+ clone->bi_end_io = dmz_clone_endio;
clone->bi_private = bioctx;
bio_advance(bio, clone->bi_iter.bi_size);
- /* Submit the clone */
refcount_inc(&bioctx->ref);
generic_make_request(clone);
+ if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+ zone->wp_block += nr_blocks;
+
return 0;
}
@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks) {
/* Valid blocks found: read them */
nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
- ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+ ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
if (ret)
return ret;
chunk_block += nr_blocks;
@@ -229,25 +229,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
}
/*
- * Issue a write BIO to a zone.
- */
-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
- struct bio *bio, sector_t chunk_block,
- unsigned int nr_blocks)
-{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
- /* Setup and submit the BIO */
- bio_set_dev(bio, dmz->dev->bdev);
- bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
- refcount_inc(&bioctx->ref);
- generic_make_request(bio);
-
- if (dmz_is_seq(zone))
- zone->wp_block += nr_blocks;
-}
-
-/*
* Write blocks directly in a data zone, at the write pointer.
* If a buffer zone is assigned, invalidate the blocks written
* in place.
@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
return -EROFS;
/* Submit write */
- dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
+ ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
+ if (ret)
+ return ret;
/*
* Validate the blocks in the data zone and invalidate
@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
return -EROFS;
/* Submit write */
- dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+ ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+ if (ret)
+ return ret;
/*
* Validate the blocks in the buffer zone
@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->zone = NULL;
bioctx->bio = bio;
refcount_set(&bioctx->ref, 1);
- bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -624,35 +608,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
}
/*
- * Completed target BIO processing.
- */
-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
-{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
- if (bioctx->status == BLK_STS_OK && *error)
- bioctx->status = *error;
-
- if (!refcount_dec_and_test(&bioctx->ref))
- return DM_ENDIO_INCOMPLETE;
-
- /* Done */
- bio->bi_status = bioctx->status;
-
- if (bioctx->zone) {
- struct dm_zone *zone = bioctx->zone;
-
- if (*error && bio_op(bio) == REQ_OP_WRITE) {
- if (dmz_is_seq(zone))
- set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
- }
- dmz_deactivate_zone(zone);
- }
-
- return DM_ENDIO_DONE;
-}
-
-/*
* Get zoned device information.
*/
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
@@ -946,7 +901,6 @@ static struct target_type dmz_type = {
.ctr = dmz_ctr,
.dtr = dmz_dtr,
.map = dmz_map,
- .end_io = dmz_end_io,
.io_hints = dmz_io_hints,
.prepare_ioctl = dmz_prepare_ioctl,
.postsuspend = dmz_suspend,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c510179a7f84..63a7c416b224 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1593,6 +1593,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
return ret;
}
+ blk_queue_split(md->queue, &bio);
+
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8add62a18293..102eb35fcf3f 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -110,6 +110,19 @@ config MEDIA_CONTROLLER_DVB
This is currently experimental.
+config MEDIA_CONTROLLER_REQUEST_API
+ bool "Enable Media controller Request API (EXPERIMENTAL)"
+ depends on MEDIA_CONTROLLER && STAGING_MEDIA
+ default n
+ ---help---
+ DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+
+ This option enables the Request API for the Media controller and V4L2
+ interfaces. It is currently needed by a few stateless codec drivers.
+
+ There is currently no intention to provide API or ABI stability for
+ this new API.
+
#
# Video4Linux support
# Only enables if one of the V4L2 types (ATV, webcam, radio) is selected
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 975ff5669f72..8ff8722cb6b1 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -947,7 +947,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
}
atomic_dec(&q->owned_by_drv_count);
- if (vb->req_obj.req) {
+ if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
/* This is not supported at the moment */
WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
media_request_object_unbind(&vb->req_obj);
@@ -1359,8 +1359,12 @@ static void vb2_req_release(struct media_request_object *obj)
{
struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
- if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+ if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
vb->state = VB2_BUF_STATE_DEQUEUED;
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
+ }
}
static const struct media_request_object_ops vb2_core_req_ops = {
@@ -1528,6 +1532,18 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
return ret;
vb->state = VB2_BUF_STATE_IN_REQUEST;
+
+ /*
+ * Increment the refcount and store the request.
+ * The request refcount is decremented again when the
+ * buffer is dequeued. This is to prevent vb2_buffer_done()
+ * from freeing the request from interrupt context, which can
+ * happen if the application closed the request fd after
+ * queueing the request.
+ */
+ media_request_get(req);
+ vb->request = req;
+
/* Fill buffer information for the userspace */
if (pb) {
call_void_bufop(q, copy_timestamp, vb, pb);
@@ -1749,10 +1765,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
vb->planes[i].dbuf_mapped = 0;
}
- if (vb->req_obj.req) {
- media_request_object_unbind(&vb->req_obj);
- media_request_object_put(&vb->req_obj);
- }
call_void_bufop(q, init_buffer, vb);
}
@@ -1797,6 +1809,14 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
/* go back to dequeued state */
__vb2_dqbuf(vb);
+ if (WARN_ON(vb->req_obj.req)) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
+
dprintk(2, "dqbuf of buffer %d, with state %d\n",
vb->index, vb->state);
@@ -1903,6 +1923,14 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
vb->prepared = false;
}
__vb2_dqbuf(vb);
+
+ if (vb->req_obj.req) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
}
}
@@ -1940,10 +1968,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
if (ret)
return ret;
ret = vb2_start_streaming(q);
- if (ret) {
- __vb2_queue_cancel(q);
+ if (ret)
return ret;
- }
}
q->streaming = 1;
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index a17033ab2c22..1d35aeabfd85 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -333,10 +333,10 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
- struct v4l2_buffer *b,
- const char *opname,
+ struct v4l2_buffer *b, bool is_prepare,
struct media_request **p_req)
{
+ const char *opname = is_prepare ? "prepare_buf" : "qbuf";
struct media_request *req;
struct vb2_v4l2_buffer *vbuf;
struct vb2_buffer *vb;
@@ -378,6 +378,9 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
return ret;
}
+ if (is_prepare)
+ return 0;
+
if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
if (q->uses_requests) {
dprintk(1, "%s: queue uses requests\n", opname);
@@ -631,8 +634,10 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
if (q->io_modes & VB2_DMABUF)
*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
+#endif
}
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
@@ -657,7 +662,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
return -EINVAL;
- ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
@@ -729,7 +734,7 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
return -EBUSY;
}
- ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
if (ret)
return ret;
ret = vb2_core_qbuf(q, b->index, b, req);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index bed24372e61f..b8ec88612df7 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -381,10 +381,14 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
static long media_device_request_alloc(struct media_device *mdev,
int *alloc_fd)
{
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
return -ENOTTY;
return media_request_alloc(mdev, alloc_fd);
+#else
+ return -ENOTTY;
+#endif
}
static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 013cdebecbc4..13fb69c58967 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -997,11 +997,18 @@ static int vicodec_start_streaming(struct vb2_queue *q,
q_data->sequence = 0;
- if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (!ctx->is_enc) {
+ state->width = q_data->width;
+ state->height = q_data->height;
+ }
return 0;
+ }
- state->width = q_data->width;
- state->height = q_data->height;
+ if (ctx->is_enc) {
+ state->width = q_data->width;
+ state->height = q_data->height;
+ }
state->ref_frame.width = state->ref_frame.height = 0;
state->ref_frame.luma = kvmalloc(size + 2 * size / chroma_div,
GFP_KERNEL);
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index dcdc80e272c2..9acc709b0740 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -276,8 +276,6 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_sdr_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index 903cebeb5ce5..d666271bdaed 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -204,8 +204,6 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 9357c07e30d6..cd56476902a2 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -96,8 +96,6 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_vbi_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 9c8e8be81ce3..673772cd17d6 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -243,8 +243,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index aaf13f03d5d4..628eae154ee7 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -162,8 +162,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_vid_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 0b18f0bd7419..8b0a26335d70 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -95,7 +95,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
LIF_PAD_SOURCE);
- switch (entity->vsp1->version & VI6_IP_VERSION_SOC_MASK) {
+ switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) {
case VI6_IP_VERSION_MODEL_VSPD_GEN2:
case VI6_IP_VERSION_MODEL_VSPD_V2H:
hbth = 1536;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 5f2b033a7a42..10b8d94edbef 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1563,7 +1563,7 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
u64 offset;
s64 val;
- switch (ctrl->type) {
+ switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER:
return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
case V4L2_CTRL_TYPE_INTEGER64:
@@ -2232,7 +2232,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
is_array = nr_of_dims > 0;
/* Prefill elem_size for all types handled by std_type_ops */
- switch (type) {
+ switch ((u32)type) {
case V4L2_CTRL_TYPE_INTEGER64:
elem_size = sizeof(s64);
break;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index c35b5b08bb33..111934838da2 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -472,7 +472,7 @@ out:
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
- struct mmc_command cmd = {};
+ struct mmc_command cmd = {}, sbc = {};
struct mmc_data data = {};
struct mmc_request mrq = {};
struct scatterlist sg;
@@ -550,10 +550,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
if (idata->rpmb) {
- err = mmc_set_blockcount(card, data.blocks,
- idata->ic.write_flag & (1 << 31));
- if (err)
- return err;
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ /*
+ * We don't do any blockcount validation because the max size
+ * may be increased by a future standard. We just copy the
+ * 'Reliable Write' bit here.
+ */
+ sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ mrq.sbc = &sbc;
}
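
The RPMB path now packs the block count and the 'Reliable Write' flag (bit 31 of the ioctl write_flag) into the CMD23 argument and lets the core issue it as mrq.sbc, instead of making a separate mmc_set_blockcount() call. The argument packing on its own:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* CMD23 argument: block count in the low bits, Reliable Write in bit 31 */
static unsigned int sbc_arg(unsigned int blocks, unsigned int write_flag)
{
	return blocks | (write_flag & BIT(31));
}

int main(void)
{
	printf("%#x\n", sbc_arg(1, 0));		/* plain single-block access */
	printf("%#x\n", sbc_arg(1, BIT(31)));	/* reliable write requested */
	return 0;
}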
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bc1bd2c25613..55997cf84b39 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -30,6 +30,7 @@
#include "pwrseq.h"
#define DEFAULT_CMD6_TIMEOUT_MS 500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->cid.year += 16;
/* check whether the eMMC card supports BKOPS */
- if (!mmc_card_broken_hpi(card) &&
- ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
card->ext_csd.bkops = 1;
card->ext_csd.man_bkops_en =
(ext_csd[EXT_CSD_BKOPS_EN] &
@@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (err) {
pr_warn("%s: Enabling HPI failed\n",
mmc_hostname(card->host));
+ card->ext_csd.hpi_en = 0;
err = 0;
- } else
+ } else {
card->ext_csd.hpi_en = 1;
+ }
}
/*
- * If cache size is higher than 0, this indicates
- * the existence of cache and it can be turned on.
+ * If cache size is higher than 0, this indicates the existence of cache
+ * and it can be turned on. Note that some eMMCs from Micron have been
+ * reported to need an ~800 ms timeout while enabling the cache after
+ * sudden power failure tests. Let's extend the timeout to a minimum of
+ * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
*/
- if (!mmc_card_broken_hpi(card) &&
- card->ext_csd.cache_size > 0) {
+ if (card->ext_csd.cache_size > 0) {
+ unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+ timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, 1,
- card->ext_csd.generic_cmd6_time);
+ EXT_CSD_CACHE_CTRL, 1, timeout_ms);
if (err && err != -EBADMSG)
goto free_card;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index adf32682f27a..c60a7625b1fa 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -104,6 +104,7 @@ struct mmc_omap_slot {
unsigned int vdd;
u16 saved_con;
u16 bus_mode;
+ u16 power_mode;
unsigned int fclk_freq;
struct tasklet_struct cover_tasklet;
@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct mmc_omap_slot *slot = mmc_priv(mmc);
struct mmc_omap_host *host = slot->host;
int i, dsor;
- int clk_enabled;
+ int clk_enabled, init_stream;
mmc_omap_select_slot(slot, 0);
@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
slot->vdd = ios->vdd;
clk_enabled = 0;
+ init_stream = 0;
switch (ios->power_mode) {
case MMC_POWER_OFF:
mmc_omap_set_power(slot, 0, ios->vdd);
@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_UP:
/* Cannot touch dsor yet, just power up MMC */
mmc_omap_set_power(slot, 1, ios->vdd);
+ slot->power_mode = ios->power_mode;
goto exit;
case MMC_POWER_ON:
mmc_omap_fclk_enable(host, 1);
clk_enabled = 1;
dsor |= 1 << 11;
+ if (slot->power_mode != MMC_POWER_ON)
+ init_stream = 1;
break;
}
+ slot->power_mode = ios->power_mode;
if (slot->bus_mode != ios->bus_mode) {
if (slot->pdata->set_bus_mode != NULL)
@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
for (i = 0; i < 2; i++)
OMAP_MMC_WRITE(host, CON, dsor);
slot->saved_con = dsor;
- if (ios->power_mode == MMC_POWER_ON) {
+ if (init_stream) {
/* worst case at 400kHz, 80 cycles makes 200 microsecs */
int usecs = 250;
@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->host = host;
slot->mmc = mmc;
slot->id = id;
+ slot->power_mode = MMC_POWER_UNDEFINED;
slot->pdata = &host->pdata->slots[id];
host->slots[id] = slot;
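The omap hunks above replace a level check (ios->power_mode == MMC_POWER_ON, which is true on every set_ios() call while powered) with an edge check, by caching the previous power mode in the slot; probe seeds it with MMC_POWER_UNDEFINED so the first power-on still counts as a transition. The idiom in isolation — send_init_stream() is a hypothetical stand-in for the init-stream write:

	static void slot_set_power(struct mmc_omap_slot *slot, unsigned char mode)
	{
		/* Edge, not level: act only if we were not already ON. */
		bool entering_on = mode == MMC_POWER_ON &&
				   slot->power_mode != MMC_POWER_ON;

		slot->power_mode = mode;	/* remember for the next call */

		if (entering_on)
			send_init_stream(slot);
	}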
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 467d889a1638..3f4ea8f624be 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1909,7 +1909,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- mmc->max_seg_size = mmc->max_req_size;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -1939,6 +1938,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
goto err_irq;
}
+ /*
+ * Limit the maximum segment size to the lower of the request size
+ * and the DMA engine device segment size limits. In reality, with
+ * 32-bit transfers, the DMA engine can do longer segments than this
+ * but there is no way to represent that in the DMA model - if we
+ * increase this figure here, we get warnings from the DMA API debug.
+ */
+ mmc->max_seg_size = min3(mmc->max_req_size,
+ dma_get_max_seg_size(host->rx_chan->device->dev),
+ dma_get_max_seg_size(host->tx_chan->device->dev));
+
/* Request IRQ for MMC operations */
ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
mmc_hostname(mmc), host);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 88347ce78f23..d264391616f9 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct device *dev = omap_host->dev;
struct mmc_ios *ios = &mmc->ios;
u32 start_window = 0, max_window = 0;
+ bool dcrc_was_enabled = false;
u8 cur_match, prev_match = 0;
u32 length = 0, max_len = 0;
- u32 ier = host->ier;
u32 phase_delay = 0;
int ret = 0;
u32 reg;
@@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
* during the tuning procedure. So disable it during the
* tuning procedure.
*/
- ier &= ~SDHCI_INT_DATA_CRC;
- sdhci_writel(host, ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+ if (host->ier & SDHCI_INT_DATA_CRC) {
+ host->ier &= ~SDHCI_INT_DATA_CRC;
+ dcrc_was_enabled = true;
+ }
while (phase_delay <= MAX_PHASE_DELAY) {
sdhci_omap_set_dll(omap_host, phase_delay);
@@ -366,6 +367,9 @@ tuning_error:
ret:
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ /* Reenable forbidden interrupt */
+ if (dcrc_was_enabled)
+ host->ier |= SDHCI_INT_DATA_CRC;
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
return ret;
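The tuning fix above also changes where the masking happens: the old code wrote a doctored local copy of the mask to the registers while host->ier kept claiming DATA CRC was enabled, so any path rewriting host->ier mid-tuning would silently re-arm the interrupt, and the unconditional restore at the end could enable it even on hosts that never had it on. Keeping host->ier itself as the single source of truth avoids both. The save/mask/restore shape, condensed from the hunks:

	bool dcrc_was_enabled = false;

	if (host->ier & SDHCI_INT_DATA_CRC) {	/* mask only if currently on */
		host->ier &= ~SDHCI_INT_DATA_CRC;
		dcrc_was_enabled = true;
	}

	/* ... tuning runs with DATA CRC reporting suppressed ... */

	if (dcrc_was_enabled)			/* restore only what we took */
		host->ier |= SDHCI_INT_DATA_CRC;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);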
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 7b95d088fdef..e6ace31e2a41 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -510,25 +510,25 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
- &autocal->pull_up_3v3);
+ &autocal->pull_up_3v3_timeout);
if (err)
autocal->pull_up_3v3_timeout = 0;
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
- &autocal->pull_down_3v3);
+ &autocal->pull_down_3v3_timeout);
if (err)
autocal->pull_down_3v3_timeout = 0;
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
- &autocal->pull_up_1v8);
+ &autocal->pull_up_1v8_timeout);
if (err)
autocal->pull_up_1v8_timeout = 0;
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
- &autocal->pull_down_1v8);
+ &autocal->pull_down_1v8_timeout);
if (err)
autocal->pull_down_1v8_timeout = 0;
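The tegra hunks above fix a classic copy-paste slip: four property reads landed in the non-timeout fields, so the *_timeout members were only ever set by the error fallback. One way to make that class of bug structurally impossible is table-driven parsing; a sketch under assumed names, where struct tegra_autocal stands in for the driver's real autocal struct:

	struct tegra_autocal {
		u32 pull_up_3v3_timeout;
		u32 pull_down_3v3_timeout;
		/* ... */
	};

	static const struct {
		const char *name;
		size_t off;
	} autocal_props[] = {
		{ "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
		  offsetof(struct tegra_autocal, pull_up_3v3_timeout) },
		{ "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
		  offsetof(struct tegra_autocal, pull_down_3v3_timeout) },
		/* ... one row per property/field pair ... */
	};

	static void parse_autocal(struct device *dev, struct tegra_autocal *ac)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(autocal_props); i++) {
			u32 *field = (u32 *)((u8 *)ac + autocal_props[i].off);

			if (device_property_read_u32(dev, autocal_props[i].name,
						     field))
				*field = 0;	/* absent property: default 0 */
		}
	}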
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 99bdae53fa2e..df05352b6a4a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -127,12 +127,12 @@ static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
u16 ctrl2;
- ctrl2 = sdhci_readb(host, SDHCI_HOST_CONTROL2);
+ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (ctrl2 & SDHCI_CTRL_V4_MODE)
return;
ctrl2 |= SDHCI_CTRL_V4_MODE;
- sdhci_writeb(host, ctrl2, SDHCI_HOST_CONTROL);
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}
/*
@@ -216,8 +216,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
timeout = ktime_add_ms(ktime_get(), 100);
/* hw clears the bit when it's done */
- while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
- if (ktime_after(ktime_get(), timeout)) {
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
+ break;
+ if (timedout) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
sdhci_dumpregs(host);
@@ -1608,9 +1612,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
/* Wait max 20 ms */
timeout = ktime_add_ms(ktime_get(), 20);
- while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
- & SDHCI_CLOCK_INT_STABLE)) {
- if (ktime_after(ktime_get(), timeout)) {
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_INT_STABLE)
+ break;
+ if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
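Both sdhci loops above are rewritten to the same shape for a reason: sampling the deadline before testing the hardware condition means a thread preempted for longer than the timeout still reports success if the condition did come true in the meantime; the old shape (test condition, then test clock) could declare a spurious timeout. The pattern in isolation, with hardware_done() as a hypothetical readiness check:

	ktime_t timeout = ktime_add_ms(ktime_get(), 100);

	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (hardware_done(host))
			break;			/* late success is still success */
		if (timedout)
			return -ETIMEDOUT;	/* fail only if still not done */
		udelay(10);
	}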
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index e05d4eddc935..24fb6a685039 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1124,7 +1124,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
u16 *p = _p;
int i;
- regs->version = 0;
+ regs->version = chip->info->prod_num;
memset(p, 0xff, 32 * sizeof(u16));
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3b889efddf78..50dd6bf176d0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -29,9 +29,6 @@
#define RES_RING_CSR 1
#define RES_RING_CMD 2
-static const struct of_device_id xgene_enet_of_match[];
-static const struct acpi_device_id xgene_enet_acpi_match[];
-
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0de487a8f0eb..5cd3135dfe30 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1282,6 +1282,7 @@ enum sp_rtnl_flag {
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_GET_DRV_VERSION,
BNX2X_SP_RTNL_CHANGE_UDP_PORT,
+ BNX2X_SP_RTNL_UPDATE_SVID,
};
enum bnx2x_iov_flag {
@@ -2520,6 +2521,7 @@ void bnx2x_update_mfw_dump(struct bnx2x *bp);
void bnx2x_init_ptp(struct bnx2x *bp);
int bnx2x_configure_ptp_filters(struct bnx2x *bp);
void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+void bnx2x_register_phc(struct bnx2x *bp);
#define BNX2X_MAX_PHC_DRIFT 31000000
#define BNX2X_PTP_TX_TIMEOUT
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 686899d7e555..ecb1bd7eb508 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2842,6 +2842,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_set_rx_mode_inner(bp);
if (bp->flags & PTP_SUPPORTED) {
+ bnx2x_register_phc(bp);
bnx2x_init_ptp(bp);
bnx2x_configure_ptp_filters(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 95309b27c7d1..b164f705709d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
int func = BP_ABS_FUNC(bp);
u32 val;
@@ -4311,7 +4315,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
bnx2x_handle_eee_event(bp);
if (val & DRV_STATUS_OEM_UPDATE_SVID)
- bnx2x_handle_update_svid_cmd(bp);
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_UPDATE_SVID, 0);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
@@ -7723,6 +7728,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
REG_WR(bp, reg_addr, val);
}
+ if (CHIP_IS_E3B0(bp))
+ bp->flags |= PTP_SUPPORTED;
+
return 0;
}
@@ -8472,6 +8480,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
/* Fill a user request section if needed */
if (!test_bit(RAMROD_CONT, ramrod_flags)) {
ramrod_param.user_req.u.vlan.vlan = vlan;
+ __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
/* Set the command: ADD or DEL */
if (set)
ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8492,6 +8501,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
return rc;
}
+static int bnx2x_del_all_vlans(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+ unsigned long ramrod_flags = 0, vlan_flags = 0;
+ struct bnx2x_vlan_entry *vlan;
+ int rc;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ __set_bit(BNX2X_VLAN, &vlan_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
+ if (rc)
+ return rc;
+
+ /* Mark that hw forgot all entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ return 0;
+}
+
int bnx2x_del_all_macs(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
int mac_type, bool wait_for_comp)
@@ -9330,6 +9360,11 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
rc);
+ /* Remove all currently configured VLANs */
+ rc = bnx2x_del_all_vlans(bp);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all VLANs\n");
+
/* Disable LLH */
if (!CHIP_IS_E1(bp))
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -9417,8 +9452,13 @@ unload_error:
* function stop ramrod is sent, since as part of this ramrod FW access
* PTP registers.
*/
- if (bp->flags & PTP_SUPPORTED)
+ if (bp->flags & PTP_SUPPORTED) {
bnx2x_stop_ptp(bp);
+ if (bp->ptp_clock) {
+ ptp_clock_unregister(bp->ptp_clock);
+ bp->ptp_clock = NULL;
+ }
+ }
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
@@ -10359,6 +10399,9 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
+ bnx2x_handle_update_svid_cmd(bp);
+
if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
&bp->sp_rtnl_state)) {
if (bnx2x_udp_port_update(bp)) {
@@ -11750,8 +11793,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
* If maximum allowed number of connections is zero -
* disable the feature.
*/
- if (!bp->cnic_eth_dev.max_fcoe_conn)
+ if (!bp->cnic_eth_dev.max_fcoe_conn) {
bp->flags |= NO_FCOE_FLAG;
+ eth_zero_addr(bp->fip_mac);
+ }
}
static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -12494,9 +12539,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->dump_preset_idx = 1;
- if (CHIP_IS_E3B0(bp))
- bp->flags |= PTP_SUPPORTED;
-
return rc;
}
@@ -13024,13 +13066,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
{
- struct bnx2x_vlan_entry *vlan;
-
- /* The hw forgot all entries after reload */
- list_for_each_entry(vlan, &bp->vlan_reg, link)
- vlan->hw = false;
- bp->vlan_cnt = 0;
-
/* Don't set rx mode here. Our caller will do it. */
bnx2x_vlan_configure(bp, false);
@@ -13895,7 +13930,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
return -ENOTSUPP;
}
-static void bnx2x_register_phc(struct bnx2x *bp)
+void bnx2x_register_phc(struct bnx2x *bp)
{
/* Fill the ptp_clock_info struct and register PTP clock*/
bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14097,8 +14132,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev->base_addr, bp->pdev->irq, dev->dev_addr);
pcie_print_link_status(bp->pdev);
- bnx2x_register_phc(bp);
-
if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
@@ -14131,11 +14164,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
struct bnx2x *bp,
bool remove_netdev)
{
- if (bp->ptp_clock) {
- ptp_clock_unregister(bp->ptp_clock);
- bp->ptp_clock = NULL;
- }
-
/* Delete storage MAC address */
if (!NO_FCOE(bp)) {
rtnl_lock();
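The svid rework above is the standard move-to-process-context pattern: the attention handler runs where sleeping is forbidden, but the switch-update ramrod now sets RAMROD_COMP_WAIT and must wait for completion, so the handler merely sets a flag bit and the sp_rtnl worker does the real work. In outline, with hypothetical names (a struct priv holding an unsigned long sp_rtnl_state and a struct delayed_work sp_rtnl_task):

	/* atomic context: mark the work and kick the worker */
	static void on_attention(struct priv *p)
	{
		set_bit(DO_UPDATE_SVID, &p->sp_rtnl_state);
		schedule_delayed_work(&p->sp_rtnl_task, 0);
	}

	/* process context: sleeping, rtnl, completion waits are all fine */
	static void sp_rtnl_task_fn(struct work_struct *work)
	{
		struct priv *p = container_of(work, struct priv,
					      sp_rtnl_task.work);

		if (test_and_clear_bit(DO_UPDATE_SVID, &p->sp_rtnl_state))
			handle_update_svid(p);
	}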
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 0bf2fd470819..7a6e82db4231 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -265,6 +265,7 @@ enum {
BNX2X_ETH_MAC,
BNX2X_ISCSI_ETH_MAC,
BNX2X_NETQ_ETH_MAC,
+ BNX2X_VLAN,
BNX2X_DONT_CONSUME_CAM_CREDIT,
BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
@@ -272,7 +273,8 @@ enum {
#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
1 << BNX2X_ETH_MAC | \
1 << BNX2X_ISCSI_ETH_MAC | \
- 1 << BNX2X_NETQ_ETH_MAC)
+ 1 << BNX2X_NETQ_ETH_MAC | \
+ 1 << BNX2X_VLAN)
#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
((flags) & BNX2X_VLAN_MAC_CMP_MASK)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6cc69a58478a..6b51f4de6017 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2572,6 +2572,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static int bnxt_run_loopback(struct bnxt *bp)
{
struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
struct bnxt_cp_ring_info *cpr;
int pkt_size, i = 0;
struct sk_buff *skb;
@@ -2579,7 +2580,9 @@ static int bnxt_run_loopback(struct bnxt *bp)
u8 *data;
int rc;
- cpr = &txr->bnapi->cp_ring;
+ cpr = &rxr->bnapi->cp_ring;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1d86b4d5645a..4c816e5a841f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -61,7 +61,8 @@
#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
| MACB_BIT(ISR_RLE) \
| MACB_BIT(TXERR))
-#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
+ | MACB_BIT(TXUBR))
/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN 8
@@ -680,6 +681,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr);
+ /* The low bits of RX address contain the RX_USED bit, clearing
+ * of which allows packet RX. Make sure the high bits are also
+ * visible to HW at that point.
+ */
+ dma_wmb();
}
#endif
desc->addr = lower_32_bits(addr);
@@ -928,14 +934,19 @@ static void gem_rx_refill(struct macb_queue *queue)
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
- macb_set_addr(bp, desc, paddr);
desc->ctrl = 0;
+ /* Setting addr clears RX_USED and allows reception,
+ * make sure ctrl is cleared first to avoid a race.
+ */
+ dma_wmb();
+ macb_set_addr(bp, desc, paddr);
/* properly align Ethernet header */
skb_reserve(skb, NET_IP_ALIGN);
} else {
- desc->addr &= ~MACB_BIT(RX_USED);
desc->ctrl = 0;
+ dma_wmb();
+ desc->addr &= ~MACB_BIT(RX_USED);
}
}
@@ -989,11 +1000,15 @@ static int gem_rx(struct macb_queue *queue, int budget)
rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
addr = macb_get_addr(bp, desc);
- ctrl = desc->ctrl;
if (!rxused)
break;
+ /* Ensure ctrl is at least as up-to-date as rxused */
+ dma_rmb();
+
+ ctrl = desc->ctrl;
+
queue->rx_tail++;
count++;
@@ -1168,11 +1183,14 @@ static int macb_rx(struct macb_queue *queue, int budget)
/* Make hw descriptor updates visible to CPU */
rmb();
- ctrl = desc->ctrl;
-
if (!(desc->addr & MACB_BIT(RX_USED)))
break;
+ /* Ensure ctrl is at least as up-to-date as addr */
+ dma_rmb();
+
+ ctrl = desc->ctrl;
+
if (ctrl & MACB_BIT(RX_SOF)) {
if (first_frag != -1)
discard_partial_frame(queue, first_frag, tail);
@@ -1312,6 +1330,21 @@ static void macb_hresp_error_task(unsigned long data)
netif_tx_start_all_queues(dev);
}
+static void macb_tx_restart(struct macb_queue *queue)
+{
+ unsigned int head = queue->tx_head;
+ unsigned int tail = queue->tx_tail;
+ struct macb *bp = queue->bp;
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TXUBR));
+
+ if (head == tail)
+ return;
+
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+}
+
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1369,6 +1402,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(TCOMP))
macb_tx_interrupt(queue);
+ if (status & MACB_BIT(TXUBR))
+ macb_tx_restart(queue);
+
/* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
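All the macb hunks above enforce one ownership protocol on the shared rx descriptors: hardware owns a descriptor from the moment RX_USED is cleared in the addr word, so the producer must make every other field visible first (dma_wmb() before the addr write), and the consumer must not read ctrl until after it has seen RX_USED set (dma_rmb() between the two loads). Condensed from the hunks:

	/* producer: publish a descriptor to hardware */
	desc->ctrl = 0;				/* scrub stale status first */
	desc_64->addrh = upper_32_bits(paddr);	/* 64-bit high word, if used */
	dma_wmb();				/* ctrl/addrh visible before addr */
	desc->addr = lower_32_bits(paddr);	/* clears RX_USED: hw owns it */

	/* consumer: reclaim a descriptor from hardware */
	if (desc->addr & MACB_BIT(RX_USED)) {
		dma_rmb();			/* ctrl at least as new as addr */
		ctrl = desc->ctrl;
	}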
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index cd5296b84229..a6dc47edc4cf 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -319,6 +319,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
desc_ptp = macb_ptp_desc(queue->bp, desc);
tx_timestamp = &queue->tx_timestamps[head];
tx_timestamp->skb = skb;
+ /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
+ dma_rmb();
tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
/* move head */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f152da1ce046..c62a0c830705 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1453,6 +1453,9 @@ struct cpl_tx_data {
#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S)
#define T6_TX_FORCE_F T6_TX_FORCE_V(1U)
+#define TX_URG_S 16
+#define TX_URG_V(x) ((x) << TX_URG_S)
+
#define TX_SHOVE_S 14
#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b52029e26d15..ad1779fc410e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -379,6 +379,9 @@ static void hns_ae_stop(struct hnae_handle *handle)
hns_ae_ring_enable_all(handle, 0);
+ /* clean rx fbd. */
+ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
+
(void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index aaf72c055711..1790cdafd9b8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
struct mac_driver *drv = (struct mac_driver *)mac_drv;
/*enable GE rX/tX */
- if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
- if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+ /* enable rx pcs */
+ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+ }
}
static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
@@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
struct mac_driver *drv = (struct mac_driver *)mac_drv;
/*disable GE rX/tX */
- if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
- if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+ /* disable rx pcs */
+ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+ }
}
/* hns_gmac_get_en - get port enable
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 3613e400e816..a97228c93831 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -778,6 +778,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
return rc;
}
+static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
+{
+ if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
+ return;
+
+ phy_device_remove(mac_cb->phy_dev);
+ phy_device_free(mac_cb->phy_dev);
+
+ mac_cb->phy_dev = NULL;
+}
+
#define MAC_MEDIA_TYPE_MAX_LEN 16
static const struct {
@@ -1117,7 +1128,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
for (i = 0; i < max_port_num; i++) {
+ if (!dsaf_dev->mac_cb[i])
+ continue;
+
dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+ hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
dsaf_dev->mac_cb[i] = NULL;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index e557a4ef5996..3b9e74be5fbd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -935,6 +935,62 @@ static void hns_dsaf_tcam_mc_cfg(
}
/**
+ * hns_dsaf_tcam_uc_cfg_vague - config a unicast tcam entry with a match mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @tcam_data: tcam data; @tcam_mask: match mask; @tcam_uc: ucast config
+ */
+static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
+ u32 address,
+ struct dsaf_tbl_tcam_data *tcam_data,
+ struct dsaf_tbl_tcam_data *tcam_mask,
+ struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ /*Restore Match Data*/
+ tcam_mask->tbl_tcam_data_high = 0xffffffff;
+ tcam_mask->tbl_tcam_data_low = 0xffffffff;
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg_vague - config a multicast tcam entry with a match mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @tcam_data: tcam data to write
+ * @tcam_mask: tcam match mask
+ * @tcam_mc: multicast entry config
+ */
+static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
+ u32 address,
+ struct dsaf_tbl_tcam_data *tcam_data,
+ struct dsaf_tbl_tcam_data *tcam_mask,
+ struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ /*Restore Match Data*/
+ tcam_mask->tbl_tcam_data_high = 0xffffffff;
+ tcam_mask->tbl_tcam_data_low = 0xffffffff;
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
* hns_dsaf_tcam_mc_invld - INT
* @dsaf_id: dsa fabric id
* @address
@@ -1493,6 +1549,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
}
/**
+ * hns_dsaf_find_empty_mac_entry_reverse - search the soft mac table for an
+ * empty entry, starting from the end
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ int i;
+
+ soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
+ for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
+ /* search all entry from end to start.*/
+ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+ return i;
+ soft_mac_entry--;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
* hns_dsaf_set_mac_key - set mac key
* @dsaf_dev: dsa fabric device struct pointer
* @mac_key: tcam key pointer
@@ -2166,9 +2243,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
- DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
- DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
/* pfc pause frame statistics stored in dsaf inode*/
if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
@@ -2285,237 +2362,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
p[223 + i] = dsaf_read_dev(ddev,
DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
- p[224 + i] = dsaf_read_dev(ddev,
+ p[226 + i] = dsaf_read_dev(ddev,
DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
}
- p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+ p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
j = i * DSAF_COMM_CHN + port;
- p[228 + i] = dsaf_read_dev(ddev,
+ p[230 + i] = dsaf_read_dev(ddev,
DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
}
- p[231] = dsaf_read_dev(ddev,
- DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+ p[233] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
/* dsaf inode registers */
for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
j = i * DSAF_COMM_CHN + port;
- p[232 + i] = dsaf_read_dev(ddev,
+ p[234 + i] = dsaf_read_dev(ddev,
DSAF_SBM_CFG_REG_0_REG + j * 0x80);
- p[235 + i] = dsaf_read_dev(ddev,
+ p[237 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
- p[238 + i] = dsaf_read_dev(ddev,
+ p[240 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
- p[241 + i] = dsaf_read_dev(ddev,
+ p[243 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
- p[244 + i] = dsaf_read_dev(ddev,
+ p[246 + i] = dsaf_read_dev(ddev,
DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
- p[245 + i] = dsaf_read_dev(ddev,
+ p[249 + i] = dsaf_read_dev(ddev,
DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
- p[248 + i] = dsaf_read_dev(ddev,
+ p[252 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
- p[251 + i] = dsaf_read_dev(ddev,
+ p[255 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
- p[254 + i] = dsaf_read_dev(ddev,
+ p[258 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
- p[257 + i] = dsaf_read_dev(ddev,
+ p[261 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
- p[260 + i] = dsaf_read_dev(ddev,
+ p[264 + i] = dsaf_read_dev(ddev,
DSAF_SBM_INER_ST_0_REG + j * 0x80);
- p[263 + i] = dsaf_read_dev(ddev,
+ p[267 + i] = dsaf_read_dev(ddev,
DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
- p[266 + i] = dsaf_read_dev(ddev,
+ p[270 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
- p[269 + i] = dsaf_read_dev(ddev,
+ p[273 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
- p[272 + i] = dsaf_read_dev(ddev,
+ p[276 + i] = dsaf_read_dev(ddev,
DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
- p[275 + i] = dsaf_read_dev(ddev,
+ p[279 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
- p[278 + i] = dsaf_read_dev(ddev,
+ p[282 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
- p[281 + i] = dsaf_read_dev(ddev,
+ p[285 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
- p[284 + i] = dsaf_read_dev(ddev,
+ p[288 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
- p[287 + i] = dsaf_read_dev(ddev,
+ p[291 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
- p[290 + i] = dsaf_read_dev(ddev,
+ p[294 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
- p[293 + i] = dsaf_read_dev(ddev,
+ p[297 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
- p[296 + i] = dsaf_read_dev(ddev,
+ p[300 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
- p[299 + i] = dsaf_read_dev(ddev,
+ p[303 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
- p[302 + i] = dsaf_read_dev(ddev,
+ p[306 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
- p[305 + i] = dsaf_read_dev(ddev,
+ p[309 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
- p[308 + i] = dsaf_read_dev(ddev,
+ p[312 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
}
/* dsaf onode registers */
for (i = 0; i < DSAF_XOD_NUM; i++) {
- p[311 + i] = dsaf_read_dev(ddev,
+ p[315 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
- p[319 + i] = dsaf_read_dev(ddev,
+ p[323 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
- p[327 + i] = dsaf_read_dev(ddev,
+ p[331 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
- p[335 + i] = dsaf_read_dev(ddev,
+ p[339 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
- p[343 + i] = dsaf_read_dev(ddev,
+ p[347 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
- p[351 + i] = dsaf_read_dev(ddev,
+ p[355 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
}
- p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
- p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
- p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+ p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+ p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+ p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
j = i * DSAF_COMM_CHN + port;
- p[362 + i] = dsaf_read_dev(ddev,
+ p[366 + i] = dsaf_read_dev(ddev,
DSAF_XOD_GNT_L_0_REG + j * 0x90);
- p[365 + i] = dsaf_read_dev(ddev,
+ p[369 + i] = dsaf_read_dev(ddev,
DSAF_XOD_GNT_H_0_REG + j * 0x90);
- p[368 + i] = dsaf_read_dev(ddev,
+ p[372 + i] = dsaf_read_dev(ddev,
DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
- p[371 + i] = dsaf_read_dev(ddev,
+ p[375 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
- p[374 + i] = dsaf_read_dev(ddev,
+ p[378 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
- p[377 + i] = dsaf_read_dev(ddev,
+ p[381 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
- p[380 + i] = dsaf_read_dev(ddev,
+ p[384 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
- p[383 + i] = dsaf_read_dev(ddev,
+ p[387 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
- p[386 + i] = dsaf_read_dev(ddev,
+ p[390 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
- p[389 + i] = dsaf_read_dev(ddev,
+ p[393 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
}
- p[392] = dsaf_read_dev(ddev,
+ p[396] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
- p[393] = dsaf_read_dev(ddev,
+ p[397] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
- p[394] = dsaf_read_dev(ddev,
+ p[398] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
- p[395] = dsaf_read_dev(ddev,
+ p[399] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
- p[396] = dsaf_read_dev(ddev,
+ p[400] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
- p[397] = dsaf_read_dev(ddev,
+ p[401] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
- p[398] = dsaf_read_dev(ddev,
+ p[402] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
- p[399] = dsaf_read_dev(ddev,
+ p[403] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
- p[400] = dsaf_read_dev(ddev,
+ p[404] = dsaf_read_dev(ddev,
DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
- p[401] = dsaf_read_dev(ddev,
+ p[405] = dsaf_read_dev(ddev,
DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
- p[402] = dsaf_read_dev(ddev,
+ p[406] = dsaf_read_dev(ddev,
DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
- p[403] = dsaf_read_dev(ddev,
+ p[407] = dsaf_read_dev(ddev,
DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
- p[404] = dsaf_read_dev(ddev,
+ p[408] = dsaf_read_dev(ddev,
DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
/* dsaf voq registers */
for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
j = (i * DSAF_COMM_CHN + port) * 0x90;
- p[405 + i] = dsaf_read_dev(ddev,
+ p[409 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
- p[408 + i] = dsaf_read_dev(ddev,
+ p[412 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
- p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
- p[414 + i] = dsaf_read_dev(ddev,
+ p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+ p[418 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
- p[417 + i] = dsaf_read_dev(ddev,
+ p[421 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
- p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
- p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
- p[426 + i] = dsaf_read_dev(ddev,
+ p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+ p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+ p[430 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
- p[429 + i] = dsaf_read_dev(ddev,
+ p[433 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
- p[432 + i] = dsaf_read_dev(ddev,
+ p[436 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
- p[435 + i] = dsaf_read_dev(ddev,
+ p[439 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
- p[438 + i] = dsaf_read_dev(ddev,
+ p[442 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_BP_ALL_THRD_0_REG + j);
}
/* dsaf tbl registers */
- p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
- p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
- p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
- p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
- p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
- p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
- p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
- p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
- p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
- p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
- p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
- p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
- p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
- p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
- p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
- p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
- p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
- p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
- p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
- p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
- p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
- p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
- p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+ p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+ p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+ p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+ p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+ p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+ p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+ p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+ p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+ p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+ p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+ p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+ p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+ p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+ p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+ p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
j = i * 0x8;
- p[464 + 2 * i] = dsaf_read_dev(ddev,
+ p[468 + 2 * i] = dsaf_read_dev(ddev,
DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
- p[465 + 2 * i] = dsaf_read_dev(ddev,
+ p[469 + 2 * i] = dsaf_read_dev(ddev,
DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
}
- p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
- p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
- p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
- p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
- p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
- p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
- p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
- p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
- p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
- p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
- p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
- p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+ p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+ p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+ p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+ p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+ p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+ p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+ p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+ p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+ p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+ p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+ p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+ p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
/* dsaf other registers */
- p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
- p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
- p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
- p[495] = dsaf_read_dev(ddev,
+ p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+ p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+ p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+ p[499] = dsaf_read_dev(ddev,
DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
- p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
- p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+ p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+ p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
if (!is_ver1)
- p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+ p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
/* mark end of dsaf regs */
- for (i = 499; i < 504; i++)
+ for (i = 503; i < 504; i++)
p[i] = 0xdddddddd;
}
@@ -2673,58 +2750,156 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
-/* Reserve the last TCAM entry for promisc support */
-#define dsaf_promisc_tcam_entry(port) \
- (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
-void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
- u32 port, bool enable)
+static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
+ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
+ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+ struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
+ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
- struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
- u16 entry_index;
- struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
- struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
+ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
+ struct dsaf_drv_mac_single_dest_entry mask_entry;
+ struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct hns_mac_cb *mac_cb;
+ u8 addr[ETH_ALEN] = {0};
+ u8 port_num;
+ u16 mskid;
+
+ /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index != DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* put promisc tcam entry in the end. */
+ /* 1. set promisc unicast vague tcam entry. */
+ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ dev_err(dsaf_dev->dev,
+ "enable uc promisc failed (port:%#x)\n",
+ port);
+ return;
+ }
+
+ mac_cb = dsaf_dev->mac_cb[port];
+ (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
+ tbl_tcam_ucast.tbl_ucast_out_port = port_num;
- if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
+ /* config uc vague table */
+ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+ &tbl_tcam_mask_uc, &tbl_tcam_ucast);
+
+ /* update software entry */
+ soft_mac_entry = priv->soft_mac_tbl;
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+ /* step back to the START for mc. */
+ soft_mac_entry = priv->soft_mac_tbl;
+
+ /* 2. set promisc multicast vague tcam entry. */
+ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ dev_err(dsaf_dev->dev,
+ "enable mc promisc failed (port:%#x)\n",
+ port);
return;
+ }
+
+ memset(&mask_entry, 0x0, sizeof(mask_entry));
+ memset(&mask_key, 0x0, sizeof(mask_key));
+ memset(&temp_key, 0x0, sizeof(temp_key));
+ mask_entry.addr[0] = 0x01;
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
+ port, mask_entry.addr);
+ tbl_tcam_mcast.tbl_mcast_item_vld = 1;
+ tbl_tcam_mcast.tbl_mcast_old_en = 0;
- /* find the tcam entry index for promisc */
- entry_index = dsaf_promisc_tcam_entry(port);
-
- memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
- memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
-
- /* config key mask */
- if (enable) {
- dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
- DSAF_TBL_TCAM_KEY_PORT_M,
- DSAF_TBL_TCAM_KEY_PORT_S, port);
- dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
- DSAF_TBL_TCAM_KEY_PORT_M,
- DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
-
- /* SUB_QID */
- dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
- DSAF_SERVICE_NW_NUM, true);
- mac_data.tbl_mcast_item_vld = true; /* item_vld bit */
+ if (port < DSAF_SERVICE_NW_NUM) {
+ mskid = port;
+ } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
} else {
- mac_data.tbl_mcast_item_vld = false; /* item_vld bit */
+ dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port,
+ mask_key.high.val, mask_key.low.val);
+ return;
}
- dev_dbg(dsaf_dev->dev,
- "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
- dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
- tbl_tcam_data.low.val, entry_index);
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+ memcpy(&temp_key, &mask_key, sizeof(mask_key));
+ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+ (struct dsaf_tbl_tcam_data *)(&mask_key),
+ &tbl_tcam_mcast);
+
+ /* update software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = temp_key.high.val;
+ soft_mac_entry->tcam_key.low.val = temp_key.low.val;
+}
- /* config promisc entry with mask */
- hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
- (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
- &mac_data);
+static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
+{
+ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
+ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
+ struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ u8 addr[ETH_ALEN] = {0};
- /* config software entry */
+ /* 1. delete uc vague tcam entry. */
+ /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+ if (entry_index == DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* config uc vague table */
+ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+ &tbl_tcam_mask, &tbl_tcam_ucast);
+ /* update soft management table. */
+ soft_mac_entry = priv->soft_mac_tbl;
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+ /* step back to the START for mc. */
+ soft_mac_entry = priv->soft_mac_tbl;
+
+ /* 2. delete mc vague tcam entry. */
+ addr[0] = 0x01;
+ memset(&mac_key, 0x0, sizeof(mac_key));
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+ if (entry_index == DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* config mc vague table */
+ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+ &tbl_tcam_mask, &tbl_tcam_mcast);
+ /* update soft management table. */
soft_mac_entry += entry_index;
- soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+}
+
+/* Reserve the last TCAM entry for promisc support */
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+ u32 port, bool enable)
+{
+ if (enable)
+ set_promisc_tcam_enable(dsaf_dev, port);
+ else
+ set_promisc_tcam_disable(dsaf_dev, port);
}
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
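hns_dsaf_find_empty_mac_entry_reverse, added above, exists because ordinary mac entries are allocated front-to-back while the two promisc vague-match entries are meant to live at the very end of the tcam; scanning from the back keeps the two populations from colliding. A standalone model of that allocation scheme:

	#include <stdio.h>

	#define INVALID_IDX -1

	/* Sketch: normal entries fill from the front, scratch entries
	 * (the promisc vague-match pair) are taken from the back. */
	static int find_free_from_end(const int *tbl, int n)
	{
		int i;

		for (i = n - 1; i >= 0; i--)
			if (tbl[i] == INVALID_IDX)
				return i;
		return INVALID_IDX;
	}

	int main(void)
	{
		int tbl[8] = { 5, 9, INVALID_IDX, INVALID_IDX,
			       INVALID_IDX, INVALID_IDX, INVALID_IDX, 7 };

		printf("%d\n", find_free_from_end(tbl, 8));	/* prints 6 */
		return 0;
	}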
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 74d935d82cbc..b9733b0b8482 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -176,7 +176,7 @@
#define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
-#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x103C
#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
@@ -404,11 +404,11 @@
#define RCB_ECC_ERR_ADDR4_REG 0x460
#define RCB_ECC_ERR_ADDR5_REG 0x464
-#define RCB_COM_SF_CFG_INTMASK_RING 0x480
-#define RCB_COM_SF_CFG_RING_STS 0x484
-#define RCB_COM_SF_CFG_RING 0x488
-#define RCB_COM_SF_CFG_INTMASK_BD 0x48C
-#define RCB_COM_SF_CFG_BD_RINT_STS 0x470
+#define RCB_COM_SF_CFG_INTMASK_RING 0x470
+#define RCB_COM_SF_CFG_RING_STS 0x474
+#define RCB_COM_SF_CFG_RING 0x478
+#define RCB_COM_SF_CFG_INTMASK_BD 0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS 0x480
#define RCB_COM_RCB_RD_BD_BUSY 0x490
#define RCB_COM_RCB_FBD_CRT_EN 0x494
#define RCB_COM_AXI_WR_ERR_INTMASK 0x498
@@ -534,6 +534,7 @@
#define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
#define GMAC_LOOP_REG 0x01DCUL
#define GMAC_RECV_CONTROL_REG 0x01E0UL
+#define GMAC_PCS_RX_EN_REG 0x01E4UL
#define GMAC_VLAN_CODE_REG 0x01E8UL
#define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 28e907831b0e..6242249c9f4c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1186,6 +1186,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
+ if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
+ phy_stop(phy_dev);
+
return 0;
}
@@ -1281,6 +1284,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
return cpu;
}
+static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < q_num * 2; i++) {
+ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+ irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+ NULL);
+ free_irq(priv->ring_data[i].ring->irq,
+ &priv->ring_data[i]);
+ priv->ring_data[i].ring->irq_init_flag =
+ RCB_IRQ_NOT_INITED;
+ }
+ }
+}
+
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
@@ -1306,7 +1325,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
rd->ring->irq);
- return ret;
+ goto out_free_irq;
}
disable_irq(rd->ring->irq);
@@ -1321,6 +1340,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
}
return 0;
+
+out_free_irq:
+ hns_nic_free_irq(h->q_num, priv);
+ return ret;
}
static int hns_nic_net_up(struct net_device *ndev)
@@ -1330,6 +1353,9 @@ static int hns_nic_net_up(struct net_device *ndev)
int i, j;
int ret;
+ if (!test_bit(NIC_STATE_DOWN, &priv->state))
+ return 0;
+
ret = hns_nic_init_irq(priv);
if (ret != 0) {
netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
@@ -1365,6 +1391,7 @@ out_has_some_queues:
for (j = i - 1; j >= 0; j--)
hns_nic_ring_close(ndev, j);
+ hns_nic_free_irq(h->q_num, priv);
set_bit(NIC_STATE_DOWN, &priv->state);
return ret;
@@ -1482,11 +1509,19 @@ static int hns_nic_net_stop(struct net_device *ndev)
}
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+#define HNS_TX_TIMEO_LIMIT (40 * HZ)
static void hns_nic_net_timeout(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- hns_tx_timeout_reset(priv);
+ if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+ ndev->watchdog_timeo *= 2;
+ netdev_info(ndev, "watchdog_timo changed to %d.\n",
+ ndev->watchdog_timeo);
+ } else {
+ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+ hns_tx_timeout_reset(priv);
+ }
}
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -2049,11 +2084,11 @@ static void hns_nic_service_task(struct work_struct *work)
= container_of(work, struct hns_nic_priv, service_task);
struct hnae_handle *h = priv->ae_handle;
+ hns_nic_reset_subtask(priv);
hns_nic_update_link_status(priv->netdev);
h->dev->ops->update_led_status(h);
hns_nic_update_stats(priv->netdev);
- hns_nic_reset_subtask(priv);
hns_nic_service_event_complete(priv);
}
@@ -2339,7 +2374,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
ndev->min_mtu = MAC_MIN_MTU;
switch (priv->enet_ver) {
case AE_VERSION_2:
- ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
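The hns timeout handler above turns a hard reset into a last resort: each tx timeout first doubles the watchdog budget, up to HNS_TX_TIMEO_LIMIT (40 s), and only once the cap is reached does it restore the default and schedule the reset. The backoff in isolation, with trigger_reset() as a hypothetical stand-in for the reset path:

	#define TX_TIMEO_LIMIT (40 * HZ)

	static void tx_timeout(struct net_device *ndev)
	{
		if (ndev->watchdog_timeo < TX_TIMEO_LIMIT) {
			ndev->watchdog_timeo *= 2;	/* be more patient first */
			netdev_info(ndev, "watchdog_timeo changed to %d.\n",
				    ndev->watchdog_timeo);
			return;
		}

		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT; /* default budget */
		trigger_reset(ndev);
	}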
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ed50b8dee44f..67cc6d9c8fd7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1939,8 +1939,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rwi *rwi;
+ unsigned long flags;
- mutex_lock(&adapter->rwi_lock);
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
if (!list_empty(&adapter->rwi_list)) {
rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1950,7 +1951,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
rwi = NULL;
}
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
return rwi;
}
@@ -2025,6 +2026,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
struct list_head *entry, *tmp_entry;
struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
+ unsigned long flags;
int ret;
if (adapter->state == VNIC_REMOVING ||
@@ -2041,21 +2043,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- mutex_lock(&adapter->rwi_lock);
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset\n");
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ret = EBUSY;
goto err;
}
}
- rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+ rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
if (!rwi) {
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ibmvnic_close(netdev);
ret = ENOMEM;
goto err;
@@ -2069,7 +2071,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
}
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
adapter->resetting = true;
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
@@ -4759,7 +4761,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
- mutex_init(&adapter->rwi_lock);
+ spin_lock_init(&adapter->rwi_lock);
adapter->resetting = false;
adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 99c4f8d331ce..f2018dbebfa5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
enum ibmvnic_reset_reason reset_reason;
- struct mutex rwi_lock;
+ spinlock_t rwi_lock;
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;
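
The ibmvnic change above is the standard fix for taking a lock from atomic context: the reset path can run from a tasklet, where a mutex must not be taken, so rwi_lock becomes a spinlock, the allocation under it moves to GFP_ATOMIC, and every acquisition uses the irqsave variant. A minimal sketch of the pattern, with illustrative names rather than the driver's own:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head list;
	int reason;
};

static LIST_HEAD(work_list);
static DEFINE_SPINLOCK(work_lock);	/* was a mutex; safe in atomic context */

/* May be called from hard-irq or tasklet context, hence irqsave + GFP_ATOMIC. */
static int queue_work_item(int reason)
{
	struct work_item *item;
	unsigned long flags;

	spin_lock_irqsave(&work_lock, flags);
	item = kzalloc(sizeof(*item), GFP_ATOMIC);	/* GFP_KERNEL could sleep */
	if (!item) {
		spin_unlock_irqrestore(&work_lock, flags);
		return -ENOMEM;
	}
	item->reason = reason;
	list_add_tail(&item->list, &work_list);
	spin_unlock_irqrestore(&work_lock, flags);
	return 0;
}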
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a3f45335437c..0e5dc74b4ef2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1543,17 +1543,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
/* Copy the address first, so that we avoid a possible race with
- * .set_rx_mode(). If we copy after changing the address in the filter
- * list, we might open ourselves to a narrow race window where
- * .set_rx_mode could delete our dev_addr filter and prevent traffic
- * from passing.
+ * .set_rx_mode().
+ * - Remove old address from MAC filter
+ * - Copy new address
+ * - Add new address to MAC filter
*/
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
- i40e_add_mac_filter(vsi, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ i40e_add_mac_filter(vsi, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
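
The i40e hunk above closes the race by doing all three steps inside one critical section, so a concurrent .set_rx_mode() can never observe netdev->dev_addr without a matching filter. A condensed sketch of the invariant; the filter helpers are stand-ins, not the driver's API:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(filter_lock);

static void filter_del(const u8 *mac) { /* stand-in for i40e_del_mac_filter() */ }
static void filter_add(const u8 *mac) { /* stand-in for i40e_add_mac_filter() */ }

static void set_mac_racefree(struct net_device *netdev, const u8 *new_mac)
{
	/* Remove old, copy, add new -- all under the lock, so dev_addr and
	 * the filter list always agree from any other CPU's point of view.
	 */
	spin_lock_bh(&filter_lock);
	filter_del(netdev->dev_addr);
	ether_addr_copy(netdev->dev_addr, new_mac);
	filter_add(netdev->dev_addr);
	spin_unlock_bh(&filter_lock);
}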
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index aef3c89ee79c..d0a95424ce58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1559,24 +1559,6 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
@@ -1793,8 +1775,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* other fields within the skb.
**/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
@@ -1802,6 +1783,8 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
+ u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -1812,6 +1795,13 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
skb_record_rx_queue(skb, rx_ring->queue_index);
+ if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(vlan_tag));
+ }
+
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
@@ -2350,8 +2340,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
- u8 rx_ptype;
u64 qword;
/* return some buffers to hardware, one at a time is too slow */
@@ -2444,18 +2432,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
-
/* populate checksum, VLAN, and protocol */
- i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- i40e_receive_skb(rx_ring, skb, vlan_tag);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
skb = NULL;
/* update budget accounting */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 09809dffe399..8af0e99c6c0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -12,10 +12,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
union i40e_rx_desc *rx_desc,
u64 qw);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype);
-void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag);
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
unsigned int total_rx_bytes,
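
With i40e_receive_skb() gone, the VLAN step now lives inside i40e_process_skb_fields() and both ring cleaners hand the skb straight to napi_gro_receive(). A hedged sketch of just the descriptor-to-skb VLAN step; the shift value is passed in here rather than using the driver's register constants:

#include <linux/bits.h>
#include <linux/if_vlan.h>

/* If the descriptor's status qword flags a stripped 802.1Q tag, attach it
 * to the skb via hardware-accelerated tagging before handing it up.
 */
static void attach_vlan_tag(struct sk_buff *skb, u64 qword, __le16 l2tag1,
			    unsigned int l2tag1p_shift)
{
	if (qword & BIT_ULL(l2tag1p_shift))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(l2tag1));
}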
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 433c8e688c78..870cf654e436 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -634,8 +634,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
struct i40e_rx_buffer *bi;
union i40e_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
- u8 rx_ptype;
u64 qword;
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
@@ -713,14 +711,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
total_rx_packets++;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
- i40e_receive_skb(rx_ring, skb, vlan_tag);
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 5dacfc870259..345701af7749 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -700,7 +700,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
u8 num_tcs = adapter->hw_tcs;
u32 reg_val;
u32 queue;
- u32 word;
/* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -758,6 +757,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
}
}
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 word;
+
/* Clear VF's mailbox memory */
for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
@@ -831,6 +838,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* reset the filters for the device */
ixgbe_vf_reset_event(adapter, vf);
+ ixgbe_vf_clear_mbx(adapter, vf);
+
/* set vf mac address */
if (!is_zero_ether_addr(vf_mac))
ixgbe_set_vf_mac(adapter, vf, vf_mac);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e5397c8197b9..61b23497f836 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -408,7 +408,6 @@ struct mvneta_port {
struct mvneta_pcpu_stats __percpu *stats;
int pkt_size;
- unsigned int frag_size;
void __iomem *base;
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;
@@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
if (!pp->bm_priv) {
/* Set Offset */
mvneta_rxq_offset_set(pp, rxq, 0);
- mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
+ mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+ PAGE_SIZE :
+ MVNETA_RX_BUF_SIZE(pp->pkt_size));
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
} else {
@@ -3760,7 +3761,6 @@ static int mvneta_open(struct net_device *dev)
int ret;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
- pp->frag_size = PAGE_SIZE;
ret = mvneta_setup_rxqs(pp);
if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 4551ec134494..6a059d6ee03f 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4390,12 +4390,15 @@ static void mvpp2_phylink_validate(struct net_device *dev,
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_NA:
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 10000baseKR_Full);
+ if (port->gop_id == 0) {
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ }
/* Fall-through */
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4406,7 +4409,6 @@ static void mvpp2_phylink_validate(struct net_device *dev,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10000baseT_Full);
/* Fall-through */
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 25c1c4f96841..f480763dcd0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1190,11 +1190,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
- int ret;
-
- ret = ethtool_op_get_ts_info(priv->netdev, info);
- if (ret)
- return ret;
info->phc_index = mlx5_clock_get_ptp_index(mdev);
@@ -1202,9 +1197,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
info->phc_index == -1)
return 0;
- info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 871313d6b34d..b70cb6fd164c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -128,6 +128,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
return !params->lro_en && frag_sz <= PAGE_SIZE;
}
+#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
+ MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
@@ -138,6 +140,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
if (!mlx5e_rx_is_linear_skb(mdev, params))
return false;
+ if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+ return false;
+
if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
return true;
@@ -1396,6 +1401,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_core_dev *mdev = c->mdev;
struct mlx5_rate_limit rl = {0};
+ cancel_work_sync(&sq->dim.work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
rl.rate = sq->rate_limit;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c3c657548824..820fe85100b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -46,6 +46,7 @@
#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -466,8 +467,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
ASSERT_RTNL();
- if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
- !ether_addr_equal(e->h_dest, ha))
+ if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
+ (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
mlx5e_tc_encap_flows_del(priv, e);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
@@ -1083,9 +1084,7 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
if (err)
return err;
-
- priv->channels.params.num_channels =
- mlx5e_get_netdev_max_channels(netdev);
+ priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
mlx5e_build_rep_netdev(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 624eed345b5d..0b5ef6d4e815 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1190,7 +1190,7 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- struct mlx5e_xdpsq *xdpsq;
+ struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
struct mlx5_cqe64 *cqe;
int work_done = 0;
@@ -1201,10 +1201,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
cqe = mlx5_cqwq_get_cqe(&cq->wq);
- if (!cqe)
+ if (!cqe) {
+ if (unlikely(work_done))
+ goto out;
return 0;
-
- xdpsq = &rq->xdpsq;
+ }
do {
if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
@@ -1219,6 +1220,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+out:
if (xdpsq->doorbell) {
mlx5e_xmit_xdp_doorbell(xdpsq);
xdpsq->doorbell = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 3e99d0728b2f..4337afd610d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -74,7 +74,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
@@ -198,7 +197,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_nop += sq_stats->nop;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
- s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_cqe_err += sq_stats->cqe_err;
s->tx_recover += sq_stats->recover;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 3f8e870ef4c9..3ff69ddae2d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -87,7 +87,6 @@ struct mlx5e_sw_stats {
u64 tx_recover;
u64 tx_cqes;
u64 tx_queue_wake;
- u64 tx_udp_seg_rem;
u64 tx_cqe_err;
u64 tx_xdp_xmit;
u64 tx_xdp_full;
@@ -221,7 +220,6 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
- u64 udp_seg_rem;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fca6f4132c91..9dabe9d4b279 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -870,9 +870,9 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule;
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- slow_attr->mirror_count = 0,
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->mirror_count = 0;
+ slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -887,6 +887,9 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *slow_attr)
{
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->mirror_count = 0;
+ slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}
@@ -907,11 +910,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_priv *out_priv;
int err = 0, encap_err = 0;
- /* if prios are not supported, keep the old behaviour of using same prio
- * for all offloaded rules.
- */
- if (!mlx5_eswitch_prios_supported(esw))
- attr->prio = 1;
+ if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+ NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+ return -EOPNOTSUPP;
+ }
if (attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
@@ -1094,10 +1096,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
flow->rule[0] = rule;
}
- if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
- mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
- }
+ /* we know that the encap is valid */
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
@@ -2966,8 +2967,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
return -EOPNOTSUPP;
}
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = dest_chain;
continue;
@@ -2980,6 +2980,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
+ if (attr->dest_chain) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9d73eb955f75..08233cf44871 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -452,7 +452,7 @@ static void del_sw_hw_rule(struct fs_node *node)
if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
+ modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
update_fte = true;
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 30f751e69698..f7154f358f27 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -81,6 +81,7 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool reload_fail;
+ bool fw_flash_in_progress;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@@ -428,12 +429,16 @@ struct mlxsw_reg_trans {
struct rcu_head rcu;
};
-#define MLXSW_EMAD_TIMEOUT_MS 200
+#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
+#define MLXSW_EMAD_TIMEOUT_MS 200
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+ if (trans->core->fw_flash_in_progress)
+ timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
+
queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}
@@ -1854,6 +1859,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->fw_flash_in_progress = true;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
+
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->fw_flash_in_progress = false;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
+
static int __init mlxsw_core_module_init(void)
{
int err;
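
The mlxsw change above is a simple mode flag: while a firmware flash is in progress, EMAD register transactions are allowed a much longer timeout, and the flash path brackets itself with the new start/end helpers. A sketch of the pattern with made-up names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define EMAD_TIMEOUT_MS			200
#define EMAD_TIMEOUT_FW_FLASH_MS	3000

struct core {
	bool fw_flash_in_progress;
	struct workqueue_struct *emad_wq;
	struct delayed_work timeout_dw;
};

/* Firmware flash monopolises the device, so grant EMADs a longer deadline. */
static void schedule_emad_timeout(struct core *core)
{
	unsigned long timeout = msecs_to_jiffies(core->fw_flash_in_progress ?
						 EMAD_TIMEOUT_FW_FLASH_MS :
						 EMAD_TIMEOUT_MS);

	queue_delayed_work(core->emad_wq, &core->timeout_dw, timeout);
}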
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c35be477856f..c4e4971764e5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -292,6 +292,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9bec940330a4..f84b9c02fcc5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -309,8 +309,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
},
.mlxsw_sp = mlxsw_sp
};
+ int err;
+
+ mlxsw_core_fw_flash_start(mlxsw_sp->core);
+ err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+ mlxsw_core_fw_flash_end(mlxsw_sp->core);
- return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+ return err;
}
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
@@ -3521,6 +3526,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
+ MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 5c13674439f1..b5b54b41349a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -977,6 +977,6 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
- mlxsw_sp->nve = NULL;
kfree(mlxsw_sp->nve);
+ mlxsw_sp->nve = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 6f18f4d3322a..451216dd7f6b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -60,6 +60,7 @@ enum {
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
+ MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e8ca98c070f6..20c9377e99cb 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -802,14 +802,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
u32 mac_addr_hi = 0;
u32 mac_addr_lo = 0;
u32 data;
- int ret;
netdev = adapter->netdev;
- lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
- ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
- 0, 1000, 20000, 100);
- if (ret)
- return ret;
/* setup auto duplex, and speed detection */
data = lan743x_csr_read(adapter, MAC_CR);
@@ -2719,8 +2713,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
"pci-%s", pci_name(adapter->pdev));
- /* set to internal PHY id */
- adapter->mdiobus->phy_mask = ~(u32)BIT(1);
+ if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
+ /* LAN7430 uses internal phy at address 1 */
+ adapter->mdiobus->phy_mask = ~(u32)BIT(1);
/* register mdiobus */
ret = mdiobus_register(adapter->mdiobus);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 4c1fb7e57888..7cde387e5ec6 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -808,7 +808,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- u64 data0, data1 = 0, steer_ctrl = 0;
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
enum vxge_hw_status status;
status = vxge_hw_vpath_fw_api(vpath,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2f49eb75f3cc..67e576fe7fc0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -345,13 +345,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
!(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
return -EOPNOTSUPP;
- /* We need to store TCP flags in the IPv4 key space, thus
- * we need to ensure we include a IPv4 key layer if we have
- * not done so already.
+ /* We need to store TCP flags in either the IPv4 or IPv6 key
+ * space, thus we need to ensure we include an IPv4/IPv6 key
+ * layer if we have not done so already.
*/
- if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
- key_layer |= NFP_FLOWER_LAYER_IPV4;
- key_size += sizeof(struct nfp_flower_ipv4);
+ if (!key_basic)
+ return -EOPNOTSUPP;
+
+ if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+ !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+ switch (key_basic->n_proto) {
+ case cpu_to_be16(ETH_P_IP):
+ key_layer |= NFP_FLOWER_LAYER_IPV4;
+ key_size += sizeof(struct nfp_flower_ipv4);
+ break;
+
+ case cpu_to_be16(ETH_P_IPV6):
+ key_layer |= NFP_FLOWER_LAYER_IPV6;
+ key_size += sizeof(struct nfp_flower_ipv6);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
}
}
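
Because TCP flags can only be stored in a per-family key section, the nfp classifier now derives that section from the flow's basic n_proto instead of assuming IPv4. A reduced sketch of the dispatch; the layer flags are placeholders for the driver's own:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/if_ether.h>

#define LAYER_IPV4	BIT(0)
#define LAYER_IPV6	BIT(1)

/* Pick the key layer matching the flow's L3 protocol; any other protocol
 * has nowhere to carry TCP flags and is rejected as unsupported.
 */
static int select_l3_key_layer(__be16 n_proto, u32 *key_layer)
{
	switch (n_proto) {
	case cpu_to_be16(ETH_P_IP):
		*key_layer |= LAYER_IPV4;
		return 0;
	case cpu_to_be16(ETH_P_IPV6):
		*key_layer |= LAYER_IPV6;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}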
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 052b3d2c07a1..c662c6f5bee3 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static void __init get_mac_address(struct net_device *dev)
+static void get_mac_address(struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 5c221ebaa7b3..b38e12c9de9d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12831,8 +12831,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
- MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_RESERVED,
MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
MFW_DRV_MSG_MAX
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index aa633381aa47..c6f4bab67a5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2496,6 +2496,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
DP_NOTICE(cdev,
"Unable to map frag - dropping packet\n");
+ rc = -ENOMEM;
goto err;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 1fd01688d37b..209566f8097b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6469,7 +6469,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
goto out;
}
- if (status & LinkChg)
+ if (status & LinkChg && tp->dev->phydev)
phy_mac_interrupt(tp->dev->phydev);
if (unlikely(status & RxFIFOOver &&
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5551fead8f66..c4a35e932f05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4250,6 +4250,7 @@ int stmmac_dvr_probe(struct device *device,
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
dev_err(priv->device, "failed to create workqueue\n");
+ ret = -ENOMEM;
goto error_wq;
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 0ff5a403a8dc..b2ff903a9cb6 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
static void ca8210_rx_done(struct cas_control *cas_ctl)
{
u8 *buf;
- u8 len;
+ unsigned int len;
struct work_priv_container *mlme_reset_wpc;
struct ca8210_priv *priv = cas_ctl->priv;
@@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
if (len > CA8210_SPI_BUF_SIZE) {
dev_crit(
&priv->spi->dev,
- "Received packet len (%d) erroneously long\n",
+ "Received packet len (%u) erroneously long\n",
len
);
goto finish;
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 51b5198d5943..b6743f03dce0 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -492,7 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+ if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
hwsim_edge_policy, NULL))
return -EINVAL;
@@ -542,7 +542,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+ if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
hwsim_edge_policy, NULL))
return -EINVAL;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 18e92c19c5ab..26c41ede54a4 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -308,11 +308,8 @@ static int mdio_bus_phy_restore(struct device *dev)
if (ret < 0)
return ret;
- /* The PHY needs to renegotiate. */
- phydev->link = 0;
- phydev->state = PHY_UP;
-
- phy_start_machine(phydev);
+ if (phydev->attached_dev && phydev->adjust_link)
+ phy_start_machine(phydev);
return 0;
}
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 184c24baca15..d6916f787fce 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface)
return -EIO;
}
+ /* check if we have a valid interface */
+ if (if_num > 16) {
+ kfree(config_data);
+ return -EINVAL;
+ }
+
switch (config_data[if_num]) {
case 0x0:
result = 0;
@@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface,
/* Get the interface/port specification from either driver_info or from
* the device itself */
- if (id->driver_info)
+ if (id->driver_info) {
+ /* if_num is controlled by the device; driver_info is a zero-terminated
+ * array. Make sure the access is in bounds! */
+ for (i = 0; i <= if_num; ++i)
+ if (((u32 *)(id->driver_info))[i] == 0)
+ goto exit;
port_spec = ((u32 *)(id->driver_info))[if_num];
- else
+ } else {
port_spec = hso_get_config_data(interface);
+ if (port_spec < 0)
+ goto exit;
+ }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
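
Both hso paths now validate if_num, which the device controls: hso_get_config_data() caps it before indexing config_data, and the driver_info path proves the index lies before the table's zero terminator. A simplified sketch of the latter check, assuming the zero-terminated u32 layout the patch describes:

#include <linux/errno.h>
#include <linux/types.h>

static int port_spec_from_table(const u32 *table, unsigned int if_num,
				u32 *port_spec)
{
	unsigned int i;

	/* A zero entry terminates the table; hitting it at or before
	 * if_num means the device-supplied index is out of bounds.
	 */
	for (i = 0; i <= if_num; ++i)
		if (table[i] == 0)
			return -EINVAL;

	*port_spec = table[if_num];
	return 0;
}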
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index be1917be28f2..77d3c85febf1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2320,6 +2320,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ /* Added to support MAC address changes */
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
return 0;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 72a55b6b4211..c8872dd5ff5e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1117,6 +1117,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
+ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
{QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
{QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
@@ -1229,6 +1230,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f1b5201cc320..60dd1ec1665f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -129,6 +129,7 @@
#define USB_UPS_CTRL 0xd800
#define USB_POWER_CUT 0xd80a
#define USB_MISC_0 0xd81a
+#define USB_MISC_1 0xd81f
#define USB_AFE_CTRL2 0xd824
#define USB_UPS_CFG 0xd842
#define USB_UPS_FLAGS 0xd848
@@ -555,6 +556,7 @@ enum spd_duplex {
/* MAC PASSTHRU */
#define AD_MASK 0xfee0
+#define BND_MASK 0x0004
#define EFUSE 0xcfdb
#define PASS_THRU_MASK 0x1
@@ -1150,7 +1152,7 @@ out1:
return ret;
}
-/* Devices containing RTL8153-AD can support a persistent
+/* Devices containing certain chip variants can support a persistent
* host system provided MAC address.
* Examples of this are Dell TB15 and Dell WD15 docks
*/
@@ -1165,13 +1167,23 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
/* test for -AD variant of RTL8153 */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- if ((ocp_data & AD_MASK) != 0x1000)
- return -ENODEV;
-
- /* test for MAC address pass-through bit */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
- if ((ocp_data & PASS_THRU_MASK) != 1)
- return -ENODEV;
+ if ((ocp_data & AD_MASK) == 0x1000) {
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1) {
+ netif_dbg(tp, probe, tp->netdev,
+ "No efuse for RTL8153-AD MAC pass through\n");
+ return -ENODEV;
+ }
+ } else {
+ /* test for RTL8153-BND */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if ((ocp_data & BND_MASK) == 0) {
+ netif_dbg(tp, probe, tp->netdev,
+ "Invalid variant for MAC pass through\n");
+ return -ENODEV;
+ }
+ }
/* returns _AUXMAC_#AABBCCDDEEFF# */
status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
@@ -1217,9 +1229,8 @@ static int set_ethernet_addr(struct r8152 *tp)
if (tp->version == RTL_VER_01) {
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
} else {
- /* if this is not an RTL8153-AD, no eFuse mac pass thru set,
- * or system doesn't provide valid _SB.AMAC this will be
- * be expected to non-zero
+ /* if the device doesn't support MAC pass through, this is
+ * expected to be non-zero
+ */
ret = vendor_mac_passthru_addr_read(tp, &sa);
if (ret < 0)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 297cdeaef479..0565f8880199 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -568,6 +568,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
rd->remote_port = port;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
+ rd->offloaded = false;
return 1;
}
@@ -3258,6 +3259,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f = NULL;
+ bool unregister = false;
int err;
err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3283,12 +3285,11 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
err = register_netdevice(dev);
if (err)
goto errout;
+ unregister = true;
err = rtnl_configure_link(dev, NULL);
- if (err) {
- unregister_netdevice(dev);
+ if (err)
goto errout;
- }
/* notify default fdb entry */
if (f)
@@ -3296,9 +3297,16 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
+
errout:
+ /* unregister_netdevice() destroys the default FDB entry with deletion
+ * notification. But the addition notification was not sent yet, so
+ * destroy the entry by hand here.
+ */
if (f)
vxlan_fdb_destroy(vxlan, f, false);
+ if (unregister)
+ unregister_netdevice(dev);
return err;
}
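
The vxlan hunk above replaces an early unregister_netdevice() call with a bool so every failure after registration unwinds in one place, and in the right order: the hand-built FDB entry is destroyed first (its add notification was never sent), then the device. A condensed sketch of the flag-based unwind; the helpers and labels are simplified:

#include <linux/netdevice.h>
#include <net/rtnetlink.h>

/* Caller holds RTNL, as register_netdevice() requires. */
static int setup_and_register(struct net_device *dev)
{
	bool unregister = false;
	int err;

	err = register_netdevice(dev);
	if (err)
		goto errout;
	unregister = true;	/* from here on, failure must unregister */

	err = rtnl_configure_link(dev, NULL);
	if (err)
		goto errout;

	return 0;

errout:
	if (unregister)
		unregister_netdevice(dev);
	return err;
}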
@@ -3534,7 +3542,6 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct vxlan_rdst *dst = &vxlan->default_dst;
struct vxlan_rdst old_dst;
struct vxlan_config conf;
- struct vxlan_fdb *f = NULL;
int err;
err = vxlan_nl2conf(tb, data,
@@ -3560,19 +3567,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
old_dst.remote_ifindex, 0);
if (!vxlan_addr_any(&dst->remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
&dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_APPEND | NLM_F_CREATE,
vxlan->cfg.dst_port,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
- NTF_SELF, &f);
+ NTF_SELF);
if (err) {
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
}
spin_unlock_bh(&vxlan->hash_lock);
}
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index da607febfd82..d210b0ed59be 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2418,6 +2418,28 @@ static int ath10k_core_reset_rx_filter(struct ath10k *ar)
return 0;
}
+static int ath10k_core_compat_services(struct ath10k *ar)
+{
+ struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+ /* all 10.x firmware versions support thermal throttling but don't
+ * advertise the support via service flags so we have to hardcode
+ * it here
+ */
+ switch (fw_file->wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw)
{
@@ -2617,6 +2639,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ status = ath10k_core_compat_services(ar);
+ if (status) {
+ ath10k_err(ar, "compat services failed: %d\n", status);
+ goto err_hif_stop;
+ }
+
/* Some firmware revisions do not properly set up hardware rx filter
* registers.
*
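
Since 10.x firmware never advertises thermal throttling, ath10k now sets the service bit itself and every thermal entry point bails out when the bit is absent. A small sketch of the service-bitmap idiom, with invented enum values standing in for the WMI op versions:

#include <linux/bitmap.h>
#include <linux/bitops.h>

enum fw_service {
	SERVICE_THERM_THROT,
	SERVICE_MAX,	/* keep last */
};

static DECLARE_BITMAP(svc_map, SERVICE_MAX);

/* Firmware that predates service flags: hardcode known capabilities. */
static void apply_compat_services(unsigned int wmi_op_version)
{
	switch (wmi_op_version) {
	case 101: case 102: case 1024: case 104:	/* stand-ins for 10.x */
		set_bit(SERVICE_THERM_THROT, svc_map);
		break;
	default:
		break;
	}
}

static bool therm_throt_supported(void)
{
	return test_bit(SERVICE_THERM_THROT, svc_map);
}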
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 15964b374f68..02988fc378a1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -2578,8 +2578,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
&fops_pktlog_filter);
- debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
- &fops_quiet_period);
+ if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+ &fops_quiet_period);
debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
&fops_tpc_stats);
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index aa8978a8d751..fe35edcd3ec8 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -140,6 +140,9 @@ void ath10k_thermal_set_throttling(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return;
+
if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
return;
@@ -165,6 +168,9 @@ int ath10k_thermal_register(struct ath10k *ar)
struct device *hwmon_dev;
int ret;
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return 0;
+
cdev = thermal_cooling_device_register("ath10k_thermal", ar,
&ath10k_thermal_ops);
@@ -216,6 +222,9 @@ err_cooling_destroy:
void ath10k_thermal_unregister(struct ath10k *ar)
{
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return;
+
sysfs_remove_link(&ar->dev->kobj, "cooling_device");
thermal_cooling_device_unregister(ar->thermal.cdev);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 92c25f51bf86..c2cb413392ee 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1564,6 +1564,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
WMI_SERVICE_SPOOF_MAC_SUPPORT,
WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
+ WMI_SERVICE_THERM_THROT,
+ WMI_TLV_MAX_SERVICE);
}
#undef SVCMAP
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index f7badd079051..c5a343c93013 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -205,6 +205,7 @@ enum wmi_service {
WMI_SERVICE_SPOOF_MAC_SUPPORT,
WMI_SERVICE_TX_DATA_ACK_RSSI,
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ WMI_SERVICE_THERM_THROT,
/* keep last */
WMI_SERVICE_MAX,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 2ba890445c35..1689bead1b4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -881,6 +881,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
int ret, i, j;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+ /*
+ * This command is not supported on earlier firmware versions.
+ * Unfortunately, we don't have a TLV API flag to rely on, so
+ * we rely on the major version, which is in the first byte of
+ * ucode_ver.
+ */
+ if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+ return 0;
+
ret = iwl_mvm_sar_get_wgds_table(mvm);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index e2addd8b878b..5d75c971004b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
"Send delba to tid=%d, %pM\n",
tid, rx_reor_tbl_ptr->ta);
mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
- return;
+ goto exit;
}
}
+exit:
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 8e63d14c1e1c..5380fba652cc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
* There could be holes in the buffer, which are skipped by the function.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
{
int pkt_to_send, i;
void *rx_tmp_ptr;
+ unsigned long flags;
pkt_to_send = (start_win > tbl->start_win) ?
min((start_win - tbl->start_win), tbl->win_size) :
tbl->win_size;
for (i = 0; i < pkt_to_send; ++i) {
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_tmp_ptr = NULL;
if (tbl->rx_reorder_ptr[i]) {
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (rx_tmp_ptr)
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
}
tbl->start_win = start_win;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* The start window is adjusted automatically when a hole is located.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
{
int i, j, xchg;
void *rx_tmp_ptr;
+ unsigned long flags;
for (i = 0; i < tbl->win_size; ++i) {
- if (!tbl->rx_reorder_ptr[i])
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ if (!tbl->rx_reorder_ptr[i]) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
break;
+ }
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
}
}
tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
*
* The function stops the associated timer and dispatches all the
* pending packets in the Rx reorder table before deletion.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
del_timer_sync(&tbl->timer_context.timer);
tbl->timer_context.timer_is_set = false;
+
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
@@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
/*
* This function returns the pointer to an entry in Rx reordering
* table which matches the given TA/TID pair.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
+ unsigned long flags;
- list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
- if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+ if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
return tbl;
+ }
+ }
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return NULL;
}
@@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
return;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
- if (!memcmp(tbl->ta, ta, ETH_ALEN))
+ list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+ if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ }
+ }
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
@@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
/*
* This function finds the last sequence number used in the packets
* buffered in Rx reordering table.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static int
mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+ struct mwifiex_private *priv = ctx->priv;
+ unsigned long flags;
int i;
- for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
- if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+ if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
return i;
+ }
+ }
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return -1;
}
@@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
struct reorder_tmr_cnxt *ctx =
from_timer(ctx, t, timer);
int start_win, seq_num;
- unsigned long flags;
ctx->timer_is_set = false;
- spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
seq_num = mwifiex_11n_find_last_seq_num(ctx);
- if (seq_num < 0) {
- spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+ if (seq_num < 0)
return;
- }
mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
start_win);
- spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
* If we get a TID, ta pair which is already present, dispatch all
* the packets and move the window size until the ssn
*/
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
if (!new_node)
@@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int prev_start_win, start_win, end_win, win_size;
u16 pkt_index;
bool init_window_shift = false;
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (pkt_type != PKT_TYPE_BAR)
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
@@ -651,8 +666,6 @@ done:
if (!tbl->timer_context.timer_is_set ||
prev_start_win != tbl->start_win)
mwifiex_11n_rxreorder_timer_restart(tbl);
-
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
@@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
mwifiex_dbg(priv->adapter, EVENT,
"event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
@@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
- unsigned long flags;
block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
add_ba_rsp->peer_mac_addr, tid);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return 0;
}
win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
>> BLOCKACKPARAM_WINSIZE_POS;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl) {
@@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
else
tbl->amsdu = false;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_dbg(priv->adapter, CMD,
"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
- &priv->rx_reorder_tbl_ptr, list)
+ &priv->rx_reorder_tbl_ptr, list) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ }
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
int tlv_buf_left = len;
int ret;
u8 *tmp;
- unsigned long flags;
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
@@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
tlv_bitmap_len);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_reor_tbl_ptr =
mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
tlv_rxba->mac);
if (!rx_reor_tbl_ptr) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
mwifiex_dbg(priv->adapter, ERROR,
"Can not find rx_reorder_tbl!");
return;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
for (i = 0; i < tlv_bitmap_len; i++) {
for (j = 0 ; j < 8; j++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index a83c5afc256a..5ce85d5727e4 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
if (!priv->ap_11n_enabled ||
(!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
(le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
ret = mwifiex_handle_uap_rx_forward(priv, skb);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* Reorder and send to kernel */
pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 7cbce03aa65b..aa426b838ffa 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -400,7 +400,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct ieee80211_txq *txq = sta->txq[i];
- struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ continue;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
spin_lock_bh(&mtxq->hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
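The mt76 guard matters because not every slot of a fixed-size per-station queue array is populated; dereferencing drv_priv on an empty slot would crash. A small self-contained sketch of the same skip-NULL-slots pattern (the types are invented for illustration):

#include <stdio.h>
#include <stddef.h>

struct txq  { void *drv_priv; };
struct mtxq { int send_bar; };

int main(void)
{
	struct mtxq m = { 0 };
	struct txq q = { &m };
	/* Some slots may legitimately be empty, as in sta->txq[]. */
	struct txq *queues[3] = { &q, NULL, &q };

	for (size_t i = 0; i < 3; i++) {
		if (!queues[i])
			continue;	/* guard before touching drv_priv */
		struct mtxq *mq = queues[i]->drv_priv;
		mq->send_bar = 1;
	}
	printf("send_bar=%d\n", m.send_bar);
	return 0;
}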
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index f4122c8fdd97..ef9b502ce576 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
if (rtl_c2h_fast_cmd(hw, skb)) {
rtl_c2h_content_parsing(hw, skb);
+ kfree_skb(skb);
return;
}
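The rtlwifi one-liner restores a simple ownership rule: a function that consumes a buffer must free it on every path that does not hand it onward, including early returns. A user-space sketch of the rule, with malloc/free standing in for skb allocation (fast_cmd/parse are stand-ins, not the rtlwifi API):

#include <stdlib.h>
#include <stdio.h>

struct buf { char data[32]; };

static int fast_cmd(struct buf *b) { (void)b; return 1; }
static void parse(struct buf *b)   { (void)b; }

/* Every path that does not queue the buffer must free it,
 * including the early-return fast path. */
static void handle(struct buf *b)
{
	if (fast_cmd(b)) {
		parse(b);
		free(b);	/* was leaked before a fix like the one above */
		return;
	}
	/* ...otherwise the buffer would be queued and freed later... */
	free(b);
}

int main(void)
{
	handle(malloc(sizeof(struct buf)));
	puts("done");
	return 0;
}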
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f17f602e6171..5b97cc946d70 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -905,7 +905,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
- BUG_ON(pull_to <= skb_headlen(skb));
+ BUG_ON(pull_to < skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
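Relaxing <= to < reflects the arithmetic on the following line: pull_to - skb_headlen(skb) is an unsigned subtraction, so only pull_to strictly below the headlen is fatal; equality just pulls zero bytes, which can now legitimately happen with empty frags. A tiny demonstration of the boundary, with assert standing in for BUG_ON:

#include <assert.h>
#include <stdio.h>

/* The pull length must stay non-negative; with unsigned arithmetic,
 * pull_to < headlen would wrap to a huge value instead. */
static unsigned int pull_len(unsigned int pull_to, unsigned int headlen)
{
	assert(pull_to >= headlen);	/* i.e. BUG_ON(pull_to < headlen) */
	return pull_to - headlen;
}

int main(void)
{
	printf("%u\n", pull_len(128, 64));	/* normal case: 64 */
	printf("%u\n", pull_len(64, 64));	/* empty frag: legal, pulls 0 */
	return 0;
}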
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index a90a9194ac4a..fed29de783e0 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1064,7 +1064,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
.regs = aer_regs,
};
- if (kfifo_in_spinlocked(&aer_recover_ring, &entry, sizeof(entry),
+ if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
&aer_recover_ring_lock))
schedule_work(&aer_recover_work);
else
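The AER fix hinges on kfifo semantics: when the fifo is declared over struct elements, the length argument of kfifo_in() counts elements, not bytes, so sizeof(entry) asked for dozens of records copied from past the single entry on the stack. A user-space sketch of the same bytes-versus-elements trap (this ring is a simplified stand-in for kfifo, not its API):

#include <stdio.h>

struct entry { int bus, devfn; };

static struct entry ring[16];
static unsigned int head;

/* 'n' counts elements of struct entry, mirroring kfifo_in(). */
static void ring_in(const struct entry *e, unsigned int n)
{
	for (unsigned int i = 0; i < n && head < 16; i++)
		ring[head++] = e[i];
}

int main(void)
{
	struct entry e = { 1, 2 };

	ring_in(&e, 1);			/* correct: one record */
	printf("queued %u\n", head);
	/* ring_in(&e, sizeof(e)); would copy sizeof(e) records,
	 * reading far past the single entry on the stack. */
	return 0;
}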
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 53d449076dee..ea87d739f534 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -191,7 +191,8 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
- meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+ meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
+ &bit);
ret = regmap_update_bits(pc->reg_pullen, reg,
BIT(bit), 0);
if (ret)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index 6838b38555a1..1bfb0ae6b387 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -33,7 +33,7 @@ enum {
}
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
{ \
.name = "gpio" #id, \
.pins = gpio##id##_pins, \
@@ -51,11 +51,12 @@ enum {
msm_mux_##f9 \
}, \
.nfuncs = 10, \
- .ctl_reg = base + REG_SIZE * id, \
- .io_reg = base + 0x4 + REG_SIZE * id, \
- .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \
- .intr_status_reg = base + 0xc + REG_SIZE * id, \
- .intr_target_reg = base + 0x8 + REG_SIZE * id, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .tile = _tile, \
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
@@ -82,6 +83,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = NORTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
@@ -1397,13 +1399,13 @@ static const struct msm_pingroup sdm660_groups[] = {
PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _),
PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _),
PINGROUP(113, SOUTH, _, _, _, _, _, _, _, _, _),
- SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
- SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
- SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
- SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
- SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
- SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
- SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x9b000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x9b000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x9b000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
};
static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 6624499eae72..4ada80317a3b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT11 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
};
static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index cd160f2ec75d..bcd30e2374f1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2364,7 +2364,7 @@ static int _bnx2fc_create(struct net_device *netdev,
if (!interface) {
printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
rc = -ENOMEM;
- goto ifput_err;
+ goto netdev_err;
}
if (is_vlan_dev(netdev)) {
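The bnx2fc change is a textbook goto-ladder fix: each error label must undo only the resources acquired before the failing step, so when interface creation itself fails, the code has to jump to the label that skips the interface teardown. A compact sketch of the idiom, with resources reduced to malloc/free:

#include <stdio.h>
#include <stdlib.h>

/* Unwind in reverse acquisition order; jumping one label too deep
 * releases a resource that was never acquired. */
static int create(int fail_iface)
{
	char *netdev = malloc(1);
	if (!netdev)
		return -1;

	char *iface = fail_iface ? NULL : malloc(1);
	if (!iface)
		goto netdev_err;	/* not an iface-releasing label:
					 * the interface was never created */

	/* ... use both ... */
	free(iface);
	free(netdev);
	return 0;

netdev_err:
	free(netdev);
	return -1;
}

int main(void)
{
	printf("fail path: %d\n", create(1));
	printf("ok path:   %d\n", create(0));
	return 0;
}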
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b658b9a5eb1e..d0ecc729a90a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4886,10 +4886,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
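The corrected test works because fc4_type is, as I read it, a capability bitmask: a target registering both FCP and NVMe carries both bits, so an equality comparison matches neither and the port would be set up for neither protocol. A tiny demo of the difference (the bit values here are illustrative):

#include <stdio.h>

#define FC4TYPE_FCP  (1u << 0)
#define FC4TYPE_NVME (1u << 1)

int main(void)
{
	unsigned int fc4_type = FC4TYPE_FCP | FC4TYPE_NVME; /* dual target */

	/* equality sees neither protocol... */
	printf("==: fcp=%d nvme=%d\n",
	       fc4_type == FC4TYPE_FCP, fc4_type == FC4TYPE_NVME);
	/* ...a bitwise test sees both */
	printf(" &: fcp=%d nvme=%d\n",
	       !!(fc4_type & FC4TYPE_FCP), !!(fc4_type & FC4TYPE_NVME));
	return 0;
}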
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
index a7a34e89c42d..3252efa422f9 100644
--- a/drivers/staging/media/sunxi/cedrus/Kconfig
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_SUNXI_CEDRUS
depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
depends on HAS_DMA
depends on OF
+ depends on MEDIA_CONTROLLER_REQUEST_API
select SUNXI_SRAM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 32adbcbe6175..07520a2ce179 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -255,10 +255,10 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(dev->dev, res);
- if (!dev->base) {
+ if (IS_ERR(dev->base)) {
v4l2_err(&dev->v4l2_dev, "Failed to map registers\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(dev->base);
goto err_sram;
}
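The cedrus fix follows from devm_ioremap_resource() returning an ERR_PTR-encoded errno rather than NULL on failure, so the !dev->base test could never fire. A stripped-down user-space rendition of the ERR_PTR convention (simplified from the kernel's err.h, for illustration only):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
	/* errors live in the top MAX_ERRNO addresses */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *map_registers(int fail)
{
	return fail ? ERR_PTR(-12 /* ENOMEM */)
		    : (void *)(uintptr_t)0x1000;
}

int main(void)
{
	void *base = map_registers(1);

	printf("!base  -> %d (never detects the failure)\n", !base);
	if (IS_ERR(base))
		printf("IS_ERR -> error %ld\n", PTR_ERR(base));
	return 0;
}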
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index c4111a98f1a7..2d26ae80e202 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -424,7 +424,7 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- data->nr_sensors = 2;
+ data->nr_sensors = 1;
data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
data->nr_sensors, GFP_KERNEL);
@@ -589,7 +589,7 @@ static int hisi_thermal_probe(struct platform_device *pdev)
return ret;
}
- ret = platform_get_irq_byname(pdev, sensor->irq_name);
+ ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 47623da0f91b..bbd73c5a4a4e 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -241,8 +241,8 @@ static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
sensor->t0 = TS1_T0_VAL1;
/* Retrieve fmt0 and put it on Hz */
- sensor->fmt0 = ADJUST * readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET)
- & TS1_FMT0_MASK;
+ sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base +
+ DTS_T0VALR1_OFFSET) & TS1_FMT0_MASK);
/* Retrieve ramp coefficient */
sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
@@ -532,6 +532,10 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
if (ret)
return ret;
+ ret = stm_thermal_read_factory_settings(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
ret = stm_thermal_calibration(sensor);
if (ret)
goto thermal_unprepare;
@@ -636,10 +640,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
/* Populate sensor */
sensor->base = base;
- ret = stm_thermal_read_factory_settings(sensor);
- if (ret)
- return ret;
-
sensor->clk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(sensor->clk)) {
dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index c7601750c37f..e2eece693655 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1551,7 +1551,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
portsc_buf[port_index] = 0;
/* Bail out if a USB3 port has a new device in link training */
- if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+ if ((hcd->speed >= HCD_USB3) &&
+ (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a1638f2aa338..652dc36e3012 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1854,6 +1854,8 @@ struct xhci_hcd {
struct xhci_hub usb3_rhub;
/* support xHCI 1.0 spec USB2 hardware LPM */
unsigned hw_lpm_support:1;
+ /* Broken Suspend flag for SNPS Suspend resume issue */
+ unsigned broken_suspend:1;
/* cached usb2 extened protocol capabilites */
u32 *ext_caps;
unsigned int num_ext_caps;
@@ -1871,8 +1873,6 @@ struct xhci_hcd {
void *dbc;
/* platform-specific data -- must come last */
unsigned long priv[0] __aligned(sizeof(s64));
- /* Broken Suspend flag for SNPS Suspend resume issue */
- u8 broken_suspend;
};
/* Platform specific overrides to generic XHCI hc_driver ops */
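Moving broken_suspend is required because priv[] is a variable-length tail: the core allocates sizeof(struct xhci_hcd) plus driver-private bytes, so any member declared after it aliases that private area. A user-space sketch of the idiom, using a C99 flexible array member in place of the kernel's priv[0]:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev {
	unsigned broken_suspend:1;	/* real fields go before the tail */
	char priv[];			/* flexible array member: must be last */
};

int main(void)
{
	size_t priv_size = 64;
	struct dev *d = calloc(1, sizeof(*d) + priv_size);

	if (!d)
		return 1;
	memset(d->priv, 0xab, priv_size); /* private area is the tail */
	d->broken_suspend = 1;		  /* untouched by the memset */
	printf("flag=%u\n", d->broken_suspend);
	free(d);
	return 0;
}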
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e24ff16d4147..1ce27f3ff7a7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1164,6 +1164,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -1328,6 +1332,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1531,6 +1536,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1758,6 +1764,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
.driver_info = RSVD(5) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1940,7 +1947,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
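The new option.c entries lean on driver_info bit flags: NCTRL() marks interfaces without modem-control support and RSVD() marks interfaces the serial driver must leave alone (typically QMI/MBIM network functions). A rough user-space sketch of such per-interface flag words; the shifts are illustrative, not option.c's exact layout:

#include <stdio.h>

/* One flags word per device entry, with a bit per interface number. */
#define RSVD(ifnum)  (1u << (ifnum))		/* don't bind this interface */
#define NCTRL(ifnum) (1u << ((ifnum) + 8))	/* no modem-control requests */

static int probe(unsigned int flags, int ifnum)
{
	if (flags & RSVD(ifnum)) {
		printf("if%d: reserved, skipping\n", ifnum);
		return -1;
	}
	printf("if%d: bound%s\n", ifnum,
	       (flags & NCTRL(ifnum)) ? " (no modem control)" : "");
	return 0;
}

int main(void)
{
	unsigned int fibocom = RSVD(4) | RSVD(5) | RSVD(6);

	for (int i = 3; i <= 6; i++)
		probe(fibocom, i);
	return 0;
}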
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ab11b2bee273..ad7a6f475a44 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -513,7 +513,13 @@ static void vhost_net_busy_poll(struct vhost_net *net,
struct socket *sock;
struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
- mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
+ /* Try to hold the vq mutex of the paired virtqueue. We can't
+ * use mutex_lock() here since we could not guarantee a
+ * consistent lock ordering.
+ */
+ if (!mutex_trylock(&vq->mutex))
+ return;
+
vhost_disable_notify(&net->dev, vq);
sock = rvq->private_data;
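mutex_trylock() works here because the RX and TX paths would otherwise take the two virtqueue mutexes in opposite orders, a classic ABBA inversion; failing to get the peer's lock and skipping one poll is harmless, while blocking could deadlock. A pthread sketch of the pattern (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tx = PTHREAD_MUTEX_INITIALIZER;

/* Called with 'mine' already held; the peer may hold 'other' while
 * waiting for 'mine', so blocking here could deadlock. Trying and
 * bailing out keeps the ordering safe. */
static void busy_poll(pthread_mutex_t *mine, pthread_mutex_t *other,
		      const char *name)
{
	(void)mine;
	if (pthread_mutex_trylock(other) != 0) {
		printf("%s: peer busy, skip polling\n", name);
		return;
	}
	printf("%s: polled peer\n", name);
	pthread_mutex_unlock(other);
}

int main(void)
{
	pthread_mutex_lock(&rx);
	busy_poll(&rx, &tx, "rx");
	pthread_mutex_unlock(&rx);
	return 0;
}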
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 6b98d8e3a5bf..55e5aa662ad5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -295,11 +295,8 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
{
int i;
- for (i = 0; i < d->nvqs; ++i) {
- mutex_lock(&d->vqs[i]->mutex);
+ for (i = 0; i < d->nvqs; ++i)
__vhost_vq_meta_reset(d->vqs[i]);
- mutex_unlock(&d->vqs[i]->mutex);
- }
}
static void vhost_vq_reset(struct vhost_dev *dev,
@@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
#define vhost_get_used(vq, x, ptr) \
vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_lock_nested(&d->vqs[i]->mutex, i);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_unlock(&d->vqs[i]->mutex);
+}
+
static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
@@ -976,6 +987,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
int ret = 0;
mutex_lock(&dev->mutex);
+ vhost_dev_lock_vqs(dev);
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
if (!dev->iotlb) {
@@ -1009,6 +1021,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
break;
}
+ vhost_dev_unlock_vqs(dev);
mutex_unlock(&dev->mutex);
return ret;
@@ -2220,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
return -EFAULT;
}
if (unlikely(vq->log_used)) {
+ /* Make sure used idx is seen before log. */
+ smp_wmb();
/* Log used index update. */
log_write(vq->log_base,
vq->log_addr + offsetof(struct vring_used, idx),
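The smp_wmb() orders two plain stores: the used-ring index update must be visible before the dirty-log write that tells a consumer to go look at it. A C11 user-space analogue using release/acquire in place of the kernel barriers (illustrative, not vhost's code):

#include <stdatomic.h>
#include <stdio.h>

static int used_idx;		/* data: the used-ring index */
static atomic_int log_dirty;	/* flag: "index page is dirty" */

static void publish(int idx)
{
	used_idx = idx;
	/* Release ordering plays the role of smp_wmb(): the used_idx
	 * store cannot be reordered after the log-dirty store. */
	atomic_store_explicit(&log_dirty, 1, memory_order_release);
}

static void consume(void)
{
	if (atomic_load_explicit(&log_dirty, memory_order_acquire))
		printf("saw used_idx=%d\n", used_idx);	/* never stale */
}

int main(void)
{
	publish(42);
	consume();
	return 0;
}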
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 678b27063198..f9ef0673a083 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -562,7 +562,30 @@ static int pwm_backlight_probe(struct platform_device *pdev)
goto err_alloc;
}
- if (!data->levels) {
+ if (data->levels) {
+ /*
+ * For the DT case, only when brightness levels is defined
+ * data->levels is filled. For the non-DT case, data->levels
+ * can come from platform data, though that is not usual.
+ */
+ for (i = 0; i <= data->max_brightness; i++) {
+ if (data->levels[i] > pb->scale)
+ pb->scale = data->levels[i];
+
+ pb->levels = data->levels;
+ }
+ } else if (!data->max_brightness) {
+ /*
+ * If no brightness levels are provided and max_brightness is
+ * not set, use the default brightness table. For the DT case,
+ * max_brightness is set to 0 when brightness levels is not
+ * specified. For the non-DT case, max_brightness is usually
+ * set to some value.
+ */
+
+ /* Get the PWM period (in nanoseconds) */
+ pwm_get_state(pb->pwm, &state);
+
ret = pwm_backlight_brightness_default(&pdev->dev, data,
state.period);
if (ret < 0) {
@@ -570,13 +593,19 @@ static int pwm_backlight_probe(struct platform_device *pdev)
"failed to setup default brightness table\n");
goto err_alloc;
}
- }
- for (i = 0; i <= data->max_brightness; i++) {
- if (data->levels[i] > pb->scale)
- pb->scale = data->levels[i];
+ for (i = 0; i <= data->max_brightness; i++) {
+ if (data->levels[i] > pb->scale)
+ pb->scale = data->levels[i];
- pb->levels = data->levels;
+ pb->levels = data->levels;
+ }
+ } else {
+ /*
+ * That only happens for the non-DT case, where platform data
+ * sets the max_brightness value.
+ */
+ pb->scale = data->max_brightness;
}
pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
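Throughout the pwm_bl changes, pb->scale ends up as the largest value in the brightness table (or max_brightness when no table exists), and brightness-to-duty conversion, like the lth_brightness line above, scales the PWM period by a level/scale fraction. A small worked example of that arithmetic (the values are made up):

#include <stdio.h>

int main(void)
{
	unsigned int levels[] = { 0, 4, 16, 64, 255 };
	unsigned int n = sizeof(levels) / sizeof(levels[0]);
	unsigned int period_ns = 1000000;	/* illustrative PWM period */
	unsigned int scale = 0;

	/* scale = max(levels[]), mirroring the loop in the hunk above */
	for (unsigned int i = 0; i < n; i++)
		if (levels[i] > scale)
			scale = levels[i];

	/* each level maps to a duty cycle as a fraction of the period */
	for (unsigned int i = 0; i < n; i++)
		printf("level %3u -> duty %7u ns\n",
		       levels[i], levels[i] * (period_ns / scale));
	return 0;
}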