-rw-r--r-- | arch/s390/include/asm/qdio.h | 6
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 44
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 44
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c | 6
-rw-r--r-- | drivers/net/ethernet/realtek/r8169_main.c | 4
-rw-r--r-- | drivers/net/usb/r8152.c | 184
-rw-r--r-- | drivers/s390/cio/qdio.h | 3
-rw-r--r-- | drivers/s390/cio/qdio_main.c | 75
-rw-r--r-- | drivers/s390/cio/qdio_setup.c | 2
-rw-r--r-- | drivers/s390/net/qeth_core.h | 52
-rw-r--r-- | drivers/s390/net/qeth_core_main.c | 410
-rw-r--r-- | drivers/s390/net/qeth_ethtool.c | 2
-rw-r--r-- | drivers/s390/net/qeth_l2_main.c | 12
-rw-r--r-- | drivers/s390/net/qeth_l3_main.c | 9
-rw-r--r-- | include/linux/netdevice.h | 2
-rw-r--r-- | include/uapi/linux/net_dropmon.h | 2
-rw-r--r-- | net/core/drop_monitor.c | 14
-rw-r--r-- | net/core/skmsg.c | 3
-rw-r--r-- | net/ipv6/ip6_input.c | 4
-rw-r--r-- | net/rds/ib_recv.c | 11
-rw-r--r-- | net/rds/rds.h | 9
-rw-r--r-- | net/rds/recv.c | 22
23 files changed, 600 insertions(+), 323 deletions(-)
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index f647d565bd6d..78e8a888306d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -16,6 +16,7 @@
 #define QDIO_MAX_QUEUES_PER_IRQ 4
 #define QDIO_MAX_BUFFERS_PER_Q 128
 #define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
+#define QDIO_BUFNR(num) ((num) & QDIO_MAX_BUFFERS_MASK)
 #define QDIO_MAX_ELEMENTS_PER_BUFFER 16
 #define QDIO_SBAL_SIZE 256
@@ -359,7 +360,7 @@ struct qdio_initialize {
 	qdio_handler_t *output_handler;
 	void (**queue_start_poll_array) (struct ccw_device *, int,
 					 unsigned long);
-	int scan_threshold;
+	unsigned int scan_threshold;
 	unsigned long int_parm;
 	struct qdio_buffer **input_sbal_addr_array;
 	struct qdio_buffer **output_sbal_addr_array;
@@ -416,6 +417,9 @@ extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
 extern int qdio_start_irq(struct ccw_device *, int);
 extern int qdio_stop_irq(struct ccw_device *, int);
 extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
+extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
+			      bool is_input, unsigned int *bufnr,
+			      unsigned int *error);
 extern int qdio_shutdown(struct ccw_device *, int);
 extern int qdio_free(struct ccw_device *);
 extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4c790ffa1a73..b9ad43d3dc51 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4985,6 +4985,7 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 	struct hwrm_vnic_qcaps_input req = {0};
 	int rc;
 
+	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
 	if (bp->hwrm_spec_code < 0x10600)
 		return 0;
 
@@ -5004,8 +5005,6 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 		if (bp->max_tpa_v2)
 			bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats_ext);
-		else
-			bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 9d64c4304e5e..dde17be33767 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -364,6 +364,50 @@ static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
 	HCLGE_OPC_DFX_SSU_REG_2
 };
 
+static const struct key_info meta_data_key_info[] = {
+	{ PACKET_TYPE_ID, 6},
+	{ IP_FRAGEMENT, 1},
+	{ ROCE_TYPE, 1},
+	{ NEXT_KEY, 5},
+	{ VLAN_NUMBER, 2},
+	{ SRC_VPORT, 12},
+	{ DST_VPORT, 12},
+	{ TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+	{ OUTER_DST_MAC, 48},
+	{ OUTER_SRC_MAC, 48},
+	{ OUTER_VLAN_TAG_FST, 16},
+	{ OUTER_VLAN_TAG_SEC, 16},
+	{ OUTER_ETH_TYPE, 16},
+	{ OUTER_L2_RSV, 16},
+	{ OUTER_IP_TOS, 8},
+	{ OUTER_IP_PROTO, 8},
+	{ OUTER_SRC_IP, 32},
+	{ OUTER_DST_IP, 32},
+	{ OUTER_L3_RSV, 16},
+	{ OUTER_SRC_PORT, 16},
+	{ OUTER_DST_PORT, 16},
+	{ OUTER_L4_RSV, 32},
+	{ OUTER_TUN_VNI, 24},
+	{ OUTER_TUN_FLOW_ID, 8},
+	{ INNER_DST_MAC, 48},
+	{ INNER_SRC_MAC, 48},
+	{ INNER_VLAN_TAG_FST, 16},
+	{ INNER_VLAN_TAG_SEC, 16},
+	{ INNER_ETH_TYPE, 16},
+	{ INNER_L2_RSV, 16},
+	{ INNER_IP_TOS, 8},
+	{ INNER_IP_PROTO, 8},
+	{ INNER_SRC_IP, 32},
+	{ INNER_DST_IP, 32},
+	{ INNER_L3_RSV, 16},
+	{ INNER_SRC_PORT, 16},
+	{ INNER_DST_PORT, 16},
+	{ INNER_L4_RSV, 32},
+};
+
 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
 {
 #define HCLGE_MAC_CMD_NUM 21
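The hclge hunk above pairs with the header hunk below: the lookup tables move out of hclge_main.h into hclge_main.c. A static const array defined in a header is instantiated once per translation unit that includes it; keeping the definition in the single .c file that uses the tables avoids that duplication. A minimal sketch of the pattern, with hypothetical names rather than the driver's:

	/* example.h: expose only the type, not the table */
	struct key_info {
		int key_id;
		unsigned char key_length;	/* in bits */
	};

	/* example.c: the single user owns the definition */
	static const struct key_info example_tuple_keys[] = {
		{ 0, 48 },	/* hypothetical entries */
		{ 1, 16 },
	};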
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7c28933e6f3d..7ff03b9605e4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -539,50 +539,6 @@ struct key_info {
 	u8 key_length; /* use bit as unit */
 };
 
-static const struct key_info meta_data_key_info[] = {
-	{ PACKET_TYPE_ID, 6},
-	{ IP_FRAGEMENT, 1},
-	{ ROCE_TYPE, 1},
-	{ NEXT_KEY, 5},
-	{ VLAN_NUMBER, 2},
-	{ SRC_VPORT, 12},
-	{ DST_VPORT, 12},
-	{ TUNNEL_PACKET, 1},
-};
-
-static const struct key_info tuple_key_info[] = {
-	{ OUTER_DST_MAC, 48},
-	{ OUTER_SRC_MAC, 48},
-	{ OUTER_VLAN_TAG_FST, 16},
-	{ OUTER_VLAN_TAG_SEC, 16},
-	{ OUTER_ETH_TYPE, 16},
-	{ OUTER_L2_RSV, 16},
-	{ OUTER_IP_TOS, 8},
-	{ OUTER_IP_PROTO, 8},
-	{ OUTER_SRC_IP, 32},
-	{ OUTER_DST_IP, 32},
-	{ OUTER_L3_RSV, 16},
-	{ OUTER_SRC_PORT, 16},
-	{ OUTER_DST_PORT, 16},
-	{ OUTER_L4_RSV, 32},
-	{ OUTER_TUN_VNI, 24},
-	{ OUTER_TUN_FLOW_ID, 8},
-	{ INNER_DST_MAC, 48},
-	{ INNER_SRC_MAC, 48},
-	{ INNER_VLAN_TAG_FST, 16},
-	{ INNER_VLAN_TAG_SEC, 16},
-	{ INNER_ETH_TYPE, 16},
-	{ INNER_L2_RSV, 16},
-	{ INNER_IP_TOS, 8},
-	{ INNER_IP_PROTO, 8},
-	{ INNER_SRC_IP, 32},
-	{ INNER_DST_IP, 32},
-	{ INNER_L3_RSV, 16},
-	{ INNER_SRC_PORT, 16},
-	{ INNER_DST_PORT, 16},
-	{ INNER_L4_RSV, 32},
-};
-
 #define MAX_KEY_LENGTH 400
 #define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
 #define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
index cf08d02703fb..583dc7e2aca8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
@@ -12,7 +12,7 @@ static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len,
 	int bytes_returned;
 	int block_id;
 
-	if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len % HV_CONFIG_BLOCK_SIZE_MAX)
+	if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len != HV_CONFIG_BLOCK_SIZE_MAX)
 		return -EINVAL;
 
 	block_id = offset / HV_CONFIG_BLOCK_SIZE_MAX;
@@ -25,8 +25,8 @@ static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len,
 				 HV_CONFIG_BLOCK_SIZE_MAX, block_id);
 
 	/* Make sure len bytes were read successfully  */
-	if (read)
-		rc |= !(len == bytes_returned);
+	if (read && !rc && len != bytes_returned)
+		rc = -EIO;
 
 	if (rc) {
 		mlx5_core_err(dev, "Failed to %s hv config, err = %d, len = %d, offset = %d\n",
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 910944120a65..6182e7d33e01 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5822,6 +5822,10 @@ process_pkt:
 			skb->tail += pkt_size;
 			skb->len = pkt_size;
 
+			dma_sync_single_for_device(tp_to_dev(tp),
+						   le64_to_cpu(desc->addr),
+						   pkt_size, DMA_FROM_DEVICE);
+
 			rtl8169_rx_csum(skb, status);
 			skb->protocol = eth_type_trans(skb, dev);
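The r8169 hunk above adds the missing dma_sync_single_for_device() on the RX copy-break path: after the CPU has copied the frame out of the DMA buffer, ownership must be handed back to the device before the buffer is reused for receive DMA. A reduced sketch of the pairing, under an assumed driver context with hypothetical names:

	static struct sk_buff *rx_copybreak(struct device *dma_dev,
					    struct napi_struct *napi, void *buf,
					    dma_addr_t addr, unsigned int len)
	{
		struct sk_buff *skb;

		/* give the CPU a coherent view before reading the frame */
		dma_sync_single_for_cpu(dma_dev, addr, len, DMA_FROM_DEVICE);

		skb = napi_alloc_skb(napi, len);
		if (skb)
			skb_put_data(skb, buf, len);

		/* return ownership to the device so the buffer can be reused */
		dma_sync_single_for_device(dma_dev, addr, len, DMA_FROM_DEVICE);
		return skb;
	}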
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 1aa61610f0bb..17f0e9e98697 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -751,6 +751,7 @@ struct r8152 {
 	atomic_t rx_count;
 
+	bool eee_en;
 	int intr_interval;
 	u32 saved_wolopts;
 	u32 msg_enable;
@@ -762,6 +763,7 @@ struct r8152 {
 	u16 ocp_base;
 	u16 speed;
+	u16 eee_adv;
 	u8 *intr_buff;
 	u8 version;
 	u8 duplex;
@@ -3200,10 +3202,76 @@ static void r8152_eee_en(struct r8152 *tp, bool enable)
 	ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
 }
 
-static void r8152b_enable_eee(struct r8152 *tp)
+static void r8153_eee_en(struct r8152 *tp, bool enable)
+{
+	u32 ocp_data;
+	u16 config;
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+	config = ocp_reg_read(tp, OCP_EEE_CFG);
+
+	if (enable) {
+		ocp_data |= EEE_RX_EN | EEE_TX_EN;
+		config |= EEE10_EN;
+	} else {
+		ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+		config &= ~EEE10_EN;
+	}
+
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+	ocp_reg_write(tp, OCP_EEE_CFG, config);
+}
+
+static void r8153b_eee_en(struct r8152 *tp, bool enable)
+{
+	r8153_eee_en(tp, enable);
+
+	if (enable)
+		r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0);
+	else
+		r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE);
+}
+
+static void rtl_eee_enable(struct r8152 *tp, bool enable)
 {
-	r8152_eee_en(tp, true);
-	r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
+	switch (tp->version) {
+	case RTL_VER_01:
+	case RTL_VER_02:
+	case RTL_VER_07:
+		if (enable) {
+			r8152_eee_en(tp, true);
+			r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+					tp->eee_adv);
+		} else {
+			r8152_eee_en(tp, false);
+			r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
+		}
+		break;
+	case RTL_VER_03:
+	case RTL_VER_04:
+	case RTL_VER_05:
+	case RTL_VER_06:
+		if (enable) {
+			r8153_eee_en(tp, true);
+			ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv);
+		} else {
+			r8153_eee_en(tp, false);
+			ocp_reg_write(tp, OCP_EEE_ADV, 0);
+		}
+		break;
+	case RTL_VER_08:
+	case RTL_VER_09:
+		if (enable) {
+			r8153b_eee_en(tp, true);
+			ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv);
+		} else {
+			r8153b_eee_en(tp, false);
+			ocp_reg_write(tp, OCP_EEE_ADV, 0);
+		}
+		break;
+	default:
+		break;
+	}
 }
 
 static void r8152b_enable_fc(struct r8152 *tp)
@@ -3224,7 +3292,7 @@ static void rtl8152_disable(struct r8152 *tp)
 
 static void r8152b_hw_phy_cfg(struct r8152 *tp)
 {
-	r8152b_enable_eee(tp);
+	rtl_eee_enable(tp, tp->eee_en);
 	r8152_aldps_en(tp, true);
 	r8152b_enable_fc(tp);
 
@@ -3418,36 +3486,6 @@ static void r8153b_aldps_en(struct r8152 *tp, bool enable)
 		r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_ALDPS);
 }
 
-static void r8153_eee_en(struct r8152 *tp, bool enable)
-{
-	u32 ocp_data;
-	u16 config;
-
-	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
-	config = ocp_reg_read(tp, OCP_EEE_CFG);
-
-	if (enable) {
-		ocp_data |= EEE_RX_EN | EEE_TX_EN;
-		config |= EEE10_EN;
-	} else {
-		ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
-		config &= ~EEE10_EN;
-	}
-
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
-	ocp_reg_write(tp, OCP_EEE_CFG, config);
-}
-
-static void r8153b_eee_en(struct r8152 *tp, bool enable)
-{
-	r8153_eee_en(tp, enable);
-
-	if (enable)
-		r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0);
-	else
-		r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE);
-}
-
 static void r8153b_enable_fc(struct r8152 *tp)
 {
 	r8152b_enable_fc(tp);
@@ -3463,8 +3501,7 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
 	r8153_aldps_en(tp, false);
 
 	/* disable EEE before updating the PHY parameters */
-	r8153_eee_en(tp, false);
-	ocp_reg_write(tp, OCP_EEE_ADV, 0);
+	rtl_eee_enable(tp, false);
 
 	if (tp->version == RTL_VER_03) {
 		data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -3495,8 +3532,8 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
 	sram_write(tp, SRAM_10M_AMP1, 0x00af);
 	sram_write(tp, SRAM_10M_AMP2, 0x0208);
 
-	r8153_eee_en(tp, true);
-	ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+	if (tp->eee_en)
+		rtl_eee_enable(tp, true);
 
 	r8153_aldps_en(tp, true);
 	r8152b_enable_fc(tp);
@@ -3536,8 +3573,7 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
 	r8153b_aldps_en(tp, false);
 
 	/* disable EEE before updating the PHY parameters */
-	r8153b_eee_en(tp, false);
-	ocp_reg_write(tp, OCP_EEE_ADV, 0);
+	rtl_eee_enable(tp, false);
 
 	r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
 
@@ -3599,8 +3635,8 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
 
 	r8153b_ups_flags_w1w0(tp, ups_flags, 0);
 
-	r8153b_eee_en(tp, true);
-	ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+	if (tp->eee_en)
+		rtl_eee_enable(tp, true);
 
 	r8153b_aldps_en(tp, true);
 	r8153b_enable_fc(tp);
@@ -4891,7 +4927,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 
 static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
 {
-	u32 ocp_data, lp, adv, supported = 0;
+	u32 lp, adv, supported = 0;
 	u16 val;
 
 	val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
@@ -4903,13 +4939,10 @@ static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
 	val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
 	lp = mmd_eee_adv_to_ethtool_adv_t(val);
 
-	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
-	ocp_data &= EEE_RX_EN | EEE_TX_EN;
-
-	eee->eee_enabled = !!ocp_data;
+	eee->eee_enabled = tp->eee_en;
 	eee->eee_active = !!(supported & adv & lp);
 	eee->supported = supported;
-	eee->advertised = adv;
+	eee->advertised = tp->eee_adv;
 	eee->lp_advertised = lp;
 
 	return 0;
@@ -4919,19 +4952,17 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
 {
 	u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
 
-	r8152_eee_en(tp, eee->eee_enabled);
-
-	if (!eee->eee_enabled)
-		val = 0;
+	tp->eee_en = eee->eee_enabled;
+	tp->eee_adv = val;
 
-	r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+	rtl_eee_enable(tp, tp->eee_en);
 
 	return 0;
 }
 
 static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
 {
-	u32 ocp_data, lp, adv, supported = 0;
+	u32 lp, adv, supported = 0;
 	u16 val;
 
 	val = ocp_reg_read(tp, OCP_EEE_ABLE);
@@ -4943,46 +4974,15 @@ static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
 	val = ocp_reg_read(tp, OCP_EEE_LPABLE);
 	lp = mmd_eee_adv_to_ethtool_adv_t(val);
 
-	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
-	ocp_data &= EEE_RX_EN | EEE_TX_EN;
-
-	eee->eee_enabled = !!ocp_data;
+	eee->eee_enabled = tp->eee_en;
 	eee->eee_active = !!(supported & adv & lp);
 	eee->supported = supported;
-	eee->advertised = adv;
+	eee->advertised = tp->eee_adv;
 	eee->lp_advertised = lp;
 
 	return 0;
 }
 
-static int r8153_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
-{
-	u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
-
-	r8153_eee_en(tp, eee->eee_enabled);
-
-	if (!eee->eee_enabled)
-		val = 0;
-
-	ocp_reg_write(tp, OCP_EEE_ADV, val);
-
-	return 0;
-}
-
-static int r8153b_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
-{
-	u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
-
-	r8153b_eee_en(tp, eee->eee_enabled);
-
-	if (!eee->eee_enabled)
-		val = 0;
-
-	ocp_reg_write(tp, OCP_EEE_ADV, val);
-
-	return 0;
-}
-
 static int rtl_ethtool_get_eee(struct net_device *net,
 			       struct ethtool_eee *edata)
 {
@@ -5353,6 +5353,8 @@ static int rtl_ops_init(struct r8152 *tp)
 		ops->hw_phy_cfg = r8152b_hw_phy_cfg;
 		ops->autosuspend_en = rtl_runtime_suspend_enable;
 		tp->rx_buf_sz = 16 * 1024;
+		tp->eee_en = true;
+		tp->eee_adv = MDIO_EEE_100TX;
 		break;
 
 	case RTL_VER_03:
@@ -5366,11 +5368,13 @@ static int rtl_ops_init(struct r8152 *tp)
 		ops->down = rtl8153_down;
 		ops->unload = rtl8153_unload;
 		ops->eee_get = r8153_get_eee;
-		ops->eee_set = r8153_set_eee;
+		ops->eee_set = r8152_set_eee;
 		ops->in_nway = rtl8153_in_nway;
 		ops->hw_phy_cfg = r8153_hw_phy_cfg;
 		ops->autosuspend_en = rtl8153_runtime_enable;
 		tp->rx_buf_sz = 32 * 1024;
+		tp->eee_en = true;
+		tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
 		break;
 
 	case RTL_VER_08:
@@ -5382,11 +5386,13 @@ static int rtl_ops_init(struct r8152 *tp)
 		ops->down = rtl8153b_down;
 		ops->unload = rtl8153b_unload;
 		ops->eee_get = r8153_get_eee;
-		ops->eee_set = r8153b_set_eee;
+		ops->eee_set = r8152_set_eee;
 		ops->in_nway = rtl8153_in_nway;
 		ops->hw_phy_cfg = r8153b_hw_phy_cfg;
 		ops->autosuspend_en = rtl8153b_runtime_enable;
 		tp->rx_buf_sz = 32 * 1024;
+		tp->eee_en = true;
+		tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
 		break;
 
 	default:
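The r8152 rework above collapses the per-chip ethtool set_eee callbacks into one r8152_set_eee() that only records the requested policy in tp->eee_en / tp->eee_adv and then calls rtl_eee_enable(), which dispatches on tp->version. PHY reconfiguration paths (the *_hw_phy_cfg() functions) can then replay the cached policy instead of hard-coding an advertisement. The shape of the pattern, as a hedged sketch with hypothetical names:

	struct hypo_nic {
		bool eee_en;	/* administrative EEE state */
		u16 eee_adv;	/* cached MDIO_EEE_* advertisement bits */
	};

	static void hypo_write_eee_adv(struct hypo_nic *nic, u16 adv);	/* stub */
	static void hypo_write_eee_mac_en(struct hypo_nic *nic, bool on);	/* stub */

	static void hypo_eee_apply(struct hypo_nic *nic)
	{
		/* stand-in for the per-version r8152/r8153/r8153b paths */
		hypo_write_eee_adv(nic, nic->eee_en ? nic->eee_adv : 0);
		hypo_write_eee_mac_en(nic, nic->eee_en);
	}

	static int hypo_set_eee(struct hypo_nic *nic, const struct ethtool_eee *eee)
	{
		nic->eee_en = eee->eee_enabled;
		nic->eee_adv = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
		hypo_eee_apply(nic);	/* take effect immediately */
		return 0;
	}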
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a06944399865..a58b45df95d7 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -206,8 +206,6 @@ struct qdio_output_q {
 	struct qdio_outbuf_state *sbal_state;
 	/* timer to check for more outbound work */
 	struct timer_list timer;
-	/* used SBALs before tasklet schedule */
-	int scan_threshold;
 };
 
 /*
@@ -295,6 +293,7 @@ struct qdio_irq {
 	struct qdio_ssqd_desc ssqd_desc;
 	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
 
+	unsigned int scan_threshold;	/* used SBALs before tasklet schedule */
 	int perf_stat_enabled;
 
 	struct qdr *qdr;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4142c85e77d8..5b63c505a2f7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -647,8 +647,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
 		qperf_inc(q, outbound_handler);
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
 			      start, count);
-		if (q->u.out.use_cq)
-			qdio_handle_aobs(q, start, count);
 	}
 
 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
@@ -774,8 +772,11 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
 
 	count = get_outbound_buffer_frontier(q, start);
 
-	if (count)
+	if (count) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
+		if (q->u.out.use_cq)
+			qdio_handle_aobs(q, start, count);
+	}
 
 	return count;
 }
@@ -879,7 +880,7 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
 	struct qdio_q *out;
 	int i;
 
-	if (!pci_out_supported(irq))
+	if (!pci_out_supported(irq) || !irq->scan_threshold)
 		return;
 
 	for_each_output_queue(irq, out, i)
@@ -972,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 		}
 	}
 
-	if (!pci_out_supported(irq_ptr))
+	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
 		return;
 
 	for_each_output_queue(irq_ptr, q, i) {
@@ -1527,6 +1528,7 @@ set:
 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 			   int bufnr, int count)
 {
+	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
 	unsigned char state = 0;
 	int used, rc = 0;
 
@@ -1565,8 +1567,12 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		rc = qdio_kick_outbound_q(q, 0);
 	}
 
+	/* Let drivers implement their own completion scanning: */
+	if (!scan_threshold)
+		return rc;
+
 	/* in case of SIGA errors we must process the error immediately */
-	if (used >= q->u.out.scan_threshold || rc)
+	if (used >= scan_threshold || rc)
 		qdio_tasklet_schedule(q);
 	else
 		/* free the SBALs in case of no further traffic */
@@ -1655,6 +1661,44 @@ rescan:
 }
 EXPORT_SYMBOL(qdio_start_irq);
 
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+				unsigned int *error)
+{
+	unsigned int start = q->first_to_check;
+	int count;
+
+	count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+				qdio_outbound_q_moved(q, start);
+	if (count == 0)
+		return 0;
+
+	*bufnr = start;
+	*error = q->qdio_error;
+
+	/* for the next time */
+	q->first_to_check = add_buf(start, count);
+	q->qdio_error = 0;
+
+	return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+		       unsigned int *bufnr, unsigned int *error)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct qdio_q *q;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
+
+	return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
+
 /**
  * qdio_get_next_buffers - process input buffers
  * @cdev: associated ccw_device for the qdio subchannel
@@ -1672,13 +1716,10 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
 {
 	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-	unsigned int start;
-	int count;
 
 	if (!irq_ptr)
 		return -ENODEV;
 	q = irq_ptr->input_qs[nr];
-	start = q->first_to_check;
 
 	/*
 	 * Cannot rely on automatic sync after interrupt since queues may
@@ -1689,25 +1730,11 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
 
 	qdio_check_outbound_pci_queues(irq_ptr);
 
-	count = qdio_inbound_q_moved(q, start);
-	if (count == 0)
-		return 0;
-
-	start = add_buf(start, count);
-	q->first_to_check = start;
-
 	/* Note: upper-layer MUST stop processing immediately here ... */
 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return -EIO;
 
-	*bufnr = q->first_to_kick;
-	*error = q->qdio_error;
-
-	/* for the next time */
-	q->first_to_kick = add_buf(q->first_to_kick, count);
-	q->qdio_error = 0;
-
-	return count;
+	return __qdio_inspect_queue(q, bufnr, error);
 }
 EXPORT_SYMBOL(qdio_get_next_buffers);
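qdio_inspect_queue() is what lets an upper-layer driver do its own completion scanning once scan_threshold is 0: it reports how many buffers advanced, starting at *bufnr, with *error carrying any accumulated qdio_error. One plausible consumption loop, as a sketch (release_buffer() is a hypothetical callback):

	static int poll_tx_completions(struct ccw_device *cdev, unsigned int nr)
	{
		unsigned int bufnr, error, i;
		int completed;

		completed = qdio_inspect_queue(cdev, nr, false, &bufnr, &error);
		if (completed <= 0)
			return completed;	/* nothing done yet, or -ENODEV */

		/* buffers bufnr .. bufnr + completed - 1 wrap at 128: */
		for (i = 0; i < completed; i++)
			release_buffer(QDIO_BUFNR(bufnr + i), error);

		return completed;
	}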
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index d4101cecdc8d..f4ca1d29d61b 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -248,7 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
 			output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
 
 		q->is_input_q = 0;
-		q->u.out.scan_threshold = qdio_init->scan_threshold;
 		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
 		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
 
@@ -474,6 +473,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
 	irq_ptr->nr_input_qs = init_data->no_input_qs;
 	irq_ptr->nr_output_qs = init_data->no_output_qs;
 	irq_ptr->cdev = init_data->cdev;
+	irq_ptr->scan_threshold = init_data->scan_threshold;
 	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
 	setup_queues(irq_ptr, init_data);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 72755a025b4d..e4b55f9aa062 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
 #include <linux/hashtable.h>
 #include <linux/ip.h>
 #include <linux/refcount.h>
+#include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
@@ -30,6 +31,7 @@
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
+#include <net/sch_generic.h>
 #include <net/tcp.h>
 
 #include <asm/debug.h>
@@ -376,6 +378,28 @@ enum qeth_header_ids {
 #define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
 #define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/
 
+static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1,
+				     struct qeth_hdr_layer2 *h2)
+{
+	return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) &&
+	       h1->vlan_id == h2->vlan_id;
+}
+
+static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1,
+					 struct qeth_hdr_layer3 *h2)
+{
+	return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) &&
+	       h1->vlan_id == h2->vlan_id;
+}
+
+static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
+					 struct qeth_hdr_layer3 *h2)
+{
+	return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
+	       ipv6_addr_equal(&h1->next_hop.ipv6_addr,
+			       &h2->next_hop.ipv6_addr);
+}
+
 enum qeth_qdio_info_states {
 	QETH_QDIO_UNINITIALIZED,
 	QETH_QDIO_ALLOCATED,
@@ -424,6 +448,7 @@ struct qeth_qdio_out_buffer {
 	struct qdio_buffer *buffer;
 	atomic_t state;
 	int next_element_to_fill;
+	unsigned int bytes;
 	struct sk_buff_head skb_list;
 	int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
@@ -473,6 +498,8 @@ struct qeth_out_q_stats {
 	u64 tso_bytes;
 	u64 packing_mode_switch;
 	u64 stopped;
+	u64 completion_yield;
+	u64 completion_timer;
 
 	/* rtnl_link_stats64 */
 	u64 tx_packets;
@@ -481,6 +508,8 @@ struct qeth_out_q_stats {
 	u64 tx_dropped;
 };
 
+#define QETH_TX_TIMER_USECS	500
+
 struct qeth_qdio_out_q {
 	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
 	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
@@ -499,13 +528,36 @@ struct qeth_qdio_out_q {
 	atomic_t used_buffers;
 	/* indicates whether PCI flag must be set (or if one is outstanding) */
 	atomic_t set_pci_flags_count;
+	struct napi_struct napi;
+	struct timer_list timer;
+	struct qeth_hdr *prev_hdr;
+	u8 bulk_start;
 };
 
+#define qeth_for_each_output_queue(card, q, i)		\
+	for (i = 0; i < card->qdio.no_out_queues &&	\
+		    (q = card->qdio.out_qs[i]); i++)
+
+#define	qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
+
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+{
+	if (timer_pending(&queue->timer))
+		return;
+	mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
+				 jiffies);
+}
+
 static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
 {
 	return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
 }
 
+static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
+{
+	return atomic_read(&queue->used_buffers) == 0;
+}
+
 struct qeth_qdio_info {
 	atomic_t state;
 	/* input */
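The new qeth_l2_same_vlan()/qeth_l3_*() helpers above all rely on the same bit trick: (a ^ b) & MASK is zero exactly when the masked bits agree, so !((a ^ b) & MASK) tests "same flag state" without caring whether the flag is set or clear in either header. Spelled out on its own, with an illustrative flag value:

	#include <stdbool.h>
	#include <stdint.h>

	#define EXAMPLE_FLAG_VLAN 0x04	/* hypothetical flag bit */

	static bool same_vlan_state(uint8_t flags1, uint8_t flags2)
	{
		/* true when the VLAN bit is set in both or clear in both */
		return !((flags1 ^ flags2) & EXAMPLE_FLAG_VLAN);
	}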
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 44fbaa4f7264..8b4ea5f2832b 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -71,7 +71,8 @@ static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 		struct qeth_qdio_out_buffer *buf,
 		enum iucv_tx_notify notification);
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+				 int budget);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 static void qeth_close_dev_handler(struct work_struct *work)
@@ -411,7 +412,7 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
 		/* release here to avoid interleaving between
 		   outbound tasklet and inbound tasklet
 		   regarding notifications and lifecycle */
-		qeth_release_skbs(c);
+		qeth_tx_complete_buf(c, forced_cleanup, 0);
 
 		c = f->next_pending;
 		WARN_ON_ONCE(head->next_pending != f);
@@ -1077,22 +1078,52 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
 	}
 }
 
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+				 int budget)
 {
+	struct qeth_qdio_out_q *queue = buf->q;
 	struct sk_buff *skb;
 
 	/* release may never happen from within CQ tasklet scope */
 	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
 	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
-		qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
+		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
 
-	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
-		consume_skb(skb);
+	/* Empty buffer? */
+	if (buf->next_element_to_fill == 0)
+		return;
+
+	QETH_TXQ_STAT_INC(queue, bufs);
+	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
+	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
+		unsigned int bytes = qdisc_pkt_len(skb);
+		bool is_tso = skb_is_gso(skb);
+		unsigned int packets;
+
+		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
+		if (error) {
+			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
+		} else {
+			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
+			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
+			if (skb_is_nonlinear(skb))
+				QETH_TXQ_STAT_INC(queue, skbs_sg);
+			if (is_tso) {
+				QETH_TXQ_STAT_INC(queue, skbs_tso);
+				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
+			}
+		}
+
+		napi_consume_skb(skb, budget);
+	}
 }
 
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-				     struct qeth_qdio_out_buffer *buf)
+				     struct qeth_qdio_out_buffer *buf,
+				     bool error, int budget)
 {
 	int i;
 
@@ -1100,7 +1131,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
 		atomic_dec(&queue->set_pci_flags_count);
 
-	qeth_release_skbs(buf);
+	qeth_tx_complete_buf(buf, error, budget);
 
 	for (i = 0; i < queue->max_elements; ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -1111,6 +1142,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
 
 	buf->next_element_to_fill = 0;
+	buf->bytes = 0;
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
@@ -1122,7 +1154,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
 		if (!q->bufs[j])
 			continue;
 		qeth_cleanup_handled_pending(q, j, 1);
-		qeth_clear_output_buffer(q, q->bufs[j]);
+		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
 		if (free) {
 			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
 			q->bufs[j] = NULL;
@@ -2255,6 +2287,14 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
 	return q;
 }
 
+static void qeth_tx_completion_timer(struct timer_list *timer)
+{
+	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
+
+	napi_schedule(&queue->napi);
+	QETH_TXQ_STAT_INC(queue, completion_timer);
+}
+
 static int qeth_alloc_qdio_queues(struct qeth_card *card)
 {
 	int i, j;
@@ -2276,17 +2316,22 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
 
 	/* outbound */
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
-		card->qdio.out_qs[i] = qeth_alloc_output_queue();
-		if (!card->qdio.out_qs[i])
+		struct qeth_qdio_out_q *queue;
+
+		queue = qeth_alloc_output_queue();
+		if (!queue)
 			goto out_freeoutq;
 		QETH_CARD_TEXT_(card, 2, "outq %i", i);
-		QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *));
-		card->qdio.out_qs[i]->card = card;
-		card->qdio.out_qs[i]->queue_no = i;
+		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
+		card->qdio.out_qs[i] = queue;
+		queue->card = card;
+		queue->queue_no = i;
+		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
+
 		/* give outbound qeth_qdio_buffers their qdio_buffers */
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
-			WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
-			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
+			WARN_ON(queue->bufs[j]);
+			if (qeth_init_qdio_out_buf(queue, j))
 				goto out_freeoutqbufs;
 		}
 	}
@@ -2626,9 +2671,12 @@ int qeth_init_qdio_queues(struct qeth_card *card)
 		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
 		queue->next_buf_to_fill = 0;
 		queue->do_pack = 0;
+		queue->prev_hdr = NULL;
+		queue->bulk_start = 0;
 		atomic_set(&queue->used_buffers, 0);
 		atomic_set(&queue->set_pci_flags_count, 0);
 		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
 	}
 	return 0;
 }
@@ -3197,6 +3245,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 			       int count)
 {
+	struct qeth_card *card = queue->card;
 	struct qeth_qdio_out_buffer *buf;
 	int rc;
 	int i;
@@ -3240,14 +3289,17 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		}
 	}
 
-	QETH_TXQ_STAT_ADD(queue, bufs, count);
 	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
 	if (atomic_read(&queue->set_pci_flags_count))
 		qdio_flags |= QDIO_FLAG_PCI_OUT;
 	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
 		     queue->queue_no, index, count);
+
+	/* Fake the TX completion interrupt: */
+	if (IS_IQD(card))
+		napi_schedule(&queue->napi);
+
 	if (rc) {
-		QETH_TXQ_STAT_ADD(queue, tx_errors, count);
 		/* ignore temporary SIGA errors without busy condition */
 		if (rc == -ENOBUFS)
 			return;
@@ -3264,6 +3316,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 	}
 }
 
+static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
+{
+	qeth_flush_buffers(queue, queue->bulk_start, 1);
+
+	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+	queue->prev_hdr = NULL;
+}
+
 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
 {
 	int index;
@@ -3425,48 +3485,12 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
 		buffer = queue->bufs[bidx];
 		qeth_handle_send_error(card, buffer, qdio_error);
-
-		if (queue->bufstates &&
-		    (queue->bufstates[bidx].flags &
-		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
-			WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
-
-			if (atomic_cmpxchg(&buffer->state,
-					   QETH_QDIO_BUF_PRIMED,
-					   QETH_QDIO_BUF_PENDING) ==
-			    QETH_QDIO_BUF_PRIMED) {
-				qeth_notify_skbs(queue, buffer,
-						 TX_NOTIFY_PENDING);
-			}
-			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
-
-			/* prepare the queue slot for re-use: */
-			qeth_scrub_qdio_buffer(buffer->buffer,
-					       queue->max_elements);
-			if (qeth_init_qdio_out_buf(queue, bidx)) {
-				QETH_CARD_TEXT(card, 2, "outofbuf");
-				qeth_schedule_recovery(card);
-			}
-		} else {
-			if (card->options.cq == QETH_CQ_ENABLED) {
-				enum iucv_tx_notify n;
-
-				n = qeth_compute_cq_notification(
-					buffer->buffer->element[15].sflags, 0);
-				qeth_notify_skbs(queue, buffer, n);
-			}
-
-			qeth_clear_output_buffer(queue, buffer);
-		}
-		qeth_cleanup_handled_pending(queue, bidx, 0);
+		qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
 	}
+
 	atomic_sub(count, &queue->used_buffers);
-	/* check if we need to do something on this outbound queue */
-	if (!IS_IQD(card))
-		qeth_check_outbound_queue(queue);
+	qeth_check_outbound_queue(queue);
 
-	if (IS_IQD(card))
-		__queue = qeth_iqd_translate_txq(dev, __queue);
 	txq = netdev_get_tx_queue(dev, __queue);
 	/* xmit may have observed the full-condition, but not yet stopped the
 	 * txq. In which case the code below won't trigger. So before returning,
@@ -3655,9 +3679,32 @@ check_layout:
 	return 0;
 }
 
-static void __qeth_fill_buffer(struct sk_buff *skb,
-			       struct qeth_qdio_out_buffer *buf,
-			       bool is_first_elem, unsigned int offset)
+static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
+			      struct qeth_qdio_out_buffer *buffer,
+			      struct sk_buff *curr_skb,
+			      struct qeth_hdr *curr_hdr)
+{
+	struct qeth_hdr *prev_hdr = queue->prev_hdr;
+
+	if (!prev_hdr)
+		return true;
+
+	/* All packets must have the same target: */
+	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
+		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
+
+		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
+					eth_hdr(curr_skb)->h_dest) &&
+		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
+	}
+
+	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
+	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
+}
+
+static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
+				       struct qeth_qdio_out_buffer *buf,
+				       bool is_first_elem, unsigned int offset)
 {
 	struct qdio_buffer *buffer = buf->buffer;
 	int element = buf->next_element_to_fill;
@@ -3714,24 +3761,21 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
 	if (buffer->element[element - 1].eflags)
 		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
 	buf->next_element_to_fill = element;
+	return element;
 }
 
 /**
  * qeth_fill_buffer() - map skb into an output buffer
- * @queue:	QDIO queue to submit the buffer on
  * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
- * flush:	Prepare the buffer to be flushed, regardless of its fill level.
 */
-static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
-			    struct qeth_qdio_out_buffer *buf,
-			    struct sk_buff *skb, struct qeth_hdr *hdr,
-			    unsigned int offset, unsigned int hd_len,
-			    bool flush)
+static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
+				     struct sk_buff *skb, struct qeth_hdr *hdr,
+				     unsigned int offset, unsigned int hd_len)
 {
 	struct qdio_buffer *buffer = buf->buffer;
 	bool is_first_elem = true;
@@ -3751,35 +3795,22 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
 		buf->next_element_to_fill++;
 	}
 
-	__qeth_fill_buffer(skb, buf, is_first_elem, offset);
-
-	if (!queue->do_pack) {
-		QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
-	} else {
-		QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
-
-		QETH_TXQ_STAT_INC(queue, skbs_pack);
-		/* If the buffer still has free elements, keep using it. */
-		if (!flush &&
-		    buf->next_element_to_fill < queue->max_elements)
-			return 0;
-	}
-
-	/* flush out the buffer */
-	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
-	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-				  QDIO_MAX_BUFFERS_PER_Q;
-	return 1;
+	return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
}
 
-static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
-				    struct sk_buff *skb, struct qeth_hdr *hdr,
-				    unsigned int offset, unsigned int hd_len)
+static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+		       struct sk_buff *skb, unsigned int elements,
+		       struct qeth_hdr *hdr, unsigned int offset,
+		       unsigned int hd_len)
 {
-	int index = queue->next_buf_to_fill;
-	struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
+	unsigned int bytes = qdisc_pkt_len(skb);
+	unsigned int next_element;
 	struct netdev_queue *txq;
 	bool stopped = false;
+	bool flush;
+
+	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
 
 	/* Just a sanity check, the wake/stop logic should ensure that we always
 	 * get a free buffer.
@@ -3787,9 +3818,19 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
 		return -EBUSY;
 
-	txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
+	if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
+	    !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		qeth_flush_queue(queue);
+		buffer = queue->bufs[queue->bulk_start];
+
+		/* Sanity-check again: */
+		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+			return -EBUSY;
+	}
 
-	if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+	if (buffer->next_element_to_fill == 0 &&
+	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
 		/* If a TX completion happens right _here_ and misses to wake
 		 * the txq, then our re-check below will catch the race.
		 */
@@ -3798,8 +3839,17 @@
 		stopped = true;
 	}
 
-	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
-	qeth_flush_buffers(queue, index, 1);
+	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+	buffer->bytes += bytes;
+	queue->prev_hdr = hdr;
+
+	flush = __netdev_tx_sent_queue(txq, bytes,
+				       !stopped && netdev_xmit_more());
+
+	if (flush || next_element >= queue->max_elements) {
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		qeth_flush_queue(queue);
+	}
 
 	if (stopped && !qeth_out_queue_is_full(queue))
 		netif_tx_start_queue(txq);
@@ -3812,6 +3862,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			int elements_needed)
 {
 	struct qeth_qdio_out_buffer *buffer;
+	unsigned int next_element;
 	struct netdev_queue *txq;
 	bool stopped = false;
 	int start_index;
@@ -3874,8 +3925,17 @@
 		stopped = true;
 	}
 
-	flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
-					stopped);
+	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+
+	if (queue->do_pack)
+		QETH_TXQ_STAT_INC(queue, skbs_pack);
+	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
+		flush_count++;
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+					  QDIO_MAX_BUFFERS_PER_Q;
+	}
+
 	if (flush_count)
 		qeth_flush_buffers(queue, start_index, flush_count);
 	else if (!atomic_read(&queue->set_pci_flags_count))
@@ -3942,7 +4002,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 	unsigned int hd_len = 0;
 	unsigned int elements;
 	int push_len, rc;
-	bool is_sg;
 
 	if (is_tso) {
 		hw_hdr_len = sizeof(struct qeth_hdr_tso);
@@ -3971,10 +4030,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
 				  frame_len - proto_len, skb, proto_len);
 
-	is_sg = skb_is_nonlinear(skb);
 	if (IS_IQD(card)) {
-		rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
-					      hd_len);
+		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
+				 hd_len);
 	} else {
 		/* TODO: drop skb_orphan() once TX completion is fast enough */
 		skb_orphan(skb);
@@ -3982,18 +4040,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 					 hd_len, elements);
 	}
 
-	if (!rc) {
-		QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
-		if (is_sg)
-			QETH_TXQ_STAT_INC(queue, skbs_sg);
-		if (is_tso) {
-			QETH_TXQ_STAT_INC(queue, skbs_tso);
-			QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
-		}
-	} else {
-		if (!push_len)
-			kmem_cache_free(qeth_core_header_cache, hdr);
-	}
+	if (rc && !push_len)
+		kmem_cache_free(qeth_core_header_cache, hdr);
+
 	return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_xmit);
@@ -4724,7 +4773,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
 	init_data.input_sbal_addr_array  = in_sbal_ptrs;
 	init_data.output_sbal_addr_array = out_sbal_ptrs;
 	init_data.output_sbal_state_array = card->qdio.out_bufstates;
-	init_data.scan_threshold = IS_IQD(card) ? 1 : 32;
+	init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
 
 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
 		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -5138,6 +5187,107 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
 
+static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+				 unsigned int bidx, bool error, int budget)
+{
+	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
+	u8 sflags = buffer->buffer->element[15].sflags;
+	struct qeth_card *card = queue->card;
+
+	if (queue->bufstates && (queue->bufstates[bidx].flags &
+				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
+		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
+
+		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+						   QETH_QDIO_BUF_PENDING) ==
+		    QETH_QDIO_BUF_PRIMED)
+			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
+
+		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
+
+		/* prepare the queue slot for re-use: */
+		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
+		if (qeth_init_qdio_out_buf(queue, bidx)) {
+			QETH_CARD_TEXT(card, 2, "outofbuf");
+			qeth_schedule_recovery(card);
+		}
+
+		return;
+	}
+
+	if (card->options.cq == QETH_CQ_ENABLED)
+		qeth_notify_skbs(queue, buffer,
+				 qeth_compute_cq_notification(sflags, 0));
+	qeth_clear_output_buffer(queue, buffer, error, budget);
+}
+
+static int qeth_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
+	unsigned int queue_no = queue->queue_no;
+	struct qeth_card *card = queue->card;
+	struct net_device *dev = card->dev;
+	unsigned int work_done = 0;
+	struct netdev_queue *txq;
+
+	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+
+	while (1) {
+		unsigned int start, error, i;
+		unsigned int packets = 0;
+		unsigned int bytes = 0;
+		int completed;
+
+		if (qeth_out_queue_is_empty(queue)) {
+			napi_complete(napi);
+			return 0;
+		}
+
+		/* Give the CPU a breather: */
+		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
+			QETH_TXQ_STAT_INC(queue, completion_yield);
+			if (napi_complete_done(napi, 0))
+				napi_schedule(napi);
+			return 0;
+		}
+
+		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
+					       &start, &error);
+		if (completed <= 0) {
+			/* Ensure we see TX completion for pending work: */
+			if (napi_complete_done(napi, 0))
+				qeth_tx_arm_timer(queue);
+			return 0;
+		}
+
+		for (i = start; i < start + completed; i++) {
+			struct qeth_qdio_out_buffer *buffer;
+			unsigned int bidx = QDIO_BUFNR(i);
+
+			buffer = queue->bufs[bidx];
+			packets += skb_queue_len(&buffer->skb_list);
+			bytes += buffer->bytes;
+
+			qeth_handle_send_error(card, buffer, error);
+			qeth_iqd_tx_complete(queue, bidx, error, budget);
+			qeth_cleanup_handled_pending(queue, bidx, false);
+		}
+
+		netdev_tx_completed_queue(txq, packets, bytes);
+		atomic_sub(completed, &queue->used_buffers);
+		work_done += completed;
+
+		/* xmit may have observed the full-condition, but not yet
+		 * stopped the txq. In which case the code below won't trigger.
+		 * So before returning, xmit will re-check the txq's fill level
+		 * and wake it up if needed.
+		 */
+		if (netif_tx_queue_stopped(txq) &&
+		    !qeth_out_queue_is_full(queue))
+			netif_tx_wake_queue(txq);
+	}
+}
+
 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
 {
 	if (!cmd->hdr.return_code)
@@ -6084,6 +6234,17 @@ int qeth_open(struct net_device *dev)
 	napi_enable(&card->napi);
 	local_bh_disable();
 	napi_schedule(&card->napi);
+	if (IS_IQD(card)) {
+		struct qeth_qdio_out_q *queue;
+		unsigned int i;
+
+		qeth_for_each_output_queue(card, queue, i) {
+			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
+					  QETH_NAPI_WEIGHT);
+			napi_enable(&queue->napi);
+			napi_schedule(&queue->napi);
+		}
+	}
 	/* kick-start the NAPI softirq: */
 	local_bh_enable();
 	return 0;
@@ -6095,7 +6256,26 @@ int qeth_stop(struct net_device *dev)
 	struct qeth_card *card = dev->ml_priv;
 
 	QETH_CARD_TEXT(card, 4, "qethstop");
-	netif_tx_disable(dev);
+	if (IS_IQD(card)) {
+		struct qeth_qdio_out_q *queue;
+		unsigned int i;
+
+		/* Quiesce the NAPI instances: */
+		qeth_for_each_output_queue(card, queue, i) {
+			napi_disable(&queue->napi);
+			del_timer_sync(&queue->timer);
+		}
+
+		/* Stop .ndo_start_xmit, might still access queue->napi. */
+		netif_tx_disable(dev);
+
+		/* Queues may get re-allocated, so remove the NAPIs here. */
+		qeth_for_each_output_queue(card, queue, i)
+			netif_napi_del(&queue->napi);
+	} else {
+		netif_tx_disable(dev);
+	}
+
 	napi_disable(&card->napi);
 	return 0;
 }
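The qeth conversion above is a textbook BQL + TX-NAPI pairing: __qeth_xmit() reports each skb's bytes with __netdev_tx_sent_queue() (deferring the flush while netdev_xmit_more() says more is coming), and qeth_tx_poll() reports drained packets/bytes with netdev_tx_completed_queue() so the stack can re-open the queue. Reduced to its skeleton, as a sketch with error paths omitted:

	/* xmit side: account the bytes queued to hardware;
	 * returns true when the deferred doorbell must ring now
	 */
	static bool xmit_account(struct netdev_queue *txq, unsigned int bytes,
				 bool stopped)
	{
		return __netdev_tx_sent_queue(txq, bytes,
					      !stopped && netdev_xmit_more());
	}

	/* completion side, e.g. from a TX NAPI poll routine: */
	static void complete_account(struct netdev_queue *txq,
				     unsigned int packets, unsigned int bytes)
	{
		/* BQL may now lift __QUEUE_STATE_STACK_XOFF and restart TX */
		netdev_tx_completed_queue(txq, packets, bytes);
	}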
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 4166eb29f0bd..096698df3886 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = {
 	QETH_TXQ_STAT("TSO bytes", tso_bytes),
 	QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
 	QETH_TXQ_STAT("Queue stopped", stopped),
+	QETH_TXQ_STAT("Completion yield", completion_yield),
+	QETH_TXQ_STAT("Completion timer", completion_timer),
 };
 
 static const struct qeth_stats card_stats[] = {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 662bd51f922f..b8799cd3e7aa 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -175,10 +175,8 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
 		hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
 	} else {
 		hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
-			QETH_TXQ_STAT_INC(queue, skbs_csum);
-		}
 	}
 
 	/* set byte byte 3 to casting flags */
@@ -588,9 +586,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
 	struct qeth_card *card = dev->ml_priv;
 	u16 txq = skb_get_queue_mapping(skb);
 	struct qeth_qdio_out_q *queue;
-	int tx_bytes = skb->len;
 	int rc;
 
+	if (!skb_is_gso(skb))
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
 	if (IS_IQD(card))
 		txq = qeth_iqd_translate_txq(dev, txq);
 	queue = card->qdio.out_qs[txq];
@@ -601,11 +600,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
 	rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
 		       qeth_l2_fill_header);
 
-	if (!rc) {
-		QETH_TXQ_STAT_INC(queue, tx_packets);
-		QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
+	if (!rc)
 		return NETDEV_TX_OK;
-	}
 
 	QETH_TXQ_STAT_INC(queue, tx_dropped);
 	kfree_skb(skb);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 54799fe6a700..d7bfc7a0e4c0 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1957,7 +1957,6 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
 		/* some HW requires combined L3+L4 csum offload: */
 		if (ipv == 4)
 			hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
-		QETH_TXQ_STAT_INC(queue, skbs_csum);
 	}
 }
 
@@ -2044,9 +2043,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	u16 txq = skb_get_queue_mapping(skb);
 	int ipv = qeth_get_ip_version(skb);
 	struct qeth_qdio_out_q *queue;
-	int tx_bytes = skb->len;
 	int rc;
 
+	if (!skb_is_gso(skb))
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
 	if (IS_IQD(card)) {
 		queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
 
@@ -2069,11 +2069,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	else
 		rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
 
-	if (!rc) {
-		QETH_TXQ_STAT_INC(queue, tx_packets);
-		QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
+	if (!rc)
 		return NETDEV_TX_OK;
-	}
 
 tx_drop:
 	QETH_TXQ_STAT_INC(queue, tx_dropped);
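Both xmit paths above now seed qdisc_skb_cb(skb)->pkt_len for non-GSO skbs. The reason: TX byte accounting has moved to completion time (qeth_tx_complete_buf() reads qdisc_pkt_len(skb)), and for GSO skbs the stack already stored a segmentation-aware length there, so the driver only fills the field when it may be stale. As a hedged sketch of the helper this amounts to (hypothetical name):

	static void seed_pkt_len(struct sk_buff *skb)
	{
		/* GSO skbs: the qdisc layer already filled in a
		 * segmentation-aware value; don't overwrite it.
		 */
		if (!skb_is_gso(skb))
			qdisc_skb_cb(skb)->pkt_len = skb->len;
	}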
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 55ac223553f8..b5d28dadf964 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3272,7 +3272,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 	 */
 	smp_mb();
 
-	if (dql_avail(&dev_queue->dql) < 0)
+	if (unlikely(dql_avail(&dev_queue->dql) < 0))
 		return;
 
 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 75a35dccb675..8bf79a9eb234 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -75,7 +75,7 @@ enum net_dm_attr {
 	NET_DM_ATTR_PC,				/* u64 */
 	NET_DM_ATTR_SYMBOL,			/* string */
 	NET_DM_ATTR_IN_PORT,			/* nested */
-	NET_DM_ATTR_TIMESTAMP,			/* struct timespec */
+	NET_DM_ATTR_TIMESTAMP,			/* u64 */
 	NET_DM_ATTR_PROTO,			/* u16 */
 	NET_DM_ATTR_PAYLOAD,			/* binary */
 	NET_DM_ATTR_PAD,
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index bfc024024aa3..cc60cc22e2db 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -552,7 +552,7 @@ static size_t net_dm_packet_report_size(size_t payload_len)
 	       /* NET_DM_ATTR_IN_PORT */
 	       net_dm_in_port_size() +
 	       /* NET_DM_ATTR_TIMESTAMP */
-	       nla_total_size(sizeof(struct timespec)) +
+	       nla_total_size(sizeof(u64)) +
 	       /* NET_DM_ATTR_ORIG_LEN */
 	       nla_total_size(sizeof(u32)) +
 	       /* NET_DM_ATTR_PROTO */
@@ -592,7 +592,6 @@ static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
 	u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
 	char buf[NET_DM_MAX_SYMBOL_LEN];
 	struct nlattr *attr;
-	struct timespec ts;
 	void *hdr;
 	int rc;
 
@@ -615,8 +614,8 @@ static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
 	if (rc)
 		goto nla_put_failure;
 
-	if (ktime_to_timespec_cond(skb->tstamp, &ts) &&
-	    nla_put(msg, NET_DM_ATTR_TIMESTAMP, sizeof(ts), &ts))
+	if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
+			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
 		goto nla_put_failure;
 
 	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
@@ -716,7 +715,7 @@ net_dm_hw_packet_report_size(size_t payload_len,
 	       /* NET_DM_ATTR_IN_PORT */
 	       net_dm_in_port_size() +
 	       /* NET_DM_ATTR_TIMESTAMP */
-	       nla_total_size(sizeof(struct timespec)) +
+	       nla_total_size(sizeof(u64)) +
 	       /* NET_DM_ATTR_ORIG_LEN */
 	       nla_total_size(sizeof(u32)) +
 	       /* NET_DM_ATTR_PROTO */
@@ -730,7 +729,6 @@ static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
 {
 	struct net_dm_hw_metadata *hw_metadata;
 	struct nlattr *attr;
-	struct timespec ts;
 	void *hdr;
 
 	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
@@ -761,8 +759,8 @@ static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
 			goto nla_put_failure;
 	}
 
-	if (ktime_to_timespec_cond(skb->tstamp, &ts) &&
-	    nla_put(msg, NET_DM_ATTR_TIMESTAMP, sizeof(ts), &ts))
+	if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
+			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
 		goto nla_put_failure;
 
 	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
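The drop_monitor change above swaps a struct timespec attribute (whose layout is architecture-dependent) for a flat u64 of nanoseconds. 64-bit netlink attributes need 8-byte alignment, which nla_put_u64_64bit() guarantees by emitting a PAD attribute when necessary. The pattern, isolated into a small helper (names taken from the diff):

	static int put_drop_tstamp(struct sk_buff *msg, ktime_t tstamp)
	{
		return nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
					 ktime_to_ns(tstamp), NET_DM_ATTR_PAD);
	}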
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 6832eeb4b785..cf390e0aa73d 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -190,8 +190,7 @@ static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
 		sk_msg_check_to_free(msg, i, msg->sg.size);
 		sge = sk_msg_elem(msg, i);
 	}
-	if (msg->skb)
-		consume_skb(msg->skb);
+	consume_skb(msg->skb);
 	sk_msg_init(msg);
 	return freed;
 }
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index fa014d5f1732..d432d0011c16 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -80,8 +80,10 @@ static void ip6_sublist_rcv_finish(struct list_head *head)
 {
 	struct sk_buff *skb, *next;
 
-	list_for_each_entry_safe(skb, next, head, list)
+	list_for_each_entry_safe(skb, next, head, list) {
+		skb_list_del_init(skb);
 		dst_input(skb);
+	}
 }
 
 static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 1a8a4a760b84..a0f99bbf362c 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1048,9 +1048,14 @@ int rds_ib_recv_init(void)
 	si_meminfo(&si);
 	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
 
-	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
-					sizeof(struct rds_ib_incoming),
-					0, SLAB_HWCACHE_ALIGN, NULL);
+	rds_ib_incoming_slab =
+		kmem_cache_create_usercopy("rds_ib_incoming",
+					   sizeof(struct rds_ib_incoming),
+					   0, SLAB_HWCACHE_ALIGN,
+					   offsetof(struct rds_ib_incoming,
+						    ii_inc.i_usercopy),
+					   sizeof(struct rds_inc_usercopy),
+					   NULL);
 	if (!rds_ib_incoming_slab)
 		goto out;
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index ad605fd61655..53e86911773a 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -271,6 +271,12 @@ struct rds_ext_header_rdma_dest {
 #define	RDS_MSG_RX_END		2
 #define	RDS_MSG_RX_CMSG		3
 
+/* The following values are whitelisted for usercopy */
+struct rds_inc_usercopy {
+	rds_rdma_cookie_t	rdma_cookie;
+	ktime_t			rx_tstamp;
+};
+
 struct rds_incoming {
 	refcount_t		i_refcount;
 	struct list_head	i_item;
@@ -280,8 +286,7 @@ struct rds_incoming {
 	unsigned long		i_rx_jiffies;
 	struct in6_addr		i_saddr;
 
-	rds_rdma_cookie_t	i_rdma_cookie;
-	ktime_t			i_rx_tstamp;
+	struct rds_inc_usercopy i_usercopy;
 	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
 };
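The RDS change above is the standard hardened-usercopy pattern: the two fields that legitimately reach user space (rdma_cookie via RDS_CMSG_RDMA_DEST, rx_tstamp via the SO_TIMESTAMP cmsgs) are grouped into struct rds_inc_usercopy, and the slab cache is created with kmem_cache_create_usercopy() so only that window is whitelisted for copy-out. A minimal sketch of the same idea, with a hypothetical containing struct:

	struct example_obj {
		struct list_head node;			/* kernel-internal */
		struct rds_inc_usercopy ucopy;		/* may reach user space */
	};

	static struct kmem_cache *example_slab;

	static int example_cache_init(void)
	{
		example_slab = kmem_cache_create_usercopy("example_obj",
				sizeof(struct example_obj), 0, SLAB_HWCACHE_ALIGN,
				offsetof(struct example_obj, ucopy),
				sizeof(struct rds_inc_usercopy), NULL);
		return example_slab ? 0 : -ENOMEM;
	}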
We could stash it * somewhere and use it for error checking. */ - inc->i_rdma_cookie = rds_rdma_make_cookie( + inc->i_usercopy.rdma_cookie = rds_rdma_make_cookie( be32_to_cpu(buffer.rdma_dest.h_rdma_rkey), be32_to_cpu(buffer.rdma_dest.h_rdma_offset)); @@ -380,7 +380,7 @@ void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr, be32_to_cpu(inc->i_hdr.h_len), inc->i_hdr.h_dport); if (sock_flag(sk, SOCK_RCVTSTAMP)) - inc->i_rx_tstamp = ktime_get_real(); + inc->i_usercopy.rx_tstamp = ktime_get_real(); rds_inc_addref(inc); inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock(); list_add_tail(&inc->i_item, &rs->rs_recv_queue); @@ -540,16 +540,18 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg, { int ret = 0; - if (inc->i_rdma_cookie) { + if (inc->i_usercopy.rdma_cookie) { ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST, - sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie); + sizeof(inc->i_usercopy.rdma_cookie), + &inc->i_usercopy.rdma_cookie); if (ret) goto out; } - if ((inc->i_rx_tstamp != 0) && + if ((inc->i_usercopy.rx_tstamp != 0) && sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) { - struct __kernel_old_timeval tv = ns_to_kernel_old_timeval(inc->i_rx_tstamp); + struct __kernel_old_timeval tv = + ns_to_kernel_old_timeval(inc->i_usercopy.rx_tstamp); if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) { ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, |