Diffstat (limited to 'drivers/net/ethernet/mellanox')
44 files changed, 8212 insertions, 636 deletions
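The bulk of this patch replaces mlx4's legacy completion-vector pool (dev->caps.comp_pool) with per-port EQ assignment plus reference counting, and adds PF/VF counter statistics. As a condensed, hypothetical consumer-side sketch only (signatures taken from the en_cq.c and eq.c hunks below; identifiers such as dev/port are placeholders and this is not buildable outside the kernel tree), the reworked EQ API is used roughly like this:

	/* Sketch: how a ring consumer obtains and releases a completion
	 * vector with the per-port API introduced by this patch.
	 */
	int vector = dev->caps.num_comp_vectors;	/* sentinel: no vector assigned yet */
	bool assigned_eq = false;
	int err;

	if (!mlx4_is_eq_vector_valid(dev, port, vector)) {
		/* pick the least-referenced EQ that actually serves this port */
		err = mlx4_assign_eq(dev, port, &vector);
		if (err)
			return err;
		assigned_eq = true;
	}

	/* ... create the CQ on 'vector'; on teardown or failure: ... */
	if (assigned_eq)
		mlx4_release_eq(dev, vector);	/* only drops the EQ ref count; the IRQ
						 * itself stays allocated (may be bound
						 * to a cpu_rmap), per the eq.c hunk */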
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig index 8cf7563a8d92..52a6665b7abf 100644 --- a/drivers/net/ethernet/mellanox/Kconfig +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -7,9 +7,7 @@ config NET_VENDOR_MELLANOX default y depends on PCI ---help--- - If you have a network (Ethernet) card belonging to this class, say Y - and read the Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. + If you have a network (Ethernet) card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 529ef0594b90..82040137d7d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -49,6 +49,7 @@ #include "mlx4.h" #include "fw.h" #include "fw_qos.h" +#include "mlx4_stats.h" #define CMD_POLL_TOKEN 0xffff #define INBOX_MASK 0xffffffffffffff00ULL @@ -882,7 +883,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, { struct ib_smp *smp = inbox->buf; u32 index; - u8 port; + u8 port, slave_port; u8 opcode_modifier; u16 *table; int err; @@ -894,7 +895,8 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, __be32 slave_cap_mask; __be64 slave_node_guid; - port = vhcr->in_modifier; + slave_port = vhcr->in_modifier; + port = mlx4_slave_convert_port(dev, slave, slave_port); /* network-view bit is for driver use only, and should not be passed to FW */ opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */ @@ -930,8 +932,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) { /*get the slave specific caps:*/ /*do the command */ + smp->attr_mod = cpu_to_be32(port); err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, - vhcr->in_modifier, opcode_modifier, + port, opcode_modifier, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); /* modify the response for slaves */ if (!err && slave != mlx4_master_func_num(dev)) { @@ -975,7 +978,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, } if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, - vhcr->in_modifier, opcode_modifier, + port, opcode_modifier, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (!err) { slave_node_guid = mlx4_get_slave_node_guid(dev, slave); @@ -2915,7 +2918,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->mac = mac; - mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", + mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n", vf, port, s_info->mac); return 0; } @@ -3164,6 +3167,92 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat } EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); +int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index, + struct mlx4_counter *counter_stats, int reset) +{ + struct mlx4_cmd_mailbox *mailbox = NULL; + struct mlx4_counter *tmp_counter; + int err; + u32 if_stat_in_mod; + + if (!counter_stats) + return -EINVAL; + + if (counter_index == MLX4_SINK_COUNTER_INDEX(dev)) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memset(mailbox->buf, 
0, sizeof(struct mlx4_counter)); + if_stat_in_mod = counter_index; + if (reset) + if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET; + err = mlx4_cmd_box(dev, 0, mailbox->dma, + if_stat_in_mod, 0, + MLX4_CMD_QUERY_IF_STAT, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n", + __func__, counter_index); + goto if_stat_out; + } + tmp_counter = (struct mlx4_counter *)mailbox->buf; + counter_stats->counter_mode = tmp_counter->counter_mode; + if (counter_stats->counter_mode == 0) { + counter_stats->rx_frames = + cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) + + be64_to_cpu(tmp_counter->rx_frames)); + counter_stats->tx_frames = + cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) + + be64_to_cpu(tmp_counter->tx_frames)); + counter_stats->rx_bytes = + cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) + + be64_to_cpu(tmp_counter->rx_bytes)); + counter_stats->tx_bytes = + cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) + + be64_to_cpu(tmp_counter->tx_bytes)); + } + +if_stat_out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_get_counter_stats); + +int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx, + struct ifla_vf_stats *vf_stats) +{ + struct mlx4_counter tmp_vf_stats; + int slave; + int err = 0; + + if (!vf_stats) + return -EINVAL; + + if (!mlx4_is_master(dev)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf_idx); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats); + if (!err && tmp_vf_stats.counter_mode == 0) { + vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames); + vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames); + vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes); + vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes); + } + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_get_vf_stats); + int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -3197,6 +3286,12 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, int enabled) { struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); if (slave == mlx4_master_func_num(dev)) return 0; @@ -3206,6 +3301,11 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, enabled < 0 || enabled > 1) return -EINVAL; + if (min_port == max_port && dev->caps.num_ports > 1) { + mlx4_info(dev, "SMI access disallowed for single ported VFs\n"); + return -EPROTONOSUPPORT; + } + priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index e71f31387ac6..3348e646db70 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, u64 mtt_addr; int err; - if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) + if (vector >= dev->caps.num_comp_vectors) return -EINVAL; cq->vector = vector; @@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, cq_context->flags |= cpu_to_be32(1 << 19); cq_context->logsize_usrpage = 
cpu_to_be32((ilog2(nent) << 24) | uar->index); - cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; + cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; mtt_addr = mlx4_mtt_addr(dev, mtt); @@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, init_completion(&cq->free); cq->comp = mlx4_add_cq_to_tasklet; cq->tasklet_ctx.priv = - &priv->eq_table.eq[cq->vector].tasklet_ctx; + &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx; INIT_LIST_HEAD(&cq->tasklet_ctx.list); - cq->irq = priv->eq_table.eq[cq->vector].irq; + cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq; return 0; err_radix: @@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); - synchronize_irq(priv->eq_table.eq[cq->vector].irq); + synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); + if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != + priv->eq_table.eq[MLX4_EQ_ASYNC].irq) + synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 22da4d0d0f05..63769df872a4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, cq->ring = ring; cq->is_tx = mode; + cq->vector = mdev->dev->caps.num_comp_vectors; /* Allocate HW buffers on provided NUMA node. * dev->numa_node is used in mtt range allocation flow. @@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, int err = 0; char name[25]; int timestamp_en = 0; - struct cpu_rmap *rmap = -#ifdef CONFIG_RFS_ACCEL - priv->dev->rx_cpu_rmap; -#else - NULL; -#endif + bool assigned_eq = false; cq->dev = mdev->pndev[priv->port]; cq->mcq.set_ci_db = cq->wqres.db.db; @@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, memset(cq->buf, 0, cq->buf_size); if (cq->is_tx == RX) { - if (mdev->dev->caps.comp_pool) { - if (!cq->vector) { - sprintf(name, "%s-%d", priv->dev->name, - cq->ring); - /* Set IRQ for specific name (per ring) */ - if (mlx4_assign_eq(mdev->dev, name, rmap, - &cq->vector)) { - cq->vector = (cq->ring + 1 + priv->port) - % mdev->dev->caps.num_comp_vectors; - mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", - name); - } - + if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port, + cq->vector)) { + cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask); + + err = mlx4_assign_eq(mdev->dev, priv->port, + &cq->vector); + if (err) { + mlx4_err(mdev, "Failed assigning an EQ to %s\n", + name); + goto free_eq; } - } else { - cq->vector = (cq->ring + 1 + priv->port) % - mdev->dev->caps.num_comp_vectors; + + assigned_eq = true; } cq->irq_desc = @@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, cq->vector, 0, timestamp_en); if (err) - return err; + goto free_eq; cq->mcq.comp = cq->is_tx ? 
mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; @@ -168,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, NAPI_POLL_WEIGHT); } else { - struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; - - err = irq_set_affinity_hint(cq->mcq.irq, - ring->affinity_mask); - if (err) - mlx4_warn(mdev, "Failed setting affinity hint\n"); - netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); napi_hash_add(&cq->napi); } @@ -182,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, napi_enable(&cq->napi); return 0; + +free_eq: + if (assigned_eq) + mlx4_release_eq(mdev->dev, cq->vector); + cq->vector = mdev->dev->caps.num_comp_vectors; + return err; } void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) @@ -191,9 +182,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); - if (priv->mdev->dev->caps.comp_pool && cq->vector) { + if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) && + cq->is_tx == RX) mlx4_release_eq(priv->mdev->dev, cq->vector); - } cq->vector = 0; cq->buf_size = 0; cq->buf = NULL; @@ -207,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) if (!cq->is_tx) { napi_hash_del(&cq->napi); synchronize_rcu(); - irq_set_affinity_hint(cq->mcq.irq, NULL); } netif_napi_del(&cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index a2ddf3d75ff8..99ba1c50e585 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -119,6 +119,12 @@ static const char main_strings[][ETH_GSTRING_LEN] = { "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload", + /* pf statistics */ + "pf_rx_packets", + "pf_rx_bytes", + "pf_tx_packets", + "pf_tx_bytes", + /* priority flow control statistics rx */ "rx_pause_prio_0", "rx_pause_duration_prio_0", "rx_pause_transition_prio_0", @@ -368,6 +374,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, if (bitmap_iterator_test(&it)) data[index++] = ((unsigned long *)&priv->port_stats)[i]; + for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = + ((unsigned long *)&priv->pf_stats)[i]; + for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX; i++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) @@ -448,6 +459,12 @@ static void mlx4_en_get_strings(struct net_device *dev, strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[strings]); + for (i = 0; i < NUM_PF_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + for (i = 0; i < NUM_FLOW_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index cf467a9f6cc7..77179d7ae4cc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1597,6 +1597,9 @@ int mlx4_en_start_port(struct net_device *dev) } mdev->mac_removed[priv->port] = 0; + priv->counter_index = + mlx4_get_default_counter_index(mdev->dev, priv->port); + err = 
mlx4_en_config_rss_steer(priv); if (err) { en_err(priv, "Failed configuring rss steering\n"); @@ -1755,6 +1758,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) /* Set port as not active */ priv->port_up = false; + priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); /* Promsicuous mode */ if (mdev->dev->caps.steering_mode == @@ -1891,6 +1895,7 @@ static void mlx4_en_clear_stats(struct net_device *dev) sizeof(priv->rx_priority_flowstats)); memset(&priv->tx_priority_flowstats, 0, sizeof(priv->tx_priority_flowstats)); + memset(&priv->pf_stats, 0, sizeof(priv->pf_stats)); for (i = 0; i < priv->tx_ring_num; i++) { priv->tx_ring[i]->bytes = 0; @@ -1954,7 +1959,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) int i; #ifdef CONFIG_RFS_ACCEL - free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); priv->dev->rx_cpu_rmap = NULL; #endif @@ -2012,11 +2016,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) } #ifdef CONFIG_RFS_ACCEL - if (priv->mdev->dev->caps.comp_pool) { - priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); - if (!priv->dev->rx_cpu_rmap) - goto err; - } + priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); #endif return 0; @@ -2292,6 +2292,15 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); } +static int mlx4_en_get_vf_stats(struct net_device *dev, int vf, + struct ifla_vf_stats *vf_stats) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats); +} + #define PORT_ID_BYTE_LEN 8 static int mlx4_en_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid) @@ -2489,6 +2498,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_set_vf_rate = mlx4_en_set_vf_rate, .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, + .ndo_get_vf_stats = mlx4_en_get_vf_stats, .ndo_get_vf_config = mlx4_en_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, @@ -2686,7 +2696,7 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, u8 rx_ppp, u8 rx_pause, u8 tx_ppp, u8 tx_pause) { - int last_i = NUM_MAIN_STATS + NUM_PORT_STATS; + int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS; if (!mlx4_is_slave(dev) && (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { @@ -2748,6 +2758,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); last_i += NUM_PORT_STATS; + if (mlx4_is_master(dev)) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_PF_STATS); + last_i += NUM_PF_STATS; + mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, rx_ppp, rx_pause, tx_ppp, tx_pause); @@ -2783,6 +2798,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv = netdev_priv(dev); memset(priv, 0, sizeof(struct mlx4_en_priv)); + priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); INIT_WORK(&priv->watchdog_task, mlx4_en_restart); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 0a56f010c846..ee99e67187f5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -149,6 +149,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, 
int num) int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) { + struct mlx4_counter tmp_counter_stats; struct mlx4_en_stat_out_mbox *mlx4_en_stats; struct mlx4_en_stat_out_flow_control_mbox *flowstats; struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); @@ -156,7 +157,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) struct mlx4_cmd_mailbox *mailbox; u64 in_mod = reset << 8 | port; int err; - int i; + int i, counter_index; mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) @@ -202,6 +203,20 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.tso_packets += ring->tso_packets; priv->port_stats.xmit_more += ring->xmit_more; } + if (mlx4_is_master(mdev->dev)) { + stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0, + &mlx4_en_stats->RTOT_prio_1, + NUM_PRIORITIES); + stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0, + &mlx4_en_stats->TTOT_prio_1, + NUM_PRIORITIES); + stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0, + &mlx4_en_stats->ROCT_prio_1, + NUM_PRIORITIES); + stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0, + &mlx4_en_stats->TOCT_prio_1, + NUM_PRIORITIES); + } /* net device stats */ stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + @@ -296,6 +311,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) spin_unlock_bh(&priv->stats_lock); + memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats)); + counter_index = mlx4_get_default_counter_index(mdev->dev, port); + err = mlx4_get_counter_stats(mdev->dev, counter_index, + &tmp_counter_stats, reset); + /* 0xffs indicates invalid value */ memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES); @@ -314,6 +334,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) spin_lock_bh(&priv->stats_lock); + if (tmp_counter_stats.counter_mode == 0) { + priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes); + priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes); + priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames); + priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames); + } + for (i = 0; i < MLX4_NUM_PRIORITIES; i++) { priv->rx_priority_flowstats[i].rx_pause = be64_to_cpu(flowstats[i].rx_pause); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 34f2fdf4fe5d..e482fa1bb741 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -66,7 +66,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->pri_path.sched_queue |= user_prio << 3; context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; } - context->pri_path.counter_index = 0xff; + context->pri_path.counter_index = priv->counter_index; context->cqn_send = cpu_to_be32(cqn); context->cqn_recv = cpu_to_be32(cqn); context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 2a77a6b19121..35f726c17e48 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) struct mlx4_dev *dev = mdev->dev; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { - if (!dev->caps.comp_pool) - num_of_eqs = max_t(int, MIN_RX_RINGS, - min_t(int, - dev->caps.num_comp_vectors, 
- DEF_RX_RINGS)); - else - num_of_eqs = min_t(int, MAX_MSIX_P_PORT, - dev->caps.comp_pool/ - dev->caps.num_ports) - 1; + num_of_eqs = max_t(int, MIN_RX_RINGS, + min_t(int, + mlx4_get_eqs_per_port(mdev->dev, i), + DEF_RX_RINGS)); num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : min_t(int, num_of_eqs, diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 2619c9fbf42d..aae13adfb492 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -145,7 +145,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work) struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; struct mlx4_eqe *eqe; u8 slave; - int i; + int i, phys_port, slave_port; for (eqe = next_slave_event_eqe(slave_eq); eqe; eqe = next_slave_event_eqe(slave_eq)) { @@ -154,9 +154,20 @@ void mlx4_gen_slave_eqe(struct work_struct *work) /* All active slaves need to receive the event */ if (slave == ALL_SLAVES) { for (i = 0; i <= dev->persist->num_vfs; i++) { + phys_port = 0; + if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT && + eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) { + phys_port = eqe->event.port_mgmt_change.port; + slave_port = mlx4_phys_to_slave_port(dev, i, phys_port); + if (slave_port < 0) /* VF doesn't have this port */ + continue; + eqe->event.port_mgmt_change.port = slave_port; + } if (mlx4_GEN_EQE(dev, i, eqe)) mlx4_warn(dev, "Failed to generate event for slave %d\n", i); + if (phys_port) + eqe->event.port_mgmt_change.port = phys_port; } } else { if (mlx4_GEN_EQE(dev, slave, eqe)) @@ -210,6 +221,22 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave, slave_event(dev, slave, eqe); } +#if defined(CONFIG_SMP) +static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) +{ + int hint_err; + struct mlx4_dev *dev = &priv->dev; + struct mlx4_eq *eq = &priv->eq_table.eq[vec]; + + if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) + return; + + hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); + if (hint_err) + mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err); +} +#endif + int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) { struct mlx4_eqe eqe; @@ -224,7 +251,7 @@ int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; - eqe.event.port_mgmt_change.port = port; + eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); return mlx4_GEN_EQE(dev, slave, &eqe); } @@ -241,7 +268,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; - eqe.event.port_mgmt_change.port = port; + eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); return mlx4_GEN_EQE(dev, slave, &eqe); } @@ -251,6 +278,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change) { struct mlx4_eqe eqe; + u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port); /*don't send if we don't have the that slave */ if (dev->persist->num_vfs < slave) @@ -259,7 +287,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; eqe.subtype = port_subtype_change; - eqe.event.port_change.port = cpu_to_be32(port << 28); + eqe.event.port_change.port = cpu_to_be32(slave_port << 28); mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", 
__func__, port_subtype_change, slave, port); @@ -589,6 +617,10 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) { if (i == mlx4_master_func_num(dev)) continue; + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); mlx4_slave_event(dev, i, eqe); } } @@ -879,8 +911,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev) * we need to map, take the difference of highest index and * the lowest index we'll use and add 1. */ - return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + - dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; + return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - + dev->caps.reserved_eqs / 4 + 1; } static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) @@ -1069,32 +1101,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev, static void mlx4_free_irqs(struct mlx4_dev *dev) { struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; - struct mlx4_priv *priv = mlx4_priv(dev); - int i, vec; + int i; if (eq_table->have_irq) free_irq(dev->persist->pdev->irq, dev); for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) if (eq_table->eq[i].have_irq) { + free_cpumask_var(eq_table->eq[i].affinity_mask); +#if defined(CONFIG_SMP) + irq_set_affinity_hint(eq_table->eq[i].irq, NULL); +#endif free_irq(eq_table->eq[i].irq, eq_table->eq + i); eq_table->eq[i].have_irq = 0; } - for (i = 0; i < dev->caps.comp_pool; i++) { - /* - * Freeing the assigned irq's - * all bits should be 0, but we need to validate - */ - if (priv->msix_ctl.pool_bm & 1ULL << i) { - /* NO need protecting*/ - vec = dev->caps.num_comp_vectors + 1 + i; - free_irq(priv->eq_table.eq[vec].irq, - &priv->eq_table.eq[vec]); - } - } - - kfree(eq_table->irq_names); } @@ -1175,76 +1196,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) } priv->eq_table.irq_names = - kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + - dev->caps.comp_pool), + kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), GFP_KERNEL); if (!priv->eq_table.irq_names) { err = -ENOMEM; - goto err_out_bitmap; + goto err_out_clr_int; } - for (i = 0; i < dev->caps.num_comp_vectors; ++i) { - err = mlx4_create_eq(dev, dev->caps.num_cqs - - dev->caps.reserved_cqs + - MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, - &priv->eq_table.eq[i]); - if (err) { - --i; - goto err_out_unmap; - } - } - - err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? 
dev->caps.num_comp_vectors : 0, - &priv->eq_table.eq[dev->caps.num_comp_vectors]); - if (err) - goto err_out_comp; - - /*if additional completion vectors poolsize is 0 this loop will not run*/ - for (i = dev->caps.num_comp_vectors + 1; - i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { + if (i == MLX4_EQ_ASYNC) { + err = mlx4_create_eq(dev, + MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, + 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]); + } else { + struct mlx4_eq *eq = &priv->eq_table.eq[i]; +#ifdef CONFIG_RFS_ACCEL + int port = find_first_bit(eq->actv_ports.ports, + dev->caps.num_ports) + 1; + + if (port <= dev->caps.num_ports) { + struct mlx4_port_info *info = + &mlx4_priv(dev)->port[port]; + + if (!info->rmap) { + info->rmap = alloc_irq_cpu_rmap( + mlx4_get_eqs_per_port(dev, port)); + if (!info->rmap) { + mlx4_warn(dev, "Failed to allocate cpu rmap\n"); + err = -ENOMEM; + goto err_out_unmap; + } + } - err = mlx4_create_eq(dev, dev->caps.num_cqs - - dev->caps.reserved_cqs + - MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, - &priv->eq_table.eq[i]); - if (err) { - --i; - goto err_out_unmap; + err = irq_cpu_rmap_add( + info->rmap, eq->irq); + if (err) + mlx4_warn(dev, "Failed adding irq rmap\n"); + } +#endif + err = mlx4_create_eq(dev, dev->caps.num_cqs - + dev->caps.reserved_cqs + + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? + i + 1 - !!(i > MLX4_EQ_ASYNC) : 0, + eq); } + if (err) + goto err_out_unmap; } - if (dev->flags & MLX4_FLAG_MSI_X) { const char *eq_name; - for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { - if (i < dev->caps.num_comp_vectors) { - snprintf(priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, - "mlx4-comp-%d@pci:%s", i, - pci_name(dev->persist->pdev)); - } else { - snprintf(priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, - "mlx4-async@pci:%s", - pci_name(dev->persist->pdev)); - } + snprintf(priv->eq_table.irq_names + + MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, + "mlx4-async@pci:%s", + pci_name(dev->persist->pdev)); + eq_name = priv->eq_table.irq_names + + MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE; - eq_name = priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE; - err = request_irq(priv->eq_table.eq[i].irq, - mlx4_msi_x_interrupt, 0, eq_name, - priv->eq_table.eq + i); - if (err) - goto err_out_async; + err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq, + mlx4_msi_x_interrupt, 0, eq_name, + priv->eq_table.eq + MLX4_EQ_ASYNC); + if (err) + goto err_out_unmap; - priv->eq_table.eq[i].have_irq = 1; - } + priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1; } else { snprintf(priv->eq_table.irq_names, MLX4_IRQNAME_SIZE, @@ -1253,36 +1271,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, IRQF_SHARED, priv->eq_table.irq_names, dev); if (err) - goto err_out_async; + goto err_out_unmap; priv->eq_table.have_irq = 1; } err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); if (err) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); - for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) - eq_set_ci(&priv->eq_table.eq[i], 1); + /* arm ASYNC eq */ + eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1); return 0; -err_out_async: - mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); 
- -err_out_comp: - i = dev->caps.num_comp_vectors - 1; - err_out_unmap: - while (i >= 0) { - mlx4_free_eq(dev, &priv->eq_table.eq[i]); - --i; + while (i >= 0) + mlx4_free_eq(dev, &priv->eq_table.eq[i--]); +#ifdef CONFIG_RFS_ACCEL + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_priv(dev)->port[i].rmap) { + free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); + mlx4_priv(dev)->port[i].rmap = NULL; + } } +#endif + mlx4_free_irqs(dev); + +err_out_clr_int: if (!mlx4_is_slave(dev)) mlx4_unmap_clr_int(dev); - mlx4_free_irqs(dev); err_out_bitmap: mlx4_unmap_uar(dev); @@ -1300,11 +1320,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) int i; mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); +#ifdef CONFIG_RFS_ACCEL + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_priv(dev)->port[i].rmap) { + free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); + mlx4_priv(dev)->port[i].rmap = NULL; + } + } +#endif mlx4_free_irqs(dev); - for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); if (!mlx4_is_slave(dev)) @@ -1355,87 +1383,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) /* Return to default */ mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); return err; } EXPORT_SYMBOL(mlx4_test_interrupts); -int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, - int *vector) +bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) { + struct mlx4_priv *priv = mlx4_priv(dev); + + vector = MLX4_CQ_TO_EQ_VECTOR(vector); + if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) || + (vector == MLX4_EQ_ASYNC)) + return false; + + return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports); +} +EXPORT_SYMBOL(mlx4_is_eq_vector_valid); +u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port) +{ struct mlx4_priv *priv = mlx4_priv(dev); - int vec = 0, err = 0, i; + unsigned int i; + unsigned int sum = 0; + + for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) + sum += !!test_bit(port - 1, + priv->eq_table.eq[i].actv_ports.ports); + + return sum; +} +EXPORT_SYMBOL(mlx4_get_eqs_per_port); + +int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + vector = MLX4_CQ_TO_EQ_VECTOR(vector); + if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1)) + return -EINVAL; + + return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports, + dev->caps.num_ports) > 1); +} +EXPORT_SYMBOL(mlx4_is_eq_shared); + +struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port) +{ + return mlx4_priv(dev)->port[port].rmap; +} +EXPORT_SYMBOL(mlx4_get_cpu_rmap); + +int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err = 0, i = 0; + u32 min_ref_count_val = (u32)-1; + int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector); + int *prequested_vector = NULL; + mutex_lock(&priv->msix_ctl.pool_lock); - for (i = 0; !vec && i < dev->caps.comp_pool; i++) { - if (~priv->msix_ctl.pool_bm & 1ULL << i) { - priv->msix_ctl.pool_bm |= 1ULL << i; - vec = dev->caps.num_comp_vectors + 1 + i; - snprintf(priv->eq_table.irq_names + - vec * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, "%s", name); -#ifdef CONFIG_RFS_ACCEL - if (rmap) { - err = irq_cpu_rmap_add(rmap, - 
priv->eq_table.eq[vec].irq); - if (err) - mlx4_warn(dev, "Failed adding irq rmap\n"); + if (requested_vector < (dev->caps.num_comp_vectors + 1) && + (requested_vector >= 0) && + (requested_vector != MLX4_EQ_ASYNC)) { + if (test_bit(port - 1, + priv->eq_table.eq[requested_vector].actv_ports.ports)) { + prequested_vector = &requested_vector; + } else { + struct mlx4_eq *eq; + + for (i = 1; i < port; + requested_vector += mlx4_get_eqs_per_port(dev, i++)) + ; + + eq = &priv->eq_table.eq[requested_vector]; + if (requested_vector < dev->caps.num_comp_vectors + 1 && + test_bit(port - 1, eq->actv_ports.ports)) { + prequested_vector = &requested_vector; } -#endif - err = request_irq(priv->eq_table.eq[vec].irq, - mlx4_msi_x_interrupt, 0, - &priv->eq_table.irq_names[vec<<5], - priv->eq_table.eq + vec); - if (err) { - /*zero out bit by fliping it*/ - priv->msix_ctl.pool_bm ^= 1 << i; - vec = 0; - continue; - /*we dont want to break here*/ + } + } + + if (!prequested_vector) { + requested_vector = -1; + for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1; + i++) { + struct mlx4_eq *eq = &priv->eq_table.eq[i]; + + if (min_ref_count_val > eq->ref_count && + test_bit(port - 1, eq->actv_ports.ports)) { + min_ref_count_val = eq->ref_count; + requested_vector = i; } + } + + if (requested_vector < 0) { + err = -ENOSPC; + goto err_unlock; + } + + prequested_vector = &requested_vector; + } + + if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) && + dev->flags & MLX4_FLAG_MSI_X) { + set_bit(*prequested_vector, priv->msix_ctl.pool_bm); + snprintf(priv->eq_table.irq_names + + *prequested_vector * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, "mlx4-%d@%s", + *prequested_vector, dev_name(&dev->persist->pdev->dev)); - eq_set_ci(&priv->eq_table.eq[vec], 1); + err = request_irq(priv->eq_table.eq[*prequested_vector].irq, + mlx4_msi_x_interrupt, 0, + &priv->eq_table.irq_names[*prequested_vector << 5], + priv->eq_table.eq + *prequested_vector); + + if (err) { + clear_bit(*prequested_vector, priv->msix_ctl.pool_bm); + *prequested_vector = -1; + } else { +#if defined(CONFIG_SMP) + mlx4_set_eq_affinity_hint(priv, *prequested_vector); +#endif + eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1); + priv->eq_table.eq[*prequested_vector].have_irq = 1; } } + + if (!err && *prequested_vector >= 0) + priv->eq_table.eq[*prequested_vector].ref_count++; + +err_unlock: mutex_unlock(&priv->msix_ctl.pool_lock); - if (vec) { - *vector = vec; - } else { + if (!err && *prequested_vector >= 0) + *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector); + else *vector = 0; - err = (i == dev->caps.comp_pool) ? 
-ENOSPC : err; - } + return err; } EXPORT_SYMBOL(mlx4_assign_eq); -int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec) +int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec) { struct mlx4_priv *priv = mlx4_priv(dev); - return priv->eq_table.eq[vec].irq; + return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq; } EXPORT_SYMBOL(mlx4_eq_get_irq); void mlx4_release_eq(struct mlx4_dev *dev, int vec) { struct mlx4_priv *priv = mlx4_priv(dev); - /*bm index*/ - int i = vec - dev->caps.num_comp_vectors - 1; - - if (likely(i >= 0)) { - /*sanity check , making sure were not trying to free irq's - Belonging to a legacy EQ*/ - mutex_lock(&priv->msix_ctl.pool_lock); - if (priv->msix_ctl.pool_bm & 1ULL << i) { - free_irq(priv->eq_table.eq[vec].irq, - &priv->eq_table.eq[vec]); - priv->msix_ctl.pool_bm &= ~(1ULL << i); - } - mutex_unlock(&priv->msix_ctl.pool_lock); - } + int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec); + mutex_lock(&priv->msix_ctl.pool_lock); + priv->eq_table.eq[eq_vec].ref_count--; + + /* once we allocated EQ, we don't release it because it might be binded + * to cpu_rmap. + */ + mutex_unlock(&priv->msix_ctl.pool_lock); } EXPORT_SYMBOL(mlx4_release_eq); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 70de39c6a397..12fbfcb44d8a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -479,7 +479,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) } } - dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); + if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) && + (port_type_array[0] == MLX4_PORT_TYPE_IB) && + (port_type_array[1] == MLX4_PORT_TYPE_ETH)) { + mlx4_warn(dev, + "Granular QoS per VF not supported with IB/Eth configuration\n"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP; + } + + dev->caps.max_counters = dev_cap->max_counters; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = @@ -2212,20 +2220,73 @@ err_free_icm: static int mlx4_init_counters_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); - int nent; + int nent_pow2; if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) return -ENOENT; - nent = dev->caps.max_counters; - return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); + if (!dev->caps.max_counters) + return -ENOSPC; + + nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); + /* reserve last counter index for sink counter */ + return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, + nent_pow2 - 1, 0, + nent_pow2 - dev->caps.max_counters + 1); } static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) { + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) + return; + + if (!dev->caps.max_counters) + return; + mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); } +static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int port; + + for (port = 0; port < dev->caps.num_ports; port++) + if (priv->def_counter[port] != -1) + mlx4_counter_free(dev, priv->def_counter[port]); +} + +static int mlx4_allocate_default_counters(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int port, err = 0; + u32 idx; + + for (port = 0; port < dev->caps.num_ports; port++) + priv->def_counter[port] = -1; + + for (port = 0; port < dev->caps.num_ports; port++) { + err = mlx4_counter_alloc(dev, &idx); + + if (!err || err == -ENOSPC) { + priv->def_counter[port] 
= idx; + } else if (err == -ENOENT) { + err = 0; + continue; + } else { + mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", + __func__, port + 1, err); + mlx4_cleanup_default_counters(dev); + return err; + } + + mlx4_dbg(dev, "%s: default counter index %d for port %d\n", + __func__, priv->def_counter[port], port + 1); + } + + return err; +} + int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2234,8 +2295,10 @@ int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) return -ENOENT; *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); - if (*idx == -1) - return -ENOMEM; + if (*idx == -1) { + *idx = MLX4_SINK_COUNTER_INDEX(dev); + return -ENOSPC; + } return 0; } @@ -2258,8 +2321,35 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) } EXPORT_SYMBOL_GPL(mlx4_counter_alloc); +static int __mlx4_clear_if_stat(struct mlx4_dev *dev, + u8 counter_index) +{ + struct mlx4_cmd_mailbox *if_stat_mailbox; + int err; + u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; + + if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(if_stat_mailbox)) + return PTR_ERR(if_stat_mailbox); + + err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, + MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, if_stat_mailbox); + return err; +} + void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) { + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) + return; + + if (idx == MLX4_SINK_COUNTER_INDEX(dev)) + return; + + __mlx4_clear_if_stat(dev, idx); + mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); return; } @@ -2279,6 +2369,14 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) } EXPORT_SYMBOL_GPL(mlx4_counter_free); +int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->def_counter[port - 1]; +} +EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); + void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2383,11 +2481,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) if (err) { if (dev->flags & MLX4_FLAG_MSI_X) { mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", - priv->eq_table.eq[dev->caps.num_comp_vectors].irq); + priv->eq_table.eq[MLX4_EQ_ASYNC].irq); mlx4_warn(dev, "Trying again without MSI-X\n"); } else { mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", - priv->eq_table.eq[dev->caps.num_comp_vectors].irq); + priv->eq_table.eq[MLX4_EQ_ASYNC].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } @@ -2414,10 +2512,18 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) goto err_srq_table_free; } - err = mlx4_init_counters_table(dev); - if (err && err != -ENOENT) { - mlx4_err(dev, "Failed to initialize counters table, aborting\n"); - goto err_qp_table_free; + if (!mlx4_is_slave(dev)) { + err = mlx4_init_counters_table(dev); + if (err && err != -ENOENT) { + mlx4_err(dev, "Failed to initialize counters table, aborting\n"); + goto err_qp_table_free; + } + } + + err = mlx4_allocate_default_counters(dev); + if (err) { + mlx4_err(dev, "Failed to allocate default counters, aborting\n"); + goto err_counters_table_free; } if (!mlx4_is_slave(dev)) { @@ -2451,15 +2557,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", port); - goto err_counters_table_free; + 
goto err_default_countes_free; } } } return 0; +err_default_countes_free: + mlx4_cleanup_default_counters(dev); + err_counters_table_free: - mlx4_cleanup_counters_table(dev); + if (!mlx4_is_slave(dev)) + mlx4_cleanup_counters_table(dev); err_qp_table_free: mlx4_cleanup_qp_table(dev); @@ -2500,14 +2610,45 @@ err_uar_table_free: return err; } +static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) +{ + int requested_cpu = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_eq *eq; + int off = 0; + int i; + + if (eqn > dev->caps.num_comp_vectors) + return -EINVAL; + + for (i = 1; i < port; i++) + off += mlx4_get_eqs_per_port(dev, i); + + requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); + + /* Meaning EQs are shared, and this call comes from the second port */ + if (requested_cpu < 0) + return 0; + + eq = &priv->eq_table.eq[eqn]; + + if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_set_cpu(requested_cpu, eq->affinity_mask); + + return 0; +} + static void mlx4_enable_msi_x(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; int i; + int port = 0; if (msi_x) { - int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; + int nreq = dev->caps.num_ports * num_online_cpus() + 1; nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); @@ -2522,20 +2663,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, nreq); - if (nreq < 0) { + if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { kfree(entries); goto no_msi; - } else if (nreq < MSIX_LEGACY_SZ + - dev->caps.num_ports * MIN_MSIX_P_PORT) { - /*Working in legacy mode , all EQ's shared*/ - dev->caps.comp_pool = 0; - dev->caps.num_comp_vectors = nreq - 1; - } else { - dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; - dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; } - for (i = 0; i < nreq; ++i) - priv->eq_table.eq[i].irq = entries[i].vector; + /* 1 is reserved for events (asyncrounous EQ) */ + dev->caps.num_comp_vectors = nreq - 1; + + priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; + bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, + dev->caps.num_ports); + + for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { + if (i == MLX4_EQ_ASYNC) + continue; + + priv->eq_table.eq[i].irq = + entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; + + if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { + bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, + dev->caps.num_ports); + /* We don't set affinity hint when there + * aren't enough EQs + */ + } else { + set_bit(port, + priv->eq_table.eq[i].actv_ports.ports); + if (mlx4_init_affinity_hint(dev, port + 1, i)) + mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", + i); + } + /* We divide the Eqs evenly between the two ports. + * (dev->caps.num_comp_vectors / dev->caps.num_ports) + * refers to the number of Eqs per port + * (i.e eqs_per_port). Theoretically, we would like to + * write something like (i + 1) % eqs_per_port == 0. + * However, since there's an asynchronous Eq, we have + * to skip over it by comparing this condition to + * !!((i + 1) > MLX4_EQ_ASYNC). + */ + if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && + ((i + 1) % + (dev->caps.num_comp_vectors / dev->caps.num_ports)) == + !!((i + 1) > MLX4_EQ_ASYNC)) + /* If dev->caps.num_comp_vectors < dev->caps.num_ports, + * everything is shared anyway. 
+ */ + port++; + } dev->flags |= MLX4_FLAG_MSI_X; @@ -2545,10 +2721,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) no_msi: dev->caps.num_comp_vectors = 1; - dev->caps.comp_pool = 0; - for (i = 0; i < 2; ++i) + BUG_ON(MLX4_EQ_ASYNC >= 2); + for (i = 0; i < 2; ++i) { priv->eq_table.eq[i].irq = dev->persist->pdev->irq; + if (i != MLX4_EQ_ASYNC) { + bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, + dev->caps.num_ports); + } + } } static int mlx4_init_port_info(struct mlx4_dev *dev, int port) @@ -2613,6 +2794,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); device_remove_file(&info->dev->persist->pdev->dev, &info->port_mtu_attr); +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(info->rmap); + info->rmap = NULL; +#endif } static int mlx4_init_steering(struct mlx4_dev *dev) @@ -2768,6 +2953,7 @@ disable_sriov: free_mem: dev->persist->num_vfs = 0; kfree(dev->dev_vfs); + dev->dev_vfs = NULL; return dev_flags & ~MLX4_FLAG_MASTER; } @@ -2919,6 +3105,7 @@ slave_start: existing_vfs, reset_flow); + mlx4_close_fw(dev); mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); dev->flags = dev_flags; if (!SRIOV_VALID_STATE(dev->flags)) { @@ -3007,18 +3194,6 @@ slave_start: /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ if (mlx4_is_master(dev)) { - int ib_ports = 0; - - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) - ib_ports++; - - if (ib_ports && - (num_vfs_argc > 1 || probe_vfs_argc > 1)) { - mlx4_err(dev, - "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); - err = -EINVAL; - goto err_close; - } if (dev->caps.num_ports < 2 && num_vfs_argc > 1) { err = -EINVAL; @@ -3055,7 +3230,7 @@ slave_start: if (err) goto err_master_mfunc; - priv->msix_ctl.pool_bm = 0; + bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); mutex_init(&priv->msix_ctl.pool_lock); mlx4_enable_msi_x(dev); @@ -3077,7 +3252,6 @@ slave_start: !mlx4_is_mfunc(dev)) { dev->flags &= ~MLX4_FLAG_MSI_X; dev->caps.num_comp_vectors = 1; - dev->caps.comp_pool = 0; pci_disable_msix(pdev); err = mlx4_setup_hca(dev); } @@ -3128,7 +3302,9 @@ err_port: for (--port; port >= 1; --port) mlx4_cleanup_port_info(&priv->port[port]); - mlx4_cleanup_counters_table(dev); + mlx4_cleanup_default_counters(dev); + if (!mlx4_is_slave(dev)) + mlx4_cleanup_counters_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); @@ -3426,7 +3602,9 @@ static void mlx4_unload_one(struct pci_dev *pdev) mlx4_free_resource_tracker(dev, RES_TR_FREE_SLAVES_ONLY); - mlx4_cleanup_counters_table(dev); + mlx4_cleanup_default_counters(dev); + if (!mlx4_is_slave(dev)) + mlx4_cleanup_counters_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 502d3dd2c888..a092c5c34d43 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -65,6 +65,8 @@ #define INIT_HCA_TPT_MW_ENABLE (1 << 7) +#define MLX4_QUERY_IF_STAT_RESET BIT(31) + enum { MLX4_HCR_BASE = 0x80680, MLX4_HCR_SIZE = 0x0001c, @@ -287,6 +289,12 @@ struct mlx4_icm_table { #define MLX4_CQE_SIZE_MASK_STRIDE 0x3 #define MLX4_EQE_SIZE_MASK_STRIDE 0x30 +#define MLX4_EQ_ASYNC 0 +#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \ + !!((int)(vector) >= MLX4_EQ_ASYNC)) +#define 
MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \ + !!((int)(vector) >= MLX4_EQ_ASYNC)) + /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. */ @@ -391,6 +399,9 @@ struct mlx4_eq { struct mlx4_buf_list *page_list; struct mlx4_mtt mtt; struct mlx4_eq_tasklet tasklet_ctx; + struct mlx4_active_ports actv_ports; + u32 ref_count; + cpumask_var_t affinity_mask; }; struct mlx4_slave_eqe { @@ -808,6 +819,7 @@ struct mlx4_port_info { struct mlx4_vlan_table vlan_table; struct mlx4_roce_gid_table gid_table; int base_qpn; + struct cpu_rmap *rmap; }; struct mlx4_sense { @@ -818,7 +830,7 @@ struct mlx4_sense { }; struct mlx4_msix_ctl { - u64 pool_bm; + DECLARE_BITMAP(pool_bm, MAX_MSIX); struct mutex pool_lock; }; @@ -864,6 +876,7 @@ struct mlx4_priv { struct mlx4_qp_table qp_table; struct mlx4_mcg_table mcg_table; struct mlx4_bitmap counters_bitmap; + int def_counter[MLX4_MAX_PORTS]; struct mlx4_catas_err catas_err; @@ -997,6 +1010,8 @@ int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list); int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx); +int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port, + struct mlx4_counter *data); int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d021f079f181..d5f9adb6a784 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -338,7 +338,7 @@ struct mlx4_en_cq { struct napi_struct napi; int size; int buf_size; - unsigned vector; + int vector; enum cq_type is_tx; u16 moder_time; u16 moder_cnt; @@ -566,6 +566,7 @@ struct mlx4_en_priv { #endif struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; + struct mlx4_en_counter_stats pf_stats; struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES]; struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES]; struct mlx4_en_flow_stats_rx rx_flowstats; @@ -582,6 +583,7 @@ struct mlx4_en_priv { int base_tx_qpn; struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; struct hwtstamp_config hwtstamp_config; + u32 counter_index; #ifdef CONFIG_MLX4_EN_DCB struct ieee_ets ets; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index 00555832a4ae..7fd466c0b929 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -23,6 +23,14 @@ struct mlx4_en_pkt_stats { #define NUM_PKT_STATS 43 }; +struct mlx4_en_counter_stats { + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long tx_packets; + unsigned long tx_bytes; +#define NUM_PF_STATS 4 +}; + struct mlx4_en_port_stats { unsigned long tso_packets; unsigned long xmit_more; @@ -99,7 +107,7 @@ enum { }; #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \ - NUM_FLOW_STATS + NUM_PERF_STATS) + NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS) #define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \ sizeof(((struct net_device_stats *)0)->n)) diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 2bf437aafc53..bae8b22edbb7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -82,7 +82,6 @@ u64 
mlx4_make_profile(struct mlx4_dev *dev, u64 total_size = 0; struct mlx4_resource *profile; - struct mlx4_resource tmp; struct sysinfo si; int i, j; @@ -149,11 +148,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, */ for (i = MLX4_RES_NUM; i > 0; --i) for (j = 1; j < i; ++j) { - if (profile[j].size > profile[j - 1].size) { - tmp = profile[j]; - profile[j] = profile[j - 1]; - profile[j - 1] = tmp; - } + if (profile[j].size > profile[j - 1].size) + swap(profile[j], profile[j - 1]); } for (i = 0; i < MLX4_RES_NUM; ++i) { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index b75214a80d0e..20268634a9ab 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -749,7 +749,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) { int sort[MLX4_NUM_QP_REGION]; - int i, j, tmp; + int i, j; int last_base = dev->caps.num_qps; for (i = 1; i < MLX4_NUM_QP_REGION; ++i) @@ -758,11 +758,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) { for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) { if (dev->caps.reserved_qps_cnt[sort[j]] > - dev->caps.reserved_qps_cnt[sort[j - 1]]) { - tmp = sort[j]; - sort[j] = sort[j - 1]; - sort[j - 1] = tmp; - } + dev->caps.reserved_qps_cnt[sort[j - 1]]) + swap(sort[j], sort[j - 1]); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index bafe2180cf0c..731423ca575d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -46,8 +46,11 @@ #include "mlx4.h" #include "fw.h" +#include "mlx4_stats.h" #define MLX4_MAC_VALID (1ull << 63) +#define MLX4_PF_COUNTERS_PER_PORT 2 +#define MLX4_VF_COUNTERS_PER_PORT 1 struct mac_res { struct list_head list; @@ -459,11 +462,21 @@ void mlx4_init_quotas(struct mlx4_dev *dev) dev->quotas.mpt = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; } + +static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev) +{ + /* reduce the sink counter */ + return (dev->caps.max_counters - 1 - + (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS)) + / MLX4_MAX_PORTS; +} + int mlx4_init_resource_tracker(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i, j; int t; + int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev); priv->mfunc.master.res_tracker.slave_list = kzalloc(dev->num_slaves * sizeof(struct slave_list), @@ -499,6 +512,9 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) res_alloc->allocated = kzalloc((dev->persist-> num_vfs + 1) * sizeof(int), GFP_KERNEL); + /* Reduce the sink counter */ + if (i == RES_COUNTER) + res_alloc->res_free = dev->caps.max_counters - 1; if (!res_alloc->quota || !res_alloc->guaranteed || !res_alloc->allocated) @@ -577,9 +593,17 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) break; case RES_COUNTER: res_alloc->quota[t] = dev->caps.max_counters; - res_alloc->guaranteed[t] = 0; if (t == mlx4_master_func_num(dev)) - res_alloc->res_free = res_alloc->quota[t]; + res_alloc->guaranteed[t] = + MLX4_PF_COUNTERS_PER_PORT * + MLX4_MAX_PORTS; + else if (t <= max_vfs_guarantee_counter) + res_alloc->guaranteed[t] = + MLX4_VF_COUNTERS_PER_PORT * + MLX4_MAX_PORTS; + else + res_alloc->guaranteed[t] = 0; + res_alloc->res_free -= res_alloc->guaranteed[t]; break; default: break; @@ -700,6 +724,9 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, } } +static int 
handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc, + u8 slave, int port); + static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, u8 slave, u32 qpn) @@ -715,6 +742,10 @@ static int update_vport_qp_param(struct mlx4_dev *dev, vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + err = handle_counter(dev, qpc, slave, port); + if (err) + goto out; + if (MLX4_VGT != vp_oper->state.default_vlan) { /* the reserved QPs (special, proxy, tunnel) * do not operate over vlans @@ -859,6 +890,83 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, spin_unlock_irq(mlx4_tlock(dev)); } +static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int port); + +static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port, + int counter_index) +{ + struct res_common *r; + struct res_counter *counter; + int ret = 0; + + if (counter_index == MLX4_SINK_COUNTER_INDEX(dev)) + return ret; + + spin_lock_irq(mlx4_tlock(dev)); + r = find_res(dev, counter_index, RES_COUNTER); + if (!r || r->owner != slave) + ret = -EINVAL; + counter = container_of(r, struct res_counter, com); + if (!counter->port) + counter->port = port; + + spin_unlock_irq(mlx4_tlock(dev)); + return ret; +} + +static int handle_unexisting_counter(struct mlx4_dev *dev, + struct mlx4_qp_context *qpc, u8 slave, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *tmp; + struct res_counter *counter; + u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev); + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry(tmp, + &tracker->slave_list[slave].res_list[RES_COUNTER], + list) { + counter = container_of(tmp, struct res_counter, com); + if (port == counter->port) { + qpc->pri_path.counter_index = counter->com.res_id; + spin_unlock_irq(mlx4_tlock(dev)); + return 0; + } + } + spin_unlock_irq(mlx4_tlock(dev)); + + /* No existing counter, need to allocate a new counter */ + err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx, + port); + if (err == -ENOENT) { + err = 0; + } else if (err && err != -ENOSPC) { + mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n", + __func__, slave, err); + } else { + qpc->pri_path.counter_index = counter_idx; + mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n", + __func__, slave, qpc->pri_path.counter_index); + err = 0; + } + + return err; +} + +static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc, + u8 slave, int port) +{ + if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev)) + return handle_existing_counter(dev, slave, port, + qpc->pri_path.counter_index); + + return handle_unexisting_counter(dev, qpc, slave, port); +} + static struct res_common *alloc_qp_tr(int id) { struct res_qp *ret; @@ -952,7 +1060,7 @@ static struct res_common *alloc_srq_tr(int id) return &ret->com; } -static struct res_common *alloc_counter_tr(int id) +static struct res_common *alloc_counter_tr(int id, int port) { struct res_counter *ret; @@ -962,6 +1070,7 @@ static struct res_common *alloc_counter_tr(int id) ret->com.res_id = id; ret->com.state = RES_COUNTER_ALLOCATED; + ret->port = port; return &ret->com; } @@ -1022,7 +1131,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, pr_err("implementation missing\n"); return NULL; case RES_COUNTER: - ret = 
alloc_counter_tr(id); + ret = alloc_counter_tr(id, extra); break; case RES_XRCD: ret = alloc_xrcdn_tr(id); @@ -1039,6 +1148,53 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, return ret; } +int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port, + struct mlx4_counter *data) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *tmp; + struct res_counter *counter; + int *counters_arr; + int i = 0, err = 0; + + memset(data, 0, sizeof(*data)); + + counters_arr = kmalloc_array(dev->caps.max_counters, + sizeof(*counters_arr), GFP_KERNEL); + if (!counters_arr) + return -ENOMEM; + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry(tmp, + &tracker->slave_list[slave].res_list[RES_COUNTER], + list) { + counter = container_of(tmp, struct res_counter, com); + if (counter->port == port) { + counters_arr[i] = (int)tmp->res_id; + i++; + } + } + spin_unlock_irq(mlx4_tlock(dev)); + counters_arr[i] = -1; + + i = 0; + + while (counters_arr[i] != -1) { + err = mlx4_get_counter_stats(dev, counters_arr[i], data, + 0); + if (err) { + memset(data, 0, sizeof(*data)); + goto table_changed; + } + i++; + } + +table_changed: + kfree(counters_arr); + return 0; +} + static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, enum mlx4_resource type, int extra) { @@ -2001,7 +2157,7 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, } static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) + u64 in_param, u64 *out_param, int port) { u32 index; int err; @@ -2019,7 +2175,7 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, return err; } - err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); + err = add_res_range(dev, slave, index, 1, RES_COUNTER, port); if (err) { __mlx4_counter_free(dev, index); mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); @@ -2101,7 +2257,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, case RES_COUNTER: err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); + vhcr->in_param, &vhcr->out_param, 0); break; case RES_XRCD: @@ -2335,6 +2491,9 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, return -EINVAL; index = get_param_l(&in_param); + if (index == MLX4_SINK_COUNTER_INDEX(dev)) + return 0; + err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); if (err) return err; @@ -2703,6 +2862,10 @@ static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr, context->qkey = cpu_to_be32(qkey); } +static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox); + int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -2725,6 +2888,10 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, struct res_srq *srq; int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; + err = adjust_qp_sched_queue(dev, slave, qpc, inbox); + if (err) + return err; + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); if (err) return err; @@ -3526,8 +3693,8 @@ static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | ((port & 1) << 6); - if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH || - mlx4_is_eth(dev, port + 1)) { + if (optpar 
& (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) || + qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) { qpc->pri_path.sched_queue = pri_sched_queue; } @@ -3965,6 +4132,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, return 0; } +static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, + struct _rule_hw *eth_header) +{ + if (is_multicast_ether_addr(eth_header->eth.dst_mac) || + is_broadcast_ether_addr(eth_header->eth.dst_mac)) { + struct mlx4_net_trans_rule_hw_eth *eth = + (struct mlx4_net_trans_rule_hw_eth *)eth_header; + struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1); + bool last_rule = next_rule->size == 0 && next_rule->id == 0 && + next_rule->rsvd == 0; + + if (last_rule) + ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC); + } +} + /* * In case of missing eth header, append eth header with a MAC address * assigned to the VF. @@ -4117,6 +4300,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, rule_header = (struct _rule_hw *)(ctrl + 1); header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); + if (header_id == MLX4_NET_TRANS_RULE_ID_ETH) + handle_eth_header_mcast_prio(ctrl, rule_header); + + if (slave == dev->caps.function) + goto execute; + switch (header_id) { case MLX4_NET_TRANS_RULE_ID_ETH: if (validate_eth_header_mac(slave, rule_header, rlist)) { @@ -4143,6 +4332,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, goto err_put; } +execute: err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, vhcr->in_modifier, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 8ff57e8e3e91..158c88c69ef9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -3,6 +3,18 @@ # config MLX5_CORE - tristate + tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" depends on PCI default n + ---help--- + Core driver for low level functionality of the ConnectX-4 and + Connect-IB cards by Mellanox Technologies. + +config MLX5_CORE_EN + bool "Mellanox Technologies ConnectX-4 Ethernet support" + depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + default n + ---help--- + Ethernet support in Mellanox Technologies ConnectX-4 NIC. + Ethernet and Infiniband support in ConnectX-4 are currently mutually + exclusive. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 105780bb980b..26a68b8af2c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -2,4 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o + mad.o transobj.o vport.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \ + en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \ + en_txrx.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index ac0f7bf4be95..0715b497511f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -42,95 +42,36 @@ #include "mlx5_core.h" /* Handling for queue buffers -- we allocate a bunch of memory and - * register it in a memory region at HCA virtual address 0. 
If the - * requested size is > max_direct, we split the allocation into - * multiple pages, so we don't require too much contiguous memory. + * register it in a memory region at HCA virtual address 0. */ -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf) +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) { dma_addr_t t; buf->size = size; - if (size <= max_direct) { - buf->nbufs = 1; - buf->npages = 1; - buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, - size, &t, GFP_KERNEL); - if (!buf->direct.buf) - return -ENOMEM; - - buf->direct.map = t; - - while (t & ((1 << buf->page_shift) - 1)) { - --buf->page_shift; - buf->npages *= 2; - } - } else { - int i; - - buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; - buf->npages = buf->nbufs; - buf->page_shift = PAGE_SHIFT; - buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), - GFP_KERNEL); - if (!buf->page_list) - return -ENOMEM; - - for (i = 0; i < buf->nbufs; i++) { - buf->page_list[i].buf = - dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE, - &t, GFP_KERNEL); - if (!buf->page_list[i].buf) - goto err_free; - - buf->page_list[i].map = t; - } - - if (BITS_PER_LONG == 64) { - struct page **pages; - pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); - if (!pages) - goto err_free; - for (i = 0; i < buf->nbufs; i++) - pages[i] = virt_to_page(buf->page_list[i].buf); - buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); - kfree(pages); - if (!buf->direct.buf) - goto err_free; - } - } + buf->npages = 1; + buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; - return 0; + buf->direct.map = t; -err_free: - mlx5_buf_free(dev, buf); + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } - return -ENOMEM; + return 0; } EXPORT_SYMBOL_GPL(mlx5_buf_alloc); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) { - int i; - - if (buf->nbufs == 1) - dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, - buf->direct.map); - else { - if (BITS_PER_LONG == 64) - vunmap(buf->direct.buf); - - for (i = 0; i < buf->nbufs; i++) - if (buf->page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - buf->page_list[i].buf, - buf->page_list[i].map); - kfree(buf->page_list); - } + dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); } EXPORT_SYMBOL_GPL(mlx5_buf_free); @@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) int i; for (i = 0; i < buf->npages; i++) { - if (buf->nbufs == 1) - addr = buf->direct.map + (i << buf->page_shift); - else - addr = buf->page_list[i].map; + addr = buf->direct.map + (i << buf->page_shift); pas[i] = cpu_to_be64(addr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e3273faf4568..75ff58dc1ff5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -75,25 +75,6 @@ enum { MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, }; -enum { - MLX5_CMD_STAT_OK = 0x0, - MLX5_CMD_STAT_INT_ERR = 0x1, - MLX5_CMD_STAT_BAD_OP_ERR = 0x2, - MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, - MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, - MLX5_CMD_STAT_BAD_RES_ERR = 0x5, - MLX5_CMD_STAT_RES_BUSY = 0x6, - MLX5_CMD_STAT_LIM_ERR = 0x8, - 
MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, - MLX5_CMD_STAT_IX_ERR = 0xa, - MLX5_CMD_STAT_NO_RES_ERR = 0xf, - MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, - MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, - MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, - MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, - MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, -}; - static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, @@ -390,8 +371,17 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_ARM_RQ: return "ARM_RQ"; - case MLX5_CMD_OP_RESIZE_SRQ: - return "RESIZE_SRQ"; + case MLX5_CMD_OP_CREATE_XRC_SRQ: + return "CREATE_XRC_SRQ"; + + case MLX5_CMD_OP_DESTROY_XRC_SRQ: + return "DESTROY_XRC_SRQ"; + + case MLX5_CMD_OP_QUERY_XRC_SRQ: + return "QUERY_XRC_SRQ"; + + case MLX5_CMD_OP_ARM_XRC_SRQ: + return "ARM_XRC_SRQ"; case MLX5_CMD_OP_ALLOC_PD: return "ALLOC_PD"; @@ -408,8 +398,8 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_ATTACH_TO_MCG: return "ATTACH_TO_MCG"; - case MLX5_CMD_OP_DETACH_FROM_MCG: - return "DETACH_FROM_MCG"; + case MLX5_CMD_OP_DETTACH_FROM_MCG: + return "DETTACH_FROM_MCG"; case MLX5_CMD_OP_ALLOC_XRCD: return "ALLOC_XRCD"; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index eb0cf81f5f45..04ab7e445eae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, } EXPORT_SYMBOL(mlx5_core_modify_cq); +int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, + struct mlx5_core_cq *cq, + u16 cq_period, + u16 cq_max_count) +{ + struct mlx5_modify_cq_mbox_in in; + + memset(&in, 0, sizeof(in)); + + in.cqn = cpu_to_be32(cq->cqn); + in.ctx.cq_period = cpu_to_be16(cq_period); + in.ctx.cq_max_count = cpu_to_be16(cq_max_count); + in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | + MLX5_CQ_MODIFY_COUNT); + + return mlx5_core_modify_cq(dev, cq, &in, sizeof(in)); +} + int mlx5_init_cq_table(struct mlx5_core_dev *dev) { struct mlx5_cq_table *table = &dev->priv.cq_table; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h new file mode 100644 index 000000000000..3d23bd657e3c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -0,0 +1,521 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/if_vlan.h> +#include <linux/etherdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/cq.h> +#include <linux/mlx5/vport.h> +#include "wq.h" +#include "transobj.h" +#include "mlx5_core.h" + +#define MLX5E_MAX_NUM_TC 8 + +#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7 +#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd + +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7 +#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd + +#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024) +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 +#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 +#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 +#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7 + +#define MLX5E_TX_CQ_POLL_BUDGET 128 +#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ + +static const char vport_strings[][ETH_GSTRING_LEN] = { + /* vport statistics */ + "rx_packets", + "rx_bytes", + "tx_packets", + "tx_bytes", + "rx_error_packets", + "rx_error_bytes", + "tx_error_packets", + "tx_error_bytes", + "rx_unicast_packets", + "rx_unicast_bytes", + "tx_unicast_packets", + "tx_unicast_bytes", + "rx_multicast_packets", + "rx_multicast_bytes", + "tx_multicast_packets", + "tx_multicast_bytes", + "rx_broadcast_packets", + "rx_broadcast_bytes", + "tx_broadcast_packets", + "tx_broadcast_bytes", + + /* SW counters */ + "tso_packets", + "tso_bytes", + "lro_packets", + "lro_bytes", + "rx_csum_good", + "rx_csum_none", + "tx_csum_offload", + "tx_queue_stopped", + "tx_queue_wake", + "tx_queue_dropped", + "rx_wqe_err", +}; + +struct mlx5e_vport_stats { + /* HW counters */ + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 rx_error_packets; + u64 rx_error_bytes; + u64 tx_error_packets; + u64 tx_error_bytes; + u64 rx_unicast_packets; + u64 rx_unicast_bytes; + u64 tx_unicast_packets; + u64 tx_unicast_bytes; + u64 rx_multicast_packets; + u64 rx_multicast_bytes; + u64 tx_multicast_packets; + u64 tx_multicast_bytes; + u64 rx_broadcast_packets; + u64 rx_broadcast_bytes; + u64 tx_broadcast_packets; + u64 tx_broadcast_bytes; + + /* SW counters */ + u64 tso_packets; + u64 tso_bytes; + u64 lro_packets; + u64 lro_bytes; + u64 rx_csum_good; + u64 rx_csum_none; + u64 tx_csum_offload; + u64 tx_queue_stopped; + u64 tx_queue_wake; + u64 tx_queue_dropped; + u64 rx_wqe_err; + +#define NUM_VPORT_COUNTERS 31 +}; + +static const char rq_stats_strings[][ETH_GSTRING_LEN] = { + "packets", + "csum_none", + "lro_packets", + "lro_bytes", + "wqe_err" +}; + +struct mlx5e_rq_stats { + u64 packets; + u64 csum_none; + u64 lro_packets; + u64 lro_bytes; + u64 wqe_err; +#define NUM_RQ_STATS 5 +}; + +static const char sq_stats_strings[][ETH_GSTRING_LEN] = { + "packets", + "tso_packets", + "tso_bytes", + "csum_offload_none", + "stopped", + "wake", + "dropped", + "nop" +}; + +struct mlx5e_sq_stats { + u64 packets; + u64 tso_packets; + u64 tso_bytes; + u64 csum_offload_none; + u64 stopped; + u64 wake; + u64 dropped; + u64 nop; +#define NUM_SQ_STATS 8 +}; + +struct mlx5e_stats { + struct mlx5e_vport_stats 
vport; +}; + +struct mlx5e_params { + u8 log_sq_size; + u8 log_rq_size; + u16 num_channels; + u8 default_vlan_prio; + u8 num_tc; + u16 rx_cq_moderation_usec; + u16 rx_cq_moderation_pkts; + u16 tx_cq_moderation_usec; + u16 tx_cq_moderation_pkts; + u16 min_rx_wqes; + u16 rx_hash_log_tbl_sz; + bool lro_en; + u32 lro_wqe_sz; +}; + +enum { + MLX5E_RQ_STATE_POST_WQES_ENABLE, +}; + +enum cq_flags { + MLX5E_CQ_HAS_CQES = 1, +}; + +struct mlx5e_cq { + /* data path - accessed per cqe */ + struct mlx5_cqwq wq; + unsigned long flags; + + /* data path - accessed per napi poll */ + struct napi_struct *napi; + struct mlx5_core_cq mcq; + struct mlx5e_channel *channel; + + /* control */ + struct mlx5_wq_ctrl wq_ctrl; +} ____cacheline_aligned_in_smp; + +struct mlx5e_rq { + /* data path */ + struct mlx5_wq_ll wq; + u32 wqe_sz; + struct sk_buff **skb; + + struct device *pdev; + struct net_device *netdev; + struct mlx5e_rq_stats stats; + struct mlx5e_cq cq; + + unsigned long state; + int ix; + + /* control */ + struct mlx5_wq_ctrl wq_ctrl; + u32 rqn; + struct mlx5e_channel *channel; +} ____cacheline_aligned_in_smp; + +struct mlx5e_tx_skb_cb { + u32 num_bytes; + u8 num_wqebbs; + u8 num_dma; +}; + +#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) + +struct mlx5e_sq_dma { + dma_addr_t addr; + u32 size; +}; + +enum { + MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, +}; + +struct mlx5e_sq { + /* data path */ + + /* dirtied @completion */ + u16 cc; + u32 dma_fifo_cc; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + u32 bf_offset; + struct mlx5e_sq_stats stats; + + struct mlx5e_cq cq; + + /* pointers to per packet info: write@xmit, read@completion */ + struct sk_buff **skb; + struct mlx5e_sq_dma *dma_fifo; + + /* read only */ + struct mlx5_wq_cyc wq; + u32 dma_fifo_mask; + void __iomem *uar_map; + struct netdev_queue *txq; + u32 sqn; + u32 bf_buf_size; + u16 max_inline; + u16 edge; + struct device *pdev; + __be32 mkey_be; + unsigned long state; + + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5_uar uar; + struct mlx5e_channel *channel; + int tc; +} ____cacheline_aligned_in_smp; + +static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) +{ + return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) || + (sq->cc == sq->pc)); +} + +enum channel_flags { + MLX5E_CHANNEL_NAPI_SCHED = 1, +}; + +struct mlx5e_channel { + /* data path */ + struct mlx5e_rq rq; + struct mlx5e_sq sq[MLX5E_MAX_NUM_TC]; + struct napi_struct napi; + struct device *pdev; + struct net_device *netdev; + __be32 mkey_be; + u8 num_tc; + unsigned long flags; + int tc_to_txq_map[MLX5E_MAX_NUM_TC]; + + /* control */ + struct mlx5e_priv *priv; + int ix; + int cpu; +}; + +enum mlx5e_traffic_types { + MLX5E_TT_IPV4_TCP = 0, + MLX5E_TT_IPV6_TCP = 1, + MLX5E_TT_IPV4_UDP = 2, + MLX5E_TT_IPV6_UDP = 3, + MLX5E_TT_IPV4 = 4, + MLX5E_TT_IPV6 = 5, + MLX5E_TT_ANY = 6, + MLX5E_NUM_TT = 7, +}; + +enum { + MLX5E_RQT_SPREADING = 0, + MLX5E_RQT_DEFAULT_RQ = 1, + MLX5E_NUM_RQT = 2, +}; + +struct mlx5e_eth_addr_info { + u8 addr[ETH_ALEN + 2]; + u32 tt_vec; + u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */ +}; + +#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) + +struct mlx5e_eth_addr_db { + struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE]; + struct mlx5e_eth_addr_info broadcast; + struct mlx5e_eth_addr_info allmulti; + struct mlx5e_eth_addr_info promisc; + bool broadcast_enabled; + bool allmulti_enabled; + bool promisc_enabled; +}; + 
+enum { + MLX5E_STATE_ASYNC_EVENTS_ENABLE, + MLX5E_STATE_OPENED, +}; + +struct mlx5e_vlan_db { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 active_vlans_ft_ix[VLAN_N_VID]; + u32 untagged_rule_ft_ix; + u32 any_vlan_rule_ft_ix; + bool filter_disabled; +}; + +struct mlx5e_flow_table { + void *vlan; + void *main; +}; + +struct mlx5e_priv { + /* priv data path fields - start */ + int num_tc; + int default_vlan_prio; + struct mlx5e_sq **txq_to_sq_map; + /* priv data path fields - end */ + + unsigned long state; + struct mutex state_lock; /* Protects Interface state */ + struct mlx5_uar cq_uar; + u32 pdn; + u32 tdn; + struct mlx5_core_mr mr; + + struct mlx5e_channel **channel; + u32 tisn[MLX5E_MAX_NUM_TC]; + u32 rqtn; + u32 tirn[MLX5E_NUM_TT]; + + struct mlx5e_flow_table ft; + struct mlx5e_eth_addr_db eth_addr; + struct mlx5e_vlan_db vlan; + + struct mlx5e_params params; + spinlock_t async_events_spinlock; /* sync hw events */ + struct work_struct update_carrier_work; + struct work_struct set_rx_mode_work; + struct delayed_work update_stats_work; + + struct mlx5_core_dev *mdev; + struct net_device *netdev; + struct mlx5e_stats stats; +}; + +#define MLX5E_NET_IP_ALIGN 2 + +struct mlx5e_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_eth_seg eth; +}; + +struct mlx5e_rx_wqe { + struct mlx5_wqe_srq_next_seg next; + struct mlx5_wqe_data_seg data; +}; + +enum mlx5e_link_mode { + MLX5E_1000BASE_CX_SGMII = 0, + MLX5E_1000BASE_KX = 1, + MLX5E_10GBASE_CX4 = 2, + MLX5E_10GBASE_KX4 = 3, + MLX5E_10GBASE_KR = 4, + MLX5E_20GBASE_KR2 = 5, + MLX5E_40GBASE_CR4 = 6, + MLX5E_40GBASE_KR4 = 7, + MLX5E_56GBASE_R4 = 8, + MLX5E_10GBASE_CR = 12, + MLX5E_10GBASE_SR = 13, + MLX5E_10GBASE_ER = 14, + MLX5E_40GBASE_SR4 = 15, + MLX5E_40GBASE_LR4 = 16, + MLX5E_100GBASE_CR4 = 20, + MLX5E_100GBASE_SR4 = 21, + MLX5E_100GBASE_KR4 = 22, + MLX5E_100GBASE_LR4 = 23, + MLX5E_100BASE_TX = 24, + MLX5E_100BASE_T = 25, + MLX5E_10GBASE_T = 26, + MLX5E_25GBASE_CR = 27, + MLX5E_25GBASE_KR = 28, + MLX5E_25GBASE_SR = 29, + MLX5E_50GBASE_CR2 = 30, + MLX5E_50GBASE_KR2 = 31, + MLX5E_LINK_MODES_NUMBER, +}; + +#define MLX5E_PROT_MASK(link_mode) (1 << link_mode) + +void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); +netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); + +void mlx5e_completion_event(struct mlx5_core_cq *mcq); +void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); +int mlx5e_napi_poll(struct napi_struct *napi, int budget); +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq); +bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); +bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); +struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); + +void mlx5e_update_stats(struct mlx5e_priv *priv); + +int mlx5e_open_flow_table(struct mlx5e_priv *priv); +void mlx5e_close_flow_table(struct mlx5e_priv *priv); +void mlx5e_init_eth_addr(struct mlx5e_priv *priv); +void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv); +void mlx5e_set_rx_mode_work(struct work_struct *work); + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); +void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); +int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv); +void mlx5e_del_all_vlan_rules(struct 
mlx5e_priv *priv); + +int mlx5e_open_locked(struct net_device *netdev); +int mlx5e_close_locked(struct net_device *netdev); +int mlx5e_update_priv_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params); + +static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, + struct mlx5e_tx_wqe *wqe) +{ + /* ensure wqe is visible to device before updating doorbell record */ + dma_wmb(); + + *sq->wq.db = cpu_to_be32(sq->pc); + + /* ensure doorbell record is visible to device before ringing the + * doorbell + */ + wmb(); + + mlx5_write64((__be32 *)&wqe->ctrl, + sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset, + NULL); + + sq->bf_offset ^= sq->bf_buf_size; +} + +static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) +{ + struct mlx5_core_cq *mcq; + + mcq = &cq->mcq; + mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); +} + +extern const struct ethtool_ops mlx5e_ethtool_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c new file mode 100644 index 000000000000..388938482ff9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -0,0 +1,679 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "en.h" + +static void mlx5e_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")", + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev)); + strlcpy(drvinfo->bus_info, pci_name(mdev->pdev), + sizeof(drvinfo->bus_info)); +} + +static const struct { + u32 supported; + u32 advertised; + u32 speed; +} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = { + [MLX5E_1000BASE_CX_SGMII] = { + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + [MLX5E_1000BASE_KX] = { + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + [MLX5E_10GBASE_CX4] = { + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_KX4] = { + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_KR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_20GBASE_KR2] = { + .supported = SUPPORTED_20000baseKR2_Full, + .advertised = ADVERTISED_20000baseKR2_Full, + .speed = 20000, + }, + [MLX5E_40GBASE_CR4] = { + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .speed = 40000, + }, + [MLX5E_40GBASE_KR4] = { + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .speed = 40000, + }, + [MLX5E_56GBASE_R4] = { + .supported = SUPPORTED_56000baseKR4_Full, + .advertised = ADVERTISED_56000baseKR4_Full, + .speed = 56000, + }, + [MLX5E_10GBASE_CR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_SR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_ER] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_40GBASE_SR4] = { + .supported = SUPPORTED_40000baseSR4_Full, + .advertised = ADVERTISED_40000baseSR4_Full, + .speed = 40000, + }, + [MLX5E_40GBASE_LR4] = { + .supported = SUPPORTED_40000baseLR4_Full, + .advertised = ADVERTISED_40000baseLR4_Full, + .speed = 40000, + }, + [MLX5E_100GBASE_CR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_SR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_KR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_LR4] = { + .speed = 100000, + }, + [MLX5E_100BASE_TX] = { + .speed = 100, + }, + [MLX5E_100BASE_T] = { + .supported = SUPPORTED_100baseT_Full, + .advertised = ADVERTISED_100baseT_Full, + .speed = 100, + }, + [MLX5E_10GBASE_T] = { + .supported = SUPPORTED_10000baseT_Full, + .advertised = ADVERTISED_10000baseT_Full, + .speed = 1000, + }, + [MLX5E_25GBASE_CR] = { + .speed = 25000, + }, + [MLX5E_25GBASE_KR] = { + .speed = 25000, + }, + [MLX5E_25GBASE_SR] = { + .speed = 25000, + }, + [MLX5E_50GBASE_CR2] = { + .speed = 50000, + }, + [MLX5E_50GBASE_KR2] = { + .speed = 50000, + }, +}; + +static int mlx5e_get_sset_count(struct net_device *dev, int sset) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (sset) { + case 
ETH_SS_STATS: + return NUM_VPORT_COUNTERS + + priv->params.num_channels * NUM_RQ_STATS + + priv->params.num_channels * priv->num_tc * + NUM_SQ_STATS; + /* fallthrough */ + default: + return -EOPNOTSUPP; + } +} + +static void mlx5e_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + int i, j, tc, idx = 0; + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (stringset) { + case ETH_SS_PRIV_FLAGS: + break; + + case ETH_SS_TEST: + break; + + case ETH_SS_STATS: + /* VPORT counters */ + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vport_strings[i]); + + /* per channel counters */ + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + "rx%d_%s", i, rq_stats_strings[j]); + + for (i = 0; i < priv->params.num_channels; i++) + for (tc = 0; tc < priv->num_tc; tc++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + + (idx++) * ETH_GSTRING_LEN, + "tx%d_%d_%s", i, tc, + sq_stats_strings[j]); + break; + } +} + +static void mlx5e_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int i, j, tc, idx = 0; + + if (!data) + return; + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_update_stats(priv); + mutex_unlock(&priv->state_lock); + + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + data[idx++] = ((u64 *)&priv->stats.vport)[i]; + + /* per channel counters */ + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + data[idx++] = !test_bit(MLX5E_STATE_OPENED, + &priv->state) ? 0 : + ((u64 *)&priv->channel[i]->rq.stats)[j]; + + for (i = 0; i < priv->params.num_channels; i++) + for (tc = 0; tc < priv->num_tc; tc++) + for (j = 0; j < NUM_SQ_STATS; j++) + data[idx++] = !test_bit(MLX5E_STATE_OPENED, + &priv->state) ? 
0 : + ((u64 *)&priv->channel[i]->sq[tc].stats)[j]; +} + +static void mlx5e_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; + param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; + param->rx_pending = 1 << priv->params.log_rq_size; + param->tx_pending = 1 << priv->params.log_sq_size; +} + +static int mlx5e_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_params new_params; + u16 min_rx_wqes; + u8 log_rq_size; + u8 log_sq_size; + int err = 0; + + if (param->rx_jumbo_pending) { + netdev_info(dev, "%s: rx_jumbo_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_mini_pending) { + netdev_info(dev, "%s: rx_mini_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { + netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", + __func__, param->rx_pending, + 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); + return -EINVAL; + } + if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) { + netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n", + __func__, param->rx_pending, + 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE); + return -EINVAL; + } + if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { + netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n", + __func__, param->tx_pending, + 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); + return -EINVAL; + } + if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) { + netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n", + __func__, param->tx_pending, + 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE); + return -EINVAL; + } + + log_rq_size = order_base_2(param->rx_pending); + log_sq_size = order_base_2(param->tx_pending); + min_rx_wqes = min_t(u16, param->rx_pending - 1, + MLX5E_PARAMS_DEFAULT_MIN_RX_WQES); + + if (log_rq_size == priv->params.log_rq_size && + log_sq_size == priv->params.log_sq_size && + min_rx_wqes == priv->params.min_rx_wqes) + return 0; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + new_params.log_rq_size = log_rq_size; + new_params.log_sq_size = log_sq_size; + new_params.min_rx_wqes = min_rx_wqes; + err = mlx5e_update_priv_params(priv, &new_params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static void mlx5e_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ncv = priv->mdev->priv.eq_table.num_comp_vectors; + + ch->max_combined = ncv; + ch->combined_count = priv->params.num_channels; +} + +static int mlx5e_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ncv = priv->mdev->priv.eq_table.num_comp_vectors; + unsigned int count = ch->combined_count; + struct mlx5e_params new_params; + int err = 0; + + if (!count) { + netdev_info(dev, "%s: combined_count=0 not supported\n", + __func__); + return -EINVAL; + } + if (ch->rx_count || ch->tx_count) { + netdev_info(dev, "%s: separate rx/tx count not supported\n", + __func__); + return -EINVAL; + } + if (count > ncv) { + netdev_info(dev, "%s: count (%d) > max (%d)\n", + __func__, count, ncv); + return -EINVAL; + } + + if (priv->params.num_channels == count) + return 0; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + new_params.num_channels = count; + err = 
mlx5e_update_priv_params(priv, &new_params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static int mlx5e_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; + coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; + coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; + coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; + + return 0; +} + +static int mlx5e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channel *c; + int tc; + int i; + + priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; + priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; + priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; + priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames; + + for (i = 0; i < priv->params.num_channels; ++i) { + c = priv->channel[i]; + + for (tc = 0; tc < c->num_tc; tc++) { + mlx5_core_modify_cq_moderation(mdev, + &c->sq[tc].cq.mcq, + coal->tx_coalesce_usecs, + coal->tx_max_coalesced_frames); + } + + mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, + coal->rx_coalesce_usecs, + coal->rx_max_coalesced_frames); + } + + return 0; +} + +static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) +{ + int i; + u32 supported_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_cap & MLX5E_PROT_MASK(i)) + supported_modes |= ptys2ethtool_table[i].supported; + } + return supported_modes; +} + +static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) +{ + int i; + u32 advertising_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_cap & MLX5E_PROT_MASK(i)) + advertising_modes |= ptys2ethtool_table[i].advertised; + } + return advertising_modes; +} + +static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) +{ + if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) + | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) + | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { + return SUPPORTED_FIBRE; + } + + if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { + return SUPPORTED_Backplane; + } + return 0; +} + +static void get_speed_duplex(struct net_device *netdev, + u32 eth_proto_oper, + struct ethtool_cmd *cmd) +{ + int i; + u32 speed = SPEED_UNKNOWN; + u8 duplex = DUPLEX_UNKNOWN; + + if (!netif_carrier_ok(netdev)) + goto out; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_oper & MLX5E_PROT_MASK(i)) { + speed = ptys2ethtool_table[i].speed; + duplex = DUPLEX_FULL; + break; + } + } +out: + ethtool_cmd_speed_set(cmd, speed); + cmd->duplex = duplex; +} + +static void get_supported(u32 eth_proto_cap, u32 *supported) +{ + *supported |= ptys2ethtool_supported_port(eth_proto_cap); + *supported |= ptys2ethtool_supported_link(eth_proto_cap); + *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; +} + +static void get_advertising(u32 eth_proto_cap, u8 tx_pause, + u8 rx_pause, u32 *advertising) +{ + *advertising |= ptys2ethtool_adver_link(eth_proto_cap); + *advertising |= tx_pause ? 
ADVERTISED_Pause : 0; + *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0; +} + +static u8 get_connector_port(u32 eth_proto) +{ + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { + return PORT_FIBRE; + } + + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_CR) + | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) { + return PORT_DA; + } + + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) { + return PORT_NONE; + } + + return PORT_OTHER; +} + +static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) +{ + *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); +} + +static int mlx5e_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 eth_proto_cap; + u32 eth_proto_admin; + u32 eth_proto_lp; + u32 eth_proto_oper; + int err; + + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); + + if (err) { + netdev_err(netdev, "%s: query port ptys failed: %d\n", + __func__, err); + goto err_query_ptys; + } + + eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + + cmd->supported = 0; + cmd->advertising = 0; + + get_supported(eth_proto_cap, &cmd->supported); + get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); + get_speed_duplex(netdev, eth_proto_oper, cmd); + + eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; + + cmd->port = get_connector_port(eth_proto_oper); + get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); + + cmd->transceiver = XCVR_INTERNAL; + +err_query_ptys: + return err; +} + +static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) +{ + u32 i, ptys_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (ptys2ethtool_table[i].advertised & link_modes) + ptys_modes |= MLX5E_PROT_MASK(i); + } + + return ptys_modes; +} + +static u32 mlx5e_ethtool2ptys_speed_link(u32 speed) +{ + u32 i, speed_links = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (ptys2ethtool_table[i].speed == speed) + speed_links |= MLX5E_PROT_MASK(i); + } + + return speed_links; +} + +static int mlx5e_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u32 link_modes; + u32 speed; + u32 eth_proto_cap, eth_proto_admin; + u8 port_status; + int err; + + speed = ethtool_cmd_speed(cmd); + + link_modes = cmd->autoneg == AUTONEG_ENABLE ? 
+ mlx5e_ethtool2ptys_adver_link(cmd->advertising) : + mlx5e_ethtool2ptys_speed_link(speed); + + err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); + if (err) { + netdev_err(netdev, "%s: query port eth proto cap failed: %d\n", + __func__, err); + goto out; + } + + link_modes = link_modes & eth_proto_cap; + if (!link_modes) { + netdev_err(netdev, "%s: Not supported link mode(s) requested", + __func__); + err = -EINVAL; + goto out; + } + + err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN); + if (err) { + netdev_err(netdev, "%s: query port eth proto admin failed: %d\n", + __func__, err); + goto out; + } + + if (link_modes == eth_proto_admin) + goto out; + + err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); + if (err) { + netdev_err(netdev, "%s: set port eth proto admin failed: %d\n", + __func__, err); + goto out; + } + + err = mlx5_query_port_status(mdev, &port_status); + if (err) + goto out; + + if (port_status == MLX5_PORT_DOWN) + return 0; + + err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN); + if (err) + goto out; + err = mlx5_set_port_status(mdev, MLX5_PORT_UP); +out: + return err; +} + +const struct ethtool_ops mlx5e_ethtool_ops = { + .get_drvinfo = mlx5e_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = mlx5e_get_strings, + .get_sset_count = mlx5e_get_sset_count, + .get_ethtool_stats = mlx5e_get_ethtool_stats, + .get_ringparam = mlx5e_get_ringparam, + .set_ringparam = mlx5e_set_ringparam, + .get_channels = mlx5e_get_channels, + .set_channels = mlx5e_set_channels, + .get_coalesce = mlx5e_get_coalesce, + .set_coalesce = mlx5e_set_coalesce, + .get_settings = mlx5e_get_settings, + .set_settings = mlx5e_set_settings, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c new file mode 100644 index 000000000000..120db80c47aa --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -0,0 +1,860 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/list.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/mlx5/flow_table.h> +#include "en.h" + +enum { + MLX5E_FULLMATCH = 0, + MLX5E_ALLMULTI = 1, + MLX5E_PROMISC = 2, +}; + +enum { + MLX5E_UC = 0, + MLX5E_MC_IPV4 = 1, + MLX5E_MC_IPV6 = 2, + MLX5E_MC_OTHER = 3, +}; + +enum { + MLX5E_ACTION_NONE = 0, + MLX5E_ACTION_ADD = 1, + MLX5E_ACTION_DEL = 2, +}; + +struct mlx5e_eth_addr_hash_node { + struct hlist_node hlist; + u8 action; + struct mlx5e_eth_addr_info ai; +}; + +static inline int mlx5e_hash_eth_addr(u8 *addr) +{ + return addr[5]; +} + +static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) +{ + struct mlx5e_eth_addr_hash_node *hn; + int ix = mlx5e_hash_eth_addr(addr); + int found = 0; + + hlist_for_each_entry(hn, &hash[ix], hlist) + if (ether_addr_equal_64bits(hn->ai.addr, addr)) { + found = 1; + break; + } + + if (found) { + hn->action = MLX5E_ACTION_NONE; + return; + } + + hn = kzalloc(sizeof(*hn), GFP_ATOMIC); + if (!hn) + return; + + ether_addr_copy(hn->ai.addr, addr); + hn->action = MLX5E_ACTION_ADD; + + hlist_add_head(&hn->hlist, &hash[ix]); +} + +static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) +{ + hlist_del(&hn->hlist); + kfree(hn); +} + +static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai) +{ + void *ft = priv->ft.main; + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]); + + if (ai->tt_vec & (1 << MLX5E_TT_ANY)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]); +} + +static int mlx5e_get_eth_addr_type(u8 *addr) +{ + if (is_unicast_ether_addr(addr)) + return MLX5E_UC; + + if ((addr[0] == 0x01) && + (addr[1] == 0x00) && + (addr[2] == 0x5e) && + !(addr[3] & 0x80)) + return MLX5E_MC_IPV4; + + if ((addr[0] == 0x33) && + (addr[1] == 0x33)) + return MLX5E_MC_IPV6; + + return MLX5E_MC_OTHER; +} + +static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) +{ + int eth_addr_type; + u32 ret; + + switch (type) { + case MLX5E_FULLMATCH: + eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); + switch (eth_addr_type) { + case MLX5E_UC: + ret = + (1 << MLX5E_TT_IPV4_TCP) | + (1 << MLX5E_TT_IPV6_TCP) | + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << MLX5E_TT_ANY) | + 0; + break; + + case MLX5E_MC_IPV4: + ret = + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV4) | + 0; + break; + + case MLX5E_MC_IPV6: + ret = + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV6) | + 0; + break; + + case MLX5E_MC_OTHER: + ret = + (1 << MLX5E_TT_ANY) | + 0; + break; + } + + break; + + case MLX5E_ALLMULTI: + ret = + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << MLX5E_TT_ANY) | + 0; + break; + + default: /* MLX5E_PROMISC */ + ret = + (1 << MLX5E_TT_IPV4_TCP) | + (1 << MLX5E_TT_IPV6_TCP) | + (1 << MLX5E_TT_IPV4_UDP) | + (1 
<< MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << MLX5E_TT_ANY) | + 0; + break; + } + + return ret; +} + +static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, int type, + void *flow_context, void *match_criteria) +{ + u8 match_criteria_enable = 0; + void *match_value; + void *dest; + u8 *dmac; + u8 *match_criteria_dmac; + void *ft = priv->ft.main; + u32 *tirn = priv->tirn; + u32 tt_vec; + int err; + + match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); + dmac = MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dmac_47_16); + match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dmac_47_16); + dest = MLX5_ADDR_OF(flow_context, flow_context, destination); + + MLX5_SET(flow_context, flow_context, action, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + MLX5_SET(flow_context, flow_context, destination_list_size, 1); + MLX5_SET(dest_format_struct, dest, destination_type, + MLX5_FLOW_CONTEXT_DEST_TYPE_TIR); + + switch (type) { + case MLX5E_FULLMATCH: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + memset(match_criteria_dmac, 0xff, ETH_ALEN); + ether_addr_copy(dmac, ai->addr); + break; + + case MLX5E_ALLMULTI: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + match_criteria_dmac[0] = 0x01; + dmac[0] = 0x01; + break; + + case MLX5E_PROMISC: + break; + } + + tt_vec = mlx5e_get_tt_vec(ai, type); + + if (tt_vec & (1 << MLX5E_TT_ANY)) { + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_ANY]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_ANY]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_ANY); + } + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.ethertype); + + if (tt_vec & (1 << MLX5E_TT_IPV4)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6); + } + + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.ip_protocol); + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_UDP); + + if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_UDP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4_UDP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + 
ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6_UDP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6_UDP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP); + } + + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_TCP); + + if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_TCP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4_TCP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6_TCP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6_TCP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP); + } + + return 0; +} + +static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, int type) +{ + u32 *flow_context; + u32 *match_criteria; + int err; + + flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + + MLX5_ST_SZ_BYTES(dest_format_struct)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!flow_context || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_eth_addr_rule_out; + } + + err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context, + match_criteria); + if (err) + netdev_err(priv->netdev, "%s: failed\n", __func__); + +add_eth_addr_rule_out: + kvfree(match_criteria); + kvfree(flow_context); + return err; +} + +enum mlx5e_vlan_rule_type { + MLX5E_VLAN_RULE_TYPE_UNTAGGED, + MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_VID, +}; + +static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + u8 match_criteria_enable = 0; + u32 *flow_context; + void *match_value; + void *dest; + u32 *match_criteria; + u32 *ft_ix; + int err; + + flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + + MLX5_ST_SZ_BYTES(dest_format_struct)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!flow_context || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_vlan_rule_out; + } + match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); + dest = MLX5_ADDR_OF(flow_context, flow_context, destination); + + MLX5_SET(flow_context, flow_context, action, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + MLX5_SET(flow_context, flow_context, destination_list_size, 1); + MLX5_SET(dest_format_struct, dest, destination_type, + MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE); + MLX5_SET(dest_format_struct, dest, destination_id, + mlx5_get_flow_table_id(priv->ft.main)); + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.vlan_tag); + + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + ft_ix = &priv->vlan.untagged_rule_ft_ix; + break; + case 
MLX5E_VLAN_RULE_TYPE_ANY_VID: + ft_ix = &priv->vlan.any_vlan_rule_ft_ix; + MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, + 1); + break; + default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ + ft_ix = &priv->vlan.active_vlans_ft_ix[vid]; + MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, + 1); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.first_vid); + MLX5_SET(fte_match_param, match_value, outer_headers.first_vid, + vid); + break; + } + + err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable, + match_criteria, flow_context, ft_ix); + if (err) + netdev_err(priv->netdev, "%s: failed\n", __func__); + +add_vlan_rule_out: + kvfree(match_criteria); + kvfree(flow_context); + return err; +} + +static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.untagged_rule_ft_ix); + break; + case MLX5E_VLAN_RULE_TYPE_ANY_VID: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.any_vlan_rule_ft_ix); + break; + case MLX5E_VLAN_RULE_TYPE_MATCH_VID: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.active_vlans_ft_ix[vid]); + break; + } +} + +void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) +{ + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + if (priv->vlan.filter_disabled) { + priv->vlan.filter_disabled = false; + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } +} + +void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) +{ + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + if (!priv->vlan.filter_disabled) { + priv->vlan.filter_disabled = true; + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } +} + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; + + mutex_lock(&priv->state_lock); + + set_bit(vid, priv->vlan.active_vlans); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, + vid); + + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + mutex_lock(&priv->state_lock); + + clear_bit(vid, priv->vlan.active_vlans); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); + + mutex_unlock(&priv->state_lock); + + return 0; +} + +int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv) +{ + u16 vid; + int err; + + for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) { + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, + vid); + if (err) + return err; + } + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + return err; + + if (priv->vlan.filter_disabled) { + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + if (err) + return err; + } + + return 0; +} + +void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv) +{ + u16 vid; + + if (priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + + for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, 
vid); +} + +#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ + for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) + +static void mlx5e_execute_action(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_hash_node *hn) +{ + switch (hn->action) { + case MLX5E_ACTION_ADD: + mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); + hn->action = MLX5E_ACTION_NONE; + break; + + case MLX5E_ACTION_DEL: + mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); + mlx5e_del_eth_addr_from_hash(hn); + break; + } +} + +static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, + priv->netdev->dev_addr); + + netdev_for_each_uc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); + + netdev_for_each_mc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); + + netif_addr_unlock_bh(netdev); +} + +static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + mlx5e_execute_action(priv, hn); + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + mlx5e_execute_action(priv, hn); +} + +static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + hn->action = MLX5E_ACTION_DEL; + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + hn->action = MLX5E_ACTION_DEL; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_sync_netdev_addr(priv); + + mlx5e_apply_netdev_addr(priv); +} + +void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct net_device *ndev = priv->netdev; + + bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state); + bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); + bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); + bool broadcast_enabled = rx_mode_enable; + + bool enable_promisc = !ea->promisc_enabled && promisc_enabled; + bool disable_promisc = ea->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; + bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; + bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; + + if (enable_promisc) + mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (enable_allmulti) + mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); + if (enable_broadcast) + mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); + + mlx5e_handle_netdev_addr(priv); + + if (disable_broadcast) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); + if (disable_allmulti) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); + if (disable_promisc) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + + ea->promisc_enabled = promisc_enabled; + ea->allmulti_enabled = allmulti_enabled; + ea->broadcast_enabled = broadcast_enabled; +} + +void mlx5e_set_rx_mode_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + set_rx_mode_work); + + 
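mlx5e_set_rx_mode_core() above applies promiscuous/allmulti/broadcast changes as deltas against the cached state in mlx5e_eth_addr_db: rules that become needed are installed before the unicast/multicast address sync, and rules that are no longer needed are removed only afterwards, so matching traffic is not interrupted mid-update. A minimal userspace sketch of that delta pattern follows; rx_mode_state, apply_rx_mode and the printf calls are illustrative stand-ins, not the driver API.

    #include <stdbool.h>
    #include <stdio.h>

    struct rx_mode_state { bool promisc, allmulti; };

    static void apply_rx_mode(struct rx_mode_state *cached, bool opened,
                              bool want_promisc, bool want_allmulti)
    {
            /* desired state is derived from the device flags and whether it is up */
            struct rx_mode_state want = {
                    .promisc  = opened && want_promisc,
                    .allmulti = opened && want_allmulti,
            };

            /* install newly required rules first ... */
            if (want.promisc && !cached->promisc)
                    printf("add promisc rule\n");
            if (want.allmulti && !cached->allmulti)
                    printf("add allmulti rule\n");

            /* ... then remove rules that are no longer wanted */
            if (!want.allmulti && cached->allmulti)
                    printf("del allmulti rule\n");
            if (!want.promisc && cached->promisc)
                    printf("del promisc rule\n");

            *cached = want;         /* cache what is currently installed */
    }

    int main(void)
    {
            struct rx_mode_state cached = { 0 };

            apply_rx_mode(&cached, true, true, false);  /* device up, promisc turned on */
            apply_rx_mode(&cached, true, false, false); /* promisc turned back off */
            return 0;
    }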
mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_set_rx_mode_core(priv); + mutex_unlock(&priv->state_lock); +} + +void mlx5e_init_eth_addr(struct mlx5e_priv *priv) +{ + ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); +} + +static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5_flow_table_group *g; + u8 *dmac; + + g = kcalloc(9, sizeof(*g), GFP_KERNEL); + if (!g) + return -ENOMEM; + + g[0].log_sz = 2; + g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.ip_protocol); + + g[1].log_sz = 1; + g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, + outer_headers.ethertype); + + g[2].log_sz = 0; + + g[3].log_sz = 14; + g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, + outer_headers.ip_protocol); + + g[4].log_sz = 13; + g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria, + outer_headers.ethertype); + + g[5].log_sz = 11; + g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + + g[6].log_sz = 2; + g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, + outer_headers.ip_protocol); + + g[7].log_sz = 1; + g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria, + outer_headers.ethertype); + + g[8].log_sz = 0; + g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + priv->ft.main = mlx5_create_flow_table(priv->mdev, 1, + MLX5_FLOW_TABLE_TYPE_NIC_RCV, + 9, g); + kfree(g); + + return priv->ft.main ? 
0 : -ENOMEM; +} + +static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) +{ + mlx5_destroy_flow_table(priv->ft.main); +} + +static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5_flow_table_group *g; + + g = kcalloc(2, sizeof(*g), GFP_KERNEL); + if (!g) + return -ENOMEM; + + g[0].log_sz = 12; + g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.first_vid); + + /* untagged + any vlan id */ + g[1].log_sz = 1; + g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, + outer_headers.vlan_tag); + + priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0, + MLX5_FLOW_TABLE_TYPE_NIC_RCV, + 2, g); + + kfree(g); + return priv->ft.vlan ? 0 : -ENOMEM; +} + +static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) +{ + mlx5_destroy_flow_table(priv->ft.vlan); +} + +int mlx5e_open_flow_table(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_create_main_flow_table(priv); + if (err) + return err; + + err = mlx5e_create_vlan_flow_table(priv); + if (err) + goto err_destroy_main_flow_table; + + return 0; + +err_destroy_main_flow_table: + mlx5e_destroy_main_flow_table(priv); + + return err; +} + +void mlx5e_close_flow_table(struct mlx5e_priv *priv) +{ + mlx5e_destroy_vlan_flow_table(priv); + mlx5e_destroy_main_flow_table(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c new file mode 100644 index 000000000000..40206da1f9d7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -0,0 +1,1915 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/mlx5/flow_table.h> +#include "en.h" + +struct mlx5e_rq_param { + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; +}; + +struct mlx5e_sq_param { + u32 sqc[MLX5_ST_SZ_DW(sqc)]; + struct mlx5_wq_param wq; +}; + +struct mlx5e_cq_param { + u32 cqc[MLX5_ST_SZ_DW(cqc)]; + struct mlx5_wq_param wq; + u16 eq_ix; +}; + +struct mlx5e_channel_param { + struct mlx5e_rq_param rq; + struct mlx5e_sq_param sq; + struct mlx5e_cq_param rx_cq; + struct mlx5e_cq_param tx_cq; +}; + +static void mlx5e_update_carrier(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u8 port_state; + + port_state = mlx5_query_vport_state(mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT); + + if (port_state == VPORT_STATE_UP) + netif_carrier_on(priv->netdev); + else + netif_carrier_off(priv->netdev); +} + +static void mlx5e_update_carrier_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + update_carrier_work); + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_update_carrier(priv); + mutex_unlock(&priv->state_lock); +} + +void mlx5e_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_vport_stats *s = &priv->stats.vport; + struct mlx5e_rq_stats *rq_stats; + struct mlx5e_sq_stats *sq_stats; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); + u64 tx_offload_none; + int i, j; + + out = mlx5_vzalloc(outlen); + if (!out) + return; + + /* Collect firts the SW counters and then HW for consistency */ + s->tso_packets = 0; + s->tso_bytes = 0; + s->tx_queue_stopped = 0; + s->tx_queue_wake = 0; + s->tx_queue_dropped = 0; + tx_offload_none = 0; + s->lro_packets = 0; + s->lro_bytes = 0; + s->rx_csum_none = 0; + s->rx_wqe_err = 0; + for (i = 0; i < priv->params.num_channels; i++) { + rq_stats = &priv->channel[i]->rq.stats; + + s->lro_packets += rq_stats->lro_packets; + s->lro_bytes += rq_stats->lro_bytes; + s->rx_csum_none += rq_stats->csum_none; + s->rx_wqe_err += rq_stats->wqe_err; + + for (j = 0; j < priv->num_tc; j++) { + sq_stats = &priv->channel[i]->sq[j].stats; + + s->tso_packets += sq_stats->tso_packets; + s->tso_bytes += sq_stats->tso_bytes; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_wake += sq_stats->wake; + s->tx_queue_dropped += sq_stats->dropped; + tx_offload_none += sq_stats->csum_offload_none; + } + } + + /* HW counters */ + memset(in, 0, sizeof(in)); + + MLX5_SET(query_vport_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_COUNTER); + MLX5_SET(query_vport_counter_in, in, op_mod, 0); + MLX5_SET(query_vport_counter_in, in, other_vport, 0); + + memset(out, 0, outlen); + + if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen)) + goto free_out; + +#define MLX5_GET_CTR(p, x) \ + MLX5_GET64(query_vport_counter_out, p, x) + + s->rx_error_packets = + MLX5_GET_CTR(out, received_errors.packets); + s->rx_error_bytes = + MLX5_GET_CTR(out, received_errors.octets); + s->tx_error_packets = + MLX5_GET_CTR(out, transmit_errors.packets); + s->tx_error_bytes = + MLX5_GET_CTR(out, transmit_errors.octets); + + s->rx_unicast_packets = + MLX5_GET_CTR(out, received_eth_unicast.packets); + s->rx_unicast_bytes = + MLX5_GET_CTR(out, received_eth_unicast.octets); + s->tx_unicast_packets = + MLX5_GET_CTR(out, transmitted_eth_unicast.packets); + s->tx_unicast_bytes = + MLX5_GET_CTR(out, transmitted_eth_unicast.octets); + + s->rx_multicast_packets = + MLX5_GET_CTR(out, 
received_eth_multicast.packets); + s->rx_multicast_bytes = + MLX5_GET_CTR(out, received_eth_multicast.octets); + s->tx_multicast_packets = + MLX5_GET_CTR(out, transmitted_eth_multicast.packets); + s->tx_multicast_bytes = + MLX5_GET_CTR(out, transmitted_eth_multicast.octets); + + s->rx_broadcast_packets = + MLX5_GET_CTR(out, received_eth_broadcast.packets); + s->rx_broadcast_bytes = + MLX5_GET_CTR(out, received_eth_broadcast.octets); + s->tx_broadcast_packets = + MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); + s->tx_broadcast_bytes = + MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); + + s->rx_packets = + s->rx_unicast_packets + + s->rx_multicast_packets + + s->rx_broadcast_packets; + s->rx_bytes = + s->rx_unicast_bytes + + s->rx_multicast_bytes + + s->rx_broadcast_bytes; + s->tx_packets = + s->tx_unicast_packets + + s->tx_multicast_packets + + s->tx_broadcast_packets; + s->tx_bytes = + s->tx_unicast_bytes + + s->tx_multicast_bytes + + s->tx_broadcast_bytes; + + /* Update calculated offload counters */ + s->tx_csum_offload = s->tx_packets - tx_offload_none; + s->rx_csum_good = s->rx_packets - s->rx_csum_none; + +free_out: + kvfree(out); +} + +static void mlx5e_update_stats_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, + update_stats_work); + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { + mlx5e_update_stats(priv); + schedule_delayed_work(dwork, + msecs_to_jiffies( + MLX5E_UPDATE_STATS_INTERVAL)); + } + mutex_unlock(&priv->state_lock); +} + +static void __mlx5e_async_event(struct mlx5e_priv *priv, + enum mlx5_dev_event event) +{ + switch (event) { + case MLX5_DEV_EVENT_PORT_UP: + case MLX5_DEV_EVENT_PORT_DOWN: + schedule_work(&priv->update_carrier_work); + break; + + default: + break; + } +} + +static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, + enum mlx5_dev_event event, unsigned long param) +{ + struct mlx5e_priv *priv = vpriv; + + spin_lock(&priv->async_events_spinlock); + if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) + __mlx5e_async_event(priv, event); + spin_unlock(&priv->async_events_spinlock); +} + +static void mlx5e_enable_async_events(struct mlx5e_priv *priv) +{ + set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); +} + +static void mlx5e_disable_async_events(struct mlx5e_priv *priv) +{ + spin_lock_irq(&priv->async_events_spinlock); + clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); + spin_unlock_irq(&priv->async_events_spinlock); +} + +#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) +#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) + +static int mlx5e_create_rq(struct mlx5e_channel *c, + struct mlx5e_rq_param *param, + struct mlx5e_rq *rq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + void *rqc = param->rqc; + void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); + int wq_sz; + int err; + int i; + + err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_ll_get_size(&rq->wq); + rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!rq->skb) { + err = -ENOMEM; + goto err_rq_wq_destroy; + } + + rq->wqe_sz = (priv->params.lro_en) ? 
priv->params.lro_wqe_sz : + MLX5E_SW2HW_MTU(priv->netdev->mtu); + rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN); + + for (i = 0; i < wq_sz; i++) { + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); + u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN; + + wqe->data.lkey = c->mkey_be; + wqe->data.byte_count = + cpu_to_be32(byte_count | MLX5_HW_START_PADDING); + } + + rq->pdev = c->pdev; + rq->netdev = c->netdev; + rq->channel = c; + rq->ix = c->ix; + + return 0; + +err_rq_wq_destroy: + mlx5_wq_destroy(&rq->wq_ctrl); + + return err; +} + +static void mlx5e_destroy_rq(struct mlx5e_rq *rq) +{ + kfree(rq->skb); + mlx5_wq_destroy(&rq->wq_ctrl); +} + +static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *rqc; + void *wq; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_rq_in) + + sizeof(u64) * rq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); + wq = MLX5_ADDR_OF(rqc, rqc, wq); + + memcpy(rqc, param->rqc, sizeof(param->rqc)); + + MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); + MLX5_SET(rqc, rqc, flush_in_error_en, 1); + MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); + + mlx5_fill_page_array(&rq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); + + kvfree(in); + + return err; +} + +static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *rqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_rq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + + MLX5_SET(modify_rq_in, in, rq_state, curr_state); + MLX5_SET(rqc, rqc, state, next_state); + + err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); + + kvfree(in); + + return err; +} + +static void mlx5e_disable_rq(struct mlx5e_rq *rq) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_core_destroy_rq(mdev, rq->rqn); +} + +static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_wq_ll *wq = &rq->wq; + int i; + + for (i = 0; i < 1000; i++) { + if (wq->cur_sz >= priv->params.min_rx_wqes) + return 0; + + msleep(20); + } + + return -ETIMEDOUT; +} + +static int mlx5e_open_rq(struct mlx5e_channel *c, + struct mlx5e_rq_param *param, + struct mlx5e_rq *rq) +{ + int err; + + err = mlx5e_create_rq(c, param, rq); + if (err) + return err; + + err = mlx5e_enable_rq(rq, param); + if (err) + goto err_destroy_rq; + + err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if (err) + goto err_disable_rq; + + set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */ + + return 0; + +err_disable_rq: + mlx5e_disable_rq(rq); +err_destroy_rq: + mlx5e_destroy_rq(rq); + + return err; +} + +static void mlx5e_close_rq(struct mlx5e_rq *rq) +{ + clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + 
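The receive buffer sizing in mlx5e_create_rq() above starts from the wire-level MTU (MLX5E_SW2HW_MTU adds the Ethernet header, one VLAN tag and the FCS), reserves MLX5E_NET_IP_ALIGN bytes so the IP header lands aligned, and rounds up with SKB_DATA_ALIGN; the WQE byte_count then excludes the alignment bytes again. A hedged worked example, assuming MLX5E_NET_IP_ALIGN is 2 and that SKB_DATA_ALIGN rounds to 64-byte cache lines (CACHE_ALIGN below is only a stand-in for it):

    #include <stdio.h>

    #define ETH_HLEN        14
    #define VLAN_HLEN       4
    #define ETH_FCS_LEN     4
    #define NET_IP_ALIGN    2                    /* assumed MLX5E_NET_IP_ALIGN value */
    #define CACHE_ALIGN(x)  (((x) + 63) & ~63)   /* stand-in for SKB_DATA_ALIGN */

    int main(void)
    {
            int mtu        = 1500;
            int hw_mtu     = mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; /* 1522 */
            int wqe_sz     = CACHE_ALIGN(hw_mtu + NET_IP_ALIGN);       /* 1536 */
            int byte_count = wqe_sz - NET_IP_ALIGN;                    /* room the HW may fill */

            printf("hw_mtu=%d wqe_sz=%d byte_count=%d\n", hw_mtu, wqe_sz, byte_count);
            return 0;
    }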
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ + + mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); + while (!mlx5_wq_ll_is_empty(&rq->wq)) + msleep(20); + + /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ + napi_synchronize(&rq->channel->napi); + + mlx5e_disable_rq(rq); + mlx5e_destroy_rq(rq); +} + +static void mlx5e_free_sq_db(struct mlx5e_sq *sq) +{ + kfree(sq->dma_fifo); + kfree(sq->skb); +} + +static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa) +{ + int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + + sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa); + sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL, + numa); + + if (!sq->skb || !sq->dma_fifo) { + mlx5e_free_sq_db(sq); + return -ENOMEM; + } + + sq->dma_fifo_mask = df_sz - 1; + + return 0; +} + +static int mlx5e_create_sq(struct mlx5e_channel *c, + int tc, + struct mlx5e_sq_param *param, + struct mlx5e_sq *sq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *sqc = param->sqc; + void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); + int txq_ix; + int err; + + err = mlx5_alloc_map_uar(mdev, &sq->uar); + if (err) + return err; + + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, + &sq->wq_ctrl); + if (err) + goto err_unmap_free_uar; + + sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + sq->uar_map = sq->uar.map; + sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; + + err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); + if (err) + goto err_sq_wq_destroy; + + txq_ix = c->ix + tc * priv->params.num_channels; + sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); + + sq->pdev = c->pdev; + sq->mkey_be = c->mkey_be; + sq->channel = c; + sq->tc = tc; + sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; + priv->txq_to_sq_map[txq_ix] = sq; + + return 0; + +err_sq_wq_destroy: + mlx5_wq_destroy(&sq->wq_ctrl); + +err_unmap_free_uar: + mlx5_unmap_free_uar(mdev, &sq->uar); + + return err; +} + +static void mlx5e_destroy_sq(struct mlx5e_sq *sq) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + + mlx5e_free_sq_db(sq); + mlx5_wq_destroy(&sq->wq_ctrl); + mlx5_unmap_free_uar(priv->mdev, &sq->uar); +} + +static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *sqc; + void *wq; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_sq_in) + + sizeof(u64) * sq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); + wq = MLX5_ADDR_OF(sqc, sqc, wq); + + memcpy(sqc, param->sqc, sizeof(param->sqc)); + + MLX5_SET(sqc, sqc, user_index, sq->tc); + MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]); + MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, tis_lst_sz, 1); + MLX5_SET(sqc, sqc, flush_in_error_en, 1); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, uar_page, sq->uar.index); + MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); + + mlx5_fill_page_array(&sq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn); + + kvfree(in); + + return err; +} + +static int 
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *sqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_sq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + + MLX5_SET(modify_sq_in, in, sq_state, curr_state); + MLX5_SET(sqc, sqc, state, next_state); + + err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen); + + kvfree(in); + + return err; +} + +static void mlx5e_disable_sq(struct mlx5e_sq *sq) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_core_destroy_sq(mdev, sq->sqn); +} + +static int mlx5e_open_sq(struct mlx5e_channel *c, + int tc, + struct mlx5e_sq_param *param, + struct mlx5e_sq *sq) +{ + int err; + + err = mlx5e_create_sq(c, tc, param, sq); + if (err) + return err; + + err = mlx5e_enable_sq(sq, param); + if (err) + goto err_destroy_sq; + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + if (err) + goto err_disable_sq; + + set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + netdev_tx_reset_queue(sq->txq); + netif_tx_start_queue(sq->txq); + + return 0; + +err_disable_sq: + mlx5e_disable_sq(sq); +err_destroy_sq: + mlx5e_destroy_sq(sq); + + return err; +} + +static inline void netif_tx_disable_queue(struct netdev_queue *txq) +{ + __netif_tx_lock_bh(txq); + netif_tx_stop_queue(txq); + __netif_tx_unlock_bh(txq); +} + +static void mlx5e_close_sq(struct mlx5e_sq *sq) +{ + clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */ + netif_tx_disable_queue(sq->txq); + + /* ensure hw is notified of all pending wqes */ + if (mlx5e_sq_has_room_for(sq, 1)) + mlx5e_send_nop(sq, true); + + mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); + while (sq->cc != sq->pc) /* wait till sq is empty */ + msleep(20); + + /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ + napi_synchronize(&sq->channel->napi); + + mlx5e_disable_sq(sq); + mlx5e_destroy_sq(sq); +} + +static int mlx5e_create_cq(struct mlx5e_channel *c, + struct mlx5e_cq_param *param, + struct mlx5e_cq *cq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + int eqn_not_used; + int irqn; + int err; + u32 i; + + param->wq.numa = cpu_to_node(c->cpu); + param->eq_ix = c->ix; + + err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, + &cq->wq_ctrl); + if (err) + return err; + + mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); + + cq->napi = &c->napi; + + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + *mcq->set_ci_db = 0; + *mcq->arm_db = 0; + mcq->vector = param->eq_ix; + mcq->comp = mlx5e_completion_event; + mcq->event = mlx5e_cq_error_event; + mcq->irqn = irqn; + mcq->uar = &priv->cq_uar; + + for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); + + cqe->op_own = 0xf1; + } + + cq->channel = c; + + return 0; +} + +static void mlx5e_destroy_cq(struct mlx5e_cq *cq) +{ + mlx5_wq_destroy(&cq->wq_ctrl); +} + +static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) +{ + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_cq 
*mcq = &cq->mcq; + + void *in; + void *cqc; + int inlen; + int irqn_not_used; + int eqn; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + sizeof(u64) * cq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + + memcpy(cqc, param->cqc, sizeof(param->cqc)); + + mlx5_fill_page_array(&cq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + + mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + + MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); + + err = mlx5_core_create_cq(mdev, mcq, in, inlen); + + kvfree(in); + + if (err) + return err; + + mlx5e_cq_arm(cq); + + return 0; +} + +static void mlx5e_disable_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_core_destroy_cq(mdev, &cq->mcq); +} + +static int mlx5e_open_cq(struct mlx5e_channel *c, + struct mlx5e_cq_param *param, + struct mlx5e_cq *cq, + u16 moderation_usecs, + u16 moderation_frames) +{ + int err; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + err = mlx5e_create_cq(c, param, cq); + if (err) + return err; + + err = mlx5e_enable_cq(cq, param); + if (err) + goto err_destroy_cq; + + err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq, + moderation_usecs, + moderation_frames); + if (err) + goto err_destroy_cq; + + return 0; + +err_destroy_cq: + mlx5e_destroy_cq(cq); + + return err; +} + +static void mlx5e_close_cq(struct mlx5e_cq *cq) +{ + mlx5e_disable_cq(cq); + mlx5e_destroy_cq(cq); +} + +static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) +{ + return cpumask_first(priv->mdev->priv.irq_info[ix].mask); +} + +static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, + struct mlx5e_channel_param *cparam) +{ + struct mlx5e_priv *priv = c->priv; + int err; + int tc; + + for (tc = 0; tc < c->num_tc; tc++) { + err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, + priv->params.tx_cq_moderation_usec, + priv->params.tx_cq_moderation_pkts); + if (err) + goto err_close_tx_cqs; + } + + return 0; + +err_close_tx_cqs: + for (tc--; tc >= 0; tc--) + mlx5e_close_cq(&c->sq[tc].cq); + + return err; +} + +static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_cq(&c->sq[tc].cq); +} + +static int mlx5e_open_sqs(struct mlx5e_channel *c, + struct mlx5e_channel_param *cparam) +{ + int err; + int tc; + + for (tc = 0; tc < c->num_tc; tc++) { + err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); + if (err) + goto err_close_sqs; + } + + return 0; + +err_close_sqs: + for (tc--; tc >= 0; tc--) + mlx5e_close_sq(&c->sq[tc]); + + return err; +} + +static void mlx5e_close_sqs(struct mlx5e_channel *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_sq(&c->sq[tc]); +} + +static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c, + int num_channels) +{ + int i; + + for (i = 0; i < MLX5E_MAX_NUM_TC; i++) + c->tc_to_txq_map[i] = c->ix + i * num_channels; +} + +static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, + struct mlx5e_channel_param *cparam, + struct mlx5e_channel **cp) +{ + struct net_device *netdev = priv->netdev; + int cpu = mlx5e_get_cpu(priv, ix); + struct mlx5e_channel *c; + int err; + + c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); + 
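mlx5e_build_tc_to_txq_map() above, together with the txq_ix computation in mlx5e_create_sq(), lays the TX queues out channel-major: all channels' queues for TC 0 first, then all queues for TC 1, and so on, which is why the open path sizes the netdev for num_channels * num_tc TX queues. A small illustrative sketch of the resulting indexing (the channel and TC counts are example numbers only):

    #include <stdio.h>

    int main(void)
    {
            int num_channels = 4, num_tc = 2;

            for (int tc = 0; tc < num_tc; tc++)
                    for (int ch = 0; ch < num_channels; ch++)
                            /* same formula as the driver: ch + tc * num_channels */
                            printf("channel %d, tc %d -> txq %d\n",
                                   ch, tc, ch + tc * num_channels);
            return 0;
    }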
if (!c) + return -ENOMEM; + + c->priv = priv; + c->ix = ix; + c->cpu = cpu; + c->pdev = &priv->mdev->pdev->dev; + c->netdev = priv->netdev; + c->mkey_be = cpu_to_be32(priv->mr.key); + c->num_tc = priv->num_tc; + + mlx5e_build_tc_to_txq_map(c, priv->params.num_channels); + + netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); + + err = mlx5e_open_tx_cqs(c, cparam); + if (err) + goto err_napi_del; + + err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, + priv->params.rx_cq_moderation_usec, + priv->params.rx_cq_moderation_pkts); + if (err) + goto err_close_tx_cqs; + + napi_enable(&c->napi); + + err = mlx5e_open_sqs(c, cparam); + if (err) + goto err_disable_napi; + + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); + if (err) + goto err_close_sqs; + + netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix); + *cp = c; + + return 0; + +err_close_sqs: + mlx5e_close_sqs(c); + +err_disable_napi: + napi_disable(&c->napi); + mlx5e_close_cq(&c->rq.cq); + +err_close_tx_cqs: + mlx5e_close_tx_cqs(c); + +err_napi_del: + netif_napi_del(&c->napi); + kfree(c); + + return err; +} + +static void mlx5e_close_channel(struct mlx5e_channel *c) +{ + mlx5e_close_rq(&c->rq); + mlx5e_close_sqs(c); + napi_disable(&c->napi); + mlx5e_close_cq(&c->rq.cq); + mlx5e_close_tx_cqs(c); + netif_napi_del(&c->napi); + kfree(c); +} + +static void mlx5e_build_rq_param(struct mlx5e_priv *priv, + struct mlx5e_rq_param *param) +{ + void *rqc = param->rqc; + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); + MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); + MLX5_SET(wq, wq, pd, priv->pdn); + + param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); + param->wq.linear = 1; +} + +static void mlx5e_build_sq_param(struct mlx5e_priv *priv, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, priv->pdn); + + param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); +} + +static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); +} + +static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); + + mlx5e_build_common_cq_param(priv, param); +} + +static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); + + mlx5e_build_common_cq_param(priv, param); +} + +static void mlx5e_build_channel_param(struct mlx5e_priv *priv, + struct mlx5e_channel_param *cparam) +{ + memset(cparam, 0, sizeof(*cparam)); + + mlx5e_build_rq_param(priv, &cparam->rq); + mlx5e_build_sq_param(priv, &cparam->sq); + mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); + mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); +} + +static int mlx5e_open_channels(struct mlx5e_priv *priv) +{ + struct mlx5e_channel_param cparam; + int err = -ENOMEM; + int i; + int j; + + priv->channel = kcalloc(priv->params.num_channels, + sizeof(struct mlx5e_channel *), GFP_KERNEL); + + priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc, + sizeof(struct mlx5e_sq 
*), GFP_KERNEL); + + if (!priv->channel || !priv->txq_to_sq_map) + goto err_free_txq_to_sq_map; + + mlx5e_build_channel_param(priv, &cparam); + for (i = 0; i < priv->params.num_channels; i++) { + err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); + if (err) + goto err_close_channels; + } + + for (j = 0; j < priv->params.num_channels; j++) { + err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq); + if (err) + goto err_close_channels; + } + + return 0; + +err_close_channels: + for (i--; i >= 0; i--) + mlx5e_close_channel(priv->channel[i]); + +err_free_txq_to_sq_map: + kfree(priv->txq_to_sq_map); + kfree(priv->channel); + + return err; +} + +static void mlx5e_close_channels(struct mlx5e_priv *priv) +{ + int i; + + for (i = 0; i < priv->params.num_channels; i++) + mlx5e_close_channel(priv->channel[i]); + + kfree(priv->txq_to_sq_map); + kfree(priv->channel); +} + +static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(create_tis_in)]; + void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + + memset(in, 0, sizeof(in)); + + MLX5_SET(tisc, tisc, prio, tc); + MLX5_SET(tisc, tisc, transport_domain, priv->tdn); + + return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); +} + +static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) +{ + mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); +} + +static int mlx5e_open_tises(struct mlx5e_priv *priv) +{ + int num_tc = priv->num_tc; + int err; + int tc; + + for (tc = 0; tc < num_tc; tc++) { + err = mlx5e_open_tis(priv, tc); + if (err) + goto err_close_tises; + } + + return 0; + +err_close_tises: + for (tc--; tc >= 0; tc--) + mlx5e_close_tis(priv, tc); + + return err; +} + +static void mlx5e_close_tises(struct mlx5e_priv *priv) +{ + int num_tc = priv->num_tc; + int tc; + + for (tc = 0; tc < num_tc; tc++) + mlx5e_close_tis(priv, tc); +} + +static int mlx5e_open_rqt(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; + void *rqtc; + int inlen; + int err; + int sz; + int i; + + sz = 1 << priv->params.rx_hash_log_tbl_sz; + + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); + MLX5_SET(rqtc, rqtc, rqt_max_size, sz); + + for (i = 0; i < sz; i++) { + int ix = i % priv->params.num_channels; + + MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); + } + + MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); + if (!err) + priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); + + kvfree(in); + + return err; +} + +static void mlx5e_close_rqt(struct mlx5e_priv *priv) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); + MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); + + mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out, + sizeof(out)); +} + +static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) +{ + void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + + MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + +#define ROUGH_MAX_L2_L3_HDR_SZ 256 + +#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP) + +#define 
MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_L4_SPORT |\ + MLX5_HASH_FIELD_SEL_L4_DPORT) + + if (priv->params.lro_en) { + MLX5_SET(tirc, tirc, lro_enable_mask, + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (priv->params.lro_wqe_sz - + ROUGH_MAX_L2_L3_HDR_SZ) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, + MLX5_CAP_ETH(priv->mdev, + lro_timer_supported_periods[3])); + } + + switch (tt) { + case MLX5E_TT_ANY: + MLX5_SET(tirc, tirc, disp_type, + MLX5_TIRC_DISP_TYPE_DIRECT); + MLX5_SET(tirc, tirc, inline_rqn, + priv->channel[0]->rq.rqn); + break; + default: + MLX5_SET(tirc, tirc, disp_type, + MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, + priv->rqtn); + MLX5_SET(tirc, tirc, rx_hash_fn, + MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc, + rx_hash_toeplitz_key), + MLX5_FLD_SZ_BYTES(tirc, + rx_hash_toeplitz_key)); + break; + } + + switch (tt) { + case MLX5E_TT_IPV4_TCP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV6_TCP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV4_UDP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_UDP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV6_UDP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_UDP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV4: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP); + break; + + case MLX5E_TT_IPV6: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP); + break; + } +} + +static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + void *tirc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + + mlx5e_build_tir_ctx(priv, tirc, tt); + + err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); + + kvfree(in); + + return err; +} + +static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) +{ + mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); +} + +static int mlx5e_open_tirs(struct mlx5e_priv *priv) +{ + int err; + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) { + err = mlx5e_open_tir(priv, i); + if (err) + goto err_close_tirs; + } + + return 0; + +err_close_tirs: + for (i--; i >= 0; i--) + mlx5e_close_tir(priv, i); + + return err; +} + +static void mlx5e_close_tirs(struct mlx5e_priv *priv) +{ + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) + 
mlx5e_close_tir(priv, i); +} + +static int mlx5e_set_dev_port_mtu(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int hw_mtu; + int err; + + err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); + if (err) + return err; + + mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); + + if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) + netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", + __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); + + netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); + return 0; +} + +int mlx5e_open_locked(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int num_txqs; + int err; + + num_txqs = priv->params.num_channels * priv->params.num_tc; + netif_set_real_num_tx_queues(netdev, num_txqs); + netif_set_real_num_rx_queues(netdev, priv->params.num_channels); + + err = mlx5e_set_dev_port_mtu(netdev); + if (err) + return err; + + err = mlx5e_open_tises(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n", + __func__, err); + return err; + } + + err = mlx5e_open_channels(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", + __func__, err); + goto err_close_tises; + } + + err = mlx5e_open_rqt(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n", + __func__, err); + goto err_close_channels; + } + + err = mlx5e_open_tirs(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n", + __func__, err); + goto err_close_rqls; + } + + err = mlx5e_open_flow_table(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n", + __func__, err); + goto err_close_tirs; + } + + err = mlx5e_add_all_vlan_rules(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n", + __func__, err); + goto err_close_flow_table; + } + + mlx5e_init_eth_addr(priv); + + set_bit(MLX5E_STATE_OPENED, &priv->state); + + mlx5e_update_carrier(priv); + mlx5e_set_rx_mode_core(priv); + + schedule_delayed_work(&priv->update_stats_work, 0); + return 0; + +err_close_flow_table: + mlx5e_close_flow_table(priv); + +err_close_tirs: + mlx5e_close_tirs(priv); + +err_close_rqls: + mlx5e_close_rqt(priv); + +err_close_channels: + mlx5e_close_channels(priv); + +err_close_tises: + mlx5e_close_tises(priv); + + return err; +} + +static int mlx5e_open(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_close_locked(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + clear_bit(MLX5E_STATE_OPENED, &priv->state); + + mlx5e_set_rx_mode_core(priv); + mlx5e_del_all_vlan_rules(priv); + netif_carrier_off(priv->netdev); + mlx5e_close_flow_table(priv); + mlx5e_close_tirs(priv); + mlx5e_close_rqt(priv); + mlx5e_close_channels(priv); + mlx5e_close_tises(priv); + + return 0; +} + +static int mlx5e_close(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_close_locked(netdev); + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_update_priv_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params) +{ + int err = 0; + int was_opened; + + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(priv->netdev); 
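mlx5e_update_priv_params() here is the single reconfiguration path: under state_lock it tears the data path down if it was up, swaps in the new parameter set, and reopens, so MTU and feature changes reuse exactly the same open/close code as ifup/ifdown. A condensed, illustrative sketch of that shape follows; update_params, open_all and close_all are stand-ins, not the driver functions.

    #include <stdbool.h>
    #include <stdio.h>

    struct params    { int mtu; bool lro_en; };
    struct dev_state { bool opened; struct params params; };

    static void close_all(struct dev_state *d) { d->opened = false; printf("close data path\n"); }
    static int  open_all(struct dev_state *d)  { d->opened = true;  printf("open data path\n"); return 0; }

    static int update_params(struct dev_state *dev, const struct params *new_params)
    {
            bool was_opened = dev->opened;
            int err = 0;

            if (was_opened)
                    close_all(dev);          /* tear down queues built from the old params */

            dev->params = *new_params;       /* the only point where the live params change */

            if (was_opened)
                    err = open_all(dev);     /* rebuild everything from the new params */

            return err;
    }

    int main(void)
    {
            struct dev_state dev = { .opened = true, .params = { .mtu = 1500, .lro_en = false } };
            struct params new_params = { .mtu = 9000, .lro_en = true };

            return update_params(&dev, &new_params);
    }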
+ + priv->params = *new_params; + + if (was_opened) + err = mlx5e_open_locked(priv->netdev); + + return err; +} + +static struct rtnl_link_stats64 * +mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_vport_stats *vstats = &priv->stats.vport; + + stats->rx_packets = vstats->rx_packets; + stats->rx_bytes = vstats->rx_bytes; + stats->tx_packets = vstats->tx_packets; + stats->tx_bytes = vstats->tx_bytes; + stats->multicast = vstats->rx_multicast_packets + + vstats->tx_multicast_packets; + stats->tx_errors = vstats->tx_error_packets; + stats->rx_errors = vstats->rx_error_packets; + stats->tx_dropped = vstats->tx_queue_dropped; + stats->rx_crc_errors = 0; + stats->rx_length_errors = 0; + + return stats; +} + +static void mlx5e_set_rx_mode(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + schedule_work(&priv->set_rx_mode_work); +} + +static int mlx5e_set_mac(struct net_device *netdev, void *addr) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + netif_addr_lock_bh(netdev); + ether_addr_copy(netdev->dev_addr, saddr->sa_data); + netif_addr_unlock_bh(netdev); + + schedule_work(&priv->set_rx_mode_work); + + return 0; +} + +static int mlx5e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + netdev_features_t changes = features ^ netdev->features; + struct mlx5e_params new_params; + bool update_params = false; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + + if (changes & NETIF_F_LRO) { + new_params.lro_en = !!(features & NETIF_F_LRO); + update_params = true; + } + + if (update_params) + mlx5e_update_priv_params(priv, &new_params); + + if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + mlx5e_enable_vlan_filter(priv); + else + mlx5e_disable_vlan_filter(priv); + } + + mutex_unlock(&priv->state_lock); + + return 0; +} + +static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int max_mtu; + int err; + + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + + if (new_mtu > max_mtu) { + netdev_err(netdev, + "%s: Bad MTU (%d) > (%d) Max\n", + __func__, new_mtu, max_mtu); + return -EINVAL; + } + + mutex_lock(&priv->state_lock); + netdev->mtu = new_mtu; + err = mlx5e_update_priv_params(priv, &priv->params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static struct net_device_ops mlx5e_netdev_ops = { + .ndo_open = mlx5e_open, + .ndo_stop = mlx5e_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_get_stats64 = mlx5e_get_stats, + .ndo_set_rx_mode = mlx5e_set_rx_mode, + .ndo_set_mac_address = mlx5e_set_mac, + .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, + .ndo_set_features = mlx5e_set_features, + .ndo_change_mtu = mlx5e_change_mtu, +}; + +static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) +{ + if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return -ENOTSUPP; + if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || + !MLX5_CAP_GEN(mdev, nic_flow_table) || + !MLX5_CAP_ETH(mdev, csum_cap) || + !MLX5_CAP_ETH(mdev, max_lso_cap) || + !MLX5_CAP_ETH(mdev, vlan_cap) || + !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) || + MLX5_CAP_FLOWTABLE(mdev, + flow_table_properties_nic_receive.max_ft_level) + < 3) 
{ + mlx5_core_warn(mdev, + "Not creating net device, some required device capabilities are missing\n"); + return -ENOTSUPP; + } + return 0; +} + +static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + int num_comp_vectors) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + priv->params.log_sq_size = + MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + priv->params.log_rq_size = + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.rx_cq_moderation_usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + priv->params.rx_cq_moderation_pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + priv->params.tx_cq_moderation_usec = + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + priv->params.tx_cq_moderation_pkts = + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + priv->params.min_rx_wqes = + MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; + priv->params.rx_hash_log_tbl_sz = + (order_base_2(num_comp_vectors) > + MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? + order_base_2(num_comp_vectors) : + MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; + priv->params.num_tc = 1; + priv->params.default_vlan_prio = 0; + + priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap); + priv->params.lro_wqe_sz = + MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + + priv->mdev = mdev; + priv->netdev = netdev; + priv->params.num_channels = num_comp_vectors; + priv->num_tc = priv->params.num_tc; + priv->default_vlan_prio = priv->params.default_vlan_prio; + + spin_lock_init(&priv->async_events_spinlock); + mutex_init(&priv->state_lock); + + INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); + INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); + INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); +} + +static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr); +} + +static void mlx5e_build_netdev(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + SET_NETDEV_DEV(netdev, &mdev->pdev->dev); + + if (priv->num_tc > 1) { + mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; + } + + netdev->netdev_ops = &mlx5e_netdev_ops; + netdev->watchdog_timeo = 15 * HZ; + + netdev->ethtool_ops = &mlx5e_ethtool_ops; + + netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_GRO; + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_RXCSUM; + netdev->vlan_features |= NETIF_F_RXHASH; + + if (!!MLX5_CAP_ETH(mdev, lro_cap)) + netdev->vlan_features |= NETIF_F_LRO; + + netdev->hw_features = netdev->vlan_features; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->features = netdev->hw_features; + if (!priv->params.lro_en) + netdev->features &= ~NETIF_F_LRO; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + mlx5e_set_netdev_dev_addr(netdev); +} + +static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, + struct mlx5_core_mr *mr) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_create_mkey_mbox_in *in; + int err; + + in = mlx5_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + in->seg.flags = MLX5_PERM_LOCAL_WRITE | + MLX5_PERM_LOCAL_READ | + MLX5_ACCESS_MODE_PA; + in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); + 
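mlx5e_build_netdev_priv() above sizes the RSS indirection table to at least the number of completion vectors (rx_hash_log_tbl_sz is a log2 table size), and mlx5e_open_rqt() earlier fills that table round-robin over the channels, so every channel receives a share of the hash space even when the table is larger than the channel count. A rough sketch with illustrative numbers; the order_base_2() helper is a stand-in for the kernel's, and default_log_sz is an assumed value, not the driver's constant:

    #include <stdio.h>

    /* stand-in for order_base_2(): smallest n with (1 << n) >= x */
    static int order_base_2(int x)
    {
            int n = 0;

            while ((1 << n) < x)
                    n++;
            return n;
    }

    int main(void)
    {
            int num_comp_vectors = 12;   /* example: one RX/TX channel per IRQ vector */
            int default_log_sz   = 4;    /* assumed default, i.e. 16 entries */
            int log_sz = order_base_2(num_comp_vectors);

            if (log_sz < default_log_sz)
                    log_sz = default_log_sz;

            for (int i = 0; i < (1 << log_sz); i++)
                    printf("rqt[%d] -> channel %d\n", i, i % num_comp_vectors);
            return 0;
    }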
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + + err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL, + NULL); + + kvfree(in); + + return err; +} + +static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) +{ + struct net_device *netdev; + struct mlx5e_priv *priv; + int ncv = mdev->priv.eq_table.num_comp_vectors; + int err; + + if (mlx5e_check_required_hca_cap(mdev)) + return NULL; + + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv); + if (!netdev) { + mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); + return NULL; + } + + mlx5e_build_netdev_priv(mdev, netdev, ncv); + mlx5e_build_netdev(netdev); + + netif_carrier_off(netdev); + + priv = netdev_priv(netdev); + + err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); + if (err) { + netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n", + __func__, err); + goto err_free_netdev; + } + + err = mlx5_core_alloc_pd(mdev, &priv->pdn); + if (err) { + netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n", + __func__, err); + goto err_unmap_free_uar; + } + + err = mlx5_alloc_transport_domain(mdev, &priv->tdn); + if (err) { + netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n", + __func__, err); + goto err_dealloc_pd; + } + + err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); + if (err) { + netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n", + __func__, err); + goto err_dealloc_transport_domain; + } + + err = register_netdev(netdev); + if (err) { + netdev_err(netdev, "%s: register_netdev failed, %d\n", + __func__, err); + goto err_destroy_mkey; + } + + mlx5e_enable_async_events(priv); + + return priv; + +err_destroy_mkey: + mlx5_core_destroy_mkey(mdev, &priv->mr); + +err_dealloc_transport_domain: + mlx5_dealloc_transport_domain(mdev, priv->tdn); + +err_dealloc_pd: + mlx5_core_dealloc_pd(mdev, priv->pdn); + +err_unmap_free_uar: + mlx5_unmap_free_uar(mdev, &priv->cq_uar); + +err_free_netdev: + free_netdev(netdev); + + return NULL; +} + +static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) +{ + struct mlx5e_priv *priv = vpriv; + struct net_device *netdev = priv->netdev; + + unregister_netdev(netdev); + mlx5_core_destroy_mkey(priv->mdev, &priv->mr); + mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); + mlx5_core_dealloc_pd(priv->mdev, priv->pdn); + mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); + mlx5e_disable_async_events(priv); + flush_scheduled_work(); + free_netdev(netdev); +} + +static void *mlx5e_get_netdev(void *vpriv) +{ + struct mlx5e_priv *priv = vpriv; + + return priv->netdev; +} + +static struct mlx5_interface mlx5e_interface = { + .add = mlx5e_create_netdev, + .remove = mlx5e_destroy_netdev, + .event = mlx5e_async_event, + .protocol = MLX5_INTERFACE_PROTOCOL_ETH, + .get_dev = mlx5e_get_netdev, +}; + +void mlx5e_init(void) +{ + mlx5_register_interface(&mlx5e_interface); +} + +void mlx5e_cleanup(void) +{ + mlx5_unregister_interface(&mlx5e_interface); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c new file mode 100644 index 000000000000..9a9374131f5b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include "en.h" + +static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, + struct mlx5e_rx_wqe *wqe, u16 ix) +{ + struct sk_buff *skb; + dma_addr_t dma_addr; + + skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz); + if (unlikely(!skb)) + return -ENOMEM; + + dma_addr = dma_map_single(rq->pdev, + /* hw start padding */ + skb->data, + /* hw end padding */ + rq->wqe_sz, + DMA_FROM_DEVICE); + + if (unlikely(dma_mapping_error(rq->pdev, dma_addr))) + goto err_free_skb; + + skb_reserve(skb, MLX5E_NET_IP_ALIGN); + + *((dma_addr_t *)skb->cb) = dma_addr; + wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN); + + rq->skb[ix] = skb; + + return 0; + +err_free_skb: + dev_kfree_skb(skb); + + return -ENOMEM; +} + +bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + + if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state))) + return false; + + while (!mlx5_wq_ll_is_full(wq)) { + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + + if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head))) + break; + + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); + } + + /* ensure wqes are visible to device before updating doorbell record */ + dma_wmb(); + + mlx5_wq_ll_update_db_record(wq); + + return !mlx5_wq_ll_is_full(wq); +} + +static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN); + struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); + struct tcphdr *tcp; + + u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); + int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || + (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); + + u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN; + + if (eth->h_proto == htons(ETH_P_IP)) { + tcp = (struct tcphdr *)(skb->data + ETH_HLEN + + sizeof(struct iphdr)); + ipv6 = NULL; + } else { + tcp = (struct tcphdr *)(skb->data + ETH_HLEN + + sizeof(struct ipv6hdr)); + ipv4 = NULL; + } + + if (get_cqe_lro_tcppsh(cqe)) + tcp->psh = 1; + + if (tcp_ack) { + tcp->ack = 1; + tcp->ack_seq = cqe->lro_ack_seq_num; + tcp->window = cqe->lro_tcp_win; + } + + if (ipv4) { + 
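+		/* IPv4 LRO fixup: refresh the TTL from the CQE, set tot_len to
+		 * the coalesced length and recompute the header checksum
+		 */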
ipv4->ttl = cqe->lro_min_ttl; + ipv4->tot_len = cpu_to_be16(tot_len); + ipv4->check = 0; + ipv4->check = ip_fast_csum((unsigned char *)ipv4, + ipv4->ihl); + } else { + ipv6->hop_limit = cqe->lro_min_ttl; + ipv6->payload_len = cpu_to_be16(tot_len - + sizeof(struct ipv6hdr)); + } +} + +static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, + struct sk_buff *skb) +{ + u8 cht = cqe->rss_hash_type; + int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 : + (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 : + PKT_HASH_TYPE_NONE; + skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); +} + +static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, + struct mlx5e_rq *rq, + struct sk_buff *skb) +{ + struct net_device *netdev = rq->netdev; + u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + int lro_num_seg; + + skb_put(skb, cqe_bcnt); + + lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; + if (lro_num_seg > 1) { + mlx5e_lro_update_hdr(skb, cqe); + skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + rq->stats.lro_packets++; + rq->stats.lro_bytes += cqe_bcnt; + } + + if (likely(netdev->features & NETIF_F_RXCSUM) && + (cqe->hds_ip_ext & CQE_L2_OK) && + (cqe->hds_ip_ext & CQE_L3_OK) && + (cqe->hds_ip_ext & CQE_L4_OK)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb->ip_summed = CHECKSUM_NONE; + rq->stats.csum_none++; + } + + skb->protocol = eth_type_trans(skb, netdev); + + skb_record_rx_queue(skb, rq->ix); + + if (likely(netdev->features & NETIF_F_RXHASH)) + mlx5e_skb_set_hash(cqe, skb); + + if (cqe_has_vlan(cqe)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(cqe->vlan_info)); +} + +bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) +{ + struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); + int i; + + /* avoid accessing cq (dma coherent memory) if not needed */ + if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) + return false; + + for (i = 0; i < budget; i++) { + struct mlx5e_rx_wqe *wqe; + struct mlx5_cqe64 *cqe; + struct sk_buff *skb; + __be16 wqe_counter_be; + u16 wqe_counter; + + cqe = mlx5e_get_cqe(cq); + if (!cqe) + break; + + mlx5_cqwq_pop(&cq->wq); + + wqe_counter_be = cqe->wqe_counter; + wqe_counter = be16_to_cpu(wqe_counter_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + skb = rq->skb[wqe_counter]; + prefetch(skb->data); + rq->skb[wqe_counter] = NULL; + + dma_unmap_single(rq->pdev, + *((dma_addr_t *)skb->cb), + rq->wqe_sz, + DMA_FROM_DEVICE); + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats.wqe_err++; + dev_kfree_skb(skb); + goto wq_ll_pop; + } + + mlx5e_build_rx_skb(cqe, rq, skb); + rq->stats.packets++; + napi_gro_receive(cq->napi, skb); + +wq_ll_pop: + mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + &wqe->next.next_wqe_index); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + if (i == budget) { + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c new file mode 100644 index 000000000000..03f28f438e55 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/tcp.h> +#include <linux/if_vlan.h> +#include "en.h" + +#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS +#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ + MLX5E_SQ_NOPS_ROOM) + +void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + u16 pi = sq->pc & wq->sz_m1; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + + memset(cseg, 0, sizeof(*cseg)); + + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01); + + sq->skb[pi] = NULL; + sq->pc++; + + if (notify_hw) { + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; + mlx5e_tx_notify_hw(sq, wqe); + } +} + +static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr, + u32 *size) +{ + sq->dma_fifo_pc--; + *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr; + *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size; +} + +static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) +{ + dma_addr_t addr; + u32 size; + int i; + + for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { + mlx5e_dma_pop_last_pushed(sq, &addr, &size); + dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE); + } +} + +static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr, + u32 size) +{ + sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr; + sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size; + sq->dma_fifo_pc++; +} + +static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr, + u32 *size) +{ + *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr; + *size = sq->dma_fifo[i & sq->dma_fifo_mask].size; +} + +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int channel_ix = fallback(dev, skb); + int up = skb_vlan_tag_present(skb) ? 
+ skb->vlan_tci >> VLAN_PRIO_SHIFT : + priv->default_vlan_prio; + int tc = netdev_get_prio_tc_map(dev, up); + + return priv->channel[channel_ix]->tc_to_txq_map[tc]; +} + +static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, + struct sk_buff *skb) +{ +#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */ + return MLX5E_MIN_INLINE; +} + +static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + u16 pi = sq->pc & wq->sz_m1; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wqe_data_seg *dseg; + + u8 opcode = MLX5_OPCODE_SEND; + dma_addr_t dma_addr = 0; + u16 headlen; + u16 ds_cnt; + u16 ihs; + int i; + + memset(wqe, 0, sizeof(*wqe)); + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; + else + sq->stats.csum_offload_none++; + + if (skb_is_gso(skb)) { + u32 payload_len; + + eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + opcode = MLX5_OPCODE_LSO; + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + payload_len = skb->len - ihs; + MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len + + (skb_shinfo(skb)->gso_segs - 1) * ihs; + sq->stats.tso_packets++; + sq->stats.tso_bytes += payload_len; + } else { + ihs = mlx5e_get_inline_hdr_size(sq, skb); + MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len, + ETH_ZLEN); + } + + skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs); + skb_pull_inline(skb, ihs); + + eseg->inline_hdr_sz = cpu_to_be16(ihs); + + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start), + MLX5_SEND_WQE_DS); + dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; + + MLX5E_TX_SKB_CB(skb)->num_dma = 0; + + headlen = skb_headlen(skb); + if (headlen) { + dma_addr = dma_map_single(sq->pdev, skb->data, headlen, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->addr = cpu_to_be64(dma_addr); + dseg->lkey = sq->mkey_be; + dseg->byte_count = cpu_to_be32(headlen); + + mlx5e_dma_push(sq, dma_addr, headlen); + MLX5E_TX_SKB_CB(skb)->num_dma++; + + dseg++; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + int fsz = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->addr = cpu_to_be64(dma_addr); + dseg->lkey = sq->mkey_be; + dseg->byte_count = cpu_to_be32(fsz); + + mlx5e_dma_push(sq, dma_addr, fsz); + MLX5E_TX_SKB_CB(skb)->num_dma++; + + dseg++; + } + + ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma; + + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); + + sq->skb[pi] = skb; + + MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt, + MLX5_SEND_WQEBB_NUM_DS); + sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + + netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes); + + if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) { + netif_tx_stop_queue(sq->txq); + sq->stats.stopped++; + } + + if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; + mlx5e_tx_notify_hw(sq, wqe); + } + + /* fill sq edge with nops to avoid wqe wrap around */ + while ((sq->pc & wq->sz_m1) > sq->edge) + 
mlx5e_send_nop(sq, false); + + sq->stats.packets++; + return NETDEV_TX_OK; + +dma_unmap_wqe_err: + sq->stats.dropped++; + mlx5e_dma_unmap_wqe_err(sq, skb); + + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)]; + + return mlx5e_sq_xmit(sq, skb); +} + +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_sq *sq; + u32 dma_fifo_cc; + u32 nbytes; + u16 npkts; + u16 sqcc; + int i; + + /* avoid accessing cq (dma coherent memory) if not needed */ + if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) + return false; + + sq = container_of(cq, struct mlx5e_sq, cq); + + npkts = 0; + nbytes = 0; + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + /* avoid dirtying sq cache line every cqe */ + dma_fifo_cc = sq->dma_fifo_cc; + + for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) { + struct mlx5_cqe64 *cqe; + u16 wqe_counter; + bool last_wqe; + + cqe = mlx5e_get_cqe(cq); + if (!cqe) + break; + + mlx5_cqwq_pop(&cq->wq); + + wqe_counter = be16_to_cpu(cqe->wqe_counter); + + do { + struct sk_buff *skb; + u16 ci; + int j; + + last_wqe = (sqcc == wqe_counter); + + ci = sqcc & sq->wq.sz_m1; + skb = sq->skb[ci]; + + if (unlikely(!skb)) { /* nop */ + sq->stats.nop++; + sqcc++; + continue; + } + + for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { + dma_addr_t addr; + u32 size; + + mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size); + dma_fifo_cc++; + dma_unmap_single(sq->pdev, addr, size, + DMA_TO_DEVICE); + } + + npkts++; + nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes; + sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + dev_kfree_skb(skb); + } while (!last_wqe); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->dma_fifo_cc = dma_fifo_cc; + sq->cc = sqcc; + + netdev_tx_completed_queue(sq->txq, npkts, nbytes); + + if (netif_tx_queue_stopped(sq->txq) && + mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) && + likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) { + netif_tx_wake_queue(sq->txq); + sq->stats.wake++; + } + if (i == MLX5E_TX_CQ_POLL_BUDGET) { + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c new file mode 100644 index 000000000000..2c7cb6755d1d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "en.h" + +struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) +{ + struct mlx5_cqwq *wq = &cq->wq; + u32 ci = mlx5_cqwq_get_ci(wq); + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); + int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK; + int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1; + + if (cqe_ownership_bit != sw_ownership_val) + return NULL; + + /* ensure cqe content is read after cqe ownership bit */ + rmb(); + + return cqe; +} + +int mlx5e_napi_poll(struct napi_struct *napi, int budget) +{ + struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, + napi); + bool busy = false; + int i; + + clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); + + for (i = 0; i < c->num_tc; i++) + busy |= mlx5e_poll_tx_cq(&c->sq[i].cq); + + busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget); + + busy |= mlx5e_post_rx_wqes(&c->rq); + + if (busy) + return budget; + + napi_complete(napi); + + /* avoid losing completion event during/after polling cqs */ + if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) { + napi_schedule(napi); + return 0; + } + + for (i = 0; i < c->num_tc; i++) + mlx5e_cq_arm(&c->sq[i].cq); + mlx5e_cq_arm(&c->rq.cq); + + return 0; +} + +void mlx5e_completion_event(struct mlx5_core_cq *mcq) +{ + struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); + barrier(); + napi_schedule(cq->napi); +} + +void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) +{ + struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct net_device *netdev = priv->netdev; + + netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n", + __func__, mcq->cqn, event); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 58800e4f3958..a40b96d4c662 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq) int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar) { - struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_priv *priv = &dev->priv; struct mlx5_create_eq_mbox_in *in; struct mlx5_create_eq_mbox_out out; int err; int inlen; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); - err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE, - &eq->buf); + err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); if (err) return err; @@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, goto err_in; } - snprintf(eq->name, 
MLX5_MAX_EQ_NAME, "%s@pci:%s", + snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); + eq->eqn = out.eq_number; eq->irqn = vecidx; eq->dev = dev; eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, - eq->name, eq); + err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0, + priv->irq_info[vecidx].name, eq); if (err) goto err_eq; @@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, return 0; err_irq: - free_irq(table->msix_arr[vecidx].vector, eq); + free_irq(priv->msix_arr[vecidx].vector, eq); err_eq: mlx5_cmd_destroy_eq(dev, eq->eqn); @@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { - struct mlx5_eq_table *table = &dev->priv.eq_table; int err; mlx5_debug_eq_remove(dev, eq); - free_irq(table->msix_arr[eq->irqn].vector, eq); + free_irq(dev->priv.msix_arr[eq->irqn].vector, eq); err = mlx5_cmd_destroy_eq(dev, eq->eqn); if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); - synchronize_irq(table->msix_arr[eq->irqn].vector); + synchronize_irq(dev->priv.msix_arr[eq->irqn].vector); mlx5_buf_free(dev, &eq->buf); return err; @@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u32 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) + if (MLX5_CAP_GEN(dev, pg)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, @@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->pages_eq, MLX5_EQ_VEC_PAGES, - dev->caps.gen.max_vf + 1, + /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", &dev->priv.uuari.uars[0]); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c new file mode 100644 index 000000000000..ca90b9bc3b95 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/export.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/flow_table.h> +#include "mlx5_core.h" + +struct mlx5_ftg { + struct mlx5_flow_table_group g; + u32 id; + u32 start_ix; +}; + +struct mlx5_flow_table { + struct mlx5_core_dev *dev; + u8 level; + u8 type; + u32 id; + struct mutex mutex; /* sync bitmap alloc */ + u16 num_groups; + struct mlx5_ftg *group; + unsigned long *bitmap; + u32 size; +}; + +static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix, + u32 flow_index, void *flow_context) +{ + u32 out[MLX5_ST_SZ_DW(set_fte_out)]; + u32 *in; + void *in_flow_context; + int fcdls = + MLX5_GET(flow_context, flow_context, destination_list_size) * + MLX5_ST_SZ_BYTES(dest_format_struct); + int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(set_fte_in, in, table_type, ft->type); + MLX5_SET(set_fte_in, in, table_id, ft->id); + MLX5_SET(set_fte_in, in, flow_index, flow_index); + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + memcpy(in_flow_context, flow_context, + MLX5_ST_SZ_BYTES(flow_context) + fcdls); + + MLX5_SET(flow_context, in_flow_context, group_id, + ft->group[group_ix].id); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, + sizeof(out)); + kvfree(in); + + return err; +} + +static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index) +{ + u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; + u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v) + MLX5_SET_DFTEI(in, table_type, ft->type); + MLX5_SET_DFTEI(in, table_id, ft->id); + MLX5_SET_DFTEI(in, flow_index, flow_index); + MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); + + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v) + MLX5_SET_DFGI(in, table_type, ft->type); + MLX5_SET_DFGI(in, table_id, ft->id); + MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET_DFGI(in, group_id, ft->group[i].id); + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; + u32 *in; + void *in_match_criteria; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_group *g = &ft->group[i].g; + u32 start_ix = ft->group[i].start_ix; + u32 end_ix = start_ix + (1 << g->log_sz) - 1; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); + return -ENOMEM; + } + in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in, + match_criteria); + + memset(out, 0, 
sizeof(out)); + +#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v) + MLX5_SET_CFGI(in, table_type, ft->type); + MLX5_SET_CFGI(in, table_id, ft->id); + MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); + MLX5_SET_CFGI(in, start_flow_index, start_ix); + MLX5_SET_CFGI(in, end_flow_index, end_ix); + MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable); + + memcpy(in_match_criteria, g->match_criteria, + MLX5_ST_SZ_BYTES(fte_match_param)); + + err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, + sizeof(out)); + if (!err) + ft->group[i].id = MLX5_GET(create_flow_group_out, out, + group_id); + + kvfree(in); + + return err; +} + +static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft) +{ + int i; + + for (i = 0; i < ft->num_groups; i++) + mlx5_destroy_flow_group_cmd(ft, i); +} + +static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft) +{ + int err; + int i; + + for (i = 0; i < ft->num_groups; i++) { + err = mlx5_create_flow_group_cmd(ft, i); + if (err) + goto err_destroy_flow_table_groups; + } + + return 0; + +err_destroy_flow_table_groups: + for (i--; i >= 0; i--) + mlx5_destroy_flow_group_cmd(ft, i); + + return err; +} + +static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(create_flow_table_in, in, table_type, ft->type); + MLX5_SET(create_flow_table_in, in, level, ft->level); + MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size)); + + MLX5_SET(create_flow_table_in, in, opcode, + MLX5_CMD_OP_CREATE_FLOW_TABLE); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, + sizeof(out)); + if (err) + return err; + + ft->id = MLX5_GET(create_flow_table_out, out, table_id); + + return 0; +} + +static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v) + MLX5_SET_DFTI(in, table_type, ft->type); + MLX5_SET_DFTI(in, table_id, ft->id); + MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); + + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable, + u32 *match_criteria, int *group_ix) +{ + void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers); + void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria, + misc_parameters); + void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria, + inner_headers); + int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); + int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc); + int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); + int i; + + for (i = 0; i < ft->num_groups; i++) { + struct mlx5_flow_table_group *g = &ft->group[i].g; + void *gmc_outer = MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + outer_headers); + void *gmc_misc = MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + misc_parameters); + void *gmc_inner = MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + inner_headers); + + if (g->match_criteria_enable != match_criteria_enable) + continue; + + if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) + if (memcmp(mc_outer, gmc_outer, mc_outer_sz)) + continue; + 
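+		/* misc and inner criteria are compared the same way as the
+		 * outer headers above; every enabled section must match exactly
+		 */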
+ if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS) + if (memcmp(mc_misc, gmc_misc, mc_misc_sz)) + continue; + + if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS) + if (memcmp(mc_inner, gmc_inner, mc_inner_sz)) + continue; + + *group_ix = i; + return 0; + } + + return -EINVAL; +} + +static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix) +{ + struct mlx5_ftg *g = &ft->group[group_ix]; + int err = 0; + + mutex_lock(&ft->mutex); + + *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix); + if (*ix >= (g->start_ix + (1 << g->g.log_sz))) + err = -ENOSPC; + else + __set_bit(*ix, ft->bitmap); + + mutex_unlock(&ft->mutex); + + return err; +} + +static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix) +{ + __clear_bit(ix, ft->bitmap); +} + +int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, + void *match_criteria, void *flow_context, + u32 *flow_index) +{ + struct mlx5_flow_table *ft = flow_table; + int group_ix; + int err; + + err = mlx5_find_group(ft, match_criteria_enable, match_criteria, + &group_ix); + if (err) { + mlx5_core_warn(ft->dev, "mlx5_find_group failed\n"); + return err; + } + + err = alloc_flow_index(ft, group_ix, flow_index); + if (err) { + mlx5_core_warn(ft->dev, "alloc_flow_index failed\n"); + return err; + } + + return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context); +} +EXPORT_SYMBOL(mlx5_add_flow_table_entry); + +void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index) +{ + struct mlx5_flow_table *ft = flow_table; + + mlx5_del_flow_entry_cmd(ft, flow_index); + mlx5_free_flow_index(ft, flow_index); +} +EXPORT_SYMBOL(mlx5_del_flow_table_entry); + +void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, + u16 num_groups, + struct mlx5_flow_table_group *group) +{ + struct mlx5_flow_table *ft; + u32 start_ix = 0; + u32 ft_size = 0; + void *gr; + void *bm; + int err; + int i; + + for (i = 0; i < num_groups; i++) + ft_size += (1 << group[i].log_sz); + + ft = kzalloc(sizeof(*ft), GFP_KERNEL); + gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL); + bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL); + if (!ft || !gr || !bm) + goto err_free_ft; + + ft->group = gr; + ft->bitmap = bm; + ft->num_groups = num_groups; + ft->level = level; + ft->type = table_type; + ft->size = ft_size; + ft->dev = dev; + mutex_init(&ft->mutex); + + for (i = 0; i < ft->num_groups; i++) { + memcpy(&ft->group[i].g, &group[i], sizeof(*group)); + ft->group[i].start_ix = start_ix; + start_ix += 1 << group[i].log_sz; + } + + err = mlx5_create_flow_table_cmd(ft); + if (err) + goto err_free_ft; + + err = mlx5_create_flow_table_groups(ft); + if (err) + goto err_destroy_flow_table_cmd; + + return ft; + +err_destroy_flow_table_cmd: + mlx5_destroy_flow_table_cmd(ft); + +err_free_ft: + mlx5_core_warn(dev, "failed to alloc flow table\n"); + kfree(bm); + kfree(gr); + kfree(ft); + + return NULL; +} +EXPORT_SYMBOL(mlx5_create_flow_table); + +void mlx5_destroy_flow_table(void *flow_table) +{ + struct mlx5_flow_table *ft = flow_table; + + mlx5_destroy_flow_table_groups(ft); + mlx5_destroy_flow_table_cmd(ft); + kfree(ft->bitmap); + kfree(ft->group); + kfree(ft); +} +EXPORT_SYMBOL(mlx5_destroy_flow_table); + +u32 mlx5_get_flow_table_id(void *flow_table) +{ + struct mlx5_flow_table *ft = flow_table; + + return ft->id; +} +EXPORT_SYMBOL(mlx5_get_flow_table_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 
4b4cda3bcc5f..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -35,79 +35,133 @@ #include <linux/module.h> #include "mlx5_core.h" -int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) +static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, + int outlen) { - struct mlx5_cmd_query_adapter_mbox_out *out; - struct mlx5_cmd_query_adapter_mbox_in in; + u32 in[MLX5_ST_SZ_DW(query_adapter_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); +} + +int mlx5_query_board_id(struct mlx5_core_dev *dev) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); int err; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - memset(&in, 0, sizeof(in)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + err = mlx5_cmd_query_adapter(dev, out, outlen); if (err) - goto out_out; - - if (out->hdr.status) { - err = mlx5_cmd_status_to_err(&out->hdr); - goto out_out; - } + goto out; - memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid)); + memcpy(dev->board_id, + MLX5_ADDR_OF(query_adapter_out, out, + query_adapter_struct.vsd_contd_psid), + MLX5_FLD_SZ_BYTES(query_adapter_out, + query_adapter_struct.vsd_contd_psid)); -out_out: +out: kfree(out); - return err; } -int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps) +int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id) { - return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR); + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); + int err; + + out = kzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_cmd_query_adapter(mdev, out, outlen); + if (err) + goto out; + + *vendor_id = MLX5_GET(query_adapter_out, out, + query_adapter_struct.ieee_vendor_id); +out: + kfree(out); + return err; } +EXPORT_SYMBOL(mlx5_core_query_vendor_id); -int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps) +int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { - u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; - int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - void *out; int err; - if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)) - return -ENOTSUPP; + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; - memset(in, 0, sizeof(in)); - out = kzalloc(out_sz, GFP_KERNEL); - if (!out) - return -ENOMEM; - MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); - MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); if (err) - goto out; + return err; - err = mlx5_cmd_status_to_err_v2(out); - if (err) { - mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err); - goto out; + if (MLX5_CAP_GEN(dev, eth_net_offloads)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; } - memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct), - sizeof(*caps)); + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, + HCA_CAP_OPMOD_GET_CUR); + 
if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } - mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n", - be32_to_cpu(caps->per_transport_caps.rc_odp_caps), - be32_to_cpu(caps->per_transport_caps.uc_odp_caps), - be32_to_cpu(caps->per_transport_caps.ud_odp_caps)); + if (MLX5_CAP_GEN(dev, atomic)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } -out: - kfree(out); - return err; + if (MLX5_CAP_GEN(dev, roce)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, nic_flow_table)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + return 0; } -EXPORT_SYMBOL(mlx5_query_odp_caps); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 28425e5ea91f..afad529838de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -38,6 +38,7 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/io-mapping.h> +#include <linux/interrupt.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cq.h> #include <linux/mlx5/qp.h> @@ -47,10 +48,6 @@ #include <linux/mlx5/mlx5_ifc.h> #include "mlx5_core.h" -#define DRIVER_NAME "mlx5_core" -#define DRIVER_VERSION "3.0" -#define DRIVER_RELDATE "January 2015" - MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); MODULE_LICENSE("Dual BSD/GPL"); @@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev) static int mlx5_enable_msix(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = &dev->priv.eq_table; - int num_eqs = 1 << dev->caps.gen.log_max_eq; + struct mlx5_priv *priv = &dev->priv; + struct mlx5_eq_table *table = &priv->eq_table; + int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int nvec; int i; - nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; + nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + + MLX5_EQ_VEC_COMP_BASE; nvec = min_t(int, nvec, num_eqs); if (nvec <= MLX5_EQ_VEC_COMP_BASE) return -ENOMEM; - table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL); - if (!table->msix_arr) - return -ENOMEM; + priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL); + + priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); + if (!priv->msix_arr || !priv->irq_info) + goto err_free_msix; for (i = 0; i < nvec; i++) - table->msix_arr[i].entry = i; + priv->msix_arr[i].entry = i; - nvec = pci_enable_msix_range(dev->pdev, table->msix_arr, + nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr, MLX5_EQ_VEC_COMP_BASE + 1, nvec); if (nvec < 0) return nvec; @@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev) table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; return 0; + +err_free_msix: + kfree(priv->irq_info); + kfree(priv->msix_arr); + return -ENOMEM; } static void mlx5_disable_msix(struct mlx5_core_dev *dev) { - struct 
mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_priv *priv = &dev->priv; pci_disable_msix(dev->pdev); - kfree(table->msix_arr); + kfree(priv->irq_info); + kfree(priv->msix_arr); } struct mlx5_reg_host_endianess { @@ -277,98 +284,20 @@ static u16 to_fw_pkey_sz(u32 size) } } -/* selectively copy writable fields clearing any reserved area - */ -static void copy_rw_fields(void *to, struct mlx5_caps *from) -{ - __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22); - u64 v64; - - MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp); - MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp); - MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp); - MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size); - MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); - MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12); - v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; - *flags_off = cpu_to_be64(v64); -} - -static u16 get_pkey_table_size(int pkey) -{ - if (pkey > MLX5_MAX_LOG_PKEY_TABLE) - return 0; - - return MLX5_MIN_PKEY_TABLE_SIZE << pkey; -} - -static void fw2drv_caps(struct mlx5_caps *caps, void *out) -{ - struct mlx5_general_caps *gen = &caps->gen; - - gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz); - gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz); - gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp); - gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz); - gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs); - gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz); - gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq); - gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz); - gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey); - gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq); - gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection); - gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz); - gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size); - gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size); - gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc); - gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc); - gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp); - gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp); - gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt); - gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size)); - gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay); - gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports); - gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg); - gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support); - gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22)); - pr_debug("flags = 0x%llx\n", gen->flags); - gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz); - gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz); - gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf); - gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size); - gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq); - gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq); - 
gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc); - gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg); - gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd); - gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd); - gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz); -} - -static const char *caps_opmod_str(u16 opmod) -{ - switch (opmod) { - case HCA_CAP_OPMOD_GET_MAX: - return "GET_MAX"; - case HCA_CAP_OPMOD_GET_CUR: - return "GET_CUR"; - default: - return "Invalid"; - } -} - -int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, - u16 opmod) +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, + enum mlx5_cap_mode cap_mode) { u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - void *out; + void *out, *hca_caps; + u16 opmod = (cap_type << 1) | (cap_mode & 0x01); int err; memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, op_mod, opmod); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); @@ -377,12 +306,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, err = mlx5_cmd_status_to_err_v2(out); if (err) { - mlx5_core_warn(dev, "query max hca cap failed, %d\n", err); + mlx5_core_warn(dev, + "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", + cap_type, cap_mode, err); goto query_ex; } - mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod)); - fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct)); + hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability); + + switch (cap_mode) { + case HCA_CAP_OPMOD_GET_MAX: + memcpy(dev->hca_caps_max[cap_type], hca_caps, + MLX5_UN_SZ_BYTES(hca_cap_union)); + break; + case HCA_CAP_OPMOD_GET_CUR: + memcpy(dev->hca_caps_cur[cap_type], hca_caps, + MLX5_UN_SZ_BYTES(hca_cap_union)); + break; + default: + mlx5_core_warn(dev, + "Tried to query dev cap type(%x) with wrong opmode(%x)\n", + cap_type, cap_mode); + err = -EINVAL; + break; + } query_ex: kfree(out); return err; @@ -409,49 +356,45 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) { void *set_ctx = NULL; struct mlx5_profile *prof = dev->profile; - struct mlx5_caps *cur_caps = NULL; - struct mlx5_caps *max_caps = NULL; int err = -ENOMEM; int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + void *set_hca_cap; set_ctx = kzalloc(set_sz, GFP_KERNEL); if (!set_ctx) goto query_ex; - max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL); - if (!max_caps) - goto query_ex; - - cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL); - if (!cur_caps) - goto query_ex; - - err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); if (err) goto query_ex; - err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); if (err) goto query_ex; + set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, + capability); + memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], + MLX5_ST_SZ_BYTES(cmd_hca_cap)); + + mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", + mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), + 128); /* we limit the size of the pkey table to 128 entries for now */ - cur_caps->gen.pkey_table_size = 128; + MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, + to_fw_pkey_sz(128)); if (prof->mask & 
MLX5_PROF_MASK_QP_SIZE) - cur_caps->gen.log_max_qp = prof->log_max_qp; + MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, + prof->log_max_qp); - /* disable checksum */ - cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + /* disable cmdif checksum */ + MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); - copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct), - cur_caps); err = set_caps(dev, set_ctx, set_sz); query_ex: - kfree(cur_caps); - kfree(max_caps); kfree(set_ctx); - return err; } @@ -507,6 +450,74 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) return 0; } +static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + struct msix_entry *msix = priv->msix_arr; + int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; + int numa_node = dev_to_node(&mdev->pdev->dev); + int err; + + if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { + mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); + return -ENOMEM; + } + + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + priv->irq_info[i].mask); + + err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); + if (err) { + mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", + irq); + goto err_clear_mask; + } + + return 0; + +err_clear_mask: + free_cpumask_var(priv->irq_info[i].mask); + return err; +} + +static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + struct msix_entry *msix = priv->msix_arr; + int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; + + irq_set_affinity_hint(irq, NULL); + free_cpumask_var(priv->irq_info[i].mask); +} + +static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) +{ + int err; + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { + err = mlx5_irq_set_affinity_hint(mdev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + mlx5_irq_clear_affinity_hint(mdev, i); + + return err; +} + +static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) +{ + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) + mlx5_irq_clear_affinity_hint(mdev, i); +} + int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) { struct mlx5_eq_table *table = &dev->priv.eq_table; @@ -549,7 +560,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev) static int alloc_comp_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; - char name[MLX5_MAX_EQ_NAME]; + char name[MLX5_MAX_IRQ_NAME]; struct mlx5_eq *eq; int ncomp_vec; int nent; @@ -566,7 +577,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) goto clean; } - snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); + snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, name, &dev->priv.uuari.uars[0]); @@ -588,6 +599,61 @@ clean: return err; } +#ifdef CONFIG_MLX5_CORE_EN +static int mlx5_core_set_issi(struct mlx5_core_dev *dev) +{ + u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]; + u32 query_out[MLX5_ST_SZ_DW(query_issi_out)]; + u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]; + u32 set_out[MLX5_ST_SZ_DW(set_issi_out)]; + int err; + u32 sup_issi; + + memset(query_in, 0, sizeof(query_in)); + memset(query_out, 0, sizeof(query_out)); + + MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); + + err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in), + query_out, sizeof(query_out)); + if (err) { 
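+		/* a BAD_OP status means the firmware does not implement
+		 * QUERY_ISSI, i.e. only ISSI 0 is supported
+		 */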
+ if (((struct mlx5_outbox_hdr *)query_out)->status == + MLX5_CMD_STAT_BAD_OP_ERR) { + pr_debug("Only ISSI 0 is supported\n"); + return 0; + } + + pr_err("failed to query ISSI\n"); + return err; + } + + sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); + + if (sup_issi & (1 << 1)) { + memset(set_in, 0, sizeof(set_in)); + memset(set_out, 0, sizeof(set_out)); + + MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); + MLX5_SET(set_issi_in, set_in, current_issi, 1); + + err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in), + set_out, sizeof(set_out)); + if (err) { + pr_err("failed to set ISSI=1\n"); + return err; + } + + dev->issi = 1; + + return 0; + } else if (sup_issi & (1 << 0) || !sup_issi) { + return 0; + } + + return -ENOTSUPP; +} +#endif + static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) { struct mlx5_priv *priv = &dev->priv; @@ -650,6 +716,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_pagealloc_cleanup; } +#ifdef CONFIG_MLX5_CORE_EN + err = mlx5_core_set_issi(dev); + if (err) { + dev_err(&pdev->dev, "failed to set issi\n"); + goto err_disable_hca; + } +#endif + err = mlx5_satisfy_startup_pages(dev, 1); if (err) { dev_err(&pdev->dev, "failed to allocate boot pages\n"); @@ -688,15 +762,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) mlx5_start_health_poll(dev); - err = mlx5_cmd_query_hca_cap(dev, &dev->caps); + err = mlx5_query_hca_caps(dev); if (err) { dev_err(&pdev->dev, "query hca failed\n"); goto err_stop_poll; } - err = mlx5_cmd_query_adapter(dev); + err = mlx5_query_board_id(dev); if (err) { - dev_err(&pdev->dev, "query adapter failed\n"); + dev_err(&pdev->dev, "query board id failed\n"); goto err_stop_poll; } @@ -730,6 +804,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_stop_eqs; } + err = mlx5_irq_set_affinity_hints(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); + goto err_free_comp_eqs; + } + MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); mlx5_init_cq_table(dev); @@ -739,6 +819,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) return 0; +err_free_comp_eqs: + free_comp_eqs(dev); + err_stop_eqs: mlx5_stop_eqs(dev); @@ -793,6 +876,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); + mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); @@ -1048,6 +1132,10 @@ static int __init init(void) if (err) goto err_health; +#ifdef CONFIG_MLX5_CORE_EN + mlx5e_init(); +#endif + return 0; err_health: @@ -1060,6 +1148,9 @@ err_debug: static void __exit cleanup(void) { +#ifdef CONFIG_MLX5_CORE_EN + mlx5e_cleanup(); +#endif pci_unregister_driver(&mlx5_core_driver); mlx5_health_cleanup(); destroy_workqueue(mlx5_core_wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c index d79fd85d1dd5..d5a0c2d61a18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG); memcpy(in.gid, mgid, sizeof(*mgid)); in.qpn = cpu_to_be32(qpn); err = 
mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index a051b906afdf..fc88ecaecb4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -37,6 +37,10 @@ #include <linux/kernel.h> #include <linux/sched.h> +#define DRIVER_NAME "mlx5_core" +#define DRIVER_VERSION "3.0-1" +#define DRIVER_RELDATE "January 2015" + extern int mlx5_core_debug_mask; #define mlx5_core_dbg(dev, format, ...) \ @@ -65,11 +69,20 @@ enum { MLX5_CMD_TIME, /* print command execution time */ }; +static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in, + int in_size, u32 *out, + int out_size) +{ + mlx5_cmd_exec(dev, in, in_size, out, out_size); + return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out); +} -int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, - struct mlx5_caps *caps); -int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); +int mlx5_query_hca_caps(struct mlx5_core_dev *dev); +int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); +void mlx5e_init(void); +void mlx5e_cleanup(void); + #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 49e90f2612d8..70147999f657 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -102,3 +102,229 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps) return err; } EXPORT_SYMBOL_GPL(mlx5_set_port_caps); + +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask, u8 local_port) +{ + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(ptys_reg, in, local_port, local_port); + MLX5_SET(ptys_reg, in, proto_mask, proto_mask); + + err = mlx5_core_access_reg(dev, in, sizeof(in), ptys, + ptys_size, MLX5_REG_PTYS, 0, 0); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_ptys); + +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); + if (err) + return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + else + *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap); + +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); + if (err) + return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + else + *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin); + +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port) +{ + u32 
out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port); + if (err) + return err; + + *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper); + +int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, int proto_mask, + u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port); + if (err) + return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + else + *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); + +int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, + int proto_mask) +{ + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, proto_mask, proto_mask); + if (proto_mask == MLX5_PTYS_EN) + MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); + else + MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PTYS, 0, 1); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_proto); + +int mlx5_set_port_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status) +{ + u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 out[MLX5_ST_SZ_DW(paos_reg)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(paos_reg, in, admin_status, status); + MLX5_SET(paos_reg, in, ase, 1); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PAOS, 0, 1); +} + +int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) +{ + u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 out[MLX5_ST_SZ_DW(paos_reg)]; + int err; + + memset(in, 0, sizeof(in)); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PAOS, 0, 0); + if (err) + return err; + + *status = MLX5_GET(paos_reg, out, oper_status); + return err; +} + +static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, + int *max_mtu, int *oper_mtu, u8 port) +{ + u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(pmtu_reg, in, local_port, port); + + mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PMTU, 0, 0); + + if (max_mtu) + *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu); + if (oper_mtu) + *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu); + if (admin_mtu) + *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); +} + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) +{ + u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(pmtu_reg, in, admin_mtu, mtu); + MLX5_SET(pmtu_reg, in, local_port, port); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PMTU, 0, 1); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); + +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, + u8 port) +{ + mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); +} +EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); + +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, + u8 port) +{ + mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port); +} +EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); + +static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, + int pvlc_size, u8 
local_port) +{ + u32 in[MLX5_ST_SZ_DW(pvlc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(ptys_reg, in, local_port, local_port); + + err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc, + pvlc_size, MLX5_REG_PVLC, 0, 0); + + return err; +} + +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(pvlc_reg)]; + int err; + + err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port); + if (err) + return err; + + *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index dc7dbf7e9d98..8b494b562263 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_destroy_qp_mbox_in din; struct mlx5_destroy_qp_mbox_out dout; int err; + void *qpc; memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + if (dev->issi) { + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + /* 0xffffff means we ask to work with cqe version 0 */ + MLX5_SET(qpc, qpc, user_index, 0xffffff); + } + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) { mlx5_core_warn(dev, "ret %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index f9d25dcd03c1..c48f504ccbeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -37,6 +37,7 @@ #include <linux/mlx5/srq.h> #include <rdma/ib_verbs.h> #include "mlx5_core.h" +#include "transobj.h" void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) { @@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) complete(&srq->free); } +static int get_pas_size(void *srqc) +{ + u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12; + u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size); + u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride); + u32 page_offset = MLX5_GET(srqc, srqc, page_offset); + u32 po_quanta = 1 << (log_page_size - 6); + u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); + u32 page_size = 1 << log_page_size; + u32 rq_sz_po = rq_sz + (page_offset * po_quanta); + u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size; + + return rq_num_pas * sizeof(u64); +} + +static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc) +{ + void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + if (srqc_to_rmpc) { + switch (MLX5_GET(srqc, srqc, state)) { + case MLX5_SRQC_STATE_GOOD: + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + break; + case MLX5_SRQC_STATE_ERROR: + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR); + break; + default: + pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__, + __LINE__, MLX5_GET(srqc, srqc, state)); + MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state)); + } + + MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature)); + MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size)); + MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4); + MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size)); + MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset)); + MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm)); + MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd)); + MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, 
srqc, dbr_addr)); + } else { + switch (MLX5_GET(rmpc, rmpc, state)) { + case MLX5_RMPC_STATE_RDY: + MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD); + break; + case MLX5_RMPC_STATE_ERR: + MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR); + break; + default: + pr_warn("%s: %d: Unknown rmp state = 0x%x\n", + __func__, __LINE__, + MLX5_GET(rmpc, rmpc, state)); + MLX5_SET(srqc, srqc, state, + MLX5_GET(rmpc, rmpc, state)); + } + + MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature)); + MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz)); + MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4); + MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz)); + MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset)); + MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm)); + MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd)); + MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr)); + } +} + struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) { struct mlx5_srq_table *table = &dev->priv.srq_table; @@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) } EXPORT_SYMBOL(mlx5_core_get_srq); -int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen) +static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen) { struct mlx5_create_srq_mbox_out out; - struct mlx5_srq_table *table = &dev->priv.srq_table; - struct mlx5_destroy_srq_mbox_in din; - struct mlx5_destroy_srq_mbox_out dout; int err; memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - if (err) - return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); + err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), + sizeof(out)); srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + return err; +} + +static int destroy_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + struct mlx5_destroy_srq_mbox_in in; + struct mlx5_destroy_srq_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + + return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), + (u32 *)(&out), sizeof(out)); +} + +static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq) +{ + struct mlx5_arm_srq_mbox_in in; + struct mlx5_arm_srq_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); + in.hdr.opmod = cpu_to_be16(!!is_srq); + in.srqn = cpu_to_be32(srq->srqn); + in.lwm = cpu_to_be16(lwm); + + return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), + sizeof(in), (u32 *)(&out), + sizeof(out)); +} + +static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + struct mlx5_query_srq_mbox_in in; + + memset(&in, 0, sizeof(in)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + + return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), + (u32 *)out, sizeof(*out)); +} + +static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, + int srq_inlen) +{ + u32 
create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; + void *create_in; + void *srqc; + void *xrc_srqc; + void *pas; + int pas_size; + int inlen; + int err; + + srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); + pas_size = get_pas_size(srqc); + inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; + create_in = mlx5_vzalloc(inlen); + if (!create_in) + return -ENOMEM; + + xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, + xrc_srq_context_entry); + pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); + + memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc)); + memcpy(pas, in->pas, pas_size); + /* 0xffffff means we ask to work with cqe version 0 */ + MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff); + MLX5_SET(create_xrc_srq_in, create_in, opcode, + MLX5_CMD_OP_CREATE_XRC_SRQ); + + memset(create_out, 0, sizeof(create_out)); + err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, + sizeof(create_out)); + if (err) + goto out; + + srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn); +out: + kvfree(create_in); + return err; +} + +static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; + u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; + + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + + MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode, + MLX5_CMD_OP_DESTROY_XRC_SRQ); + MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + + return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); +} + +static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, u16 lwm) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; + u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; + + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm); + + return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); +} + +static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; + u32 *xrcsrq_out; + void *srqc; + void *xrc_srqc; + int err; + + xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (!xrcsrq_out) + return -ENOMEM; + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + + MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode, + MLX5_CMD_OP_QUERY_XRC_SRQ); + MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (err) + goto out; + + xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out, + xrc_srq_context_entry); + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); + +out: + kvfree(xrcsrq_out); + return err; +} + +static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int srq_inlen) +{ + void *create_in; + void *rmpc; + void *srqc; + int pas_size; + int inlen; + int err; + + srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); + pas_size = get_pas_size(srqc); + inlen = 
MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size; + create_in = mlx5_vzalloc(inlen); + if (!create_in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx); + + memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); + rmpc_srqc_reformat(srqc, rmpc, true); + + err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); + + kvfree(create_in); + return err; +} + +static int destroy_rmp_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + return mlx5_core_destroy_rmp(dev, srq->srqn); +} + +static int arm_rmp_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + u16 lwm) +{ + void *in; + void *rmpc; + void *wq; + void *bitmask; + int err; + + in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in)); + if (!in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx); + bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask); + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); + MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn); + MLX5_SET(wq, wq, lwm, lwm); + MLX5_SET(rmp_bitmask, bitmask, lwm, 1); + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + + err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in)); + + kvfree(in); + return err; +} + +static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + u32 *rmp_out; + void *rmpc; + void *srqc; + int err; + + rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out)); + if (!rmp_out) + return -ENOMEM; + + err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out); + if (err) + goto out; + + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context); + rmpc_srqc_reformat(srqc, rmpc, false); + +out: + kvfree(rmp_out); + return err; +} + +static int create_srq_split(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, + int inlen, int is_xrc) +{ + if (!dev->issi) + return create_srq_cmd(dev, srq, in, inlen); + else if (srq->common.res == MLX5_RES_XSRQ) + return create_xrc_srq_cmd(dev, srq, in, inlen); + else + return create_rmp_cmd(dev, srq, in, inlen); +} + +static int destroy_srq_split(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + if (!dev->issi) + return destroy_srq_cmd(dev, srq); + else if (srq->common.res == MLX5_RES_XSRQ) + return destroy_xrc_srq_cmd(dev, srq); + else + return destroy_rmp_cmd(dev, srq); +} + +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen, + int is_xrc) +{ + int err; + struct mlx5_srq_table *table = &dev->priv.srq_table; + + srq->common.res = is_xrc ? 
MLX5_RES_XSRQ : MLX5_RES_SRQ; + + err = create_srq_split(dev, srq, in, inlen, is_xrc); + if (err) + return err; + atomic_set(&srq->refcount, 1); init_completion(&srq->free); @@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, spin_unlock_irq(&table->lock); if (err) { mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); - goto err_cmd; + goto err_destroy_srq_split; } return 0; -err_cmd: - memset(&din, 0, sizeof(din)); - memset(&dout, 0, sizeof(dout)); - din.srqn = cpu_to_be32(srq->srqn); - din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); - mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); +err_destroy_srq_split: + destroy_srq_split(dev, srq); + return err; } EXPORT_SYMBOL(mlx5_core_create_srq); int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) { - struct mlx5_destroy_srq_mbox_in in; - struct mlx5_destroy_srq_mbox_out out; struct mlx5_srq_table *table = &dev->priv.srq_table; struct mlx5_core_srq *tmp; int err; @@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) return -EINVAL; } - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + err = destroy_srq_split(dev, srq); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - if (atomic_dec_and_test(&srq->refcount)) complete(&srq->free); wait_for_completion(&srq->free); @@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_query_srq_mbox_out *out) { - struct mlx5_query_srq_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, sizeof(*out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); - if (err) - return err; - - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); - - return err; + if (!dev->issi) + return query_srq_cmd(dev, srq, out); + else if (srq->common.res == MLX5_RES_XSRQ) + return query_xrc_srq_cmd(dev, srq, out); + else + return query_rmp_cmd(dev, srq, out); } EXPORT_SYMBOL(mlx5_core_query_srq); int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq) { - struct mlx5_arm_srq_mbox_in in; - struct mlx5_arm_srq_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); - in.hdr.opmod = cpu_to_be16(!!is_srq); - in.srqn = cpu_to_be32(srq->srqn); - in.lwm = cpu_to_be16(lwm); - - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return err; + if (!dev->issi) + return arm_srq_cmd(dev, srq, lwm, is_srq); + else if (srq->common.res == MLX5_RES_XSRQ) + return arm_xrc_srq_cmd(dev, srq, lwm); + else + return arm_rmp_cmd(dev, srq, lwm); } EXPORT_SYMBOL(mlx5_core_arm_srq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c new file mode 100644 index 000000000000..8d98b03026d5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" +#include "transobj.h" + +int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn) +{ + u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)]; + u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(alloc_transport_domain_in, in, opcode, + MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *tdn = MLX5_GET(alloc_transport_domain_out, out, + transport_domain); + + return err; +} + +void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(dealloc_transport_domain_in, in, opcode, + MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN); + MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) +{ + u32 out[MLX5_ST_SZ_DW(create_rq_out)]; + int err; + + MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *rqn = MLX5_GET(create_rq_out, out, rqn); + + return err; +} + +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_rq_out)]; + + MLX5_SET(modify_rq_in, in, rqn, rqn); + MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rq_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); + MLX5_SET(destroy_rq_in, in, rqn, rqn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_core_create_sq(struct mlx5_core_dev 
*dev, u32 *in, int inlen, u32 *sqn) +{ + u32 out[MLX5_ST_SZ_DW(create_sq_out)]; + int err; + + MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *sqn = MLX5_GET(create_sq_out, out, sqn); + + return err; +} + +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_sq_out)]; + + MLX5_SET(modify_sq_in, in, sqn, sqn); + MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_sq_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ); + MLX5_SET(destroy_sq_in, in, sqn, sqn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tirn) +{ + u32 out[MLX5_ST_SZ_DW(create_tir_out)]; + int err; + + MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *tirn = MLX5_GET(create_tir_out, out, tirn); + + return err; +} + +void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_tir_out)]; + u32 out[MLX5_ST_SZ_DW(destroy_tir_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR); + MLX5_SET(destroy_tir_in, in, tirn, tirn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tisn) +{ + u32 out[MLX5_ST_SZ_DW(create_tis_out)]; + int err; + + MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *tisn = MLX5_GET(create_tis_out, out, tisn); + + return err; +} + +void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_tis_out)]; + u32 out[MLX5_ST_SZ_DW(destroy_tis_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS); + MLX5_SET(destroy_tis_in, in, tisn, tisn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn) +{ + u32 out[MLX5_ST_SZ_DW(create_rmp_out)]; + int err; + + MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *rmpn = MLX5_GET(create_rmp_out, out, rmpn); + + return err; +} + +int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_rmp_out)]; + + MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP); + MLX5_SET(destroy_rmp_in, in, rmpn, rmpn); + + return 
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(query_rmp_in)]; + int outlen = MLX5_ST_SZ_BYTES(query_rmp_out); + + memset(in, 0, sizeof(in)); + MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP); + MLX5_SET(query_rmp_in, in, rmpn, rmpn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); +} + +int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) +{ + void *in; + void *rmpc; + void *wq; + void *bitmask; + int err; + + in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in)); + if (!in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx); + bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask); + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); + MLX5_SET(modify_rmp_in, in, rmpn, rmpn); + MLX5_SET(wq, wq, lwm, lwm); + MLX5_SET(rmp_bitmask, bitmask, lwm, 1); + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + + err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in)); + + kvfree(in); + + return err; +} + +int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *xsrqn) +{ + u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; + int err; + + MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn); + + return err; +} + +int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); + MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; + void *srqc; + void *xrc_srqc; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); + MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (!err) { + xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out, + xrc_srq_context_entry); + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); + } + + return err; +} + +int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) +{ + u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; + u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn); + MLX5_SET(arm_xrc_srq_in, in, lwm, lwm); + MLX5_SET(arm_xrc_srq_in, in, op_mod, + MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h new file mode 100644 index 000000000000..f9ef244710d5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __TRANSOBJ_H__ +#define __TRANSOBJ_H__ + +int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn); +void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn); +int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqn); +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); +void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); +int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *sqn); +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); +void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tirn); +void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tisn); +void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); +int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen); +int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); +int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); +int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); +int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); + +#endif /* __TRANSOBJ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 5a89bb1d678a..9ef85873ceea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) for (i = 0; i < tot_uuars; i++) { bf = &uuari->bfs[i]; - bf->buf_size = dev->caps.gen.bf_reg_size / 2; + bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2; bf->uar = &uuari->uars[i / 
MLX5_BF_REGS_PER_PAGE]; bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; bf->reg = NULL; /* Add WC support */ - bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size + - MLX5_BF_OFFSET; + bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * + (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + + MLX5_BF_OFFSET; bf->need_lock = need_uuar_lock(i); spin_lock_init(&bf->lock); spin_lock_init(&bf->lock32); @@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) return 0; } + +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +{ + phys_addr_t pfn; + phys_addr_t uar_bar_start; + int err; + + err = mlx5_cmd_alloc_uar(mdev, &uar->index); + if (err) { + mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); + return err; + } + + uar_bar_start = pci_resource_start(mdev->pdev, 0); + pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; + uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) { + mlx5_core_warn(mdev, "ioremap() failed, %d\n", err); + err = -ENOMEM; + goto err_free_uar; + } + + return 0; + +err_free_uar: + mlx5_cmd_free_uar(mdev, uar->index); + + return err; +} +EXPORT_SYMBOL(mlx5_alloc_map_uar); + +void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +{ + iounmap(uar->map); + mlx5_cmd_free_uar(mdev, uar->index); +} +EXPORT_SYMBOL(mlx5_unmap_free_uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c new file mode 100644 index 000000000000..b94177ebcf3a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -0,0 +1,345 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/export.h> +#include <linux/etherdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/vport.h> +#include "mlx5_core.h" + +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod) +{ + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_vport_state_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_STATE); + MLX5_SET(query_vport_state_in, in, op_mod, opmod); + + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, + sizeof(out)); + if (err) + mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n"); + + return MLX5_GET(query_vport_state_out, out, state); +} +EXPORT_SYMBOL(mlx5_query_vport_state); + +void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + u8 *out_addr; + + out = mlx5_vzalloc(outlen); + if (!out) + return; + + out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out, + nic_vport_context.permanent_address); + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + + memset(out, 0, outlen); + mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + + ether_addr_copy(addr, &out_addr[2]); + + kvfree(out); +} +EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address); + +int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid) +{ + int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in); + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out); + int is_group_manager; + void *out = NULL; + void *in = NULL; + union ib_gid *tmp; + int tbsz; + int nout; + int err; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size)); + mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n", + vf_num, gid_index, tbsz); + + if (gid_index > tbsz && gid_index != 0xffff) + return -EINVAL; + + if (gid_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*gid); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID); + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num); + MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1); + } else { + err = -EPERM; + goto out; + } + } + MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index); + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto out; + + tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out); + gid->global.subnet_prefix = tmp->global.subnet_prefix; + gid->global.interface_id = tmp->global.interface_id; + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid); + +int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey) +{ + int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in); + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out); + int is_group_manager; + void *out = NULL; + void *in = NULL; + void *pkarr; + 
int nout; + int tbsz; + int err; + int i; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + + tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)); + if (pkey_index > tbsz && pkey_index != 0xffff) + return -EINVAL; + + if (pkey_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * MLX5_ST_SZ_BYTES(pkey); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY); + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num); + MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1); + } else { + err = -EPERM; + goto out; + } + } + MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index); + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto out; + + pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey); + for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey)) + *pkey = MLX5_GET_PR(pkey, pkarr, pkey); + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey); + +int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct mlx5_hca_vport_context *rep) +{ + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out); + int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)]; + int is_group_manager; + void *out; + void *ctx; + int err; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT); + + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_context_in, in, other_vport, 1); + MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num); + } else { + err = -EPERM; + goto ex; + } + } + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_context_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + if (err) + goto ex; + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto ex; + + ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context); + rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select); + rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware); + rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi); + rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw); + rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy); + rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx, + port_physical_state); + rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state); + rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx, + port_physical_state); + rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid); + rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid); + rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1); + rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx, + cap_mask1_field_select); + rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2); + rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx, + cap_mask2_field_select); + rep->lid = 
MLX5_GET_PR(hca_vport_context, ctx, lid); + rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx, + init_type_reply); + rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc); + rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx, + subnet_timeout); + rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid); + rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl); + rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx, + qkey_violation_counter); + rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx, + pkey_violation_counter); + rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required); + rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx, + system_image_guid); + +ex: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context); + +int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, + u64 *sys_image_guid) +{ + struct mlx5_hca_vport_context *rep; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep); + if (!err) + *sys_image_guid = rep->sys_image_guid; + + kfree(rep); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid); + +int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, + u64 *node_guid) +{ + struct mlx5_hca_vport_context *rep; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep); + if (!err) + *node_guid = rep->node_guid; + + kfree(rep); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c new file mode 100644 index 000000000000..8388411582cf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/mlx5/driver.h> +#include "wq.h" +#include "mlx5_core.h" + +u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) +{ + return (u32)wq->sz_m1 + 1; +} + +u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) +{ + return wq->sz_m1 + 1; +} + +u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) +{ + return (u32)wq->sz_m1 + 1; +} + +static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_get_size(wq) << wq->log_stride; +} + +static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) +{ + return mlx5_cqwq_get_size(wq) << wq->log_stride; +} + +static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) +{ + return mlx5_wq_ll_get_size(wq) << wq->log_stride; +} + +int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + int err; + + wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); + wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} + +int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *cqc, struct mlx5_cqwq *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + int err; + + wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); + wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); + wq->sz_m1 = (1 << wq->log_sz) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} + +int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + struct mlx5_wqe_srq_next_seg *next_seg; + int err; + int i; + + wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); + wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + for (i = 0; i < wq->sz_m1; i++) { + next_seg = mlx5_wq_ll_get_wqe(wq, i); + next_seg->next_wqe_index = cpu_to_be16(i + 1); + } + next_seg = mlx5_wq_ll_get_wqe(wq, i); + wq->tail_next = &next_seg->next_wqe_index; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} + +void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) +{ + mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); + mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h new file mode 100644 
index 000000000000..e0ddd69fb429 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_WQ_H__ +#define __MLX5_WQ_H__ + +#include <linux/mlx5/mlx5_ifc.h> + +struct mlx5_wq_param { + int linear; + int numa; +}; + +struct mlx5_wq_ctrl { + struct mlx5_core_dev *mdev; + struct mlx5_buf buf; + struct mlx5_db db; +}; + +struct mlx5_wq_cyc { + void *buf; + __be32 *db; + u16 sz_m1; + u8 log_stride; +}; + +struct mlx5_cqwq { + void *buf; + __be32 *db; + u32 sz_m1; + u32 cc; /* consumer counter */ + u8 log_sz; + u8 log_stride; +}; + +struct mlx5_wq_ll { + void *buf; + __be32 *db; + __be16 *tail_next; + u16 sz_m1; + u16 head; + u16 wqe_ctr; + u16 cur_sz; + u8 log_stride; +}; + +int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); + +int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *cqc, struct mlx5_cqwq *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); + +int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); + +void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); + +static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->sz_m1; +} + +static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) +{ + int equal = (cc1 == cc2); + int smaller = 0x8000 & (cc1 - cc2); + + return !equal && !smaller; +} + +static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) +{ + return wq->cc & wq->sz_m1; +} + +static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) +{ + return wq->cc >> wq->log_sz; +} + +static inline void mlx5_cqwq_pop(struct mlx5_cqwq 
*wq) +{ + wq->cc++; +} + +static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq) +{ + *wq->db = cpu_to_be32(wq->cc & 0xffffff); +} + +static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) +{ + return wq->cur_sz == wq->sz_m1; +} + +static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) +{ + return !wq->cur_sz; +} + +static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) +{ + wq->head = head_next; + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix, + __be16 *next_tail_next) +{ + *wq->tail_next = ix; + wq->tail_next = next_tail_next; + wq->cur_sz--; +} + +static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq) +{ + *wq->db = cpu_to_be32(wq->wqe_ctr); +} + +#endif /* __MLX5_WQ_H__ */ |
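
For readers unfamiliar with the new wq.h helpers added above, the following is a minimal, self-contained sketch (not part of the patch) of the index arithmetic they rely on: a free-running counter is mapped to a slot by masking with size-1, the high bits of the consumer counter give the wrap count, and the doorbell record keeps only the low 24 bits. The demo_* names are hypothetical; only the masking/wrap logic mirrors mlx5_wq_cyc_ctr2ix(), mlx5_cqwq_get_wrap_cnt(), mlx5_cqwq_pop() and mlx5_cqwq_update_db_record() from wq.h.

/*
 * Standalone userspace sketch of the wq.h index arithmetic.
 * Assumes a power-of-two queue size, as the kernel helpers do.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_cyc_wq {
	uint16_t sz_m1;   /* (1 << log_wq_sz) - 1, as in mlx5_wq_cyc/mlx5_cqwq */
	uint32_t cc;      /* consumer counter, as in mlx5_cqwq */
	uint8_t  log_sz;  /* log2 of the queue size */
};

/* mirrors mlx5_wq_cyc_ctr2ix(): counter -> slot index by masking */
static uint16_t demo_ctr2ix(const struct demo_cyc_wq *wq, uint16_t ctr)
{
	return ctr & wq->sz_m1;
}

/* mirrors mlx5_cqwq_get_wrap_cnt(): high bits count completed wraps */
static uint32_t demo_wrap_cnt(const struct demo_cyc_wq *wq)
{
	return wq->cc >> wq->log_sz;
}

int main(void)
{
	struct demo_cyc_wq wq = { .sz_m1 = (1 << 4) - 1, .log_sz = 4, .cc = 0 };
	uint32_t i;

	for (i = 0; i < 40; i++) {
		/* the slot index cycles 0..15 while the wrap count grows */
		printf("cc=%2u ix=%2u wrap=%u\n", wq.cc,
		       demo_ctr2ix(&wq, (uint16_t)wq.cc), demo_wrap_cnt(&wq));
		wq.cc++;	/* mirrors mlx5_cqwq_pop() */
	}

	/* mirrors mlx5_cqwq_update_db_record(): only the low 24 bits of the
	 * counter are written to the doorbell record */
	assert((wq.cc & 0xffffff) == 40);
	return 0;
}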