author      Eli Cohen <eli@mellanox.com>            2017-01-03 23:55:21 +0200
committer   Leon Romanovsky <leon@kernel.org>       2017-01-08 11:21:26 +0200
commit      2f5ff26478adaff5ed9b7ad4079d6a710b5f27e7 (patch)
tree        52b102a740dcdc611100d73e0185a15012b518be /drivers/net/ethernet/mellanox
parent      f4044dac63e952ac1137b6df02b233d37696e2f5 (diff)
download    linux-2f5ff26478adaff5ed9b7ad4079d6a710b5f27e7.tar.bz2
mlx5: Fix naming convention with respect to UARs
This establishes a solid naming convention for UARs. A UAR (User Access
Region) can have a size identical to a system page, or be a fixed 4KB,
depending on a value queried from firmware. Each UAR always has 4 blue
flame registers, which are used to post doorbells to send queues. In
addition, a UAR has a section used for posting doorbells to CQs or EQs.
In this patch we change names to reflect these conventions.
Signed-off-by: Eli Cohen <eli@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
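
As an illustration of the convention described above (not part of the patch),
the sketch below shows how a blue flame register (bfreg) index maps to its UAR
and to a byte offset inside that UAR, mirroring the bf->uar and bf->offset
assignments in the uar.c hunk further down. MLX5_BFREGS_PER_UAR is taken from
the diff; the MLX5_BF_OFFSET and register-size values, and the helper names,
are placeholder assumptions for this sketch only.

/*
 * Illustrative sketch only, not part of the patch: how a bfreg index maps
 * to its UAR and to a byte offset inside that UAR. MLX5_BFREGS_PER_UAR
 * comes from the diff; MLX5_BF_OFFSET and LOG_BF_REG_SIZE are assumed
 * example values (the driver reads the real ones from firmware caps).
 */
#include <stdio.h>

#define MLX5_BFREGS_PER_UAR 4      /* each UAR carries 4 blue flame registers */
#define MLX5_BF_OFFSET      0x800  /* start of the blue flame area in a UAR (assumed value) */
#define LOG_BF_REG_SIZE     9      /* example: 512-byte blue flame registers */

/* Which UAR a given bfreg lives in. */
static int bfreg_to_uar_index(int bfregn)
{
        return bfregn / MLX5_BFREGS_PER_UAR;
}

/* Byte offset of a bfreg inside its UAR. */
static int bfreg_offset(int bfregn)
{
        return (bfregn % MLX5_BFREGS_PER_UAR) * (1 << LOG_BF_REG_SIZE) + MLX5_BF_OFFSET;
}

int main(void)
{
        int i;

        for (i = 0; i < 2 * MLX5_BFREGS_PER_UAR; i++)
                printf("bfreg %d -> uar %d, offset 0x%x\n",
                       i, bfreg_to_uar_index(i), bfreg_offset(i));
        return 0;
}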
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/eq.c      8
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/main.c    8
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/uar.c    90
3 files changed, 53 insertions, 53 deletions
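
For orientation only, here is a reduced, compilable sketch of the bookkeeping
that the renamed uar.c code below operates on. The real struct mlx5_bfreg_info
and struct mlx5_bf definitions live in the mlx5 headers, which are outside
this diffstat; the demo_ names and the exact field layout are assumptions that
only mirror the fields touched in the hunks below.

#include <stdint.h>

/* Placeholder for struct mlx5_bf: per-bfreg state (owning UAR, offset, locks). */
struct demo_bf;

struct demo_uar {
        uint32_t index;                 /* UAR index returned by firmware (mlx5_cmd_alloc_uar()) */
        void *map;                      /* the UAR page, ioremap()ed by the driver */
};

struct demo_bfreg_info {
        struct demo_uar *uars;          /* one entry per allocated UAR */
        struct demo_bf *bfs;            /* one entry per blue flame register */
        unsigned long *bitmap;          /* bfreg allocation bitmap */
        unsigned int *count;            /* per-bfreg use counts */
        int num_uars;                   /* NUM_DRIVER_UARS in the patch */
        int num_low_latency_bfregs;     /* NUM_LOW_LAT_BFREGS in the patch */
};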
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 4aff8ac68e14..11a8d638bcd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -686,7 +686,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
 				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
-				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0],
+				 "mlx5_cmd_eq", &dev->priv.bfregi.uars[0],
 				 MLX5_EQ_TYPE_ASYNC);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
@@ -697,7 +697,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
 				 MLX5_NUM_ASYNC_EQE, async_event_mask,
-				 "mlx5_async_eq", &dev->priv.uuari.uars[0],
+				 "mlx5_async_eq", &dev->priv.bfregi.uars[0],
 				 MLX5_EQ_TYPE_ASYNC);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
@@ -708,7 +708,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 				 MLX5_EQ_VEC_PAGES, /* TODO: sriov max_vf + */ 1,
 				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
-				 &dev->priv.uuari.uars[0],
+				 &dev->priv.bfregi.uars[0],
 				 MLX5_EQ_TYPE_ASYNC);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
@@ -722,7 +722,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 					 MLX5_NUM_ASYNC_EQE,
 					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 					 "mlx5_page_fault_eq",
-					 &dev->priv.uuari.uars[0],
+					 &dev->priv.bfregi.uars[0],
 					 MLX5_EQ_TYPE_PF);
 		if (err) {
 			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f4115135e30b..634e96a02516 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -753,7 +753,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
 		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(dev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 name, &dev->priv.uuari.uars[0],
+					 name, &dev->priv.bfregi.uars[0],
 					 MLX5_EQ_TYPE_COMP);
 		if (err) {
 			kfree(eq);
@@ -1094,7 +1094,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 		goto err_cleanup_once;
 	}
 
-	err = mlx5_alloc_uuars(dev, &priv->uuari);
+	err = mlx5_alloc_bfregs(dev, &priv->bfregi);
 	if (err) {
 		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
 		goto err_disable_msix;
 	}
@@ -1170,7 +1170,7 @@ err_stop_eqs:
 	mlx5_stop_eqs(dev);
 
 err_free_uar:
-	mlx5_free_uuars(dev, &priv->uuari);
+	mlx5_free_bfregs(dev, &priv->bfregi);
 
 err_disable_msix:
 	mlx5_disable_msix(dev);
@@ -1230,7 +1230,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	mlx5_irq_clear_affinity_hints(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
-	mlx5_free_uuars(dev, &priv->uuari);
+	mlx5_free_bfregs(dev, &priv->bfregi);
 	mlx5_disable_msix(dev);
 	if (cleanup)
 		mlx5_cleanup_once(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index ab0b896621a0..ce7fcebb81a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -39,7 +39,7 @@ enum {
 	NUM_DRIVER_UARS		= 4,
-	NUM_LOW_LAT_UUARS	= 4,
+	NUM_LOW_LAT_BFREGS	= 4,
 };
 
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
@@ -67,116 +67,116 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
 }
 EXPORT_SYMBOL(mlx5_cmd_free_uar);
 
-static int need_uuar_lock(int uuarn)
+static int need_bfreg_lock(int bfregn)
 {
-	int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
+	int tot_bfregs = NUM_DRIVER_UARS * MLX5_BFREGS_PER_UAR;
 
-	if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
+	if (bfregn == 0 || tot_bfregs - NUM_LOW_LAT_BFREGS)
 		return 0;
 
 	return 1;
 }
 
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+int mlx5_alloc_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi)
 {
-	int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
+	int tot_bfregs = NUM_DRIVER_UARS * MLX5_BFREGS_PER_UAR;
 	struct mlx5_bf *bf;
 	phys_addr_t addr;
 	int err;
 	int i;
 
-	uuari->num_uars = NUM_DRIVER_UARS;
-	uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
+	bfregi->num_uars = NUM_DRIVER_UARS;
+	bfregi->num_low_latency_bfregs = NUM_LOW_LAT_BFREGS;
 
-	mutex_init(&uuari->lock);
-	uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
-	if (!uuari->uars)
+	mutex_init(&bfregi->lock);
+	bfregi->uars = kcalloc(bfregi->num_uars, sizeof(*bfregi->uars), GFP_KERNEL);
+	if (!bfregi->uars)
 		return -ENOMEM;
 
-	uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
-	if (!uuari->bfs) {
+	bfregi->bfs = kcalloc(tot_bfregs, sizeof(*bfregi->bfs), GFP_KERNEL);
+	if (!bfregi->bfs) {
 		err = -ENOMEM;
 		goto out_uars;
 	}
 
-	uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
+	bfregi->bitmap = kcalloc(BITS_TO_LONGS(tot_bfregs), sizeof(*bfregi->bitmap),
 				GFP_KERNEL);
-	if (!uuari->bitmap) {
+	if (!bfregi->bitmap) {
 		err = -ENOMEM;
 		goto out_bfs;
 	}
 
-	uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
-	if (!uuari->count) {
+	bfregi->count = kcalloc(tot_bfregs, sizeof(*bfregi->count), GFP_KERNEL);
+	if (!bfregi->count) {
 		err = -ENOMEM;
 		goto out_bitmap;
 	}
 
-	for (i = 0; i < uuari->num_uars; i++) {
-		err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
+	for (i = 0; i < bfregi->num_uars; i++) {
+		err = mlx5_cmd_alloc_uar(dev, &bfregi->uars[i].index);
 		if (err)
 			goto out_count;
 
-		addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
-		uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
-		if (!uuari->uars[i].map) {
-			mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+		addr = dev->iseg_base + ((phys_addr_t)(bfregi->uars[i].index) << PAGE_SHIFT);
+		bfregi->uars[i].map = ioremap(addr, PAGE_SIZE);
+		if (!bfregi->uars[i].map) {
+			mlx5_cmd_free_uar(dev, bfregi->uars[i].index);
 			err = -ENOMEM;
 			goto out_count;
 		}
 		mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
-			      uuari->uars[i].index, uuari->uars[i].map);
+			      bfregi->uars[i].index, bfregi->uars[i].map);
 	}
 
-	for (i = 0; i < tot_uuars; i++) {
-		bf = &uuari->bfs[i];
+	for (i = 0; i < tot_bfregs; i++) {
+		bf = &bfregi->bfs[i];
 
 		bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
-		bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
-		bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
+		bf->uar = &bfregi->uars[i / MLX5_BFREGS_PER_UAR];
+		bf->regreg = bfregi->uars[i / MLX5_BFREGS_PER_UAR].map;
 		bf->reg = NULL; /* Add WC support */
-		bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+		bf->offset = (i % MLX5_BFREGS_PER_UAR) *
 			     (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + MLX5_BF_OFFSET;
-		bf->need_lock = need_uuar_lock(i);
+		bf->need_lock = need_bfreg_lock(i);
 		spin_lock_init(&bf->lock);
 		spin_lock_init(&bf->lock32);
-		bf->uuarn = i;
+		bf->bfregn = i;
	}
 
 	return 0;
 
 out_count:
 	for (i--; i >= 0; i--) {
-		iounmap(uuari->uars[i].map);
-		mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+		iounmap(bfregi->uars[i].map);
+		mlx5_cmd_free_uar(dev, bfregi->uars[i].index);
 	}
-	kfree(uuari->count);
+	kfree(bfregi->count);
 
 out_bitmap:
-	kfree(uuari->bitmap);
+	kfree(bfregi->bitmap);
 
 out_bfs:
-	kfree(uuari->bfs);
+	kfree(bfregi->bfs);
 
 out_uars:
-	kfree(uuari->uars);
+	kfree(bfregi->uars);
 	return err;
 }
 
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+int mlx5_free_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi)
 {
-	int i = uuari->num_uars;
+	int i = bfregi->num_uars;
 
 	for (i--; i >= 0; i--) {
-		iounmap(uuari->uars[i].map);
-		mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+		iounmap(bfregi->uars[i].map);
+		mlx5_cmd_free_uar(dev, bfregi->uars[i].index);
 	}
 
-	kfree(uuari->count);
-	kfree(uuari->bitmap);
-	kfree(uuari->bfs);
-	kfree(uuari->uars);
+	kfree(bfregi->count);
+	kfree(bfregi->bitmap);
+	kfree(bfregi->bfs);
+	kfree(bfregi->uars);
 
 	return 0;
 }