From 3f5fe0b2e606ab71d3425c138e311bce60b09543 Mon Sep 17 00:00:00 2001
From: Maxim Mikityanskiy
Date: Fri, 30 Sep 2022 09:28:53 -0700
Subject: net/mlx5e: xsk: Use partial batches in legacy RQ with XSK

The previous commit allowed allocating WQE batches in legacy RQ
partially; however, XSK still checks in advance whether there are
enough frames in the fill ring. Remove this check to allow allocating
batches partially with XSK as well.

Signed-off-by: Maxim Mikityanskiy
Reviewed-by: Tariq Toukan
Signed-off-by: Saeed Mahameed
Signed-off-by: Jakub Kicinski
---
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ffca217b7d7e..80f2b5960782 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -429,17 +429,6 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 	int i;
 
-	if (rq->xsk_pool) {
-		int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
-
-		/* Check in advance that we have enough frames, instead of
-		 * allocating one-by-one, failing and moving frames to the
-		 * Reuse Ring.
-		 */
-		if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
-			return -ENOMEM;
-	}
-
 	for (i = 0; i < wqe_bulk; i++) {
 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
 		struct mlx5e_rx_wqe_cyc *wqe;
@@ -841,8 +830,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 	bulk = wqe_bulk - ((head + wqe_bulk) & rq->wqe.info.wqe_index_mask);
 
 	count = mlx5e_alloc_rx_wqes(rq, head, bulk);
-	if (likely(count > 0))
-		mlx5_wq_cyc_push_n(wq, count);
+	mlx5_wq_cyc_push_n(wq, count);
 	if (unlikely(count != bulk)) {
 		rq->stats->buff_alloc_err++;
 		busy = true;
-- 
cgit v1.2.3
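
For illustration, below is a minimal standalone C sketch of the partial-batch pattern this patch enables: allocate as many entries as the pool allows, push whatever count was obtained (even zero), and flag the queue busy when the batch comes up short so it is retried later. The toy_* names and the simplified pool are assumptions made for the example only; this is not the driver's code.

/*
 * Minimal userspace model of the partial-batch flow (illustrative only;
 * toy_* names are not mlx5e symbols).
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_pool {
	int frames_left;	/* frames still available in the "fill ring" */
};

/* Allocate one frame; fails once the pool is exhausted. */
static bool toy_alloc_frame(struct toy_pool *pool)
{
	if (pool->frames_left <= 0)
		return false;
	pool->frames_left--;
	return true;
}

/* Allocate up to wqe_bulk frames; return how many actually succeeded. */
static int toy_alloc_rx_wqes(struct toy_pool *pool, int wqe_bulk)
{
	int i;

	for (i = 0; i < wqe_bulk; i++)
		if (!toy_alloc_frame(pool))
			break;
	return i;
}

int main(void)
{
	struct toy_pool pool = { .frames_left = 5 };
	int bulk = 8;
	int count = toy_alloc_rx_wqes(&pool, bulk);

	/* Push however many WQEs were filled, even if that is zero ... */
	printf("pushed %d of %d WQEs\n", count, bulk);

	/* ... and if the batch was partial, note the shortfall and retry later. */
	if (count != bulk)
		printf("partial batch: mark the ring busy and retry\n");

	return 0;
}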