author     Leon Romanovsky <leonro@mellanox.com>    2018-12-26 15:22:12 +0200
committer  Jason Gunthorpe <jgg@mellanox.com>       2019-01-02 09:40:34 -0700
commit     ccffa545485714dcb001c78ffaa575a8192ed5e4 (patch)
tree       56ab241285187306f42f1608fddf1f71d307e4d2 /drivers/infiniband
parent     7422edce73559d2985f322145c865cea2da3c152 (diff)
Revert "IB/mlx5: Fix long EEH recover time with NVMe offloads"
Longer term testing shows this patch didn't play well with the MR cache and caused call traces during remove_mkeys().

This reverts commit bb7e22a8ab00ff9ba911a45ba8784cef9e6d6f7a.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
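For context on what the revert restores: before a cached mkey is freed, the driver issues an SRCU grace period so that any ODP page-fault handler still dereferencing the MR under srcu_read_lock() has finished. The reverted patch made that synchronize_srcu() call conditional on an ODP mkey actually being present in the batch; the revert returns to the unconditional call, as seen in the hunks below. The following is a minimal, hedged sketch of that pattern using the kernel SRCU API; the demo_* structure names and the free_mkey() helper are illustrative stand-ins, not the actual mlx5 code.

#include <linux/srcu.h>
#include <linux/list.h>

/* Illustrative only: stand-ins for the driver's device and MR structures. */
struct demo_dev {
	struct srcu_struct mr_srcu;	/* readers: ODP page-fault handlers */
};

struct demo_mr {
	struct list_head list;
};

/* Reader side (e.g. a page-fault handler): holds the SRCU read lock for as
 * long as it may still dereference the MR. */
static void demo_handle_fault(struct demo_dev *dev, struct demo_mr *mr)
{
	int idx = srcu_read_lock(&dev->mr_srcu);
	/* ... use mr ... */
	srcu_read_unlock(&dev->mr_srcu, idx);
}

/* Writer side: after unlinking MRs from the cache, wait for every reader
 * that could still see them before actually freeing the mkeys.  The revert
 * makes this wait unconditional rather than gated on "an ODP mkey exists". */
static void demo_remove_keys(struct demo_dev *dev, struct list_head *del_list)
{
	struct demo_mr *mr, *tmp;

	synchronize_srcu(&dev->mr_srcu);	/* SRCU grace period */

	list_for_each_entry_safe(mr, tmp, del_list, list) {
		list_del(&mr->list);
		/* free_mkey(mr);  -- hypothetical cleanup helper */
	}
}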
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c  19
1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1bd8c1b1dba1..fd6ea1f75085 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -73,8 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Wait until all page fault handlers using the mr complete. */
- if (mr->umem && mr->umem->is_odp)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
return err;
@@ -238,9 +237,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- bool odp_mkey_exist = false;
-#endif
struct mlx5_ib_mr *tmp_mr;
struct mlx5_ib_mr *mr;
LIST_HEAD(del_list);
@@ -253,10 +249,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
break;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (mr->umem && mr->umem->is_odp)
- odp_mkey_exist = true;
-#endif
list_move(&mr->list, &del_list);
ent->cur--;
ent->size--;
@@ -265,8 +257,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (odp_mkey_exist)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@ -581,7 +572,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
- bool odp_mkey_exist = false;
struct mlx5_ib_mr *tmp_mr;
struct mlx5_ib_mr *mr;
LIST_HEAD(del_list);
@@ -594,8 +584,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
break;
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- if (mr->umem && mr->umem->is_odp)
- odp_mkey_exist = true;
list_move(&mr->list, &del_list);
ent->cur--;
ent->size--;
@@ -604,8 +592,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (odp_mkey_exist)
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->mr_srcu);
#endif
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {