author     Mike Christie <michael.christie@oracle.com>   2022-07-17 17:45:06 -0500
committer  Mike Snitzer <snitzer@kernel.org>             2022-07-28 17:29:56 -0400
commit     701510875975ed7e188566de205990d29f34c8d8 (patch)
tree       0fb48d9ce94341af131c1f1ddb3df6e30cb6c10e /drivers/md/dm.c
parent     8dd87f3c5283de7f95396a236e420487226f3951 (diff)
dm: Start pr_reserve from the same starting path
When an app does a pr_reserve, it will go to whatever path we happen to be
using at the time. This can result in errors when the app does a second
pr_reserve call and expects success, but gets a failure because the reserve
is not done on the holder's path.

This commit has us always start trying to do reserves from the first path
in the first group.

Windows failover clustering will produce the type of pattern above. With
this commit, we will now pass its validation test for this case.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
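For reference, a minimal userspace sketch (not part of this patch) of the
reserve pattern described above, driven through the block-layer persistent
reservation ioctls from <linux/pr.h>; the device path /dev/dm-0, the key
value, and the reservation type are illustrative assumptions:

/*
 * Hedged sketch: register a PR key, then issue pr_reserve twice with the
 * same key, which is the kind of pattern a clustering validation test
 * exercises. The device path, key, and PR type are assumptions only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg;
	struct pr_reservation rsv;
	int fd = open("/dev/dm-0", O_RDWR);	/* assumed dm-multipath device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Register a reservation key on the device first. */
	memset(&reg, 0, sizeof(reg));
	reg.new_key = 0x1234abcd;		/* illustrative key */
	if (ioctl(fd, IOC_PR_REGISTER, &reg))
		perror("IOC_PR_REGISTER");

	/* First reserve: before this patch, issued on whichever path dm was using. */
	memset(&rsv, 0, sizeof(rsv));
	rsv.key = 0x1234abcd;
	rsv.type = PR_WRITE_EXCLUSIVE_REG_ONLY;
	if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("first IOC_PR_RESERVE");

	/*
	 * Second reserve with the same key: the app expects success, but it
	 * could fail if dm sent it down a path other than the holder's.
	 */
	if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("second IOC_PR_RESERVE");

	close(fd);
	return 0;
}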
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	46
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 15fc2eaa80a6..ede5feb3d778 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -3078,6 +3078,7 @@ struct dm_pr {
 	u32	flags;
 	bool	fail_early;
 	int	ret;
+	enum pr_type type;
 };
 
 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
@@ -3175,25 +3176,42 @@ static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
 	return ret;
 }
 
+
+static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
+			   sector_t start, sector_t len, void *data)
+{
+	struct dm_pr *pr = data;
+	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+	if (!ops || !ops->pr_reserve) {
+		pr->ret = -EOPNOTSUPP;
+		return -1;
+	}
+
+	pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
+	if (!pr->ret)
+		return -1;
+
+	return 0;
+}
+
 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 		u32 flags)
 {
-	struct mapped_device *md = bdev->bd_disk->private_data;
-	const struct pr_ops *ops;
-	int r, srcu_idx;
+	struct dm_pr pr = {
+		.old_key	= key,
+		.flags		= flags,
+		.type		= type,
+		.fail_early	= false,
+		.ret		= 0,
+	};
+	int ret;
 
-	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
-	if (r < 0)
-		goto out;
+	ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
+	if (ret)
+		return ret;
 
-	ops = bdev->bd_disk->fops->pr_ops;
-	if (ops && ops->pr_reserve)
-		r = ops->pr_reserve(bdev, key, type, flags);
-	else
-		r = -EOPNOTSUPP;
-out:
-	dm_unprepare_ioctl(md, srcu_idx);
-	return r;
+	return pr.ret;
 }
 
 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)