-rw-r--r--	Documentation/device-mapper/dm-raid.txt |   1
-rw-r--r--	drivers/md/dm-raid.c                    | 101
2 files changed, 38 insertions, 64 deletions
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 728c38c242d6..56fb62b09fc5 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -141,3 +141,4 @@ Version History
 1.2.0	Handle creation of arrays that contain failed devices.
 1.3.0	Added support for RAID 10
 1.3.1	Allow device replacement/rebuild for RAID 10
+1.3.2	Fix/improve redundancy checking for RAID10
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 3d8984edeff7..9e58dbd8d8cb 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -340,24 +340,22 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 }
 
 /*
- * validate_rebuild_devices
+ * validate_raid_redundancy
  * @rs
  *
- * Determine if the devices specified for rebuild can result in a valid
- * usable array that is capable of rebuilding the given devices.
+ * Determine if there are enough devices in the array that haven't
+ * failed (or are being rebuilt) to form a usable array.
  *
  * Returns: 0 on success, -EINVAL on failure.
  */
-static int validate_rebuild_devices(struct raid_set *rs)
+static int validate_raid_redundancy(struct raid_set *rs)
 {
 	unsigned i, rebuild_cnt = 0;
 	unsigned rebuilds_per_group, copies, d;
 
-	if (!(rs->print_flags & DMPF_REBUILD))
-		return 0;
-
 	for (i = 0; i < rs->md.raid_disks; i++)
-		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+		    !rs->dev[i].rdev.sb_page)
 			rebuild_cnt++;
 
 	switch (rs->raid_type->level) {
@@ -393,27 +391,24 @@ static int validate_rebuild_devices(struct raid_set *rs)
		 *	    A    A    B    B    C
		 *	    C    D    D    E    E
		 */
-		rebuilds_per_group = 0;
 		for (i = 0; i < rs->md.raid_disks * copies; i++) {
+			if (!(i % copies))
+				rebuilds_per_group = 0;
 			d = i % rs->md.raid_disks;
-			if (!test_bit(In_sync, &rs->dev[d].rdev.flags) &&
+			if ((!rs->dev[d].rdev.sb_page ||
+			     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
 			    (++rebuilds_per_group >= copies))
 				goto too_many;
-			if (!((i + 1) % copies))
-				rebuilds_per_group = 0;
 		}
 		break;
 	default:
-		DMERR("The rebuild parameter is not supported for %s",
-		      rs->raid_type->name);
-		rs->ti->error = "Rebuild not supported for this RAID type";
-		return -EINVAL;
+		if (rebuild_cnt)
+			return -EINVAL;
 	}
 
 	return 0;
 
 too_many:
-	rs->ti->error = "Too many rebuild devices specified";
 	return -EINVAL;
 }
 
@@ -664,9 +659,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	}
 	rs->md.dev_sectors = sectors_per_dev;
 
-	if (validate_rebuild_devices(rs))
-		return -EINVAL;
-
 	/* Assume there are no metadata devices until the drives are parsed */
 	rs->md.persistent = 0;
 	rs->md.external = 1;
@@ -995,28 +987,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 {
 	int ret;
-	unsigned redundancy = 0;
 	struct raid_dev *dev;
 	struct md_rdev *rdev, *tmp, *freshest;
 	struct mddev *mddev = &rs->md;
 
-	switch (rs->raid_type->level) {
-	case 1:
-		redundancy = rs->md.raid_disks - 1;
-		break;
-	case 4:
-	case 5:
-	case 6:
-		redundancy = rs->raid_type->parity_devs;
-		break;
-	case 10:
-		redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
-		break;
-	default:
-		ti->error = "Unknown RAID type";
-		return -EINVAL;
-	}
-
 	freshest = NULL;
 	rdev_for_each_safe(rdev, tmp, mddev) {
 		/*
@@ -1045,44 +1019,43 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 			break;
 		default:
 			dev = container_of(rdev, struct raid_dev, rdev);
-			if (redundancy--) {
-				if (dev->meta_dev)
-					dm_put_device(ti, dev->meta_dev);
-
-				dev->meta_dev = NULL;
-				rdev->meta_bdev = NULL;
+			if (dev->meta_dev)
+				dm_put_device(ti, dev->meta_dev);
 
-				if (rdev->sb_page)
-					put_page(rdev->sb_page);
+			dev->meta_dev = NULL;
+			rdev->meta_bdev = NULL;
 
-				rdev->sb_page = NULL;
+			if (rdev->sb_page)
+				put_page(rdev->sb_page);
 
-				rdev->sb_loaded = 0;
+			rdev->sb_page = NULL;
 
-				/*
-				 * We might be able to salvage the data device
-				 * even though the meta device has failed. For
-				 * now, we behave as though '- -' had been
-				 * set for this device in the table.
-				 */
-				if (dev->data_dev)
-					dm_put_device(ti, dev->data_dev);
+			rdev->sb_loaded = 0;
 
-				dev->data_dev = NULL;
-				rdev->bdev = NULL;
+			/*
+			 * We might be able to salvage the data device
+			 * even though the meta device has failed. For
+			 * now, we behave as though '- -' had been
+			 * set for this device in the table.
+			 */
+			if (dev->data_dev)
+				dm_put_device(ti, dev->data_dev);
 
-				list_del(&rdev->same_set);
+			dev->data_dev = NULL;
+			rdev->bdev = NULL;
 
-				continue;
-			}
-			ti->error = "Failed to load superblock";
-			return ret;
+			list_del(&rdev->same_set);
 		}
 	}
 
 	if (!freshest)
 		return 0;
 
+	if (validate_raid_redundancy(rs)) {
+		rs->ti->error = "Insufficient redundancy to activate array";
+		return -EINVAL;
+	}
+
 	/*
 	 * Validation of the freshest device provides the source of
 	 * validation for the remaining devices.
@@ -1432,7 +1405,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 4, 0},
+	.version = {1, 4, 1},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
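
For readers following the RAID10 changes above, here is a small userspace sketch of the "near"-layout walk that validate_raid_redundancy() performs. It is not kernel code: the helper name near_layout_is_usable(), the failed[] sample arrays, and the 5-disk/2-copy scenario are invented for illustration; only the group-counting idea mirrors the patch (reset the per-group counter at each group boundary and reject the array once an entire mirror group has no healthy member).

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical helper: returns true when every mirror group of a RAID10
 * "near" layout still has at least one in-sync device.  failed[d] marks
 * device d as failed or awaiting rebuild.
 */
static bool near_layout_is_usable(const bool *failed, unsigned raid_disks,
				  unsigned copies)
{
	unsigned i, d, rebuilds_per_group = 0;

	for (i = 0; i < raid_disks * copies; i++) {
		if (!(i % copies))	/* a new mirror group starts here */
			rebuilds_per_group = 0;
		d = i % raid_disks;	/* device holding this copy */
		if (failed[d] && ++rebuilds_per_group >= copies)
			return false;	/* whole group unavailable */
	}
	return true;
}

int main(void)
{
	/* 5 devices, 2 near copies: chunks laid out  A A B B C / C D D E E */
	bool spread_failures[5]   = { true, false, true, false, false };
	bool adjacent_failures[5] = { true, true, false, false, false };

	printf("dev0 and dev2 failed: %s\n",
	       near_layout_is_usable(spread_failures, 5, 2) ? "usable" : "lost");
	printf("dev0 and dev1 failed: %s\n",
	       near_layout_is_usable(adjacent_failures, 5, 2) ? "usable" : "lost");
	return 0;
}

With the A A B B C / C D D E E layout from the comment in the patch, losing dev0 and dev2 leaves every chunk with one healthy copy, while losing the adjacent dev0 and dev1 destroys chunk A outright; the per-group counter distinguishes exactly these two cases.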