Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-ioctl.c     14
-rw-r--r--  drivers/md/dm-log.c       48
-rw-r--r--  drivers/md/dm-snap.c      29
-rw-r--r--  drivers/md/dm-snap.h       4
-rw-r--r--  drivers/md/dm-stripe.c     8
-rw-r--r--  drivers/md/dm-table.c      2
-rw-r--r--  drivers/md/dm.c           47
-rw-r--r--  drivers/md/md.c           48
-rw-r--r--  drivers/md/raid0.c         2
-rw-r--r--  drivers/md/raid1.c        13
-rw-r--r--  drivers/md/raid10.c        2
-rw-r--r--  drivers/md/raid5.c         3
-rw-r--r--  drivers/md/raid6main.c   152
13 files changed, 242 insertions, 130 deletions
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1235135b384b..442e2be6052e 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1359,16 +1359,11 @@ static int ctl_ioctl(struct inode *inode, struct file *file,
* Copy the parameters into kernel space.
*/
r = copy_params(user, &param);
- if (r) {
- current->flags &= ~PF_MEMALLOC;
- return r;
- }
- /*
- * FIXME: eventually we will remove the PF_MEMALLOC flag
- * here. However the tools still do nasty things like
- * 'load' while a device is suspended.
- */
+ current->flags &= ~PF_MEMALLOC;
+
+ if (r)
+ return r;
r = validate_params(cmd, param);
if (r)
@@ -1386,7 +1381,6 @@ static int ctl_ioctl(struct inode *inode, struct file *file,
out:
free_params(param);
- current->flags &= ~PF_MEMALLOC;
return r;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index efe4adf78530..d73779a42417 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -112,7 +112,7 @@ void dm_destroy_dirty_log(struct dirty_log *log)
/*
* The on-disk version of the metadata.
*/
-#define MIRROR_DISK_VERSION 1
+#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2
struct log_header {
@@ -157,7 +157,6 @@ struct log_c {
struct log_header *disk_header;
struct io_region bits_location;
- uint32_t *disk_bits;
};
/*
@@ -166,20 +165,20 @@ struct log_c {
*/
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
- return test_bit(bit, (unsigned long *) bs) ? 1 : 0;
+ return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- set_bit(bit, (unsigned long *) bs);
+ ext2_set_bit(bit, (unsigned long *) bs);
l->touched = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- clear_bit(bit, (unsigned long *) bs);
+ ext2_clear_bit(bit, (unsigned long *) bs);
l->touched = 1;
}
@@ -219,6 +218,11 @@ static int read_header(struct log_c *log)
log->header.nr_regions = 0;
}
+#ifdef __LITTLE_ENDIAN
+ if (log->header.version == 1)
+ log->header.version = 2;
+#endif
+
if (log->header.version != MIRROR_DISK_VERSION) {
DMWARN("incompatible disk log version");
return -EINVAL;
@@ -239,45 +243,24 @@ static inline int write_header(struct log_c *log)
/*----------------------------------------------------------------
* Bits IO
*--------------------------------------------------------------*/
-static inline void bits_to_core(uint32_t *core, uint32_t *disk, unsigned count)
-{
- unsigned i;
-
- for (i = 0; i < count; i++)
- core[i] = le32_to_cpu(disk[i]);
-}
-
-static inline void bits_to_disk(uint32_t *core, uint32_t *disk, unsigned count)
-{
- unsigned i;
-
- /* copy across the clean/dirty bitset */
- for (i = 0; i < count; i++)
- disk[i] = cpu_to_le32(core[i]);
-}
-
static int read_bits(struct log_c *log)
{
int r;
unsigned long ebits;
r = dm_io_sync_vm(1, &log->bits_location, READ,
- log->disk_bits, &ebits);
+ log->clean_bits, &ebits);
if (r)
return r;
- bits_to_core(log->clean_bits, log->disk_bits,
- log->bitset_uint32_count);
return 0;
}
static int write_bits(struct log_c *log)
{
unsigned long ebits;
- bits_to_disk(log->clean_bits, log->disk_bits,
- log->bitset_uint32_count);
return dm_io_sync_vm(1, &log->bits_location, WRITE,
- log->disk_bits, &ebits);
+ log->clean_bits, &ebits);
}
/*----------------------------------------------------------------
@@ -433,11 +416,6 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
size = dm_round_up(lc->bitset_uint32_count * sizeof(uint32_t),
1 << SECTOR_SHIFT);
lc->bits_location.count = size >> SECTOR_SHIFT;
- lc->disk_bits = vmalloc(size);
- if (!lc->disk_bits) {
- vfree(lc->disk_header);
- goto bad;
- }
return 0;
bad:
@@ -451,7 +429,6 @@ static void disk_dtr(struct dirty_log *log)
struct log_c *lc = (struct log_c *) log->context;
dm_put_device(lc->ti, lc->log_dev);
vfree(lc->disk_header);
- vfree(lc->disk_bits);
core_dtr(log);
}
@@ -568,7 +545,8 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
return 0;
do {
- *region = find_next_zero_bit((unsigned long *) lc->sync_bits,
+ *region = ext2_find_next_zero_bit(
+ (unsigned long *) lc->sync_bits,
lc->region_count,
lc->sync_search);
lc->sync_search = *region + 1;
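Note on the bit-op change above: the generic test_bit()/set_bit()/clear_bit() helpers operate on native-endian unsigned longs, so the on-disk layout of the clean/dirty bitmap differed between architectures and had to pass through the separate disk_bits buffer with le32 conversions. The ext2_*_bit() helpers use a fixed little-endian byte/bit layout, which lets the bitmap be written to disk directly; the disk version is bumped to 2, and version-1 logs are still accepted on little-endian hosts, where the two layouts coincide. Below is a minimal userspace sketch, not kernel code, with hypothetical le_set_bit()/le_test_bit() standing in for ext2_set_bit()/ext2_test_bit():

/*
 * Illustrative only: bit N always lands in byte N/8 at position N%8,
 * regardless of host endianness -- the property the dm-log bitmap now
 * relies on for its on-disk format.
 */
#include <stdint.h>
#include <stdio.h>

static void le_set_bit(unsigned int bit, uint8_t *map)
{
	map[bit / 8] |= 1u << (bit % 8);
}

static int le_test_bit(unsigned int bit, const uint8_t *map)
{
	return (map[bit / 8] >> (bit % 8)) & 1;
}

int main(void)
{
	uint8_t map[8] = { 0 };

	le_set_bit(9, map);	/* region 9: byte 1, bit 1, on any architecture */
	printf("byte 1 = 0x%02x, bit 9 set = %d\n", map[1], le_test_bit(9, map));
	return 0;
}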
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 87727d84dbba..f3759dd7828e 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -373,16 +373,11 @@ static inline ulong round_up(ulong n, ulong size)
static void read_snapshot_metadata(struct dm_snapshot *s)
{
- if (s->have_metadata)
- return;
-
if (s->store.read_metadata(&s->store)) {
down_write(&s->lock);
s->valid = 0;
up_write(&s->lock);
}
-
- s->have_metadata = 1;
}
/*
@@ -471,7 +466,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->chunk_shift = ffs(chunk_size) - 1;
s->valid = 1;
- s->have_metadata = 0;
+ s->active = 0;
s->last_percent = 0;
init_rwsem(&s->lock);
s->table = ti->table;
@@ -506,7 +501,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad5;
}
+ /* Metadata must only be loaded into one table at once */
+ read_snapshot_metadata(s);
+
/* Add snapshot to the list of snapshots for this origin */
+ /* Exceptions aren't triggered till snapshot_resume() is called */
if (register_snapshot(s)) {
r = -EINVAL;
ti->error = "Cannot register snapshot origin";
@@ -793,6 +792,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
if (!s->valid)
return -EIO;
+ if (unlikely(bio_barrier(bio)))
+ return -EOPNOTSUPP;
+
/*
* Write to snapshot - higher level takes care of RW/RO
* flags so we should only get this if we are
@@ -862,7 +864,9 @@ static void snapshot_resume(struct dm_target *ti)
{
struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
- read_snapshot_metadata(s);
+ down_write(&s->lock);
+ s->active = 1;
+ up_write(&s->lock);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
@@ -932,8 +936,8 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
/* Do all the snapshots on this origin */
list_for_each_entry (snap, snapshots, list) {
- /* Only deal with valid snapshots */
- if (!snap->valid)
+ /* Only deal with valid and active snapshots */
+ if (!snap->valid || !snap->active)
continue;
/* Nothing to do if writing beyond end of snapshot */
@@ -1057,6 +1061,9 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
struct dm_dev *dev = (struct dm_dev *) ti->private;
bio->bi_bdev = dev->bdev;
+ if (unlikely(bio_barrier(bio)))
+ return -EOPNOTSUPP;
+
/* Only tell snapshots if this is a write */
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : 1;
}
@@ -1104,7 +1111,7 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 0, 1},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -1115,7 +1122,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 0, 1},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 375aa24d4d7d..fdec1e2dc871 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -99,7 +99,9 @@ struct dm_snapshot {
/* You can't use a snapshot if this is 0 (e.g. if full) */
int valid;
- int have_metadata;
+
+ /* Origin writes don't trigger exceptions until this is set */
+ int active;
/* Used for display of table */
char type;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index ab89278a56bf..697aacafb02a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -103,9 +103,15 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
+ if (((uint32_t)ti->len) & (chunk_size - 1)) {
+ ti->error = "dm-stripe: Target length not divisible by "
+ "chunk size";
+ return -EINVAL;
+ }
+
width = ti->len;
if (sector_div(width, stripes)) {
- ti->error = "dm-stripe: Target length not divisable by "
+ ti->error = "dm-stripe: Target length not divisible by "
"number of stripes";
return -EINVAL;
}
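The new length check in stripe_ctr() above relies on dm-stripe accepting only power-of-two chunk sizes, so the mask (chunk_size - 1) extracts the remainder of the target length modulo the chunk size. A standalone sketch of that arithmetic, with illustrative values rather than dm code:

/*
 * For a power-of-two chunk_size, (len & (chunk_size - 1)) equals
 * len % chunk_size, so a non-zero result means the target length
 * is not chunk-aligned.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t chunk_size = 64;	/* sectors, power of two */
	uint64_t lens[] = { 1024, 1000 };
	int i;

	for (i = 0; i < 2; i++)
		printf("len %llu -> remainder %llu\n",
		       (unsigned long long)lens[i],
		       (unsigned long long)(lens[i] & (chunk_size - 1)));
	return 0;
}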
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a6f2dc66c3db..9b1e2f5ca630 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -508,7 +508,7 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
if (q->merge_bvec_fn)
rs->max_sectors =
min_not_zero(rs->max_sectors,
- (unsigned short)(PAGE_SIZE >> 9));
+ (unsigned int) (PAGE_SIZE >> 9));
rs->max_phys_segments =
min_not_zero(rs->max_phys_segments,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8c16359f8b01..745ca1f67b14 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -31,6 +31,7 @@ struct dm_io {
int error;
struct bio *bio;
atomic_t io_count;
+ unsigned long start_time;
};
/*
@@ -244,6 +245,36 @@ static inline void free_tio(struct mapped_device *md, struct target_io *tio)
mempool_free(tio, md->tio_pool);
}
+static void start_io_acct(struct dm_io *io)
+{
+ struct mapped_device *md = io->md;
+
+ io->start_time = jiffies;
+
+ preempt_disable();
+ disk_round_stats(dm_disk(md));
+ preempt_enable();
+ dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+}
+
+static int end_io_acct(struct dm_io *io)
+{
+ struct mapped_device *md = io->md;
+ struct bio *bio = io->bio;
+ unsigned long duration = jiffies - io->start_time;
+ int pending;
+ int rw = bio_data_dir(bio);
+
+ preempt_disable();
+ disk_round_stats(dm_disk(md));
+ preempt_enable();
+ dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+
+ disk_stat_add(dm_disk(md), ticks[rw], duration);
+
+ return !pending;
+}
+
/*
* Add the bio to the list of deferred io.
*/
@@ -299,7 +330,7 @@ static void dec_pending(struct dm_io *io, int error)
io->error = error;
if (atomic_dec_and_test(&io->io_count)) {
- if (atomic_dec_and_test(&io->md->pending))
+ if (end_io_acct(io))
/* nudge anyone waiting on suspend queue */
wake_up(&io->md->wait);
@@ -554,7 +585,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
ci.sector_count = bio_sectors(bio);
ci.idx = bio->bi_idx;
- atomic_inc(&md->pending);
+ start_io_acct(ci.io);
while (ci.sector_count)
__clone_and_map(&ci);
@@ -573,10 +604,14 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
static int dm_request(request_queue_t *q, struct bio *bio)
{
int r;
+ int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
down_read(&md->io_lock);
+ disk_stat_inc(dm_disk(md), ios[rw]);
+ disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
+
/*
* If we're suspended we have to queue
* this io for later.
@@ -814,10 +849,16 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
static void free_dev(struct mapped_device *md)
{
- free_minor(md->disk->first_minor);
+ unsigned int minor = md->disk->first_minor;
+
+ if (md->suspended_bdev) {
+ thaw_bdev(md->suspended_bdev, NULL);
+ bdput(md->suspended_bdev);
+ }
mempool_destroy(md->tio_pool);
mempool_destroy(md->io_pool);
del_gendisk(md->disk);
+ free_minor(minor);
put_disk(md->disk);
blk_put_queue(md->queue);
kfree(md);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7145cd150f7b..d05e3125d298 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1024,7 +1024,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
rdev-> sb_size = (rdev->sb_size | bmask)+1;
if (refdev == 0)
- return 1;
+ ret = 1;
else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb =
@@ -1044,7 +1044,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
ev2 = le64_to_cpu(refsb->events);
if (ev1 > ev2)
- return 1;
+ ret = 1;
+ else
+ ret = 0;
}
if (minor_version)
rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
@@ -1058,7 +1060,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
if (le32_to_cpu(sb->size) > rdev->size*2)
return -EINVAL;
- return 0;
+ return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
@@ -1081,7 +1083,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->size = le64_to_cpu(sb->size)/2;
mddev->events = le64_to_cpu(sb->events);
mddev->bitmap_offset = 0;
- mddev->default_bitmap_offset = 1024;
+ mddev->default_bitmap_offset = 1024 >> 9;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
@@ -1161,6 +1163,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
+ sb->raid_disks = cpu_to_le32(mddev->raid_disks);
+ sb->size = cpu_to_le64(mddev->size<<1);
+
if (mddev->bitmap && mddev->bitmap_file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2686,14 +2691,6 @@ static int do_md_stop(mddev_t * mddev, int ro)
set_disk_ro(disk, 1);
}
- bitmap_destroy(mddev);
- if (mddev->bitmap_file) {
- atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
- fput(mddev->bitmap_file);
- mddev->bitmap_file = NULL;
- }
- mddev->bitmap_offset = 0;
-
/*
* Free resources if final stop
*/
@@ -2703,6 +2700,14 @@ static int do_md_stop(mddev_t * mddev, int ro)
struct gendisk *disk;
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
+ bitmap_destroy(mddev);
+ if (mddev->bitmap_file) {
+ atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
+ fput(mddev->bitmap_file);
+ mddev->bitmap_file = NULL;
+ }
+ mddev->bitmap_offset = 0;
+
ITERATE_RDEV(mddev,rdev,tmp)
if (rdev->raid_disk >= 0) {
char nm[20];
@@ -2939,6 +2944,8 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
info.ctime = mddev->ctime;
info.level = mddev->level;
info.size = mddev->size;
+ if (info.size != mddev->size) /* overflow */
+ info.size = -1;
info.nr_disks = nr;
info.raid_disks = mddev->raid_disks;
info.md_minor = mddev->md_minor;
@@ -3465,7 +3472,7 @@ static int update_size(mddev_t *mddev, unsigned long size)
bdev = bdget_disk(mddev->gendisk, 0);
if (bdev) {
mutex_lock(&bdev->bd_inode->i_mutex);
- i_size_write(bdev->bd_inode, mddev->array_size << 10);
+ i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
mutex_unlock(&bdev->bd_inode->i_mutex);
bdput(bdev);
}
@@ -3485,17 +3492,6 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
if (mddev->sync_thread)
return -EBUSY;
rv = mddev->pers->reshape(mddev, raid_disks);
- if (!rv) {
- struct block_device *bdev;
-
- bdev = bdget_disk(mddev->gendisk, 0);
- if (bdev) {
- mutex_lock(&bdev->bd_inode->i_mutex);
- i_size_write(bdev->bd_inode, mddev->array_size << 10);
- mutex_unlock(&bdev->bd_inode->i_mutex);
- bdput(bdev);
- }
- }
return rv;
}
@@ -3531,7 +3527,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
)
return -EINVAL;
/* Check there is only one change */
- if (mddev->size != info->size) cnt++;
+ if (info->size >= 0 && mddev->size != info->size) cnt++;
if (mddev->raid_disks != info->raid_disks) cnt++;
if (mddev->layout != info->layout) cnt++;
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
@@ -3548,7 +3544,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
else
return mddev->pers->reconfig(mddev, info->layout, -1);
}
- if (mddev->size != info->size)
+ if (info->size >= 0 && mddev->size != info->size)
rv = update_size(mddev, info->size);
if (mddev->raid_disks != info->raid_disks)
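On the update_size() change above: mddev->array_size is a KiB count, and on configurations where it is a 32-bit quantity the shift by 10 to produce a byte count wraps for arrays of 4 GiB and larger; widening to loff_t before the shift keeps the value passed to i_size_write() correct. A small sketch of the difference, with illustrative values rather than md code:

/*
 * Shifting a 32-bit KiB count left by 10 wraps for arrays >= 4 GiB;
 * widening to 64 bits first, as the (loff_t) cast does, does not.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t array_size_kib = 8u << 20;		/* an 8 GiB array, in KiB */

	uint32_t wrapped = array_size_kib << 10;	/* 32-bit shift wraps to 0 */
	uint64_t widened = (uint64_t)array_size_kib << 10;

	printf("wrapped = %u bytes, widened = %llu bytes\n",
	       wrapped, (unsigned long long)widened);
	return 0;
}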
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d03f99cf4b7d..678f4dbbea1d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -372,7 +372,7 @@ out_free_conf:
kfree(conf);
mddev->private = NULL;
out:
- return 1;
+ return -ENOMEM;
}
static int raid0_stop (mddev_t *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d39f584cd8b3..5d88329e3c7a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -306,6 +306,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = mddev_to_conf(r1_bio->mddev);
+ struct bio *to_put = NULL;
if (bio->bi_size)
return 1;
@@ -323,6 +324,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
* this branch is our 'one mirror IO has finished' event handler:
*/
r1_bio->bios[mirror] = NULL;
+ to_put = bio;
if (!uptodate) {
md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
/* an I/O failed, we can't clear the bitmap */
@@ -375,7 +377,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
/* Don't dec_pending yet, we want to hold
* the reference over the retry
*/
- return 0;
+ goto out;
}
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
/* free extra copy of the data pages */
@@ -392,10 +394,11 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
raid_end_bio_io(r1_bio);
}
- if (r1_bio->bios[mirror]==NULL)
- bio_put(bio);
-
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+ out:
+ if (to_put)
+ bio_put(to_put);
+
return 0;
}
@@ -857,7 +860,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
atomic_set(&r1_bio->remaining, 0);
atomic_set(&r1_bio->behind_remaining, 0);
- do_barriers = bio->bi_rw & BIO_RW_BARRIER;
+ do_barriers = bio_barrier(bio);
if (do_barriers)
set_bit(R1BIO_Barrier, &r1_bio->state);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9130d051b474..ab90a6d12020 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -565,6 +565,8 @@ rb_out:
if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
+ else
+ disk = -1;
rcu_read_unlock();
return disk;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 25976bfb6f9c..2dba305daf3c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -350,7 +350,8 @@ static void shrink_stripes(raid5_conf_t *conf)
while (drop_one_stripe(conf))
;
- kmem_cache_destroy(conf->slab_cache);
+ if (conf->slab_cache)
+ kmem_cache_destroy(conf->slab_cache);
conf->slab_cache = NULL;
}
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index f618a53b98be..cd477ebf2ee4 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -115,7 +115,7 @@ static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
list_add_tail(&sh->lru, &conf->inactive_list);
atomic_dec(&conf->active_stripes);
if (!conf->inactive_blocked ||
- atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
+ atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
wake_up(&conf->wait_for_stripe);
}
}
@@ -273,7 +273,8 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
conf->inactive_blocked = 1;
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list) &&
- (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
+ (atomic_read(&conf->active_stripes)
+ < (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
unplug_slaves(conf->mddev);
@@ -302,9 +303,31 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
return sh;
}
-static int grow_stripes(raid6_conf_t *conf, int num)
+static int grow_one_stripe(raid6_conf_t *conf)
{
struct stripe_head *sh;
+ sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
+ if (!sh)
+ return 0;
+ memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
+ sh->raid_conf = conf;
+ spin_lock_init(&sh->lock);
+
+ if (grow_buffers(sh, conf->raid_disks)) {
+ shrink_buffers(sh, conf->raid_disks);
+ kmem_cache_free(conf->slab_cache, sh);
+ return 0;
+ }
+ /* we just created an active stripe so... */
+ atomic_set(&sh->count, 1);
+ atomic_inc(&conf->active_stripes);
+ INIT_LIST_HEAD(&sh->lru);
+ release_stripe(sh);
+ return 1;
+}
+
+static int grow_stripes(raid6_conf_t *conf, int num)
+{
kmem_cache_t *sc;
int devs = conf->raid_disks;
@@ -316,45 +339,35 @@ static int grow_stripes(raid6_conf_t *conf, int num)
if (!sc)
return 1;
conf->slab_cache = sc;
- while (num--) {
- sh = kmem_cache_alloc(sc, GFP_KERNEL);
- if (!sh)
- return 1;
- memset(sh, 0, sizeof(*sh) + (devs-1)*sizeof(struct r5dev));
- sh->raid_conf = conf;
- spin_lock_init(&sh->lock);
-
- if (grow_buffers(sh, conf->raid_disks)) {
- shrink_buffers(sh, conf->raid_disks);
- kmem_cache_free(sc, sh);
+ while (num--)
+ if (!grow_one_stripe(conf))
return 1;
- }
- /* we just created an active stripe so... */
- atomic_set(&sh->count, 1);
- atomic_inc(&conf->active_stripes);
- INIT_LIST_HEAD(&sh->lru);
- release_stripe(sh);
- }
return 0;
}
-static void shrink_stripes(raid6_conf_t *conf)
+static int drop_one_stripe(raid6_conf_t *conf)
{
struct stripe_head *sh;
+ spin_lock_irq(&conf->device_lock);
+ sh = get_free_stripe(conf);
+ spin_unlock_irq(&conf->device_lock);
+ if (!sh)
+ return 0;
+ if (atomic_read(&sh->count))
+ BUG();
+ shrink_buffers(sh, conf->raid_disks);
+ kmem_cache_free(conf->slab_cache, sh);
+ atomic_dec(&conf->active_stripes);
+ return 1;
+}
- while (1) {
- spin_lock_irq(&conf->device_lock);
- sh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
- if (!sh)
- break;
- if (atomic_read(&sh->count))
- BUG();
- shrink_buffers(sh, conf->raid_disks);
- kmem_cache_free(conf->slab_cache, sh);
- atomic_dec(&conf->active_stripes);
- }
- kmem_cache_destroy(conf->slab_cache);
+static void shrink_stripes(raid6_conf_t *conf)
+{
+ while (drop_one_stripe(conf))
+ ;
+
+ if (conf->slab_cache)
+ kmem_cache_destroy(conf->slab_cache);
conf->slab_cache = NULL;
}
@@ -1912,6 +1925,74 @@ static void raid6d (mddev_t *mddev)
PRINTK("--- raid6d inactive\n");
}
+static ssize_t
+raid6_show_stripe_cache_size(mddev_t *mddev, char *page)
+{
+ raid6_conf_t *conf = mddev_to_conf(mddev);
+ if (conf)
+ return sprintf(page, "%d\n", conf->max_nr_stripes);
+ else
+ return 0;
+}
+
+static ssize_t
+raid6_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
+{
+ raid6_conf_t *conf = mddev_to_conf(mddev);
+ char *end;
+ int new;
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ new = simple_strtoul(page, &end, 10);
+ if (!*page || (*end && *end != '\n') )
+ return -EINVAL;
+ if (new <= 16 || new > 32768)
+ return -EINVAL;
+ while (new < conf->max_nr_stripes) {
+ if (drop_one_stripe(conf))
+ conf->max_nr_stripes--;
+ else
+ break;
+ }
+ while (new > conf->max_nr_stripes) {
+ if (grow_one_stripe(conf))
+ conf->max_nr_stripes++;
+ else break;
+ }
+ return len;
+}
+
+static struct md_sysfs_entry
+raid6_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
+ raid6_show_stripe_cache_size,
+ raid6_store_stripe_cache_size);
+
+static ssize_t
+stripe_cache_active_show(mddev_t *mddev, char *page)
+{
+ raid6_conf_t *conf = mddev_to_conf(mddev);
+ if (conf)
+ return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
+ else
+ return 0;
+}
+
+static struct md_sysfs_entry
+raid6_stripecache_active = __ATTR_RO(stripe_cache_active);
+
+static struct attribute *raid6_attrs[] = {
+ &raid6_stripecache_size.attr,
+ &raid6_stripecache_active.attr,
+ NULL,
+};
+static struct attribute_group raid6_attrs_group = {
+ .name = NULL,
+ .attrs = raid6_attrs,
+};
+
static int run(mddev_t *mddev)
{
raid6_conf_t *conf;
@@ -2095,6 +2176,7 @@ static int stop (mddev_t *mddev)
shrink_stripes(conf);
kfree(conf->stripe_hashtbl);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ sysfs_remove_group(&mddev->kobj, &raid6_attrs_group);
kfree(conf);
mddev->private = NULL;
return 0;