author | Mike Snitzer <snitzer@redhat.com> | 2012-10-12 21:02:10 +0100
committer | Alasdair G Kergon <agk@redhat.com> | 2012-10-12 21:02:10 +0100
commit | 44feb387f6f5584535bd6e3ad7ccfdce715d7dba (patch)
tree | f49e3e91bcba0c55d3a0357def6472069d00c42f
parent | 28eed34e7662d7602da6753b0ba2563006b8e7a2 (diff)
download | linux-44feb387f6f5584535bd6e3ad7ccfdce715d7dba.tar.bz2
dm thin: prepare to separate bio_prison code
The bio prison code will be useful to share with future DM targets.
Prepare to move this code into a separate module, adding a dm prefix
to structures and functions that will be exported.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
-rw-r--r-- | drivers/md/dm-thin.c | 221
1 file changed, 131 insertions, 90 deletions
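
Before the mechanical rename below, it helps to see the shape of the API this change settles on: a prison object that is explicitly created and destroyed, with detain/release operations keyed by a dm_cell_key. The following is a minimal sketch of that calling pattern, not code from the patch: example_remap() is an invented name, it borrows the pattern of process_bio() from the diff below, and build_virtual_key()/remap_and_issue() are existing dm-thin internals, with all locking and error handling trimmed.

/*
 * Illustrative sketch only -- drives the renamed bio prison API the
 * way process_bio() does in the diff below.
 */
static void example_remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;

	build_virtual_key(tc->td, block, &key);

	/* was bio_detain(): returns 1 if the key is already held, in
	 * which case bio has been queued inside the existing cell */
	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
		return;

	/* was cell_release_singleton(): bio is the lone holder */
	dm_cell_release_singleton(cell, bio);
	remap_and_issue(tc, bio, block);
}
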
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index df20a115136f..22a22a701e16 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -58,7 +58,7 @@
  * i) plug io further to this physical block. (see bio_prison code).
  *
  * ii) quiesce any read io to that shared data block.  Obviously
- * including all devices that share this block. (see deferred_set code)
+ * including all devices that share this block. (see dm_deferred_set code)
  *
  * iii) copy the data block to a newly allocate block.  This step can be
  *      missed out if the io covers the block. (schedule_copy).
@@ -104,9 +104,9 @@
  * by a key, multiple bios can be in the same cell.  When the cell is
  * subsequently unlocked the bios become available.
  */
-struct bio_prison;
+struct dm_bio_prison;
 
-struct cell_key {
+struct dm_cell_key {
 	int virtual;
 	dm_thin_id dev;
 	dm_block_t block;
@@ -114,13 +114,13 @@ struct cell_key {
 };
 
 struct dm_bio_prison_cell {
 	struct hlist_node list;
-	struct bio_prison *prison;
-	struct cell_key key;
+	struct dm_bio_prison *prison;
+	struct dm_cell_key key;
 	struct bio *holder;
 	struct bio_list bios;
 };
 
-struct bio_prison {
+struct dm_bio_prison {
 	spinlock_t lock;
 	mempool_t *cell_pool;
@@ -148,13 +148,13 @@ static struct kmem_cache *_cell_cache;
  * @nr_cells should be the number of cells you want in use _concurrently_.
  * Don't confuse it with the number of distinct keys.
  */
-static struct bio_prison *prison_create(unsigned nr_cells)
+static struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
 {
 	unsigned i;
 	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
-	size_t len = sizeof(struct bio_prison) +
+	size_t len = sizeof(struct dm_bio_prison) +
 		(sizeof(struct hlist_head) * nr_buckets);
-	struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
+	struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
 
 	if (!prison)
 		return NULL;
@@ -175,13 +175,13 @@ static struct bio_prison *prison_create(unsigned nr_cells)
 	return prison;
 }
 
-static void prison_destroy(struct bio_prison *prison)
+static void dm_bio_prison_destroy(struct dm_bio_prison *prison)
 {
 	mempool_destroy(prison->cell_pool);
 	kfree(prison);
 }
 
-static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
+static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
 {
 	const unsigned long BIG_PRIME = 4294967291UL;
 	uint64_t hash = key->block * BIG_PRIME;
@@ -189,7 +189,7 @@ static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
 	return (uint32_t) (hash & prison->hash_mask);
 }
 
-static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
+static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
 {
 	return (lhs->virtual == rhs->virtual) &&
 		(lhs->dev == rhs->dev) &&
@@ -197,7 +197,7 @@
 }
 
 static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
-						  struct cell_key *key)
+						  struct dm_cell_key *key)
 {
 	struct dm_bio_prison_cell *cell;
 	struct hlist_node *tmp;
@@ -215,8 +215,8 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
  *
  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
-static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-		      struct bio *inmate, struct dm_bio_prison_cell **ref)
+static int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
+			 struct bio *inmate, struct dm_bio_prison_cell **ref)
 {
 	int r = 1;
 	unsigned long flags;
@@ -277,7 +277,7 @@ out:
  * @inmates must have been initialised prior to this call
  */
 static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
 {
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	hlist_del(&cell->list);
@@ -289,10 +289,10 @@ static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inm
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
+static void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 {
 	unsigned long flags;
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	spin_lock_irqsave(&prison->lock, flags);
 	__cell_release(cell, bios);
@@ -313,10 +313,10 @@ static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio
 	__cell_release(cell, NULL);
 }
 
-static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
+static void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
 {
 	unsigned long flags;
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	spin_lock_irqsave(&prison->lock, flags);
 	__cell_release_singleton(cell, bio);
@@ -329,7 +329,7 @@ static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *
 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
 				     struct bio_list *inmates)
 {
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	hlist_del(&cell->list);
 	bio_list_merge(inmates, &cell->bios);
@@ -337,20 +337,20 @@ static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
 	mempool_free(cell, prison->cell_pool);
 }
 
-static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
-				   struct bio_list *inmates)
+static void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell,
+				      struct bio_list *inmates)
 {
 	unsigned long flags;
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 
 	spin_lock_irqsave(&prison->lock, flags);
 	__cell_release_no_holder(cell, inmates);
 	spin_unlock_irqrestore(&prison->lock, flags);
 }
 
-static void cell_error(struct dm_bio_prison_cell *cell)
+static void dm_cell_error(struct dm_bio_prison_cell *cell)
 {
-	struct bio_prison *prison = cell->prison;
+	struct dm_bio_prison *prison = cell->prison;
 	struct bio_list bios;
 	struct bio *bio;
 	unsigned long flags;
@@ -374,23 +374,28 @@ static void cell_error(struct dm_bio_prison_cell *cell)
  * until these prior reads have completed.  Otherwise the insertion of the
  * new mapping could free the old block that the read bios are mapped to.
  */
-struct deferred_set;
-struct deferred_entry {
-	struct deferred_set *ds;
+struct dm_deferred_set;
+struct dm_deferred_entry {
+	struct dm_deferred_set *ds;
 	unsigned count;
 	struct list_head work_items;
 };
 
-struct deferred_set {
+struct dm_deferred_set {
 	spinlock_t lock;
 	unsigned current_entry;
 	unsigned sweeper;
-	struct deferred_entry entries[DEFERRED_SET_SIZE];
+	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
 };
 
-static void ds_init(struct deferred_set *ds)
+static struct dm_deferred_set *dm_deferred_set_create(void)
 {
 	int i;
+	struct dm_deferred_set *ds;
+
+	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
+	if (!ds)
+		return NULL;
 
 	spin_lock_init(&ds->lock);
 	ds->current_entry = 0;
@@ -400,12 +405,19 @@ static void ds_init(struct deferred_set *ds)
 		ds->entries[i].count = 0;
 		INIT_LIST_HEAD(&ds->entries[i].work_items);
 	}
+
+	return ds;
 }
 
-static struct deferred_entry *ds_inc(struct deferred_set *ds)
+static void dm_deferred_set_destroy(struct dm_deferred_set *ds)
+{
+	kfree(ds);
+}
+
+static struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
 {
 	unsigned long flags;
-	struct deferred_entry *entry;
+	struct dm_deferred_entry *entry;
 
 	spin_lock_irqsave(&ds->lock, flags);
 	entry = ds->entries + ds->current_entry;
@@ -420,7 +432,7 @@ static unsigned ds_next(unsigned index)
 	return (index + 1) % DEFERRED_SET_SIZE;
 }
 
-static void __sweep(struct deferred_set *ds, struct list_head *head)
+static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
 {
 	while ((ds->sweeper != ds->current_entry) &&
 	       !ds->entries[ds->sweeper].count) {
@@ -432,7 +444,7 @@ static void __sweep(struct deferred_set *ds, struct list_head *head)
 	if (!ds->entries[ds->sweeper].count)
 		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
 }
 
-static void ds_dec(struct deferred_entry *entry, struct list_head *head)
+static void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
 {
 	unsigned long flags;
@@ -446,7 +458,7 @@ static void ds_dec(struct deferred_entry *entry, struct list_head *head)
 /*
  * Returns 1 if deferred or 0 if no pending items to delay job.
  */
-static int ds_add_work(struct deferred_set *ds, struct list_head *work)
+static int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
 {
 	int r = 1;
 	unsigned long flags;
@@ -467,13 +479,28 @@ static int ds_add_work(struct deferred_set *ds, struct list_head *work)
 	return r;
 }
 
+static int __init dm_bio_prison_init(void)
+{
+	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+	if (!_cell_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __exit dm_bio_prison_exit(void)
+{
+	kmem_cache_destroy(_cell_cache);
+	_cell_cache = NULL;
+}
+
 /*----------------------------------------------------------------*/
 
 /*
  * Key building.
  */
 static void build_data_key(struct dm_thin_device *td,
-			   dm_block_t b, struct cell_key *key)
+			   dm_block_t b, struct dm_cell_key *key)
 {
 	key->virtual = 0;
 	key->dev = dm_thin_dev_id(td);
@@ -481,7 +508,7 @@ static void build_data_key(struct dm_thin_device *td,
 }
 
 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
-			      struct cell_key *key)
+			      struct dm_cell_key *key)
 {
 	key->virtual = 1;
 	key->dev = dm_thin_dev_id(td);
@@ -534,7 +561,7 @@ struct pool {
 	unsigned low_water_triggered:1;	/* A dm event has been sent */
 	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */
 
-	struct bio_prison *prison;
+	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
 
 	struct workqueue_struct *wq;
@@ -552,8 +579,8 @@ struct pool {
 
 	struct bio_list retry_on_resume_list;
 
-	struct deferred_set shared_read_ds;
-	struct deferred_set all_io_ds;
+	struct dm_deferred_set *shared_read_ds;
+	struct dm_deferred_set *all_io_ds;
 
 	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;
@@ -660,8 +687,8 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 struct dm_thin_endio_hook {
 	struct thin_c *tc;
-	struct deferred_entry *shared_read_entry;
-	struct deferred_entry *all_io_entry;
+	struct dm_deferred_entry *shared_read_entry;
+	struct dm_deferred_entry *all_io_entry;
 	struct dm_thin_new_mapping *overwrite_mapping;
 };
 
@@ -877,7 +904,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 	unsigned long flags;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	cell_release(cell, &pool->deferred_bios);
+	dm_cell_release(cell, &pool->deferred_bios);
 	spin_unlock_irqrestore(&tc->pool->lock, flags);
 
 	wake_worker(pool);
@@ -896,7 +923,7 @@ static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell
 	bio_list_init(&bios);
 
 	spin_lock_irqsave(&pool->lock, flags);
-	cell_release_no_holder(cell, &pool->deferred_bios);
+	dm_cell_release_no_holder(cell, &pool->deferred_bios);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	wake_worker(pool);
@@ -906,7 +933,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
 	if (m->bio)
 		m->bio->bi_end_io = m->saved_bi_end_io;
-	cell_error(m->cell);
+	dm_cell_error(m->cell);
 	list_del(&m->list);
 	mempool_free(m, m->tc->pool->mapping_pool);
 }
@@ -921,7 +948,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 		bio->bi_end_io = m->saved_bi_end_io;
 
 	if (m->err) {
-		cell_error(m->cell);
+		dm_cell_error(m->cell);
 		goto out;
 	}
 
@@ -933,7 +960,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
 	if (r) {
 		DMERR("dm_thin_insert_block() failed");
-		cell_error(m->cell);
+		dm_cell_error(m->cell);
 		goto out;
 	}
 
@@ -1067,7 +1094,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	m->err = 0;
 	m->bio = NULL;
 
-	if (!ds_add_work(&pool->shared_read_ds, &m->list))
+	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
 		m->quiesced = 1;
 
 	/*
@@ -1099,7 +1126,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 		if (r < 0) {
 			mempool_free(m, pool->mapping_pool);
 			DMERR("dm_kcopyd_copy() failed");
-			cell_error(cell);
+			dm_cell_error(cell);
 		}
 	}
 }
@@ -1164,7 +1191,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 		if (r < 0) {
 			mempool_free(m, pool->mapping_pool);
 			DMERR("dm_kcopyd_zero() failed");
-			cell_error(cell);
+			dm_cell_error(cell);
 		}
 	}
 }
@@ -1276,7 +1303,7 @@ static void no_space(struct dm_bio_prison_cell *cell)
 	struct bio_list bios;
 
 	bio_list_init(&bios);
-	cell_release(cell, &bios);
+	dm_cell_release(cell, &bios);
 
 	while ((bio = bio_list_pop(&bios)))
 		retry_on_resume(bio);
@@ -1288,13 +1315,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 	unsigned long flags;
 	struct pool *pool = tc->pool;
 	struct dm_bio_prison_cell *cell, *cell2;
-	struct cell_key key, key2;
+	struct dm_cell_key key, key2;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_lookup_result lookup_result;
 	struct dm_thin_new_mapping *m;
 
 	build_virtual_key(tc->td, block, &key);
-	if (bio_detain(tc->pool->prison, &key, bio, &cell))
+	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
 		return;
 
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1306,8 +1333,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			 * on this block.
 			 */
 			build_data_key(tc->td, lookup_result.block, &key2);
-			if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
-				cell_release_singleton(cell, bio);
+			if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+				dm_cell_release_singleton(cell, bio);
 				break;
 			}
 
@@ -1326,7 +1353,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			m->err = 0;
 			m->bio = bio;
 
-			if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
 				spin_lock_irqsave(&pool->lock, flags);
 				list_add(&m->list, &pool->prepared_discards);
 				spin_unlock_irqrestore(&pool->lock, flags);
@@ -1338,8 +1365,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			 * a block boundary.  So we submit the discard of a
 			 * partial block appropriately.
 			 */
-			cell_release_singleton(cell, bio);
-			cell_release_singleton(cell2, bio);
+			dm_cell_release_singleton(cell, bio);
+			dm_cell_release_singleton(cell2, bio);
 			if ((!lookup_result.shared) && pool->pf.discard_passdown)
 				remap_and_issue(tc, bio, lookup_result.block);
 			else
@@ -1351,20 +1378,20 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 		/*
 		 * It isn't provisioned, just forget it.
 		 */
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		bio_endio(bio, 0);
 		break;
 
 	default:
 		DMERR("discard: find block unexpectedly returned %d", r);
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		bio_io_error(bio);
 		break;
 	}
 }
 
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
-			  struct cell_key *key,
+			  struct dm_cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result,
 			  struct dm_bio_prison_cell *cell)
 {
@@ -1384,7 +1411,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 
 	default:
 		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
-		cell_error(cell);
+		dm_cell_error(cell);
 		break;
 	}
 }
@@ -1395,14 +1422,14 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 {
 	struct dm_bio_prison_cell *cell;
 	struct pool *pool = tc->pool;
-	struct cell_key key;
+	struct dm_cell_key key;
 
 	/*
 	 * If cell is already occupied, then sharing is already in the process
 	 * of being broken so we have nothing further to do here.
 	 */
 	build_data_key(tc->td, lookup_result->block, &key);
-	if (bio_detain(pool->prison, &key, bio, &cell))
+	if (dm_bio_detain(pool->prison, &key, bio, &cell))
 		return;
 
 	if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1410,9 +1437,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 	else {
 		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
 
-		h->shared_read_entry = ds_inc(&pool->shared_read_ds);
+		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
 
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		remap_and_issue(tc, bio, lookup_result->block);
 	}
 }
@@ -1427,7 +1454,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	 * Remap empty bios (flushes) immediately, without provisioning.
 	 */
 	if (!bio->bi_size) {
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		remap_and_issue(tc, bio, 0);
 		return;
 	}
@@ -1437,7 +1464,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	 */
 	if (bio_data_dir(bio) == READ) {
 		zero_fill_bio(bio);
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		bio_endio(bio, 0);
 		return;
 	}
@@ -1458,7 +1485,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	default:
 		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
 		set_pool_mode(tc->pool, PM_READ_ONLY);
-		cell_error(cell);
+		dm_cell_error(cell);
 		break;
 	}
 }
@@ -1468,7 +1495,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 	int r;
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_bio_prison_cell *cell;
-	struct cell_key key;
+	struct dm_cell_key key;
 	struct dm_thin_lookup_result lookup_result;
 
 	/*
@@ -1476,7 +1503,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 	 * being provisioned so we have nothing further to do here.
 	 */
 	build_virtual_key(tc->td, block, &key);
-	if (bio_detain(tc->pool->prison, &key, bio, &cell))
+	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
 		return;
 
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1491,7 +1518,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 		 * TODO: this will probably have to change when discard goes
 		 * back in.
 		 */
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 
 		if (lookup_result.shared)
 			process_shared_bio(tc, bio, block, &lookup_result);
@@ -1501,7 +1528,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
 	case -ENODATA:
 		if (bio_data_dir(bio) == READ && tc->origin_dev) {
-			cell_release_singleton(cell, bio);
+			dm_cell_release_singleton(cell, bio);
 			remap_to_origin_and_issue(tc, bio);
 		} else
 			provision_block(tc, bio, block, cell);
@@ -1509,7 +1536,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
 	default:
 		DMERR("dm_thin_find_block() failed, error = %d", r);
-		cell_release_singleton(cell, bio);
+		dm_cell_release_singleton(cell, bio);
 		bio_io_error(bio);
 		break;
 	}
@@ -1718,7 +1745,7 @@ static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *b
 
 	h->tc = tc;
 	h->shared_read_entry = NULL;
-	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
+	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
 	h->overwrite_mapping = NULL;
 
 	return h;
@@ -1928,7 +1955,7 @@ static void __pool_destroy(struct pool *pool)
 	if (dm_pool_metadata_close(pool->pmd) < 0)
 		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
 
-	prison_destroy(pool->prison);
+	dm_bio_prison_destroy(pool->prison);
 	dm_kcopyd_client_destroy(pool->copier);
 
 	if (pool->wq)
@@ -1938,6 +1965,8 @@ static void __pool_destroy(struct pool *pool)
 		mempool_free(pool->next_mapping, pool->mapping_pool);
 	mempool_destroy(pool->mapping_pool);
 	mempool_destroy(pool->endio_hook_pool);
+	dm_deferred_set_destroy(pool->shared_read_ds);
+	dm_deferred_set_destroy(pool->all_io_ds);
 
 	kfree(pool);
 }
@@ -1976,7 +2005,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	pool->sectors_per_block_shift = __ffs(block_size);
 	pool->low_water_blocks = 0;
 	pool_features_init(&pool->pf);
-	pool->prison = prison_create(PRISON_CELLS);
+	pool->prison = dm_bio_prison_create(PRISON_CELLS);
 	if (!pool->prison) {
 		*error = "Error creating pool's bio prison";
 		err_p = ERR_PTR(-ENOMEM);
@@ -2012,8 +2041,20 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	pool->low_water_triggered = 0;
 	pool->no_free_space = 0;
 	bio_list_init(&pool->retry_on_resume_list);
-	ds_init(&pool->shared_read_ds);
-	ds_init(&pool->all_io_ds);
+
+	pool->shared_read_ds = dm_deferred_set_create();
+	if (!pool->shared_read_ds) {
+		*error = "Error creating pool's shared read deferred set";
+		err_p = ERR_PTR(-ENOMEM);
+		goto bad_shared_read_ds;
+	}
+
+	pool->all_io_ds = dm_deferred_set_create();
+	if (!pool->all_io_ds) {
+		*error = "Error creating pool's all io deferred set";
+		err_p = ERR_PTR(-ENOMEM);
+		goto bad_all_io_ds;
+	}
 
 	pool->next_mapping = NULL;
 	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
@@ -2042,11 +2083,15 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 bad_endio_hook_pool:
 	mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
+	dm_deferred_set_destroy(pool->all_io_ds);
+bad_all_io_ds:
+	dm_deferred_set_destroy(pool->shared_read_ds);
+bad_shared_read_ds:
 	destroy_workqueue(pool->wq);
 bad_wq:
 	dm_kcopyd_client_destroy(pool->copier);
 bad_kcopyd_client:
-	prison_destroy(pool->prison);
+	dm_bio_prison_destroy(pool->prison);
 bad_prison:
 	kfree(pool);
 bad_pool:
@@ -2982,7 +3027,7 @@ static int thin_endio(struct dm_target *ti,
 
 	if (h->shared_read_entry) {
 		INIT_LIST_HEAD(&work);
-		ds_dec(h->shared_read_entry, &work);
+		dm_deferred_entry_dec(h->shared_read_entry, &work);
 
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list) {
@@ -2995,7 +3040,7 @@ static int thin_endio(struct dm_target *ti,
 
 	if (h->all_io_entry) {
 		INIT_LIST_HEAD(&work);
-		ds_dec(h->all_io_entry, &work);
+		dm_deferred_entry_dec(h->all_io_entry, &work);
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list)
 			list_add(&m->list, &pool->prepared_discards);
@@ -3128,9 +3173,7 @@ static int __init dm_thin_init(void)
 
 	r = -ENOMEM;
 
-	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
-	if (!_cell_cache)
-		goto bad_cell_cache;
+	dm_bio_prison_init();
 
 	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
 	if (!_new_mapping_cache)
@@ -3145,8 +3188,6 @@ static int __init dm_thin_init(void)
 bad_endio_hook_cache:
 	kmem_cache_destroy(_new_mapping_cache);
 bad_new_mapping_cache:
-	kmem_cache_destroy(_cell_cache);
-bad_cell_cache:
 	dm_unregister_target(&pool_target);
 bad_pool_target:
 	dm_unregister_target(&thin_target);
@@ -3159,7 +3200,7 @@ static void dm_thin_exit(void)
 {
 	dm_unregister_target(&thin_target);
 	dm_unregister_target(&pool_target);
-	kmem_cache_destroy(_cell_cache);
+	dm_bio_prison_exit();
 	kmem_cache_destroy(_new_mapping_cache);
 	kmem_cache_destroy(_endio_hook_cache);
 }
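
The deferred sets change lifecycle as well as name: what ds_init() used to initialise in place inside struct pool is now an opaque, kmalloc'd object, so an owner must create it, check for allocation failure, and destroy it on teardown, exactly as pool_create() and __pool_destroy() do above. A minimal sketch of that ownership pattern follows; struct my_owner and its two functions are invented for illustration and are not part of the patch.

/* Invented container: mirrors how struct pool now holds a pointer to
 * its deferred set instead of embedding the struct itself. */
struct my_owner {
	struct dm_deferred_set *all_io_ds;
};

static int my_owner_create(struct my_owner *o)
{
	o->all_io_ds = dm_deferred_set_create();	/* was ds_init(&embedded) */
	if (!o->all_io_ds)
		return -ENOMEM;				/* creation can now fail */
	return 0;
}

static void my_owner_destroy(struct my_owner *o)
{
	dm_deferred_set_destroy(o->all_io_ds);		/* frees the kmalloc'd set */
}

Per-bio accounting is unchanged apart from taking the pointer rather than the address of an embedded struct: callers still pair dm_deferred_entry_inc() on submission with dm_deferred_entry_dec() at end-io, as thin_hook_bio() and thin_endio() do in the diff.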