From 6f10f7d1b02b1bbc305f88d7696445dd38b13881 Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Sat, 11 Aug 2018 13:19:44 +0800
Subject: bcache: style fix to replace 'unsigned' by 'unsigned int'

This patch fixes warnings reported by checkpatch.pl by replacing
'unsigned' with 'unsigned int'.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Shenghui Wang <shhuiw@foxmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/md/bcache/alloc.c     |  36 +++++++------
 drivers/md/bcache/bcache.h    | 107 +++++++++++++++++++--------------------
 drivers/md/bcache/bset.c      | 114 ++++++++++++++++++++++--------------------
 drivers/md/bcache/bset.h      |  34 ++++++-------
 drivers/md/bcache/btree.c     |  50 +++++++++---------
 drivers/md/bcache/btree.h     |   4 +-
 drivers/md/bcache/closure.h   |   2 +-
 drivers/md/bcache/debug.c     |   6 +--
 drivers/md/bcache/extents.c   |  22 ++++----
 drivers/md/bcache/io.c        |  18 +++----
 drivers/md/bcache/journal.c   |  20 ++++----
 drivers/md/bcache/journal.h   |   8 +--
 drivers/md/bcache/movinggc.c  |  12 ++---
 drivers/md/bcache/request.c   |  42 ++++++++--------
 drivers/md/bcache/request.h   |  18 +++----
 drivers/md/bcache/stats.c     |  12 ++---
 drivers/md/bcache/stats.h     |   2 +-
 drivers/md/bcache/super.c     |  34 ++++++-------
 drivers/md/bcache/sysfs.c     |  18 +++----
 drivers/md/bcache/util.h      |   9 ++--
 drivers/md/bcache/writeback.c |  19 +++----
 drivers/md/bcache/writeback.h |  12 ++---
 include/uapi/linux/bcache.h   |   6 +--
 23 files changed, 309 insertions(+), 296 deletions(-)

diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 7fa2631b422c..89f663d22551 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 {
 	struct cache *ca;
 	struct bucket *b;
-	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
-	unsigned i;
+	unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
+	unsigned int i;
 	int r;
 
 	atomic_sub(sectors, &c->rescale);
@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
 
 #define bucket_prio(b)						\
 ({								\
-	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
+	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
 								\
 	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
 })
@@ -301,7 +301,7 @@ do {								\
 
 static int bch_allocator_push(struct cache *ca, long bucket)
 {
-	unsigned i;
+	unsigned int i;
 
 	/* Prios/gens are actually the most important reserve */
 	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
@@ -385,7 +385,7 @@ out:
 
 /* Allocation */
 
-long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
 {
 	DEFINE_WAIT(w);
 	struct bucket *b;
@@ -421,7 +421,7 @@ out:
 	if (expensive_debug_checks(ca->set)) {
 		size_t iter;
 		long i;
-		unsigned j;
+		unsigned int j;
 
 		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
 			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
@@ -470,14 +470,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < KEY_PTRS(k); i++)
 		__bch_bucket_free(PTR_CACHE(c, k, i),
 				  PTR_BUCKET(c, k, i));
 }
 
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 			   struct bkey *k, int n, bool wait)
 {
 	int i;
@@ -510,7 +510,7 @@ err:
 	return -1;
 }
 
-int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 			 struct bkey *k, int n, bool wait)
 {
 	int ret;
@@ -524,8
+524,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, struct open_bucket { struct list_head list; - unsigned last_write_point; - unsigned sectors_free; + unsigned int last_write_point; + unsigned int sectors_free; BKEY_PADDED(key); }; @@ -556,7 +556,7 @@ struct open_bucket { */ static struct open_bucket *pick_data_bucket(struct cache_set *c, const struct bkey *search, - unsigned write_point, + unsigned int write_point, struct bkey *alloc) { struct open_bucket *ret, *ret_task = NULL; @@ -595,12 +595,16 @@ found: * * If s->writeback is true, will not fail. */ -bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, - unsigned write_point, unsigned write_prio, bool wait) +bool bch_alloc_sectors(struct cache_set *c, + struct bkey *k, + unsigned int sectors, + unsigned int write_point, + unsigned int write_prio, + bool wait) { struct open_bucket *b; BKEY_PADDED(key) alloc; - unsigned i; + unsigned int i; /* * We might have to allocate a new bucket, which we can't do with a @@ -613,7 +617,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, spin_lock(&c->data_bucket_lock); while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) { - unsigned watermark = write_prio + unsigned int watermark = write_prio ? RESERVE_MOVINGGC : RESERVE_NONE; diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 05f82ff6f016..1ebd2d9d90d5 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -252,7 +252,7 @@ struct bcache_device { struct kobject kobj; struct cache_set *c; - unsigned id; + unsigned int id; #define BCACHEDEVNAME_SIZE 12 char name[BCACHEDEVNAME_SIZE]; @@ -264,18 +264,18 @@ struct bcache_device { #define BCACHE_DEV_UNLINK_DONE 2 #define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_RATE_DW_RUNNING 4 - unsigned nr_stripes; - unsigned stripe_size; + unsigned int nr_stripes; + unsigned int stripe_size; atomic_t *stripe_sectors_dirty; unsigned long *full_dirty_stripes; struct bio_set bio_split; - unsigned data_csum:1; + unsigned int data_csum:1; int (*cache_miss)(struct btree *, struct search *, - struct bio *, unsigned); - int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long); + struct bio *, unsigned int); + int (*ioctl) (struct bcache_device *, fmode_t, unsigned int, unsigned long); }; struct io { @@ -284,7 +284,7 @@ struct io { struct list_head lru; unsigned long jiffies; - unsigned sequential; + unsigned int sequential; sector_t last; }; @@ -358,18 +358,18 @@ struct cached_dev { struct cache_accounting accounting; /* The rest of this all shows up in sysfs */ - unsigned sequential_cutoff; - unsigned readahead; + unsigned int sequential_cutoff; + unsigned int readahead; - unsigned io_disable:1; - unsigned verify:1; - unsigned bypass_torture_test:1; + unsigned int io_disable:1; + unsigned int verify:1; + unsigned int bypass_torture_test:1; - unsigned partial_stripes_expensive:1; - unsigned writeback_metadata:1; - unsigned writeback_running:1; + unsigned int partial_stripes_expensive:1; + unsigned int writeback_metadata:1; + unsigned int writeback_running:1; unsigned char writeback_percent; - unsigned writeback_delay; + unsigned int writeback_delay; uint64_t writeback_rate_target; int64_t writeback_rate_proportional; @@ -377,16 +377,16 @@ struct cached_dev { int64_t writeback_rate_integral_scaled; int32_t writeback_rate_change; - unsigned writeback_rate_update_seconds; - unsigned writeback_rate_i_term_inverse; - unsigned writeback_rate_p_term_inverse; - unsigned 
writeback_rate_minimum; + unsigned int writeback_rate_update_seconds; + unsigned int writeback_rate_i_term_inverse; + unsigned int writeback_rate_p_term_inverse; + unsigned int writeback_rate_minimum; enum stop_on_failure stop_when_cache_set_failed; #define DEFAULT_CACHED_DEV_ERROR_LIMIT 64 atomic_t io_errors; - unsigned error_limit; - unsigned offline_seconds; + unsigned int error_limit; + unsigned int offline_seconds; char backing_dev_name[BDEVNAME_SIZE]; }; @@ -447,7 +447,7 @@ struct cache { * until a gc finishes - otherwise we could pointlessly burn a ton of * cpu */ - unsigned invalidate_needs_gc; + unsigned int invalidate_needs_gc; bool discard; /* Get rid of? */ @@ -472,7 +472,7 @@ struct gc_stat { size_t nkeys; uint64_t data; /* sectors */ - unsigned in_use; /* percent */ + unsigned int in_use; /* percent */ }; /* @@ -518,7 +518,7 @@ struct cache_set { int caches_loaded; struct bcache_device **devices; - unsigned devices_max_used; + unsigned int devices_max_used; atomic_t attached_dev_nr; struct list_head cached_devs; uint64_t cached_dev_sectors; @@ -548,7 +548,7 @@ struct cache_set { * Default number of pages for a new btree node - may be less than a * full bucket */ - unsigned btree_pages; + unsigned int btree_pages; /* * Lists of struct btrees; lru is the list for structs that have memory @@ -571,7 +571,7 @@ struct cache_set { struct list_head btree_cache_freed; /* Number of elements in btree_cache + btree_cache_freeable lists */ - unsigned btree_cache_used; + unsigned int btree_cache_used; /* * If we need to allocate memory for a new btree node and that @@ -649,7 +649,7 @@ struct cache_set { struct mutex verify_lock; #endif - unsigned nr_uuids; + unsigned int nr_uuids; struct uuid_entry *uuids; BKEY_PADDED(uuid_bucket); struct closure uuid_write; @@ -670,12 +670,12 @@ struct cache_set { struct journal journal; #define CONGESTED_MAX 1024 - unsigned congested_last_us; + unsigned int congested_last_us; atomic_t congested; /* The rest of this all shows up in sysfs */ - unsigned congested_read_threshold_us; - unsigned congested_write_threshold_us; + unsigned int congested_read_threshold_us; + unsigned int congested_write_threshold_us; struct time_stats btree_gc_time; struct time_stats btree_split_time; @@ -694,16 +694,16 @@ struct cache_set { ON_ERROR_PANIC, } on_error; #define DEFAULT_IO_ERROR_LIMIT 8 - unsigned error_limit; - unsigned error_decay; + unsigned int error_limit; + unsigned int error_decay; unsigned short journal_delay_ms; bool expensive_debug_checks; - unsigned verify:1; - unsigned key_merging_disabled:1; - unsigned gc_always_rewrite:1; - unsigned shrinker_disabled:1; - unsigned copy_gc_enabled:1; + unsigned int verify:1; + unsigned int key_merging_disabled:1; + unsigned int gc_always_rewrite:1; + unsigned int shrinker_disabled:1; + unsigned int copy_gc_enabled:1; #define BUCKET_HASH_BITS 12 struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS]; @@ -712,7 +712,7 @@ struct cache_set { }; struct bbio { - unsigned submit_time_us; + unsigned int submit_time_us; union { struct bkey key; uint64_t _pad[3]; @@ -729,10 +729,10 @@ struct bbio { #define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE) #define btree_blocks(b) \ - ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits)) + ((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits)) #define btree_default_blocks(c) \ - ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits)) + ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits)) #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS) 
#define bucket_bytes(c) ((c)->sb.bucket_size << 9) @@ -761,21 +761,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) static inline struct cache *PTR_CACHE(struct cache_set *c, const struct bkey *k, - unsigned ptr) + unsigned int ptr) { return c->cache[PTR_DEV(k, ptr)]; } static inline size_t PTR_BUCKET_NR(struct cache_set *c, const struct bkey *k, - unsigned ptr) + unsigned int ptr) { return sector_to_bucket(c, PTR_OFFSET(k, ptr)); } static inline struct bucket *PTR_BUCKET(struct cache_set *c, const struct bkey *k, - unsigned ptr) + unsigned int ptr) { return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr); } @@ -787,13 +787,13 @@ static inline uint8_t gen_after(uint8_t a, uint8_t b) } static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, - unsigned i) + unsigned int i) { return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i)); } static inline bool ptr_available(struct cache_set *c, const struct bkey *k, - unsigned i) + unsigned int i) { return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i); } @@ -888,7 +888,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b) static inline void wake_up_allocators(struct cache_set *c) { struct cache *ca; - unsigned i; + unsigned int i; for_each_cache(ca, c, i) wake_up_process(ca->alloc_thread); @@ -933,7 +933,8 @@ void bch_bbio_free(struct bio *, struct cache_set *); struct bio *bch_bbio_alloc(struct cache_set *); void __bch_submit_bbio(struct bio *, struct cache_set *); -void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); +void bch_submit_bbio(struct bio *, struct cache_set *, + struct bkey *, unsigned int); uint8_t bch_inc_gen(struct cache *, struct bucket *); void bch_rescale_priorities(struct cache_set *, int); @@ -944,13 +945,13 @@ void __bch_invalidate_one_bucket(struct cache *, struct bucket *); void __bch_bucket_free(struct cache *, struct bucket *); void bch_bucket_free(struct cache_set *, struct bkey *); -long bch_bucket_alloc(struct cache *, unsigned, bool); -int __bch_bucket_alloc_set(struct cache_set *, unsigned, +long bch_bucket_alloc(struct cache *, unsigned int, bool); +int __bch_bucket_alloc_set(struct cache_set *, unsigned int, struct bkey *, int, bool); -int bch_bucket_alloc_set(struct cache_set *, unsigned, +int bch_bucket_alloc_set(struct cache_set *, unsigned int, struct bkey *, int, bool); -bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned, - unsigned, unsigned, bool); +bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned int, + unsigned int, unsigned int, bool); bool bch_cached_dev_error(struct cached_dev *dc); __printf(2, 3) diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 596c93b44e9b..dfda7e9efc3e 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -18,7 +18,7 @@ #ifdef CONFIG_BCACHE_DEBUG -void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set) +void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set) { struct bkey *k, *next; @@ -26,7 +26,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set) next = bkey_next(k); printk(KERN_ERR "block %u key %u/%u: ", set, - (unsigned) ((u64 *) k - i->d), i->keys); + (unsigned int) ((u64 *) k - i->d), i->keys); if (b->ops->key_dump) b->ops->key_dump(b, k); @@ -42,7 +42,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set) void bch_dump_bucket(struct btree_keys *b) { - unsigned i; + unsigned int i; console_lock(); for (i = 0; i <= b->nsets; 
i++) @@ -53,7 +53,7 @@ void bch_dump_bucket(struct btree_keys *b) int __bch_count_data(struct btree_keys *b) { - unsigned ret = 0; + unsigned int ret = 0; struct btree_iter iter; struct bkey *k; @@ -128,7 +128,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {} /* Keylists */ -int __bch_keylist_realloc(struct keylist *l, unsigned u64s) +int __bch_keylist_realloc(struct keylist *l, unsigned int u64s) { size_t oldsize = bch_keylist_nkeys(l); size_t newsize = oldsize + u64s; @@ -180,7 +180,7 @@ void bch_keylist_pop_front(struct keylist *l) /* Key/pointer manipulation */ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src, - unsigned i) + unsigned int i) { BUG_ON(i > KEY_PTRS(src)); @@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src, bool __bch_cut_front(const struct bkey *where, struct bkey *k) { - unsigned i, len = 0; + unsigned int i, len = 0; if (bkey_cmp(where, &START_KEY(k)) <= 0) return false; @@ -214,7 +214,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k) bool __bch_cut_back(const struct bkey *where, struct bkey *k) { - unsigned len = 0; + unsigned int len = 0; if (bkey_cmp(where, k) >= 0) return false; @@ -240,9 +240,9 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k) #define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1) struct bkey_float { - unsigned exponent:BKEY_EXPONENT_BITS; - unsigned m:BKEY_MID_BITS; - unsigned mantissa:BKEY_MANTISSA_BITS; + unsigned int exponent:BKEY_EXPONENT_BITS; + unsigned int m:BKEY_MID_BITS; + unsigned int mantissa:BKEY_MANTISSA_BITS; } __packed; /* @@ -311,7 +311,7 @@ void bch_btree_keys_free(struct btree_keys *b) } EXPORT_SYMBOL(bch_btree_keys_free); -int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp) +int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order, gfp_t gfp) { struct bset_tree *t = b->set; @@ -345,7 +345,7 @@ EXPORT_SYMBOL(bch_btree_keys_alloc); void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops, bool *expensive_debug_checks) { - unsigned i; + unsigned int i; b->ops = ops; b->expensive_debug_checks = expensive_debug_checks; @@ -370,7 +370,7 @@ EXPORT_SYMBOL(bch_btree_keys_init); * return array index next to j when does in-order traverse * of a binary tree which is stored in a linear array */ -static unsigned inorder_next(unsigned j, unsigned size) +static unsigned int inorder_next(unsigned int j, unsigned int size) { if (j * 2 + 1 < size) { j = j * 2 + 1; @@ -387,7 +387,7 @@ static unsigned inorder_next(unsigned j, unsigned size) * return array index previous to j when does in-order traverse * of a binary tree which is stored in a linear array */ -static unsigned inorder_prev(unsigned j, unsigned size) +static unsigned int inorder_prev(unsigned int j, unsigned int size) { if (j * 2 < size) { j = j * 2; @@ -413,10 +413,12 @@ static unsigned inorder_prev(unsigned j, unsigned size) * extra is a function of size: * extra = (size - rounddown_pow_of_two(size - 1)) << 1; */ -static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra) +static unsigned int __to_inorder(unsigned int j, + unsigned int size, + unsigned int extra) { - unsigned b = fls(j); - unsigned shift = fls(size - 1) - b; + unsigned int b = fls(j); + unsigned int shift = fls(size - 1) - b; j ^= 1U << (b - 1); j <<= 1; @@ -433,14 +435,16 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra) * Return the cacheline index in bset_tree->data, where j is index * 
from a linear array which stores the auxiliar binary tree */ -static unsigned to_inorder(unsigned j, struct bset_tree *t) +static unsigned int to_inorder(unsigned int j, struct bset_tree *t) { return __to_inorder(j, t->size, t->extra); } -static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra) +static unsigned int __inorder_to_tree(unsigned int j, + unsigned int size, + unsigned int extra) { - unsigned shift; + unsigned int shift; if (j > extra) j += j - extra; @@ -457,7 +461,7 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra) * Return an index from a linear array which stores the auxiliar binary * tree, j is the cacheline index of t->data. */ -static unsigned inorder_to_tree(unsigned j, struct bset_tree *t) +static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t) { return __inorder_to_tree(j, t->size, t->extra); } @@ -468,11 +472,11 @@ void inorder_test(void) unsigned long done = 0; ktime_t start = ktime_get(); - for (unsigned size = 2; + for (unsigned int size = 2; size < 65536000; size++) { - unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1; - unsigned i = 1, j = rounddown_pow_of_two(size - 1); + unsigned int extra = (size - rounddown_pow_of_two(size - 1)) << 1; + unsigned int i = 1, j = rounddown_pow_of_two(size - 1); if (!(size % 4096)) printk(KERN_NOTICE "loop %u, %llu per us\n", size, @@ -518,30 +522,31 @@ void inorder_test(void) * of the previous key so we can walk backwards to it from t->tree[j]'s key. */ -static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline, - unsigned offset) +static struct bkey *cacheline_to_bkey(struct bset_tree *t, + unsigned int cacheline, + unsigned int offset) { return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8; } -static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k) +static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k) { return ((void *) k - (void *) t->data) / BSET_CACHELINE; } -static unsigned bkey_to_cacheline_offset(struct bset_tree *t, - unsigned cacheline, +static unsigned int bkey_to_cacheline_offset(struct bset_tree *t, + unsigned int cacheline, struct bkey *k) { return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0); } -static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j) +static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j) { return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m); } -static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j) +static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j) { return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]); } @@ -550,7 +555,7 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j) * For the write set - the one we're currently inserting keys into - we don't * maintain a full search tree, we just keep a simple lookup table in t->prev. */ -static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline) +static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline) { return cacheline_to_bkey(t, cacheline, t->prev[cacheline]); } @@ -576,14 +581,14 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift) * See make_bfloat() to check when most significant bit of f->exponent * is set or not. 
*/ -static inline unsigned bfloat_mantissa(const struct bkey *k, +static inline unsigned int bfloat_mantissa(const struct bkey *k, struct bkey_float *f) { const uint64_t *p = &k->low - (f->exponent >> 6); return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK; } -static void make_bfloat(struct bset_tree *t, unsigned j) +static void make_bfloat(struct bset_tree *t, unsigned int j) { struct bkey_float *f = &t->tree[j]; struct bkey *m = tree_to_bkey(t, j); @@ -631,7 +636,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j) static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t) { if (t != b->set) { - unsigned j = roundup(t[-1].size, + unsigned int j = roundup(t[-1].size, 64 / sizeof(struct bkey_float)); t->tree = t[-1].tree + j; @@ -686,13 +691,13 @@ void bch_bset_build_written_tree(struct btree_keys *b) { struct bset_tree *t = bset_tree_last(b); struct bkey *prev = NULL, *k = t->data->start; - unsigned j, cacheline = 1; + unsigned int j, cacheline = 1; b->last_set_unwritten = 0; bset_alloc_tree(b, t); - t->size = min_t(unsigned, + t->size = min_t(unsigned int, bkey_to_cacheline(t, bset_bkey_last(t->data)), b->set->tree + btree_keys_cachelines(b) - t->tree); @@ -732,7 +737,7 @@ EXPORT_SYMBOL(bch_bset_build_written_tree); void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k) { struct bset_tree *t; - unsigned inorder, j = 1; + unsigned int inorder, j = 1; for (t = b->set; t <= bset_tree_last(b); t++) if (k < bset_bkey_last(t->data)) @@ -779,8 +784,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b, struct bset_tree *t, struct bkey *k) { - unsigned shift = bkey_u64s(k); - unsigned j = bkey_to_cacheline(t, k); + unsigned int shift = bkey_u64s(k); + unsigned int j = bkey_to_cacheline(t, k); /* We're getting called from btree_split() or btree_gc, just bail out */ if (!t->size) @@ -867,10 +872,10 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where, } EXPORT_SYMBOL(bch_bset_insert); -unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k, +unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, struct bkey *replace_key) { - unsigned status = BTREE_INSERT_STATUS_NO_INSERT; + unsigned int status = BTREE_INSERT_STATUS_NO_INSERT; struct bset *i = bset_tree_last(b)->data; struct bkey *m, *prev = NULL; struct btree_iter iter; @@ -922,10 +927,10 @@ struct bset_search_iter { static struct bset_search_iter bset_search_write_set(struct bset_tree *t, const struct bkey *search) { - unsigned li = 0, ri = t->size; + unsigned int li = 0, ri = t->size; while (li + 1 != ri) { - unsigned m = (li + ri) >> 1; + unsigned int m = (li + ri) >> 1; if (bkey_cmp(table_to_bkey(t, m), search) > 0) ri = m; @@ -944,7 +949,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t, { struct bkey *l, *r; struct bkey_float *f; - unsigned inorder, j, n = 1; + unsigned int inorder, j, n = 1; do { /* @@ -958,7 +963,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t, * p = 0; * but a branch instruction is avoided. 
*/ - unsigned p = n << 4; + unsigned int p = n << 4; p &= ((int) (p - t->size)) >> 31; prefetch(&t->tree[p]); @@ -978,7 +983,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t, * to work - that's done in make_bfloat() */ if (likely(f->exponent != 127)) - n = j * 2 + (((unsigned) + n = j * 2 + (((unsigned int) (f->mantissa - bfloat_mantissa(search, f))) >> 31); else @@ -1184,7 +1189,8 @@ void bch_bset_sort_state_free(struct bset_sort_state *state) mempool_exit(&state->pool); } -int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order) +int bch_bset_sort_state_init(struct bset_sort_state *state, + unsigned int page_order) { spin_lock_init(&state->time.lock); @@ -1237,7 +1243,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out, } static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, - unsigned start, unsigned order, bool fixup, + unsigned int start, unsigned int order, bool fixup, struct bset_sort_state *state) { uint64_t start_time; @@ -1288,7 +1294,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, bch_time_stats_update(&state->time, start_time); } -void bch_btree_sort_partial(struct btree_keys *b, unsigned start, +void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, struct bset_sort_state *state) { size_t order = b->page_order, keys = 0; @@ -1298,7 +1304,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned start, __bch_btree_iter_init(b, &iter, NULL, &b->set[start]); if (start) { - unsigned i; + unsigned int i; for (i = start; i <= b->nsets; i++) keys += b->set[i].data->keys; @@ -1338,7 +1344,7 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new, void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state) { - unsigned crit = SORT_CRIT; + unsigned int crit = SORT_CRIT; int i; /* Don't sort if nothing to do */ @@ -1367,7 +1373,7 @@ EXPORT_SYMBOL(bch_btree_sort_lazy); void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats) { - unsigned i; + unsigned int i; for (i = 0; i <= b->nsets; i++) { struct bset_tree *t = &b->set[i]; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index b867f2200495..fdc296103113 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -163,10 +163,10 @@ struct bset_tree { */ /* size of the binary tree and prev array */ - unsigned size; + unsigned int size; /* function of size - precalculated for to_inorder() */ - unsigned extra; + unsigned int extra; /* copy of the last key in the set */ struct bkey end; @@ -211,7 +211,7 @@ struct btree_keys { const struct btree_keys_ops *ops; uint8_t page_order; uint8_t nsets; - unsigned last_set_unwritten:1; + unsigned int last_set_unwritten:1; bool *expensive_debug_checks; /* @@ -239,12 +239,12 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k) return !b->last_set_unwritten || k < b->set[b->nsets].data->start; } -static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i) +static inline unsigned int bset_byte_offset(struct btree_keys *b, struct bset *i) { return ((size_t) i) - ((size_t) b->set->data); } -static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i) +static inline unsigned int bset_sector_offset(struct btree_keys *b, struct bset *i) { return bset_byte_offset(b, i) >> 9; } @@ -273,7 +273,7 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b) } static inline struct bset *bset_next_set(struct btree_keys *b, - unsigned 
block_bytes) + unsigned int block_bytes) { struct bset *i = bset_tree_last(b)->data; @@ -281,7 +281,7 @@ static inline struct bset *bset_next_set(struct btree_keys *b, } void bch_btree_keys_free(struct btree_keys *); -int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t); +int bch_btree_keys_alloc(struct btree_keys *, unsigned int, gfp_t); void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *, bool *); @@ -290,7 +290,7 @@ void bch_bset_build_written_tree(struct btree_keys *); void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *); bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *); void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *); -unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *, +unsigned int bch_btree_insert_key(struct btree_keys *, struct bkey *, struct bkey *); enum { @@ -349,20 +349,20 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b, struct bset_sort_state { mempool_t pool; - unsigned page_order; - unsigned crit_factor; + unsigned int page_order; + unsigned int crit_factor; struct time_stats time; }; void bch_bset_sort_state_free(struct bset_sort_state *); -int bch_bset_sort_state_init(struct bset_sort_state *, unsigned); +int bch_bset_sort_state_init(struct bset_sort_state *, unsigned int); void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *); void bch_btree_sort_into(struct btree_keys *, struct btree_keys *, struct bset_sort_state *); void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *, struct bset_sort_state *); -void bch_btree_sort_partial(struct btree_keys *, unsigned, +void bch_btree_sort_partial(struct btree_keys *, unsigned int, struct bset_sort_state *); static inline void bch_btree_sort(struct btree_keys *b, @@ -383,7 +383,7 @@ void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *); #define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys) -static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx) +static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx) { return bkey_idx(i->start, idx); } @@ -402,7 +402,7 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l, } void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *, - unsigned); + unsigned int); bool __bch_cut_front(const struct bkey *, struct bkey *); bool __bch_cut_back(const struct bkey *, struct bkey *); @@ -524,7 +524,7 @@ static inline size_t bch_keylist_bytes(struct keylist *l) struct bkey *bch_keylist_pop(struct keylist *); void bch_keylist_pop_front(struct keylist *); -int __bch_keylist_realloc(struct keylist *, unsigned); +int __bch_keylist_realloc(struct keylist *, unsigned int); /* Debug stuff */ @@ -532,7 +532,7 @@ int __bch_keylist_realloc(struct keylist *, unsigned); int __bch_count_data(struct btree_keys *); void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...); -void bch_dump_bset(struct btree_keys *, struct bset *, unsigned); +void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int); void bch_dump_bucket(struct btree_keys *); #else @@ -541,7 +541,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; } static inline void __printf(2, 3) __bch_check_keys(struct btree_keys *b, const char *fmt, ...) 
{} static inline void bch_dump_bucket(struct btree_keys *b) {} -void bch_dump_bset(struct btree_keys *, struct bset *, unsigned); +void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int); #endif diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index c19f7716df88..96c39a8db895 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b) void bkey_put(struct cache_set *c, struct bkey *k) { - unsigned i; + unsigned int i; for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) @@ -479,7 +479,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent) void bch_btree_node_write(struct btree *b, struct closure *parent) { - unsigned nsets = b->keys.nsets; + unsigned int nsets = b->keys.nsets; lockdep_assert_held(&b->lock); @@ -581,7 +581,7 @@ static void mca_bucket_free(struct btree *b) list_move(&b->list, &b->c->btree_cache_freeable); } -static unsigned btree_order(struct bkey *k) +static unsigned int btree_order(struct bkey *k) { return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1); } @@ -589,7 +589,7 @@ static unsigned btree_order(struct bkey *k) static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) { if (!bch_btree_keys_alloc(&b->keys, - max_t(unsigned, + max_t(unsigned int, ilog2(b->c->btree_pages), btree_order(k)), gfp)) { @@ -620,7 +620,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, return b; } -static int mca_reap(struct btree *b, unsigned min_order, bool flush) +static int mca_reap(struct btree *b, unsigned int min_order, bool flush) { struct closure cl; @@ -786,7 +786,7 @@ void bch_btree_cache_free(struct cache_set *c) int bch_btree_cache_alloc(struct cache_set *c) { - unsigned i; + unsigned int i; for (i = 0; i < mca_reserve(c); i++) if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) @@ -1136,7 +1136,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b, static void make_btree_freeing_key(struct btree *b, struct bkey *k) { - unsigned i; + unsigned int i; mutex_lock(&b->c->bucket_lock); @@ -1157,7 +1157,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op) { struct cache_set *c = b->c; struct cache *ca; - unsigned i, reserve = (c->root->level - b->level) * 2 + 1; + unsigned int i, reserve = (c->root->level - b->level) * 2 + 1; mutex_lock(&c->bucket_lock); @@ -1181,7 +1181,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) { uint8_t stale = 0; - unsigned i; + unsigned int i; struct bucket *g; /* @@ -1219,7 +1219,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, SET_GC_MARK(g, GC_MARK_RECLAIMABLE); /* guard against overflow */ - SET_GC_SECTORS_USED(g, min_t(unsigned, + SET_GC_SECTORS_USED(g, min_t(unsigned int, GC_SECTORS_USED(g) + KEY_SIZE(k), MAX_GC_SECTORS_USED)); @@ -1233,7 +1233,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) { - unsigned i; + unsigned int i; for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i) && @@ -1259,7 +1259,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats) static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) { uint8_t stale = 0; - unsigned keys = 0, good_keys = 0; + unsigned int keys = 0, good_keys = 0; struct bkey *k; struct btree_iter iter; struct bset_tree *t; @@ -1302,7 +1302,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) struct 
gc_merge_info { struct btree *b; - unsigned keys; + unsigned int keys; }; static int bch_btree_insert_node(struct btree *, struct btree_op *, @@ -1311,7 +1311,7 @@ static int bch_btree_insert_node(struct btree *, struct btree_op *, static int btree_gc_coalesce(struct btree *b, struct btree_op *op, struct gc_stat *gc, struct gc_merge_info *r) { - unsigned i, nodes = 0, keys = 0, blocks; + unsigned int i, nodes = 0, keys = 0, blocks; struct btree *new_nodes[GC_MERGE_NODES]; struct keylist keylist; struct closure cl; @@ -1511,11 +1511,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, return -EINTR; } -static unsigned btree_gc_count_keys(struct btree *b) +static unsigned int btree_gc_count_keys(struct btree *b) { struct bkey *k; struct btree_iter iter; - unsigned ret = 0; + unsigned int ret = 0; for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) ret += bkey_u64s(k); @@ -1678,7 +1678,7 @@ static void btree_gc_start(struct cache_set *c) { struct cache *ca; struct bucket *b; - unsigned i; + unsigned int i; if (!c->gc_mark_valid) return; @@ -1704,7 +1704,7 @@ static void bch_btree_gc_finish(struct cache_set *c) { struct bucket *b; struct cache *ca; - unsigned i; + unsigned int i; mutex_lock(&c->bucket_lock); @@ -1722,7 +1722,7 @@ static void bch_btree_gc_finish(struct cache_set *c) struct bcache_device *d = c->devices[i]; struct cached_dev *dc; struct keybuf_key *w, *n; - unsigned j; + unsigned int j; if (!d || UUID_FLASH_ONLY(&c->uuids[i])) continue; @@ -1814,7 +1814,7 @@ static void bch_btree_gc(struct cache_set *c) static bool gc_should_run(struct cache_set *c) { struct cache *ca; - unsigned i; + unsigned int i; for_each_cache(ca, c, i) if (ca->invalidate_needs_gc) @@ -1905,7 +1905,7 @@ void bch_initial_gc_finish(struct cache_set *c) { struct cache *ca; struct bucket *b; - unsigned i; + unsigned int i; bch_btree_gc_finish(c); @@ -1945,7 +1945,7 @@ void bch_initial_gc_finish(struct cache_set *c) static bool btree_insert_key(struct btree *b, struct bkey *k, struct bkey *replace_key) { - unsigned status; + unsigned int status; BUG_ON(bkey_cmp(k, &b->key) > 0); @@ -2044,7 +2044,7 @@ static int btree_split(struct btree *b, struct btree_op *op, block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; if (split) { - unsigned keys = 0; + unsigned int keys = 0; trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); @@ -2300,7 +2300,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys, void bch_btree_set_root(struct btree *b) { - unsigned i; + unsigned int i; struct closure cl; closure_init_stack(&cl); @@ -2412,7 +2412,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, struct refill { struct btree_op op; - unsigned nr_found; + unsigned int nr_found; struct keybuf *buf; struct bkey *end; keybuf_pred_fn *pred; diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 68e9d926134d..8cbc0fd27738 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b) return bset_tree_last(&b->keys)->data; } -static inline unsigned bset_block_offset(struct btree *b, struct bset *i) +static inline unsigned int bset_block_offset(struct btree *b, struct bset *i) { return bset_sector_offset(&b->keys, i) >> b->c->block_bits; } @@ -213,7 +213,7 @@ struct btree_op { /* Btree level at which we start taking write locks */ short lock; - unsigned insert_collision:1; + unsigned int insert_collision:1; }; static inline void bch_btree_op_init(struct btree_op *op, int 
write_lock_level) diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 7c2c5bc7c88b..7f3594c0be14 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -159,7 +159,7 @@ struct closure { #define CLOSURE_MAGIC_DEAD 0xc054dead #define CLOSURE_MAGIC_ALIVE 0xc054a11e - unsigned magic; + unsigned int magic; struct list_head all; unsigned long ip; unsigned long waiting_on; diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 12034c07257b..0caad145902b 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -69,7 +69,7 @@ void bch_btree_verify(struct btree *b) sorted->start, (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) { struct bset *i; - unsigned j; + unsigned int j; console_lock(); @@ -80,7 +80,7 @@ void bch_btree_verify(struct btree *b) bch_dump_bset(&v->keys, sorted, 0); for_each_written_bset(b, ondisk, i) { - unsigned block = ((void *) i - (void *) ondisk) / + unsigned int block = ((void *) i - (void *) ondisk) / block_bytes(b->c); printk(KERN_ERR "*** on disk block %u:\n", block); @@ -176,7 +176,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf, while (size) { struct keybuf_key *w; - unsigned bytes = min(i->bytes, size); + unsigned int bytes = min(i->bytes, size); int err = copy_to_user(buf, i->buf, bytes); if (err) diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 1d096742eb41..e96ba928eeb6 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l, static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) { - unsigned i; + unsigned int i; for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) { @@ -67,7 +67,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) { - unsigned i; + unsigned int i; for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) { @@ -96,7 +96,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) void bch_extent_to_text(char *buf, size_t size, const struct bkey *k) { - unsigned i = 0; + unsigned int i = 0; char *out = buf, *end = buf + size; #define p(...) 
(out += scnprintf(out, end - out, __VA_ARGS__)) @@ -126,7 +126,7 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k) static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k) { struct btree *b = container_of(keys, struct btree, keys); - unsigned j; + unsigned int j; char buf[80]; bch_extent_to_text(buf, sizeof(buf), k); @@ -171,7 +171,7 @@ static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k) static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) { - unsigned i; + unsigned int i; char buf[80]; struct bucket *g; @@ -204,7 +204,7 @@ err: static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k) { struct btree *b = container_of(bk, struct btree, keys); - unsigned i; + unsigned int i; if (!bkey_cmp(k, &ZERO_KEY) || !KEY_PTRS(k) || @@ -327,7 +327,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b, struct cache_set *c = container_of(b, struct btree, keys)->c; uint64_t old_offset; - unsigned old_size, sectors_found = 0; + unsigned int old_size, sectors_found = 0; BUG_ON(!KEY_OFFSET(insert)); BUG_ON(!KEY_SIZE(insert)); @@ -363,7 +363,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b, * k might have been split since we inserted/found the * key we're replacing */ - unsigned i; + unsigned int i; uint64_t offset = KEY_START(k) - KEY_START(replace_key); @@ -502,7 +502,7 @@ static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) } static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, - unsigned ptr) + unsigned int ptr) { struct bucket *g = PTR_BUCKET(b->c, k, ptr); char buf[80]; @@ -534,7 +534,7 @@ err: static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) { struct btree *b = container_of(bk, struct btree, keys); - unsigned i, stale; + unsigned int i, stale; if (!KEY_PTRS(k) || bch_extent_invalid(bk, k)) @@ -577,7 +577,7 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r) static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r) { struct btree *b = container_of(bk, struct btree, keys); - unsigned i; + unsigned int i; if (key_merging_disabled(b->c)) return false; diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 9612873afee2..c6b41a09f550 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -42,7 +42,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) } void bch_submit_bbio(struct bio *bio, struct cache_set *c, - struct bkey *k, unsigned ptr) + struct bkey *k, unsigned int ptr) { struct bbio *b = container_of(bio, struct bbio, bio); bch_bkey_copy_single_ptr(&b->key, k, ptr); @@ -52,7 +52,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c, /* IO errors */ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) { - unsigned errors; + unsigned int errors; WARN_ONCE(!dc, "NULL pointer of struct cached_dev"); @@ -75,12 +75,12 @@ void bch_count_io_errors(struct cache *ca, */ if (ca->set->error_decay) { - unsigned count = atomic_inc_return(&ca->io_count); + unsigned int count = atomic_inc_return(&ca->io_count); while (count > ca->set->error_decay) { - unsigned errors; - unsigned old = count; - unsigned new = count - ca->set->error_decay; + unsigned int errors; + unsigned int old = count; + unsigned int new = count - ca->set->error_decay; /* * First we subtract refresh from count; each time we @@ -104,7 +104,7 @@ void bch_count_io_errors(struct cache *ca, } if (error) { - unsigned errors = atomic_add_return(1 << 
IO_ERROR_SHIFT, + unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT, &ca->io_errors); errors >>= IO_ERROR_SHIFT; @@ -126,12 +126,12 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, struct cache *ca = PTR_CACHE(c, &b->key, 0); int is_read = (bio_data_dir(bio) == READ ? 1 : 0); - unsigned threshold = op_is_write(bio_op(bio)) + unsigned int threshold = op_is_write(bio_op(bio)) ? c->congested_write_threshold_us : c->congested_read_threshold_us; if (threshold) { - unsigned t = local_clock_us(); + unsigned int t = local_clock_us(); int us = t - b->submit_time_us; int congested = atomic_read(&c->congested); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 10748c626a1d..ee61062b58fc 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -32,7 +32,7 @@ static void journal_read_endio(struct bio *bio) } static int journal_read_bucket(struct cache *ca, struct list_head *list, - unsigned bucket_index) + unsigned int bucket_index) { struct journal_device *ja = &ca->journal; struct bio *bio = &ja->bio; @@ -40,7 +40,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, struct journal_replay *i; struct jset *j, *data = ca->set->journal.w[0].data; struct closure cl; - unsigned len, left, offset = 0; + unsigned int len, left, offset = 0; int ret = 0; sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]); @@ -50,7 +50,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, while (offset < ca->sb.bucket_size) { reread: left = ca->sb.bucket_size - offset; - len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS); + len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS); bio_reset(bio); bio->bi_iter.bi_sector = bucket + offset; @@ -154,12 +154,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) }) struct cache *ca; - unsigned iter; + unsigned int iter; for_each_cache(ca, c, iter) { struct journal_device *ja = &ca->journal; DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS); - unsigned i, l, r, m; + unsigned int i, l, r, m; uint64_t seq; bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); @@ -304,7 +304,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list) k < bset_bkey_last(&i->j); k = bkey_next(k)) if (!__bch_extent_invalid(c, k)) { - unsigned j; + unsigned int j; for (j = 0; j < KEY_PTRS(k); j++) if (ptr_available(c, k, j)) @@ -492,7 +492,7 @@ static void journal_reclaim(struct cache_set *c) struct bkey *k = &c->journal.key; struct cache *ca; uint64_t last_seq; - unsigned iter, n = 0; + unsigned int iter, n = 0; atomic_t p __maybe_unused; atomic_long_inc(&c->reclaim); @@ -526,7 +526,7 @@ static void journal_reclaim(struct cache_set *c) for_each_cache(ca, c, iter) { struct journal_device *ja = &ca->journal; - unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; + unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; /* No space available on this device */ if (next == ja->discard_idx) @@ -609,7 +609,7 @@ static void journal_write_unlocked(struct closure *cl) struct cache *ca; struct journal_write *w = c->journal.cur; struct bkey *k = &c->journal.key; - unsigned i, sectors = set_blocks(w->data, block_bytes(c)) * + unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) * c->sb.block_size; struct bio *bio; @@ -705,7 +705,7 @@ static void journal_try_write(struct cache_set *c) } static struct journal_write *journal_wait_for_write(struct cache_set *c, - unsigned nkeys) + unsigned int nkeys) __acquires(&c->journal.lock) { size_t 
sectors; diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index b5788199188f..f0982731ae20 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -110,7 +110,7 @@ struct journal { struct delayed_work work; /* Number of blocks free in the bucket(s) we're currently writing to */ - unsigned blocks_free; + unsigned int blocks_free; uint64_t seq; DECLARE_FIFO(atomic_t, pin); @@ -131,13 +131,13 @@ struct journal_device { uint64_t seq[SB_JOURNAL_BUCKETS]; /* Journal bucket we're currently writing to */ - unsigned cur_idx; + unsigned int cur_idx; /* Last journal bucket that still contains an open journal entry */ - unsigned last_idx; + unsigned int last_idx; /* Next journal bucket to be discarded */ - unsigned discard_idx; + unsigned int discard_idx; #define DISCARD_READY 0 #define DISCARD_IN_FLIGHT 1 diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index a24c3a95b2c0..0790d710f911 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -23,7 +23,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) { struct cache_set *c = container_of(buf, struct cache_set, moving_gc_keys); - unsigned i; + unsigned int i; for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i) && @@ -186,7 +186,7 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) return GC_SECTORS_USED(l) < GC_SECTORS_USED(r); } -static unsigned bucket_heap_top(struct cache *ca) +static unsigned int bucket_heap_top(struct cache *ca) { struct bucket *b; return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; @@ -196,7 +196,7 @@ void bch_moving_gc(struct cache_set *c) { struct cache *ca; struct bucket *b; - unsigned i; + unsigned int i; if (!c->copy_gc_enabled) return; @@ -204,9 +204,9 @@ void bch_moving_gc(struct cache_set *c) mutex_lock(&c->bucket_lock); for_each_cache(ca, c, i) { - unsigned sectors_to_move = 0; - unsigned reserve_sectors = ca->sb.bucket_size * - fifo_used(&ca->free[RESERVE_MOVINGGC]); + unsigned int sectors_to_move = 0; + unsigned int reserve_sectors = ca->sb.bucket_size * + fifo_used(&ca->free[RESERVE_MOVINGGC]); ca->heap.used = 0; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 7dbe8b6316a0..6e1a60dd1742 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -27,7 +27,7 @@ struct kmem_cache *bch_search_cache; static void bch_data_insert_start(struct closure *); -static unsigned cache_mode(struct cached_dev *dc) +static unsigned int cache_mode(struct cached_dev *dc) { return BDEV_CACHE_MODE(&dc->sb); } @@ -98,7 +98,7 @@ static void bch_data_insert_keys(struct closure *cl) closure_return(cl); } -static int bch_keylist_realloc(struct keylist *l, unsigned u64s, +static int bch_keylist_realloc(struct keylist *l, unsigned int u64s, struct cache_set *c) { size_t oldsize = bch_keylist_nkeys(l); @@ -125,7 +125,7 @@ static void bch_data_invalidate(struct closure *cl) bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); while (bio_sectors(bio)) { - unsigned sectors = min(bio_sectors(bio), + unsigned int sectors = min(bio_sectors(bio), 1U << (KEY_SIZE_BITS - 1)); if (bch_keylist_realloc(&op->insert_keys, 2, op->c)) @@ -211,7 +211,7 @@ static void bch_data_insert_start(struct closure *cl) bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA); do { - unsigned i; + unsigned int i; struct bkey *k; struct bio_set *split = &op->c->bio_split; @@ -328,7 +328,7 @@ void bch_data_insert(struct closure *cl) /* Congested? 
*/ -unsigned bch_get_congested(struct cache_set *c) +unsigned int bch_get_congested(struct cache_set *c) { int i; long rand; @@ -372,8 +372,8 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) { struct cache_set *c = dc->disk.c; - unsigned mode = cache_mode(dc); - unsigned sectors, congested = bch_get_congested(c); + unsigned int mode = cache_mode(dc); + unsigned int sectors, congested = bch_get_congested(c); struct task_struct *task = current; struct io *i; @@ -469,11 +469,11 @@ struct search { struct bio *cache_miss; struct bcache_device *d; - unsigned insert_bio_sectors; - unsigned recoverable:1; - unsigned write:1; - unsigned read_dirty_data:1; - unsigned cache_missed:1; + unsigned int insert_bio_sectors; + unsigned int recoverable:1; + unsigned int write:1; + unsigned int read_dirty_data:1; + unsigned int cache_missed:1; unsigned long start_time; @@ -514,15 +514,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) struct search *s = container_of(op, struct search, op); struct bio *n, *bio = &s->bio.bio; struct bkey *bio_key; - unsigned ptr; + unsigned int ptr; if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) return MAP_CONTINUE; if (KEY_INODE(k) != s->iop.inode || KEY_START(k) > bio->bi_iter.bi_sector) { - unsigned bio_sectors = bio_sectors(bio); - unsigned sectors = KEY_INODE(k) == s->iop.inode + unsigned int bio_sectors = bio_sectors(bio); + unsigned int sectors = KEY_INODE(k) == s->iop.inode ? min_t(uint64_t, INT_MAX, KEY_START(k) - bio->bi_iter.bi_sector) : INT_MAX; @@ -856,10 +856,10 @@ static void cached_dev_read_done_bh(struct closure *cl) } static int cached_dev_cache_miss(struct btree *b, struct search *s, - struct bio *bio, unsigned sectors) + struct bio *bio, unsigned int sectors) { int ret = MAP_CONTINUE; - unsigned reada = 0; + unsigned int reada = 0; struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct bio *miss, *cache_bio; @@ -1226,7 +1226,7 @@ static int cached_dev_congested(void *data, int bits) return 1; if (cached_dev_get(dc)) { - unsigned i; + unsigned int i; struct cache *ca; for_each_cache(ca, d->c, i) { @@ -1253,9 +1253,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc) /* Flash backed devices */ static int flash_dev_cache_miss(struct btree *b, struct search *s, - struct bio *bio, unsigned sectors) + struct bio *bio, unsigned int sectors) { - unsigned bytes = min(sectors, bio_sectors(bio)) << 9; + unsigned int bytes = min(sectors, bio_sectors(bio)) << 9; swap(bio->bi_iter.bi_size, bytes); zero_fill_bio(bio); @@ -1338,7 +1338,7 @@ static int flash_dev_congested(void *data, int bits) struct bcache_device *d = data; struct request_queue *q; struct cache *ca; - unsigned i; + unsigned int i; int ret = 0; for_each_cache(ca, d->c, i) { diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index dea0886b81c1..8e8c1ce00d9d 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -8,7 +8,7 @@ struct data_insert_op { struct bio *bio; struct workqueue_struct *wq; - unsigned inode; + unsigned int inode; uint16_t write_point; uint16_t write_prio; blk_status_t status; @@ -17,15 +17,15 @@ struct data_insert_op { uint16_t flags; struct { - unsigned bypass:1; - unsigned writeback:1; - unsigned flush_journal:1; - unsigned csum:1; + unsigned int bypass:1; + unsigned int writeback:1; + unsigned int flush_journal:1; + unsigned int csum:1; - unsigned replace:1; - 
unsigned replace_collision:1; + unsigned int replace:1; + unsigned int replace_collision:1; - unsigned insert_data_done:1; + unsigned int insert_data_done:1; }; }; @@ -33,7 +33,7 @@ struct data_insert_op { BKEY_PADDED(replace_key); }; -unsigned bch_get_congested(struct cache_set *); +unsigned int bch_get_congested(struct cache_set *); void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index be119326297b..2331a0d5aa28 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -33,11 +33,11 @@ * stored left shifted by 16, and scaled back in the sysfs show() function. */ -static const unsigned DAY_RESCALE = 288; -static const unsigned HOUR_RESCALE = 12; -static const unsigned FIVE_MINUTE_RESCALE = 1; -static const unsigned accounting_delay = (HZ * 300) / 22; -static const unsigned accounting_weight = 32; +static const unsigned int DAY_RESCALE = 288; +static const unsigned int HOUR_RESCALE = 12; +static const unsigned int FIVE_MINUTE_RESCALE = 1; +static const unsigned int accounting_delay = (HZ * 300) / 22; +static const unsigned int accounting_weight = 32; /* sysfs reading/writing */ @@ -152,7 +152,7 @@ static void scale_accounting(struct timer_list *t) struct cache_accounting *acc = from_timer(acc, t, timer); #define move_stat(name) do { \ - unsigned t = atomic_xchg(&acc->collector.name, 0); \ + unsigned int t = atomic_xchg(&acc->collector.name, 0); \ t <<= 16; \ acc->five_minute.name += t; \ acc->hour.name += t; \ diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index 0b70f9de0c03..77234a89dd69 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -23,7 +23,7 @@ struct cache_stats { unsigned long cache_miss_collisions; unsigned long sectors_bypassed; - unsigned rescale; + unsigned int rescale; }; struct cache_accounting { diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 55a37641aa95..4ab1b1968d9a 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -61,7 +61,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, const char *err; struct cache_sb *s; struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); - unsigned i; + unsigned int i; if (!bh) return "IO error"; @@ -202,7 +202,7 @@ static void write_bdev_super_endio(struct bio *bio) static void __write_super(struct cache_sb *sb, struct bio *bio) { struct cache_sb *out = page_address(bio_first_page_all(bio)); - unsigned i; + unsigned int i; bio->bi_iter.bi_sector = SB_SECTOR; bio->bi_iter.bi_size = SB_SIZE; @@ -282,7 +282,7 @@ void bcache_write_super(struct cache_set *c) { struct closure *cl = &c->sb_write; struct cache *ca; - unsigned i; + unsigned int i; down(&c->sb_write_mutex); closure_init(cl, &c->cl); @@ -334,7 +334,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, { struct closure *cl = &c->uuid_write; struct uuid_entry *u; - unsigned i; + unsigned int i; char buf[80]; BUG_ON(!parent); @@ -587,7 +587,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) struct prio_set *p = ca->disk_buckets; struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; struct bucket *b; - unsigned bucket_nr = 0; + unsigned int bucket_nr = 0; for (b = ca->buckets; b < ca->buckets + ca->sb.nbuckets; @@ -662,7 +662,7 @@ static void bcache_device_unlink(struct bcache_device *d) lockdep_assert_held(&bch_register_lock); if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) 
{ - unsigned i; + unsigned int i; struct cache *ca; sysfs_remove_link(&d->c->kobj, d->name); @@ -676,7 +676,7 @@ static void bcache_device_unlink(struct bcache_device *d) static void bcache_device_link(struct bcache_device *d, struct cache_set *c, const char *name) { - unsigned i; + unsigned int i; struct cache *ca; for_each_cache(ca, d->c, i) @@ -715,7 +715,7 @@ static void bcache_device_detach(struct bcache_device *d) } static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, - unsigned id) + unsigned int id) { d->id = id; d->c = c; @@ -762,7 +762,7 @@ static void bcache_device_free(struct bcache_device *d) closure_debug_destroy(&d->cl); } -static int bcache_device_init(struct bcache_device *d, unsigned block_size, +static int bcache_device_init(struct bcache_device *d, unsigned int block_size, sector_t sectors) { struct request_queue *q; @@ -778,7 +778,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, if (!d->nr_stripes || d->nr_stripes > max_stripes) { pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", - (unsigned)d->nr_stripes); + (unsigned int)d->nr_stripes); return -ENOMEM; } @@ -1212,7 +1212,7 @@ static void cached_dev_flush(struct closure *cl) continue_at(cl, cached_dev_free, system_wq); } -static int cached_dev_init(struct cached_dev *dc, unsigned block_size) +static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) { int ret; struct io *io; @@ -1489,7 +1489,7 @@ static void cache_set_free(struct closure *cl) { struct cache_set *c = container_of(cl, struct cache_set, cl); struct cache *ca; - unsigned i; + unsigned int i; if (!IS_ERR_OR_NULL(c->debug)) debugfs_remove(c->debug); @@ -1532,7 +1532,7 @@ static void cache_set_flush(struct closure *cl) struct cache_set *c = container_of(cl, struct cache_set, caching); struct cache *ca; struct btree *b; - unsigned i; + unsigned int i; bch_cache_accounting_destroy(&c->accounting); @@ -1762,7 +1762,7 @@ static void run_cache_set(struct cache_set *c) struct cached_dev *dc, *t; struct cache *ca; struct closure cl; - unsigned i; + unsigned int i; closure_init_stack(&cl); @@ -1853,7 +1853,7 @@ static void run_cache_set(struct cache_set *c) pr_notice("invalidating existing data"); for_each_cache(ca, c, i) { - unsigned j; + unsigned int j; ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, 2, SB_JOURNAL_BUCKETS); @@ -1998,7 +1998,7 @@ err: void bch_cache_release(struct kobject *kobj) { struct cache *ca = container_of(kobj, struct cache, kobj); - unsigned i; + unsigned int i; if (ca->set) { BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); @@ -2150,7 +2150,7 @@ static bool bch_is_open_backing(struct block_device *bdev) { static bool bch_is_open_cache(struct block_device *bdev) { struct cache_set *c, *tc; struct cache *ca; - unsigned i; + unsigned int i; list_for_each_entry_safe(c, tc, &bch_cache_sets, list) for_each_cache(ca, c, i) diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 81d3520b0702..3f2b7964d6a9 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -307,7 +307,7 @@ STORE(__cached_dev) if (v < 0) return v; - if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) { + if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) { SET_BDEV_CACHE_MODE(&dc->sb, v); bch_write_bdev_super(dc, NULL); } @@ -533,9 +533,9 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf) op.stats.floats, op.stats.failed); } -static unsigned bch_root_usage(struct cache_set *c) +static unsigned int bch_root_usage(struct cache_set 
*c) { - unsigned bytes = 0; + unsigned int bytes = 0; struct bkey *k; struct btree *b; struct btree_iter iter; @@ -570,9 +570,9 @@ static size_t bch_cache_size(struct cache_set *c) return ret; } -static unsigned bch_cache_max_chain(struct cache_set *c) +static unsigned int bch_cache_max_chain(struct cache_set *c) { - unsigned ret = 0; + unsigned int ret = 0; struct hlist_head *h; mutex_lock(&c->bucket_lock); @@ -580,7 +580,7 @@ static unsigned bch_cache_max_chain(struct cache_set *c) for (h = c->bucket_hash; h < c->bucket_hash + (1 << BUCKET_HASH_BITS); h++) { - unsigned i = 0; + unsigned int i = 0; struct hlist_node *p; hlist_for_each(p, h) @@ -593,13 +593,13 @@ static unsigned bch_cache_max_chain(struct cache_set *c) return ret; } -static unsigned bch_btree_used(struct cache_set *c) +static unsigned int bch_btree_used(struct cache_set *c) { return div64_u64(c->gc_stats.key_bytes * 100, (c->gc_stats.nodes ?: 1) * btree_bytes(c)); } -static unsigned bch_average_key_size(struct cache_set *c) +static unsigned int bch_average_key_size(struct cache_set *c) { return c->gc_stats.nkeys ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) @@ -996,7 +996,7 @@ STORE(__bch_cache) if (v < 0) return v; - if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) { + if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) { mutex_lock(&ca->set->bucket_lock); SET_CACHE_REPLACEMENT(&ca->sb, v); mutex_unlock(&ca->set->bucket_lock); diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index f7b0133c9d2f..484044231f21 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -347,7 +347,7 @@ static inline int bch_strtoul_h(const char *cp, long *res) snprintf(buf, size, \ __builtin_types_compatible_p(typeof(var), int) \ ? "%i\n" : \ - __builtin_types_compatible_p(typeof(var), unsigned) \ + __builtin_types_compatible_p(typeof(var), unsigned int) \ ? "%u\n" : \ __builtin_types_compatible_p(typeof(var), long) \ ? "%li\n" : \ @@ -379,7 +379,7 @@ struct time_stats { void bch_time_stats_update(struct time_stats *stats, uint64_t time); -static inline unsigned local_clock_us(void) +static inline unsigned int local_clock_us(void) { return local_clock() >> 10; } @@ -543,9 +543,10 @@ dup: \ container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member) /* Does linear interpolation between powers of two */ -static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits) +static inline unsigned int fract_exp_two(unsigned int x, + unsigned int fract_bits) { - unsigned fract = x & ~(~0 << fract_bits); + unsigned int fract = x & ~(~0 << fract_bits); x >>= fract_bits; x = 1 << x; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 481d4cf38ac0..39ee38ffb2db 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -215,7 +215,8 @@ static void update_writeback_rate(struct work_struct *work) smp_mb(); } -static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) +static unsigned int writeback_delay(struct cached_dev *dc, + unsigned int sectors) { if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || !dc->writeback_percent) @@ -263,7 +264,7 @@ static void write_dirty_finish(struct closure *cl) /* This is kind of a dumb way of signalling errors. 
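One helper in the util.h hunk above deserves a note: fract_exp_two() linearly interpolates between powers of two, with the low fract_bits of x acting as the fractional part. The diff context cuts off after x = 1 << x, so the interpolation step in this standalone sketch is reconstructed from the upstream helper and should be read as an approximation, not the patched source.

#include <stdio.h>

/*
 * The high bits of x select a power of two (2^n); the low
 * fract_bits select how far to move toward 2^(n+1).
 */
static unsigned int fract_exp_two(unsigned int x, unsigned int fract_bits)
{
	unsigned int fract = x & ~(~0U << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;	/* reconstructed interpolation step */

	return x;
}

int main(void)
{
	/* fract_bits = 3, x = 20: n = 2, fraction 4/8, halfway from 4 to 8 */
	printf("%u\n", fract_exp_two(20, 3));	/* prints 6 */
	return 0;
}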
*/ if (KEY_DIRTY(&w->key)) { int ret; - unsigned i; + unsigned int i; struct keylist keys; bch_keylist_init(&keys); @@ -377,7 +378,7 @@ static void read_dirty_submit(struct closure *cl) static void read_dirty(struct cached_dev *dc) { - unsigned delay = 0; + unsigned int delay = 0; struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w; size_t size; int nk, i; @@ -498,11 +499,11 @@ err: /* Scan for dirty data */ -void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, +void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, uint64_t offset, int nr_sectors) { struct bcache_device *d = c->devices[inode]; - unsigned stripe_offset, stripe, sectors_dirty; + unsigned int stripe_offset, stripe, sectors_dirty; if (!d) return; @@ -514,7 +515,7 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, stripe_offset = offset & (d->stripe_size - 1); while (nr_sectors) { - int s = min_t(unsigned, abs(nr_sectors), + int s = min_t(unsigned int, abs(nr_sectors), d->stripe_size - stripe_offset); if (nr_sectors < 0) @@ -548,7 +549,7 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k) static void refill_full_stripes(struct cached_dev *dc) { struct keybuf *buf = &dc->writeback_keys; - unsigned start_stripe, stripe, next_stripe; + unsigned int start_stripe, stripe, next_stripe; bool wrapped = false; stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); @@ -688,7 +689,7 @@ static int bch_writeback_thread(void *arg) read_dirty(dc); if (searched_full_index) { - unsigned delay = dc->writeback_delay * HZ; + unsigned int delay = dc->writeback_delay * HZ; while (delay && !kthread_should_stop() && @@ -712,7 +713,7 @@ static int bch_writeback_thread(void *arg) struct sectors_dirty_init { struct btree_op op; - unsigned inode; + unsigned int inode; size_t count; struct bkey start; }; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 3745d7004c47..76b691850c98 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -28,7 +28,7 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) return ret; } -static inline unsigned offset_to_stripe(struct bcache_device *d, +static inline unsigned int offset_to_stripe(struct bcache_device *d, uint64_t offset) { do_div(offset, d->stripe_size); @@ -37,9 +37,9 @@ static inline unsigned offset_to_stripe(struct bcache_device *d, static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, - unsigned nr_sectors) + unsigned int nr_sectors) { - unsigned stripe = offset_to_stripe(&dc->disk, offset); + unsigned int stripe = offset_to_stripe(&dc->disk, offset); while (1) { if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) @@ -54,9 +54,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, } static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, - unsigned cache_mode, bool would_skip) + unsigned int cache_mode, bool would_skip) { - unsigned in_use = dc->disk.c->gc_stats.in_use; + unsigned int in_use = dc->disk.c->gc_stats.in_use; if (cache_mode != CACHE_MODE_WRITEBACK || test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || @@ -96,7 +96,7 @@ static inline void bch_writeback_add(struct cached_dev *dc) } } -void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); +void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned int, uint64_t, int); void bch_sectors_dirty_init(struct bcache_device *); void bch_cached_dev_writeback_init(struct cached_dev *); diff 
--git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 8d19e02d752a..6bdcb48ee8cf 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -30,10 +30,10 @@ struct bkey { BITMASK(name, struct bkey, field, offset, size) #define PTR_FIELD(name, offset, size) \ -static inline __u64 name(const struct bkey *k, unsigned i) \ +static inline __u64 name(const struct bkey *k, unsigned int i) \ { return (k->ptr[i] >> offset) & ~(~0ULL << size); } \ \ -static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \ +static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \ { \ k->ptr[i] &= ~(~(~0ULL << size) << offset); \ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \ @@ -120,7 +120,7 @@ static inline struct bkey *bkey_next(const struct bkey *k) return (struct bkey *) (d + bkey_u64s(k)); } -static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys) +static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys) { __u64 *d = (void *) k; return (struct bkey *) (d + nr_keys); -- cgit v1.2.3 From 1fae7cf05293d3a2c9e59c1bc59372322386467c Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:45 +0800 Subject: bcache: style fix to add a blank line after declarations Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/alloc.c | 3 +++ drivers/md/bcache/bcache.h | 1 + drivers/md/bcache/bset.c | 5 ++++- drivers/md/bcache/btree.c | 7 +++++++ drivers/md/bcache/closure.c | 1 + drivers/md/bcache/debug.c | 4 ++-- drivers/md/bcache/extents.c | 5 ++++- drivers/md/bcache/io.c | 4 +++- drivers/md/bcache/journal.c | 2 ++ drivers/md/bcache/movinggc.c | 2 ++ drivers/md/bcache/request.c | 5 ++++- drivers/md/bcache/stats.c | 3 +++ drivers/md/bcache/super.c | 13 ++++++++++++- drivers/md/bcache/sysfs.c | 5 +++++ drivers/md/bcache/util.c | 1 + drivers/md/bcache/writeback.c | 1 + include/uapi/linux/bcache.h | 2 ++ 17 files changed, 57 insertions(+), 7 deletions(-) diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 89f663d22551..7a28232d868b 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca) while (!fifo_full(&ca->free_inc)) { size_t n; + get_random_bytes(&n, sizeof(n)); n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket); @@ -514,6 +515,7 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, struct bkey *k, int n, bool wait) { int ret; + mutex_lock(&c->bucket_lock); ret = __bch_bucket_alloc_set(c, reserve, k, n, wait); mutex_unlock(&c->bucket_lock); @@ -706,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c) for (i = 0; i < MAX_OPEN_BUCKETS; i++) { struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); + if (!b) return -ENOMEM; diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 1ebd2d9d90d5..fd74dd075951 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -783,6 +783,7 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c, static inline uint8_t gen_after(uint8_t a, uint8_t b) { uint8_t r = a - b; + return r > 128U ? 
0 : r; } diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index dfda7e9efc3e..6fd5623b2e63 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -585,6 +585,7 @@ static inline unsigned int bfloat_mantissa(const struct bkey *k, struct bkey_float *f) { const uint64_t *p = &k->low - (f->exponent >> 6); + return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK; } @@ -964,6 +965,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t, * but a branch instruction is avoided. */ unsigned int p = n << 4; + p &= ((int) (p - t->size)) >> 31; prefetch(&t->tree[p]); @@ -1114,6 +1116,7 @@ static struct bkey *__bch_btree_iter_init(struct btree_keys *b, struct bset_tree *start) { struct bkey *ret = NULL; + iter->size = ARRAY_SIZE(iter->data); iter->used = 0; @@ -1329,8 +1332,8 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new, struct bset_sort_state *state) { uint64_t start_time = local_clock(); - struct btree_iter iter; + bch_btree_iter_init(b, &iter, NULL); btree_mergesort(b, new->set->data, &iter, false, true); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 96c39a8db895..4003f92f4d2c 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -287,6 +287,7 @@ err: static void btree_node_read_endio(struct bio *bio) { struct closure *cl = bio->bi_private; + closure_put(cl); } @@ -604,6 +605,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, struct bkey *k, gfp_t gfp) { struct btree *b = kzalloc(sizeof(struct btree), gfp); + if (!b) return NULL; @@ -746,6 +748,7 @@ void bch_btree_cache_free(struct cache_set *c) { struct btree *b; struct closure cl; + closure_init_stack(&cl); if (c->shrink.list.next) @@ -1124,6 +1127,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b, struct btree_op *op) { struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); + if (!IS_ERR_OR_NULL(n)) { mutex_lock(&n->write_lock); bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); @@ -2488,6 +2492,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, if (!RB_EMPTY_ROOT(&buf->keys)) { struct keybuf_key *w; + w = RB_FIRST(&buf->keys, struct keybuf_key, node); buf->start = START_KEY(&w->key); @@ -2519,6 +2524,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, { bool ret = false; struct keybuf_key *p, *w, s; + s.key = *start; if (bkey_cmp(end, &buf->start) <= 0 || @@ -2545,6 +2551,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, struct keybuf_key *bch_keybuf_next(struct keybuf *buf) { struct keybuf_key *w; + spin_lock(&buf->lock); w = RB_FIRST(&buf->keys, struct keybuf_key, node); diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 618253683d40..8570fc426e31 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -162,6 +162,7 @@ static struct dentry *closure_debug; static int debug_seq_show(struct seq_file *f, void *data) { struct closure *cl; + spin_lock_irq(&closure_list_lock); list_for_each_entry(cl, &closure_list, all) { diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 0caad145902b..f0eb37a14dab 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -177,8 +177,8 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf, while (size) { struct keybuf_key *w; unsigned int bytes = min(i->bytes, size); - int err = copy_to_user(buf, i->buf, bytes); + if (err) return err; @@ -237,8 +237,8 @@ void 
bch_debug_init_cache_set(struct cache_set *c) { if (!IS_ERR_OR_NULL(bcache_debug)) { char name[50]; - snprintf(name, 50, "bcache-%pU", c->sb.set_uuid); + snprintf(name, 50, "bcache-%pU", c->sb.set_uuid); c->debug = debugfs_create_file(name, 0400, bcache_debug, c, &cache_set_debug_ops); } diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index e96ba928eeb6..8f5de61e1a90 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -134,8 +134,8 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k) for (j = 0; j < KEY_PTRS(k); j++) { size_t n = PTR_BUCKET_NR(b->c, k, j); - printk(" bucket %zu", n); + printk(" bucket %zu", n); if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) printk(" prio %i", PTR_BUCKET(b->c, k, j)->prio); @@ -166,6 +166,7 @@ bad: static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k) { struct btree *b = container_of(bk, struct btree, keys); + return __bch_btree_ptr_invalid(b->c, k); } @@ -334,6 +335,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b, while (1) { struct bkey *k = bch_btree_iter_next(iter); + if (!k) break; @@ -498,6 +500,7 @@ bad: static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) { struct btree *b = container_of(bk, struct btree, keys); + return __bch_extent_invalid(b->c, k); } diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index c6b41a09f550..cfc56add799a 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -17,6 +17,7 @@ void bch_bbio_free(struct bio *bio, struct cache_set *c) { struct bbio *b = container_of(bio, struct bbio, bio); + mempool_free(b, &c->bio_meta); } @@ -45,6 +46,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c, struct bkey *k, unsigned int ptr) { struct bbio *b = container_of(bio, struct bbio, bio); + bch_bkey_copy_single_ptr(&b->key, k, ptr); __bch_submit_bbio(bio, c); } @@ -132,12 +134,12 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, if (threshold) { unsigned int t = local_clock_us(); - int us = t - b->submit_time_us; int congested = atomic_read(&c->congested); if (us > (int) threshold) { int ms = us / 1024; + c->congested_last_us = t; ms = min(ms, CONGESTED_MAX + congested); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ee61062b58fc..301cbb43a78f 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -28,6 +28,7 @@ static void journal_read_endio(struct bio *bio) { struct closure *cl = bio->bi_private; + closure_put(cl); } @@ -614,6 +615,7 @@ static void journal_write_unlocked(struct closure *cl) struct bio *bio; struct bio_list list; + bio_list_init(&list); if (!w->need_write) { diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 0790d710f911..7891fb512736 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -38,6 +38,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) static void moving_io_destructor(struct closure *cl) { struct moving_io *io = container_of(cl, struct moving_io, cl); + kfree(io); } @@ -189,6 +190,7 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) static unsigned int bucket_heap_top(struct cache *ca) { struct bucket *b; + return (b = heap_peek(&ca->heap)) ? 
GC_SECTORS_USED(b) : 0; } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 6e1a60dd1742..d15d8c5778ed 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -45,6 +45,7 @@ static void bio_csum(struct bio *bio, struct bkey *k) bio_for_each_segment(bv, bio, iter) { void *d = kmap(bv.bv_page) + bv.bv_offset; + csum = bch_crc64_update(csum, d, bv.bv_len); kunmap(bv.bv_page); } @@ -526,8 +527,8 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) ? min_t(uint64_t, INT_MAX, KEY_START(k) - bio->bi_iter.bi_sector) : INT_MAX; - int ret = s->d->cache_miss(b, s, bio, sectors); + if (ret != MAP_CONTINUE) return ret; @@ -623,6 +624,7 @@ static void request_endio(struct bio *bio) if (bio->bi_status) { struct search *s = container_of(cl, struct search, cl); + s->iop.status = bio->bi_status; /* Only cache read errors are recoverable */ s->recoverable = false; @@ -1212,6 +1214,7 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, unsigned int cmd, unsigned long arg) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); + return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); } diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index 2331a0d5aa28..894410f3f829 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -200,6 +200,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, bool hit, bool bypass) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); + mark_cache_stats(&dc->accounting.collector, hit, bypass); mark_cache_stats(&c->accounting.collector, hit, bypass); } @@ -207,6 +208,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); + atomic_inc(&dc->accounting.collector.cache_readaheads); atomic_inc(&c->accounting.collector.cache_readaheads); } @@ -214,6 +216,7 @@ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); + atomic_inc(&dc->accounting.collector.cache_miss_collisions); atomic_inc(&c->accounting.collector.cache_miss_collisions); } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 4ab1b1968d9a..c11cf852715c 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -415,8 +415,8 @@ static int __uuid_write(struct cache_set *c) { BKEY_PADDED(key) k; struct closure cl; - closure_init_stack(&cl); + closure_init_stack(&cl); lockdep_assert_held(&bch_register_lock); if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) @@ -456,6 +456,7 @@ static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) static struct uuid_entry *uuid_find_empty(struct cache_set *c) { static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; + return uuid_find(c, zero_uuid); } @@ -619,6 +620,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) static int open_dev(struct block_device *b, fmode_t mode) { struct bcache_device *d = b->bd_disk->private_data; + if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) return -ENXIO; @@ -629,6 +631,7 @@ static int open_dev(struct block_device *b, fmode_t mode) static void release_dev(struct gendisk *b, fmode_t mode) { struct bcache_device *d = b->private_data; + closure_put(&d->cl); } @@ 
-919,6 +922,7 @@ void bch_cached_dev_run(struct cached_dev *dc) if (!d->c && BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { struct closure cl; + closure_init_stack(&cl); SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); @@ -976,6 +980,7 @@ static void cached_dev_detach_finish(struct work_struct *w) { struct cached_dev *dc = container_of(w, struct cached_dev, detach); struct closure cl; + closure_init_stack(&cl); BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); @@ -1103,6 +1108,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, if (bch_is_zero(u->uuid, 16)) { struct closure cl; + closure_init_stack(&cl); memcpy(u->uuid, dc->sb.uuid, 16); @@ -1320,6 +1326,7 @@ void bch_flash_dev_release(struct kobject *kobj) static void flash_dev_free(struct closure *cl) { struct bcache_device *d = container_of(cl, struct bcache_device, cl); + mutex_lock(&bch_register_lock); atomic_long_sub(bcache_dev_sectors_dirty(d), &d->c->flash_dev_dirty_sectors); @@ -1481,6 +1488,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) void bch_cache_set_release(struct kobject *kobj) { struct cache_set *c = container_of(kobj, struct cache_set, kobj); + kfree(c); module_put(THIS_MODULE); } @@ -1671,6 +1679,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) { int iter_size; struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); + if (!c) return NULL; @@ -2216,6 +2225,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, err = "failed to register device"; if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); + if (!dc) goto err_close; @@ -2224,6 +2234,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, mutex_unlock(&bch_register_lock); } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); + if (!ca) goto err_close; diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 3f2b7964d6a9..ba4cd7efca8e 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -459,6 +459,7 @@ STORE(__bch_flash_dev) if (attr == &sysfs_size) { uint64_t v; + strtoi_h_or_return(buf, v); u->sectors = v >> 9; @@ -703,6 +704,7 @@ STORE(__bch_cache_set) if (attr == &sysfs_flash_vol_create) { int r; uint64_t v; + strtoi_h_or_return(buf, v); r = bch_flash_dev_create(c, v); @@ -736,6 +738,7 @@ STORE(__bch_cache_set) if (attr == &sysfs_prune_cache) { struct shrink_control sc; + sc.gfp_mask = GFP_KERNEL; sc.nr_to_scan = strtoul_or_return(buf); c->shrink.scan_objects(&c->shrink, &sc); @@ -789,12 +792,14 @@ STORE_LOCKED(bch_cache_set) SHOW(bch_cache_set_internal) { struct cache_set *c = container_of(kobj, struct cache_set, internal); + return bch_cache_set_show(&c->kobj, attr, buf); } STORE(bch_cache_set_internal) { struct cache_set *c = container_of(kobj, struct cache_set, internal); + return bch_cache_set_store(&c->kobj, attr, buf, size); } diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index b15256bcf0e7..18016e7bb32c 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -133,6 +133,7 @@ bool bch_is_zero(const char *p, size_t n) int bch_parse_uuid(const char *s, char *uuid) { size_t i, j, x; + memset(uuid, 0, 16); for (i = 0, j = 0; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 39ee38ffb2db..44f1b0f1f4d9 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -250,6 +250,7 @@ static void dirty_init(struct keybuf_key *w) static void dirty_io_destructor(struct closure *cl) { 
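Every hunk in this patch applies the same checkpatch.pl rule: a blank line must separate a function's declaration block from its first statement. A before/after sketch on a made-up function (sum_before/sum_after are illustrative names, not bcache code):

#include <stdio.h>

/* checkpatch.pl: "WARNING: Missing a blank line after declarations" */
static int sum_before(const int *v, int n)
{
	int i, s = 0;
	for (i = 0; i < n; i++)	/* statement follows the declarations directly */
		s += v[i];
	return s;
}

/* Preferred style, as applied throughout this patch */
static int sum_after(const int *v, int n)
{
	int i, s = 0;

	for (i = 0; i < n; i++)
		s += v[i];
	return s;
}

int main(void)
{
	int v[3] = { 1, 2, 3 };

	printf("%d %d\n", sum_before(v, 3), sum_after(v, 3));
	return 0;
}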
struct dirty_io *io = container_of(cl, struct dirty_io, cl); + kfree(io); } diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 6bdcb48ee8cf..5d4f58e059fd 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -117,12 +117,14 @@ static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src) static inline struct bkey *bkey_next(const struct bkey *k) { __u64 *d = (void *) k; + return (struct bkey *) (d + bkey_u64s(k)); } static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys) { __u64 *d = (void *) k; + return (struct bkey *) (d + nr_keys); } /* Enough for a key with 6 pointers */ -- cgit v1.2.3 From fc2d5988b5972bced859944986fb36d902ac3698 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:46 +0800 Subject: bcache: add identifier names to arguments of function definitions There are many function definitions that do not have identifier argument names, so scripts/checkpatch.pl complains with warnings like this: WARNING: function definition argument 'struct bcache_device *' should also have an identifier name #16735: FILE: writeback.h:120: +void bch_sectors_dirty_init(struct bcache_device *); This patch adds identifier argument names to all bcache function definitions to fix such warnings. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 112 +++++++++++++++++++------------------ drivers/md/bcache/bset.h | 126 ++++++++++++++++++++++++------------------ drivers/md/bcache/btree.c | 6 +- drivers/md/bcache/btree.h | 80 ++++++++++++++------------- drivers/md/bcache/debug.h | 6 +- drivers/md/bcache/extents.h | 6 +- drivers/md/bcache/journal.c | 2 +- drivers/md/bcache/journal.h | 20 ++++--- drivers/md/bcache/request.c | 2 +- drivers/md/bcache/request.h | 2 +- drivers/md/bcache/stats.h | 13 +++-- drivers/md/bcache/super.c | 4 +- drivers/md/bcache/util.h | 12 ++-- drivers/md/bcache/writeback.h | 9 +-- 14 files changed, 215 insertions(+), 185 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index fd74dd075951..0bd505c61943 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -273,9 +273,10 @@ struct bcache_device { unsigned int data_csum:1; - int (*cache_miss)(struct btree *, struct search *, - struct bio *, unsigned int); - int (*ioctl) (struct bcache_device *, fmode_t, unsigned int, unsigned long); + int (*cache_miss)(struct btree *b, struct search *s, + struct bio *bio, unsigned int sectors); + int (*ioctl) (struct bcache_device *d, fmode_t mode, + unsigned int cmd, unsigned long arg); }; struct io { @@ -925,41 +926,43 @@ static inline void wait_for_kthread_stop(void) /* Forward declarations */ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio); -void bch_count_io_errors(struct cache *, blk_status_t, int, const char *); -void bch_bbio_count_io_errors(struct cache_set *, struct bio *, - blk_status_t, const char *); -void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t, - const char *); -void bch_bbio_free(struct bio *, struct cache_set *); -struct bio *bch_bbio_alloc(struct cache_set *); - -void __bch_submit_bbio(struct bio *, struct cache_set *); -void bch_submit_bbio(struct bio *, struct cache_set *, - struct bkey *, unsigned int); - -uint8_t bch_inc_gen(struct cache *, struct bucket *); -void bch_rescale_priorities(struct cache_set *, int); - -bool bch_can_invalidate_bucket(struct cache *, struct bucket *); -void __bch_invalidate_one_bucket(struct cache *, struct
bucket *); - -void __bch_bucket_free(struct cache *, struct bucket *); -void bch_bucket_free(struct cache_set *, struct bkey *); - -long bch_bucket_alloc(struct cache *, unsigned int, bool); -int __bch_bucket_alloc_set(struct cache_set *, unsigned int, - struct bkey *, int, bool); -int bch_bucket_alloc_set(struct cache_set *, unsigned int, - struct bkey *, int, bool); -bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned int, - unsigned int, unsigned int, bool); +void bch_count_io_errors(struct cache *ca, blk_status_t error, + int is_read, const char *m); +void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, + blk_status_t error, const char *m); +void bch_bbio_endio(struct cache_set *c, struct bio *bio, + blk_status_t error, const char *m); +void bch_bbio_free(struct bio *bio, struct cache_set *c); +struct bio *bch_bbio_alloc(struct cache_set *c); + +void __bch_submit_bbio(struct bio *bio, struct cache_set *c); +void bch_submit_bbio(struct bio *bio, struct cache_set *c, + struct bkey *k, unsigned int ptr); + +uint8_t bch_inc_gen(struct cache *ca, struct bucket *b); +void bch_rescale_priorities(struct cache_set *c, int sectors); + +bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b); +void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b); + +void __bch_bucket_free(struct cache *ca, struct bucket *b); +void bch_bucket_free(struct cache_set *c, struct bkey *k); + +long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait); +int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, + struct bkey *k, int n, bool wait); +int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, + struct bkey *k, int n, bool wait); +bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, + unsigned int sectors, unsigned int write_point, + unsigned int write_prio, bool wait); bool bch_cached_dev_error(struct cached_dev *dc); __printf(2, 3) -bool bch_cache_set_error(struct cache_set *, const char *, ...); +bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...); -void bch_prio_write(struct cache *); -void bch_write_bdev_super(struct cached_dev *, struct closure *); +void bch_prio_write(struct cache *ca); +void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); extern struct workqueue_struct *bcache_wq; extern struct mutex bch_register_lock; @@ -971,30 +974,31 @@ extern struct kobj_type bch_cache_set_ktype; extern struct kobj_type bch_cache_set_internal_ktype; extern struct kobj_type bch_cache_ktype; -void bch_cached_dev_release(struct kobject *); -void bch_flash_dev_release(struct kobject *); -void bch_cache_set_release(struct kobject *); -void bch_cache_release(struct kobject *); +void bch_cached_dev_release(struct kobject *kobj); +void bch_flash_dev_release(struct kobject *kobj); +void bch_cache_set_release(struct kobject *kobj); +void bch_cache_release(struct kobject *kobj); -int bch_uuid_write(struct cache_set *); -void bcache_write_super(struct cache_set *); +int bch_uuid_write(struct cache_set *c); +void bcache_write_super(struct cache_set *c); int bch_flash_dev_create(struct cache_set *c, uint64_t size); -int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *); -void bch_cached_dev_detach(struct cached_dev *); -void bch_cached_dev_run(struct cached_dev *); -void bcache_device_stop(struct bcache_device *); - -void bch_cache_set_unregister(struct cache_set *); -void bch_cache_set_stop(struct cache_set *); - -struct cache_set *bch_cache_set_alloc(struct cache_sb 
*); -void bch_btree_cache_free(struct cache_set *); -int bch_btree_cache_alloc(struct cache_set *); -void bch_moving_init_cache_set(struct cache_set *); -int bch_open_buckets_alloc(struct cache_set *); -void bch_open_buckets_free(struct cache_set *); +int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, + uint8_t *set_uuid); +void bch_cached_dev_detach(struct cached_dev *dc); +void bch_cached_dev_run(struct cached_dev *dc); +void bcache_device_stop(struct bcache_device *d); + +void bch_cache_set_unregister(struct cache_set *c); +void bch_cache_set_stop(struct cache_set *c); + +struct cache_set *bch_cache_set_alloc(struct cache_sb *sb); +void bch_btree_cache_free(struct cache_set *c); +int bch_btree_cache_alloc(struct cache_set *c); +void bch_moving_init_cache_set(struct cache_set *c); +int bch_open_buckets_alloc(struct cache_set *c); +void bch_open_buckets_free(struct cache_set *c); int bch_cache_allocator_start(struct cache *ca); diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index fdc296103113..f5bf333aa40d 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -187,18 +187,25 @@ struct bset_tree { }; struct btree_keys_ops { - bool (*sort_cmp)(struct btree_iter_set, - struct btree_iter_set); - struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *); - bool (*insert_fixup)(struct btree_keys *, struct bkey *, - struct btree_iter *, struct bkey *); - bool (*key_invalid)(struct btree_keys *, - const struct bkey *); - bool (*key_bad)(struct btree_keys *, const struct bkey *); - bool (*key_merge)(struct btree_keys *, - struct bkey *, struct bkey *); - void (*key_to_text)(char *, size_t, const struct bkey *); - void (*key_dump)(struct btree_keys *, const struct bkey *); + bool (*sort_cmp)(struct btree_iter_set l, + struct btree_iter_set r); + struct bkey *(*sort_fixup)(struct btree_iter *iter, + struct bkey *tmp); + bool (*insert_fixup)(struct btree_keys *b, + struct bkey *insert, + struct btree_iter *iter, + struct bkey *replace_key); + bool (*key_invalid)(struct btree_keys *bk, + const struct bkey *k); + bool (*key_bad)(struct btree_keys *bk, + const struct bkey *k); + bool (*key_merge)(struct btree_keys *bk, + struct bkey *l, struct bkey *r); + void (*key_to_text)(char *buf, + size_t size, + const struct bkey *k); + void (*key_dump)(struct btree_keys *keys, + const struct bkey *k); /* * Only used for deciding whether to use START_KEY(k) or just the key @@ -280,18 +287,20 @@ static inline struct bset *bset_next_set(struct btree_keys *b, return ((void *) i) + roundup(set_bytes(i), block_bytes); } -void bch_btree_keys_free(struct btree_keys *); -int bch_btree_keys_alloc(struct btree_keys *, unsigned int, gfp_t); -void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *, - bool *); - -void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t); -void bch_bset_build_written_tree(struct btree_keys *); -void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *); -bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *); -void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *); -unsigned int bch_btree_insert_key(struct btree_keys *, struct bkey *, - struct bkey *); +void bch_btree_keys_free(struct btree_keys *b); +int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order, + gfp_t gfp); +void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops, + bool *expensive_debug_checks); + +void bch_bset_init_next(struct btree_keys *b, struct 
bset *i, uint64_t magic); +void bch_bset_build_written_tree(struct btree_keys *b); +void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k); +bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r); +void bch_bset_insert(struct btree_keys *b, struct bkey *where, + struct bkey *insert); +unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, + struct bkey *replace_key); enum { BTREE_INSERT_STATUS_NO_INSERT = 0, @@ -313,18 +322,21 @@ struct btree_iter { } data[MAX_BSETS]; }; -typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *); +typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k); -struct bkey *bch_btree_iter_next(struct btree_iter *); -struct bkey *bch_btree_iter_next_filter(struct btree_iter *, - struct btree_keys *, ptr_filter_fn); +struct bkey *bch_btree_iter_next(struct btree_iter *iter); +struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter, + struct btree_keys *b, + ptr_filter_fn fn); -void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *); -struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *, - struct bkey *); +void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, + struct bkey *end); +struct bkey *bch_btree_iter_init(struct btree_keys *b, + struct btree_iter *iter, + struct bkey *search); -struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *, - const struct bkey *); +struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t, + const struct bkey *search); /* * Returns the first key that is strictly greater than search @@ -355,15 +367,17 @@ struct bset_sort_state { struct time_stats time; }; -void bch_bset_sort_state_free(struct bset_sort_state *); -int bch_bset_sort_state_init(struct bset_sort_state *, unsigned int); -void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *); -void bch_btree_sort_into(struct btree_keys *, struct btree_keys *, - struct bset_sort_state *); -void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *, - struct bset_sort_state *); -void bch_btree_sort_partial(struct btree_keys *, unsigned int, - struct bset_sort_state *); +void bch_bset_sort_state_free(struct bset_sort_state *state); +int bch_bset_sort_state_init(struct bset_sort_state *state, + unsigned int page_order); +void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state); +void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new, + struct bset_sort_state *state); +void bch_btree_sort_and_fix_extents(struct btree_keys *b, + struct btree_iter *iter, + struct bset_sort_state *state); +void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, + struct bset_sort_state *state); static inline void bch_btree_sort(struct btree_keys *b, struct bset_sort_state *state) @@ -377,7 +391,7 @@ struct bset_stats { size_t floats, failed; }; -void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *); +void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state); /* Bkey utility code */ @@ -401,10 +415,10 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l, : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r); } -void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *, - unsigned int); -bool __bch_cut_front(const struct bkey *, struct bkey *); -bool __bch_cut_back(const struct bkey *, struct bkey *); +void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src, + unsigned int i); +bool 
__bch_cut_front(const struct bkey *where, struct bkey *k); +bool __bch_cut_back(const struct bkey *where, struct bkey *k); static inline bool bch_cut_front(const struct bkey *where, struct bkey *k) { @@ -522,18 +536,20 @@ static inline size_t bch_keylist_bytes(struct keylist *l) return bch_keylist_nkeys(l) * sizeof(uint64_t); } -struct bkey *bch_keylist_pop(struct keylist *); -void bch_keylist_pop_front(struct keylist *); -int __bch_keylist_realloc(struct keylist *, unsigned int); +struct bkey *bch_keylist_pop(struct keylist *l); +void bch_keylist_pop_front(struct keylist *l); +int __bch_keylist_realloc(struct keylist *l, unsigned int u64s); /* Debug stuff */ #ifdef CONFIG_BCACHE_DEBUG -int __bch_count_data(struct btree_keys *); -void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...); -void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int); -void bch_dump_bucket(struct btree_keys *); +int __bch_count_data(struct btree_keys *b); +void __printf(2, 3) __bch_check_keys(struct btree_keys *b, + const char *fmt, + ...); +void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set); +void bch_dump_bucket(struct btree_keys *b); #else @@ -541,7 +557,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; } static inline void __printf(2, 3) __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {} static inline void bch_dump_bucket(struct btree_keys *b) {} -void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int); +void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set); #endif diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 4003f92f4d2c..a797ef359a21 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1309,8 +1309,10 @@ struct gc_merge_info { unsigned int keys; }; -static int bch_btree_insert_node(struct btree *, struct btree_op *, - struct keylist *, atomic_t *, struct bkey *); +static int bch_btree_insert_node(struct btree *b, struct btree_op *op, + struct keylist *insert_keys, + atomic_t *journal_ref, + struct bkey *replace_key); static int btree_gc_coalesce(struct btree *b, struct btree_op *op, struct gc_stat *gc, struct gc_merge_info *r) diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 8cbc0fd27738..f13b71a0613a 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -238,26 +238,28 @@ static inline void rw_unlock(bool w, struct btree *b) (w ? 
up_write : up_read)(&b->lock); } -void bch_btree_node_read_done(struct btree *); -void __bch_btree_node_write(struct btree *, struct closure *); -void bch_btree_node_write(struct btree *, struct closure *); - -void bch_btree_set_root(struct btree *); -struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *, - int, bool, struct btree *); -struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *, - struct bkey *, int, bool, struct btree *); - -int bch_btree_insert_check_key(struct btree *, struct btree_op *, - struct bkey *); -int bch_btree_insert(struct cache_set *, struct keylist *, - atomic_t *, struct bkey *); - -int bch_gc_thread_start(struct cache_set *); -void bch_initial_gc_finish(struct cache_set *); -void bch_moving_gc(struct cache_set *); -int bch_btree_check(struct cache_set *); -void bch_initial_mark_key(struct cache_set *, int, struct bkey *); +void bch_btree_node_read_done(struct btree *b); +void __bch_btree_node_write(struct btree *b, struct closure *parent); +void bch_btree_node_write(struct btree *b, struct closure *parent); + +void bch_btree_set_root(struct btree *b); +struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, + int level, bool wait, + struct btree *parent); +struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, + struct bkey *k, int level, bool write, + struct btree *parent); + +int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, + struct bkey *check_key); +int bch_btree_insert(struct cache_set *c, struct keylist *keys, + atomic_t *journal_ref, struct bkey *replace_key); + +int bch_gc_thread_start(struct cache_set *c); +void bch_initial_gc_finish(struct cache_set *c); +void bch_moving_gc(struct cache_set *c); +int bch_btree_check(struct cache_set *c); +void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k); static inline void wake_up_gc(struct cache_set *c) { @@ -272,9 +274,9 @@ static inline void wake_up_gc(struct cache_set *c) #define MAP_END_KEY 1 -typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *); -int __bch_btree_map_nodes(struct btree_op *, struct cache_set *, - struct bkey *, btree_map_nodes_fn *, int); +typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b); +int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, + struct bkey *from, btree_map_nodes_fn *fn, int flags); static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, struct bkey *from, btree_map_nodes_fn *fn) @@ -290,21 +292,21 @@ static inline int bch_btree_map_leaf_nodes(struct btree_op *op, return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES); } -typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *, - struct bkey *); -int bch_btree_map_keys(struct btree_op *, struct cache_set *, - struct bkey *, btree_map_keys_fn *, int); - -typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); - -void bch_keybuf_init(struct keybuf *); -void bch_refill_keybuf(struct cache_set *, struct keybuf *, - struct bkey *, keybuf_pred_fn *); -bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *, - struct bkey *); -void bch_keybuf_del(struct keybuf *, struct keybuf_key *); -struct keybuf_key *bch_keybuf_next(struct keybuf *); -struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *, - struct bkey *, keybuf_pred_fn *); +typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b, + struct bkey *k); +int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, + 
struct bkey *from, btree_map_keys_fn *fn, int flags); + +typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k); + +void bch_keybuf_init(struct keybuf *buf); +void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, + struct bkey *end, keybuf_pred_fn *pred); +bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, + struct bkey *end); +void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w); +struct keybuf_key *bch_keybuf_next(struct keybuf *buf); +struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, struct keybuf *buf, + struct bkey *end, keybuf_pred_fn *pred); void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats); #endif diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h index acc48d3fa274..fb3d4dff4b26 100644 --- a/drivers/md/bcache/debug.h +++ b/drivers/md/bcache/debug.h @@ -8,8 +8,8 @@ struct cache_set; #ifdef CONFIG_BCACHE_DEBUG -void bch_btree_verify(struct btree *); -void bch_data_verify(struct cached_dev *, struct bio *); +void bch_btree_verify(struct btree *b); +void bch_data_verify(struct cached_dev *dc, struct bio *bio); #define expensive_debug_checks(c) ((c)->expensive_debug_checks) #define key_merging_disabled(c) ((c)->key_merging_disabled) @@ -27,7 +27,7 @@ static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} #endif #ifdef CONFIG_DEBUG_FS -void bch_debug_init_cache_set(struct cache_set *); +void bch_debug_init_cache_set(struct cache_set *c); #else static inline void bch_debug_init_cache_set(struct cache_set *c) {} #endif diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h index 0cd3575afa1d..4d667e05bb73 100644 --- a/drivers/md/bcache/extents.h +++ b/drivers/md/bcache/extents.h @@ -8,8 +8,8 @@ extern const struct btree_keys_ops bch_extent_keys_ops; struct bkey; struct cache_set; -void bch_extent_to_text(char *, size_t, const struct bkey *); -bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *); -bool __bch_extent_invalid(struct cache_set *, const struct bkey *); +void bch_extent_to_text(char *buf, size_t size, const struct bkey *k); +bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k); +bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k); #endif /* _BCACHE_EXTENTS_H */ diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 301cbb43a78f..a70126466fa8 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -581,7 +581,7 @@ static void journal_write_endio(struct bio *bio) closure_put(&w->c->journal.io); } -static void journal_write(struct closure *); +static void journal_write(struct closure *cl); static void journal_write_done(struct closure *cl) { diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index f0982731ae20..66f0facff84b 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -167,14 +167,16 @@ struct cache_set; struct btree_op; struct keylist; -atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *); -void bch_journal_next(struct journal *); -void bch_journal_mark(struct cache_set *, struct list_head *); -void bch_journal_meta(struct cache_set *, struct closure *); -int bch_journal_read(struct cache_set *, struct list_head *); -int bch_journal_replay(struct cache_set *, struct list_head *); - -void bch_journal_free(struct cache_set *); -int bch_journal_alloc(struct cache_set *); +atomic_t *bch_journal(struct cache_set *c, + struct keylist *keys, + struct closure *parent); +void 
bch_journal_next(struct journal *j); +void bch_journal_mark(struct cache_set *c, struct list_head *list); +void bch_journal_meta(struct cache_set *c, struct closure *cl); +int bch_journal_read(struct cache_set *c, struct list_head *list); +int bch_journal_replay(struct cache_set *c, struct list_head *list); + +void bch_journal_free(struct cache_set *c); +int bch_journal_alloc(struct cache_set *c); #endif /* _BCACHE_JOURNAL_H */ diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index d15d8c5778ed..d7e6ee3d7dd2 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -25,7 +25,7 @@ struct kmem_cache *bch_search_cache; -static void bch_data_insert_start(struct closure *); +static void bch_data_insert_start(struct closure *cl); static unsigned int cache_mode(struct cached_dev *dc) { diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 8e8c1ce00d9d..aa055cfeb099 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -33,7 +33,7 @@ struct data_insert_op { BKEY_PADDED(replace_key); }; -unsigned int bch_get_congested(struct cache_set *); +unsigned int bch_get_congested(struct cache_set *c); void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index 77234a89dd69..abfaabf7e7fc 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -53,10 +53,13 @@ void bch_cache_accounting_clear(struct cache_accounting *acc); void bch_cache_accounting_destroy(struct cache_accounting *acc); -void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *, - bool, bool); -void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *); -void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *); -void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int); +void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, + bool hit, bool bypass); +void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d); +void bch_mark_cache_miss_collision(struct cache_set *c, + struct bcache_device *d); +void bch_mark_sectors_bypassed(struct cache_set *c, + struct cached_dev *dc, + int sectors); #endif /* _BCACHE_STATS_H_ */ diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index c11cf852715c..6a80069650c7 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2136,8 +2136,8 @@ err: /* Global interfaces/init */ -static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, - const char *, size_t); +static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, + const char *buffer, size_t size); kobj_attribute_write(register, register_bcache); kobj_attribute_write(register_quiet, register_bcache); diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 484044231f21..707d7f0c034e 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -288,10 +288,10 @@ do { \ #define ANYSINT_MAX(t) \ ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1) -int bch_strtoint_h(const char *, int *); -int bch_strtouint_h(const char *, unsigned int *); -int bch_strtoll_h(const char *, long long *); -int bch_strtoull_h(const char *, unsigned long long *); +int bch_strtoint_h(const char *cp, int *res); +int bch_strtouint_h(const char *cp, unsigned int *res); +int bch_strtoll_h(const char *cp, long long *res); +int bch_strtoull_h(const char *cp, unsigned 
long long *res); static inline int bch_strtol_h(const char *cp, long *res) { @@ -563,7 +563,7 @@ static inline sector_t bdev_sectors(struct block_device *bdev) return bdev->bd_inode->i_size >> 9; } -uint64_t bch_crc64_update(uint64_t, const void *, size_t); -uint64_t bch_crc64(const void *, size_t); +uint64_t bch_crc64_update(uint64_t crc, const void *_data, size_t len); +uint64_t bch_crc64(const void *data, size_t len); #endif /* _BCACHE_UTIL_H */ diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 76b691850c98..d2b9fdbc8994 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -96,10 +96,11 @@ static inline void bch_writeback_add(struct cached_dev *dc) } } -void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned int, uint64_t, int); +void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, + uint64_t offset, int nr_sectors); -void bch_sectors_dirty_init(struct bcache_device *); -void bch_cached_dev_writeback_init(struct cached_dev *); -int bch_cached_dev_writeback_start(struct cached_dev *); +void bch_sectors_dirty_init(struct bcache_device *d); +void bch_cached_dev_writeback_init(struct cached_dev *dc); +int bch_cached_dev_writeback_start(struct cached_dev *dc); #endif -- cgit v1.2.3 From b0d30981c05f32d8cc032b209408ca3224f05f36 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:47 +0800 Subject: bcache: style fixes for lines over 80 characters This patch splits lines over 80 characters into multiple lines, to minimize warnings from checkpatch.pl. There are still some lines that exceed 80 characters, but they read better as single lines, so I leave them unchanged. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 4 ++-- drivers/md/bcache/bset.c | 10 +++++++--- drivers/md/bcache/bset.h | 6 ++++-- drivers/md/bcache/btree.c | 5 ++++- drivers/md/bcache/btree.h | 6 ++++-- drivers/md/bcache/debug.c | 3 ++- drivers/md/bcache/extents.c | 4 +++- drivers/md/bcache/journal.c | 3 ++- drivers/md/bcache/request.c | 7 +++++-- drivers/md/bcache/super.c | 18 ++++++++++++------ drivers/md/bcache/sysfs.c | 11 +++++++---- drivers/md/bcache/util.h | 3 ++- drivers/md/bcache/writeback.c | 7 +++++-- 13 files changed, 59 insertions(+), 28 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 0bd505c61943..031a75a25d3e 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -614,8 +614,8 @@ struct cache_set { uint16_t min_prio; /* - * max(gen - last_gc) for all buckets. When it gets too big we have to gc - * to keep gens from wrapping around. + * max(gen - last_gc) for all buckets. When it gets too big we have to + * gc to keep gens from wrapping around.
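The comment being re-wrapped above is about generation wraparound, and the gen_after() helper shown earlier in this series is how bcache compares 8-bit generations safely: the difference a - b is computed mod 256, and a distance over 128 is treated as "not newer", which tolerates gens wrapping from 255 back to 0. A standalone illustration with hypothetical generation values:

#include <stdio.h>
#include <stdint.h>

/* Same body as the bcache.h helper touched in the second patch */
static uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;	/* computed mod 256 */

	return r > 128U ? 0 : r;
}

int main(void)
{
	printf("%u\n", gen_after(5, 250));	/* a wrapped past 0: 5 - 250 = 11 (mod 256) */
	printf("%u\n", gen_after(250, 5));	/* distance 245 > 128, so 0: not after */
	return 0;
}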
*/ uint8_t need_gc; struct gc_stat gc_stats; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 6fd5623b2e63..19b4febe5b45 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -311,7 +311,9 @@ void bch_btree_keys_free(struct btree_keys *b) } EXPORT_SYMBOL(bch_btree_keys_free); -int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order, gfp_t gfp) +int bch_btree_keys_alloc(struct btree_keys *b, + unsigned int page_order, + gfp_t gfp) { struct bset_tree *t = b->set; @@ -475,7 +477,8 @@ void inorder_test(void) for (unsigned int size = 2; size < 65536000; size++) { - unsigned int extra = (size - rounddown_pow_of_two(size - 1)) << 1; + unsigned int extra = + (size - rounddown_pow_of_two(size - 1)) << 1; unsigned int i = 1, j = rounddown_pow_of_two(size - 1); if (!(size % 4096)) @@ -825,7 +828,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b, k != bset_bkey_last(t->data); k = bkey_next(k)) if (t->size == bkey_to_cacheline(t, k)) { - t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k); + t->prev[t->size] = + bkey_to_cacheline_offset(t, t->size, k); t->size++; } } diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index f5bf333aa40d..bac76aabca6d 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -246,12 +246,14 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k) return !b->last_set_unwritten || k < b->set[b->nsets].data->start; } -static inline unsigned int bset_byte_offset(struct btree_keys *b, struct bset *i) +static inline unsigned int bset_byte_offset(struct btree_keys *b, + struct bset *i) { return ((size_t) i) - ((size_t) b->set->data); } -static inline unsigned int bset_sector_offset(struct btree_keys *b, struct bset *i) +static inline unsigned int bset_sector_offset(struct btree_keys *b, + struct bset *i) { return bset_byte_offset(b, i) >> 9; } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index a797ef359a21..c2e4174a2e03 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -436,7 +436,10 @@ static void do_btree_node_write(struct btree *b) continue_at(cl, btree_node_write_done, NULL); } else { - /* No problem for multipage bvec since the bio is just allocated */ + /* + * No problem for multipage bvec since the bio is + * just allocated + */ b->bio->bi_vcnt = 0; bch_bio_map(b->bio, i); diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index f13b71a0613a..a68d6c55783b 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -306,7 +306,9 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, struct bkey *end); void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w); struct keybuf_key *bch_keybuf_next(struct keybuf *buf); -struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, struct keybuf *buf, - struct bkey *end, keybuf_pred_fn *pred); +struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, + struct keybuf *buf, + struct bkey *end, + keybuf_pred_fn *pred); void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats); #endif diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index f0eb37a14dab..a8f70c916fdb 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -67,7 +67,8 @@ void bch_btree_verify(struct btree *b) if (inmemory->keys != sorted->keys || memcmp(inmemory->start, sorted->start, - (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) { + (void *) bset_bkey_last(inmemory) - + 
(void *) inmemory->start)) { struct bset *i; unsigned int j; diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 8f5de61e1a90..cb3b2c613ed6 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -577,7 +577,9 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r) ~((uint64_t)1 << 63); } -static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r) +static bool bch_extent_merge(struct btree_keys *bk, + struct bkey *l, + struct bkey *r) { struct btree *b = container_of(bk, struct btree, keys); unsigned int i; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index a70126466fa8..6116bbf870d8 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -193,7 +193,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); l < ca->sb.njournal_buckets; - l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1)) + l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, + l + 1)) if (read_bucket(l)) goto bsearch; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index d7e6ee3d7dd2..858dd3da9dc5 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -136,7 +136,9 @@ static void bch_data_invalidate(struct closure *cl) bio->bi_iter.bi_size -= sectors << 9; bch_keylist_add(&op->insert_keys, - &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); + &KEY(op->inode, + bio->bi_iter.bi_sector, + sectors)); } op->insert_data_done = true; @@ -815,7 +817,8 @@ static void cached_dev_read_done(struct closure *cl) if (s->iop.bio) { bio_reset(s->iop.bio); - s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; + s->iop.bio->bi_iter.bi_sector = + s->cache_miss->bi_iter.bi_sector; bio_copy_dev(s->iop.bio, s->cache_miss); s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; bch_bio_map(s->iop.bio, NULL); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 6a80069650c7..296fc8c31c6c 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -149,7 +149,8 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, goto err; err = "Invalid superblock: device too small"; - if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets) + if (get_capacity(bdev->bd_disk) < + sb->bucket_size * sb->nbuckets) goto err; err = "Bad UUID"; @@ -600,7 +601,8 @@ static void prio_read(struct cache *ca, uint64_t bucket) prio_io(ca, bucket, REQ_OP_READ, 0); - if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) + if (p->csum != + bch_crc64(&p->magic, bucket_bytes(ca) - 8)) pr_warn("bad csum reading priorities"); if (p->magic != pset_magic(&ca->sb)) @@ -1740,8 +1742,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) || mempool_init_slab_pool(&c->search, 32, bch_search_cache) || mempool_init_kmalloc_pool(&c->bio_meta, 2, - sizeof(struct bbio) + sizeof(struct bio_vec) * - bucket_pages(c)) || + sizeof(struct bbio) + sizeof(struct bio_vec) * + bucket_pages(c)) || mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) || @@ -1813,7 +1815,9 @@ static void run_cache_set(struct cache_set *c) goto err; err = "error reading btree root"; - c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); + c->root = bch_btree_node_get(c, NULL, k, 
+ j->btree_level, + true, NULL); if (IS_ERR_OR_NULL(c->root)) goto err; @@ -2107,7 +2111,9 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, goto err; } - if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { + if (kobject_add(&ca->kobj, + &part_to_dev(bdev->bd_part)->kobj, + "bcache")) { err = "error calling kobject_add"; ret = -ENOMEM; goto out;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index ba4cd7efca8e..f0faaeaec57f 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -130,8 +130,10 @@ rw_attribute(btree_shrinker_disabled); rw_attribute(copy_gc_enabled); rw_attribute(size); -static ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], - size_t selected) +static ssize_t bch_snprint_string_list(char *buf, + size_t size, + const char * const list[], + size_t selected) { char *out = buf; size_t i; @@ -341,8 +343,9 @@ STORE(__cached_dev) add_uevent_var(env, "DRIVER=bcache"); add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid), add_uevent_var(env, "CACHED_LABEL=%s", buf); - kobject_uevent_env( - &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp); + kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj, + KOBJ_CHANGE, + env->envp); kfree(env); }
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 707d7f0c034e..4e0ed19e32d3 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -402,7 +402,8 @@ do { \ __print_time_stat(stats, name, \ average_duration, duration_units); \ sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \ - div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\ + div_u64((stats)->max_duration, \ + NSEC_PER_ ## duration_units)); \ \ sysfs_print(name ## _last_ ## frequency_units, (stats)->last \ ? div_s64(local_clock() - (stats)->last, \
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 44f1b0f1f4d9..e40bf0e403e7 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -444,7 +444,8 @@ static void read_dirty(struct cached_dev *dc) io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec) * - DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), + DIV_ROUND_UP(KEY_SIZE(&w->key), + PAGE_SECTORS), GFP_KERNEL); if (!io) goto err; @@ -540,7 +541,9 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, static bool dirty_pred(struct keybuf *buf, struct bkey *k) { - struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys); + struct cached_dev *dc = container_of(buf, + struct cached_dev, + writeback_keys); BUG_ON(KEY_INODE(k) != dc->disk.id); -- cgit v1.2.3
From 958bf494ecf0a6755309569a9083bf123d6e3754 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:48 +0800 Subject: bcache: replace Symbolic permissions by octal permission numbers Symbolic permission names are used in bcache; octal permission numbers are now encouraged instead, for readability. This patch replaces all symbolic permissions with octal permission numbers.
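For reference, the octal literals encode exactly the mode bits the symbolic macros expand to; a minimal sketch of the mapping (the attribute name and handlers below are hypothetical, the values are the standard ones from <linux/stat.h>):

    /* S_IWUSR == 0200 (owner write), S_IRUSR == 0400 (owner read),
     * so S_IWUSR|S_IRUSR == 0600; S_IRUGO == S_IRUSR|S_IRGRP|S_IROTH
     * == 0444, and S_IRUGO|S_IWUSR == 0644. */
    static struct kobj_attribute ksysfs_example =	/* hypothetical name */
    	__ATTR(example, 0600, example_show, example_store);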
Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 4 ++-- drivers/md/bcache/sysfs.h | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 031a75a25d3e..66e6d5639b38 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -881,11 +881,11 @@ static inline uint8_t bucket_gc_gen(struct bucket *b) #define BUCKET_GC_GEN_MAX 96U #define kobj_attribute_write(n, fn) \ - static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn) + static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn) #define kobj_attribute_rw(n, show, store) \ static struct kobj_attribute ksysfs_##n = \ - __ATTR(n, S_IWUSR|S_IRUSR, show, store) + __ATTR(n, 0600, show, store) static inline void wake_up_allocators(struct cache_set *c) {
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h index b54fe9602529..3fe82425859c 100644 --- a/drivers/md/bcache/sysfs.h +++ b/drivers/md/bcache/sysfs.h @@ -44,9 +44,9 @@ STORE(fn) \ static struct attribute sysfs_##_name = \ { .name = #_name, .mode = _mode } -#define write_attribute(n) __sysfs_attribute(n, S_IWUSR) -#define read_attribute(n) __sysfs_attribute(n, S_IRUGO) -#define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR) +#define write_attribute(n) __sysfs_attribute(n, 0200) +#define read_attribute(n) __sysfs_attribute(n, 0444) +#define rw_attribute(n) __sysfs_attribute(n, 0644) #define sysfs_printf(file, fmt, ...) \ do { \ -- cgit v1.2.3
From 6ae63e3501c493616612b90be26b35a6084fc75d Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:49 +0800 Subject: bcache: replace printk() by pr_*() routines There are still many places in bcache that use printk() to display kernel messages; these are suggested to be replaced by pr_*() routines like pr_err(), pr_info(), or pr_notice(). This patch replaces all printk() calls with a proper pr_*() routine in the bcache code. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/bset.c | 8 ++++---- drivers/md/bcache/debug.c | 10 +++++----- drivers/md/bcache/extents.c | 8 ++++---- drivers/md/bcache/super.c | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 19b4febe5b45..b6a3f9d291a9 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -25,18 +25,18 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set) for (k = i->start; k < bset_bkey_last(i); k = next) { next = bkey_next(k); - printk(KERN_ERR "block %u key %u/%u: ", set, + pr_err("block %u key %u/%u: ", set, (unsigned int) ((u64 *) k - i->d), i->keys); if (b->ops->key_dump) b->ops->key_dump(b, k); else - printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k)); + pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k)); if (next < bset_bkey_last(i) && bkey_cmp(k, b->ops->is_extents ?
&START_KEY(next) : next) > 0) - printk(KERN_ERR "Key skipped backwards\n"); + pr_err("Key skipped backwards\n"); } } @@ -482,7 +482,7 @@ void inorder_test(void) unsigned int i = 1, j = rounddown_pow_of_two(size - 1); if (!(size % 4096)) - printk(KERN_NOTICE "loop %u, %llu per us\n", size, + pr_notice("loop %u, %llu per us\n", size, done / ktime_us_delta(ktime_get(), start)); while (1) {
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index a8f70c916fdb..06da66b2488a 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -74,28 +74,28 @@ void bch_btree_verify(struct btree *b) console_lock(); - printk(KERN_ERR "*** in memory:\n"); + pr_err("*** in memory:\n"); bch_dump_bset(&b->keys, inmemory, 0); - printk(KERN_ERR "*** read back in:\n"); + pr_err("*** read back in:\n"); bch_dump_bset(&v->keys, sorted, 0); for_each_written_bset(b, ondisk, i) { unsigned int block = ((void *) i - (void *) ondisk) / block_bytes(b->c); - printk(KERN_ERR "*** on disk block %u:\n", block); + pr_err("*** on disk block %u:\n", block); bch_dump_bset(&b->keys, i, block); } - printk(KERN_ERR "*** block %zu not written\n", + pr_err("*** block %zu not written\n", ((void *) i - (void *) ondisk) / block_bytes(b->c)); for (j = 0; j < inmemory->keys; j++) if (inmemory->d[j] != sorted->d[j]) break; - printk(KERN_ERR "b->written %u\n", b->written); + pr_err("b->written %u\n", b->written); console_unlock(); panic("verify failed at %u\n", j);
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index cb3b2c613ed6..c809724e6571 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -130,18 +130,18 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k) char buf[80]; bch_extent_to_text(buf, sizeof(buf), k); - printk(" %s", buf); + pr_err(" %s", buf); for (j = 0; j < KEY_PTRS(k); j++) { size_t n = PTR_BUCKET_NR(b->c, k, j); - printk(" bucket %zu", n); + pr_err(" bucket %zu", n); if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) - printk(" prio %i", + pr_err(" prio %i", PTR_BUCKET(b->c, k, j)->prio); } - printk(" %s\n", bch_ptr_status(b->c, k)); + pr_err(" %s\n", bch_ptr_status(b->c, k)); } /* Btree ptrs */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 296fc8c31c6c..2a7be104557e 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1472,13 +1472,13 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) acquire_console_sem(); */ - printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); + pr_err("bcache: error on %pU: ", c->sb.set_uuid); va_start(args, fmt); vprintk(fmt, args); va_end(args); - printk(", disabling caching\n"); + pr_err(", disabling caching\n"); if (c->on_error == ON_ERROR_PANIC) panic("panic forced after error\n"); -- cgit v1.2.3
From c63ca7871aa3f1777e2af978ea1967b2f5bf0ae2 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:50 +0800 Subject: bcache: fix indent by replacing blank by tabs bch_btree_insert_check_key() has unaligned indentation, i.e. indentation using blank characters. This patch aligns the indentation and replaces blanks with tabs.
Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index c2e4174a2e03..e7d4817681f2 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -2231,10 +2231,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, rw_lock(true, b, b->level); if (b->key.ptr[0] != btree_ptr || - b->seq != seq + 1) { + b->seq != seq + 1) { op->lock = b->level; goto out; - } + } } SET_KEY_PTRS(check_key, 1); -- cgit v1.2.3
From d9c61d30e86a024b2f7c728706b8ab2c9e6ec2c1 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:51 +0800 Subject: bcache: replace '%pF' by '%pS' in seq_printf() '%pF' and '%pf' are deprecated vsprintf pointer extensions; this patch replaces them with '%pS', as suggested by checkpatch.pl. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/closure.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 8570fc426e31..034067e0e9ce 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -168,7 +168,7 @@ static int debug_seq_show(struct seq_file *f, void *data) list_for_each_entry(cl, &closure_list, all) { int r = atomic_read(&cl->remaining); - seq_printf(f, "%p: %pF -> %pf p %p r %i ", + seq_printf(f, "%p: %pS -> %pS p %p r %i ", cl, (void *) cl->ip, cl->fn, cl->parent, r & CLOSURE_REMAINING_MASK); @@ -178,7 +178,7 @@ static int debug_seq_show(struct seq_file *f, void *data) r & CLOSURE_RUNNING ? "R" : ""); if (r & CLOSURE_WAITING) - seq_printf(f, " W %pF\n", + seq_printf(f, " W %pS\n", (void *) cl->waiting_on); seq_printf(f, "\n"); -- cgit v1.2.3
From 2b1edd23ecc6f5839d107a723a282a73bf00df3f Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:52 +0800 Subject: bcache: fix typo 'succesfully' to 'successfully' This patch fixes the typo 'succesfully' to the correct 'successfully', as suggested by checkpatch.pl. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/io.c | 2 +- drivers/md/bcache/request.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index cfc56add799a..c25097968319 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -86,7 +86,7 @@ void bch_count_io_errors(struct cache *ca, /* * First we subtract refresh from count; each time we - * succesfully do so, we rescale the errors once: + * successfully do so, we rescale the errors once: */ count = atomic_cmpxchg(&ca->io_count, old, new);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 858dd3da9dc5..55264e71369d 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -154,7 +154,7 @@ static void bch_data_insert_error(struct closure *cl) /* * Our data write just errored, which means we've got a bunch of keys to - * insert that point to data that wasn't succesfully written. + * insert that point to data that wasn't successfully written.
* * We don't have to insert those keys but we still have to invalidate * that region of the cache - so, if we just strip off all the pointers -- cgit v1.2.3
From bc81b47e828adfc3631e616fddc6b2203876b65d Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:53 +0800 Subject: bcache: prefer 'help' in Kconfig The current bcache Kconfig uses '---help---' as the header of help information; 'help' is now preferred. This patch fixes this style by replacing '---help---' with 'help' in the bcache Kconfig file. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index 17bf109c58e9..817b9fba50db 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -1,7 +1,7 @@ config BCACHE tristate "Block device as cache" - ---help--- + help Allows a block device to be used as cache for other devices; uses a btree for indexing and the layout is optimized for SSDs. @@ -10,7 +10,7 @@ config BCACHE config BCACHE_DEBUG bool "Bcache debugging" depends on BCACHE - ---help--- + help Don't select this option unless you're a developer Enables extra debugging tools, allows expensive runtime checks to be @@ -20,7 +20,7 @@ config BCACHE_CLOSURES_DEBUG bool "Debug closures" depends on BCACHE select DEBUG_FS - ---help--- + help Keeps all active closures in a linked list and provides a debugfs interface to list them, which makes it possible to see asynchronous operations that get stuck. -- cgit v1.2.3
From 3069211be30015b78e096fc4d3774b7d4ba29781 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:54 +0800 Subject: bcache: do not check NULL pointer before calling kmem_cache_destroy kmem_cache_destroy() is safe with a NULL pointer as input, so the NULL pointer check is unnecessary. This patch just removes the NULL pointer check to make the code simpler. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/request.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 55264e71369d..51be355a3309 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1367,8 +1367,7 @@ void bch_flash_dev_request_init(struct bcache_device *d) void bch_request_exit(void) { - if (bch_search_cache) - kmem_cache_destroy(bch_search_cache); + kmem_cache_destroy(bch_search_cache); } int __init bch_request_init(void) -- cgit v1.2.3
From 3be11dbab67a3ed28358a950671de9b8e7fb5a02 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:55 +0800 Subject: bcache: fix code comments style This patch fixes 3 style issues warned about by checkpatch.pl: - Comment lines are not aligned - Comments use "/*" on subsequent lines - Comment lines use a trailing "*/" Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/bset.c | 9 ++++++--- drivers/md/bcache/super.c | 22 +++++++++++++--------- drivers/md/bcache/writeback.c | 3 ++- 3 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index b6a3f9d291a9..8f07fa6e1739 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -402,7 +402,8 @@ static unsigned int inorder_prev(unsigned int j, unsigned int size) return j; } -/* I have no idea why this code works... and I'm the one who wrote it +/* + * I have no idea why this code works...
and I'm the one who wrote it * * However, I do know what it does: * Given a binary tree constructed in an array (i.e. how you normally implement @@ -795,7 +796,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b, if (!t->size) return; - /* k is the key we just inserted; we need to find the entry in the + /* + * k is the key we just inserted; we need to find the entry in the * lookup table for the first key that is strictly greater than k: * it's either k's cacheline or the next one */ @@ -803,7 +805,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b, table_to_bkey(t, j) <= k) j++; - /* Adjust all the lookup table entries, and find a new key for any that + /* + * Adjust all the lookup table entries, and find a new key for any that * have gotten too big */ for (; j < t->size; j++) { diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2a7be104557e..01fc3c015a58 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -465,8 +465,8 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c) * Bucket priorities/gens: * * For each bucket, we store on disk its - * 8 bit gen - * 16 bit priority + * 8 bit gen + * 16 bit priority * * See alloc.c for an explanation of the gen. The priority is used to implement * lru (and in the future other) cache replacement policies; for most purposes @@ -934,8 +934,10 @@ void bch_cached_dev_run(struct cached_dev *dc) add_disk(d->disk); bd_link_disk_holder(dc->bdev, dc->disk.disk); - /* won't show up in the uevent file, use udevadm monitor -e instead - * only class / kset properties are persistent */ + /* + * won't show up in the uevent file, use udevadm monitor -e instead + * only class / kset properties are persistent + */ kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); kfree(env[1]); kfree(env[2]); @@ -1104,8 +1106,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, } } - /* Deadlocks since we're called via sysfs... - sysfs_remove_file(&dc->kobj, &sysfs_attach); + /* + * Deadlocks since we're called via sysfs... + * sysfs_remove_file(&dc->kobj, &sysfs_attach); */ if (bch_is_zero(u->uuid, 16)) { @@ -1468,9 +1471,10 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) pr_info("CACHE_SET_IO_DISABLE already set"); - /* XXX: we can be called from atomic context - acquire_console_sem(); - */ + /* + * XXX: we can be called from atomic context + * acquire_console_sem(); + */ pr_err("bcache: error on %pU: ", c->sb.set_uuid); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index e40bf0e403e7..6be05bd7ca67 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -468,7 +468,8 @@ static void read_dirty(struct cached_dev *dc) down(&dc->in_flight); - /* We've acquired a semaphore for the maximum + /* + * We've acquired a semaphore for the maximum * simultaneous number of writebacks; from here * everything happens asynchronously. */ -- cgit v1.2.3 From e1f08f1bc0134c2ebd655fc63ed82e160c123b7b Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:56 +0800 Subject: bcache: add static const prefix to char * array declarations This patch declares char * array with const prefix in sysfs.c, which is suggested by checkpatch.pl. 
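It is worth noting that 'const char *' and 'char const *' are the same type, so the hunk below only reorders the qualifier; the stronger form checkpatch usually recommends also makes the array slots themselves const. A quick reference sketch (illustrative declarations, not from the patch):

    const char *a[] = { "x" };			/* pointees const, slots mutable */
    char const *b[] = { "x" };			/* identical type to 'a' */
    static const char * const c[] = { "x" };	/* pointees and slots both const,
    						 * emitted into read-only data */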
Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index f0faaeaec57f..150cf4f4cf74 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -150,7 +150,7 @@ SHOW(__bch_cached_dev) { struct cached_dev *dc = container_of(kobj, struct cached_dev, disk.kobj); - const char *states[] = { "no cache", "clean", "dirty", "inconsistent" }; + char const *states[] = { "no cache", "clean", "dirty", "inconsistent" }; int wb = dc->writeback_running; #define var(stat) (dc->stat) -- cgit v1.2.3
From b3cf37bfa1a48acd5620d7be3dea05a4f76d87fd Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:57 +0800 Subject: bcache: move open brace at end of function definitions to next line Placing the open brace '{' at the end of a function definition is not the preferred style; checkpatch.pl reports an error for such coding style. This patch moves these braces to the start of the next line. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 01fc3c015a58..e3ecf08a10fc 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2152,7 +2152,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, kobj_attribute_write(register, register_bcache); kobj_attribute_write(register_quiet, register_bcache); -static bool bch_is_open_backing(struct block_device *bdev) { +static bool bch_is_open_backing(struct block_device *bdev) +{ struct cache_set *c, *tc; struct cached_dev *dc, *t; @@ -2166,7 +2167,8 @@ static bool bch_is_open_backing(struct block_device *bdev) { return false; } -static bool bch_is_open_cache(struct block_device *bdev) { +static bool bch_is_open_cache(struct block_device *bdev) +{ struct cache_set *c, *tc; struct cache *ca; unsigned int i; @@ -2178,7 +2180,8 @@ static bool bch_is_open_cache(struct block_device *bdev) { return false; } -static bool bch_is_open(struct block_device *bdev) { +static bool bch_is_open(struct block_device *bdev) +{ return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); } -- cgit v1.2.3
From 87418ef9f07ac6bc00af7992dc1ccd96da46cd68 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:58 +0800 Subject: bcache: add missing SPDX header The SPDX header is missing from closure.c, super.c and util.c; this patch adds an SPDX header for GPL-2.0 to these files. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/closure.c | 1 + drivers/md/bcache/super.c | 1 + drivers/md/bcache/util.c | 1 + 3 files changed, 3 insertions(+)
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 034067e0e9ce..73f5319295bc 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Asynchronous refcounty things *
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e3ecf08a10fc..e637d74e2908 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * bcache setup/teardown code, and some metadata io - read a superblock and * figure out what to do with it.
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 18016e7bb32c..e873b0f7a82a 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * random utiility code, for bcache but in theory not specific to bcache * -- cgit v1.2.3 From d0c1b89a409fb8f55783e95effeb60d703f81061 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:19:59 +0800 Subject: bcache: remove unnecessary space before ioctl function pointer arguments This is warned by checkpatch.pl, this patch removes the extra space. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 66e6d5639b38..83504dd8100a 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -275,8 +275,8 @@ struct bcache_device { int (*cache_miss)(struct btree *b, struct search *s, struct bio *bio, unsigned int sectors); - int (*ioctl) (struct bcache_device *d, fmode_t mode, - unsigned int cmd, unsigned long arg); + int (*ioctl)(struct bcache_device *d, fmode_t mode, + unsigned int cmd, unsigned long arg); }; struct io { -- cgit v1.2.3 From eb2b3d034511f7f5f5fa781b9354ea4d5dbd4fc5 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 11 Aug 2018 13:20:00 +0800 Subject: bcache: add the missing comments for smp_mb()/smp_wmb() Checkpatch.pl warns there are 2 locations of smp_mb() and smp_wmb() without code comment. This patch adds the missing code comments for these memory barrier calls. Signed-off-by: Coly Li Reviewed-by: Shenghui Wang Signed-off-by: Jens Axboe --- drivers/md/bcache/closure.h | 4 +++- drivers/md/bcache/super.c | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 7f3594c0be14..eca0d496b686 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -289,10 +289,12 @@ static inline void closure_init_stack(struct closure *cl) } /** - * closure_wake_up - wake up all closures on a wait list. + * closure_wake_up - wake up all closures on a wait list, + * with memory barrier */ static inline void closure_wake_up(struct closure_waitlist *list) { + /* Memory barrier for the wait list */ smp_mb(); __closure_wake_up(list); } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e637d74e2908..94c756c66bd7 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1136,11 +1136,11 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, list_move(&dc->list, &c->cached_devs); calc_cached_dev_sectors(c); - smp_wmb(); /* * dc->c must be set before dc->count != 0 - paired with the mb in * cached_dev_get() */ + smp_wmb(); refcount_set(&dc->count, 1); /* Block writeback thread, but spawn it */ -- cgit v1.2.3 From b089cfd95d32638335c551651a8e00fd2c4edb0b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 14 Aug 2018 10:52:40 -0600 Subject: block: don't warn for flush on read-only device Don't warn for a flush issued to a read-only device. It's not strictly a writable command, as it doesn't change any on-media data by itself. 
Reported-by: Stefan Agner Fixes: 721c7fc701c7 ("block: fail op_is_write() requests to read-only partitions") Signed-off-by: Jens Axboe --- block/blk-core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/block/blk-core.c b/block/blk-core.c index 49af34bf2119..7aeef19704f2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2162,7 +2162,9 @@ static inline bool should_fail_request(struct hd_struct *part, static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) { - if (part->policy && op_is_write(bio_op(bio))) { + const int op = bio_op(bio); + + if (part->policy && (op_is_write(op) && !op_is_flush(op))) { char b[BDEVNAME_SIZE]; printk(KERN_ERR -- cgit v1.2.3
From df60f6e835f763258a06cdbb5690a2e35c1aac4e Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 14 Aug 2018 23:57:49 +0800 Subject: blk-wbt: fix IO hang in wbt_wait() One wbt invariant is that if an IO is tracked via WBT_TRACKED, rqw->inflight should be updated for tracking this IO. But commit c1c80384c8f ("block: remove external dependency on wbt_flags") forgets to remove the early handling of !rwb_enabled(rwb) inside wbt_wait(), so the inflight counter may not be increased in wbt_wait() but still decreased in wbt_done() for this kind of IO; the counter may thus become negative, and wbt_wait() may wait forever. This patch fixes the report in the following link: https://marc.info/?l=linux-block&m=153221542021033&w=2 Fixes: c1c80384c8f ("block: remove external dependency on wbt_flags") Cc: Josef Bacik Reported-by: Ming Lei Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-wbt.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 1d94a20374fc..bb93c7c2b182 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -576,12 +576,8 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) struct rq_wb *rwb = RQWB(rqos); enum wbt_flags flags; - if (!rwb_enabled(rwb)) - return; - flags = bio_to_wbt_flags(rwb, bio); - - if (!wbt_should_throttle(rwb, bio)) { + if (!(flags & WBT_TRACKED)) { if (flags & WBT_READ) wb_timestamp(rwb, &rwb->last_issue); return; -- cgit v1.2.3
From 8a511ba5feec5947bee0635cff8bd796d5f77e3e Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Thu, 16 Aug 2018 18:51:15 +0200 Subject: block, bfq: readd missing reset of parent-entity service The received-service counter needs to be equal to 0 when an entity is set in service. Unfortunately, commit "block, bfq: fix service being wrongly set to zero in case of preemption" mistakenly removed the resetting of this counter for the parent entities of the bfq_queue being set in service. This commit fixes this issue by resetting service for parent entities, directly on the expiration of the in-service bfq_queue. Fixes: 9fae8dd59ff3 ("block, bfq: fix service being wrongly set to zero in case of preemption") Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 41d9036b1822..62efc1b97afb 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -3298,6 +3298,27 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, */ } else entity->service = 0; + + /* + * Reset the received-service counter for every parent entity. + * Differently from what happens with bfqq->entity.service, + * the resetting of this counter never needs to be postponed + * for parent entities.
In fact, in case bfqq may have a + * chance to go on being served using the last, partially + * consumed budget, bfqq->entity.service needs to be kept, + * because if bfqq then actually goes on being served using + * the same budget, the last value of bfqq->entity.service is + * needed to properly decrement bfqq->entity.budget by the + * portion already consumed. In contrast, it is not necessary + * to keep entity->service for parent entities too, because + * the bubble up of the new value of bfqq->entity.budget will + * make sure that the budgets of parent entities are correct, + * even in case bfqq and thus parent entities go on receiving + * service with the same budget. + */ + entity = entity->parent; + for_each_entity(entity) + entity->service = 0; } /* -- cgit v1.2.3
From e02a0aa26bf61b6e481a3d7453a150e692b0df80 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Thu, 16 Aug 2018 18:51:16 +0200 Subject: block, bfq: always update the budget of an entity when needed When the next child entity to serve changes for a given parent entity, the budget of that parent entity must be updated accordingly. Unfortunately, this update is not performed, by mistake, for the entities that happen to switch from having no child entity to serve, to having one child entity to serve. Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-wf2q.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index dbc07b456059..d558fd26740c 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -130,10 +130,14 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, if (!change_without_lookup) /* lookup needed */ next_in_service = bfq_lookup_next_entity(sd, expiration); - if (next_in_service) - parent_sched_may_change = !sd->next_in_service || + if (next_in_service) { + bool new_budget_triggers_change = bfq_update_parent_budget(next_in_service); + parent_sched_may_change = !sd->next_in_service || + new_budget_triggers_change; + } + sd->next_in_service = next_in_service; if (!next_in_service) -- cgit v1.2.3
From d5801088a7bd210dd8fd7add04745e35f0f6ea72 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Thu, 16 Aug 2018 18:51:17 +0200 Subject: block, bfq: reduce write overcharge When a sync request is dispatched, the queue that contains that request, and all the ancestor entities of that queue, are charged with the number of sectors of the request. In contrast, if the request is async, then the queue and its ancestor entities are charged with the number of sectors of the request, multiplied by an overcharge factor. This throttles the bandwidth for async I/O, w.r.t. sync I/O, and it is done to counter the tendency of async writes to steal I/O throughput from reads. On the opposite end, the lower this parameter, the stabler I/O control, in the following respect. The lower this parameter is, the less the bandwidth enjoyed by a group decreases - when the group does writes, w.r.t. when it does reads; - when other groups do reads, w.r.t. when they do writes. The fixes "block, bfq: always update the budget of an entity when needed" and "block, bfq: readd missing reset of parent-entity service" improved I/O control in bfq to such an extent that it has been possible to revise this overcharge factor downwards. This commit introduces the resulting new value.
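As a worked example of the new charging rule (numbers only, taken from the hunks below):

    /* async request of 512 sectors:
     *   old: 512 * 10 = 5120 (or 512 * 2 * 10 = 10240 when
     *        weight-raised queues are busy)
     *   new: 512 * 3 = 1536
     * sync or weight-raised request of 512 sectors: charged 512
     * in both versions. */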
Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 62efc1b97afb..653100fb719e 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -187,11 +187,25 @@ static const int bfq_stats_min_budgets = 194; static const int bfq_default_max_budget = 16 * 1024; /* - * Async to sync throughput distribution is controlled as follows: - * when an async request is served, the entity is charged the number - * of sectors of the request, multiplied by the factor below + * When a sync request is dispatched, the queue that contains that + * request, and all the ancestor entities of that queue, are charged + * with the number of sectors of the request. In constrast, if the + * request is async, then the queue and its ancestor entities are + * charged with the number of sectors of the request, multiplied by + * the factor below. This throttles the bandwidth for async I/O, + * w.r.t. to sync I/O, and it is done to counter the tendency of async + * writes to steal I/O throughput to reads. + * + * The current value of this parameter is the result of a tuning with + * several hardware and software configurations. We tried to find the + * lowest value for which writes do not cause noticeable problems to + * reads. In fact, the lower this parameter, the stabler I/O control, + * in the following respect. The lower this parameter is, the less + * the bandwidth enjoyed by a group decreases + * - when the group does writes, w.r.t. to when it does reads; + * - when other groups do reads, w.r.t. to when they do writes. */ -static const int bfq_async_charge_factor = 10; +static const int bfq_async_charge_factor = 3; /* Default timeout values, in jiffies, approximating CFQ defaults. */ const int bfq_timeout = HZ / 8; @@ -853,16 +867,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq, if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1) return blk_rq_sectors(rq); - /* - * If there are no weight-raised queues, then amplify service - * by just the async charge factor; otherwise amplify service - * by twice the async charge factor, to further reduce latency - * for weight-raised queues. - */ - if (bfqq->bfqd->wr_busy_queues == 0) - return blk_rq_sectors(rq) * bfq_async_charge_factor; - - return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor; + return blk_rq_sectors(rq) * bfq_async_charge_factor; } /** -- cgit v1.2.3 From f812164869b98d81d692ce12bfed76621a5c8908 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Thu, 16 Aug 2018 18:51:18 +0200 Subject: block, bfq: improve code of bfq_bfqq_charge_time bfq_bfqq_charge_time contains some lengthy and redundant code. This commit trims and condenses that code. 
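To see what the condensed code computes, here is a worked example (assuming HZ=1000, so bfq_timeout = HZ/8 = 125 jiffies and timeout_ms = 125; the other values are hypothetical):

    /* bfq_max_budget = 16384, time_ms = 25, entity->service = 1000:
     *   bounded_time_ms         = min(25, 125)     = 25
     *   serv_to_charge_for_time = 16384 * 25 / 125 = 3276 (integer division)
     *   tot_serv_to_charge      = max(3276, 1000)  = 3276
     * which is the same result the longer, pre-patch code produced
     * for 0 < time_ms < timeout_ms. */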
Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-wf2q.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index d558fd26740c..ae52bff43ce4 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -881,15 +881,11 @@ void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq, unsigned long time_ms) { struct bfq_entity *entity = &bfqq->entity; - int tot_serv_to_charge = entity->service; - unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout); - - if (time_ms > 0 && time_ms < timeout_ms) - tot_serv_to_charge = - (bfqd->bfq_max_budget * time_ms) / timeout_ms; - - if (tot_serv_to_charge < entity->service) - tot_serv_to_charge = entity->service; + unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout); + unsigned long bounded_time_ms = min(time_ms, timeout_ms); + int serv_to_charge_for_time = + (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms; + int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service); /* Increase budget to avoid inconsistencies */ if (tot_serv_to_charge > entity->budget) -- cgit v1.2.3
From fc8ebd01deeb12728c83381f6ec923e4a192ffd3 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Wed, 15 Aug 2018 23:56:45 +0200 Subject: block, bfq: return nbytes and not zero from struct cftype .write() method The value that a struct cftype .write() method returns is directly returned to userspace as the value of the write() syscall, so it should be the number of bytes actually written (or consumed) and not zero. Returning zero from the write() syscall makes programs like /bin/echo or bash spin. Signed-off-by: Maciej S. Szmigiero Fixes: e21b7a0b9887 ("block, bfq: add full hierarchical scheduling and cgroups support") Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index a9e8633388f4..58c6efa9f9a9 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -913,7 +913,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of, if (ret) return ret; - return bfq_io_set_weight_legacy(of_css(of), NULL, weight); + ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight); + return ret ?: nbytes; } #ifdef CONFIG_DEBUG_BLK_CGROUP -- cgit v1.2.3
From 599d067dd3c1b9697b786c992b17cd6529c0459c Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Thu, 16 Aug 2018 22:51:40 +0800 Subject: block: change return type to bool Because blk_do_io_stat() only makes a judgement about whether the request contributes to IO statistics, it is better to change its return type to bool. Signed-off-by: Chengguang Xu Signed-off-by: Jens Axboe --- block/blk.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/blk.h b/block/blk.h index d4d67e948920..644975e85053 100644 --- a/block/blk.h +++ b/block/blk.h @@ -297,7 +297,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int); * b) the queue had IO stats enabled when this request was started, and * c) it's a file system request */ -static inline int blk_do_io_stat(struct request *rq) +static inline bool blk_do_io_stat(struct request *rq) { return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT) && -- cgit v1.2.3
From 566484a9e84c89d9875e8d5d1aac3dbc2d8f0ff4 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 16 Aug 2018 14:09:28 -0600 Subject: pktcdvd: fix setting of 'ret' error return for a few cases We initialize it to -ENOMEM, but then later overwrite it.
After overwriting, we don't set it again for two later failure cases. Reported-by: Jason Wood Signed-off-by: Jens Axboe --- drivers/block/pktcdvd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index e285413d4a75..6f1d25c1eb64 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2740,6 +2740,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) pd->write_congestion_on = write_congestion_on; pd->write_congestion_off = write_congestion_off; + ret = -ENOMEM; disk = alloc_disk(1); if (!disk) goto out_mem; -- cgit v1.2.3 From 757d9140072054528b13bbe291583d9823cde195 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 16 Aug 2018 16:08:37 -0400 Subject: tracing/blktrace: Fix to allow setting same value Masami Hiramatsu reported: Current trace-enable attribute in sysfs returns an error if user writes the same setting value as current one, e.g. # cat /sys/block/sda/trace/enable 0 # echo 0 > /sys/block/sda/trace/enable bash: echo: write error: Invalid argument # echo 1 > /sys/block/sda/trace/enable # echo 1 > /sys/block/sda/trace/enable bash: echo: write error: Device or resource busy But this is not a preferred behavior, it should ignore if new setting is same as current one. This fixes the problem as below. # cat /sys/block/sda/trace/enable 0 # echo 0 > /sys/block/sda/trace/enable # echo 1 > /sys/block/sda/trace/enable # echo 1 > /sys/block/sda/trace/enable Link: http://lkml.kernel.org/r/20180816103802.08678002@gandalf.local.home Cc: Ingo Molnar Cc: Jens Axboe Cc: linux-block@vger.kernel.org Cc: stable@vger.kernel.org Fixes: cd649b8bb830d ("blktrace: remove sysfs_blk_trace_enable_show/store()") Reported-by: Masami Hiramatsu Tested-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Jens Axboe --- kernel/trace/blktrace.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b951aa1fac61..96457ad8d720 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1841,6 +1841,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, mutex_lock(&q->blk_trace_mutex); if (attr == &dev_attr_enable) { + if (!!value == !!q->blk_trace) { + ret = 0; + goto out_unlock_bdev; + } if (value) ret = blk_trace_setup_queue(q, bdev); else -- cgit v1.2.3 From fcedba42d94ecdc14ca13d3797cba1ccbf743fa4 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Thu, 16 Aug 2018 15:45:29 -0700 Subject: block: remove duplicate initialization This patch removes the duplicate initialization of q->queue_head in the blk_alloc_queue_node(). This removes the 2nd initialization so that we preserve the initialization order same as declaration present in struct request_queue. 
Reviewed-by: Omar Sandoval Signed-off-by: Chaitanya Kulkarni Signed-off-by: Jens Axboe --- block/blk-core.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/block/blk-core.c b/block/blk-core.c index 7aeef19704f2..5832c4003cfb 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1036,7 +1036,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, laptop_mode_timer_fn, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); INIT_WORK(&q->timeout_work, NULL); - INIT_LIST_HEAD(&q->queue_head); INIT_LIST_HEAD(&q->timeout_list); INIT_LIST_HEAD(&q->icq_list); #ifdef CONFIG_BLK_CGROUP -- cgit v1.2.3
From d48ece209f82c9ce07be942441b53d3fa3664936 Mon Sep 17 00:00:00 2001 From: Jianchao Wang Date: Tue, 21 Aug 2018 15:15:03 +0800 Subject: blk-mq: init hctx sched after update ctx and hctx mapping Currently, when updating nr_hw_queues, the IO scheduler's init_hctx will be invoked before the mapping between ctx and hctx is adapted correctly by blk_mq_map_swqueue. The IO scheduler's init_hctx (kyber) may depend on this mapping, get a wrong result, and finally panic. A simple way to fix this is to switch the IO scheduler to 'none' before updating nr_hw_queues, and then switch it back after nr_hw_queues is updated. blk_mq_sched_init_/exit_hctx are removed because nobody uses them any more. Signed-off-by: Jianchao Wang Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 44 ------------------------- block/blk-mq-sched.h | 5 --- block/blk-mq.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++----- block/blk.h | 2 ++ block/elevator.c | 20 +++++++----- 5 files changed, 98 insertions(+), 65 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index cf9c66c6d35a..29bfe8017a2d 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -462,50 +462,6 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q) blk_mq_sched_free_tags(set, hctx, i); } -int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, - unsigned int hctx_idx) -{ - struct elevator_queue *e = q->elevator; - int ret; - - if (!e) - return 0; - - ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx); - if (ret) - return ret; - - if (e->type->ops.mq.init_hctx) { - ret = e->type->ops.mq.init_hctx(hctx, hctx_idx); - if (ret) { - blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx); - return ret; - } - } - - blk_mq_debugfs_register_sched_hctx(q, hctx); - - return 0; -} - -void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, - unsigned int hctx_idx) -{ - struct elevator_queue *e = q->elevator; - - if (!e) - return; - - blk_mq_debugfs_unregister_sched_hctx(hctx); - - if (e->type->ops.mq.exit_hctx && hctx->sched_data) { - e->type->ops.mq.exit_hctx(hctx, hctx_idx); - hctx->sched_data = NULL; - } - - blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx); -} - int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) { struct blk_mq_hw_ctx *hctx;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 0cb8f938dff9..4e028ee42430 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -28,11 +28,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx); int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); -int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, - unsigned int hctx_idx); -void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, - unsigned int hctx_idx); - static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) { diff --git a/block/blk-mq.c b/block/blk-mq.c index 5efd789910e2..9c8c8c71a13f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2147,8 +2147,6 @@ static void blk_mq_exit_hctx(struct request_queue *q, if (set->ops->exit_request) set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); - blk_mq_sched_exit_hctx(q, hctx, hctx_idx); - if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); @@ -2216,12 +2214,9 @@ static int blk_mq_init_hctx(struct request_queue *q, set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) goto free_bitmap; - if (blk_mq_sched_init_hctx(q, hctx, hctx_idx)) - goto exit_hctx; - hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); if (!hctx->fq) - goto sched_exit_hctx; + goto exit_hctx; if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node)) goto free_fq; @@ -2235,8 +2230,6 @@ static int blk_mq_init_hctx(struct request_queue *q, free_fq: kfree(hctx->fq); - sched_exit_hctx: - blk_mq_sched_exit_hctx(q, hctx, hctx_idx); exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); @@ -2898,10 +2891,81 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) return ret; } +/* + * request_queue and elevator_type pair. + * It is just used by __blk_mq_update_nr_hw_queues to cache + * the elevator_type associated with a request_queue. + */ +struct blk_mq_qe_pair { + struct list_head node; + struct request_queue *q; + struct elevator_type *type; +}; + +/* + * Cache the elevator_type in qe pair list and switch the + * io scheduler to 'none' + */ +static bool blk_mq_elv_switch_none(struct list_head *head, + struct request_queue *q) +{ + struct blk_mq_qe_pair *qe; + + if (!q->elevator) + return true; + + qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); + if (!qe) + return false; + + INIT_LIST_HEAD(&qe->node); + qe->q = q; + qe->type = q->elevator->type; + list_add(&qe->node, head); + + mutex_lock(&q->sysfs_lock); + /* + * After elevator_switch_mq, the previous elevator_queue will be + * released by elevator_release. The reference of the io scheduler + * module get by elevator_get will also be put. So we need to get + * a reference of the io scheduler module here to prevent it to be + * removed. + */ + __module_get(qe->type->elevator_owner); + elevator_switch_mq(q, NULL); + mutex_unlock(&q->sysfs_lock); + + return true; +} + +static void blk_mq_elv_switch_back(struct list_head *head, + struct request_queue *q) +{ + struct blk_mq_qe_pair *qe; + struct elevator_type *t = NULL; + + list_for_each_entry(qe, head, node) + if (qe->q == q) { + t = qe->type; + break; + } + + if (!t) + return; + + list_del(&qe->node); + kfree(qe); + + mutex_lock(&q->sysfs_lock); + elevator_switch_mq(q, t); + mutex_unlock(&q->sysfs_lock); +} + static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) { struct request_queue *q; + LIST_HEAD(head); lockdep_assert_held(&set->tag_list_lock); @@ -2912,6 +2976,14 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_freeze_queue(q); + /* + * Switch IO scheduler to 'none', cleaning up the data associated + * with the previous scheduler. We will switch back once we are done + * updating the new sw to hw queue mappings. 
+ */ + list_for_each_entry(q, &set->tag_list, tag_set_list) + if (!blk_mq_elv_switch_none(&head, q)) + goto switch_back; set->nr_hw_queues = nr_hw_queues; blk_mq_update_queue_map(set); @@ -2920,6 +2992,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, blk_mq_queue_reinit(q); } +switch_back: + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_elv_switch_back(&head, q); + list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_unfreeze_queue(q); } diff --git a/block/blk.h b/block/blk.h index 644975e85053..9db4e389582c 100644 --- a/block/blk.h +++ b/block/blk.h @@ -234,6 +234,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq int elevator_init(struct request_queue *); int elevator_init_mq(struct request_queue *q); +int elevator_switch_mq(struct request_queue *q, + struct elevator_type *new_e); void elevator_exit(struct request_queue *, struct elevator_queue *); int elv_register_queue(struct request_queue *q); void elv_unregister_queue(struct request_queue *q); diff --git a/block/elevator.c b/block/elevator.c index fa828b5bfd4b..5ea6e7d600e4 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -933,16 +933,13 @@ void elv_unregister(struct elevator_type *e) } EXPORT_SYMBOL_GPL(elv_unregister); -static int elevator_switch_mq(struct request_queue *q, +int elevator_switch_mq(struct request_queue *q, struct elevator_type *new_e) { int ret; lockdep_assert_held(&q->sysfs_lock); - blk_mq_freeze_queue(q); - blk_mq_quiesce_queue(q); - if (q->elevator) { if (q->elevator->registered) elv_unregister_queue(q); @@ -968,8 +965,6 @@ static int elevator_switch_mq(struct request_queue *q, blk_add_trace_msg(q, "elv switch: none"); out: - blk_mq_unquiesce_queue(q); - blk_mq_unfreeze_queue(q); return ret; } @@ -1021,8 +1016,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) lockdep_assert_held(&q->sysfs_lock); - if (q->mq_ops) - return elevator_switch_mq(q, new_e); + if (q->mq_ops) { + blk_mq_freeze_queue(q); + blk_mq_quiesce_queue(q); + + err = elevator_switch_mq(q, new_e); + + blk_mq_unquiesce_queue(q); + blk_mq_unfreeze_queue(q); + + return err; + } /* * Turn on BYPASS and drain all requests w/ elevator private data. -- cgit v1.2.3 From f5bbbbe4d63577026f908a809f22f5fd5a90ea1f Mon Sep 17 00:00:00 2001 From: Jianchao Wang Date: Tue, 21 Aug 2018 15:15:04 +0800 Subject: blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter For blk-mq, part_in_flight/rw will invoke blk_mq_in_flight/rw to account the inflight requests. It will access the queue_hw_ctx and nr_hw_queues w/o any protection. When updating nr_hw_queues and blk_mq_in_flight/rw occur concurrently, panic comes up. Before update nr_hw_queues, the q will be frozen. So we could use q_usage_counter to avoid the race. percpu_ref_is_zero is used here so that we will not miss any in-flight request. The access to nr_hw_queues and queue_hw_ctx in blk_mq_queue_tag_busy_iter are under rcu critical section, __blk_mq_update_nr_hw_queues could use synchronize_rcu to ensure the zeroed q_usage_counter to be globally visible. 
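The two sides can be read as a handshake; a schematic condensed from the hunks above (error paths and the per-hctx iteration omitted):

    /* reader: blk_mq_queue_tag_busy_iter() */
    rcu_read_lock();
    if (percpu_ref_is_zero(&q->q_usage_counter)) {
    	rcu_read_unlock();	/* queue frozen: hctx map may change */
    	return;
    }
    /* ... walk queue_hw_ctx[0..nr_hw_queues) safely ... */
    rcu_read_unlock();

    /* updater: __blk_mq_update_nr_hw_queues() */
    blk_mq_freeze_queue(q);	/* drives q_usage_counter to zero */
    synchronize_rcu();		/* waits out readers that saw it non-zero */
    /* ... now safe to change nr_hw_queues and queue_hw_ctx ... */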
Signed-off-by: Jianchao Wang
Reviewed-by: Ming Lei
Signed-off-by: Jens Axboe
---
 block/blk-mq-tag.c | 14 +++++++++++++-
 block/blk-mq.c     |  4 ++++
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c0c4e63583ae..8c5cc115b3f8 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -320,6 +320,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	/*
+	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+	 * queue_hw_ctx after freezing the queue. So we can use q_usage_counter
+	 * to avoid racing with it. __blk_mq_update_nr_hw_queues uses
+	 * synchronize_rcu to ensure all of the users go out of the critical
+	 * section below and see the zeroed q_usage_counter.
+	 */
+	rcu_read_lock();
+	if (percpu_ref_is_zero(&q->q_usage_counter)) {
+		rcu_read_unlock();
+		return;
+	}
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
 
@@ -335,7 +347,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-
+	rcu_read_unlock();
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9c8c8c71a13f..81cb84b17b73 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2976,6 +2976,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_freeze_queue(q);
 
+	/*
+	 * Sync with blk_mq_queue_tag_busy_iter.
+	 */
+	synchronize_rcu();
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
-- 
cgit v1.2.3


From 1e7da865b8c0428b9bcb18ba05ba0f6f47bcfdb4 Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Tue, 21 Aug 2018 10:12:48 +0100
Subject: block/DAC960.c: make some arrays static const, shrinks object size

Don't populate the arrays ReadCacheStatus, WriteCacheStatus and
SenseErrors on the stack but instead make them static const.
Makes the object code smaller by 47 bytes: Before: text data bss dec hex filename 160974 34628 832 196434 2ff52 drivers/block/DAC960.o After: text data bss dec hex filename 160671 34884 832 196387 2ff23 drivers/block/DAC960.o (gcc version 8.2.0 x86_64) Signed-off-by: Colin Ian King Signed-off-by: Jens Axboe --- drivers/block/DAC960.c | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index f99e5c883368..581312ac375f 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -2428,16 +2428,20 @@ static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T { DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo = Controller->V2.LogicalDeviceInformation[LogicalDriveNumber]; - unsigned char *ReadCacheStatus[] = { "Read Cache Disabled", - "Read Cache Enabled", - "Read Ahead Enabled", - "Intelligent Read Ahead Enabled", - "-", "-", "-", "-" }; - unsigned char *WriteCacheStatus[] = { "Write Cache Disabled", - "Logical Device Read Only", - "Write Cache Enabled", - "Intelligent Write Cache Enabled", - "-", "-", "-", "-" }; + static const unsigned char *ReadCacheStatus[] = { + "Read Cache Disabled", + "Read Cache Enabled", + "Read Ahead Enabled", + "Intelligent Read Ahead Enabled", + "-", "-", "-", "-" + }; + static const unsigned char *WriteCacheStatus[] = { + "Write Cache Disabled", + "Logical Device Read Only", + "Write Cache Enabled", + "Intelligent Write Cache Enabled", + "-", "-", "-", "-" + }; unsigned char *GeometryTranslation; if (LogicalDeviceInfo == NULL) continue; switch (LogicalDeviceInfo->DriveGeometry) @@ -4339,14 +4343,16 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command) static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command) { DAC960_Controller_T *Controller = Command->Controller; - unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR", - "NOT READY", "MEDIUM ERROR", - "HARDWARE ERROR", "ILLEGAL REQUEST", - "UNIT ATTENTION", "DATA PROTECT", - "BLANK CHECK", "VENDOR-SPECIFIC", - "COPY ABORTED", "ABORTED COMMAND", - "EQUAL", "VOLUME OVERFLOW", - "MISCOMPARE", "RESERVED" }; + static const unsigned char *SenseErrors[] = { + "NO SENSE", "RECOVERED ERROR", + "NOT READY", "MEDIUM ERROR", + "HARDWARE ERROR", "ILLEGAL REQUEST", + "UNIT ATTENTION", "DATA PROTECT", + "BLANK CHECK", "VENDOR-SPECIFIC", + "COPY ABORTED", "ABORTED COMMAND", + "EQUAL", "VOLUME OVERFLOW", + "MISCOMPARE", "RESERVED" + }; unsigned char *CommandName = "UNKNOWN"; switch (Command->CommandType) { -- cgit v1.2.3
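[Editor's note: the shape of the size change is worth spelling out. With an
automatic array, the compiler emits instructions that rebuild the pointer
table on the stack at every call, inflating .text; a static const table is
laid down once in the object file's data sections instead. The 256-byte
growth in data above lines up with the 32 pointers moved out of the two
functions (8 + 8 + 16 pointers at 8 bytes each on x86_64). A minimal,
self-contained sketch of the same transformation, with names invented here
rather than taken from DAC960:]

#include <stdio.h>

/* Before: an automatic array; the compiler emits code that builds the
 * pointer table on the stack on every call, growing .text. */
static const char *state_name_stack(unsigned int idx)
{
	const char *names[] = { "OK", "DEGRADED", "OFFLINE", "-" };

	return idx < sizeof(names) / sizeof(names[0]) ? names[idx] : "UNKNOWN";
}

/* After: the table lives in the object file's data sections and is never
 * rebuilt at run time; the function is just a bounds check plus a load. */
static const char *state_name_static(unsigned int idx)
{
	static const char * const names[] = { "OK", "DEGRADED", "OFFLINE", "-" };

	return idx < sizeof(names) / sizeof(names[0]) ? names[idx] : "UNKNOWN";
}

int main(void)
{
	printf("%s %s\n", state_name_stack(1), state_name_static(2));
	return 0;
}

[Compiling both variants at -O2 and comparing size(1) output should show the
same trade as the Before/After figures above: fewer text bytes for the static
version, more data.]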