author | Kent Overstreet <kmo@daterainc.com> | 2013-12-17 21:56:21 -0800
---|---|---
committer | Kent Overstreet <kmo@daterainc.com> | 2014-01-08 13:05:12 -0800
commit | fafff81cead78157099df1ee10af16cc51893ddc (patch)
tree | a198145a2ac94431667e82e2eb09c1365fb94b84 /drivers/md
parent | 085d2a3dd4d65b7bce1dead987c647dbbc014281 (diff)
download | linux-fafff81cead78157099df1ee10af16cc51893ddc.tar.bz2
bcache: Bkey indexing renaming
More refactoring:
node() -> bset_bkey_idx()
end() -> bset_bkey_last()
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
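The renamed helpers keep the old semantics: `bset_bkey_idx(i, idx)` returns the key at u64 index `idx` (formerly `node(i, idx)`), and `bset_bkey_last(i)` points just past the last key in a bset (formerly `end(i)`). A minimal sketch of the usage pattern, assuming the bcache definitions of `struct bset`, `struct bkey`, `bkey_next()` and the helpers introduced by this patch; `walk_bset()` and `print_key()` are hypothetical names, not part of the kernel:

```c
/*
 * Minimal sketch, assuming the bcache definitions from this patch.
 * print_key() is a hypothetical callback, not a kernel API.
 */
static void walk_bset(struct bset *i, void (*print_key)(const struct bkey *))
{
	struct bkey *k;

	/* bset_bkey_last(i) is one past the last key, replacing end(i) */
	for (k = i->start; k < bset_bkey_last(i); k = bkey_next(k))
		print_key(k);

	/* bset_bkey_idx(i, 0) is the first key, replacing node(i, 0) */
	if (i->keys)
		print_key(bset_bkey_idx(i, 0));
}
```

The same iteration pattern appears in dump_bset() and bch_journal_mark() in the diff below.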
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/bcache/bcache.h | 11
-rw-r--r-- | drivers/md/bcache/bset.c | 28
-rw-r--r-- | drivers/md/bcache/bset.h | 30
-rw-r--r-- | drivers/md/bcache/btree.c | 33
-rw-r--r-- | drivers/md/bcache/debug.c | 6
-rw-r--r-- | drivers/md/bcache/journal.c | 6
6 files changed, 62 insertions, 52 deletions
```diff
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 3fd87323368c..2b46c86ac440 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -724,9 +724,6 @@ struct bbio {
 #define __set_blocks(i, k, c)	DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
 #define set_blocks(i, c)	__set_blocks(i, (i)->keys, c)
 
-#define node(i, j)		((struct bkey *) ((i)->d + (j)))
-#define end(i)			node(i, (i)->keys)
-
 #define btree_data_space(b)	(PAGE_SIZE << (b)->page_order)
 
 #define prios_per_bucket(c)				\
@@ -791,18 +788,14 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 
 /* Btree key macros */
 
-static inline void bkey_init(struct bkey *k)
-{
-	*k = ZERO_KEY;
-}
-
 /*
  * This is used for various on disk data structures - cache_sb, prio_set, bset,
  * jset: The checksum is _always_ the first 8 bytes of these structs
  */
 #define csum_set(i)							\
 	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
-		  ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+		  ((void *) bset_bkey_last(i)) -			\
+		  (((void *) (i)) + sizeof(uint64_t)))
 
 /* Error handling macros */
 
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index f91347a55c41..bfee926e35f0 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -500,7 +500,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
 		: tree_to_prev_bkey(t, j >> ffs(j));
 
 	struct bkey *r = is_power_of_2(j + 1)
-		? node(t->data, t->data->keys - bkey_u64s(&t->end))
+		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
 		: tree_to_bkey(t, j >> (ffz(j) + 1));
 
 	BUG_ON(m < l || m > r);
@@ -559,7 +559,7 @@ static void bset_build_written_tree(struct btree *b)
 	bset_alloc_tree(b, t);
 
 	t->size = min_t(unsigned,
-			bkey_to_cacheline(t, end(t->data)),
+			bkey_to_cacheline(t, bset_bkey_last(t->data)),
 			b->sets->tree + bset_tree_space(b) - t->tree);
 
 	if (t->size < 2) {
@@ -582,7 +582,7 @@ static void bset_build_written_tree(struct btree *b)
 		t->tree[j].m = bkey_to_cacheline_offset(k);
 	}
 
-	while (bkey_next(k) != end(t->data))
+	while (bkey_next(k) != bset_bkey_last(t->data))
 		k = bkey_next(k);
 
 	t->end = *k;
@@ -600,7 +600,7 @@ void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
 	unsigned inorder, j = 1;
 
 	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
-		if (k < end(t->data))
+		if (k < bset_bkey_last(t->data))
 			goto found_set;
 
 	BUG();
@@ -613,7 +613,7 @@ found_set:
 	if (k == t->data->start)
 		goto fix_left;
 
-	if (bkey_next(k) == end(t->data)) {
+	if (bkey_next(k) == bset_bkey_last(t->data)) {
 		t->end = *k;
 		goto fix_right;
 	}
@@ -679,7 +679,7 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
 	/* Possibly add a new entry to the end of the lookup table */
 
 	for (k = table_to_bkey(t, t->size - 1);
-	     k != end(t->data);
+	     k != bset_bkey_last(t->data);
 	     k = bkey_next(k))
 		if (t->size == bkey_to_cacheline(t, k)) {
 			t->prev[t->size] = bkey_to_cacheline_offset(k);
@@ -715,7 +715,7 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
 	unsigned li = 0, ri = t->size;
 
 	BUG_ON(!b->nsets &&
-	       t->size < bkey_to_cacheline(t, end(t->data)));
+	       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
 
 	while (li + 1 != ri) {
 		unsigned m = (li + ri) >> 1;
@@ -728,7 +728,7 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
 
 	return (struct bset_search_iter) {
 		table_to_bkey(t, li),
-		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
 	};
 }
 
@@ -780,7 +780,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
 			f = &t->tree[inorder_next(j, t->size)];
 			r = cacheline_to_bkey(t, inorder, f->m);
 		} else
-			r = end(t->data);
+			r = bset_bkey_last(t->data);
 	} else {
 		r = cacheline_to_bkey(t, inorder, f->m);
 
@@ -816,7 +816,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 	if (unlikely(!t->size)) {
 		i.l = t->data->start;
-		i.r = end(t->data);
+		i.r = bset_bkey_last(t->data);
 	} else if (bset_written(b, t)) {
 		/*
 		 * Each node in the auxiliary search tree covers a certain range
@@ -826,7 +826,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 		 */
 
 		if (unlikely(bkey_cmp(search, &t->end) >= 0))
-			return end(t->data);
+			return bset_bkey_last(t->data);
 
 		if (unlikely(bkey_cmp(search, t->data->start) < 0))
 			return t->data->start;
@@ -842,7 +842,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 			       inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
 			 search) > 0);
 
-	BUG_ON(i.r != end(t->data) &&
+	BUG_ON(i.r != bset_bkey_last(t->data) &&
 	       bkey_cmp(i.r, search) <= 0);
 
 	}
@@ -897,7 +897,7 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
 
 	for (; start <= &b->sets[b->nsets]; start++) {
 		ret = bch_bset_search(b, start, search);
-		bch_btree_iter_push(iter, ret, end(start->data));
+		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
 	}
 
 	return ret;
@@ -1067,7 +1067,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 	} else {
 		b->sets[start].data->keys = out->keys;
 		memcpy(b->sets[start].data->start, out->start,
-		       (void *) end(out) - (void *) out->start);
+		       (void *) bset_bkey_last(out) - (void *) out->start);
 	}
 
 	if (used_mempool)
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 303d31a3b9e6..88b6edbf508b 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -190,14 +190,6 @@ struct bset_tree {
 	struct bset	*data;
 };
 
-static __always_inline int64_t bkey_cmp(const struct bkey *l,
-					const struct bkey *r)
-{
-	return unlikely(KEY_INODE(l) != KEY_INODE(r))
-		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
-		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
-}
-
 /* Keylists */
 
 struct keylist {
@@ -261,6 +253,28 @@ struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
 int __bch_keylist_realloc(struct keylist *, unsigned);
 
+/* Bkey utility code */
+
+#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+	return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+	*k = ZERO_KEY;
+}
+
+static __always_inline int64_t bkey_cmp(const struct bkey *l,
+					const struct bkey *r)
+{
+	return unlikely(KEY_INODE(l) != KEY_INODE(r))
+		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
+		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
+}
+
 void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *, unsigned);
 
 bool __bch_cut_front(const struct bkey *, struct bkey *);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f0a6399fdd3c..8aaaf16637a0 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -197,7 +197,7 @@ void bkey_put(struct cache_set *c, struct bkey *k)
 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
 {
 	uint64_t crc = b->key.ptr[0];
-	void *data = (void *) i + 8, *end = end(i);
+	void *data = (void *) i + 8, *end = bset_bkey_last(i);
 
 	crc = bch_crc64_update(crc, data, end - data);
 	return crc ^ 0xffffffffffffffffULL;
@@ -251,7 +251,7 @@ void bch_btree_node_read_done(struct btree *b)
 		if (i != b->sets[0].data && !i->keys)
 			goto err;
 
-		bch_btree_iter_push(iter, i->start, end(i));
+		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
 
 		b->written += set_blocks(i, b->c);
 	}
@@ -1310,7 +1310,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 
 		if (i > 1) {
 			for (k = n2->start;
-			     k < end(n2);
+			     k < bset_bkey_last(n2);
 			     k = bkey_next(k)) {
 				if (__set_blocks(n1, n1->keys + keys +
 						 bkey_u64s(k), b->c) > blocks)
@@ -1343,16 +1343,17 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 		if (last)
 			bkey_copy_key(&new_nodes[i]->key, last);
 
-		memcpy(end(n1),
+		memcpy(bset_bkey_last(n1),
 		       n2->start,
-		       (void *) node(n2, keys) - (void *) n2->start);
+		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
 
 		n1->keys += keys;
 		r[i].keys = n1->keys;
 
 		memmove(n2->start,
-			node(n2, keys),
-			(void *) end(n2) - (void *) node(n2, keys));
+			bset_bkey_idx(n2, keys),
+			(void *) bset_bkey_last(n2) -
+			(void *) bset_bkey_idx(n2, keys));
 
 		n2->keys -= keys;
 
@@ -1830,7 +1831,7 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
 
 	memmove((uint64_t *) where + bkey_u64s(insert),
 		where,
-		(void *) end(i) - (void *) where);
+		(void *) bset_bkey_last(i) - (void *) where);
 
 	i->keys += bkey_u64s(insert);
 	bkey_copy(where, insert);
@@ -2014,7 +2015,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
 						     KEY_START(k), KEY_SIZE(k));
 
-		while (m != end(i) &&
+		while (m != bset_bkey_last(i) &&
 		       bkey_cmp(k, &START_KEY(m)) > 0)
 			prev = m, m = bkey_next(m);
 
@@ -2028,12 +2029,12 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 			goto merged;
 
 		status = BTREE_INSERT_STATUS_OVERWROTE;
-		if (m != end(i) &&
+		if (m != bset_bkey_last(i) &&
 		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
 			goto copy;
 
 		status = BTREE_INSERT_STATUS_FRONT_MERGE;
-		if (m != end(i) &&
+		if (m != bset_bkey_last(i) &&
 		    bch_bkey_try_merge(b, k, m))
 			goto copy;
 	} else {
@@ -2142,16 +2143,18 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		 */
 
 		while (keys < (n1->sets[0].data->keys * 3) / 5)
-			keys += bkey_u64s(node(n1->sets[0].data, keys));
+			keys += bkey_u64s(bset_bkey_idx(n1->sets[0].data,
+							keys));
 
-		bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
-		keys += bkey_u64s(node(n1->sets[0].data, keys));
+		bkey_copy_key(&n1->key,
+			      bset_bkey_idx(n1->sets[0].data, keys));
+		keys += bkey_u64s(bset_bkey_idx(n1->sets[0].data, keys));
 
 		n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
 		n1->sets[0].data->keys = keys;
 
 		memcpy(n2->sets[0].data->start,
-		       end(n1->sets[0].data),
+		       bset_bkey_last(n1->sets[0].data),
 		       n2->sets[0].data->keys * sizeof(uint64_t));
 
 		bkey_copy_key(&n2->key, &b->key);
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 8887c550d56c..955fa1d31774 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -84,7 +84,7 @@ static void dump_bset(struct btree *b, struct bset *i, unsigned set)
 	unsigned j;
 	char buf[80];
 
-	for (k = i->start; k < end(i); k = next) {
+	for (k = i->start; k < bset_bkey_last(i); k = next) {
 		next = bkey_next(k);
 
 		bch_bkey_to_text(buf, sizeof(buf), k);
@@ -102,7 +102,7 @@ static void dump_bset(struct btree *b, struct bset *i, unsigned set)
 
 		printk(" %s\n", bch_ptr_status(b->c, k));
 
-		if (next < end(i) &&
+		if (next < bset_bkey_last(i) &&
 		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
 			printk(KERN_ERR "Key skipped backwards\n");
 	}
@@ -162,7 +162,7 @@ void bch_btree_verify(struct btree *b)
 
 	if (inmemory->keys != sorted->keys ||
 	    memcmp(inmemory->start, sorted->start,
-		   (void *) end(inmemory) - (void *) inmemory->start)) {
+		   (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
 		struct bset *i;
 		unsigned j;
 
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 9d32d5790822..5e14e3325ec1 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -284,7 +284,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 		}
 
 		for (k = i->j.start;
-		     k < end(&i->j);
+		     k < bset_bkey_last(&i->j);
 		     k = bkey_next(k)) {
 			unsigned j;
 
@@ -322,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 			 n, i->j.seq - 1, start, end);
 
 		for (k = i->j.start;
-		     k < end(&i->j);
+		     k < bset_bkey_last(&i->j);
 		     k = bkey_next(k)) {
 			trace_bcache_journal_replay_key(k);
 
@@ -751,7 +751,7 @@ atomic_t *bch_journal(struct cache_set *c,
 
 	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
 
-	memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
 	w->data->keys += bch_keylist_nkeys(keys);
 
 	ret = &fifo_back(&c->journal.pin);
```