author	Wang Shilong <wangsl.fnst@cn.fujitsu.com>	2014-01-29 00:25:34 +0800
committer	Chris Mason <clm@fb.com>	2014-01-29 07:06:27 -0800
commit	4c7a6f74ceeafd738b55d1c57349327f7ea8e895 (patch)
tree	eb3d4c6e6406490e6c31ee4da3512506daddc4b8 /fs
parent	f05c474688762f186b16a26366755b6ef0bfed0c (diff)
download	linux-4c7a6f74ceeafd738b55d1c57349327f7ea8e895.tar.bz2
Btrfs: rework ulist with list+rb_tree
We are really suffering from the current ulist implementation. Several developers have taken a shot at it; here are my ideas:

1. use list+rb_tree instead of array+rb_tree
2. move cur_list into the iterator rather than the ulist structure
3. give every node a seqnum when it is added, used as a self-check while iterating

I noticed Zach Brown's earlier comments; the long-term plan is to retire the ulist implementation entirely, but for now we should at least get rid of the array in ulist.

Cc: Liu Bo <bo.li.liu@oracle.com>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Zach Brown <zab@redhat.com>
Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
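For context, the caller-facing API is untouched by this rework; only the backing storage changes from an array to list+rb_tree. The sketch below is loosely based on the sample usage described in the ulist.c header comment; walk_graph() itself and the "children of node" step are placeholders, not code from this patch.

#include <linux/slab.h>
#include "ulist.h"

/*
 * Enumerate a graph rooted at 'root' without visiting a node twice.
 * ulist_add() deduplicates via the rb_tree; ulist_next() walks the
 * nodes in insertion order.
 */
static int walk_graph(u64 root)
{
	struct ulist *ulist;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	ulist = ulist_alloc(GFP_NOFS);
	if (!ulist)
		return -ENOMEM;

	ret = ulist_add(ulist, root, 0, GFP_NOFS);
	if (ret < 0)
		goto out;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter))) {
		/* for each child c of node->val (graph-specific):	*/
		/*	ulist_add(ulist, c, 0, GFP_NOFS);		*/
		/* node->val and node->aux hold the key and aux value	*/
	}
	ret = 0;
out:
	ulist_free(ulist);
	return ret;
}

Note that ulist_add() returns 1 when a new value was inserted and 0 when it was already present, so newly discovered children simply extend the list being iterated.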
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/ulist.c	105
-rw-r--r--	fs/btrfs/ulist.h	38
2 files changed, 55 insertions(+), 88 deletions(-)
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 35f5de9dd498..8dd0e8dfdaf4 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include "ulist.h"
+#include "ctree.h"
/*
* ulist is a generic data structure to hold a collection of unique u64
@@ -14,10 +15,6 @@
* enumerating it.
* It is possible to store an auxiliary value along with the key.
*
- * The implementation is preliminary and can probably be sped up
- * significantly. A first step would be to store the values in an rbtree
- * as soon as ULIST_SIZE is exceeded.
- *
* A sample usage for ulists is the enumeration of directed graphs without
* visiting a node twice. The pseudo-code could look like this:
*
@@ -50,10 +47,9 @@
*/
void ulist_init(struct ulist *ulist)
{
- ulist->nnodes = 0;
- ulist->nodes = ulist->int_nodes;
- ulist->nodes_alloced = ULIST_SIZE;
+ INIT_LIST_HEAD(&ulist->nodes);
ulist->root = RB_ROOT;
+ ulist->nnodes = 0;
}
EXPORT_SYMBOL(ulist_init);
@@ -66,14 +62,14 @@ EXPORT_SYMBOL(ulist_init);
*/
void ulist_fini(struct ulist *ulist)
{
- /*
- * The first ULIST_SIZE elements are stored inline in struct ulist.
- * Only if more elements are alocated they need to be freed.
- */
- if (ulist->nodes_alloced > ULIST_SIZE)
- kfree(ulist->nodes);
- ulist->nodes_alloced = 0; /* in case ulist_fini is called twice */
+ struct ulist_node *node;
+ struct ulist_node *next;
+
+ list_for_each_entry_safe(node, next, &ulist->nodes, list) {
+ kfree(node);
+ }
ulist->root = RB_ROOT;
+ INIT_LIST_HEAD(&ulist->nodes);
}
EXPORT_SYMBOL(ulist_fini);
@@ -192,57 +188,29 @@ int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
u64 *old_aux, gfp_t gfp_mask)
{
- int ret = 0;
- struct ulist_node *node = NULL;
+ int ret;
+ struct ulist_node *node;
+
node = ulist_rbtree_search(ulist, val);
if (node) {
if (old_aux)
*old_aux = node->aux;
return 0;
}
+ node = kmalloc(sizeof(*node), gfp_mask);
+ if (!node)
+ return -ENOMEM;
- if (ulist->nnodes >= ulist->nodes_alloced) {
- u64 new_alloced = ulist->nodes_alloced + 128;
- struct ulist_node *new_nodes;
- void *old = NULL;
- int i;
-
- /*
- * if nodes_alloced == ULIST_SIZE no memory has been allocated
- * yet, so pass NULL to krealloc
- */
- if (ulist->nodes_alloced > ULIST_SIZE)
- old = ulist->nodes;
+ node->val = val;
+ node->aux = aux;
+#ifdef CONFIG_BTRFS_DEBUG
+ node->seqnum = ulist->nnodes;
+#endif
- new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
- gfp_mask);
- if (!new_nodes)
- return -ENOMEM;
-
- if (!old)
- memcpy(new_nodes, ulist->int_nodes,
- sizeof(ulist->int_nodes));
-
- ulist->nodes = new_nodes;
- ulist->nodes_alloced = new_alloced;
-
- /*
- * krealloc actually uses memcpy, which does not copy rb_node
- * pointers, so we have to do it ourselves. Otherwise we may
- * be bitten by crashes.
- */
- ulist->root = RB_ROOT;
- for (i = 0; i < ulist->nnodes; i++) {
- ret = ulist_rbtree_insert(ulist, &ulist->nodes[i]);
- if (ret < 0)
- return ret;
- }
- }
- ulist->nodes[ulist->nnodes].val = val;
- ulist->nodes[ulist->nnodes].aux = aux;
- ret = ulist_rbtree_insert(ulist, &ulist->nodes[ulist->nnodes]);
- BUG_ON(ret);
- ++ulist->nnodes;
+ ret = ulist_rbtree_insert(ulist, node);
+ ASSERT(!ret);
+ list_add_tail(&node->list, &ulist->nodes);
+ ulist->nnodes++;
return 1;
}
@@ -266,11 +234,26 @@ EXPORT_SYMBOL(ulist_add);
*/
struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
{
- if (ulist->nnodes == 0)
+ struct ulist_node *node;
+
+ if (list_empty(&ulist->nodes))
return NULL;
- if (uiter->i < 0 || uiter->i >= ulist->nnodes)
+ if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
return NULL;
-
- return &ulist->nodes[uiter->i++];
+ if (uiter->cur_list) {
+ uiter->cur_list = uiter->cur_list->next;
+ } else {
+ uiter->cur_list = ulist->nodes.next;
+#ifdef CONFIG_BTRFS_DEBUG
+ uiter->i = 0;
+#endif
+ }
+ node = list_entry(uiter->cur_list, struct ulist_node, list);
+#ifdef CONFIG_BTRFS_DEBUG
+ ASSERT(node->seqnum == uiter->i);
+ ASSERT(uiter->i >= 0 && uiter->i < ulist->nnodes);
+ uiter->i++;
+#endif
+ return node;
}
EXPORT_SYMBOL(ulist_next);
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index fb36731074b5..2be7102d8073 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -17,18 +17,12 @@
* enumerating it.
* It is possible to store an auxiliary value along with the key.
*
- * The implementation is preliminary and can probably be sped up
- * significantly. A first step would be to store the values in an rbtree
- * as soon as ULIST_SIZE is exceeded.
*/
-
-/*
- * number of elements statically allocated inside struct ulist
- */
-#define ULIST_SIZE 16
-
struct ulist_iterator {
+#ifdef CONFIG_BTRFS_DEBUG
int i;
+#endif
+ struct list_head *cur_list; /* hint to start search */
};
/*
@@ -37,6 +31,12 @@ struct ulist_iterator {
struct ulist_node {
u64 val; /* value to store */
u64 aux; /* auxiliary value saved along with the val */
+
+#ifdef CONFIG_BTRFS_DEBUG
+ int seqnum; /* sequence number this node is added */
+#endif
+
+ struct list_head list; /* used to link node */
struct rb_node rb_node; /* used to speed up search */
};
@@ -46,24 +46,8 @@ struct ulist {
*/
unsigned long nnodes;
- /*
- * number of nodes we already have room for
- */
- unsigned long nodes_alloced;
-
- /*
- * pointer to the array storing the elements. The first ULIST_SIZE
- * elements are stored inline. In this case the it points to int_nodes.
- * After exceeding ULIST_SIZE, dynamic memory is allocated.
- */
- struct ulist_node *nodes;
-
+ struct list_head nodes;
struct rb_root root;
-
- /*
- * inline storage space for the first ULIST_SIZE entries
- */
- struct ulist_node int_nodes[ULIST_SIZE];
};
void ulist_init(struct ulist *ulist);
@@ -77,6 +61,6 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
struct ulist_node *ulist_next(struct ulist *ulist,
struct ulist_iterator *uiter);
-#define ULIST_ITER_INIT(uiter) ((uiter)->i = 0)
+#define ULIST_ITER_INIT(uiter) ((uiter)->cur_list = NULL)
#endif
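The reworked ulist_add_merge() relies on ulist_rbtree_search() and ulist_rbtree_insert(), static helpers that predate this patch and therefore do not appear in the hunks above. They are ordinary rb-tree lookup/insert routines keyed on node->val; the sketch below shows their assumed shape (not copied verbatim from the tree, details may differ).

#include <linux/errno.h>
#include <linux/rbtree.h>
#include "ulist.h"

/* Look up 'val' in the ulist's rb_tree; NULL if not present. */
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *n = ulist->root.rb_node;
	struct ulist_node *u;

	while (n) {
		u = rb_entry(n, struct ulist_node, rb_node);
		if (u->val < val)
			n = n->rb_right;
		else if (u->val > val)
			n = n->rb_left;
		else
			return u;
	}
	return NULL;
}

/* Insert 'ins' keyed on ins->val; -EEXIST if the value already exists. */
static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node **p = &ulist->root.rb_node;
	struct rb_node *parent = NULL;
	struct ulist_node *cur;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct ulist_node, rb_node);
		if (cur->val < ins->val)
			p = &(*p)->rb_right;
		else if (cur->val > ins->val)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}
	rb_link_node(&ins->rb_node, parent, p);
	rb_insert_color(&ins->rb_node, &ulist->root);
	return 0;
}

The rb_tree keeps membership checks at O(log n), while the new list_head keeps O(1) append and a stable insertion order for ulist_next(), which is what the array previously provided.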