author    Robert Jennings <rcj@linux.vnet.ibm.com>    2011-01-28 09:01:55 -0600
committer Greg Kroah-Hartman <gregkh@suse.de>         2011-02-04 13:29:12 -0800
commit    939b3f0b1415755d534a20f4067e6b367e1e4021 (patch)
tree      73cdbaa2517bc6dcab4621d5de2be09949e48c55 /drivers/staging/zram
parent    2787f959d6c5fb258d964218ac75346019f49ee9 (diff)
download  linux-939b3f0b1415755d534a20f4067e6b367e1e4021.tar.bz2
zram/xvmalloc: combine duplicate block delete code
This patch eliminates duplicate code. remove_block_head() is a special case of remove_block() and can be folded into remove_block() without confusion.

The portion of code in remove_block_head() that was noted as "DEBUG ONLY" is now mandatory. This provides consistent management of the doubly linked list of blocks on each freelist and makes the consolidation of the delete-block code safe: the first and last blocks carry NULL in their previous and next page pointers, respectively. Additionally, whenever a block is removed from a freelist, its next and previous pointers are set to NULL to avoid misuse outside xvmalloc.

Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
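As a minimal standalone sketch of the invariant the commit message describes (this is not the xvmalloc code; struct node, list_remove() and the field names here are made up for illustration), the head-removal case can be handled inside the general unlink path, with the removed node's links always cleared afterwards:

	#include <stddef.h>
	#include <stdio.h>

	struct node {
		struct node *prev;
		struct node *next;
	};

	/* Unlink 'n' from the list whose head pointer is *head. */
	static void list_remove(struct node **head, struct node *n)
	{
		if (n->prev)
			n->prev->next = n->next;
		if (n->next)
			n->next->prev = n->prev;

		/*
		 * If 'n' was the head, advance the head and clear the new
		 * head's prev pointer -- the formerly "DEBUG ONLY" step that
		 * the consolidated remove_block() now performs unconditionally.
		 */
		if (*head == n) {
			*head = n->next;
			if (*head)
				(*head)->prev = NULL;
		}

		/*
		 * Always clear the removed node's own links so stale pointers
		 * cannot be misused after the node leaves the freelist.
		 */
		n->prev = NULL;
		n->next = NULL;
	}

	int main(void)
	{
		struct node a = { NULL, NULL }, b = { NULL, NULL };
		struct node *head = &a;

		a.next = &b;
		b.prev = &a;

		list_remove(&head, &a);	/* exercises the head-removal path */
		printf("head is b: %d, a cleared: %d\n",
		       head == &b, a.prev == NULL && a.next == NULL);
		return 0;
	}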
Diffstat (limited to 'drivers/staging/zram')
-rw-r--r--  drivers/staging/zram/xvmalloc.c  73
1 file changed, 31 insertions(+), 42 deletions(-)
diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/zram/xvmalloc.c
index 4f6cb8de6865..ae0623a65ab9 100644
--- a/drivers/staging/zram/xvmalloc.c
+++ b/drivers/staging/zram/xvmalloc.c
@@ -213,54 +213,14 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
}
/*
- * Remove block from head of freelist. Index 'slindex' identifies the freelist.
- */
-static void remove_block_head(struct xv_pool *pool,
- struct block_header *block, u32 slindex)
-{
- struct block_header *tmpblock;
- u32 flindex = slindex / BITS_PER_LONG;
-
- pool->freelist[slindex].page = block->link.next_page;
- pool->freelist[slindex].offset = block->link.next_offset;
- block->link.prev_page = NULL;
- block->link.prev_offset = 0;
-
- if (!pool->freelist[slindex].page) {
- __clear_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
- if (!pool->slbitmap[flindex])
- __clear_bit(flindex, &pool->flbitmap);
- } else {
- /*
- * DEBUG ONLY: We need not reinitialize freelist head previous
- * pointer to 0 - we never depend on its value. But just for
- * sanity, lets do it.
- */
- tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
- pool->freelist[slindex].offset, KM_USER1);
- tmpblock->link.prev_page = NULL;
- tmpblock->link.prev_offset = 0;
- put_ptr_atomic(tmpblock, KM_USER1);
- }
-}
-
-/*
* Remove block from freelist. Index 'slindex' identifies the freelist.
*/
static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
struct block_header *block, u32 slindex)
{
- u32 flindex;
+ u32 flindex = slindex / BITS_PER_LONG;
struct block_header *tmpblock;
- if (pool->freelist[slindex].page == page
- && pool->freelist[slindex].offset == offset) {
- remove_block_head(pool, block, slindex);
- return;
- }
-
- flindex = slindex / BITS_PER_LONG;
-
if (block->link.prev_page) {
tmpblock = get_ptr_atomic(block->link.prev_page,
block->link.prev_offset, KM_USER1);
@@ -276,6 +236,35 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
tmpblock->link.prev_offset = block->link.prev_offset;
put_ptr_atomic(tmpblock, KM_USER1);
}
+
+	/* Is this block at the head of the freelist? */
+ if (pool->freelist[slindex].page == page
+ && pool->freelist[slindex].offset == offset) {
+
+ pool->freelist[slindex].page = block->link.next_page;
+ pool->freelist[slindex].offset = block->link.next_offset;
+
+ if (pool->freelist[slindex].page) {
+ struct block_header *tmpblock;
+ tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
+ pool->freelist[slindex].offset,
+ KM_USER1);
+ tmpblock->link.prev_page = NULL;
+ tmpblock->link.prev_offset = 0;
+ put_ptr_atomic(tmpblock, KM_USER1);
+ } else {
+ /* This freelist bucket is empty */
+ __clear_bit(slindex % BITS_PER_LONG,
+ &pool->slbitmap[flindex]);
+ if (!pool->slbitmap[flindex])
+ __clear_bit(flindex, &pool->flbitmap);
+ }
+ }
+
+ block->link.prev_page = NULL;
+ block->link.prev_offset = 0;
+ block->link.next_page = NULL;
+ block->link.next_offset = 0;
}
/*
@@ -384,7 +373,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
block = get_ptr_atomic(*page, *offset, KM_USER0);
- remove_block_head(pool, block, index);
+ remove_block(pool, *page, *offset, block, index);
/* Split the block if required */
tmpoffset = *offset + size + XV_ALIGN;