author    | Pekka Enberg <penberg@cs.helsinki.fi> | 2006-02-01 03:05:49 -0800
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-02-01 08:53:18 -0800
commit    | 6ed5eb2211204224799b2821656bbbfde26ef200 (patch)
tree      | 46c9c3d34317bcee626343acf9aec12c87ee9f32 /mm/slab.c
parent    | 5295a74cc0bcf1291686eb734ccb06baa3d55c1a (diff)
download  | linux-6ed5eb2211204224799b2821656bbbfde26ef200.tar.bz2
[PATCH] slab: extract virt_to_{cache|slab}
Introduce virt_to_cache() and virt_to_slab() functions to reduce duplicated
code and to provide a proper abstraction should we want to support other kinds
of mapping from address to slab and cache (e.g. for vmalloc() or I/O memory).
Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 22 |
1 file changed, 17 insertions, 5 deletions
diff --git a/mm/slab.c b/mm/slab.c
index ba288b3877d1..c2f9e0a330ff 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -596,6 +596,18 @@ static inline struct slab *page_get_slab(struct page *page)
 	return (struct slab *)page->lru.prev;
 }
 
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_slab(page);
+}
+
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
@@ -1437,7 +1449,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 		/* Print some data about the neighboring objects, if they
 		 * exist:
 		 */
-		struct slab *slabp = page_get_slab(virt_to_page(objp));
+		struct slab *slabp = virt_to_slab(objp);
 		int objnr;
 
 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
@@ -2767,7 +2779,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 		void *objp = objpp[i];
 		struct slab *slabp;
 
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
 		check_spinlock_acquired_node(cachep, node);
@@ -2867,7 +2879,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 #ifdef CONFIG_NUMA
 	{
 		struct slab *slabp;
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		if (unlikely(slabp->nodeid != numa_node_id())) {
 			struct array_cache *alien = NULL;
 			int nodeid = slabp->nodeid;
@@ -3130,7 +3142,7 @@ void kfree(const void *objp)
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
-	c = page_get_cache(virt_to_page(objp));
+	c = virt_to_cache(objp);
 	mutex_debug_check_no_locks_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
@@ -3704,5 +3716,5 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;
 
-	return obj_size(page_get_cache(virt_to_page(objp)));
+	return obj_size(virt_to_cache(objp));
 }
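The two helpers only wrap virt_to_page() plus the existing page_get_cache()/page_get_slab() accessors, but they give every caller a single entry point for the address-to-descriptor mapping. The user-space sketch below illustrates that idea; it is an analogy only, and the toy_* names, the page-header layout, and TOY_PAGE_SIZE are assumptions made for illustration, not anything taken from mm/slab.c.

```c
/*
 * Toy user-space analogy of the abstraction this patch introduces
 * (illustrative only; not kernel code). Every object lives in an
 * aligned fixed-size "page" whose header records the owning cache
 * and slab, roughly as the kernel stashes those pointers in
 * struct page. Callers go through virt_to_cache()/virt_to_slab()
 * instead of open-coding the page lookup.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096

struct toy_cache { const char *name; size_t obj_size; };
struct toy_slab  { void *s_mem; int inuse; };

/* Page header placed at the start of each aligned "page". */
struct toy_page {
	struct toy_cache *cache;   /* analogous to page_get_cache(page) */
	struct toy_slab  *slab;    /* analogous to page_get_slab(page)  */
};

/* Analogous to virt_to_page(obj): mask down to the page boundary. */
static struct toy_page *toy_virt_to_page(const void *obj)
{
	return (struct toy_page *)((uintptr_t)obj & ~(uintptr_t)(TOY_PAGE_SIZE - 1));
}

/* The two helpers the patch adds: one place that knows the mapping. */
static struct toy_cache *virt_to_cache(const void *obj)
{
	return toy_virt_to_page(obj)->cache;
}

static struct toy_slab *virt_to_slab(const void *obj)
{
	return toy_virt_to_page(obj)->slab;
}

int main(void)
{
	static struct toy_cache cache = { .name = "toy-64", .obj_size = 64 };
	static struct toy_slab slab;

	/* Carve out one aligned page and hand out an object from it. */
	void *page_mem = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
	if (!page_mem)
		return 1;
	struct toy_page *page = page_mem;
	page->cache = &cache;
	page->slab = &slab;
	slab.s_mem = (char *)page_mem + sizeof(*page);

	void *obj = slab.s_mem;            /* the "allocated" object */

	/* A free path only needs the object pointer. */
	printf("object %p belongs to cache %s (obj_size=%zu)\n",
	       obj, virt_to_cache(obj)->name, virt_to_cache(obj)->obj_size);
	printf("slab s_mem=%p\n", virt_to_slab(obj)->s_mem);

	free(page_mem);
	return 0;
}
```

Because check_poison_obj(), free_block(), __cache_free(), kfree() and ksize() now all go through the two helpers, a different lookup scheme for the vmalloc() or I/O-memory case mentioned in the changelog would, in principle, only need to change virt_to_cache()/virt_to_slab() rather than every call site.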