author    Jes Sorensen <jes@wildopensource.com>    2005-06-21 17:15:02 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-21 18:46:18 -0700
commit    f14f75b81187cdbe10cc53a521bf9fdf97b59f8c (patch)
tree      5c0d48c8a3338e6f1747e6cd55f699be96ffef1a /lib
parent    2caaad41e4aa8f5dd999695b4ddeaa0e7f3912a4 (diff)
download  linux-f14f75b81187cdbe10cc53a521bf9fdf97b59f8c.tar.bz2
[PATCH] ia64 uncached alloc
This patch contains the ia64 uncached page allocator and the generic allocator (genalloc). The uncached allocator was formerly part of the SN2 mspec driver, but there are several other users of it, so it has been split off from the driver.

The generic allocator can be used by device drivers to manage special memory, etc. It is based on the allocator from the sym53c8xx_2 driver.

Various users on ia64 need uncached memory. The SGI SN architecture requires it for inter-partition communication between partitions within a large NUMA cluster. The specific user for this is the XPC code. Another application is large MPI-style applications, which use it for synchronization; on SN this can be done using special 'fetchop' operations, but it also benefits non-SN hardware, which may use regular uncached memory for this purpose. The performance difference between doing this through uncached and cached memory is substantial. This is handled by the mspec driver, which I will push out in a separate patch.

Rather than creating a specific allocator for just uncached memory, I came up with genalloc, which is a general-purpose allocator that can be used by device drivers and other subsystems as they please, for instance to handle on-board device memory. It was derived from the sym53c8xx_2 driver's allocator, which is also an example of a potential user (I am refraining from modifying sym2 right now as it seems to have been under fairly heavy development recently).

On ia64, memory attributes apply per granule, i.e. it isn't safe to access memory as uncached within a granule that currently has memory being accessed in cached mode. The regular system therefore doesn't utilize memory in the lower granules, which is mixed in with device PAL code etc. The uncached driver walks the EFI memmap, pulls out the spill uncached pages and sticks them into the uncached pool. Only after these chunks have been used up will it start converting regular cached memory into uncached memory; hence the EFI-related code additions.

Signed-off-by: Jes Sorensen <jes@wildopensource.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
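To make the intended call flow concrete, here is a rough usage sketch (not part of this patch) of how a driver with on-board memory might use the interface added below. my_board_next_window(), MY_CHUNK_SHIFT, my_grab_chunk() and my_board_setup() are made-up names used only for illustration:

/*
 * Hypothetical usage sketch, not from this patch: the driver hands the
 * pool 64KB windows of its on-board memory through a callback and then
 * carves small, naturally aligned buffers out of them.
 */
#include <linux/errno.h>
#include <linux/genalloc.h>

#define MY_CHUNK_SHIFT	16	/* assumed: manage memory in 64KB chunks */

/* Hypothetical helper: returns the address of the next free 64KB window
 * of on-board memory, or 0 when the device memory is exhausted. */
extern unsigned long my_board_next_window(unsigned long board_base);

static struct gen_pool *my_pool;

static unsigned long my_grab_chunk(struct gen_pool *pool)
{
	/* pool->private carries the 'data' argument of gen_pool_create() */
	return my_board_next_window(pool->private);
}

static int my_board_setup(unsigned long board_base)
{
	unsigned long buf;

	/* Seed the pool with two chunks up front; gen_pool_alloc() calls
	 * my_grab_chunk() again whenever the free lists run dry. */
	my_pool = gen_pool_create(2, MY_CHUNK_SHIFT, my_grab_chunk, board_base);
	if (!my_pool)
		return -ENOMEM;

	/* Sizes are rounded up to a power of two and the returned address
	 * is naturally aligned to that size. */
	buf = gen_pool_alloc(my_pool, 512);
	if (!buf)
		return -ENOMEM;

	/* ... use the buffer, then hand it back ... */
	gen_pool_free(my_pool, buf, 512);
	return 0;
}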
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig       6
-rw-r--r--  lib/Makefile      1
-rw-r--r--  lib/genalloc.c    188
3 files changed, 195 insertions, 0 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index eeb45225248f..2d4d4e3bc4aa 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -40,6 +40,12 @@ config ZLIB_DEFLATE
tristate
#
+# Generic allocator support is selected if needed
+#
+config GENERIC_ALLOCATOR
+ boolean
+
+#
# reed solomon support is select'ed if needed
#
config REED_SOLOMON
diff --git a/lib/Makefile b/lib/Makefile
index 5f10cb898407..dcb4231916e2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
diff --git a/lib/genalloc.c b/lib/genalloc.c
new file mode 100644
index 000000000000..d6d30d2e7166
--- /dev/null
+++ b/lib/genalloc.c
@@ -0,0 +1,188 @@
+/*
+ * Basic general purpose allocator for managing special purpose memory
+ * not managed by the regular kmalloc/kfree interface.
+ * Uses for this include on-device special memory, uncached memory
+ * etc.
+ *
+ * This code is based on the buddy allocator found in the sym53c8xx_2
+ * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
+ * and adapted for general purpose use.
+ *
+ * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+
+#include <asm/page.h>
+
+
+struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
+ unsigned long (*fp)(struct gen_pool *),
+ unsigned long data)
+{
+ struct gen_pool *poolp;
+ unsigned long tmp;
+ int i;
+
+ /*
+ * This is really an arbitrary limit; +10 is enough for
+ * IA64_GRANULE_SHIFT, i.e. 16MB. If anyone needs a larger limit
+ * this can be increased without problems.
+ */
+ if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
+ ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
+ return NULL;
+
+ if (!max_chunk_shift)
+ max_chunk_shift = PAGE_SHIFT;
+
+ poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
+ if (!poolp)
+ return NULL;
+ memset(poolp, 0, sizeof(struct gen_pool));
+ poolp->h = kmalloc(sizeof(struct gen_pool_link) *
+ (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
+ GFP_KERNEL);
+ if (!poolp->h) {
+ printk(KERN_WARNING "gen_pool_create() failed to allocate\n");
+ kfree(poolp);
+ return NULL;
+ }
+ memset(poolp->h, 0, sizeof(struct gen_pool_link) *
+ (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
+
+ spin_lock_init(&poolp->lock);
+ poolp->get_new_chunk = fp;
+ poolp->max_chunk_shift = max_chunk_shift;
+ poolp->private = data;
+
+ for (i = 0; i < nr_chunks; i++) {
+ tmp = poolp->get_new_chunk(poolp);
+ printk(KERN_INFO "allocated %lx\n", tmp);
+ if (!tmp)
+ break;
+ gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
+ }
+
+ return poolp;
+}
+EXPORT_SYMBOL(gen_pool_create);
+
+
+/*
+ * Simple power of two buddy-like generic allocator.
+ * Provides naturally aligned memory chunks.
+ */
+unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+{
+ int j, i, s, max_chunk_size;
+ unsigned long a, flags;
+ struct gen_pool_link *h = poolp->h;
+
+ max_chunk_size = 1 << poolp->max_chunk_shift;
+
+ if (size > max_chunk_size)
+ return 0;
+
+ i = 0;
+
+ size = max(size, 1 << ALLOC_MIN_SHIFT);
+ s = roundup_pow_of_two(size);
+
+ j = i;
+
+ spin_lock_irqsave(&poolp->lock, flags);
+ while (!h[j].next) {
+ if (s == max_chunk_size) {
+ struct gen_pool_link *ptr;
+ spin_unlock_irqrestore(&poolp->lock, flags);
+ ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
+ spin_lock_irqsave(&poolp->lock, flags);
+ h[j].next = ptr;
+ if (h[j].next)
+ h[j].next->next = NULL;
+ break;
+ }
+ j++;
+ s <<= 1;
+ }
+ a = (unsigned long) h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ /*
+ * This should be split into a separate function doing
+ * the chunk split in order to support custom
+ * handling of memory not physically accessible by the host
+ */
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (struct gen_pool_link *) (a + s);
+ h[j].next->next = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&poolp->lock, flags);
+ return a;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+
+/*
+ * Counterpart to gen_pool_alloc(); returns memory to the pool.
+ */
+void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+{
+ struct gen_pool_link *q;
+ struct gen_pool_link *h = poolp->h;
+ unsigned long a, b, flags;
+ int i, s, max_chunk_size;
+
+ max_chunk_size = 1 << poolp->max_chunk_shift;
+
+ if (size > max_chunk_size)
+ return;
+
+ i = 0;
+
+ size = max(size, 1 << ALLOC_MIN_SHIFT);
+ s = roundup_pow_of_two(size);
+
+ a = ptr;
+
+ spin_lock_irqsave(&poolp->lock, flags);
+ while (1) {
+ if (s == max_chunk_size) {
+ ((struct gen_pool_link *)a)->next = h[i].next;
+ h[i].next = (struct gen_pool_link *)a;
+ break;
+ }
+ b = a ^ s;
+ q = &h[i];
+
+ while (q->next && q->next != (struct gen_pool_link *)b)
+ q = q->next;
+
+ if (!q->next) {
+ ((struct gen_pool_link *)a)->next = h[i].next;
+ h[i].next = (struct gen_pool_link *)a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ i++;
+ }
+ spin_unlock_irqrestore(&poolp->lock, flags);
+}
+EXPORT_SYMBOL(gen_pool_free);
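
For reference, the coalescing loop in gen_pool_free() above relies on the fact that a naturally aligned power-of-two block of size s and its buddy differ only in the address bit equal to s, so a ^ s locates the buddy and a & (a ^ s) is the start of the merged block. A minimal userspace sketch of that arithmetic, using hypothetical addresses not taken from the patch:

/* Standalone demo (userspace, not kernel code) of the buddy address
 * arithmetic used in gen_pool_free(): for a naturally aligned block of
 * size s at address a, the buddy lives at a ^ s, and the merged block
 * of size 2*s starts at a & (a ^ s). */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long a = 0x40000400UL;	/* 1KB block at offset 0x400 */
	unsigned long s = 0x400UL;	/* block size: 1KB */

	unsigned long buddy  = a ^ s;		/* 0x40000000 */
	unsigned long merged = a & buddy;	/* 0x40000000, size 2*s */

	assert(buddy == 0x40000000UL);
	assert(merged == 0x40000000UL);
	printf("block %#lx + buddy %#lx -> merged %#lx (size %#lx)\n",
	       a, buddy, merged, 2 * s);
	return 0;
}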