author     Nigel Cunningham <nigel@tuxonice.net>    2009-12-06 16:15:53 +0100
committer  Rafael J. Wysocki <rjw@sisk.pl>          2009-12-06 16:15:53 +0100
commit     0414f2ec03d72dc4e569627e6112fa6dafc99a79 (patch)
tree       679fe5c0b89cc58194d187e25787f79bb7da3441 /kernel/power
parent     64357ed468025614d48daa6cc87674ae5616f8fb (diff)
download   linux-0414f2ec03d72dc4e569627e6112fa6dafc99a79.tar.bz2
PM / Hibernate: Move swap functions to kernel/power/swap.c.
Move the hibernation code's functions for allocating and freeing swap from swsusp.c to swap.c, which is where you'd expect to find them.

Signed-off-by: Nigel Cunningham <nigel@tuxonice.net>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Diffstat (limited to 'kernel/power')
-rw-r--r--  kernel/power/swap.c    | 101 +
-rw-r--r--  kernel/power/swsusp.c  | 101 -
2 files changed, 101 insertions(+), 101 deletions(-)
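
For context, the moved API boils down to three entry points: alloc_swapdev_block() allocates one swap page and records it, free_all_swap_pages() releases everything recorded so far, and swsusp_swap_in_use() reports whether anything is still recorded. A minimal sketch of the allocate-then-unwind pattern a caller might follow is below; write_page() and save_image_sketch() are hypothetical stand-ins, not part of this patch, and the real prototypes are assumed to come from kernel/power/power.h.

/*
 * Hypothetical caller, sketching the allocate-then-unwind pattern the moved
 * functions support. Only alloc_swapdev_block(), free_all_swap_pages() and
 * swsusp_swap_in_use() come from the patch below; write_page() and
 * save_image_sketch() are made-up stand-ins for illustration.
 */
static int save_image_sketch(int swap, void **pages, unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		sector_t sector = alloc_swapdev_block(swap);

		if (!sector)		/* swap is full */
			goto out_free;
		if (write_page(pages[i], sector))	/* hypothetical I/O helper */
			goto out_free;
	}
	return 0;

out_free:
	/*
	 * Every block handed out by alloc_swapdev_block() was registered in
	 * the extent tree, so a single call undoes all of the allocations.
	 */
	free_all_swap_pages(swap);
	return -ENOSPC;
}
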
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 890f6b11b1d3..0ce9b00f5d33 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -38,6 +38,107 @@ struct swsusp_header {
static struct swsusp_header *swsusp_header;
+/**
+ * The following functions are used for tracking the allocated
+ * swap pages, so that they can be freed in case of an error.
+ */
+
+struct swsusp_extent {
+ struct rb_node node;
+ unsigned long start;
+ unsigned long end;
+};
+
+static struct rb_root swsusp_extents = RB_ROOT;
+
+static int swsusp_extents_insert(unsigned long swap_offset)
+{
+ struct rb_node **new = &(swsusp_extents.rb_node);
+ struct rb_node *parent = NULL;
+ struct swsusp_extent *ext;
+
+ /* Figure out where to put the new node */
+ while (*new) {
+ ext = container_of(*new, struct swsusp_extent, node);
+ parent = *new;
+ if (swap_offset < ext->start) {
+ /* Try to merge */
+ if (swap_offset == ext->start - 1) {
+ ext->start--;
+ return 0;
+ }
+ new = &((*new)->rb_left);
+ } else if (swap_offset > ext->end) {
+ /* Try to merge */
+ if (swap_offset == ext->end + 1) {
+ ext->end++;
+ return 0;
+ }
+ new = &((*new)->rb_right);
+ } else {
+ /* It already is in the tree */
+ return -EINVAL;
+ }
+ }
+ /* Add the new node and rebalance the tree. */
+ ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
+ if (!ext)
+ return -ENOMEM;
+
+ ext->start = swap_offset;
+ ext->end = swap_offset;
+ rb_link_node(&ext->node, parent, new);
+ rb_insert_color(&ext->node, &swsusp_extents);
+ return 0;
+}
+
+/**
+ * alloc_swapdev_block - allocate a swap page and register that it has
+ * been allocated, so that it can be freed in case of an error.
+ */
+
+sector_t alloc_swapdev_block(int swap)
+{
+ unsigned long offset;
+
+ offset = swp_offset(get_swap_page_of_type(swap));
+ if (offset) {
+ if (swsusp_extents_insert(offset))
+ swap_free(swp_entry(swap, offset));
+ else
+ return swapdev_block(swap, offset);
+ }
+ return 0;
+}
+
+/**
+ * free_all_swap_pages - free swap pages allocated for saving image data.
+ * It also frees the extents used to register which swap entries had been
+ * allocated.
+ */
+
+void free_all_swap_pages(int swap)
+{
+ struct rb_node *node;
+
+ while ((node = swsusp_extents.rb_node)) {
+ struct swsusp_extent *ext;
+ unsigned long offset;
+
+ ext = container_of(node, struct swsusp_extent, node);
+ rb_erase(node, &swsusp_extents);
+ for (offset = ext->start; offset <= ext->end; offset++)
+ swap_free(swp_entry(swap, offset));
+
+ kfree(ext);
+ }
+}
+
+int swsusp_swap_in_use(void)
+{
+ return (swsusp_extents.rb_node != NULL);
+}
+
/*
* General things
*/
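
The insert path above keeps the tree small by merging: an offset adjacent to an existing extent simply widens that extent, and only a genuinely isolated offset costs a new node. Below is a user-space sketch of the same merge rules, using a sorted singly-linked list in place of the kernel rb-tree (the tree only changes lookup cost, not the merge logic); everything in it is hypothetical illustration, not kernel code.

/*
 * User-space sketch of the merge rules in swsusp_extents_insert(): widen an
 * extent whose boundary is adjacent to the new offset, reject offsets already
 * covered, otherwise add a fresh single-offset extent. A sorted linked list
 * stands in for the kernel rb-tree; for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

struct extent {
	unsigned long start, end;
	struct extent *next;
};

static struct extent *extents;

static int extent_insert(unsigned long off)
{
	struct extent **link = &extents;
	struct extent *ext;

	for (ext = extents; ext; link = &ext->next, ext = ext->next) {
		if (off >= ext->start && off <= ext->end)
			return -1;		/* already tracked */
		if (off == ext->start - 1) {	/* widen to the left */
			ext->start--;
			return 0;
		}
		if (off == ext->end + 1) {	/* widen to the right */
			ext->end++;
			return 0;
		}
		if (off < ext->start)
			break;			/* keep the list sorted */
	}
	ext = calloc(1, sizeof(*ext));
	if (!ext)
		return -1;
	ext->start = ext->end = off;
	ext->next = *link;
	*link = ext;
	return 0;
}

int main(void)
{
	unsigned long offs[] = { 10, 12, 11, 11 };
	size_t i;

	/* 11 widens [10,10] to [10,11]; the second 11 is a duplicate. */
	for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
		printf("insert %lu -> %d\n", offs[i], extent_insert(offs[i]));
	for (struct extent *e = extents; e; e = e->next)
		printf("extent [%lu, %lu]\n", e->start, e->end);
	return 0;
}

Like the kernel version, this sketch does not coalesce two extents that a new offset makes adjacent ([10,11] and [12,12] stay separate above); that costs at most an extra node, never correctness, since free_all_swap_pages() walks every extent anyway.
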
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 6a07f4dbf2f8..57222d2089b8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -58,107 +58,6 @@
int in_suspend __nosavedata = 0;
/**
- * The following functions are used for tracking the allocated
- * swap pages, so that they can be freed in case of an error.
- */
-
-struct swsusp_extent {
- struct rb_node node;
- unsigned long start;
- unsigned long end;
-};
-
-static struct rb_root swsusp_extents = RB_ROOT;
-
-static int swsusp_extents_insert(unsigned long swap_offset)
-{
- struct rb_node **new = &(swsusp_extents.rb_node);
- struct rb_node *parent = NULL;
- struct swsusp_extent *ext;
-
- /* Figure out where to put the new node */
- while (*new) {
- ext = container_of(*new, struct swsusp_extent, node);
- parent = *new;
- if (swap_offset < ext->start) {
- /* Try to merge */
- if (swap_offset == ext->start - 1) {
- ext->start--;
- return 0;
- }
- new = &((*new)->rb_left);
- } else if (swap_offset > ext->end) {
- /* Try to merge */
- if (swap_offset == ext->end + 1) {
- ext->end++;
- return 0;
- }
- new = &((*new)->rb_right);
- } else {
- /* It already is in the tree */
- return -EINVAL;
- }
- }
- /* Add the new node and rebalance the tree. */
- ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
- if (!ext)
- return -ENOMEM;
-
- ext->start = swap_offset;
- ext->end = swap_offset;
- rb_link_node(&ext->node, parent, new);
- rb_insert_color(&ext->node, &swsusp_extents);
- return 0;
-}
-
-/**
- * alloc_swapdev_block - allocate a swap page and register that it has
- * been allocated, so that it can be freed in case of an error.
- */
-
-sector_t alloc_swapdev_block(int swap)
-{
- unsigned long offset;
-
- offset = swp_offset(get_swap_page_of_type(swap));
- if (offset) {
- if (swsusp_extents_insert(offset))
- swap_free(swp_entry(swap, offset));
- else
- return swapdev_block(swap, offset);
- }
- return 0;
-}
-
-/**
- * free_all_swap_pages - free swap pages allocated for saving image data.
- * It also frees the extents used to register which swap entries had been
- * allocated.
- */
-
-void free_all_swap_pages(int swap)
-{
- struct rb_node *node;
-
- while ((node = swsusp_extents.rb_node)) {
- struct swsusp_extent *ext;
- unsigned long offset;
-
- ext = container_of(node, struct swsusp_extent, node);
- rb_erase(node, &swsusp_extents);
- for (offset = ext->start; offset <= ext->end; offset++)
- swap_free(swp_entry(swap, offset));
-
- kfree(ext);
- }
-}
-
-int swsusp_swap_in_use(void)
-{
- return (swsusp_extents.rb_node != NULL);
-}
-
-/**
* swsusp_show_speed - print the time elapsed between two events represented by
* @start and @stop
*