Diffstat (limited to 'mm/swapfile.c')
 mm/swapfile.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d58361109066..9fffc5af29d1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -975,8 +975,7 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
{
unsigned long idx;
struct swap_cluster_info *ci;
- unsigned long offset, i;
- unsigned char *map;
+ unsigned long offset;
/*
* Should not even be attempting cluster allocations when huge
@@ -996,9 +995,7 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
alloc_cluster(si, idx);
cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
- map = si->swap_map + offset;
- for (i = 0; i < SWAPFILE_CLUSTER; i++)
- map[i] = SWAP_HAS_CACHE;
+ memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
unlock_cluster(ci);
swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
*slot = swp_entry(si->type, offset);
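
The two hunks above collapse a per-slot assignment loop into one memset(). A minimal userspace sketch of why that is safe, assuming only that swap_map entries are unsigned char (the constant values below are illustrative stand-ins, not taken from this tree):

#include <assert.h>
#include <string.h>

#define SWAPFILE_CLUSTER 256   /* illustrative; defined in mm/swapfile.c */
#define SWAP_HAS_CACHE   0x40  /* illustrative stand-in for the kernel flag */

int main(void)
{
	unsigned char a[SWAPFILE_CLUSTER], b[SWAPFILE_CLUSTER];
	unsigned long i;

	/* old form: one store per swap slot */
	for (i = 0; i < SWAPFILE_CLUSTER; i++)
		a[i] = SWAP_HAS_CACHE;

	/* new form: a single memset() over the same byte range */
	memset(b, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);

	assert(memcmp(a, b, SWAPFILE_CLUSTER) == 0);
	return 0;
}

Because each element is exactly one byte, filling the range with the one-byte value SWAP_HAS_CACHE is precisely what memset() does; the compiler may also vectorize it better than the open-coded loop.
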
@@ -1045,16 +1042,18 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
/* Only single cluster request supported */
WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
+ spin_lock(&swap_avail_lock);
+
avail_pgs = atomic_long_read(&nr_swap_pages) / size;
- if (avail_pgs <= 0)
+ if (avail_pgs <= 0) {
+ spin_unlock(&swap_avail_lock);
goto noswap;
+ }
n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
atomic_long_sub(n_goal * size, &nr_swap_pages);
- spin_lock(&swap_avail_lock);
-
start_over:
node = numa_node_id();
plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
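
This hunk moves the spin_lock(&swap_avail_lock) acquisition ahead of the nr_swap_pages availability check, so the check and the subtraction happen under the same lock rather than in a window where a concurrent path could change the counter in between. A hedged userspace sketch of that check-then-reserve pattern, with illustrative names (avail_lock, nr_pages, reserve_pages are stand-ins, not the kernel's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t avail_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_long nr_pages = 8;    /* stand-in for nr_swap_pages */

/* Reserve up to n pages; returns how many were actually reserved. */
static long reserve_pages(long n)
{
	long avail, got = 0;

	pthread_mutex_lock(&avail_lock);      /* lock taken before the check */
	avail = atomic_load(&nr_pages);
	if (avail > 0) {
		got = n < avail ? n : avail;
		atomic_fetch_sub(&nr_pages, got); /* check and sub under one lock */
	}
	pthread_mutex_unlock(&avail_lock);
	return got;
}

int main(void)
{
	printf("reserved %ld\n", reserve_pages(4));
	return 0;
}

With the lock taken first, no other thread can shrink the counter between the avail > 0 test and the reservation, which is the ordering the patch establishes for the swap_avail_heads walk that follows.
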
@@ -1128,14 +1127,13 @@ swp_entry_t get_swap_page_of_type(int type)
spin_lock(&si->lock);
if (si->flags & SWP_WRITEOK) {
- atomic_long_dec(&nr_swap_pages);
/* This is called for allocating swap entry, not cache */
offset = scan_swap_map(si, 1);
if (offset) {
+ atomic_long_dec(&nr_swap_pages);
spin_unlock(&si->lock);
return swp_entry(type, offset);
}
- atomic_long_inc(&nr_swap_pages);
}
spin_unlock(&si->lock);
fail:
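
The get_swap_page_of_type() hunk replaces "decrement, try, re-increment on failure" with a single decrement on the success path only. Both orderings are consistent under si->lock; the new one avoids a transient dip in nr_swap_pages and a compensating atomic on the failure path. An illustrative userspace sketch (try_alloc_slot, nr_free, and the toy allocator are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_free = 100;   /* stand-in for nr_swap_pages */
static long next_slot = 1;          /* toy allocator state */

/* Hypothetical allocator: returns a slot number, or 0 on failure. */
static long try_alloc_slot(void)
{
	return atomic_load(&nr_free) > 0 ? next_slot++ : 0;
}

static long alloc_and_account(void)
{
	long slot = try_alloc_slot();

	if (slot)                          /* account only a real success; */
		atomic_fetch_sub(&nr_free, 1); /* no compensating increment    */
	return slot;
}

int main(void)
{
	printf("slot %ld, %ld left\n", alloc_and_account(),
	       atomic_load(&nr_free));
	return 0;
}
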
@@ -3445,11 +3443,11 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
unsigned long offset;
unsigned char count;
unsigned char has_cache;
- int err = -EINVAL;
+ int err;
p = get_swap_device(entry);
if (!p)
- goto out;
+ return -EINVAL;
offset = swp_offset(entry);
ci = lock_cluster_or_swap_info(p, offset);
@@ -3496,7 +3494,6 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
unlock_out:
unlock_cluster_or_swap_info(p, ci);
-out:
if (p)
put_swap_device(p);
return err;
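
The __swap_duplicate() hunks are a pure control-flow cleanup: once the failed get_swap_device() lookup returns -EINVAL directly, the "out:" label has no remaining users and err no longer needs a default value. A compilable sketch of the pattern, with get_dev/put_dev/do_duplicate as hypothetical stand-ins for the swap-device helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct dev { int id; };

/* Hypothetical stand-ins for get_swap_device()/put_swap_device(). */
static struct dev *get_dev(long entry)
{
	return entry ? calloc(1, sizeof(struct dev)) : NULL;
}
static void put_dev(struct dev *p) { free(p); }
static int do_duplicate(struct dev *p) { (void)p; return 0; }

static int dup_entry(long entry)
{
	struct dev *p;
	int err;            /* no "= -EINVAL" default needed anymore */

	p = get_dev(entry);
	if (!p)
		return -EINVAL; /* early return replaces "goto out" */

	err = do_duplicate(p);
	put_dev(p);         /* p is known non-NULL on this path */
	return err;
}

int main(void)
{
	printf("%d %d\n", dup_entry(1), dup_entry(0));
	return 0;
}
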
@@ -3613,7 +3610,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
ci = lock_cluster(si, offset);
- count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
+ count = swap_count(si->swap_map[offset]);
if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
/*
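
The final hunk swaps the open-coded "& ~SWAP_HAS_CACHE" mask for the file's existing swap_count() helper. If memory serves (worth verifying against the tree this diff is from), that helper is simply:

/* in mm/swapfile.c; the returned count may include COUNT_CONTINUED */
static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;
}

so the change is behavior-preserving and merely gives the mask its established name.
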