author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 21:14:42 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 21:14:42 -0700
commit     33caee39925b887a99a2400dc5c980097c3573f9 (patch)
tree       8e68ad97e1fee88c4a3f31453041f8d139f2027e /drivers
parent     6456a0438b984186a0c9c8ecc9fe3d97b7ac3613 (diff)
parent     f84223087402c45179be5e7060c5736c17a7b271 (diff)
download   linux-33caee39925b887a99a2400dc5c980097c3573f9.tar.bz2
Merge branch 'akpm' (patchbomb from Andrew Morton)
Merge incoming from Andrew Morton:

 - Various misc things.
 - arch/sh updates.
 - Part of ocfs2. Review is slow.
 - Slab updates.
 - Most of -mm.
 - printk updates.
 - lib/ updates.
 - checkpatch updates.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (226 commits)
  checkpatch: update $declaration_macros, add uninitialized_var
  checkpatch: warn on missing spaces in broken up quoted
  checkpatch: fix false positives for --strict "space after cast" test
  checkpatch: fix false positive MISSING_BREAK warnings with --file
  checkpatch: add test for native c90 types in unusual order
  checkpatch: add signed generic types
  checkpatch: add short int to c variable types
  checkpatch: add for_each tests to indentation and brace tests
  checkpatch: fix brace style misuses of else and while
  checkpatch: add --fix option for a couple OPEN_BRACE misuses
  checkpatch: use the correct indentation for which()
  checkpatch: add fix_insert_line and fix_delete_line helpers
  checkpatch: add ability to insert and delete lines to patch/file
  checkpatch: add an index variable for fixed lines
  checkpatch: warn on break after goto or return with same tab indentation
  checkpatch: emit a warning on file add/move/delete
  checkpatch: add test for commit id formatting style in commit log
  checkpatch: emit fewer kmalloc_array/kcalloc conversion warnings
  checkpatch: improve "no space after cast" test
  checkpatch: allow multiple const * types
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/Kconfig                               |   1
-rw-r--r--  drivers/ata/libata-core.c                         |  72
-rw-r--r--  drivers/base/Kconfig                              |  10
-rw-r--r--  drivers/base/dma-contiguous.c                     | 220
-rw-r--r--  drivers/base/memory.c                             |  30
-rw-r--r--  drivers/base/node.c                               |   2
-rw-r--r--  drivers/block/zram/zram_drv.c                     |  71
-rw-r--r--  drivers/block/zram/zram_drv.h                     |  29
-rw-r--r--  drivers/firmware/memmap.c                         |   6
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c                     |   2
-rw-r--r--  drivers/hwmon/asus_atk0110.c                      |   2
-rw-r--r--  drivers/lguest/core.c                             |   7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c    |   2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  |   2
-rw-r--r--  drivers/staging/android/binder.c                  |   4
-rw-r--r--  drivers/staging/lustre/lustre/libcfs/hash.c       |   4
-rw-r--r--  drivers/tty/sysrq.c                               |   2
17 files changed, 111 insertions, 355 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e65d400efd44..e1b92788c225 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -16,6 +16,7 @@ menuconfig ATA
depends on BLOCK
depends on !(M32R || M68K || S390) || BROKEN
select SCSI
+ select GLOB
---help---
If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
any other ATA device under Linux, say Y and make sure that you know
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 677c0c1b03bd..dbdc5d32343f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -59,6 +59,7 @@
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -4250,73 +4251,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ }
};
-/**
- * glob_match - match a text string against a glob-style pattern
- * @text: the string to be examined
- * @pattern: the glob-style pattern to be matched against
- *
- * Either/both of text and pattern can be empty strings.
- *
- * Match text against a glob-style pattern, with wildcards and simple sets:
- *
- * ? matches any single character.
- * * matches any run of characters.
- * [xyz] matches a single character from the set: x, y, or z.
- * [a-d] matches a single character from the range: a, b, c, or d.
- * [a-d0-9] matches a single character from either range.
- *
- * The special characters ?, [, -, or *, can be matched using a set, eg. [*]
- * Behaviour with malformed patterns is undefined, though generally reasonable.
- *
- * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
- *
- * This function uses one level of recursion per '*' in pattern.
- * Since it calls _nothing_ else, and has _no_ explicit local variables,
- * this will not cause stack problems for any reasonable use here.
- *
- * RETURNS:
- * 0 on match, 1 otherwise.
- */
-static int glob_match (const char *text, const char *pattern)
-{
- do {
- /* Match single character or a '?' wildcard */
- if (*text == *pattern || *pattern == '?') {
- if (!*pattern++)
- return 0; /* End of both strings: match */
- } else {
- /* Match single char against a '[' bracketed ']' pattern set */
- if (!*text || *pattern != '[')
- break; /* Not a pattern set */
- while (*++pattern && *pattern != ']' && *text != *pattern) {
- if (*pattern == '-' && *(pattern - 1) != '[')
- if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
- ++pattern;
- break;
- }
- }
- if (!*pattern || *pattern == ']')
- return 1; /* No match */
- while (*pattern && *pattern++ != ']');
- }
- } while (*++text && *pattern);
-
- /* Match any run of chars against a '*' wildcard */
- if (*pattern == '*') {
- if (!*++pattern)
- return 0; /* Match: avoid recursion at end of pattern */
- /* Loop to handle additional pattern chars after the wildcard */
- while (*text) {
- if (glob_match(text, pattern) == 0)
- return 0; /* Remainder matched */
- ++text; /* Absorb (match) this char and try again */
- }
- }
- if (!*text && !*pattern)
- return 0; /* End of both strings: match */
- return 1; /* No match */
-}
-
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -4327,10 +4261,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
while (ad->model_num) {
- if (!glob_match(model_num, ad->model_num)) {
+ if (glob_match(ad->model_num, model_num)) {
if (ad->model_rev == NULL)
return ad->horkage;
- if (!glob_match(model_rev, ad->model_rev))
+ if (glob_match(ad->model_rev, model_rev))
return ad->horkage;
}
ad++;
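The replacement helper from <linux/glob.h> (added by the lib/glob.c patch earlier in this series) takes the pattern first and returns bool, true on match, which is why the call sites above both drop the leading '!' and swap the arguments. A minimal sketch of the new semantics; the wrapper name model_is_blacklisted is hypothetical:

#include <linux/glob.h>

/* Illustrative only: glob_match() returns true on a whole-string match,
 * so the result is tested directly instead of being compared against 0
 * as with the removed libata-local helper. */
static bool model_is_blacklisted(const char *pattern, const char *model)
{
	return glob_match(pattern, model);	/* e.g. pattern = "SD1[0-5]" */
}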
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 88500fed3c7a..4e7f0ff83ae7 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -289,16 +289,6 @@ config CMA_ALIGNMENT
If unsure, leave the default value "8".
-config CMA_AREAS
- int "Maximum count of the CMA device-private areas"
- default 7
- help
- CMA allows to create CMA areas for particular devices. This parameter
- sets the maximum number of such device private CMA areas in the
- system.
-
- If unsure, leave the default value "7".
-
endif
endmenu
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6467c919c509..6606abdf880c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -24,23 +24,9 @@
#include <linux/memblock.h>
#include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/page-isolation.h>
#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
-
-struct cma {
- unsigned long base_pfn;
- unsigned long count;
- unsigned long *bitmap;
- struct mutex lock;
-};
-
-struct cma *dma_contiguous_default_area;
+#include <linux/cma.h>
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -48,6 +34,8 @@ struct cma *dma_contiguous_default_area;
#define CMA_SIZE_MBYTES 0
#endif
+struct cma *dma_contiguous_default_area;
+
/*
* Default global CMA area size can be defined in kernel's .config.
* This is useful mainly for distro maintainers to create a kernel
@@ -154,65 +142,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
}
}
-static DEFINE_MUTEX(cma_mutex);
-
-static int __init cma_activate_area(struct cma *cma)
-{
- int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
- unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
- unsigned i = cma->count >> pageblock_order;
- struct zone *zone;
-
- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
- if (!cma->bitmap)
- return -ENOMEM;
-
- WARN_ON_ONCE(!pfn_valid(pfn));
- zone = page_zone(pfn_to_page(pfn));
-
- do {
- unsigned j;
- base_pfn = pfn;
- for (j = pageblock_nr_pages; j; --j, pfn++) {
- WARN_ON_ONCE(!pfn_valid(pfn));
- /*
- * alloc_contig_range requires the pfn range
- * specified to be in the same zone. Make this
- * simple by forcing the entire CMA resv range
- * to be in the same zone.
- */
- if (page_zone(pfn_to_page(pfn)) != zone)
- goto err;
- }
- init_cma_reserved_pageblock(pfn_to_page(base_pfn));
- } while (--i);
-
- mutex_init(&cma->lock);
- return 0;
-
-err:
- kfree(cma->bitmap);
- return -EINVAL;
-}
-
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
-
-static int __init cma_init_reserved_areas(void)
-{
- int i;
-
- for (i = 0; i < cma_area_count; i++) {
- int ret = cma_activate_area(&cma_areas[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-core_initcall(cma_init_reserved_areas);
-
/**
* dma_contiguous_reserve_area() - reserve custom contiguous area
* @size: Size of the reserved area (in bytes),
@@ -234,72 +163,17 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
phys_addr_t limit, struct cma **res_cma,
bool fixed)
{
- struct cma *cma = &cma_areas[cma_area_count];
- phys_addr_t alignment;
- int ret = 0;
-
- pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
- (unsigned long)size, (unsigned long)base,
- (unsigned long)limit);
-
- /* Sanity checks */
- if (cma_area_count == ARRAY_SIZE(cma_areas)) {
- pr_err("Not enough slots for CMA reserved regions!\n");
- return -ENOSPC;
- }
-
- if (!size)
- return -EINVAL;
-
- /* Sanitise input arguments */
- alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
- base = ALIGN(base, alignment);
- size = ALIGN(size, alignment);
- limit &= ~(alignment - 1);
-
- /* Reserve memory */
- if (base && fixed) {
- if (memblock_is_region_reserved(base, size) ||
- memblock_reserve(base, size) < 0) {
- ret = -EBUSY;
- goto err;
- }
- } else {
- phys_addr_t addr = memblock_alloc_range(size, alignment, base,
- limit);
- if (!addr) {
- ret = -ENOMEM;
- goto err;
- } else {
- base = addr;
- }
- }
-
- /*
- * Each reserved area must be initialised later, when more kernel
- * subsystems (like slab allocator) are available.
- */
- cma->base_pfn = PFN_DOWN(base);
- cma->count = size >> PAGE_SHIFT;
- *res_cma = cma;
- cma_area_count++;
+ int ret;
- pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
- (unsigned long)base);
+ ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+ if (ret)
+ return ret;
/* Architecture specific contiguous memory fixup. */
- dma_contiguous_early_fixup(base, size);
- return 0;
-err:
- pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
- return ret;
-}
+ dma_contiguous_early_fixup(cma_get_base(*res_cma),
+ cma_get_size(*res_cma));
-static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
- mutex_lock(&cma->lock);
- bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
- mutex_unlock(&cma->lock);
+ return 0;
}
/**
@@ -316,62 +190,10 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
unsigned int align)
{
- unsigned long mask, pfn, pageno, start = 0;
- struct cma *cma = dev_get_cma_area(dev);
- struct page *page = NULL;
- int ret;
-
- if (!cma || !cma->count)
- return NULL;
-
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
- pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
- count, align);
-
- if (!count)
- return NULL;
-
- mask = (1 << align) - 1;
-
-
- for (;;) {
- mutex_lock(&cma->lock);
- pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
- start, count, mask);
- if (pageno >= cma->count) {
- mutex_unlock(&cma->lock);
- break;
- }
- bitmap_set(cma->bitmap, pageno, count);
- /*
- * It's safe to drop the lock here. We've marked this region for
- * our exclusive use. If the migration fails we will take the
- * lock again and unmark it.
- */
- mutex_unlock(&cma->lock);
-
- pfn = cma->base_pfn + pageno;
- mutex_lock(&cma_mutex);
- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
- mutex_unlock(&cma_mutex);
- if (ret == 0) {
- page = pfn_to_page(pfn);
- break;
- } else if (ret != -EBUSY) {
- clear_cma_bitmap(cma, pfn, count);
- break;
- }
- clear_cma_bitmap(cma, pfn, count);
- pr_debug("%s(): memory range at %p is busy, retrying\n",
- __func__, pfn_to_page(pfn));
- /* try again with a bit different memory target */
- start = pageno + mask + 1;
- }
-
- pr_debug("%s(): returned %p\n", __func__, page);
- return page;
+ return cma_alloc(dev_get_cma_area(dev), count, align);
}
/**
@@ -387,23 +209,5 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count)
{
- struct cma *cma = dev_get_cma_area(dev);
- unsigned long pfn;
-
- if (!cma || !pages)
- return false;
-
- pr_debug("%s(page %p)\n", __func__, (void *)pages);
-
- pfn = page_to_pfn(pages);
-
- if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
- return false;
-
- VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
-
- free_contig_range(pfn, count);
- clear_cma_bitmap(cma, pfn, count);
-
- return true;
+ return cma_release(dev_get_cma_area(dev), pages, count);
}
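With the bookkeeping moved into mm/cma.c, all three wrappers in this file become one-line shims; their driver-facing signatures are unchanged. A hedged round-trip sketch (the function name cma_roundtrip and the counts are illustrative):

#include <linux/dma-contiguous.h>

/* Illustrative only: allocate and release 16 pages from the device's
 * CMA area through the now cma_alloc()/cma_release()-backed wrappers. */
static int cma_roundtrip(struct device *dev)
{
	struct page *page = dma_alloc_from_contiguous(dev, 16, 4);

	if (!page)
		return -ENOMEM;		/* no CMA area, or area exhausted */
	if (!dma_release_from_contiguous(dev, page, 16))
		return -EINVAL;		/* pages were not from this area */
	return 0;
}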
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 89f752dd8465..a2e13e250bba 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -284,7 +284,7 @@ static int memory_subsys_online(struct device *dev)
* attribute and need to set the online_type.
*/
if (mem->online_type < 0)
- mem->online_type = ONLINE_KEEP;
+ mem->online_type = MMOP_ONLINE_KEEP;
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
@@ -315,23 +315,23 @@ store_mem_state(struct device *dev,
if (ret)
return ret;
- if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
- online_type = ONLINE_KERNEL;
- else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
- online_type = ONLINE_MOVABLE;
- else if (!strncmp(buf, "online", min_t(int, count, 6)))
- online_type = ONLINE_KEEP;
- else if (!strncmp(buf, "offline", min_t(int, count, 7)))
- online_type = -1;
+ if (sysfs_streq(buf, "online_kernel"))
+ online_type = MMOP_ONLINE_KERNEL;
+ else if (sysfs_streq(buf, "online_movable"))
+ online_type = MMOP_ONLINE_MOVABLE;
+ else if (sysfs_streq(buf, "online"))
+ online_type = MMOP_ONLINE_KEEP;
+ else if (sysfs_streq(buf, "offline"))
+ online_type = MMOP_OFFLINE;
else {
ret = -EINVAL;
goto err;
}
switch (online_type) {
- case ONLINE_KERNEL:
- case ONLINE_MOVABLE:
- case ONLINE_KEEP:
+ case MMOP_ONLINE_KERNEL:
+ case MMOP_ONLINE_MOVABLE:
+ case MMOP_ONLINE_KEEP:
/*
* mem->online_type is not protected so there can be a
* race here. However, when racing online, the first
@@ -342,7 +342,7 @@ store_mem_state(struct device *dev,
mem->online_type = online_type;
ret = device_online(&mem->dev);
break;
- case -1:
+ case MMOP_OFFLINE:
ret = device_offline(&mem->dev);
break;
default:
@@ -406,7 +406,9 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
int i, ret;
unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
- phys_addr = simple_strtoull(buf, NULL, 0);
+ ret = kstrtoull(buf, 0, &phys_addr);
+ if (ret)
+ return ret;
if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
return -EINVAL;
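Two robustness fixes land in this file: sysfs_streq() accepts the trailing newline that `echo online > .../state` writes while rejecting strings that merely share a prefix, and kstrtoull(), unlike simple_strtoull(), reports malformed input. A hedged sketch (parse_demo is hypothetical):

#include <linux/kernel.h>
#include <linux/string.h>

/* Illustrative only: old vs. new input handling. */
static void parse_demo(void)
{
	u64 phys_addr;

	WARN_ON(!sysfs_streq("online\n", "online"));	/* trailing '\n' tolerated  */
	WARN_ON(sysfs_streq("online_x", "online"));	/* old strncmp hack matched */

	WARN_ON(kstrtoull("0x1000\n", 0, &phys_addr));		/* parsed, returns 0 */
	WARN_ON(kstrtoull("12junk", 0, &phys_addr) != -EINVAL);	/* rejected */
}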
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 8f7ed9933a7c..c6d3ae05f1ca 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -126,7 +126,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
nid, K(node_page_state(nid, NR_ANON_PAGES)),
- nid, K(node_page_state(nid, NR_SHMEM)),
+ nid, K(i.sharedram),
nid, node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
nid, K(node_page_state(nid, NR_PAGETABLE)),
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 36e54be402df..dfa4024c448a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -183,19 +183,32 @@ static ssize_t comp_algorithm_store(struct device *dev,
static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- return meta->table[index].flags & BIT(flag);
+ return meta->table[index].value & BIT(flag);
}
static void zram_set_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- meta->table[index].flags |= BIT(flag);
+ meta->table[index].value |= BIT(flag);
}
static void zram_clear_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- meta->table[index].flags &= ~BIT(flag);
+ meta->table[index].value &= ~BIT(flag);
+}
+
+static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+{
+ return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+}
+
+static void zram_set_obj_size(struct zram_meta *meta,
+ u32 index, size_t size)
+{
+ unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+
+ meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
static inline int is_partial_io(struct bio_vec *bvec)
@@ -255,7 +268,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
goto free_table;
}
- rwlock_init(&meta->tb_lock);
return meta;
free_table:
@@ -304,7 +316,12 @@ static void handle_zero_page(struct bio_vec *bvec)
flush_dcache_page(page);
}
-/* NOTE: caller should hold meta->tb_lock with write-side */
+
+/*
+ * To protect concurrent access to the same index entry, the caller
+ * should hold this table entry's bit_spinlock to indicate that the
+ * entry is being accessed.
+ */
static void zram_free_page(struct zram *zram, size_t index)
{
struct zram_meta *meta = zram->meta;
@@ -324,11 +341,12 @@ static void zram_free_page(struct zram *zram, size_t index)
zs_free(meta->mem_pool, handle);
- atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
+ atomic64_sub(zram_get_obj_size(meta, index),
+ &zram->stats.compr_data_size);
atomic64_dec(&zram->stats.pages_stored);
meta->table[index].handle = 0;
- meta->table[index].size = 0;
+ zram_set_obj_size(meta, index, 0);
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
@@ -337,14 +355,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
unsigned char *cmem;
struct zram_meta *meta = zram->meta;
unsigned long handle;
- u16 size;
+ size_t size;
- read_lock(&meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
handle = meta->table[index].handle;
- size = meta->table[index].size;
+ size = zram_get_obj_size(meta, index);
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
- read_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
clear_page(mem);
return 0;
}
@@ -355,7 +373,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
else
ret = zcomp_decompress(zram->comp, cmem, size, mem);
zs_unmap_object(meta->mem_pool, handle);
- read_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret)) {
@@ -376,14 +394,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- read_lock(&meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) {
- read_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
handle_zero_page(bvec);
return 0;
}
- read_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
@@ -461,10 +479,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
- write_lock(&zram->meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
zram_set_flag(meta, index, ZRAM_ZERO);
- write_unlock(&zram->meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
atomic64_inc(&zram->stats.zero_pages);
ret = 0;
@@ -514,12 +532,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
* Free memory associated with this sector
* before overwriting unused sectors.
*/
- write_lock(&zram->meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
meta->table[index].handle = handle;
- meta->table[index].size = clen;
- write_unlock(&zram->meta->tb_lock);
+ zram_set_obj_size(meta, index, clen);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
/* Update stats */
atomic64_add(clen, &zram->stats.compr_data_size);
@@ -560,6 +578,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
int offset, struct bio *bio)
{
size_t n = bio->bi_iter.bi_size;
+ struct zram_meta *meta = zram->meta;
/*
* zram manages data in physical block size units. Because logical block
@@ -580,13 +599,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
while (n >= PAGE_SIZE) {
- /*
- * Discard request can be large so the lock hold times could be
- * lengthy. So take the lock once per page.
- */
- write_lock(&zram->meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
- write_unlock(&zram->meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
index++;
n -= PAGE_SIZE;
}
@@ -821,9 +836,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram = bdev->bd_disk->private_data;
meta = zram->meta;
- write_lock(&meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
- write_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
atomic64_inc(&zram->stats.notify_free);
}
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 7f21c145e317..5b0afde729cd 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -43,7 +43,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*-- End of configurable params */
#define SECTOR_SHIFT 9
-#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
#define ZRAM_LOGICAL_BLOCK_SHIFT 12
@@ -51,10 +50,24 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
-/* Flags for zram pages (table[page_no].flags) */
+
+/*
+ * zram is mainly used for memory efficiency, so we want to keep the
+ * memory footprint small; hence size and flags are squeezed into a
+ * single field. The lower ZRAM_FLAG_SHIFT bits hold the object size
+ * (excluding header); the higher bits hold zram_pageflags.
+ */
+#define ZRAM_FLAG_SHIFT 24
+
+/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
/* Page consists entirely of zeros */
- ZRAM_ZERO,
+ ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
+ ZRAM_ACCESS, /* page is now being accessed */
__NR_ZRAM_PAGEFLAGS,
};
@@ -62,11 +75,10 @@ enum zram_pageflags {
/*-- Data structures */
/* Allocated for each disk page */
-struct table {
+struct zram_table_entry {
unsigned long handle;
- u16 size; /* object size (excluding header) */
- u8 flags;
-} __aligned(4);
+ unsigned long value;
+};
struct zram_stats {
atomic64_t compr_data_size; /* compressed size of pages stored */
@@ -81,8 +93,7 @@ struct zram_stats {
};
struct zram_meta {
- rwlock_t tb_lock; /* protect table */
- struct table *table;
+ struct zram_table_entry *table;
struct zs_pool *mem_pool;
};
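A worked example of the packing those helpers implement: with ZRAM_FLAG_SHIFT = 24 the object size occupies bits 0-23 (up to 2^24 - 1 bytes, ample for one compressed page), ZRAM_ZERO and ZRAM_ACCESS land on bits 25 and 26, and the ZRAM_ACCESS bit doubles as the per-entry bit_spin_lock that replaces the global tb_lock. The values below are illustrative:

#include <linux/bit_spinlock.h>

/* Illustrative only: size and flags sharing one table[].value word. */
static void zram_value_demo(void)
{
	unsigned long value = BIT(ZRAM_ZERO) | 3000;	/* bit 25 | object size */
	size_t size = value & (BIT(ZRAM_FLAG_SHIFT) - 1);

	bit_spin_lock(ZRAM_ACCESS, &value);	/* lock only this entry (bit 26) */
	WARN_ON(size != 3000 || !(value & BIT(ZRAM_ZERO)));
	bit_spin_unlock(ZRAM_ACCESS, &value);
}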
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 17cf96c45f2b..79f18e6d9c4f 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -286,7 +286,11 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = firmware_map_find_entry_bootmem(start, end, type);
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (entry)
+ return 0;
+
+ entry = firmware_map_find_entry_bootmem(start, end - 1, type);
if (!entry) {
entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
if (!entry)
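The end - 1 arguments reflect that firmware_map entries store inclusive end addresses while the hotplug path passes an exclusive one; the added firmware_map_find_entry() lookup also turns re-adding an existing range into a no-op. An illustrative call (addresses hypothetical):

/* A 128 MiB block hotplugged at 4 GiB is stored with an inclusive end,
 * [0x100000000, 0x107ffffff], hence the end - 1 in the lookups above. */
firmware_map_add_hotplug(0x100000000ULL, 0x108000000ULL, "System RAM");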
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 7e4bae760e27..c3b80fd65d62 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -125,7 +125,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = &entry->head;
}
if (parent) {
- hlist_add_after_rcu(parent, &item->head);
+ hlist_add_behind_rcu(&item->head, parent);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
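hlist_add_behind() is more than a rename: the argument order is swapped so that, as with list_add(), the node being inserted comes first and the existing node second. The same mechanical swap appears in the i40e, ixgbe, and lustre hunks below. In sketch form:

/* Old API: existing node first, new node second. */
hlist_add_after_rcu(parent, &item->head);
/* New API: new node first, the node it goes behind second. */
hlist_add_behind_rcu(&item->head, parent);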
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index ae208f612198..cccef87963e0 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -688,7 +688,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm,
atk_debugfs_gitm_get,
NULL,
- "0x%08llx\n")
+ "0x%08llx\n");
static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj)
{
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 0bf1e4edf04d..6590558d1d31 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -42,7 +42,6 @@ DEFINE_MUTEX(lguest_lock);
static __init int map_switcher(void)
{
int i, err;
- struct page **pagep;
/*
* Map the Switcher in to high memory.
@@ -110,11 +109,9 @@ static __init int map_switcher(void)
* This code actually sets up the pages we've allocated to appear at
* switcher_addr. map_vm_area() takes the vma we allocated above, the
* kind of pages we're mapping (kernel pages), and a pointer to our
- * array of struct pages. It increments that pointer, but we don't
- * care.
+ * array of struct pages.
*/
- pagep = lg_switcher_pages;
- err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
+ err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);
if (err) {
printk("lguest: map_vm_area failed: %i\n", err);
goto free_vma;
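This hunk, and the binder one further down, follow from the -mm change that gives map_vm_area() a plain struct page ** third argument instead of struct page ***; since the function no longer advances the caller's pointer, the scratch copies (pagep here, page_array_ptr in binder) can go. The new prototype, as assumed from that series:

/* include/linux/vmalloc.h after this series: pages is passed by value
 * and left untouched, so callers need no throwaway copy. */
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages);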
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 681a9e81ff51..e8ba7470700a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1948,7 +1948,7 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
/* add filter to the list */
if (parent)
- hlist_add_after(&parent->fdir_node, &input->fdir_node);
+ hlist_add_behind(&input->fdir_node, &parent->fdir_node);
else
hlist_add_head(&input->fdir_node,
&pf->fdir_filter_list);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 94a1c07efeb0..e4100b5737b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2517,7 +2517,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
/* add filter to the list */
if (parent)
- hlist_add_after(&parent->fdir_node, &input->fdir_node);
+ hlist_add_behind(&input->fdir_node, &parent->fdir_node);
else
hlist_add_head(&input->fdir_node,
&adapter->fdir_filter_list);
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 02b0379ae550..4f34dc0095b5 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -585,7 +585,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
- struct page **page_array_ptr;
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
@@ -598,8 +597,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
}
tmp_area.addr = page_addr;
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
- page_array_ptr = page;
- ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+ ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
proc->pid, page_addr);
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 5dde79418297..8ef1deb59d4a 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -351,7 +351,7 @@ cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
cfs_hash_dhead_t, dh_head);
if (dh->dh_tail != NULL) /* not empty */
- hlist_add_after(dh->dh_tail, hnode);
+ hlist_add_behind(hnode, dh->dh_tail);
else /* empty list */
hlist_add_head(hnode, &dh->dh_head);
dh->dh_tail = hnode;
@@ -406,7 +406,7 @@ cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
cfs_hash_dhead_dep_t, dd_head);
if (dh->dd_tail != NULL) /* not empty */
- hlist_add_after(dh->dd_tail, hnode);
+ hlist_add_behind(hnode, dh->dd_tail);
else /* empty list */
hlist_add_head(hnode, &dh->dd_head);
dh->dd_tail = hnode;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 454b65898e2c..42bad18c66c9 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -355,7 +355,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(first_online_node, GFP_KERNEL), GFP_KERNEL,
+ out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL), GFP_KERNEL,
0, NULL, true);
}