Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |   9
-rw-r--r--  lib/crc32.c          |   9
-rw-r--r--  lib/decompress.c     |   9
-rw-r--r--  lib/gcd.c            |   3
-rw-r--r--  lib/gen_crc32table.c |   6
-rw-r--r--  lib/genalloc.c       |  88
-rw-r--r--  lib/idr.c            |  32
-rw-r--r--  lib/parser.c         |  10
-rw-r--r--  lib/plist.c          |   4
-rw-r--r--  lib/scatterlist.c    |  16
-rw-r--r--  lib/spinlock_debug.c |  32
-rw-r--r--  lib/vsprintf.c       | 139
12 files changed, 226 insertions(+), 131 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 35c4565ee8fa..7fba3a98967f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -196,12 +196,13 @@ config LOCKUP_DETECTOR
thresholds can be controlled through the sysctl watchdog_thresh.
config HARDLOCKUP_DETECTOR
- def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
- !HAVE_NMI_WATCHDOG
+ def_bool y
+ depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
+ depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
- depends on LOCKUP_DETECTOR
+ depends on HARDLOCKUP_DETECTOR
help
Say Y here to enable the kernel to panic on "hard lockups",
which are bugs that cause the kernel to loop in kernel
@@ -212,7 +213,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC
config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
int
- depends on LOCKUP_DETECTOR
+ depends on HARDLOCKUP_DETECTOR
range 0 1
default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
default 1 if BOOTPARAM_HARDLOCKUP_PANIC
diff --git a/lib/crc32.c b/lib/crc32.c
index 61774b8db4de..072fbd8234d5 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -188,11 +188,13 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE);
+ return crc32_le_generic(crc, p, len,
+ (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+ return crc32_le_generic(crc, p, len,
+ (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
EXPORT_SYMBOL(crc32_le);
@@ -253,7 +255,8 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE);
+ return crc32_be_generic(crc, p, len,
+ (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
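
Since gen_crc32table now emits non-const tables (see the gen_crc32table.c hunk below), the casts above add the const qualifier back at the call site. A minimal user-space sketch of why the explicit pointer-to-array cast is needed; the names here are illustrative, not the kernel's:

#include <stdint.h>

typedef uint32_t u32;

static u32 table[4][256];       /* generated table, no longer const */

static u32 first(const u32 (*tab)[256])
{
        return tab[0][0];
}

int main(void)
{
        /* Passing "u32 (*)[256]" where "const u32 (*)[256]" is expected
         * is not an implicit conversion in C (the qualifier is added
         * below the top level), so compilers warn without the cast. */
        return (int)first((const u32 (*)[256])table);
}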
diff --git a/lib/decompress.c b/lib/decompress.c
index 3d766b7f60ab..31a804277282 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/init.h>
#ifndef CONFIG_DECOMPRESS_GZIP
# define gunzip NULL
@@ -31,11 +32,13 @@
# define unlzo NULL
#endif
-static const struct compress_format {
+struct compress_format {
unsigned char magic[2];
const char *name;
decompress_fn decompressor;
-} compressed_formats[] = {
+};
+
+static const struct compress_format compressed_formats[] __initdata = {
{ {037, 0213}, "gzip", gunzip },
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
@@ -45,7 +48,7 @@ static const struct compress_format {
{ {0, 0}, NULL, NULL }
};
-decompress_fn decompress_method(const unsigned char *inbuf, int len,
+decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
const char **name)
{
const struct compress_format *cf;
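
A hedged sketch of how a boot-time caller might use decompress_method(); probe_format(), image and image_len are hypothetical, and the caller is marked __init to match the annotation added above:

#include <linux/decompress/generic.h>
#include <linux/init.h>
#include <linux/printk.h>

static void __init probe_format(unsigned char *image, int image_len)
{
        const char *name;
        decompress_fn fn = decompress_method(image, image_len, &name);

        if (fn)
                pr_debug("image appears to be %s-compressed\n", name);
        else
                pr_debug("no known compression magic found\n");
}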
diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3cd14b3..3657f129d7b8 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
if (a < b)
swap(a, b);
+
+ if (!b)
+ return a;
while ((r = a % b) != 0) {
a = b;
b = r;
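
Without the new check, a zero argument reaches the a % b below with b == 0 (after the swap, gcd(12, 0) keeps b == 0) and divides by zero. A user-space sketch of the patched helper, assuming the usual Euclid convention gcd(x, 0) == x:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
        unsigned long r;

        if (a < b) {                    /* swap(a, b) */
                r = a;
                a = b;
                b = r;
        }
        if (!b)                         /* the new early return */
                return a;
        while ((r = a % b) != 0) {
                a = b;
                b = r;
        }
        return b;
}

int main(void)
{
        printf("%lu %lu\n", gcd(12, 0), gcd(48, 18));   /* 12 6 */
        return 0;
}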
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f8d5439e2d9..71fcfcd96410 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -109,7 +109,7 @@ int main(int argc, char** argv)
if (CRC_LE_BITS > 1) {
crc32init_le();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32table_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32table_le, LE_TABLE_ROWS,
@@ -119,7 +119,7 @@ int main(int argc, char** argv)
if (CRC_BE_BITS > 1) {
crc32init_be();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32table_be[%d][%d] = {",
BE_TABLE_ROWS, BE_TABLE_SIZE);
output_table(crc32table_be, LE_TABLE_ROWS,
@@ -128,7 +128,7 @@ int main(int argc, char** argv)
}
if (CRC_LE_BITS > 1) {
crc32cinit_le();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32ctable_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32ctable_le, LE_TABLE_ROWS,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 6bc04aab6ec7..ca208a92628c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->chunks);
pool->min_alloc_order = min_alloc_order;
+ pool->algo = gen_pool_first_fit;
+ pool->data = NULL;
}
return pool;
}
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy);
* @size: number of bytes to allocate from the pool
*
* Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm. Can not be used in NMI handler on
- * architectures without NMI-safe cmpxchg implementation.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
- start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
- start_bit, nbits, 0);
+ start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
+ pool->data);
if (start_bit >= end_bit)
continue;
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool)
return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
+
+/**
+ * gen_pool_set_algo - set the allocation algorithm
+ * @pool: pool to change allocation algorithm
+ * @algo: custom algorithm function
+ * @data: additional data used by @algo
+ *
+ * Call @algo for each memory allocation in the pool.
+ * If @algo is NULL use gen_pool_first_fit as the default
+ * memory allocation function.
+ */
+void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
+{
+ rcu_read_lock();
+
+ pool->algo = algo;
+ if (!pool->algo)
+ pool->algo = gen_pool_first_fit;
+
+ pool->data = data;
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_set_algo);
+
+/**
+ * gen_pool_first_fit - find the first available region
+ * of memory matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data)
+{
+ return bitmap_find_next_zero_area(map, size, start, nr, 0);
+}
+EXPORT_SYMBOL(gen_pool_first_fit);
+
+/**
+ * gen_pool_best_fit - find the best fitting region of memory
+ * matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ *
+ * Iterate over the bitmap to find the smallest free region
+ * in which we can allocate the memory.
+ */
+unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data)
+{
+ unsigned long start_bit = size;
+ unsigned long len = size + 1;
+ unsigned long index;
+
+ index = bitmap_find_next_zero_area(map, size, start, nr, 0);
+
+ while (index < size) {
+ int next_bit = find_next_bit(map, size, index + nr);
+ if ((next_bit - index) < len) {
+ len = next_bit - index;
+ start_bit = index;
+ if (len == nr)
+ return start_bit;
+ }
+ index = bitmap_find_next_zero_area(map, size,
+ next_bit + 1, nr, 0);
+ }
+
+ return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_best_fit);
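
A hedged sketch of the new API from a caller's point of view; the pool geometry (256-byte granularity over a hypothetical sram_virt/sram_size region) is illustrative:

#include <linux/genalloc.h>
#include <linux/errno.h>

static int sram_pool_demo(unsigned long sram_virt, size_t sram_size)
{
        struct gen_pool *pool;
        unsigned long addr;

        pool = gen_pool_create(8, -1);  /* 2^8 = 256-byte granularity */
        if (!pool)
                return -ENOMEM;
        if (gen_pool_add(pool, sram_virt, sram_size, -1)) {
                gen_pool_destroy(pool);
                return -ENOMEM;
        }

        /* switch from the default first-fit to the new best-fit;
         * passing NULL for @algo would restore gen_pool_first_fit */
        gen_pool_set_algo(pool, gen_pool_best_fit, NULL);

        addr = gen_pool_alloc(pool, 512);
        if (addr)
                gen_pool_free(pool, addr, 512);

        gen_pool_destroy(pool);
        return 0;
}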
diff --git a/lib/idr.c b/lib/idr.c
index 4046e29c0a99..648239079dd2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -20,7 +20,7 @@
* that id to this code and it returns your pointer.
* You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
+ * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
* don't need to go to the memory "store" during an id allocate, just
* so you don't need to be too concerned about locking and conflicts
* with the slab allocator.
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
*/
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
- while (idp->id_free_cnt < IDR_FREE_MAX) {
+ while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
sh = IDR_BITS*l;
id = ((id >> sh) ^ n ^ m) << sh;
}
- if ((id >= MAX_ID_BIT) || (id < 0))
+ if ((id >= MAX_IDR_BIT) || (id < 0))
return IDR_NOMORE_SPACE;
if (l == 0)
break;
@@ -223,7 +223,7 @@ build_up:
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
- while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+ while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
int id;
id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_LEVEL];
+ struct idr_layer **pa[MAX_IDR_LEVEL];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
struct idr_layer *to_free;
/* Mask off upper bits we don't use for the search. */
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
to_free->bitmap = to_free->count = 0;
free_layer(to_free);
}
- while (idp->id_free_cnt >= IDR_FREE_MAX) {
+ while (idp->id_free_cnt >= MAX_IDR_FREE) {
p = get_from_free_list(idp);
/*
* Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
int n, id, max;
int bt_mask;
struct idr_layer *p;
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
n = (p->layer+1) * IDR_BITS;
/* Mask off upper bits we don't use for the search. */
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
if (id >= (1 << n))
return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
{
int n, id, max, error = 0;
struct idr_layer *p;
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
*/
void *idr_get_next(struct idr *idp, int *nextidp)
{
- struct idr_layer *p, *pa[MAX_LEVEL];
+ struct idr_layer *p, *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
int id = *nextidp;
int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
n = (p->layer+1) * IDR_BITS;
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
if (id >= (1 << n))
return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct ida_bitmap *bitmap;
unsigned long flags;
int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
if (t < 0)
return _idr_rc_to_errno(t);
- if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+ if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
}
id = idr_id * IDA_BITMAP_BITS + t;
- if (id >= MAX_ID_BIT)
+ if (id >= MAX_IDR_BIT)
return -ENOSPC;
__set_bit(t, bitmap->bitmap);
diff --git a/lib/parser.c b/lib/parser.c
index c43410084838..52cfa69f73df 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -122,13 +122,14 @@ int match_token(char *s, const match_table_t table, substring_t args[])
*
* Description: Given a &substring_t and a base, attempts to parse the substring
* as a number in that base. On success, sets @result to the integer represented
- * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure.
+ * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
static int match_number(substring_t *s, int *result, int base)
{
char *endp;
char *buf;
int ret;
+ long val;
size_t len = s->to - s->from;
buf = kmalloc(len + 1, GFP_KERNEL);
@@ -136,10 +137,15 @@ static int match_number(substring_t *s, int *result, int base)
return -ENOMEM;
memcpy(buf, s->from, len);
buf[len] = '\0';
- *result = simple_strtol(buf, &endp, base);
+
ret = 0;
+ val = simple_strtol(buf, &endp, base);
if (endp == buf)
ret = -EINVAL;
+ else if (val < (long)INT_MIN || val > (long)INT_MAX)
+ ret = -ERANGE;
+ else
+ *result = (int) val;
kfree(buf);
return ret;
}
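
With the new check, a token whose value does not fit in an int now fails with -ERANGE instead of being silently truncated. A hedged sketch of the effect as seen through match_int(), which wraps match_number() with base 0; parse_limit_opt() is a hypothetical caller:

#include <linux/parser.h>

static int parse_limit_opt(substring_t *arg)
{
        int limit;
        int err = match_int(arg, &limit);

        /* on a 64-bit kernel, a token like "4294967296" used to be
         * truncated on assignment; it now yields -ERANGE */
        if (err)        /* -ENOMEM, -EINVAL or -ERANGE */
                return err;
        return limit;
}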
diff --git a/lib/plist.c b/lib/plist.c
index 6ab0e521c48b..1ebc95f7a46f 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -175,7 +175,7 @@ static int __init plist_test(void)
int nr_expect = 0, i, loop;
unsigned int r = local_clock();
- printk(KERN_INFO "start plist test\n");
+ pr_debug("start plist test\n");
plist_head_init(&test_head);
for (i = 0; i < ARRAY_SIZE(test_node); i++)
plist_node_init(test_node + i, 0);
@@ -203,7 +203,7 @@ static int __init plist_test(void)
plist_test_check(nr_expect);
}
- printk(KERN_INFO "end plist test\n");
+ pr_debug("end plist test\n");
return 0;
}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index fadae774a20c..e76d85cf3175 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -404,14 +404,13 @@ EXPORT_SYMBOL(sg_miter_start);
* @miter: sg mapping iter to proceed
*
* Description:
- * Proceeds @miter@ to the next mapping. @miter@ should have been
- * started using sg_miter_start(). On successful return,
- * @miter@->page, @miter@->addr and @miter@->length point to the
- * current mapping.
+ * Proceeds @miter to the next mapping. @miter should have been started
+ * using sg_miter_start(). On successful return, @miter->page,
+ * @miter->addr and @miter->length point to the current mapping.
*
* Context:
- * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
- * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
+ * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
+ * till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
*
* Returns:
* true if @miter contains the next mapping. false if end of sg
@@ -465,7 +464,8 @@ EXPORT_SYMBOL(sg_miter_next);
* resources (kmap) need to be released during iteration.
*
* Context:
- * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
+ * otherwise.
*/
void sg_miter_stop(struct sg_mapping_iter *miter)
{
@@ -479,7 +479,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
- WARN_ON(!irqs_disabled());
+ WARN_ON_ONCE(preemptible());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
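
A hedged sketch of the iteration contract the updated comments describe: with SG_MITER_ATOMIC, preemption (rather than IRQs) must stay disabled from sg_miter_next() until sg_miter_stop(). The function name and the zero-counting payload are illustrative:

#include <linux/scatterlist.h>

static size_t count_zero_bytes(struct scatterlist *sgl, unsigned int nents)
{
        struct sg_mapping_iter miter;
        size_t zeros = 0, i;

        sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
        while (sg_miter_next(&miter)) {
                const u8 *p = miter.addr;

                for (i = 0; i < miter.length; i++)
                        zeros += (p[i] == 0);
        }
        sg_miter_stop(&miter);  /* releases the last atomic mapping */
        return zeros;
}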
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
- int print_once = 1;
- for (;;) {
- for (i = 0; i < loops; i++) {
- if (arch_spin_trylock(&lock->raw_lock))
- return;
- __delay(1);
- }
- /* lockup suspected: */
- if (print_once) {
- print_once = 0;
- spin_dump(lock, "lockup suspected");
+ for (i = 0; i < loops; i++) {
+ if (arch_spin_trylock(&lock->raw_lock))
+ return;
+ __delay(1);
+ }
+ /* lockup suspected: */
+ spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
- trigger_all_cpu_backtrace();
+ trigger_all_cpu_backtrace();
#endif
- }
- }
+
+ /*
+ * The trylock above was causing a livelock. Give the lower level arch
+ * specific lock code a chance to acquire the lock. We have already
+ * printed a warning/backtrace at this point. The non-debug arch
+ * specific code might actually succeed in acquiring the lock. If it is
+ * not successful, the end-result is the same - there is no forward
+ * progress.
+ */
+ arch_spin_lock(&lock->raw_lock);
}
void do_raw_spin_lock(raw_spinlock_t *lock)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0e337541f005..39c99fea7c03 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -174,35 +174,25 @@ char *put_dec_trunc8(char *buf, unsigned r)
unsigned q;
/* Copy of previous function's body with added early returns */
- q = (r * (uint64_t)0x1999999a) >> 32;
- *buf++ = (r - 10 * q) + '0'; /* 2 */
- if (q == 0)
- return buf;
- r = (q * (uint64_t)0x1999999a) >> 32;
- *buf++ = (q - 10 * r) + '0'; /* 3 */
- if (r == 0)
- return buf;
- q = (r * (uint64_t)0x1999999a) >> 32;
- *buf++ = (r - 10 * q) + '0'; /* 4 */
- if (q == 0)
- return buf;
- r = (q * (uint64_t)0x1999999a) >> 32;
- *buf++ = (q - 10 * r) + '0'; /* 5 */
- if (r == 0)
- return buf;
- q = (r * 0x199a) >> 16;
- *buf++ = (r - 10 * q) + '0'; /* 6 */
+ while (r >= 10000) {
+ q = r + '0';
+ r = (r * (uint64_t)0x1999999a) >> 32;
+ *buf++ = q - 10*r;
+ }
+
+ q = (r * 0x199a) >> 16; /* r <= 9999 */
+ *buf++ = (r - 10 * q) + '0';
if (q == 0)
return buf;
- r = (q * 0xcd) >> 11;
- *buf++ = (q - 10 * r) + '0'; /* 7 */
+ r = (q * 0xcd) >> 11; /* q <= 999 */
+ *buf++ = (q - 10 * r) + '0';
if (r == 0)
return buf;
- q = (r * 0xcd) >> 11;
- *buf++ = (r - 10 * q) + '0'; /* 8 */
+ q = (r * 0xcd) >> 11; /* r <= 99 */
+ *buf++ = (r - 10 * q) + '0';
if (q == 0)
return buf;
- *buf++ = q + '0'; /* 9 */
+ *buf++ = q + '0'; /* q <= 9 */
return buf;
}
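
The magic multiplier in the loop is the usual reciprocal trick: 0x1999999a is (2^32 + 4) / 10, so (r * 0x1999999a) >> 32 equals r / 10 exactly for r up to roughly 2^30, far above the at-most-8-digit values put_dec_trunc8 sees. A quick user-space spot-check:

#include <stdint.h>
#include <stdio.h>

static uint32_t div10(uint32_t n)
{
        return (uint32_t)(((uint64_t)n * 0x1999999a) >> 32);
}

int main(void)
{
        /* check around the largest value handled here (8 digits) */
        for (uint32_t n = 99999990; n <= 99999999; n++)
                if (div10(n) != n / 10)
                        printf("mismatch at %u\n", n);
        printf("ok up to 99999999\n");
        return 0;
}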
@@ -243,18 +233,34 @@ char *put_dec(char *buf, unsigned long long n)
/* Second algorithm: valid only for 64-bit long longs */
+/* See comment in put_dec_full9 for choice of constants */
static noinline_for_stack
-char *put_dec_full4(char *buf, unsigned q)
+void put_dec_full4(char *buf, unsigned q)
{
unsigned r;
- r = (q * 0xcccd) >> 19;
- *buf++ = (q - 10 * r) + '0';
- q = (r * 0x199a) >> 16;
- *buf++ = (r - 10 * q) + '0';
+ r = (q * 0xccd) >> 15;
+ buf[0] = (q - 10 * r) + '0';
+ q = (r * 0xcd) >> 11;
+ buf[1] = (r - 10 * q) + '0';
r = (q * 0xcd) >> 11;
- *buf++ = (q - 10 * r) + '0';
- *buf++ = r + '0';
- return buf;
+ buf[2] = (q - 10 * r) + '0';
+ buf[3] = r + '0';
+}
+
+/*
+ * Call put_dec_full4 on x % 10000, return x / 10000.
+ * The approximation x/10000 == (x * 0x346DC5D7) >> 43
+ * holds for all x < 1,128,869,999. The largest value this
+ * helper will ever be asked to convert is 1,125,520,955.
+ * (d1 in the put_dec code, assuming n is all-ones).
+ */
+static
+unsigned put_dec_helper4(char *buf, unsigned x)
+{
+ uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
+
+ put_dec_full4(buf, x - q * 10000);
+ return q;
}
/* Based on code by Douglas W. Jones found at
@@ -276,28 +282,19 @@ char *put_dec(char *buf, unsigned long long n)
d3 = (h >> 16); /* implicit "& 0xffff" */
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+ q = put_dec_helper4(buf, q);
+
+ q += 7671 * d3 + 9496 * d2 + 6 * d1;
+ q = put_dec_helper4(buf+4, q);
+
+ q += 4749 * d3 + 42 * d2;
+ q = put_dec_helper4(buf+8, q);
- buf = put_dec_full4(buf, q % 10000);
- q = q / 10000;
-
- d1 = q + 7671 * d3 + 9496 * d2 + 6 * d1;
- buf = put_dec_full4(buf, d1 % 10000);
- q = d1 / 10000;
-
- d2 = q + 4749 * d3 + 42 * d2;
- buf = put_dec_full4(buf, d2 % 10000);
- q = d2 / 10000;
-
- d3 = q + 281 * d3;
- if (!d3)
- goto done;
- buf = put_dec_full4(buf, d3 % 10000);
- q = d3 / 10000;
- if (!q)
- goto done;
- buf = put_dec_full4(buf, q);
- done:
- while (buf[-1] == '0')
+ q += 281 * d3;
+ buf += 12;
+ if (q)
+ buf = put_dec_trunc8(buf, q);
+ else while (buf[-1] == '0')
--buf;
return buf;
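
The bound quoted in put_dec_helper4's comment can be verified exhaustively in user space; this brute-force sketch takes a few seconds and should report that 1,128,869,999 itself is the first failing input:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t x;

        for (x = 0; x < 1128869999u; x++)
                if ((uint32_t)(((uint64_t)x * 0x346DC5D7) >> 43) != x / 10000) {
                        printf("fails at %u\n", x);
                        return 1;
                }
        printf("holds for all x < 1128869999; 1128869999 -> %u (not %u)\n",
               (uint32_t)(((uint64_t)1128869999u * 0x346DC5D7) >> 43),
               1128869999u / 10000);
        return 0;
}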
@@ -990,7 +987,7 @@ int kptr_restrict __read_mostly;
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
* - 'MF' For a 6-byte MAC FDDI address, it prints the address
* with a dash-separated hex notation
- * - '[mM]R For a 6-byte MAC address, Reverse order (Bluetooth)
+ * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
* - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
* IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
* IPv6 uses colon separated network-order 16 bit hex with leading 0's
@@ -1341,7 +1338,10 @@ qualifier:
* %pR output the address range in a struct resource with decoded flags
* %pr output the address range in a struct resource with raw flags
* %pM output a 6-byte MAC address with colons
+ * %pMR output a 6-byte MAC address with colons in reversed order
+ * %pMF output a 6-byte MAC address with dashes
* %pm output a 6-byte MAC address without colons
+ * %pmR output a 6-byte MAC address without colons in reversed order
* %pI4 print an IPv4 address without leading zeros
* %pi4 print an IPv4 address with leading zeros
* %pI6 print an IPv6 address with colons
@@ -2017,7 +2017,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
s16 field_width;
bool is_sign;
- while (*fmt && *str) {
+ while (*fmt) {
/* skip any white space in format */
/* white space in format matches any amount of
* white space, including none, in the input.
@@ -2042,6 +2042,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
* advance both strings to next white space
*/
if (*fmt == '*') {
+ if (!*str)
+ break;
while (!isspace(*fmt) && *fmt != '%' && *fmt)
fmt++;
while (!isspace(*str) && *str)
@@ -2070,7 +2072,17 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
}
}
- if (!*fmt || !*str)
+ if (!*fmt)
+ break;
+
+ if (*fmt == 'n') {
+ /* return number of characters read so far */
+ *va_arg(args, int *) = str - buf;
+ ++fmt;
+ continue;
+ }
+
+ if (!*str)
break;
base = 10;
@@ -2103,13 +2115,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
num++;
}
continue;
- case 'n':
- /* return number of characters read so far */
- {
- int *i = (int *)va_arg(args, int*);
- *i = str - buf;
- }
- continue;
case 'o':
base = 8;
break;
@@ -2210,16 +2215,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
str = next;
}
- /*
- * Now we've come all the way through so either the input string or the
- * format ended. In the former case, there can be a %n at the current
- * position in the format that needs to be filled.
- */
- if (*fmt == '%' && *(fmt + 1) == 'n') {
- int *p = (int *)va_arg(args, int *);
- *p = str - buf;
- }
-
return num;
}
EXPORT_SYMBOL(vsscanf);
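
With %n handled before the input-exhaustion check (and the old trailing-%n special case deleted), the kernel's vsscanf now matches the familiar C library semantics: %n fires even when the input string has been fully consumed. A user-space analogue:

#include <stdio.h>

int main(void)
{
        int a, b, pos = -1;
        /* "12 34" is fully consumed before %n is reached; %n must
         * still store the number of characters read so far */
        int n = sscanf("12 34", "%d %d%n", &a, &b, &pos);

        printf("n=%d a=%d b=%d pos=%d\n", n, a, b, pos);
        /* prints: n=2 a=12 b=34 pos=5 */
        return 0;
}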