Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    32
-rw-r--r--  lib/find_next_bit.c  43
2 files changed, 75 insertions(+), 0 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 14fb355e3caa..c4ecb2994ba3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -79,6 +79,38 @@ config HEADERS_CHECK
exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
your build tree), to make sure they're suitable.
+config DEBUG_SECTION_MISMATCH
+ bool "Enable full Section mismatch analysis"
+ default n
+ help
+ The section mismatch analysis checks if there are illegal
+ references from one section to another.
+ During link time or at runtime Linux will drop some sections,
+ and any later use of code/data previously in these sections
+ will most likely result in an oops.
+ In the code, functions and variables are annotated with
+ __init, __devinit, etc. (see the full list in include/linux/init.h),
+ which results in the code/data being placed in specific sections.
+ The section mismatch analysis is always done after a full
+ kernel build, but enabling this option will in addition
+ do the following:
+ - Add the option -fno-inline-functions-called-once to gcc
+ When a function annotated __init is inlined into a non-init
+ function, we would lose the section information and thus
+ the analysis would not catch the illegal reference.
+ This option tells gcc to inline less, but will also
+ result in a larger kernel.
+ - Run the section mismatch analysis for each module/built-in.o
+ When we run the section mismatch analysis on vmlinux.o we
+ lose valuable information about where the mismatch was
+ introduced.
+ Running the analysis for each module/built-in.o file
+ will point to where the mismatch happens much closer to
+ the source. The drawback is that we will report the same
+ mismatch at least twice.
+ - Enable verbose reporting from modpost to help solve the
+ reported section mismatches.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
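
The help text above describes illegal cross-section references in the abstract. A minimal sketch of the kind of code the analysis flags, using hypothetical functions setup_foo() and foo_runtime() (not part of this patch): setup_foo() is placed in .init.text and discarded after boot, yet foo_runtime() stays resident and still calls it.

#include <linux/init.h>

/* Illustration only: setup_foo() lives in .init.text and is freed
 * once the kernel has finished booting.
 */
static int __init setup_foo(void)
{
        return 0;
}

/* foo_runtime() stays in .text; calling setup_foo() from here is the
 * illegal reference that the section mismatch analysis reports.
 */
int foo_runtime(void)
{
        return setup_foo();
}

With DEBUG_SECTION_MISMATCH enabled, modpost would warn about the reference from foo_runtime() in .text to setup_foo() in .init.text, and would do so per module/built-in.o, close to where the call was introduced.
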
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index bda0d71a2514..78ccd73a8841 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -178,4 +178,47 @@ found_middle_swap:
EXPORT_SYMBOL(generic_find_next_zero_le_bit);
+unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned
+ long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= (BITS_PER_LONG - 1UL);
+ if (offset) {
+ tmp = ext2_swabp(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+
+ while (size & ~(BITS_PER_LONG - 1)) {
+ tmp = *(p++);
+ if (tmp)
+ goto found_middle_swap;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = ext2_swabp(p);
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+
+found_middle_swap:
+ return result + __ffs(ext2_swab(tmp));
+}
+EXPORT_SYMBOL(generic_find_next_le_bit);
#endif /* __BIG_ENDIAN */
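
A minimal usage sketch for the helper added above, assuming a hypothetical caller walk_le_bitmap() that visits every set bit in a little-endian on-disk bitmap (bitmap and nbits are illustrative names, not part of this patch):

static void walk_le_bitmap(const unsigned long *bitmap, unsigned long nbits)
{
        unsigned long bit = generic_find_next_le_bit(bitmap, nbits, 0);

        while (bit < nbits) {
                /* process 'bit' here */
                bit = generic_find_next_le_bit(bitmap, nbits, bit + 1);
        }
}

generic_find_next_le_bit() returns size when no further bit is set, which is what terminates the loop. This implementation is only built on big-endian hosts (the hunk sits inside the #ifdef __BIG_ENDIAN block that closes above); there, ext2_swabp()/ext2_swab() byte-swap each word so the bit numbering matches the little-endian layout, while the while loop skips the swap for all-zero words because a zero word is zero in either byte order.
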