Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug        |  17
-rw-r--r--   lib/bitmap.c             |  12
-rw-r--r--   lib/dma-debug.c          |  28
-rw-r--r--   lib/flex_array.c         |  41
-rw-r--r--   lib/inflate.c            |   2
-rw-r--r--   lib/is_single_threaded.c |  61
-rw-r--r--   lib/lmb.c                |   2
-rw-r--r--   lib/swiotlb.c            | 124
-rw-r--r--   lib/vsprintf.c           | 199
9 files changed, 296 insertions, 190 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 43173c4e0ade..55d2acc607a1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -653,6 +653,21 @@ config DEBUG_NOTIFIERS
 	  This is a relatively cheap check but if you care about maximum
 	  performance, say N.
 
+config DEBUG_CREDENTIALS
+	bool "Debug credential management"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on some debug checking for credential
+	  management.  The additional code keeps track of the number of
+	  pointers from task_structs to any given cred struct, and checks to
+	  see that this number never exceeds the usage count of the cred
+	  struct.
+
+	  Furthermore, if SELinux is enabled, this also checks that the
+	  security pointer in the cred struct is never seen to be invalid.
+
+	  If unsure, say N.
+
 #
 # Select this config option from the architecture Kconfig, if it
 # it is preferred to always offer frame pointers as a config
@@ -725,7 +740,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_DETECTOR
 	bool "Check for stalled CPUs delaying RCU grace periods"
-	depends on CLASSIC_RCU || TREE_RCU
+	depends on TREE_RCU || TREE_PREEMPT_RCU
 	default n
 	help
 	  This option causes RCU to printk information on which
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 35a1f7ff4149..702565821c99 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
 }
 EXPORT_SYMBOL(__bitmap_shift_left);
 
-void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, int bits)
 {
 	int k;
 	int nr = BITS_TO_LONGS(bits);
+	unsigned long result = 0;
 
 	for (k = 0; k < nr; k++)
-		dst[k] = bitmap1[k] & bitmap2[k];
+		result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+	return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_and);
 
@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_xor);
 
-void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits)
 {
 	int k;
 	int nr = BITS_TO_LONGS(bits);
+	unsigned long result = 0;
 
 	for (k = 0; k < nr; k++)
-		dst[k] = bitmap1[k] & ~bitmap2[k];
+		result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+	return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_andnot);
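The new return value lets callers fold the emptiness test into the AND itself. A minimal caller sketch, assuming the bitmap_and()/bitmap_andnot() wrappers in include/linux/bitmap.h were updated to forward the return value in the same series (that header is outside this lib-only diffstat):

    DECLARE_BITMAP(allowed, 128);
    DECLARE_BITMAP(requested, 128);
    DECLARE_BITMAP(granted, 128);

    /* granted = allowed & requested; the return value says whether any
     * bit survived, so no separate bitmap_empty() pass is needed. */
    if (!bitmap_and(granted, allowed, requested, 128))
            pr_debug("request and policy share no bits\n");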
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 65b0d99b6d0a..58a9f9fc609a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -156,9 +156,13 @@ static bool driver_filter(struct device *dev)
 		return true;
 
 	/* driver filter on and initialized */
-	if (current_driver && dev->driver == current_driver)
+	if (current_driver && dev && dev->driver == current_driver)
 		return true;
 
+	/* driver filter on, but we can't filter on a NULL device... */
+	if (!dev)
+		return false;
+
 	if (current_driver || !current_driver_name[0])
 		return false;
 
@@ -183,17 +187,17 @@ static bool driver_filter(struct device *dev)
 	return ret;
 }
 
-#define err_printk(dev, entry, format, arg...) do {		\
-		error_count += 1;				\
-		if (driver_filter(dev) &&			\
-		    (show_all_errors || show_num_errors > 0)) {	\
-			WARN(1, "%s %s: " format,		\
-			     dev_driver_string(dev),		\
-			     dev_name(dev) , ## arg);		\
-			dump_entry_trace(entry);		\
-		}						\
-		if (!show_all_errors && show_num_errors > 0)	\
-			show_num_errors -= 1;			\
+#define err_printk(dev, entry, format, arg...) do {			\
+		error_count += 1;					\
+		if (driver_filter(dev) &&				\
+		    (show_all_errors || show_num_errors > 0)) {		\
+			WARN(1, "%s %s: " format,			\
+			     dev ? dev_driver_string(dev) : "NULL",	\
+			     dev ? dev_name(dev) : "NULL", ## arg);	\
+			dump_entry_trace(entry);			\
+		}							\
+		if (!show_all_errors && show_num_errors > 0)		\
+			show_num_errors -= 1;				\
 	} while (0);
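Unrolled, the NULL guard the new macro body adds looks like this; dev_driver_string() and dev_name() both dereference the device, so neither may be called with dev == NULL (the message text below is illustrative, not from dma-debug):

    const char *drv  = dev ? dev_driver_string(dev) : "NULL";
    const char *name = dev ? dev_name(dev) : "NULL";

    WARN(1, "%s %s: device driver sanity check failed\n", drv, name);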
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 08f1636d296a..7baed2fc3bc8 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,8 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * capacity in the base structure.  Also note that no effort is made
  * to efficiently pack objects across page boundaries.
  */
-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+					gfp_t flags)
 {
 	struct flex_array *ret;
 	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
@@ -115,16 +116,14 @@ struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
 	return ret;
 }
 
-static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+static int fa_element_to_part_nr(struct flex_array *fa,
+					unsigned int element_nr)
 {
 	return element_nr / __elements_per_part(fa->element_size);
 }
 
 /**
  * flex_array_free_parts - just free the second-level pages
- * @src:	address of data to copy into the array
- * @element_nr:	index of the position in which to insert
- *		the new element.
  *
  * This is to be used in cases where the base 'struct flex_array'
  * has been statically allocated and should not be free.
@@ -146,14 +145,12 @@ void flex_array_free(struct flex_array *fa)
 	kfree(fa);
 }
 
-static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+static unsigned int index_inside_part(struct flex_array *fa,
+					unsigned int element_nr)
 {
-	return element_nr % __elements_per_part(fa->element_size);
-}
+	unsigned int part_offset;
 
-static int index_inside_part(struct flex_array *fa, int element_nr)
-{
-	int part_offset = fa_index_inside_part(fa, element_nr);
+	part_offset = element_nr % __elements_per_part(fa->element_size);
 	return part_offset * fa->element_size;
 }
 
@@ -188,7 +185,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+			gfp_t flags)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
@@ -198,10 +196,11 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
 		return -ENOSPC;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
-	else
+	else {
 		part = __fa_get_part(fa, part_nr, flags);
-	if (!part)
-		return -ENOMEM;
+		if (!part)
+			return -ENOMEM;
+	}
 	dst = &part->elements[index_inside_part(fa, element_nr)];
 	memcpy(dst, src, fa->element_size);
 	return 0;
@@ -219,7 +218,8 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
  *
  * Locking must be provided by the caller.
  */
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+			unsigned int end, gfp_t flags)
 {
 	int start_part;
 	int end_part;
@@ -250,18 +250,19 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-void *flex_array_get(struct flex_array *fa, int element_nr)
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
 
 	if (element_nr >= fa->total_nr_elements)
 		return NULL;
-	if (!fa->parts[part_nr])
-		return NULL;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
-	else
+	else {
 		part = fa->parts[part_nr];
+		if (!part)
+			return NULL;
+	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
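A minimal usage sketch of the reworked API (struct foo, the index, and the element count are illustrative): flex_array_put() takes gfp flags because it may allocate the backing part, and after this change flex_array_get() only checks for a missing part when the elements don't fit in the base structure:

    struct foo { int x; };
    struct flex_array *fa;
    struct foo v = { .x = 42 }, *out;

    fa = flex_array_alloc(sizeof(struct foo), 128, GFP_KERNEL);
    if (!fa)
            return -ENOMEM;

    if (flex_array_put(fa, 5, &v, GFP_KERNEL) == 0) {   /* may allocate a part */
            out = flex_array_get(fa, 5);   /* NULL if the part was never allocated */
            if (out)
                    pr_info("element 5: %d\n", out->x);
    }
    flex_array_free(fa);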
diff --git a/lib/inflate.c b/lib/inflate.c
index 1a8e8a978128..d10255973a9f 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -7,7 +7,7 @@
  * Adapted for booting Linux by Hannu Savolainen 1993
  * based on gzip-1.0.3
  *
- * Nicolas Pitre <nico@cam.org>, 1999/04/14 :
+ * Nicolas Pitre <nico@fluxnic.net>, 1999/04/14 :
  *   Little mods for all variable to reside either into rodata or bss segments
  *   by marking constant variables with 'const' and initializing all the others
  *   at run-time only.  This allows for the kernel uncompressor to run
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
 
 #include <linux/sched.h>
 
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
  */
-bool is_single_threaded(struct task_struct *p)
+bool current_is_single_threaded(void)
 {
-	struct task_struct *g, *t;
-	struct mm_struct *mm = p->mm;
+	struct task_struct *task = current;
+	struct mm_struct *mm = task->mm;
+	struct task_struct *p, *t;
+	bool ret;
 
-	if (atomic_read(&p->signal->count) != 1)
-		goto no;
+	if (atomic_read(&task->signal->live) != 1)
+		return false;
 
-	if (atomic_read(&p->mm->mm_users) != 1) {
-		read_lock(&tasklist_lock);
-		do_each_thread(g, t) {
-			if (t->mm == mm && t != p)
-				goto no_unlock;
-		} while_each_thread(g, t);
-		read_unlock(&tasklist_lock);
-	}
+	if (atomic_read(&mm->mm_users) == 1)
+		return true;
 
-	return true;
+	ret = false;
+	rcu_read_lock();
+	for_each_process(p) {
+		if (unlikely(p->flags & PF_KTHREAD))
+			continue;
+		if (unlikely(p == task->group_leader))
+			continue;
+
+		t = p;
+		do {
+			if (unlikely(t->mm == mm))
+				goto found;
+			if (likely(t->mm))
+				break;
+			/*
+			 * t->mm == NULL. Make sure next_thread/next_task
+			 * will see other CLONE_VM tasks which might be
+			 * forked before exiting.
+			 */
+			smp_rmb();
+		} while_each_thread(p, t);
+	}
+	ret = true;
+found:
+	rcu_read_unlock();
 
-no_unlock:
-	read_unlock(&tasklist_lock);
-no:
-	return false;
+	return ret;
 }
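The rewritten helper answers "can any other task be using current->mm?" without taking tasklist_lock. A hypothetical call site, sketched only to show the intended use (the actual callers live in the credentials/security paths, outside this diff):

    /* Refuse an operation that is only safe while no other thread or
     * CLONE_VM process shares current->mm. */
    if (!current_is_single_threaded())
            return -EPERM;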
diff --git a/lib/lmb.c b/lib/lmb.c
index e4a6482d8b26..0343c05609f0 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
 	return lmb.memory.size;
 }
 
-u64 __init lmb_end_of_DRAM(void)
+u64 lmb_end_of_DRAM(void)
 {
 	int idx = lmb.memory.cnt - 1;
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bffe6d7ef9d9..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -114,46 +114,11 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 
 /* make io_tlb_overflow tunable too? */
 
-void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
-	return alloc_bootmem_low_pages(size);
-}
-
-void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
-{
-	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
-}
-
-dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-	return paddr;
-}
-
-phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-{
-	return baddr;
-}
-
+/* Note that this doesn't work with highmem pages */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
 {
-	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
-}
-
-void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
-{
-	return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
-}
-
-int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
-					       dma_addr_t addr, size_t size)
-{
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
-}
-
-int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-	return 0;
+	return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
 static void swiotlb_print_info(unsigned long bytes)
@@ -189,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + bytes;
@@ -245,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
+		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+							order);
 		if (io_tlb_start)
 			break;
 		order--;
@@ -315,20 +281,10 @@ cleanup1:
 	return -ENOMEM;
 }
 
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
+static int is_swiotlb_buffer(phys_addr_t paddr)
 {
-	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
-}
-
-static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-	return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
-}
-
-static int is_swiotlb_buffer(char *addr)
-{
-	return addr >= io_tlb_start && addr < io_tlb_end;
+	return paddr >= virt_to_phys(io_tlb_start) &&
+		paddr < virt_to_phys(io_tlb_end);
 }
 
 /*
@@ -561,9 +517,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret &&
-	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
-				   size)) {
+	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 */
@@ -585,7 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
+	if (dev_addr + size > dma_mask) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
@@ -601,11 +555,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-		      dma_addr_t dma_handle)
+		      dma_addr_t dev_addr)
 {
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
 	WARN_ON(irqs_disabled());
-	if (!is_swiotlb_buffer(vaddr))
-		free_pages((unsigned long) vaddr, get_order(size));
+	if (!is_swiotlb_buffer(paddr))
+		free_pages((unsigned long)vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
 		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
@@ -625,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
 	       "device %s\n", size, dev ? dev_name(dev) : "?");
 
-	if (size > io_tlb_overflow && do_panic) {
-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-			panic("DMA: Memory would be corrupted\n");
-		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-			panic("DMA: Random memory would be DMAed\n");
-	}
+	if (size <= io_tlb_overflow || !do_panic)
+		return;
+
+	if (dir == DMA_BIDIRECTIONAL)
+		panic("DMA: Random memory could be DMA accessed\n");
+	if (dir == DMA_FROM_DEVICE)
+		panic("DMA: Random memory could be DMA written\n");
+	if (dir == DMA_TO_DEVICE)
+		panic("DMA: Random memory could be DMA read\n");
 }
 
 /*
@@ -646,7 +605,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    struct dma_attrs *attrs)
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
+	dma_addr_t dev_addr = phys_to_dma(dev, phys);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -655,8 +614,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(dev, dev_addr, size) &&
-	    !range_needs_mapping(phys, size))
+	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
@@ -673,7 +631,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(dev, dev_addr, size))
+	if (!dma_capable(dev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
@@ -691,19 +649,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 			 size_t size, int dir)
 {
-	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr)) {
-		do_unmap_single(hwdev, dma_addr, size, dir);
+	if (is_swiotlb_buffer(paddr)) {
+		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
 
-	dma_mark_clean(dma_addr, size);
+	/*
+	 * phys_to_virt doesn't work with highmem pages but we could
+	 * call dma_mark_clean() with a highmem page here. However, we
+	 * are fine since dma_mark_clean() is null on POWERPC. We can
+	 * make dma_mark_clean() take a physical address if necessary.
+	 */
+	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -728,19 +692,19 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr)) {
-		sync_single(hwdev, dma_addr, size, dir, target);
+	if (is_swiotlb_buffer(paddr)) {
+		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
 		return;
 	}
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
 
-	dma_mark_clean(dma_addr, size);
+	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void
@@ -817,10 +781,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		phys_addr_t paddr = sg_phys(sg);
-		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
+		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-		if (range_needs_mapping(paddr, sg->length) ||
-		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
+		if (swiotlb_force ||
+		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, sg_phys(sg),
 					       sg->length, dir);
 			if (!map) {
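Most of the churn above replaces the weak per-arch hooks with the common dma_capable() test. A rough sketch of the equivalent check, under the assumption that dma_capable() reduces to a DMA-mask comparison (the real helper lives in the DMA-mapping headers, not in this file):

    /* True when [addr, addr + size) is reachable through dev's DMA mask;
     * swiotlb bounces through its bounce buffer whenever this is false
     * or swiotlb_force is set. */
    static bool example_dma_capable(struct device *dev, dma_addr_t addr,
                                    size_t size)
    {
            return dev->dma_mask && addr + size <= *dev->dma_mask;
    }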
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 756ccafa9cec..cb8a112030bb 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -25,6 +25,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ioport.h>
+#include <net/addrconf.h>
 
 #include <asm/page.h>		/* for PAGE_SIZE */
 #include <asm/div64.h>
@@ -630,60 +631,156 @@ static char *resource_string(char *buf, char *end, struct resource *res,
 }
 
 static char *mac_address_string(char *buf, char *end, u8 *addr,
-				struct printf_spec spec)
+				struct printf_spec spec, const char *fmt)
 {
-	char mac_addr[6 * 3];	/* (6 * 2 hex digits), 5 colons and trailing zero */
+	char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
 	char *p = mac_addr;
 	int i;
 
 	for (i = 0; i < 6; i++) {
 		p = pack_hex_byte(p, addr[i]);
-		if (!(spec.flags & SPECIAL) && i != 5)
+		if (fmt[0] == 'M' && i != 5)
 			*p++ = ':';
 	}
 	*p = '\0';
-	spec.flags &= ~SPECIAL;
 
 	return string(buf, end, mac_addr, spec);
 }
 
-static char *ip6_addr_string(char *buf, char *end, u8 *addr,
-			     struct printf_spec spec)
+static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		char temp[3];	/* hold each IP quad in reverse order */
+		int digits = put_dec_trunc(temp, addr[i]) - temp;
+		if (leading_zeros) {
+			if (digits < 3)
+				*p++ = '0';
+			if (digits < 2)
+				*p++ = '0';
+		}
+		/* reverse the digits in the quad */
+		while (digits--)
+			*p++ = temp[digits];
+		if (i < 3)
+			*p++ = '.';
+	}
+
+	*p = '\0';
+	return p;
+}
+
+static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
 {
-	char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
-	char *p = ip6_addr;
 	int i;
+	int j;
+	int range;
+	unsigned char zerolength[8];
+	int longest = 1;
+	int colonpos = -1;
+	u16 word;
+	u8 hi;
+	u8 lo;
+	bool needcolon = false;
+	bool useIPv4 = ipv6_addr_v4mapped(addr) || ipv6_addr_is_isatap(addr);
+
+	memset(zerolength, 0, sizeof(zerolength));
+
+	if (useIPv4)
+		range = 6;
+	else
+		range = 8;
+
+	/* find position of longest 0 run */
+	for (i = 0; i < range; i++) {
+		for (j = i; j < range; j++) {
+			if (addr->s6_addr16[j] != 0)
+				break;
+			zerolength[i]++;
+		}
+	}
+	for (i = 0; i < range; i++) {
+		if (zerolength[i] > longest) {
+			longest = zerolength[i];
+			colonpos = i;
+		}
+	}
+
+	/* emit address */
+	for (i = 0; i < range; i++) {
+		if (i == colonpos) {
+			if (needcolon || i == 0)
+				*p++ = ':';
+			*p++ = ':';
+			needcolon = false;
+			i += longest - 1;
+			continue;
+		}
+		if (needcolon) {
+			*p++ = ':';
+			needcolon = false;
+		}
+		/* hex u16 without leading 0s */
+		word = ntohs(addr->s6_addr16[i]);
+		hi = word >> 8;
+		lo = word & 0xff;
+		if (hi) {
+			if (hi > 0x0f)
+				p = pack_hex_byte(p, hi);
+			else
+				*p++ = hex_asc_lo(hi);
+		}
+		if (hi || lo > 0x0f)
+			p = pack_hex_byte(p, lo);
+		else
+			*p++ = hex_asc_lo(lo);
+		needcolon = true;
+	}
+
+	if (useIPv4) {
+		if (needcolon)
+			*p++ = ':';
+		p = ip4_string(p, &addr->s6_addr[12], false);
+	}
+
+	*p = '\0';
+	return p;
+}
+
+static char *ip6_string(char *p, const struct in6_addr *addr, const char *fmt)
+{
+	int i;
 
 	for (i = 0; i < 8; i++) {
-		p = pack_hex_byte(p, addr[2 * i]);
-		p = pack_hex_byte(p, addr[2 * i + 1]);
-		if (!(spec.flags & SPECIAL) && i != 7)
+		p = pack_hex_byte(p, addr->s6_addr[2 * i]);
+		p = pack_hex_byte(p, addr->s6_addr[2 * i + 1]);
+		if (fmt[0] == 'I' && i != 7)
 			*p++ = ':';
 	}
+
 	*p = '\0';
-	spec.flags &= ~SPECIAL;
+	return p;
+}
+
+static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
+			     struct printf_spec spec, const char *fmt)
+{
+	char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
+
+	if (fmt[0] == 'I' && fmt[2] == 'c')
+		ip6_compressed_string(ip6_addr, (const struct in6_addr *)addr);
+	else
+		ip6_string(ip6_addr, (const struct in6_addr *)addr, fmt);
 
 	return string(buf, end, ip6_addr, spec);
 }
 
-static char *ip4_addr_string(char *buf, char *end, u8 *addr,
-			     struct printf_spec spec)
+static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
+			     struct printf_spec spec, const char *fmt)
 {
-	char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
-	char temp[3];	/* hold each IP quad in reverse order */
-	char *p = ip4_addr;
-	int i, digits;
+	char ip4_addr[sizeof("255.255.255.255")];
 
-	for (i = 0; i < 4; i++) {
-		digits = put_dec_trunc(temp, addr[i]) - temp;
-		/* reverse the digits in the quad */
-		while (digits--)
-			*p++ = temp[digits];
-		if (i != 3)
-			*p++ = '.';
-	}
-	*p = '\0';
-	spec.flags &= ~SPECIAL;
+	ip4_string(ip4_addr, addr, fmt[0] == 'i');
 
 	return string(buf, end, ip4_addr, spec);
 }
@@ -702,11 +799,15 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr,
  *       addresses (not the name nor the flags)
  * - 'M' For a 6-byte MAC address, it prints the address in the
  *       usual colon-separated hex notation
- * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated
- *       decimal for v4 and colon separated network-order 16 bit hex for v6)
- * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is
- *       currently the same
- *
+ * - 'm' For a 6-byte MAC address, it prints the hex address without colons
+ * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
+ *       IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
+ *       IPv6 uses colon separated network-order 16 bit hex with leading 0's
+ * - 'i' [46] for 'raw' IPv4/IPv6 addresses
+ *       IPv6 omits the colons (01020304...0f)
+ *       IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
+ * - 'I6c' for IPv6 addresses printed as specified by
+ *       http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
@@ -726,20 +827,24 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		return symbol_string(buf, end, ptr, spec, *fmt);
 	case 'R':
 		return resource_string(buf, end, ptr, spec);
-	case 'm':
-		spec.flags |= SPECIAL;
-		/* Fallthrough */
-	case 'M':
-		return mac_address_string(buf, end, ptr, spec);
-	case 'i':
-		spec.flags |= SPECIAL;
-		/* Fallthrough */
-	case 'I':
-		if (fmt[1] == '6')
-			return ip6_addr_string(buf, end, ptr, spec);
-		if (fmt[1] == '4')
-			return ip4_addr_string(buf, end, ptr, spec);
-		spec.flags &= ~SPECIAL;
+	case 'M':			/* Colon separated: 00:01:02:03:04:05 */
+	case 'm':			/* Contiguous: 000102030405 */
+		return mac_address_string(buf, end, ptr, spec, fmt);
+	case 'I':			/* Formatted IP supported
+					 * 4:	1.2.3.4
+					 * 6:	0001:0203:...:0708
+					 * 6c:	1::708 or 1::1.2.3.4
+					 */
+	case 'i':			/* Contiguous:
+					 * 4:	001.002.003.004
+					 * 6:	000102...0f
+					 */
+		switch (fmt[1]) {
+		case '6':
+			return ip6_addr_string(buf, end, ptr, spec, fmt);
+		case '4':
+			return ip4_addr_string(buf, end, ptr, spec, fmt);
+		}
+		break;
 	}
 	spec.flags |= SMALL;
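Putting the new specifiers together, a usage sketch with the expected output in comments (addresses are illustrative; the results follow the documentation block above):

    u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
    u8 ip4[4] = { 192, 168, 0, 1 };
    struct in6_addr ip6 = IN6ADDR_LOOPBACK_INIT;   /* from <linux/in6.h> */

    printk(KERN_INFO "%pM\n",  mac);    /* 00:01:02:03:04:05 */
    printk(KERN_INFO "%pm\n",  mac);    /* 000102030405 */
    printk(KERN_INFO "%pI4\n", ip4);    /* 192.168.0.1 */
    printk(KERN_INFO "%pi4\n", ip4);    /* 192.168.000.001 */
    printk(KERN_INFO "%pI6c\n", &ip6);  /* ::1 (longest zero run compressed) */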