author     Ingo Molnar <mingo@elte.hu>    2008-12-23 16:23:23 +0100
committer  Ingo Molnar <mingo@elte.hu>    2008-12-23 16:23:23 +0100
commit     1ccedb7cdba6886939dd8b4c8f965a826f696e56
tree       0f5fc519a68faca5318c296315c9b6c502907056 /lib
parent     a98f8fd24fb24fcb9a359553e64dd6aac5cf4279
parent     929096fe9ff1f4b3645cf3919527ab47e8d5e17c
download   linux-1ccedb7cdba6886939dd8b4c8f965a826f696e56.tar.bz2
Merge commit 'v2.6.28-rc9' into x86/apic
Diffstat (limited to 'lib')
-rw-r--r--   lib/dynamic_printk.c    6
-rw-r--r--   lib/idr.c              22
-rw-r--r--   lib/percpu_counter.c    7
-rw-r--r--   lib/scatterlist.c       2
-rw-r--r--   lib/swiotlb.c          10
5 files changed, 32 insertions, 15 deletions
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
index d83660fd6fdd..8e30295e8566 100644
--- a/lib/dynamic_printk.c
+++ b/lib/dynamic_printk.c
@@ -135,7 +135,7 @@ int unregister_dynamic_debug_module(char *mod_name)
 	nr_entries--;
 out:
 	up(&debug_list_mutex);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
 
@@ -289,7 +289,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 			dynamic_enabled = DYNAMIC_ENABLED_SOME;
 			err = 0;
 			printk(KERN_DEBUG
-				"debugging enabled for module %s",
+				"debugging enabled for module %s\n",
 				elem->name);
 		} else if (!value && (elem->enable == 1)) {
 			elem->enable = 0;
@@ -309,7 +309,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 				err = 0;
 				printk(KERN_DEBUG
 					"debugging disabled for module "
-					"%s", elem->name);
+					"%s\n", elem->name);
 			}
 		}
 	}
diff --git a/lib/idr.c b/lib/idr.c
index e728c7fccc4d..1c4f9281f412 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -218,8 +220,14 @@ build_up:
 	 */
 	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
-		if (!p->count)
+		if (!p->count) {
+			/* special case: if the tree is currently empty,
+			 * then we grow the tree by moving the top node
+			 * upwards.
+			 */
+			p->layer++;
 			continue;
+		}
 		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed. If we built part of
@@ -237,6 +245,7 @@ build_up:
 		}
 		new->ary[0] = p;
 		new->count = 1;
+		new->layer = layers-1;
 		if (p->bitmap == IDR_FULL)
 			__set_bit(0, &new->bitmap);
 		p = new;
@@ -493,17 +502,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +595,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a8663890a88c..b255b939bc1b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,10 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		*pcount = 0;
 	}
-	fbc->count = ret;
-
 	spin_unlock(&fbc->lock);
 	return ret;
 }
@@ -104,13 +101,13 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
-	free_percpu(fbc->counters);
-	fbc->counters = NULL;
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
 	mutex_unlock(&percpu_counters_lock);
 #endif
+	free_percpu(fbc->counters);
+	fbc->counters = NULL;
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff1352..b7b449dafbe5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 		miter->page = NULL;
 		miter->addr = NULL;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 78330c37a61b..5f6c629a924d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
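
The lib/idr.c hunks above make each idr_layer record its own depth, so idr_find() and idr_replace() derive the search depth from the top node they actually dereference instead of from the separate idp->layers counter. A minimal, self-contained sketch of that lookup pattern follows; the names (node, NODE_BITS, lookup) are invented for illustration and are not the kernel API.

#include <stddef.h>

#define NODE_BITS 5                        /* bits consumed per tree level */
#define NODE_MASK ((1 << NODE_BITS) - 1)

struct node {
	int layer;                         /* depth of this node; 0 == leaf level */
	void *ary[1 << NODE_BITS];         /* child nodes, or user pointers at layer 0 */
};

/* Walk the radix tree for 'id', deriving the depth from the top node itself. */
static void *lookup(struct node *top, unsigned int id)
{
	struct node *p = top;
	int n;

	if (!p)
		return NULL;
	n = (p->layer + 1) * NODE_BITS;    /* total id bits covered by this tree */
	if ((unsigned long long)id >= (1ULL << n))
		return NULL;               /* id lies outside the allocated space */

	while (n > 0 && p) {
		n -= NODE_BITS;
		p = p->ary[(id >> n) & NODE_MASK];
	}
	return p;                          /* layer-0 slots hold the stored pointers */
}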
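
The lib/swiotlb.c hunks switch the coherent-allocation path from the device-based address_needs_mapping() test to a check against an explicit DMA mask, defaulting to a 32-bit mask when the device provides no coherent_dma_mask. The test itself is plain range arithmetic: the buffer is usable only if its last byte lies at or below the mask. A hedged sketch of that check is below; check_dma_capable() is an illustrative stand-in, not the kernel's is_buffer_dma_capable() definition.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/* Buffer is DMA-capable under 'mask' only if its last byte is <= mask. */
static bool check_dma_capable(uint64_t mask, uint64_t bus_addr, size_t size)
{
	return bus_addr + size - 1 <= mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;   /* analogous to a 32-bit coherent mask */

	/* A 4 KiB buffer ending exactly at the 4 GiB boundary still fits... */
	bool ok  = check_dma_capable(mask32, 0xfffff000ULL, 4096);
	/* ...but one starting a byte higher spills past the mask. */
	bool bad = check_dma_capable(mask32, 0xfffff001ULL, 4096);

	return (ok && !bad) ? 0 : 1;
}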