author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-11 10:51:26 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-11 10:51:26 -0700
commit    | 8837c70d531a1788f975c366c254a5cb973a5291 (patch)
tree      | f7a719d01090efb3bc534f5b0d7f13ec87eecadb /drivers
parent    | b284d4d5a6785f8cd07eda2646a95782373cd01e (diff)
parent    | b93b016313b3ba8003c3b8bb71f569af91f19fc7 (diff)
download  | linux-8837c70d531a1788f975c366c254a5cb973a5291.tar.bz2
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
- almost all of the rest of MM
- kasan updates
- lots of procfs work
- misc things
- lib/ updates
- checkpatch
- rapidio
- ipc/shm updates
- the start of willy's XArray conversion
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (140 commits)
page cache: use xa_lock
xarray: add the xa_lock to the radix_tree_root
fscache: use appropriate radix tree accessors
export __set_page_dirty
unicore32: turn flush_dcache_mmap_lock into a no-op
arm64: turn flush_dcache_mmap_lock into a no-op
mac80211_hwsim: use DEFINE_IDA
radix tree: use GFP_ZONEMASK bits of gfp_t for flags
linux/const.h: refactor _BITUL and _BITULL a bit
linux/const.h: move UL() macro to include/linux/const.h
linux/const.h: prefix include guard of uapi/linux/const.h with _UAPI
xen, mm: allow deferred page initialization for xen pv domains
elf: enforce MAP_FIXED on overlaying elf segments
fs, elf: drop MAP_FIXED usage from elf_map
mm: introduce MAP_FIXED_NOREPLACE
MAINTAINERS: update bouncing aacraid@adaptec.com addresses
fs/dcache.c: add cond_resched() in shrink_dentry_list()
include/linux/kfifo.h: fix comment
ipc/shm.c: shm_split(): remove unneeded test for NULL shm_file_data.vm_ops
kernel/sysctl.c: add kdoc comments to do_proc_do{u}intvec_minmax_conv_param
...
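
The first two commits in this list ("page cache: use xa_lock" and "xarray: add the xa_lock to the radix_tree_root") move the page-cache spinlock into the tree root and rename the accessors, which is why the lustre hunks in the diff below trade spin_lock_irq(&mapping->tree_lock) for xa_lock_irq(&mapping->i_pages). A minimal userspace C sketch of that accessor pattern follows; the toy_* types, macros, and slot array are invented for illustration and are not the kernel's definitions.

/*
 * Userspace sketch only: struct toy_xarray / toy_mapping and these macro
 * names are illustrative stand-ins, not the kernel's types.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_xarray {
	pthread_mutex_t xa_lock;	/* the lock lives inside the tree root */
	void *slots[16];		/* stand-in for the radix tree slots */
};

#define xa_lock(xa)	pthread_mutex_lock(&(xa)->xa_lock)
#define xa_unlock(xa)	pthread_mutex_unlock(&(xa)->xa_lock)

struct toy_mapping {
	struct toy_xarray i_pages;	/* was: page_tree plus a separate tree_lock */
};

static void store_page(struct toy_mapping *mapping, unsigned int index, void *page)
{
	xa_lock(&mapping->i_pages);	/* was: spin_lock_irq(&mapping->tree_lock) */
	mapping->i_pages.slots[index] = page;
	xa_unlock(&mapping->i_pages);	/* was: spin_unlock_irq(&mapping->tree_lock) */
}

int main(void)
{
	struct toy_mapping m = { 0 };
	int page = 42;

	pthread_mutex_init(&m.i_pages.xa_lock, NULL);
	store_page(&m, 3, &page);
	printf("slot 3 holds %d\n", *(int *)m.i_pages.slots[3]);
	pthread_mutex_destroy(&m.i_pages.xa_lock);
	return 0;
}
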
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/base/memory.c                           |   7
-rw-r--r-- | drivers/net/wireless/mac80211_hwsim.c           |   2
-rw-r--r-- | drivers/of/unittest.c                           |   2
-rw-r--r-- | drivers/rapidio/devices/rio_mport_cdev.c        | 122
-rw-r--r-- | drivers/rapidio/rio-scan.c                      |   6
-rw-r--r-- | drivers/staging/lustre/lustre/llite/glimpse.c   |   2
-rw-r--r-- | drivers/staging/lustre/lustre/mdc/mdc_request.c |   8
7 files changed, 30 insertions, 119 deletions
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 79fcd2bae96b..bffe8616bd55 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -837,11 +837,8 @@ int __init memory_dev_init(void)
 	 * during boot and have been initialized
 	 */
 	mutex_lock(&mem_sysfs_mutex);
-	for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
-		/* Don't iterate over sections we know are !present: */
-		if (i > __highest_present_section_nr)
-			break;
-
+	for (i = 0; i <= __highest_present_section_nr;
+			i += sections_per_block) {
 		err = add_memory_block(i);
 		if (!ret)
 			ret = err;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6afe896e5cb8..96d26cfae90b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -253,7 +253,7 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
 
 static unsigned int hwsim_net_id;
 
-static struct ida hwsim_netgroup_ida = IDA_INIT;
+static DEFINE_IDA(hwsim_netgroup_ida);
 
 struct hwsim_net {
 	int netgroup;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 02c5984ab09b..6bb37c18292a 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -295,7 +295,7 @@ static void __init of_unittest_printf(void)
 		return;
 	}
 
-	num_to_str(phandle_str, sizeof(phandle_str), np->phandle);
+	num_to_str(phandle_str, sizeof(phandle_str), np->phandle, 0);
 
 	of_unittest_printf_one(np, "%pOF", full_name);
 	of_unittest_printf_one(np, "%pOFf", full_name);
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index cfb54e01d758..9d27016c899e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -212,7 +212,6 @@ struct mport_cdev_priv {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	struct dma_chan *dmach;
 	struct list_head async_list;
-	struct list_head pend_list;
 	spinlock_t req_lock;
 	struct mutex dma_lock;
 	struct kref dma_ref;
@@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
@@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+	struct kref refcount;
 	struct list_head node;
 	struct file *filp;
 	struct mport_cdev_priv *priv;
@@ -554,11 +552,6 @@ struct mport_dma_req {
 	struct completion req_comp;
 };
 
-struct mport_faf_work {
-	struct work_struct work;
-	struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
 	struct mport_dev *md =
@@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
 	complete(&priv->comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+			refcount);
 	struct mport_cdev_priv *priv = req->priv;
 	unsigned int i;
 
@@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
 	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
 					       NULL, NULL);
 	complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-	struct mport_faf_work *work = container_of(_work,
-				struct mport_faf_work, work);
-	struct mport_dma_req *req = work->req;
-
-	dma_req_free(req);
-	kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-	struct mport_dma_req *req = (struct mport_dma_req *)param;
-	struct mport_faf_work *work;
-
-	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work)
-		return;
-
-	INIT_WORK(&work->work, dma_faf_cleanup);
-	work->req = req;
-	queue_work(dma_wq, &work->work);
+	kref_put(&req->refcount, dma_req_free);
 }
 
 /*
@@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
 		goto err_out;
 	}
 
-	if (sync == RIO_TRANSFER_FAF)
-		tx->callback = dma_faf_callback;
-	else
-		tx->callback = dma_xfer_callback;
+	tx->callback = dma_xfer_callback;
 	tx->callback_param = req;
 
 	req->dmach = chan;
 	req->sync = sync;
 	req->status = DMA_IN_PROGRESS;
 	init_completion(&req->req_comp);
+	kref_get(&req->refcount);
 
 	cookie = dmaengine_submit(tx);
 	req->cookie = cookie;
@@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
 	if (dma_submit_error(cookie)) {
 		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
 			   cookie, xfer->rio_addr, xfer->length);
+		kref_put(&req->refcount, dma_req_free);
 		ret = -EIO;
 		goto err_out;
 	}
@@ -860,6 +831,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 	if (!req)
 		return -ENOMEM;
 
+	kref_init(&req->refcount);
+
 	ret = get_dma_channel(priv);
 	if (ret) {
 		kfree(req);
@@ -968,42 +941,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 	ret = do_dma_request(req, xfer, sync, nents);
 
 	if (ret >= 0) {
-		if (sync == RIO_TRANSFER_SYNC)
-			goto sync_out;
-		return ret; /* return ASYNC cookie */
-	}
-
-	if (ret == -ETIMEDOUT || ret == -EINTR) {
-		/*
-		 * This can happen only in case of SYNC transfer.
-		 * Do not free unfinished request structure immediately.
-		 * Place it into pending list and deal with it later
-		 */
-		spin_lock(&priv->req_lock);
-		list_add_tail(&req->node, &priv->pend_list);
-		spin_unlock(&priv->req_lock);
-		return ret;
+		if (sync == RIO_TRANSFER_ASYNC)
+			return ret; /* return ASYNC cookie */
+	} else {
+		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
 	}
-
-	rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
-sync_out:
-	dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
-	sg_free_table(&req->sgt);
 
 err_pg:
-	if (page_list) {
+	if (!req->page_list) {
 		for (i = 0; i < nr_pages; i++)
 			put_page(page_list[i]);
 		kfree(page_list);
 	}
 err_req:
-	if (req->map) {
-		mutex_lock(&md->buf_mutex);
-		kref_put(&req->map->ref, mport_release_mapping);
-		mutex_unlock(&md->buf_mutex);
-	}
-	put_dma_channel(priv);
-	kfree(req);
+	kref_put(&req->refcount, dma_req_free);
 	return ret;
 }
 
@@ -1121,7 +1072,7 @@ static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
 	ret = 0;
 
 	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
-		dma_req_free(req);
+		kref_put(&req->refcount, dma_req_free);
 
 	return ret;
 
@@ -1966,7 +1917,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
 
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	INIT_LIST_HEAD(&priv->async_list);
-	INIT_LIST_HEAD(&priv->pend_list);
 	spin_lock_init(&priv->req_lock);
 	mutex_init(&priv->dma_lock);
 #endif
@@ -2006,8 +1956,6 @@ static void mport_cdev_release_dma(struct file *filp)
 
 	md = priv->md;
 
-	flush_workqueue(dma_wq);
-
 	spin_lock(&priv->req_lock);
 	if (!list_empty(&priv->async_list)) {
 		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
@@ -2023,20 +1971,7 @@ static void mport_cdev_release_dma(struct file *filp)
 				   req->filp, req->cookie,
 				   completion_done(&req->req_comp)?"yes":"no");
 			list_del(&req->node);
-			dma_req_free(req);
-		}
-	}
-
-	if (!list_empty(&priv->pend_list)) {
-		rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
-			   filp, current->comm, task_pid_nr(current));
-		list_for_each_entry_safe(req,
-					 req_next, &priv->pend_list, node) {
-			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
-				   req->filp, req->cookie,
-				   completion_done(&req->req_comp)?"yes":"no");
-			list_del(&req->node);
-			dma_req_free(req);
+			kref_put(&req->refcount, dma_req_free);
 		}
 	}
 
@@ -2048,15 +1983,6 @@ static void mport_cdev_release_dma(struct file *filp)
 			current->comm, task_pid_nr(current), wret);
 	}
 
-	spin_lock(&priv->req_lock);
-
-	if (!list_empty(&priv->pend_list)) {
-		rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
-			filp, current->comm, task_pid_nr(current));
-	}
-
-	spin_unlock(&priv->req_lock);
-
 	if (priv->dmach != priv->md->dma_chan) {
 		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
 			filp, current->comm, task_pid_nr(current));
@@ -2573,8 +2499,6 @@ static void mport_cdev_remove(struct mport_dev *md)
 	cdev_device_del(&md->cdev, &md->dev);
 	mport_cdev_kill_fasync(md);
 
-	flush_workqueue(dma_wq);
-
 	/* TODO: do we need to give clients some time to close file
 	 * descriptors? Simple wait for XX, or kref?
 	 */
@@ -2691,17 +2615,8 @@ static int __init mport_init(void)
 		goto err_cli;
 	}
 
-	dma_wq = create_singlethread_workqueue("dma_wq");
-	if (!dma_wq) {
-		rmcd_error("failed to create DMA work queue");
-		ret = -ENOMEM;
-		goto err_wq;
-	}
-
 	return 0;
 
-err_wq:
-	class_interface_unregister(&rio_mport_interface);
 err_cli:
 	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
 err_chr:
@@ -2717,7 +2632,6 @@ static void __exit mport_exit(void)
 	class_interface_unregister(&rio_mport_interface);
 	class_destroy(dev_class);
 	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
-	destroy_workqueue(dma_wq);
 }
 
 module_init(mport_init);
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 23429bdaca84..161b927d9de1 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -76,7 +76,7 @@ static u16 rio_destid_alloc(struct rio_net *net)
 }
 
 /**
- * rio_destid_reserve - Reserve the specivied destID
+ * rio_destid_reserve - Reserve the specified destID
  * @net: RIO network
  * @destid: destID to reserve
  *
@@ -885,7 +885,7 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport,
  *
 * For each enumerated device, ensure that each switch in a system
 * has correct routing entries. Add routes for devices that where
- * unknown dirung the first enumeration pass through the switch.
+ * unknown during the first enumeration pass through the switch.
 */
 static void rio_update_route_tables(struct rio_net *net)
 {
@@ -983,7 +983,7 @@ static int rio_enum_mport(struct rio_mport *mport, u32 flags)
 		/* reserve mport destID in new net */
 		rio_destid_reserve(net, mport->host_deviceid);
 
-		/* Enable Input Output Port (transmitter reviever) */
+		/* Enable Input Output Port (transmitter receiver) */
 		rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
 
 		/* Set component tag for host */
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index c43ac574274c..3075358f3f08 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -69,7 +69,7 @@ blkcnt_t dirty_cnt(struct inode *inode)
 	void *results[1];
 
 	if (inode->i_mapping)
-		cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
+		cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->i_pages,
 						  results, 0, 1,
 						  PAGECACHE_TAG_DIRTY);
 	if (cnt == 0 && atomic_read(&vob->vob_mmap_cnt) > 0)
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 3b1c8e5a3053..8ee7b4d273b2 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -934,14 +934,14 @@ static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
 	struct page *page;
 	int found;
 
-	spin_lock_irq(&mapping->tree_lock);
-	found = radix_tree_gang_lookup(&mapping->page_tree,
+	xa_lock_irq(&mapping->i_pages);
+	found = radix_tree_gang_lookup(&mapping->i_pages,
 				       (void **)&page, offset, 1);
 	if (found > 0 && !radix_tree_exceptional_entry(page)) {
 		struct lu_dirpage *dp;
 
 		get_page(page);
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		/*
 		 * In contrast to find_lock_page() we are sure that directory
 		 * page cannot be truncated (while DLM lock is held) and,
@@ -989,7 +989,7 @@ static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
 			page = ERR_PTR(-EIO);
 		}
 	} else {
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		page = NULL;
 	}
 	return page;
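
The rio_mport_cdev.c hunks above replace the dma_wq workqueue and the pend_list bookkeeping with a per-request reference count: rio_dma_transfer() starts the request at one reference with kref_init(), do_dma_request() takes an extra reference for the DMA completion callback, and the callback, the error paths, and the release paths each drop their own with kref_put(&req->refcount, dma_req_free), so whichever user finishes last frees the request. A minimal userspace C sketch of that ownership pattern follows; the toy_* names and the req_get()/req_put() helpers are invented stand-ins for the kernel's kref API, not the driver's actual code.

/*
 * Userspace sketch only: toy_dma_req and req_get()/req_put() are
 * illustrative stand-ins for struct mport_dma_req and kref_get()/kref_put().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_dma_req {
	atomic_int refcount;
	int cookie;
};

static void req_free(struct toy_dma_req *req)
{
	printf("last reference dropped, freeing request (cookie=%d)\n", req->cookie);
	free(req);
}

static void req_get(struct toy_dma_req *req)
{
	atomic_fetch_add(&req->refcount, 1);		/* kref_get() */
}

static void req_put(struct toy_dma_req *req)
{
	if (atomic_fetch_sub(&req->refcount, 1) == 1)	/* kref_put() */
		req_free(req);
}

/* stands in for dma_xfer_callback(): the completion path drops its reference */
static void toy_dma_callback(void *param)
{
	req_put(param);
}

int main(void)
{
	struct toy_dma_req *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	atomic_init(&req->refcount, 1);	/* kref_init() when the request is created */
	req->cookie = 7;

	req_get(req);			/* extra reference taken before submitting the transfer */
	toy_dma_callback(req);		/* completion fires: callback's put */

	req_put(req);			/* submitter's final put frees the request */
	return 0;
}
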