commit     776edb59317ada867dfcddde40b55648beeb0078 (patch)
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-06-03 12:57:53 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-03 12:57:53 -0700
tree       f6a6136374642323cfefd7d6399ea429f9018ade /drivers
parent     59a3d4c3631e553357b7305dc09db1990aa6757c
parent     3cf2f34e1a3d4d5ff209d087925cf950e52f4805
download   linux-776edb59317ada867dfcddde40b55648beeb0078.tar.bz2
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
"The main changes in this cycle were:
- reduced/streamlined smp_mb__*() interface that allows more usecases
and makes the existing ones less buggy, especially in rarer
architectures
- add rwsem implementation comments
- bump up lockdep limits"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
rwsem: Add comments to explain the meaning of the rwsem's count field
lockdep: Increase static allocations
arch: Mass conversion of smp_mb__*()
arch,doc: Convert smp_mb__*()
arch,xtensa: Convert smp_mb__*()
arch,x86: Convert smp_mb__*()
arch,tile: Convert smp_mb__*()
arch,sparc: Convert smp_mb__*()
arch,sh: Convert smp_mb__*()
arch,score: Convert smp_mb__*()
arch,s390: Convert smp_mb__*()
arch,powerpc: Convert smp_mb__*()
arch,parisc: Convert smp_mb__*()
arch,openrisc: Convert smp_mb__*()
arch,mn10300: Convert smp_mb__*()
arch,mips: Convert smp_mb__*()
arch,metag: Convert smp_mb__*()
arch,m68k: Convert smp_mb__*()
arch,m32r: Convert smp_mb__*()
arch,ia64: Convert smp_mb__*()
...
Diffstat (limited to 'drivers')
43 files changed, 151 insertions, 151 deletions
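The smp_mb__*() changes below are purely mechanical. As a minimal sketch of the new generic barrier pair around a non-value-returning atomic (hypothetical refcount and function names, not code from this diff):

```c
/*
 * Hypothetical example only; the names below are illustrative and do not
 * appear in this diff.
 */
#include <linux/atomic.h>

static atomic_t pending_ops;

static void pending_ops_inc(void)
{
	/* Old style: smp_mb__before_atomic_inc() / smp_mb__after_atomic_inc() */
	smp_mb__before_atomic();	/* order prior stores before the increment */
	atomic_inc(&pending_ops);
	smp_mb__after_atomic();		/* order the increment before later accesses */
}
```

The same generic pair now covers atomic_inc(), atomic_dec(), set_bit(), clear_bit() and the other non-value-returning atomics, which is what lets the diff below retire the per-operation macros.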
Every hunk replaces one of the retired barrier macros (smp_mb__before/after_atomic_inc(),
smp_mb__before/after_atomic_dec(), smp_mb__before/after_clear_bit()) with the generic
smp_mb__before_atomic()/smp_mb__after_atomic(). Converted call sites, by file:

drivers/base/power/domain.c
    genpd_sd_counter_inc(): smp_mb__after_atomic_inc() -> smp_mb__after_atomic()
drivers/cpuidle/coupled.c
    cpuidle_coupled_parallel_barrier(): smp_mb__before_atomic_inc() -> smp_mb__before_atomic()
drivers/firewire/ohci.c
    ohci_flush_iso_completions(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/gpu/drm/drm_irq.c
    vblank_disable_and_save(), drm_update_vblank_count(), drm_handle_vblank():
    smp_mb__before/after_atomic_inc() -> smp_mb__before/after_atomic()
drivers/gpu/drm/i915/i915_irq.c
    i915_error_work_func(): smp_mb__before_atomic_inc() -> smp_mb__before_atomic()
drivers/md/bcache/bcache.h
    cached_dev_get(): smp_mb__after_atomic_inc() -> smp_mb__after_atomic()
drivers/md/bcache/closure.h
    set_closure_fn(): smp_mb__before_atomic_dec() -> smp_mb__before_atomic()
drivers/md/dm-bufio.c
    write_endio(), read_endio(): smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
drivers/md/dm-snap.c
    free_pending_exception(): smp_mb__before_atomic_dec() -> smp_mb__before_atomic()
    merge_shutdown(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/md/dm.c
    dm_queue_flush(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/md/raid5.c
    raid5_unplug(): smp_mb__before_clear_bit() -> smp_mb__before_atomic()
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
    dvb_usb_stop_feed(), dvb_usb_fe_init(), ADAP_SLEEP error path:
    smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
    bnx2x_nic_load(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
    bnx2x_schedule_sp_rtnl(): smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
    bnx2x_sp_event(), bnx2x_after_function_update(), the next_spqe: loop, bnx2x_drv_ctl():
    smp_mb__before/after_atomic_inc() and smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
    bnx2x_raw_clear_pending()/set_pending(), bnx2x_set_rx_mode_e1x(), bnx2x_mcast_clear_sched()/set_sched(),
    bnx2x_queue_state_change(), bnx2x_queue_comp_cmd(), bnx2x_func_state_change_comp(),
    bnx2x_func_state_change(): smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
    bnx2x_vf_handle_filters_eqe(), bnx2x_schedule_iov_task():
    smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
drivers/net/ethernet/broadcom/cnic.c
    cnic_close_prep(), cnic_abort_prep(), cnic_cm_destroy(), cnic_cm_process_kcqe():
    smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
drivers/net/ethernet/brocade/bna/bnad.c
    bnad_tx_complete(), bnad_tx_cleanup(), bnad_start_xmit():
    smp_mb__before_clear_bit() -> smp_mb__before_atomic()
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
    cxgb_close(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/net/ethernet/chelsio/cxgb3/sge.c
    check_desc_avail(), restart_ctrlq(), TXQ_OFLD stop path:
    smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/net/ethernet/chelsio/cxgb4/sge.c
    sge_rx_timer_cb(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
    sge_rx_timer_cb(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/net/ethernet/freescale/gianfar.c
    stop_gfar(), startup_gfar(): smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
drivers/net/ethernet/intel/i40e/i40e_main.c
    i40e_service_event_complete(): smp_mb__before_clear_bit() -> smp_mb__before_atomic()
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
    ixgbe_service_event_complete(), ixgbe_up_complete(), ixgbe_resume(), ixgbe_io_slot_reset():
    smp_mb__before_clear_bit() -> smp_mb__before_atomic()
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
    ixgbevf_up_complete(), ixgbevf_resume(), ixgbevf_io_slot_reset():
    smp_mb__before_clear_bit() -> smp_mb__before_atomic()
drivers/net/wireless/ti/wlcore/main.c
    wlcore_irq_locked(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/pci/xen-pcifront.c
    pcifront_do_aer(): smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
drivers/scsi/isci/remote_device.c
    isci_remote_device_release(): smp_mb__before_clear_bit() -> smp_mb__before_atomic()
drivers/target/loopback/tcm_loop.c
    tcm_loop_port_link()/port_unlink(): smp_mb__after_atomic_inc()/dec() -> smp_mb__after_atomic()
drivers/target/target_core_alua.c
    target_emulate_set_target_port_groups(), core_alua_do_transition_tg_pt_work(),
    core_alua_do_transition_tg_pt(), core_alua_do_port_transition():
    smp_mb__after_atomic_inc()/dec() -> smp_mb__after_atomic()
drivers/target/target_core_device.c
    core_get_se_deve_from_rtpi(), core_dev_add/del_initiator_node_lun_acl():
    smp_mb__after_atomic_inc()/dec() -> smp_mb__after_atomic()
drivers/target/target_core_iblock.c
    iblock_bio_done(): smp_mb__after_atomic_inc() -> smp_mb__after_atomic()
drivers/target/target_core_pr.c
    __core_scsi3_alloc_registration(), __core_scsi3_locate_pr_reg(), core_scsi3_put_pr_reg(),
    core_scsi3_tpg_undepend_item(), core_scsi3_nodeacl_undepend_item(), core_scsi3_lunacl_undepend_item(),
    core_scsi3_decode_spec_i_port(), core_scsi3_emulate_pro_register_and_move(),
    core_scsi3_pri_read_full_status(): smp_mb__after_atomic_inc()/dec() -> smp_mb__after_atomic()
drivers/target/target_core_transport.c
    target_qf_do_work(), transport_check_alloc_task_attr(), target_handle_task_attr(),
    transport_complete_task_attr(), transport_handle_queue_full(), transport_send_task_abort():
    smp_mb__after_atomic_inc()/dec() -> smp_mb__after_atomic()
drivers/target/target_core_ua.c
    core_scsi3_ua_allocate(), core_scsi3_ua_release_all(), core_scsi3_ua_for_check_condition(),
    core_scsi3_ua_clear_for_request_sense(): smp_mb__after_atomic_inc()/dec() -> smp_mb__after_atomic()
drivers/tty/n_tty.c
    canon_copy_from_read_buf(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/tty/serial/mxs-auart.c
    dma_tx_callback(), mxs_auart_tx_chars(): smp_mb__after_clear_bit() -> smp_mb__after_atomic()
drivers/usb/gadget/tcm_usb_gadget.c
    usbg_port_link()/port_unlink(): smp_mb__after_atomic_inc()/dec() -> smp_mb__after_atomic()
drivers/usb/serial/usb_wwan.c
    usb_wwan_outdat_callback(): smp_mb__before_clear_bit() -> smp_mb__before_atomic()
drivers/vhost/scsi.c
    vhost_scsi_set_endpoint(): smp_mb__after_atomic_inc() -> smp_mb__after_atomic()
drivers/w1/w1_family.c
    __w1_family_get(): smp_mb__before/after_atomic_inc() -> smp_mb__before/after_atomic()
drivers/xen/xen-pciback/pciback_ops.c
    xen_pcibk_do_op(): smp_mb__before/after_clear_bit() -> smp_mb__before/after_atomic()
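Many of the clear_bit() conversions above sit in the common flag-clear/wake pattern, where the barrier orders the bit update against the wake-up. A minimal sketch with hypothetical flag and function names (not taken from this diff):

```c
/* Hypothetical flag-clear/wake path; names are illustrative only. */
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/wait.h>

#define MYDEV_BUSY	0	/* assumed flag bit, not from the diff */

static unsigned long mydev_flags;

static void mydev_op_done(void)
{
	clear_bit(MYDEV_BUSY, &mydev_flags);
	/*
	 * Was smp_mb__after_clear_bit(): make the cleared bit visible
	 * before waking anyone sleeping in wait_on_bit().
	 */
	smp_mb__after_atomic();
	wake_up_bit(&mydev_flags, MYDEV_BUSY);
}
```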