author    Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 13:34:31 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 13:34:31 -0800
commit    60f7c503d971a731ee3c4f884a9f2e80d476730d (patch)
tree      30e23c822306b0e407f6218135feb5b2e847665d /drivers/scsi/ufs/ufshcd.c
parent    69f637c33560b02ae7313e0c142d847361cc723a (diff)
parent    be1b500212541a70006887bae558ff834d7365d0 (diff)
download  linux-60f7c503d971a731ee3c4f884a9f2e80d476730d.tar.bz2
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This consists of the usual driver updates (ufs, qla2xxx, smartpqi,
  target, zfcp, fnic, mpt3sas, ibmvfc) plus a load of cleanups, a major
  power management rework and a load of assorted minor updates.

  There are a few core updates (formatting fixes being the big one) but
  nothing major this cycle"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (279 commits)
  scsi: mpt3sas: Update driver version to 36.100.00.00
  scsi: mpt3sas: Handle trigger page after firmware update
  scsi: mpt3sas: Add persistent MPI trigger page
  scsi: mpt3sas: Add persistent SCSI sense trigger page
  scsi: mpt3sas: Add persistent Event trigger page
  scsi: mpt3sas: Add persistent Master trigger page
  scsi: mpt3sas: Add persistent trigger pages support
  scsi: mpt3sas: Sync time periodically between driver and firmware
  scsi: qla2xxx: Update version to 10.02.00.104-k
  scsi: qla2xxx: Fix device loss on 4G and older HBAs
  scsi: qla2xxx: If fcport is undergoing deletion complete I/O with retry
  scsi: qla2xxx: Fix the call trace for flush workqueue
  scsi: qla2xxx: Fix flash update in 28XX adapters on big endian machines
  scsi: qla2xxx: Handle aborts correctly for port undergoing deletion
  scsi: qla2xxx: Fix N2N and NVMe connect retry failure
  scsi: qla2xxx: Fix FW initialization error on big endian machines
  scsi: qla2xxx: Fix crash during driver load on big endian machines
  scsi: qla2xxx: Fix compilation issue in PPC systems
  scsi: qla2xxx: Don't check for fw_started while posting NVMe command
  scsi: qla2xxx: Tear down session if FW say it is down
  ...
Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 500
1 file changed, 338 insertions(+), 162 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0c148fcd24de..9902b7e3aa4a 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -163,6 +163,11 @@ struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+ /*
+ * For DeepSleep, the link is first put in hibern8 and then off.
+ * Leaving the link in hibern8 is not supported.
+ */
+ {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
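Each row of ufs_pm_lvl_states[] pairs a device power mode with a link state, and a UFS PM level is simply an index into this table, so the new DeepSleep row defines one more level. A rough sketch of how a host driver might opt in, assuming the UFSHCD_CAP_DEEPSLEEP capability flag and the UFS_PM_LVL_6 index that this series adds elsewhere (neither is visible in this hunk):

/*
 * Illustrative only: advertise DeepSleep support and select the new
 * table row (DeepSleep device mode + link off) for system suspend.
 * UFSHCD_CAP_DEEPSLEEP and UFS_PM_LVL_6 are assumed from the rest of
 * this series, not shown in this hunk.
 */
static void example_enable_deepsleep(struct ufs_hba *hba)
{
	hba->caps |= UFSHCD_CAP_DEEPSLEEP;
	hba->spm_lvl = UFS_PM_LVL_6;
}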
@@ -221,8 +226,6 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
- bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -245,6 +248,8 @@ static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
@@ -348,7 +353,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
unsigned int tag, const char *str)
{
sector_t lba = -1;
- u8 opcode = 0;
+ u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
@@ -374,13 +379,20 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
lba = cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ if (opcode == WRITE_10)
+ group_id = lrbp->cmd->cmnd[6];
+ } else if (opcode == UNMAP) {
+ if (cmd->request) {
+ lba = scsi_get_lba(cmd);
+ transfer_len = blk_rq_bytes(cmd->request);
+ }
}
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
trace_ufshcd_command(dev_name(hba->dev), str, tag,
- doorbell, transfer_len, intr, lba, opcode);
+ doorbell, transfer_len, intr, lba, opcode, group_id);
}
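WRITE(10) carries a GROUP NUMBER field in CDB byte 6, which the trace now records as group_id (the driver stores the raw byte; SBC defines the field as the low bits of that byte). A minimal userspace sketch of the decode, runnable as-is:

#include <stdint.h>
#include <stdio.h>

/* Decode the GROUP NUMBER carried in byte 6 of a WRITE(10) CDB --
 * the value the hunk above stores in group_id. The 0x3f mask follows
 * SBC's 6-bit field definition. */
static uint8_t write10_group_number(const uint8_t cdb[10])
{
	return cdb[6] & 0x3f;
}

int main(void)
{
	/* WRITE(10), LBA 0, group number 5, transfer length 8 blocks */
	uint8_t cdb[10] = { 0x2a, 0, 0, 0, 0, 0, 0x05, 0, 0x08, 0 };

	printf("group number = %u\n", write10_group_number(cdb));
	return 0;
}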
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
@@ -399,20 +411,25 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
}
}
-static void ufshcd_print_err_hist(struct ufs_hba *hba,
- struct ufs_err_reg_hist *err_hist,
- char *err_name)
+static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
+ char *err_name)
{
int i;
bool found = false;
+ struct ufs_event_hist *e;
- for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
- int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
+ if (id >= UFS_EVT_CNT)
+ return;
- if (err_hist->tstamp[p] == 0)
+ e = &hba->ufs_stats.event[id];
+
+ for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
+ int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
+
+ if (e->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
- err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+ e->val[p], ktime_to_us(e->tstamp[p]));
found = true;
}
@@ -420,26 +437,26 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
dev_err(hba->dev, "No record of %s\n", err_name);
}
-static void ufshcd_print_host_regs(struct ufs_hba *hba)
+static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
- "auto_hibern8_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
- "link_startup_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
- "suspend_fail");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
- ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
+ ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
+ ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
+ ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
+ ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
+ ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
+ ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
+ "auto_hibern8_err");
+ ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
+ ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
+ "link_startup_fail");
+ ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
+ ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
+ "suspend_fail");
+ ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
+ ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
+ ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
ufshcd_vops_dbg_register_dump(hba);
}
@@ -1102,7 +1119,6 @@ out:
*/
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
- #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
@@ -1113,16 +1129,16 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
memcpy(&new_pwr_info, &hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
- if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
- || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
+ if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
+ hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
/* save the current power mode */
memcpy(&hba->clk_scaling.saved_pwr_info.info,
&hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
/* scale down gear */
- new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
- new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+ new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
+ new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
}
}
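The scale-down floor is no longer hard-coded to HS-G1: gear scaling now bottoms out at hba->clk_scaling.min_gear, which ufshcd_init_clk_scaling() defaults to UFS_HS_G1 (see the hunk at -1837 below). A hedged sketch of a vendor driver raising the floor -- the field is real in this series, the init-callback placement is an assumption:

static int example_vops_init(struct ufs_hba *hba)
{
	/* This host cannot run reliably below HS-G2, so never scale lower. */
	hba->clk_scaling.min_gear = UFS_HS_G2;
	return 0;
}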
@@ -1555,6 +1571,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
ufshcd_enable_irq(hba);
@@ -1714,12 +1731,10 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_disable_irq(hba);
- if (!ufshcd_is_link_active(hba))
- ufshcd_setup_clocks(hba, false);
- else
- /* If link is active, device ref_clk can't be switched off */
- __ufshcd_setup_clocks(hba, false, true);
+ ufshcd_setup_clocks(hba, false);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
/*
* In case you are here to cancel this work the gating state
* would be marked as REQ_CLKS_ON. In this case keep the state
@@ -1751,8 +1766,9 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done)
+ hba->outstanding_tasks ||
+ hba->active_uic_cmd || hba->uic_async_done ||
+ hba->clk_gating.state == CLKS_OFF)
return;
hba->clk_gating.state = REQ_CLKS_OFF;
@@ -1814,19 +1830,19 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
return -EINVAL;
value = !!value;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
if (value == hba->clk_gating.is_enabled)
goto out;
- if (value) {
- ufshcd_release(hba);
- } else {
- spin_lock_irqsave(hba->host->host_lock, flags);
+ if (value)
+ __ufshcd_release(hba);
+ else
hba->clk_gating.active_reqs++;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
hba->clk_gating.is_enabled = value;
out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
@@ -1837,6 +1853,9 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ if (!hba->clk_scaling.min_gear)
+ hba->clk_scaling.min_gear = UFS_HS_G1;
+
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
@@ -1874,7 +1893,7 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
hba->host->host_no);
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI);
hba->clk_gating.is_enabled = true;
@@ -2557,6 +2576,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
(hba->clk_gating.state != CLKS_ON));
lrbp = &hba->lrb[tag];
+ if (unlikely(lrbp->in_use)) {
+ if (hba->pm_op_in_progress)
+ set_host_byte(cmd, DID_BAD_TARGET);
+ else
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ ufshcd_release(hba);
+ goto out;
+ }
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
@@ -2799,6 +2826,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
init_completion(&wait);
lrbp = &hba->lrb[tag];
+ if (unlikely(lrbp->in_use)) {
+ err = -EBUSY;
+ goto out;
+ }
+
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
@@ -2815,6 +2847,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+out:
ufshcd_add_query_upiu_trace(hba, tag,
err ? "query_complete_err" : "query_complete");
@@ -2960,14 +2993,14 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
if (!attr_val) {
dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
__func__, opcode);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ ufshcd_hold(hba, false);
+
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -2999,7 +3032,6 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
-out:
ufshcd_release(hba);
return err;
}
@@ -3051,21 +3083,20 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
if (!desc_buf) {
dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
__func__, opcode);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
__func__, *buf_len);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ ufshcd_hold(hba, false);
+
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3100,7 +3131,6 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
-out:
ufshcd_release(hba);
return err;
}
@@ -3601,6 +3631,22 @@ static int ufshcd_dme_reset(struct ufs_hba *hba)
return ret;
}
+int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
+ int agreed_gear,
+ int adapt_val)
+{
+ int ret;
+
+ if (agreed_gear != UFS_HS_G4)
+ adapt_val = PA_NO_ADAPT;
+
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ adapt_val);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
+
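ADAPT is only defined for HS Gear 4, so the new helper forces PA_NO_ADAPT whenever the agreed gear is anything else, and otherwise programs the caller's value into the PA_TxHSAdaptType attribute. A sketch of a plausible caller, assuming the PRE_CHANGE power-mode-notify shape and the PA_INITIAL_ADAPT constant from unipro.h (neither is shown in this hunk):

/* Illustrative caller: request initial ADAPT before switching power mode. */
static int example_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (status == PRE_CHANGE)
		return ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
						  PA_INITIAL_ADAPT);
	return 0;
}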
/**
* ufshcd_dme_enable - UIC command for DME_ENABLE
* @hba: per adapter instance
@@ -3854,7 +3900,7 @@ out:
if (ret) {
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
}
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4350,8 +4396,10 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
*/
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
- int retry;
+ int retry_outer = 3;
+ int retry_inner;
+start:
if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
ufshcd_hba_stop(hba);
@@ -4377,13 +4425,17 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
- retry = 50;
+ retry_inner = 50;
while (ufshcd_is_hba_active(hba)) {
- if (retry) {
- retry--;
+ if (retry_inner) {
+ retry_inner--;
} else {
dev_err(hba->dev,
"Controller enable failed\n");
+ if (retry_outer) {
+ retry_outer--;
+ goto start;
+ }
return -EIO;
}
usleep_range(1000, 1100);
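Controller enable now retries at two levels: the inner loop polls up to 50 times for the controller to become active, and on exhaustion the whole enable sequence restarts up to 3 more times before giving up with -EIO. A standalone sketch of that pattern (the stand-in poll and the counts mirror the hunk; the driver sleeps 1000-1100 us between polls):

#include <stdbool.h>
#include <stdio.h>

static int polls;

/* Stand-in for ufshcd_is_hba_active(): comes up after ~120 polls here. */
static bool controller_active(void)
{
	return ++polls > 120;
}

static int enable_with_retries(void)
{
	for (int retry_outer = 3; ; retry_outer--) {
		int retry_inner = 50;

		/* Inner poll: up to 50 checks per enable attempt. */
		while (retry_inner-- && !controller_active())
			;	/* the driver does usleep_range(1000, 1100) here */
		if (retry_inner >= 0)
			return 0;	/* controller became active */
		if (!retry_outer)
			return -1;	/* -EIO after the initial try + 3 restarts */
	}
}

int main(void)
{
	printf("enable %s\n", enable_with_retries() ? "failed" : "succeeded");
	return 0;
}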
@@ -4460,14 +4512,21 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
return ufshcd_disable_tx_lcc(hba, true);
}
-void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
- u32 reg)
+void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
- reg_hist->reg[reg_hist->pos] = reg;
- reg_hist->tstamp[reg_hist->pos] = ktime_get();
- reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
+ struct ufs_event_hist *e;
+
+ if (id >= UFS_EVT_CNT)
+ return;
+
+ e = &hba->ufs_stats.event[id];
+ e->val[e->pos] = val;
+ e->tstamp[e->pos] = ktime_get();
+ e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
+
+ ufshcd_vops_event_notify(hba, id, &val);
}
-EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
+EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
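The per-event history is a fixed-length ring: each update stores the value and a timestamp at pos and advances pos modulo UFS_EVENT_HIST_LENGTH, and ufshcd_print_evt() above walks the same ring starting at pos so entries come out oldest-first. A self-contained sketch of the data structure (HIST_LEN and the int64 timestamps are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define HIST_LEN 8	/* stand-in for UFS_EVENT_HIST_LENGTH */

/* Minimal stand-in for struct ufs_event_hist. */
struct event_hist {
	uint32_t val[HIST_LEN];
	int64_t tstamp[HIST_LEN];	/* 0 == slot never written */
	int pos;
};

/* Record one event, overwriting the oldest slot once the ring is full. */
static void hist_record(struct event_hist *e, uint32_t val, int64_t now)
{
	e->val[e->pos] = val;
	e->tstamp[e->pos] = now;
	e->pos = (e->pos + 1) % HIST_LEN;
}

/* Starting the walk at pos visits surviving entries in write order. */
static void hist_print(const struct event_hist *e, const char *name)
{
	for (int i = 0; i < HIST_LEN; i++) {
		int p = (i + e->pos) % HIST_LEN;

		if (e->tstamp[p] == 0)
			continue;
		printf("%s[%d] = 0x%x at %lld us\n", name, p,
		       e->val[p], (long long)e->tstamp[p]);
	}
}

int main(void)
{
	struct event_hist e = { 0 };

	for (int64_t t = 1; t <= 10; t++)	/* 10 events into 8 slots */
		hist_record(&e, (uint32_t)(0x100 + t), t);
	hist_print(&e, "demo_err");
	return 0;
}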
/**
* ufshcd_link_startup - Initialize unipro link startup
@@ -4496,7 +4555,8 @@ link_startup:
/* check if device is detected by inter-connect layer */
if (!ret && !ufshcd_is_device_present(hba)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ ufshcd_update_evt_hist(hba,
+ UFS_EVT_LINK_STARTUP_FAIL,
0);
dev_err(hba->dev, "%s: Device not present\n", __func__);
ret = -ENXIO;
@@ -4509,7 +4569,8 @@ link_startup:
* succeeds. So reset the local Uni-Pro and try again.
*/
if (ret && ufshcd_hba_enable(hba)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ ufshcd_update_evt_hist(hba,
+ UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
goto out;
}
@@ -4517,7 +4578,8 @@ link_startup:
if (ret) {
/* failed to get the link up... retire */
- ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ ufshcd_update_evt_hist(hba,
+ UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
goto out;
}
@@ -4551,7 +4613,7 @@ out:
dev_err(hba->dev, "link startup failed %d\n", ret);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
}
return ret;
}
@@ -4906,7 +4968,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev,
"OCS error from controller = %x for tag %d\n",
ocs, lrbp->task_tag);
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
break;
} /* end of switch */
@@ -4964,9 +5026,11 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
struct scsi_cmnd *cmd;
int result;
int index;
+ bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
+ lrbp->in_use = false;
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
@@ -4979,15 +5043,17 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
+ update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
"dev_complete");
complete(hba->dev_cmd.complete);
+ update_scaling = true;
}
}
- if (ufshcd_is_clkscaling_supported(hba))
+ if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
hba->clk_scaling.active_reqs--;
}
@@ -5632,7 +5698,9 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
pm_runtime_get_sync(hba->dev);
- if (pm_runtime_suspended(hba->dev)) {
+ if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
+ enum ufs_pm_op pm_op;
+
/*
* Don't assume anything of pm_runtime_get_sync(), if
* resume fails, irq and clocks can be OFF, and powers
@@ -5647,7 +5715,8 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
ufshcd_release(hba);
- ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
+ pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
+ ufshcd_vops_resume(hba, pm_op);
} else {
ufshcd_hold(hba, false);
if (hba->clk_scaling.is_allowed) {
@@ -5668,7 +5737,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
- return (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+ return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
ufshcd_is_link_broken(hba))));
}
@@ -5681,6 +5750,7 @@ static void ufshcd_recover_pm_error(struct ufs_hba *hba)
struct request_queue *q;
int ret;
+ hba->is_sys_suspended = false;
/*
* Set RPM status of hba device to RPM_ACTIVE,
* this also clears its runtime error.
@@ -5739,11 +5809,13 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
+ down(&hba->eh_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ up(&hba->eh_sem);
return;
}
ufshcd_set_eh_in_progress(hba);
@@ -5751,20 +5823,18 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_err_handling_prepare(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_scsi_block_requests(hba);
- /*
- * A full reset and restore might have happened after preparation
- * is finished, double check whether we should stop.
- */
- if (ufshcd_err_handling_should_stop(hba)) {
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- goto out;
- }
hba->ufshcd_state = UFSHCD_STATE_RESET;
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
+ /*
+ * A full reset and restore might have happened after preparation
+ * is finished, double check whether we should stop.
+ */
+ if (ufshcd_err_handling_should_stop(hba))
+ goto skip_err_handling;
+
if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
bool ret;
@@ -5772,17 +5842,10 @@ static void ufshcd_err_handler(struct work_struct *work)
/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
ret = ufshcd_quirk_dl_nac_errors(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (!ret && !hba->force_reset && ufshcd_is_link_active(hba))
+ if (!ret && ufshcd_err_handling_should_stop(hba))
goto skip_err_handling;
}
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
- needs_reset = true;
-
if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
(hba->saved_uic_err &&
(hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
@@ -5791,7 +5854,7 @@ static void ufshcd_err_handler(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -5802,8 +5865,14 @@ static void ufshcd_err_handler(struct work_struct *work)
* transfers forcefully because they will get cleared during
* host reset and restore
*/
- if (needs_reset)
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
+ needs_reset = true;
goto do_reset;
+ }
/*
* If LINERESET was caught, UFS might have been put to PWM mode,
@@ -5911,12 +5980,11 @@ skip_err_handling:
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
}
-
-out:
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
+ up(&hba->eh_sem);
}
/**
@@ -5936,7 +6004,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
(reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
@@ -5966,7 +6034,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
(reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
@@ -5985,7 +6053,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
if ((reg & UIC_NETWORK_LAYER_ERROR) &&
(reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
retval |= IRQ_HANDLED;
}
@@ -5993,7 +6061,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
(reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
retval |= IRQ_HANDLED;
}
@@ -6001,7 +6069,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
if ((reg & UIC_DME_ERROR) &&
(reg & UIC_DME_ERROR_CODE_MASK)) {
- ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
+ ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
retval |= IRQ_HANDLED;
}
@@ -6043,7 +6111,8 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS) {
- ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
+ ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
+ hba->errors);
queue_eh_work = true;
}
@@ -6060,7 +6129,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
"Enter" : "Exit",
hba->errors, ufshcd_get_upmcrs(hba));
- ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
+ ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
hba->errors);
ufshcd_set_link_broken(hba);
queue_eh_work = true;
@@ -6075,7 +6144,8 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
hba->saved_uic_err |= hba->uic_error;
/* dump controller state before resetting */
- if ((hba->saved_err & (INT_FATAL_ERRORS)) ||
+ if ((hba->saved_err &
+ (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
(hba->saved_uic_err &&
(hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
@@ -6407,8 +6477,12 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
init_completion(&wait);
lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
+ if (unlikely(lrbp->in_use)) {
+ err = -EBUSY;
+ goto out;
+ }
+ WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
lrbp->sense_bufflen = 0;
lrbp->sense_buffer = NULL;
@@ -6480,6 +6554,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
}
}
+out:
blk_put_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
@@ -6601,7 +6676,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
out:
hba->req_abort_count = 0;
- ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
if (!err) {
err = SUCCESS;
} else {
@@ -6624,7 +6699,8 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
/**
* ufshcd_try_to_abort_task - abort a specific task
- * @cmd: SCSI command pointer
+ * @hba: Pointer to adapter instance
+ * @tag: Task tag/index to be aborted
*
* Abort the pending command in device by sending UFS_ABORT_TASK task management
* command, and in host controller by clearing the door-bell register. There can
@@ -6729,16 +6805,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
BUG();
}
- /*
- * Task abort to the device W-LUN is illegal. When this command
- * will fail, due to spec violation, scsi err handling next step
- * will be to send LU reset which, again, is a spec violation.
- * To avoid these unnecessary/illegal step we skip to the last error
- * handling stage: reset and restore.
- */
- if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
- return ufshcd_eh_host_reset_handler(cmd);
-
ufshcd_hold(hba, false);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* If command is already aborted/completed, return SUCCESS */
@@ -6759,10 +6825,10 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* to reduce repeated printouts. For other aborted requests only print
* basic details.
*/
- scsi_print_command(hba->lrb[tag].cmd);
+ scsi_print_command(cmd);
if (!hba->req_abort_count) {
- ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
- ufshcd_print_host_regs(hba);
+ ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
+ ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_trs(hba, 1 << tag, true);
@@ -6778,6 +6844,29 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto cleanup;
}
+ /*
+ * Task abort to the device W-LUN is illegal. When this command
+ * will fail, due to spec violation, scsi err handling next step
+ * will be to send LU reset which, again, is a spec violation.
+ * To avoid these unnecessary/illegal steps, first we clean up
+ * the lrb taken by this cmd and mark the lrb as in_use, then
+ * queue the eh_work and bail.
+ */
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
+ ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
+ spin_lock_irqsave(host->host_lock, flags);
+ if (lrbp->cmd) {
+ __ufshcd_transfer_req_compl(hba, (1UL << tag));
+ __set_bit(tag, &hba->outstanding_reqs);
+ lrbp->in_use = true;
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
+ }
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+ goto out;
+ }
+
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip)
err = -EIO;
@@ -6845,7 +6934,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
- ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
+ ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
return err;
}
@@ -6891,6 +6980,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
*/
scsi_report_bus_reset(hba->host, 0);
if (err) {
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
hba->saved_err |= saved_err;
hba->saved_uic_err |= saved_uic_err;
}
@@ -7092,7 +7182,6 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
int ret = 0;
- struct scsi_device *sdev_rpmb;
struct scsi_device *sdev_boot;
hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
@@ -7105,14 +7194,14 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
scsi_device_put(hba->sdev_ufs_device);
- sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
- if (IS_ERR(sdev_rpmb)) {
- ret = PTR_ERR(sdev_rpmb);
+ if (IS_ERR(hba->sdev_rpmb)) {
+ ret = PTR_ERR(hba->sdev_rpmb);
goto remove_sdev_ufs_device;
}
- ufshcd_blk_pm_runtime_init(sdev_rpmb);
- scsi_device_put(sdev_rpmb);
+ ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
+ scsi_device_put(hba->sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
@@ -7636,6 +7725,63 @@ out:
return ret;
}
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
+
+static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
+{
+ struct scsi_device *sdp;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
+ sdp = hba->sdev_ufs_device;
+ else if (wlun == UFS_UPIU_RPMB_WLUN)
+ sdp = hba->sdev_rpmb;
+ else
+ BUG();
+ if (sdp) {
+ ret = scsi_device_get(sdp);
+ if (!ret && !scsi_device_online(sdp)) {
+ ret = -ENODEV;
+ scsi_device_put(sdp);
+ }
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ret)
+ goto out_err;
+
+ ret = ufshcd_send_request_sense(hba, sdp);
+ scsi_device_put(sdp);
+out_err:
+ if (ret)
+ dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
+ __func__, wlun, ret);
+ return ret;
+}
+
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->wlun_dev_clr_ua)
+ goto out;
+
+ ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
+ if (!ret)
+ ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
+ if (!ret)
+ hba->wlun_dev_clr_ua = false;
+out:
+ if (ret)
+ dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
+ __func__, ret);
+ return ret;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -7739,8 +7885,10 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
struct ufs_hba *hba = (struct ufs_hba *)data;
int ret;
+ down(&hba->eh_sem);
/* Initialize hba, detect and initialize UFS device */
ret = ufshcd_probe_hba(hba, true);
+ up(&hba->eh_sem);
if (ret)
goto out;
@@ -7755,6 +7903,8 @@ out:
pm_runtime_put_sync(hba->dev);
ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
+ } else {
+ ufshcd_clear_ua_wluns(hba);
}
}
@@ -7988,8 +8138,7 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
return 0;
}
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
- bool skip_ref_clk)
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
int ret = 0;
struct ufs_clk_info *clki;
@@ -8007,7 +8156,12 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
- if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+ /*
+ * Don't disable clocks which are needed
+ * to keep the link active.
+ */
+ if (ufshcd_is_link_active(hba) &&
+ clki->keep_link_active)
continue;
clk_state_changed = on ^ clki->enabled;
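Instead of matching the "ref_clk" name at gating time (the removed skip_ref_clk path), clocks that must stay up while the link is active now carry a keep_link_active flag on struct ufs_clk_info. A paraphrased sketch of how the device reference clock would get flagged when the clock list is parsed -- the exact location (platform glue) is an assumption here:

static void example_mark_ref_clk(struct ufs_clk_info *clki)
{
	/* The device ref_clk must keep running while the link is active. */
	if (!strcmp(clki->name, "ref_clk"))
		clki->keep_link_active = true;
}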
@@ -8052,11 +8206,6 @@ out:
return ret;
}
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
-{
- return __ufshcd_setup_clocks(hba, on, false);
-}
-
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
int ret = 0;
@@ -8113,15 +8262,9 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
err = ufshcd_vops_init(hba);
if (err)
- goto out;
-
- err = ufshcd_vops_setup_regulators(hba, true);
- if (err)
- ufshcd_vops_exit(hba);
-out:
- if (err)
dev_err(hba->dev, "%s: variant %s init failed err %d\n",
__func__, ufshcd_get_var_name(hba), err);
+out:
return err;
}
@@ -8130,8 +8273,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
if (!hba->vops)
return;
- ufshcd_vops_setup_regulators(hba, false);
-
ufshcd_vops_exit(hba);
}
@@ -8327,7 +8468,8 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
}
/*
* If autobkops is enabled, link can't be turned off because
- * turning off the link would also turn off the device.
+ * turning off the link would also turn off the device, except in the
+ * case of DeepSleep where the device is expected to remain powered.
*/
else if ((req_link_state == UIC_LINK_OFF_STATE) &&
(!check_for_bkops || !hba->auto_bkops_enabled)) {
@@ -8337,6 +8479,9 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* put the link in low power mode is to send the DME end point
* to device and then send the DME reset command to local
* unipro. But putting the link in hibern8 is much faster.
+ *
+ * Note also that putting the link in Hibern8 is a requirement
+ * for entering DeepSleep.
*/
ret = ufshcd_uic_hibern8_enter(hba);
if (ret) {
@@ -8440,13 +8585,13 @@ out:
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
- if (ufshcd_is_link_off(hba))
+ if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
ufshcd_setup_hba_vreg(hba, false);
}
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
- if (ufshcd_is_link_off(hba))
+ if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
ufshcd_setup_hba_vreg(hba, true);
}
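With aggressive power collapse allowed, the host controller rail is powered down on suspend even when the link is only in hibern8, not just when it is off. A sketch of a host opting in -- the UFSHCD_CAP_AGGR_POWER_COLLAPSE name behind ufshcd_can_aggressive_pc() is assumed from this series, not visible in the hunk:

static void example_enable_aggressive_pc(struct ufs_hba *hba)
{
	/* Allow the hba vreg to be turned off with the link in hibern8. */
	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
}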
@@ -8469,6 +8614,7 @@ static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret = 0;
+ int check_for_bkops;
enum ufs_pm_level pm_lvl;
enum ufs_dev_pwr_mode req_dev_pwr_mode;
enum uic_link_state req_link_state;
@@ -8540,11 +8686,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
- if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
- !ufshcd_is_runtime_pm(pm_op)) {
+ if (!ufshcd_is_runtime_pm(pm_op))
/* ensure that bkops is disabled */
ufshcd_disable_auto_bkops(hba);
- }
if (!hba->dev_info.b_rpm_dev_flush_capable) {
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
@@ -8554,7 +8698,13 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
flush_work(&hba->eeh_work);
- ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+
+ /*
+ * In the case of DeepSleep, the device is expected to remain powered
+ * with the link off, so do not check for bkops.
+ */
+ check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
+ ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
if (ret)
goto set_dev_active;
@@ -8575,11 +8725,7 @@ disable_clks:
*/
ufshcd_disable_irq(hba);
- if (!ufshcd_is_link_active(hba))
- ufshcd_setup_clocks(hba, false);
- else
- /* If link is active, device ref_clk can't be switched off */
- __ufshcd_setup_clocks(hba, false, true);
+ ufshcd_setup_clocks(hba, false);
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
@@ -8595,11 +8741,25 @@ set_link_active:
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
+ /*
+ * Device hardware reset is required to exit DeepSleep. Also, for
+ * DeepSleep, the link is off so host reset and restore will be done
+ * further below.
+ */
+ if (ufshcd_is_ufs_dev_deepsleep(hba)) {
+ ufshcd_vops_device_reset(hba);
+ WARN_ON(!ufshcd_is_link_off(hba));
+ }
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
else if (ufshcd_is_link_off(hba))
ufshcd_host_reset_and_restore(hba);
set_dev_active:
+ /* Can also get here needing to exit DeepSleep */
+ if (ufshcd_is_ufs_dev_deepsleep(hba)) {
+ ufshcd_vops_device_reset(hba);
+ ufshcd_host_reset_and_restore(hba);
+ }
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
@@ -8617,7 +8777,7 @@ out:
hba->pm_op_in_progress = 0;
if (ret)
- ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
+ ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
return ret;
}
@@ -8661,6 +8821,9 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto disable_vreg;
+ /* For DeepSleep, the only supported option is to have the link off */
+ WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
+
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
if (!ret) {
@@ -8674,6 +8837,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/*
* A full initialization of the host and the device is
* required since the link was put to off during suspend.
+ * Note, in the case of DeepSleep, the device will exit
+ * DeepSleep due to device reset.
*/
ret = ufshcd_reset_and_restore(hba);
/*
@@ -8736,7 +8901,7 @@ disable_irq_and_vops_clks:
out:
hba->pm_op_in_progress = 0;
if (ret)
- ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
}
@@ -8753,6 +8918,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
+ down(&hba->eh_sem);
if (!hba || !hba->is_powered)
return 0;
@@ -8783,6 +8949,8 @@ out:
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = true;
+ else
+ up(&hba->eh_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -8799,8 +8967,10 @@ int ufshcd_system_resume(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- if (!hba)
+ if (!hba) {
+ up(&hba->eh_sem);
return -EINVAL;
+ }
if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
@@ -8816,6 +8986,7 @@ out:
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = false;
+ up(&hba->eh_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -8907,6 +9078,7 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
int ret = 0;
+ down(&hba->eh_sem);
if (!hba->is_powered)
goto out;
@@ -8919,6 +9091,8 @@ int ufshcd_shutdown(struct ufs_hba *hba)
out:
if (ret)
dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+ hba->is_powered = false;
+ up(&hba->eh_sem);
/* allow force shutdown even in case of errors */
return 0;
}
@@ -9114,6 +9288,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+ sema_init(&hba->eh_sem, 1);
+
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
@@ -9185,7 +9361,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = ufshcd_hba_enable(hba);
if (err) {
dev_err(hba->dev, "Host controller enable failed\n");
- ufshcd_print_host_regs(hba);
+ ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
goto free_tmf_queue;
}