Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r-- | drivers/scsi/ufs/ufshcd.c | 515
1 file changed, 426 insertions(+), 89 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 698e8d20b4ba..5db18f444ea9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -48,6 +48,8 @@
 #include "unipro.h"
 #include "ufs-sysfs.h"
 #include "ufs_bsg.h"
+#include <asm/unaligned.h>
+#include <linux/blkdev.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ufs.h>
@@ -92,6 +94,9 @@
 /* default delay of autosuspend: 2000 ms */
 #define RPM_AUTOSUSPEND_DELAY_MS	2000
 
+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS	5000
+
 /* Default value of wait time before gating device ref clock */
 #define UFSHCD_REF_CLK_GATING_WAIT_US	0xFF /* microsecs */
 
@@ -251,6 +256,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
 			     struct ufs_pa_layer_attr *pwr_mode);
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+
 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 {
 	return tag >= 0 && tag < hba->nutrs;
@@ -272,6 +283,25 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 	}
 }
 
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!ufshcd_is_wb_allowed(hba))
+		return;
+
+	ret = ufshcd_wb_ctrl(hba, true);
+	if (ret)
+		dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
+	else
+		dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
+	ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
+	if (ret)
+		dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
+			__func__, ret);
+	ufshcd_wb_toggle_flush(hba, true);
+}
+
 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
 {
 	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
@@ -535,21 +565,21 @@ void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
 }
 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
 
-/*
+/**
  * ufshcd_wait_for_register - wait for register value to change
- * @hba - per-adapter interface
- * @reg - mmio register offset
- * @mask - mask to apply to read register value
- * @val - wait condition
- * @interval_us - polling interval in microsecs
- * @timeout_ms - timeout in millisecs
- * @can_sleep - perform sleep or just spin
+ * @hba: per-adapter interface
+ * @reg: mmio register offset
+ * @mask: mask to apply to the read register value
+ * @val: value to wait for
+ * @interval_us: polling interval in microseconds
+ * @timeout_ms: timeout in milliseconds
  *
- * Returns -ETIMEDOUT on error, zero on success
+ * Return:
+ * -ETIMEDOUT on error, zero on success.
  */
 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 				u32 val, unsigned long interval_us,
-				unsigned long timeout_ms, bool can_sleep)
+				unsigned long timeout_ms)
 {
 	int err = 0;
 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -558,10 +588,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 	val = val & mask;
 
 	while ((ufshcd_readl(hba, reg) & mask) != val) {
-		if (can_sleep)
-			usleep_range(interval_us, interval_us + 50);
-		else
-			udelay(interval_us);
+		usleep_range(interval_us, interval_us + 50);
 		if (time_after(jiffies, timeout)) {
 			if ((ufshcd_readl(hba, reg) & mask) != val)
 				err = -ETIMEDOUT;
@@ -1150,10 +1177,17 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 	/* scale up the gear after scaling up clocks */
 	if (scale_up) {
 		ret = ufshcd_scale_gear(hba, true);
-		if (ret)
+		if (ret) {
 			ufshcd_scale_clks(hba, false);
+			goto out_unprepare;
+		}
 	}
 
+	/* Enable Write Booster if we have scaled up else disable it */
+	up_write(&hba->clk_scaling_lock);
+	ufshcd_wb_ctrl(hba, scale_up);
+	down_write(&hba->clk_scaling_lock);
+
 out_unprepare:
 	ufshcd_clock_scaling_unprepare(hba);
 out:
@@ -1319,23 +1353,6 @@ start_window:
 	return 0;
 }
 
-static struct devfreq_dev_profile ufs_devfreq_profile = {
-	.polling_ms	= 100,
-	.target		= ufshcd_devfreq_target,
-	.get_dev_status	= ufshcd_devfreq_get_dev_status,
-};
-
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
-static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
-	.upthreshold		= 70,
-	.downdifferential	= 5,
-};
-
-static void *gov_data = &ufs_ondemand_data;
-#else
-static void *gov_data; /* NULL */
-#endif
-
 static int ufshcd_devfreq_init(struct ufs_hba *hba)
 {
 	struct list_head *clk_list = &hba->clk_list_head;
@@ -1351,12 +1368,12 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
 	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
 	dev_pm_opp_add(hba->dev, clki->max_freq, 0);
 
-	ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
-					 gov_data);
+	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
+					 &hba->vps->ondemand_data);
 	devfreq = devfreq_add_device(hba->dev,
-			&ufs_devfreq_profile,
+			&hba->vps->devfreq_profile,
 			DEVFREQ_GOV_SIMPLE_ONDEMAND,
-			gov_data);
+			&hba->vps->ondemand_data);
 	if (IS_ERR(devfreq)) {
 		ret = PTR_ERR(devfreq);
 		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
@@ -2560,7 +2577,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
 	 */
 	err = ufshcd_wait_for_register(hba,
 			REG_UTP_TRANSFER_REQ_DOOR_BELL,
-			mask, ~mask, 1000, 1000, true);
+			mask, ~mask, 1000, 1000);
 
 	return err;
 }
@@ -2747,13 +2764,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
 }
 
 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
-	enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
 {
 	int ret;
 	int retries;
 
 	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
-		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
 		if (ret)
 			dev_dbg(hba->dev,
 				"%s: failed with error %d, retries %d\n",
@@ -2774,16 +2791,17 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
  * @hba: per-adapter instance
  * @opcode: flag query to perform
  * @idn: flag idn to access
+ * @index: flag index to access
  * @flag_res: the flag value after the query request completes
  *
  * Returns 0 for success, non-zero in case of failure
  */
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
-			enum flag_idn idn, bool *flag_res)
+			enum flag_idn idn, u8 index, bool *flag_res)
 {
 	struct ufs_query_req *request = NULL;
 	struct ufs_query_res *response = NULL;
-	int err, index = 0, selector = 0;
+	int err, selector = 0;
 	int timeout = QUERY_REQ_TIMEOUT;
 
 	BUG_ON(!hba);
@@ -3223,7 +3241,7 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
 struct uc_string_id {
 	u8 len;
 	u8 type;
-	wchar_t uc[0];
+	wchar_t uc[];
 } __packed;
 
 /* replace non-printable or non-ASCII characters with spaces */
@@ -4137,10 +4155,10 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
 {
 	int i;
 	int err;
-	bool flag_res = 1;
+	bool flag_res = true;
 
 	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
-		QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
 	if (err) {
 		dev_err(hba->dev,
 			"%s setting fDeviceInit flag failed with error %d\n",
@@ -4151,7 +4169,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
 	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
 	for (i = 0; i < 1000 && !err && flag_res; i++)
 		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
-			QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+			QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 
 	if (err)
 		dev_err(hba->dev,
@@ -4229,16 +4247,23 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
 /**
  * ufshcd_hba_stop - Send controller to reset state
  * @hba: per adapter instance
- * @can_sleep: perform sleep or just spin
  */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
 {
+	unsigned long flags;
 	int err;
 
+	/*
+	 * Obtain the host lock to prevent that the controller is disabled
+	 * while the UFS interrupt handler is active on another CPU.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
-					10, 1, can_sleep);
+					10, 1);
 	if (err)
 		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
 }
@@ -4259,7 +4284,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
 
 	if (!ufshcd_is_hba_active(hba))
 		/* change controller state to "reset state" */
-		ufshcd_hba_stop(hba, true);
+		ufshcd_hba_stop(hba);
 
 	/* UniPro link is disabled at this point */
 	ufshcd_set_link_off(hba);
@@ -4279,7 +4304,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
 	 * instruction might be read back.
 	 * This delay can be changed based on the controller.
 	 */
-	ufshcd_delay_us(hba->hba_enable_delay_us, 100);
+	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
 
 	/* wait for the host controller to complete initialization */
 	retry = 50;
@@ -4966,7 +4991,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
 		goto out;
 
 	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
-			QUERY_FLAG_IDN_BKOPS_EN, NULL);
+			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
@@ -5016,7 +5041,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
 	}
 
 	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
-			QUERY_FLAG_IDN_BKOPS_EN, NULL);
+			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
@@ -5161,6 +5186,190 @@ out:
				__func__, err);
 }
 
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+{
+	int ret;
+	u8 index;
+	enum query_opcode opcode;
+
+	if (!ufshcd_is_wb_allowed(hba))
+		return 0;
+
+	if (!(enable ^ hba->wb_enabled))
+		return 0;
+	if (enable)
+		opcode = UPIU_QUERY_OPCODE_SET_FLAG;
+	else
+		opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+	index = ufshcd_wb_get_query_index(hba);
+	ret = ufshcd_query_flag_retry(hba, opcode,
+				      QUERY_FLAG_IDN_WB_EN, index, NULL);
+	if (ret) {
+		dev_err(hba->dev, "%s write booster %s failed %d\n",
+			__func__, enable ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	hba->wb_enabled = enable;
+	dev_dbg(hba->dev, "%s write booster %s %d\n",
+			__func__, enable ? "enable" : "disable", ret);
+
+	return ret;
+}
+
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+{
+	int val;
+	u8 index;
+
+	if (set)
+		val = UPIU_QUERY_OPCODE_SET_FLAG;
+	else
+		val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+	index = ufshcd_wb_get_query_index(hba);
+	return ufshcd_query_flag_retry(hba, val,
+				QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
+				index, NULL);
+}
+
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+{
+	if (enable)
+		ufshcd_wb_buf_flush_enable(hba);
+	else
+		ufshcd_wb_buf_flush_disable(hba);
+
+}
+
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+{
+	int ret;
+	u8 index;
+
+	if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+		return 0;
+
+	index = ufshcd_wb_get_query_index(hba);
+	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+				      index, NULL);
+	if (ret)
+		dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
+			__func__, ret);
+	else
+		hba->wb_buf_flush_enabled = true;
+
+	dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
+	return ret;
+}
+
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
+{
+	int ret;
+	u8 index;
+
+	if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
+		return 0;
+
+	index = ufshcd_wb_get_query_index(hba);
+	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+				      index, NULL);
+	if (ret) {
+		dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
+			 __func__, ret);
+	} else {
+		hba->wb_buf_flush_enabled = false;
+		dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+	}
+
+	return ret;
+}
+
+static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+						u32 avail_buf)
+{
+	u32 cur_buf;
+	int ret;
+	u8 index;
+
+	index = ufshcd_wb_get_query_index(hba);
+	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+				      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
+				      index, 0, &cur_buf);
+	if (ret) {
+		dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+			__func__, ret);
+		return false;
+	}
+
+	if (!cur_buf) {
+		dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
+			 cur_buf);
+		return false;
+	}
+	/* Let it continue to flush when available buffer exceeds threshold */
+	if (avail_buf < hba->vps->wb_flush_threshold)
+		return true;
+
+	return false;
+}
+
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
+{
+	int ret;
+	u32 avail_buf;
+	u8 index;
+
+	if (!ufshcd_is_wb_allowed(hba))
+		return false;
+	/*
+	 * The ufs device needs the vcc to be ON to flush.
+	 * With user-space reduction enabled, it's enough to enable flush
+	 * by checking only the available buffer. The threshold
+	 * defined here is > 90% full.
+	 * With user-space preserved enabled, the current-buffer
+	 * should be checked too because the wb buffer size can reduce
+	 * when disk tends to be full. This info is provided by current
	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
+	 * keeping vcc on when current buffer is empty.
+	 */
+	index = ufshcd_wb_get_query_index(hba);
+	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
+				      index, 0, &avail_buf);
+	if (ret) {
+		dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+			 __func__, ret);
+		return false;
+	}
+
+	if (!hba->dev_info.b_presrv_uspc_en) {
+		if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
+			return true;
+		return false;
+	}
+
+	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+}
+
+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(to_delayed_work(work),
+					   struct ufs_hba,
+					   rpm_dev_flush_recheck_work);
+
+	/*
+	 * To prevent unnecessary VCC power drain after device finishes
+	 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
+	 * after a certain delay to recheck the threshold by next runtime
+	 * suspend.
+	 */
+	pm_runtime_get_sync(hba->dev);
+	pm_runtime_put_sync(hba->dev);
+}
+
 /**
  * ufshcd_exception_event_handler - handle exceptions raised by device
  * @work: pointer to work data
@@ -5723,7 +5932,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
 	/* poll for max. 1 sec to clear door bell register by h/w */
 	err = ufshcd_wait_for_register(hba,
 			REG_UTP_TASK_REQ_DOOR_BELL,
-			mask, 0, 1000, 1000, true);
+			mask, 0, 1000, 1000);
 out:
 	return err;
 }
@@ -6299,8 +6508,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	 * Stop the host controller and complete the requests
 	 * cleared by h/w
 	 */
+	ufshcd_hba_stop(hba);
+
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_hba_stop(hba, false);
 	hba->silence_err_logs = true;
 	ufshcd_complete_requests(hba);
 	hba->silence_err_logs = false;
@@ -6603,6 +6813,93 @@ out:
 	return ret;
 }
 
+static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+	u8 lun;
+	u32 d_lu_wb_buf_alloc;
+
+	if (!ufshcd_is_wb_allowed(hba))
+		return;
+
+	if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
+		goto wb_disabled;
+
+	hba->dev_info.d_ext_ufs_feature_sup =
+		get_unaligned_be32(desc_buf +
+				   DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+
+	if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+		goto wb_disabled;
+
+	/*
+	 * WB may be supported but not configured while provisioning.
+	 * The spec says, in dedicated wb buffer mode,
+	 * a max of 1 lun would have wb buffer configured.
+	 * Now only shared buffer mode is supported.
+	 */
+	hba->dev_info.b_wb_buffer_type =
+		desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+
+	hba->dev_info.b_presrv_uspc_en =
+		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
+
+	if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) {
+		hba->dev_info.d_wb_alloc_units =
+			get_unaligned_be32(desc_buf +
+					   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
+		if (!hba->dev_info.d_wb_alloc_units)
+			goto wb_disabled;
+	} else {
+		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
+			d_lu_wb_buf_alloc = 0;
+			ufshcd_read_unit_desc_param(hba,
+					lun,
+					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
+					(u8 *)&d_lu_wb_buf_alloc,
+					sizeof(d_lu_wb_buf_alloc));
+			if (d_lu_wb_buf_alloc) {
+				hba->dev_info.wb_dedicated_lu = lun;
+				break;
+			}
+		}
+
+		if (!d_lu_wb_buf_alloc)
+			goto wb_disabled;
+	}
+	return;
+
+wb_disabled:
+	hba->caps &= ~UFSHCD_CAP_WB_EN;
+}
+
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
+{
+	struct ufs_dev_fix *f;
+	struct ufs_dev_info *dev_info = &hba->dev_info;
+
+	if (!fixups)
+		return;
+
+	for (f = fixups; f->quirk; f++) {
+		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
+		     ((dev_info->model &&
+		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+		      !strcmp(f->model, UFS_ANY_MODEL)))
+			hba->dev_quirks |= f->quirk;
+	}
+}
+EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
+
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
+{
+	/* fix by general quirk table */
+	ufshcd_fixup_dev_quirks(hba, ufs_fixups);
+
+	/* allow vendors to fix quirks */
+	ufshcd_vops_fixup_dev_quirks(hba);
+}
+
 static int ufs_get_device_desc(struct ufs_hba *hba)
 {
 	int err;
@@ -6639,6 +6936,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
 				      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
 
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
 	err = ufshcd_read_string_desc(hba, model_index,
 				      &dev_info->model, SD_ASCII_STD);
 	if (err < 0) {
@@ -6647,6 +6945,17 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
 		goto out;
 	}
 
+	ufs_fixup_device_setup(hba);
+
+	/*
+	 * Probe WB only for UFS-3.1 devices or UFS devices with quirk
+	 * UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES enabled
+	 */
+	if (dev_info->wspecversion >= 0x310 ||
+	    dev_info->wspecversion == 0x220 ||
+	    (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
+		ufshcd_wb_probe(hba, desc_buf);
+
 	/*
 	 * ufshcd_read_string_desc returns size of the string
 	 * reset the error value
@@ -6666,21 +6975,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
 	dev_info->model = NULL;
 }
 
-static void ufs_fixup_device_setup(struct ufs_hba *hba)
-{
-	struct ufs_dev_fix *f;
-	struct ufs_dev_info *dev_info = &hba->dev_info;
-
-	for (f = ufs_fixups; f->quirk; f++) {
-		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
-		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
-		     ((dev_info->model &&
-		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
-		      !strcmp(f->model, UFS_ANY_MODEL)))
-			hba->dev_quirks |= f->quirk;
-	}
-}
-
 /**
  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
  * @hba: per-adapter instance
@@ -6996,9 +7290,6 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
 	bool flag;
 	int ret;
 
-	/* Clear any previous UFS device information */
-	memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-
 	/* Init check for device descriptor sizes */
 	ufshcd_init_desc_sizes(hba);
 
@@ -7017,10 +7308,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
 
 	ufshcd_get_ref_clk_gating_wait(hba);
 
-	ufs_fixup_device_setup(hba);
-
 	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
-			QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
 		hba->dev_info.f_power_on_wp_en = flag;
 
 	/* Probe maximum power mode co-supported by both UFS host and device */
@@ -7149,6 +7438,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
 		/* set the state as operational after switching to desired gear */
 		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 
+		ufshcd_wb_config(hba);
 		/* Enable Auto-Hibernate if configured */
 		ufshcd_auto_hibern8_enable(hba);
 
@@ -7195,6 +7485,16 @@ static const struct attribute_group *ufshcd_driver_groups[] = {
 	NULL,
 };
 
+static struct ufs_hba_variant_params ufs_hba_vps = {
+	.hba_enable_delay_us		= 1000,
+	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
+	.devfreq_profile.polling_ms	= 100,
+	.devfreq_profile.target		= ufshcd_devfreq_target,
+	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
+	.ondemand_data.upthreshold	= 70,
+	.ondemand_data.downdifferential	= 5,
+};
+
 static struct scsi_host_template ufshcd_driver_template = {
 	.module			= THIS_MODULE,
 	.name			= UFSHCD,
@@ -7774,7 +8074,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
 		 * Change controller state to "reset state" which
 		 * should also put the link in off/reset state
 		 */
-		ufshcd_hba_stop(hba, true);
+		ufshcd_hba_stop(hba);
 		/*
 		 * TODO: Check if we need any delay to make sure that
 		 * controller is reset
@@ -7809,6 +8109,9 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 	 *
 	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
 	 * in low power state which would save some power.
+	 *
+	 * If Write Booster is enabled and the device needs to flush the WB
+	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
 	 */
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
 	    !hba->dev_info.is_lu_power_on_wp) {
@@ -7938,16 +8241,31 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 			/* make sure that auto bkops is disabled */
 			ufshcd_disable_auto_bkops(hba);
 		}
-	}
+		/*
+		 * If device needs to do BKOP or WB buffer flush during
+		 * Hibern8, keep device power mode as "active power mode"
+		 * and VCC supply.
+		 */
+		hba->dev_info.b_rpm_dev_flush_capable =
+			hba->auto_bkops_enabled ||
+			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
+			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
+			ufshcd_is_auto_hibern8_enabled(hba))) &&
+			ufshcd_wb_need_flush(hba));
+	}
+
+	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
+		if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+		    !ufshcd_is_runtime_pm(pm_op)) {
+			/* ensure that bkops is disabled */
+			ufshcd_disable_auto_bkops(hba);
+		}
 
-	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
-	     ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
-	       !ufshcd_is_runtime_pm(pm_op))) {
-		/* ensure that bkops is disabled */
-		ufshcd_disable_auto_bkops(hba);
-		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
-		if (ret)
-			goto enable_gating;
+		if (!hba->dev_info.b_rpm_dev_flush_capable) {
+			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+			if (ret)
+				goto enable_gating;
+		}
 	}
 
 	flush_work(&hba->eeh_work);
@@ -8000,9 +8318,16 @@ enable_gating:
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
 	hba->clk_gating.is_suspended = false;
+	hba->dev_info.b_rpm_dev_flush_capable = false;
 	ufshcd_release(hba);
 out:
+	if (hba->dev_info.b_rpm_dev_flush_capable) {
+		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
+			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
+	}
+
 	hba->pm_op_in_progress = 0;
+
 	if (ret)
 		ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
 	return ret;
@@ -8055,9 +8380,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		else
 			goto vendor_suspend;
 	} else if (ufshcd_is_link_off(hba)) {
-		ret = ufshcd_host_reset_and_restore(hba);
 		/*
-		 * ufshcd_host_reset_and_restore() should have already
+		 * A full initialization of the host and the device is
+		 * required since the link was put to off during suspend.
+		 */
+		ret = ufshcd_reset_and_restore(hba);
+		/*
+		 * ufshcd_reset_and_restore() should have already
 		 * set the link state as active
 		 */
 		if (ret || !ufshcd_is_link_active(hba))
@@ -8087,6 +8416,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);
 
+	if (hba->dev_info.b_rpm_dev_flush_capable) {
+		hba->dev_info.b_rpm_dev_flush_capable = false;
+		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
+	}
+
 	/* Schedule clock gating in case of no access to UFS device yet */
 	ufshcd_release(hba);
 
@@ -8313,7 +8647,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
-	ufshcd_hba_stop(hba, true);
+	ufshcd_hba_stop(hba);
 
 	ufshcd_exit_clk_scaling(hba);
 	ufshcd_exit_clk_gating(hba);
@@ -8422,7 +8756,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	hba->mmio_base = mmio_base;
 	hba->irq = irq;
-	hba->hba_enable_delay_us = 1000;
+	hba->vps = &ufs_hba_vps;
 
 	err = ufshcd_hba_init(hba);
 	if (err)
@@ -8560,6 +8894,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 					UFS_SLEEP_PWR_MODE,
 					UIC_LINK_HIBERN8_STATE);
 
+	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
+			  ufshcd_rpm_dev_flush_recheck_work);
+
 	/* Set the default auto-hiberate idle timer value to 150 ms */
 	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
 		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
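
The flush heuristic introduced in ufshcd_wb_need_flush() and ufshcd_wb_presrv_usrspc_keep_vcc_on() reduces to a small decision table: with user-space reduction, flush once less than 10% of the buffer remains; with user-space preservation, flush only while something is buffered (dCurWriteBoosterBufferSize != 0) and the available buffer is below wb_flush_threshold (40%). A minimal stand-alone model of that decision, assuming the kernel's UFS_WB_BUF_REMAIN_PERCENT(x) scaling of ((x) * 10); the wb_need_flush() harness below is illustrative only, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* scaling macro and thresholds copied from the patch */
#define UFS_WB_BUF_REMAIN_PERCENT(x)	((x) * 10)
#define WB_FLUSH_THRESHOLD		UFS_WB_BUF_REMAIN_PERCENT(40)

/* mirrors ufshcd_wb_need_flush() + ufshcd_wb_presrv_usrspc_keep_vcc_on() */
static bool wb_need_flush(bool presrv_uspc_en, unsigned int avail_buf,
			  unsigned int cur_buf)
{
	if (!presrv_uspc_en)
		/* user-space reduction mode: flush once > 90% full */
		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);

	/* preserve-user-space mode: empty current buffer, nothing to flush */
	if (!cur_buf)
		return false;

	/* keep VCC on while the available buffer is below the threshold */
	return avail_buf < WB_FLUSH_THRESHOLD;
}

int main(void)
{
	printf("%d\n", wb_need_flush(false, UFS_WB_BUF_REMAIN_PERCENT(5), 0)); /* 1 */
	printf("%d\n", wb_need_flush(true, UFS_WB_BUF_REMAIN_PERCENT(30), 8)); /* 1 */
	printf("%d\n", wb_need_flush(true, UFS_WB_BUF_REMAIN_PERCENT(30), 0)); /* 0 */
	return 0;
}

Compiled with cc -Wall, the three cases print 1, 1 and 0, matching the paths the patch takes through ufshcd_wb_need_flush().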
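The new #include <asm/unaligned.h> is pulled in for ufshcd_wb_probe(): multi-byte descriptor fields such as dExtendedUFSFeaturesSupport are big-endian and not guaranteed to be naturally aligned inside desc_buf, so they must be read with get_unaligned_be32() rather than a pointer cast. A portable user-space equivalent is sketched below; the 0x4f offset and the bit-8 WriteBooster flag follow the mainline ufs.h definitions, but treat them as assumptions here since they are not spelled out in this diff:

#include <stdint.h>
#include <stdio.h>

#define DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP	0x4f	/* assumed offset, per ufs.h */
#define UFS_DEV_WRITE_BOOSTER_SUP		(1u << 8)	/* assumed flag, per ufs.h */

/* byte-wise big-endian load: safe for any alignment and host endianness,
 * which is what get_unaligned_be32() guarantees in the kernel */
static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	uint8_t desc_buf[0x60] = {0};
	uint32_t feat;

	/* pretend the device sets the WriteBooster-supported bit (bit 8) */
	desc_buf[DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 2] = 0x01;

	feat = get_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
	printf("WriteBooster supported: %s\n",
	       (feat & UFS_DEV_WRITE_BOOSTER_SUP) ? "yes" : "no");
	return 0;
}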
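Lastly, the reworked suspend path: ufshcd_suspend() now keeps the device in active power mode (b_rpm_dev_flush_capable) whenever Auto BKOPs is still enabled, or the link will idle in Hibern8 (or Auto-Hibernate from the active state) while ufshcd_wb_need_flush() reports a pending flush; rpm_dev_flush_recheck_work then forces a runtime resume after 5 seconds so the next suspend re-evaluates the condition. A stand-alone model of that predicate; the names mirror the patch, but the enum and the harness are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS	5000	/* from the patch */

enum uic_link_state {
	UIC_LINK_OFF_STATE,
	UIC_LINK_ACTIVE_STATE,
	UIC_LINK_HIBERN8_STATE,
};

/* mirrors the b_rpm_dev_flush_capable assignment in ufshcd_suspend() */
static bool rpm_dev_flush_capable(bool auto_bkops_enabled,
				  enum uic_link_state req_link_state,
				  bool auto_hibern8_enabled,
				  bool wb_need_flush)
{
	return auto_bkops_enabled ||
	       ((req_link_state == UIC_LINK_HIBERN8_STATE ||
		 (req_link_state == UIC_LINK_ACTIVE_STATE &&
		  auto_hibern8_enabled)) &&
		wb_need_flush);
}

int main(void)
{
	/* WB flush pending over hibern8: stay active, recheck in 5 s */
	if (rpm_dev_flush_capable(false, UIC_LINK_HIBERN8_STATE, false, true))
		printf("keep device active, recheck in %d ms\n",
		       RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS);

	/* nothing to flush and no BKOPs: normal power-down */
	if (!rpm_dev_flush_capable(false, UIC_LINK_OFF_STATE, false, false))
		printf("enter low-power mode\n");
	return 0;
}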