Diffstat (limited to 'drivers/gpu/drm/amd/pm/amdgpu_pm.c')
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c  790
1 file changed, 194 insertions(+), 596 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5fa65f191a37..2627870a786e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -23,8 +23,6 @@
* Alex Deucher <alexdeucher@gmail.com>
*/
-#include <drm/drm_debugfs.h>
-
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
@@ -125,6 +123,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_pm_state_type pm;
int ret;
@@ -137,12 +136,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- if (adev->smu.ppt_funcs->get_current_power_state)
- pm = smu_get_current_power_state(&adev->smu);
- else
- pm = adev->pm.dpm.user_state;
- } else if (adev->powerplay.pp_funcs->get_current_power_state) {
+ if (pp_funcs->get_current_power_state) {
pm = amdgpu_dpm_get_current_power_state(adev);
} else {
pm = adev->pm.dpm.user_state;
@@ -281,9 +275,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- level = smu_get_performance_level(&adev->smu);
- else if (adev->powerplay.pp_funcs->get_performance_level)
+ if (adev->powerplay.pp_funcs->get_performance_level)
level = amdgpu_dpm_get_performance_level(adev);
else
level = adev->pm.dpm.forced_level;
@@ -300,6 +292,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+ (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
"unknown");
}
@@ -310,6 +303,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
@@ -335,6 +329,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
+ level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
} else {
return -EINVAL;
}
@@ -345,9 +341,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- current_level = smu_get_performance_level(&adev->smu);
- else if (adev->powerplay.pp_funcs->get_performance_level)
+ if (pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
if (current_level == level) {
@@ -377,14 +371,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
- if (is_support_sw_smu(adev)) {
- ret = smu_force_performance_level(&adev->smu, level);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- } else if (adev->powerplay.pp_funcs->force_performance_level) {
+ if (pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
mutex_unlock(&adev->pm.mutex);
@@ -415,6 +402,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data;
int i, buf_len, ret;
@@ -427,11 +415,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- ret = smu_get_power_num_states(&adev->smu, &data);
- if (ret)
- return ret;
- } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
+ if (pp_funcs->get_pp_num_states) {
amdgpu_dpm_get_pp_num_states(adev, &data);
} else {
memset(&data, 0, sizeof(data));
@@ -457,8 +441,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data;
- struct smu_context *smu = &adev->smu;
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
@@ -471,13 +455,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- pm = smu_get_current_power_state(smu);
- ret = smu_get_power_num_states(smu, &data);
- if (ret)
- return ret;
- } else if (adev->powerplay.pp_funcs->get_current_power_state
- && adev->powerplay.pp_funcs->get_pp_num_states) {
+ if (pp_funcs->get_current_power_state
+ && pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
}
@@ -592,13 +571,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- if (size < 0)
- return size;
- } else if (adev->powerplay.pp_funcs->get_pp_table) {
+ if (adev->powerplay.pp_funcs->get_pp_table) {
size = amdgpu_dpm_get_pp_table(adev, &table);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -636,15 +609,12 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
- } else if (adev->powerplay.pp_funcs->set_pp_table)
- amdgpu_dpm_set_pp_table(adev, buf, count);
+ ret = amdgpu_dpm_set_pp_table(adev, buf, count);
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -842,53 +812,42 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- ret = smu_od_edit_dpm_table(&adev->smu, type,
- parameter, parameter_size);
-
+ if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
+ ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
+ parameter,
+ parameter_size);
if (ret) {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
- } else {
-
- if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
- ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
- parameter,
- parameter_size);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
+ }
- if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
- ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
- parameter, parameter_size);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
+ if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
+ ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+ parameter, parameter_size);
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
}
+ }
- if (type == PP_OD_COMMIT_DPM_TABLE) {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_READJUST_POWER_STATE,
- NULL);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
+ if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return count;
+ } else {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
}
}
+
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -913,18 +872,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size);
- } else if (adev->powerplay.pp_funcs->print_clock_levels) {
+ if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
} else {
size = snprintf(buf, PAGE_SIZE, "\n");
}
@@ -1012,9 +966,7 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
- else if (adev->powerplay.pp_funcs->get_ppfeature_status)
+ if (adev->powerplay.pp_funcs->get_ppfeature_status)
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
else
size = snprintf(buf, PAGE_SIZE, "\n");
@@ -1055,8 +1007,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
* NOTE: change to the dcefclk max dpm level is not supported now
*/
-static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
+static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
+ enum pp_clock_type type,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
@@ -1073,10 +1025,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ size = amdgpu_dpm_print_clock_levels(adev, type, buf);
else
size = snprintf(buf, PAGE_SIZE, "\n");
@@ -1121,8 +1071,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
return 0;
}
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
+static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
+ enum pp_clock_type type,
const char *buf,
size_t count)
{
@@ -1144,10 +1094,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ ret = amdgpu_dpm_force_clock_level(adev, type, mask);
+ else
+ ret = 0;
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1158,35 +1108,26 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
return count;
}
-static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
+ return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
+}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
+}
- return size;
+static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -1194,67 +1135,14 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- uint32_t mask = 0;
- int ret;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
+ return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
+ return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
@@ -1262,69 +1150,14 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- int ret;
- uint32_t mask = 0;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
+ return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
+ return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
@@ -1332,67 +1165,14 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- int ret;
- uint32_t mask = 0;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
+ return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
+ return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
@@ -1400,65 +1180,14 @@ static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- int ret;
- uint32_t mask = 0;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
+ return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
+ return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
@@ -1466,67 +1195,14 @@ static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- int ret;
- uint32_t mask = 0;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
+ return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
+ return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
@@ -1534,69 +1210,14 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- int ret;
- uint32_t mask = 0;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
+ return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
+ return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -1604,38 +1225,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
- int ret;
- uint32_t mask = 0;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
+ return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
@@ -1822,9 +1412,7 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- size = smu_get_power_profile_mode(&adev->smu, buf);
- else if (adev->powerplay.pp_funcs->get_power_profile_mode)
+ if (adev->powerplay.pp_funcs->get_power_profile_mode)
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
else
size = snprintf(buf, PAGE_SIZE, "\n");
@@ -1888,9 +1476,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
- else if (adev->powerplay.pp_funcs->set_power_profile_mode)
+ if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
pm_runtime_mark_last_busy(ddev->dev);
@@ -2147,9 +1733,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
- else if (adev->powerplay.pp_funcs->get_gpu_metrics)
+ if (adev->powerplay.pp_funcs->get_gpu_metrics)
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
@@ -2169,7 +1753,7 @@ out:
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
@@ -2513,22 +2097,18 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- } else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return -EINVAL;
}
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return sprintf(buf, "%i\n", pwm_mode);
+ return sprintf(buf, "%u\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
@@ -2553,18 +2133,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- smu_set_fan_control_mode(&adev->smu, value);
- } else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- amdgpu_dpm_set_fan_control_mode(adev, value);
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return -EINVAL;
}
+ amdgpu_dpm_set_fan_control_mode(adev, value);
+
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2603,11 +2179,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
return err;
}
- if (is_support_sw_smu(adev))
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- else
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -2624,9 +2196,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255;
- if (is_support_sw_smu(adev))
- err = smu_set_fan_speed_percent(&adev->smu, value);
- else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
+ if (adev->powerplay.pp_funcs->set_fan_speed_percent)
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
else
err = -EINVAL;
@@ -2657,9 +2227,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return err;
}
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_percent(&adev->smu, &speed);
- else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
+ if (adev->powerplay.pp_funcs->get_fan_speed_percent)
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
else
err = -EINVAL;
@@ -2692,9 +2260,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return err;
}
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_rpm(&adev->smu, &speed);
- else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
+ if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
else
err = -EINVAL;
@@ -2785,9 +2351,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
return err;
}
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
- else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
+ if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
else
err = -EINVAL;
@@ -2819,10 +2383,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
return err;
}
- if (is_support_sw_smu(adev))
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- else
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -2837,9 +2398,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
return err;
}
- if (is_support_sw_smu(adev))
- err = smu_set_fan_speed_rpm(&adev->smu, value);
- else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
+ if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
else
err = -EINVAL;
@@ -2870,18 +2429,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- } else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return -EINVAL;
}
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2918,16 +2473,12 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
return err;
}
- if (is_support_sw_smu(adev)) {
- smu_set_fan_control_mode(&adev->smu, pwm_mode);
- } else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
- amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return -EINVAL;
}
+ amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -3059,8 +2610,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int limit_type = to_sensor_dev_attr(attr)->index;
uint32_t limit = limit_type << 24;
+ uint32_t max_limit = 0;
ssize_t size;
int r;
@@ -3076,9 +2629,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
if (is_support_sw_smu(adev)) {
smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
- adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else if (pp_funcs && pp_funcs->get_power_limit) {
+ pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+ &limit, &max_limit, true);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", max_limit * 1000000);
} else {
size = snprintf(buf, PAGE_SIZE, "\n");
}
@@ -3094,6 +2648,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int limit_type = to_sensor_dev_attr(attr)->index;
uint32_t limit = limit_type << 24;
ssize_t size;
@@ -3111,8 +2666,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
if (is_support_sw_smu(adev)) {
smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
- adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
+ } else if (pp_funcs && pp_funcs->get_power_limit) {
+ pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+ &limit, NULL, false);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
size = snprintf(buf, PAGE_SIZE, "\n");
@@ -3124,6 +2680,42 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
return size;
}
+static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int limit_type = to_sensor_dev_attr(attr)->index;
+ uint32_t limit = limit_type << 24;
+ ssize_t size;
+ int r;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ if (is_support_sw_smu(adev)) {
+ smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_DEFAULT);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else if (pp_funcs && pp_funcs->get_power_limit) {
+ pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+ &limit, NULL, true);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else {
+ size = snprintf(buf, PAGE_SIZE, "\n");
+ }
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return size;
+}
static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3140,6 +2732,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int limit_type = to_sensor_dev_attr(attr)->index;
int err;
u32 value;
@@ -3163,10 +2756,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
}
- if (is_support_sw_smu(adev))
- err = smu_set_power_limit(&adev->smu, value);
- else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
- err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
+ if (pp_funcs && pp_funcs->set_power_limit)
+ err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
else
err = -EINVAL;
@@ -3315,9 +2906,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - pwm1_max: pulse width modulation fan control maximum level (255)
*
- * - fan1_min: an minimum value Unit: revolution/min (RPM)
+ * - fan1_min: a minimum value Unit: revolution/min (RPM)
*
- * - fan1_max: an maxmum value Unit: revolution/max (RPM)
+ * - fan1_max: a maximum value Unit: revolution/max (RPM)
*
* - fan1_input: fan speed in RPM
*
@@ -3367,11 +2958,13 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
+static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
+static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
@@ -3411,11 +3004,13 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
&sensor_dev_attr_power1_cap_min.dev_attr.attr,
&sensor_dev_attr_power1_cap.dev_attr.attr,
+ &sensor_dev_attr_power1_cap_default.dev_attr.attr,
&sensor_dev_attr_power1_label.dev_attr.attr,
&sensor_dev_attr_power2_average.dev_attr.attr,
&sensor_dev_attr_power2_cap_max.dev_attr.attr,
&sensor_dev_attr_power2_cap_min.dev_attr.attr,
&sensor_dev_attr_power2_cap.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_default.dev_attr.attr,
&sensor_dev_attr_power2_label.dev_attr.attr,
&sensor_dev_attr_freq1_input.dev_attr.attr,
&sensor_dev_attr_freq1_label.dev_attr.attr,
@@ -3514,7 +3109,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
- attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
+ attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
return 0;
if (((adev->family == AMDGPU_FAMILY_SI) ||
@@ -3580,6 +3176,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
attr == &sensor_dev_attr_power1_label.dev_attr.attr))
return 0;
@@ -3784,11 +3381,10 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
(flags & clocks[i].flag) ? "On" : "Off");
}
-static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct drm_device *dev = adev_to_drm(adev);
u32 flags = 0;
int r;
@@ -3836,16 +3432,18 @@ out:
return r;
}
-static const struct drm_info_list amdgpu_pm_info_list[] = {
- {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
+
#endif
-int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
+ &amdgpu_debugfs_pm_info_fops);
+
#endif
}
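
Most of the 596 deletions above come from folding the per-clock pp_dpm_* sysfs handlers into the two generic helpers this patch introduces, amdgpu_get_pp_dpm_clock() and amdgpu_set_pp_dpm_clock(), with each attribute reduced to a thin wrapper that passes its enum pp_clock_type. The sketch below condenses that shape from the hunks above for readability; it abbreviates the store path and is illustrative rather than the verbatim post-patch source.

/* Condensed sketch of the consolidated read-side handler after this patch.
 * Simplified from the diff above; error handling mirrors the hunks shown. */
#include "amdgpu.h"
#include "amdgpu_pm.h"

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* A single vendor-neutral callback replaces the old
	 * is_support_sw_smu()/powerplay split. */
	if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/* Each pp_dpm_* show handler becomes a one-line wrapper. */
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

The set path follows the same pattern: amdgpu_set_pp_dpm_sclk(), _mclk(), _socclk(), _fclk(), _vclk(), _dclk(), _dcefclk() and _pcie() all delegate to amdgpu_set_pp_dpm_clock() with the matching PP_* clock type, so the mask parsing and runtime-PM bookkeeping live in one place.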