author | Jonathan Kim <jonathan.kim@amd.com> | 2020-06-18 10:40:14 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2020-10-07 14:44:16 -0400 |
commit | 576e0ec26b68ff4cd58d1222a5988a4d29711e2a (patch) | |
tree | a5ab146e40e316a89feb2eeaafcd7dd3df1867d4 /drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | |
parent | 4b74ed75ae1303022c7a1da07e941302da31db46 (diff) | |
download | linux-576e0ec26b68ff4cd58d1222a5988a4d29711e2a.tar.bz2 | |
drm/amdgpu: fix xgmi perfmon a-b-a problem
Mapping hw counters per event config will cause ABA problems, so map them
per event instead (a toy illustration of the hazard follows the sign-off
trailers below).
v2: Do not start perf counters if the add fails. Make it clearer what
pmc_start is doing.
Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
Reviewed-by: Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
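
The gist of the fix, as visible in the hunks below: the DF perfmon callbacks used to resolve a hardware counter from the event's config value, so two events programmed with the same config could grab or release each other's counter (an ABA-style mix-up). After this patch, pmc_start() with the "add" flag returns the counter index it assigned, the event caches it in hwc->idx, and every later start/read/stop/del call passes that index back. The toy program below is a minimal sketch of the hazard, not driver code; the counter pool, the cntr_add()/cntr_find_by_config() helpers, and the 0x42 config value are all made up for illustration.

```c
#include <stdio.h>

#define NUM_CNTRS 4

struct cntr {
	int in_use;
	unsigned int config;
};

static struct cntr pool[NUM_CNTRS];

/* Allocate a free counter slot for a config; return its index or -1. */
static int cntr_add(unsigned int config)
{
	for (int i = 0; i < NUM_CNTRS; i++) {
		if (!pool[i].in_use) {
			pool[i].in_use = 1;
			pool[i].config = config;
			return i;
		}
	}
	return -1;
}

/* Old-style, ABA-prone lookup: find *some* counter with this config. */
static int cntr_find_by_config(unsigned int config)
{
	for (int i = 0; i < NUM_CNTRS; i++)
		if (pool[i].in_use && pool[i].config == config)
			return i;
	return -1;
}

/* Free a slot so it can be handed to the next event. */
static void cntr_release(int idx)
{
	pool[idx].in_use = 0;
}

int main(void)
{
	unsigned int config = 0x42;	/* two events share one config */

	int a = cntr_add(config);	/* event A gets slot 0 */
	int b = cntr_add(config);	/* event B gets slot 1 */

	cntr_release(a);		/* A is deleted ...              */
	int c = cntr_add(config);	/* ... and event C reuses slot 0 */

	/* Config-keyed lookup can no longer tell B and C apart. */
	printf("lookup by config -> slot %d (B holds %d, C holds %d)\n",
	       cntr_find_by_config(config), b, c);

	/* Index-keyed access, as the patch does via hwc->idx, stays exact. */
	printf("B still reads slot %d, C still reads slot %d\n", b, c);
	return 0;
}
```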
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 42 |
1 file changed, 28 insertions, 14 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 69af462db34d..1b0ec715c8ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -64,6 +64,7 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
 	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
 						  struct amdgpu_pmu_entry,
 						  pmu);
+	int target_cntr = 0;
 
 	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
 		return;
@@ -73,17 +74,24 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		if (!(flags & PERF_EF_RELOAD))
-			pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);
+		if (!(flags & PERF_EF_RELOAD)) {
+			target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
+						hwc->config, 0 /* unused */,
+						1 /* add counter */);
+			if (target_cntr < 0)
+				break;
+
+			hwc->idx = target_cntr;
+		}
 
-		pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
+		pe->adev->df.funcs->pmc_start(pe->adev, hwc->config,
+						hwc->idx, 0);
 		break;
 	default:
 		break;
 	}
 
 	perf_event_update_userpage(event);
-
 }
 
 /* read perf counter */
@@ -101,8 +109,8 @@ static void amdgpu_perf_read(struct perf_event *event)
 
 		switch (pe->pmu_perf_type) {
 		case PERF_TYPE_AMDGPU_DF:
-			pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
-							  &count);
+			pe->adev->df.funcs->pmc_get_count(pe->adev,
+						hwc->config, hwc->idx, &count);
 			break;
 		default:
 			count = 0;
@@ -126,7 +134,8 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
+		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
+						0);
 		break;
 	default:
 		break;
@@ -142,12 +151,11 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
 	hwc->state |= PERF_HES_UPTODATE;
 }
 
-/* add perf counter */
+/* add perf counter */
 static int amdgpu_perf_add(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	int retval;
-
+	int retval = 0, target_cntr;
 	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
 						  struct amdgpu_pmu_entry,
 						  pmu);
@@ -156,8 +164,14 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		retval = pe->adev->df.funcs->pmc_start(pe->adev,
-						hwc->config, 1);
+		target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
+						hwc->config, 0 /* unused */,
+						1 /* add counter */);
+		if (target_cntr < 0)
+			retval = target_cntr;
+		else
+			hwc->idx = target_cntr;
+
 		break;
 	default:
 		return 0;
@@ -170,7 +184,6 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
 		amdgpu_perf_start(event, PERF_EF_RELOAD);
 
 	return retval;
-
 }
 
 /* delete perf counter */
@@ -185,7 +198,8 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
+		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
+						1);
 		break;
 	default:
 		break;
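
Taken together, the hunks give each perf_event its own counter slot. The condensed sketch below mirrors the resulting lifecycle with the df.funcs callbacks replaced by stand-alone stubs (pmc_start(), pmc_get_count() and pmc_stop() here are illustrative stand-ins, not the amdgpu definitions): the add path allocates a counter once and returns its index, the event keeps it in an idx variable, and start/read/stop/del pass that same index back instead of re-resolving by config.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for the df.funcs callbacks touched above; the
 * real amdgpu implementations take an adev pointer and program hardware.
 */
static int pmc_start(uint64_t config, int counter_idx, int is_add)
{
	if (is_add)
		return 2;	/* pretend the hardware assigned slot 2 */
	printf("start config=%#llx on counter %d\n",
	       (unsigned long long)config, counter_idx);
	return 0;
}

static void pmc_get_count(uint64_t config, int counter_idx, uint64_t *count)
{
	(void)config;
	*count = 1234 + counter_idx;	/* fake a read of the assigned slot */
}

static void pmc_stop(uint64_t config, int counter_idx, int is_remove)
{
	(void)config;
	printf("%s counter %d\n", is_remove ? "remove" : "stop", counter_idx);
}

int main(void)
{
	uint64_t config = 0x0007, count;
	int idx;

	/* add path: allocate a counter once and remember its index */
	idx = pmc_start(config, 0 /* unused */, 1 /* add counter */);
	if (idx < 0)
		return 1;	/* v2 behaviour: don't start if the add failed */

	/* start, read, stop and del all reuse that same index */
	pmc_start(config, idx, 0);
	pmc_get_count(config, idx, &count);
	printf("count=%llu\n", (unsigned long long)count);
	pmc_stop(config, idx, 0 /* stop */);
	pmc_stop(config, idx, 1 /* remove */);
	return 0;
}
```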