| author    | Dave Airlie <airlied@redhat.com> | 2018-01-09 10:09:13 +1000 |
|-----------|----------------------------------|---------------------------|
| committer | Dave Airlie <airlied@redhat.com> | 2018-01-09 10:09:13 +1000 |
| commit    | bd3c0094a143300b74f3cc8c9cf2b21ed686047f (patch) | |
| tree      | cb97fbd1f0a7eed1c3bb2d3e8b9358ee90bf0780 /drivers/gpu/drm/amd | |
| parent    | b0caa1333b6d2d928a00304e9fb6674526c37b79 (diff) | |
| parent    | 104bd2ca1124dfd9aa904d5f5a96253ef2b580f6 (diff) | |
| download  | linux-bd3c0094a143300b74f3cc8c9cf2b21ed686047f.tar.bz2 | |
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
Last few updates for 4.16:
- Misc fixes for amdgpu
- Enable swapout for reserved BOs during allocation for ttm
- Misc cleanups for ttm
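
Several of the ttm commits in this merge replace bare flags with a ttm_operation_ctx that is threaded through ttm_tt_bind, ttm_tt_populate and the ttm_mem_global_alloc paths. Below is a minimal standalone C sketch of that plumbing pattern; the struct fields and stub functions are illustrative stand-ins, not the real TTM API:

    #include <stdbool.h>
    #include <stdio.h>

    /* context describing how one memory operation may behave */
    struct ttm_operation_ctx {
        bool interruptible; /* may wait interruptibly */
        bool no_wait_gpu;   /* fail rather than wait for the GPU */
    };

    /* stand-in for ttm_tt_populate(): allocation decisions (e.g. whether
     * swapout is allowed) can consult ctx instead of implicit globals */
    static int tt_populate(struct ttm_operation_ctx *ctx)
    {
        printf("populate: no_wait_gpu=%d\n", ctx->no_wait_gpu);
        return 0;
    }

    /* stand-in for ttm_tt_bind(): passes the same ctx down */
    static int tt_bind(struct ttm_operation_ctx *ctx)
    {
        if (tt_populate(ctx)) /* populate before binding, same policy */
            return -1;
        printf("bind: interruptible=%d\n", ctx->interruptible);
        return 0;
    }

    int main(void)
    {
        struct ttm_operation_ctx ctx = { .interruptible = true };
        return tt_bind(&ctx);
    }

The point of the design is that every allocation made on behalf of one request sees one consistent policy, which is what lets the series enable swapout for reserved BOs during allocation.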
* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux: (24 commits)
drm/amdgpu: Correct the IB size of bo update mapping.
drm/ttm: enable swapout for reserved BOs during allocation
drm/ttm: add new function to check if bo is allowable to evict or swapout
drm/ttm: use an operation ctx for ttm_tt_bind
drm/ttm: use an operation ctx for ttm_tt_populate in ttm_bo_driver (v2)
drm/ttm: use an operation ctx for ttm_mem_global_alloc_page
drm/ttm: use an operation ctx for ttm_mem_global_alloc
drm/ttm: call ttm_bo_swapout directly when ttm shrink
drm/vmwgfx: remove the default io_mem_pfn set
drm/virtio: remove the default io_mem_pfn set
drm/radeon: remove the default io_mem_pfn set
drm/qxl: remove the default io_mem_pfn set
drm/nouveau: remove the default io_mem_pfn set
drm/mgag200: remove the default io_mem_pfn set
drm/cirrus: remove the default io_mem_pfn set
drm/bochs: remove the default io_mem_pfn set
drm/ast: remove the default io_mem_pfn set
drm/ttm: add ttm_bo_io_mem_pfn to check io_mem_pfn
drm/amdgpu: fix VM faults with per VM BOs
drm/ttm: drop the spin in delayed delete if the trylock doesn't work
...
Diffstat (limited to 'drivers/gpu/drm/amd')
44 files changed, 810 insertions, 668 deletions
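
The largest piece of the diff below is the new drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c, which moves the PASID and VMID managers out of amdgpu_vm.c. Its amdgpu_pasid_alloc() tries the range [2^(bits-1), 2^bits) for the requested width first and only falls back to narrower power-of-two ranges when a range is exhausted, so small PASIDs stay available. A standalone model of that strategy, with a toy bitmap standing in for the kernel's global IDA (all names here are illustrative):

    #include <stdio.h>

    #define MAX_BITS 16
    static unsigned char used[1u << MAX_BITS]; /* toy allocator state */

    static int pasid_alloc(unsigned int bits)
    {
        if (bits == 0)
            return -1;       /* mirrors the -EINVAL case */
        if (bits > MAX_BITS)
            bits = MAX_BITS; /* the kernel clamps to 31 bits */
        for (; bits > 0; bits--) {
            unsigned int lo = 1u << (bits - 1), hi = 1u << bits;
            for (unsigned int p = lo; p < hi; p++) {
                if (!used[p]) {
                    used[p] = 1;
                    return (int)p;
                }
            }
            /* range full (the -ENOSPC case): retry one bit narrower */
        }
        return -1;
    }

    int main(void)
    {
        printf("%d\n", pasid_alloc(16)); /* 32768: smallest 16-bit PASID */
        printf("%d\n", pasid_alloc(16)); /* 32769 */
        printf("%d\n", pasid_alloc(8));  /* 128: 8-bit range untouched */
        return 0;
    }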
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index d8da12c114b1..d6e5b7273853 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -52,7 +52,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
-	amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o
+	amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \
+	amdgpu_ids.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e40c5df55f95..d5a2eefd6c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -351,7 +351,7 @@ struct amdgpu_gart_funcs {
 	/* get the pde for a given mc addr */
 	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
 			   u64 *dst, u64 *flags);
-	uint32_t (*get_invalidate_req)(unsigned int vm_id);
+	uint32_t (*get_invalidate_req)(unsigned int vmid);
 };
 
 /* provided by the ih block */
@@ -1125,7 +1125,7 @@ struct amdgpu_job {
 	void *owner;
 	uint64_t fence_ctx; /* the fence_context this job uses */
 	bool vm_needs_flush;
-	unsigned vm_id;
+	unsigned vmid;
 	uint64_t vm_pd_addr;
 	uint32_t gds_base, gds_size;
 	uint32_t gws_base, gws_size;
@@ -1850,7 +1850,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
+#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 3d60e1fd9b68..a9e6aea0e5f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -176,8 +176,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_local_mem_info = get_local_mem_info,
 	.get_gpu_clock_counter = get_gpu_clock_counter,
 	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_vm_alloc_pasid,
-	.free_pasid = amdgpu_vm_free_pasid,
+	.alloc_pasid = amdgpu_pasid_alloc,
+	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_pipeline = kgd_init_pipeline,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 66b513e96437..b127259d7d85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -135,8 +135,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_local_mem_info = get_local_mem_info,
 	.get_gpu_clock_counter = get_gpu_clock_counter,
 	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_vm_alloc_pasid,
-	.free_pasid = amdgpu_vm_free_pasid,
+	.alloc_pasid = amdgpu_pasid_alloc,
+	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_pipeline = kgd_init_pipeline,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 13607e28c1c9..4466f3535e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -801,6 +801,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		else
 			strcpy(fw_name, "amdgpu/vega10_smc.bin");
 		break;
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
 	case CHIP_RAVEN:
 		adev->pm.fw_version = info->version;
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 0cf86eb357d6..a162d87ca0c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -149,7 +149,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 	}
 
-	if (vm && !job->vm_id) {
+	if (vm && !job->vmid) {
 		dev_err(adev->dev, "VM IB without ID\n");
 		return -EINVAL;
 	}
@@ -211,7 +211,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 			continue;
 
-		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
 				    need_ctx_switch);
 		need_ctx_switch = false;
 	}
@@ -229,9 +229,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	r = amdgpu_fence_emit(ring, f);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
-		if (job && job->vm_id)
-			amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
-					   job->vm_id);
+		if (job && job->vmid)
+			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
 		amdgpu_ring_undo(ring);
 		return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
new file mode 100644
index 000000000000..16884a0b677b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ids.h"
+
+#include <linux/idr.h>
+#include <linux/dma-fence-array.h>
+#include <drm/drmP.h>
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_pasid_ida);
+
+/**
+ * amdgpu_pasid_alloc - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_pasid_alloc(unsigned int bits)
+{
+	int pasid = -EINVAL;
+
+	for (bits = min(bits, 31U); bits > 0; bits--) {
+		pasid = ida_simple_get(&amdgpu_pasid_ida,
+				       1U << (bits - 1), 1U << bits,
+				       GFP_KERNEL);
+		if (pasid != -ENOSPC)
+			break;
+	}
+
+	return pasid;
+}
+
+/**
+ * amdgpu_pasid_free - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_pasid_free(unsigned int pasid)
+{
+	ida_simple_remove(&amdgpu_pasid_ida, pasid);
+}
+
+/*
+ * VMID manager
+ *
+ * VMIDs are a per VMHUB identifier for page tables handling.
+ */
+
+/**
+ * amdgpu_vmid_had_gpu_reset - check if reset occured since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if GPU reset occured since last use of the VMID.
+ */
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+			       struct amdgpu_vmid *id)
+{
+	return id->current_gpu_reset_count !=
+		atomic_read(&adev->gpu_reset_counter);
+}
+
+/* idr_mgr->lock must be held */
+static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+					    struct amdgpu_ring *ring,
+					    struct amdgpu_sync *sync,
+					    struct dma_fence *fence,
+					    struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	uint64_t fence_context = adev->fence_context + ring->idx;
+	struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct dma_fence *updates = sync->last_vm_update;
+	int r = 0;
+	struct dma_fence *flushed, *tmp;
+	bool needs_flush = vm->use_cpu_for_update;
+
+	flushed = id->flushed_updates;
+	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
+	    (atomic64_read(&id->owner) != vm->entity.fence_context) ||
+	    (job->vm_pd_addr != id->pd_gpu_addr) ||
+	    (updates && (!flushed || updates->context != flushed->context ||
+			dma_fence_is_later(updates, flushed))) ||
+	    (!id->last_flush || (id->last_flush->context != fence_context &&
+				 !dma_fence_is_signaled(id->last_flush)))) {
+		needs_flush = true;
+		/* to prevent one context starved by another context */
+		id->pd_gpu_addr = 0;
+		tmp = amdgpu_sync_peek_fence(&id->active, ring);
+		if (tmp) {
+			r = amdgpu_sync_fence(adev, sync, tmp, false);
+			return r;
+		}
+	}
+
+	/* Good we can use this VMID. Remember this submission as
+	 * user of the VMID.
+	 */
+	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+	if (r)
+		goto out;
+
+	if (updates && (!flushed || updates->context != flushed->context ||
+			dma_fence_is_later(updates, flushed))) {
+		dma_fence_put(id->flushed_updates);
+		id->flushed_updates = dma_fence_get(updates);
+	}
+	id->pd_gpu_addr = job->vm_pd_addr;
+	atomic64_set(&id->owner, vm->entity.fence_context);
+	job->vm_needs_flush = needs_flush;
+	if (needs_flush) {
+		dma_fence_put(id->last_flush);
+		id->last_flush = NULL;
+	}
+	job->vmid = id - id_mgr->ids;
+	trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+	return r;
+}
+
+/**
+ * amdgpu_vm_grab_id - allocate the next free VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ *
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ */
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		     struct amdgpu_sync *sync, struct dma_fence *fence,
+		     struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	uint64_t fence_context = adev->fence_context + ring->idx;
+	struct dma_fence *updates = sync->last_vm_update;
+	struct amdgpu_vmid *id, *idle;
+	struct dma_fence **fences;
+	unsigned i;
+	int r = 0;
+
+	mutex_lock(&id_mgr->lock);
+	if (vm->reserved_vmid[vmhub]) {
+		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
+		mutex_unlock(&id_mgr->lock);
+		return r;
+	}
+	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+	if (!fences) {
+		mutex_unlock(&id_mgr->lock);
+		return -ENOMEM;
+	}
+	/* Check if we have an idle VMID */
+	i = 0;
+	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
+		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+		if (!fences[i])
+			break;
+		++i;
+	}
+
+	/* If we can't find a idle VMID to use, wait till one becomes available */
+	if (&idle->list == &id_mgr->ids_lru) {
+		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+		struct dma_fence_array *array;
+		unsigned j;
+
+		for (j = 0; j < i; ++j)
+			dma_fence_get(fences[j]);
+
+		array = dma_fence_array_create(i, fences, fence_context,
+					       seqno, true);
+		if (!array) {
+			for (j = 0; j < i; ++j)
+				dma_fence_put(fences[j]);
+			kfree(fences);
+			r = -ENOMEM;
+			goto error;
+		}
+
+
+		r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
+		dma_fence_put(&array->base);
+		if (r)
+			goto error;
+
+		mutex_unlock(&id_mgr->lock);
+		return 0;
+
+	}
+	kfree(fences);
+
+	job->vm_needs_flush = vm->use_cpu_for_update;
+	/* Check if we can use a VMID already assigned to this VM */
+	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
+		struct dma_fence *flushed;
+		bool needs_flush = vm->use_cpu_for_update;
+
+		/* Check all the prerequisites to using this VMID */
+		if (amdgpu_vmid_had_gpu_reset(adev, id))
+			continue;
+
+		if (atomic64_read(&id->owner) != vm->entity.fence_context)
+			continue;
+
+		if (job->vm_pd_addr != id->pd_gpu_addr)
+			continue;
+
+		if (!id->last_flush ||
+		    (id->last_flush->context != fence_context &&
+		     !dma_fence_is_signaled(id->last_flush)))
+			needs_flush = true;
+
+		flushed = id->flushed_updates;
+		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+			needs_flush = true;
+
+		/* Concurrent flushes are only possible starting with Vega10 */
+		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
+			continue;
+
+		/* Good we can use this VMID. Remember this submission as
+		 * user of the VMID.
+		 */
+		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+		if (r)
+			goto error;
+
+		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
+			dma_fence_put(id->flushed_updates);
+			id->flushed_updates = dma_fence_get(updates);
+		}
+
+		if (needs_flush)
+			goto needs_flush;
+		else
+			goto no_flush_needed;
+
+	};
+
+	/* Still no ID to use? Then use the idle one found earlier */
+	id = idle;
+
+	/* Remember this submission as user of the VMID */
+	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+	if (r)
+		goto error;
+
+	id->pd_gpu_addr = job->vm_pd_addr;
+	dma_fence_put(id->flushed_updates);
+	id->flushed_updates = dma_fence_get(updates);
+	atomic64_set(&id->owner, vm->entity.fence_context);
+
+needs_flush:
+	job->vm_needs_flush = true;
+	dma_fence_put(id->last_flush);
+	id->last_flush = NULL;
+
+no_flush_needed:
+	list_move_tail(&id->list, &id_mgr->ids_lru);
+
+	job->vmid = id - id_mgr->ids;
+	trace_amdgpu_vm_grab_id(vm, ring, job);
+
+error:
+	mutex_unlock(&id_mgr->lock);
+	return r;
+}
+
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+			       struct amdgpu_vm *vm,
+			       unsigned vmhub)
+{
+	struct amdgpu_vmid_mgr *id_mgr;
+	struct amdgpu_vmid *idle;
+	int r = 0;
+
+	id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	mutex_lock(&id_mgr->lock);
+	if (vm->reserved_vmid[vmhub])
+		goto unlock;
+	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
+	    AMDGPU_VM_MAX_RESERVED_VMID) {
+		DRM_ERROR("Over limitation of reserved vmid\n");
+		atomic_dec(&id_mgr->reserved_vmid_num);
+		r = -EINVAL;
+		goto unlock;
+	}
+	/* Select the first entry VMID */
+	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+	list_del_init(&idle->list);
+	vm->reserved_vmid[vmhub] = idle;
+	mutex_unlock(&id_mgr->lock);
+
+	return 0;
+unlock:
+	mutex_unlock(&id_mgr->lock);
+	return r;
+}
+
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+			       struct amdgpu_vm *vm,
+			       unsigned vmhub)
+{
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+
+	mutex_lock(&id_mgr->lock);
+	if (vm->reserved_vmid[vmhub]) {
+		list_add(&vm->reserved_vmid[vmhub]->list,
+			 &id_mgr->ids_lru);
+		vm->reserved_vmid[vmhub] = NULL;
+		atomic_dec(&id_mgr->reserved_vmid_num);
+	}
+	mutex_unlock(&id_mgr->lock);
+}
+
+/**
+ * amdgpu_vmid_reset - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ * @vmid: vmid number to use
+ *
+ * Reset saved GDW, GWS and OA to force switch on next flush.
+ */
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+		       unsigned vmid)
+{
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vmid *id = &id_mgr->ids[vmid];
+
+	atomic64_set(&id->owner, 0);
+	id->gds_base = 0;
+	id->gds_size = 0;
+	id->gws_base = 0;
+	id->gws_size = 0;
+	id->oa_base = 0;
+	id->oa_size = 0;
+}
+
+/**
+ * amdgpu_vmid_reset_all - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset VMID to force flush on next use
+ */
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
+{
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vmid_mgr *id_mgr =
+			&adev->vm_manager.id_mgr[i];
+
+		for (j = 1; j < id_mgr->num_ids; ++j)
+			amdgpu_vmid_reset(adev, i, j);
+	}
+}
+
+/**
+ * amdgpu_vmid_mgr_init - init the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
+{
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vmid_mgr *id_mgr =
+			&adev->vm_manager.id_mgr[i];
+
+		mutex_init(&id_mgr->lock);
+		INIT_LIST_HEAD(&id_mgr->ids_lru);
+		atomic_set(&id_mgr->reserved_vmid_num, 0);
+
+		/* skip over VMID 0, since it is the system VM */
+		for (j = 1; j < id_mgr->num_ids; ++j) {
+			amdgpu_vmid_reset(adev, i, j);
+			amdgpu_sync_create(&id_mgr->ids[i].active);
+			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+		}
+	}
+
+	adev->vm_manager.fence_context =
+		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		adev->vm_manager.seqno[i] = 0;
+}
+
+/**
+ * amdgpu_vmid_mgr_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
+{
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vmid_mgr *id_mgr =
+			&adev->vm_manager.id_mgr[i];
+
+		mutex_destroy(&id_mgr->lock);
+		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
+			struct amdgpu_vmid *id = &id_mgr->ids[j];
+
+			amdgpu_sync_free(&id->active);
+			dma_fence_put(id->flushed_updates);
+			dma_fence_put(id->last_flush);
+		}
+	}
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
new file mode 100644
index 000000000000..ad931fa570b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_IDS_H__
+#define __AMDGPU_IDS_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/dma-fence.h>
+
+#include "amdgpu_sync.h"
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VMID	16
+
+struct amdgpu_device;
+struct amdgpu_vm;
+struct amdgpu_ring;
+struct amdgpu_sync;
+struct amdgpu_job;
+
+struct amdgpu_vmid {
+	struct list_head	list;
+	struct amdgpu_sync	active;
+	struct dma_fence	*last_flush;
+	atomic64_t		owner;
+
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct dma_fence	*flushed_updates;
+
+	uint32_t		current_gpu_reset_count;
+
+	uint32_t		gds_base;
+	uint32_t		gds_size;
+	uint32_t		gws_base;
+	uint32_t		gws_size;
+	uint32_t		oa_base;
+	uint32_t		oa_size;
+};
+
+struct amdgpu_vmid_mgr {
+	struct mutex		lock;
+	unsigned		num_ids;
+	struct list_head	ids_lru;
+	struct amdgpu_vmid	ids[AMDGPU_NUM_VMID];
+	atomic_t		reserved_vmid_num;
+};
+
+int amdgpu_pasid_alloc(unsigned int bits);
+void amdgpu_pasid_free(unsigned int pasid);
+
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+			       struct amdgpu_vmid *id);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+			       struct amdgpu_vm *vm,
+			       unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+			       struct amdgpu_vm *vm,
+			       unsigned vmhub);
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		     struct amdgpu_sync *sync, struct dma_fence *fence,
+		     struct amdgpu_job *job);
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+		       unsigned vmid);
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
+
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index ada89358e220..29cf10927a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -105,8 +105,8 @@ struct amdgpu_iv_entry {
 	unsigned client_id;
 	unsigned src_id;
 	unsigned ring_id;
-	unsigned vm_id;
-	unsigned vm_id_src;
+	unsigned vmid;
+	unsigned vmid_src;
 	uint64_t timestamp;
 	unsigned timestamp_src;
 	unsigned pas_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 56d9ee5013a9..2bd56760c744 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -158,12 +158,12 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 		}
 	}
 
-	while (fence == NULL && vm && !job->vm_id) {
+	while (fence == NULL && vm && !job->vmid) {
 		struct amdgpu_ring *ring = job->ring;
 
-		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-				      &job->base.s_fence->finished,
-				      job);
+		r = amdgpu_vmid_grab(vm, ring, &job->sync,
+				     &job->base.s_fence->finished,
+				     job);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 010f69084af5..102dad3edf6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -121,11 +121,11 @@ struct amdgpu_ring_funcs {
 	/* command emit functions */
 	void (*emit_ib)(struct amdgpu_ring *ring,
 			struct amdgpu_ib *ib,
-			unsigned vm_id, bool ctx_switch);
+			unsigned vmid, bool ctx_switch);
 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
 			   uint64_t seq, unsigned flags);
 	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
-	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
 			      uint64_t pd_addr);
 	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
 	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 06525f2c36c3..cace7a93fc94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -82,8 +82,8 @@ TRACE_EVENT(amdgpu_iv,
 			     __field(unsigned, client_id)
 			     __field(unsigned, src_id)
 			     __field(unsigned, ring_id)
-			     __field(unsigned, vm_id)
-			     __field(unsigned, vm_id_src)
+			     __field(unsigned, vmid)
+			     __field(unsigned, vmid_src)
 			     __field(uint64_t, timestamp)
 			     __field(unsigned, timestamp_src)
 			     __field(unsigned, pas_id)
@@ -93,8 +93,8 @@ TRACE_EVENT(amdgpu_iv,
 			   __entry->client_id = iv->client_id;
 			   __entry->src_id = iv->src_id;
 			   __entry->ring_id = iv->ring_id;
-			   __entry->vm_id = iv->vm_id;
-			   __entry->vm_id_src = iv->vm_id_src;
+			   __entry->vmid = iv->vmid;
+			   __entry->vmid_src = iv->vmid_src;
 			   __entry->timestamp = iv->timestamp;
 			   __entry->timestamp_src = iv->timestamp_src;
 			   __entry->pas_id = iv->pas_id;
@@ -103,9 +103,9 @@ TRACE_EVENT(amdgpu_iv,
 			   __entry->src_data[2] = iv->src_data[2];
 			   __entry->src_data[3] = iv->src_data[3];
 			   ),
-	    TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
+	    TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
 		      __entry->client_id, __entry->src_id,
-		      __entry->ring_id, __entry->vm_id,
+		      __entry->ring_id, __entry->vmid,
 		      __entry->timestamp, __entry->pas_id,
 		      __entry->src_data[0], __entry->src_data[1],
 		      __entry->src_data[2], __entry->src_data[3])
@@ -219,7 +219,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 	    TP_STRUCT__entry(
 			     __field(struct amdgpu_vm *, vm)
 			     __field(u32, ring)
-			     __field(u32, vm_id)
+			     __field(u32, vmid)
 			     __field(u32, vm_hub)
 			     __field(u64, pd_addr)
 			     __field(u32, needs_flush)
@@ -228,13 +228,13 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 	    TP_fast_assign(
 			   __entry->vm = vm;
 			   __entry->ring = ring->idx;
-			   __entry->vm_id = job->vm_id;
+			   __entry->vmid = job->vmid;
 			   __entry->vm_hub = ring->funcs->vmhub,
 			   __entry->pd_addr = job->vm_pd_addr;
 			   __entry->needs_flush = job->vm_needs_flush;
 			   ),
 	    TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
-		      __entry->vm, __entry->ring, __entry->vm_id,
+		      __entry->vm, __entry->ring, __entry->vmid,
 		      __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
 );
 
@@ -357,24 +357,24 @@ TRACE_EVENT(amdgpu_vm_copy_ptes,
 );
 
 TRACE_EVENT(amdgpu_vm_flush,
-	    TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
+	    TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
 		     uint64_t pd_addr),
-	    TP_ARGS(ring, vm_id, pd_addr),
+	    TP_ARGS(ring, vmid, pd_addr),
 	    TP_STRUCT__entry(
 			     __field(u32, ring)
-			     __field(u32, vm_id)
+			     __field(u32, vmid)
 			     __field(u32, vm_hub)
 			     __field(u64, pd_addr)
 			     ),
 	    TP_fast_assign(
 			   __entry->ring = ring->idx;
-			   __entry->vm_id = vm_id;
+			   __entry->vmid = vmid;
 			   __entry->vm_hub = ring->funcs->vmhub;
 			   __entry->pd_addr = pd_addr;
 			   ),
 	    TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
-		      __entry->ring, __entry->vm_id,
+		      __entry->ring, __entry->vmid,
 		      __entry->vm_hub,__entry->pd_addr)
 );
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f1b7d987bd57..e4bb435e614b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -497,7 +497,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 		goto out_cleanup;
 	}
 
-	r = ttm_tt_bind(bo->ttm, &tmp_mem);
+	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -990,7 +990,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 	return &gtt->ttm.ttm;
 }
 
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1018,11 +1019,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, adev->dev);
+		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
 	}
 #endif
 
-	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
+	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
 }
 
 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 9857d482c942..55a726a322e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -991,7 +991,7 @@ out:
  *
  */
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-			     unsigned vm_id, bool ctx_switch)
+			     unsigned vmid, bool ctx_switch)
 {
 	amdgpu_ring_write(ring, VCE_CMD_IB);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 162cae94e3b1..0fd378ae92c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -63,7 +63,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-			     unsigned vm_id, bool ctx_switch);
+			     unsigned vmid, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 				unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 398abbcbf029..d4510807a692 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -34,52 +34,6 @@
 #include "amdgpu_trace.h"
 
 /*
- * PASID manager
- *
- * PASIDs are global address space identifiers that can be shared
- * between the GPU, an IOMMU and the driver. VMs on different devices
- * may use the same PASID if they share the same address
- * space. Therefore PASIDs are allocated using a global IDA. VMs are
- * looked up from the PASID per amdgpu_device.
- */
-static DEFINE_IDA(amdgpu_vm_pasid_ida);
-
-/**
- * amdgpu_vm_alloc_pasid - Allocate a PASID
- * @bits: Maximum width of the PASID in bits, must be at least 1
- *
- * Allocates a PASID of the given width while keeping smaller PASIDs
- * available if possible.
- *
- * Returns a positive integer on success. Returns %-EINVAL if bits==0.
- * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
- * memory allocation failure.
- */
-int amdgpu_vm_alloc_pasid(unsigned int bits)
-{
-	int pasid = -EINVAL;
-
-	for (bits = min(bits, 31U); bits > 0; bits--) {
-		pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
-				       1U << (bits - 1), 1U << bits,
-				       GFP_KERNEL);
-		if (pasid != -ENOSPC)
-			break;
-	}
-
-	return pasid;
-}
-
-/**
- * amdgpu_vm_free_pasid - Free a PASID
- * @pasid: PASID to free
- */
-void amdgpu_vm_free_pasid(unsigned int pasid)
-{
-	ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
-}
-
-/*
  * GPUVM
  * GPUVM is similar to the legacy gart on older asics, however
  * rather than there being a single global gart table
@@ -448,286 +402,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_vm_had_gpu_reset - check if reset occured since last use
- *
- * @adev: amdgpu_device pointer
- * @id: VMID structure
- *
- * Check if GPU reset occured since last use of the VMID.
- */
-static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
-				    struct amdgpu_vm_id *id)
-{
-	return id->current_gpu_reset_count !=
-		atomic_read(&adev->gpu_reset_counter);
-}
-
-static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
-{
-	return !!vm->reserved_vmid[vmhub];
-}
-
-/* idr_mgr->lock must be held */
-static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
-					       struct amdgpu_ring *ring,
-					       struct amdgpu_sync *sync,
-					       struct dma_fence *fence,
-					       struct amdgpu_job *job)
-{
-	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
-	uint64_t fence_context = adev->fence_context + ring->idx;
-	struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
-	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct dma_fence *updates = sync->last_vm_update;
-	int r = 0;
-	struct dma_fence *flushed, *tmp;
-	bool needs_flush = vm->use_cpu_for_update;
-
-	flushed = id->flushed_updates;
-	if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
-	    (atomic64_read(&id->owner) != vm->client_id) ||
-	    (job->vm_pd_addr != id->pd_gpu_addr) ||
-	    (updates && (!flushed || updates->context != flushed->context ||
-			dma_fence_is_later(updates, flushed))) ||
-	    (!id->last_flush || (id->last_flush->context != fence_context &&
-				 !dma_fence_is_signaled(id->last_flush)))) {
-		needs_flush = true;
-		/* to prevent one context starved by another context */
-		id->pd_gpu_addr = 0;
-		tmp = amdgpu_sync_peek_fence(&id->active, ring);
-		if (tmp) {
-			r = amdgpu_sync_fence(adev, sync, tmp, false);
-			return r;
-		}
-	}
-
-	/* Good we can use this VMID. Remember this submission as
-	 * user of the VMID.
-	 */
-	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
-	if (r)
-		goto out;
-
-	if (updates && (!flushed || updates->context != flushed->context ||
-			dma_fence_is_later(updates, flushed))) {
-		dma_fence_put(id->flushed_updates);
-		id->flushed_updates = dma_fence_get(updates);
-	}
-	id->pd_gpu_addr = job->vm_pd_addr;
-	atomic64_set(&id->owner, vm->client_id);
-	job->vm_needs_flush = needs_flush;
-	if (needs_flush) {
-		dma_fence_put(id->last_flush);
-		id->last_flush = NULL;
-	}
-	job->vm_id = id - id_mgr->ids;
-	trace_amdgpu_vm_grab_id(vm, ring, job);
-out:
-	return r;
-}
-
-/**
- * amdgpu_vm_grab_id - allocate the next free VMID
- *
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
- *
- * Allocate an id for the vm, adding fences to the sync obj as necessary.
- */
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct dma_fence *fence,
-		      struct amdgpu_job *job)
-{
-	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
-	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	uint64_t fence_context = adev->fence_context + ring->idx;
-	struct dma_fence *updates = sync->last_vm_update;
-	struct amdgpu_vm_id *id, *idle;
-	struct dma_fence **fences;
-	unsigned i;
-	int r = 0;
-
-	mutex_lock(&id_mgr->lock);
-	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
-		r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
-		mutex_unlock(&id_mgr->lock);
-		return r;
-	}
-	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-	if (!fences) {
-		mutex_unlock(&id_mgr->lock);
-		return -ENOMEM;
-	}
-	/* Check if we have an idle VMID */
-	i = 0;
-	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
-		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
-		if (!fences[i])
-			break;
-		++i;
-	}
-
-	/* If we can't find a idle VMID to use, wait till one becomes available */
-	if (&idle->list == &id_mgr->ids_lru) {
-		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
-		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-		struct dma_fence_array *array;
-		unsigned j;
-
-		for (j = 0; j < i; ++j)
-			dma_fence_get(fences[j]);
-
-		array = dma_fence_array_create(i, fences, fence_context,
-					       seqno, true);
-		if (!array) {
-			for (j = 0; j < i; ++j)
-				dma_fence_put(fences[j]);
-			kfree(fences);
-			r = -ENOMEM;
-			goto error;
-		}
-
-
-		r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
-		dma_fence_put(&array->base);
-		if (r)
-			goto error;
-
-		mutex_unlock(&id_mgr->lock);
-		return 0;
-
-	}
-	kfree(fences);
-
-	job->vm_needs_flush = vm->use_cpu_for_update;
-	/* Check if we can use a VMID already assigned to this VM */
-	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
-		struct dma_fence *flushed;
-		bool needs_flush = vm->use_cpu_for_update;
-
-		/* Check all the prerequisites to using this VMID */
-		if (amdgpu_vm_had_gpu_reset(adev, id))
-			continue;
-
-		if (atomic64_read(&id->owner) != vm->client_id)
-			continue;
-
-		if (job->vm_pd_addr != id->pd_gpu_addr)
-			continue;
-
-		if (!id->last_flush ||
-		    (id->last_flush->context != fence_context &&
-		     !dma_fence_is_signaled(id->last_flush)))
-			needs_flush = true;
-
-		flushed = id->flushed_updates;
-		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
-			needs_flush = true;
-
-		/* Concurrent flushes are only possible starting with Vega10 */
-		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
-			continue;
-
-		/* Good we can use this VMID. Remember this submission as
-		 * user of the VMID.
-		 */
-		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
-		if (r)
-			goto error;
-
-		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
-			dma_fence_put(id->flushed_updates);
-			id->flushed_updates = dma_fence_get(updates);
-		}
-
-		if (needs_flush)
-			goto needs_flush;
-		else
-			goto no_flush_needed;
-
-	};
-
-	/* Still no ID to use? Then use the idle one found earlier */
-	id = idle;
-
-	/* Remember this submission as user of the VMID */
-	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
-	if (r)
-		goto error;
-
-	id->pd_gpu_addr = job->vm_pd_addr;
-	dma_fence_put(id->flushed_updates);
-	id->flushed_updates = dma_fence_get(updates);
-	atomic64_set(&id->owner, vm->client_id);
-
-needs_flush:
-	job->vm_needs_flush = true;
-	dma_fence_put(id->last_flush);
-	id->last_flush = NULL;
-
-no_flush_needed:
-	list_move_tail(&id->list, &id_mgr->ids_lru);
-
-	job->vm_id = id - id_mgr->ids;
-	trace_amdgpu_vm_grab_id(vm, ring, job);
-
-error:
-	mutex_unlock(&id_mgr->lock);
-	return r;
-}
-
-static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
-					  struct amdgpu_vm *vm,
-					  unsigned vmhub)
-{
-	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-
-	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub]) {
-		list_add(&vm->reserved_vmid[vmhub]->list,
-			 &id_mgr->ids_lru);
-		vm->reserved_vmid[vmhub] = NULL;
-		atomic_dec(&id_mgr->reserved_vmid_num);
-	}
-	mutex_unlock(&id_mgr->lock);
-}
-
-static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
-					 struct amdgpu_vm *vm,
-					 unsigned vmhub)
-{
-	struct amdgpu_vm_id_manager *id_mgr;
-	struct amdgpu_vm_id *idle;
-	int r = 0;
-
-	id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub])
-		goto unlock;
-	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-	    AMDGPU_VM_MAX_RESERVED_VMID) {
-		DRM_ERROR("Over limitation of reserved vmid\n");
-		atomic_dec(&id_mgr->reserved_vmid_num);
-		r = -EINVAL;
-		goto unlock;
-	}
-	/* Select the first entry VMID */
-	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
-	list_del_init(&idle->list);
-	vm->reserved_vmid[vmhub] = idle;
-	mutex_unlock(&id_mgr->lock);
-
-	return 0;
-unlock:
-	mutex_unlock(&id_mgr->lock);
-	return r;
-}
-
-/**
  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
  *
  * @adev: amdgpu_device pointer
@@ -767,14 +441,14 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
-	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct amdgpu_vm_id *id;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vmid *id;
 	bool gds_switch_needed;
 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
 
-	if (job->vm_id == 0)
+	if (job->vmid == 0)
 		return false;
-	id = &id_mgr->ids[job->vm_id];
+	id = &id_mgr->ids[job->vmid];
 	gds_switch_needed = ring->funcs->emit_gds_switch && (
 		id->gds_base != job->gds_base ||
 		id->gds_size != job->gds_size ||
@@ -783,7 +457,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 		id->oa_base != job->oa_base ||
 		id->oa_size != job->oa_size);
 
-	if (amdgpu_vm_had_gpu_reset(adev, id))
+	if (amdgpu_vmid_had_gpu_reset(adev, id))
 		return true;
 
 	return vm_flush_needed || gds_switch_needed;
@@ -798,7 +472,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vm_id: vmid number to use
+ * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Emit a VM flush when it is necessary.
@@ -807,8 +481,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
-	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
 		id->gds_base != job->gds_base ||
 		id->gds_size != job->gds_size ||
@@ -820,7 +494,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 	unsigned patch_offset = 0;
 	int r;
 
-	if (amdgpu_vm_had_gpu_reset(adev, id)) {
+	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
 		gds_switch_needed = true;
 		vm_flush_needed = true;
 	}
@@ -837,8 +511,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
 		struct dma_fence *fence;
 
-		trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
-		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
+		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
 
 		r = amdgpu_fence_emit(ring, &fence);
 		if (r)
@@ -858,7 +532,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 		id->gws_size = job->gws_size;
 		id->oa_base = job->oa_base;
 		id->oa_size = job->oa_size;
-		amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
 					    job->gds_size, job->gws_base,
 					    job->gws_size, job->oa_base,
 					    job->oa_size);
@@ -876,49 +550,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 }
 
 /**
- * amdgpu_vm_reset_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- * @vm_id: vmid number to use
- *
- * Reset saved GDW, GWS and OA to force switch on next flush.
- */
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
-			unsigned vmid)
-{
-	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
-
-	atomic64_set(&id->owner, 0);
-	id->gds_base = 0;
-	id->gds_size = 0;
-	id->gws_base = 0;
-	id->gws_size = 0;
-	id->oa_base = 0;
-	id->oa_size = 0;
-}
-
-/**
- * amdgpu_vm_reset_all_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- *
- * Reset VMID to force flush on next use
- */
-void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
-{
-	unsigned i, j;
-
-	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
-		struct amdgpu_vm_id_manager *id_mgr =
-			&adev->vm_manager.id_mgr[i];
-
-		for (j = 1; j < id_mgr->num_ids; ++j)
-			amdgpu_vm_reset_id(adev, i, j);
-	}
-}
-
-/**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
  * @vm: requested vm
@@ -1569,13 +1200,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	 *
 	 * The second command is for the shadow pagetables.
 	 */
-	ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
+	if (vm->root.base.bo->shadow)
+		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
+	else
+		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
 
 	/* padding, etc. */
 	ndw = 64;
 
 	/* one PDE write for each huge page */
-	ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
+	if (vm->root.base.bo->shadow)
+		ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
+	else
+		ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
 
 	if (pages_addr) {
 		/* copy commands needed */
@@ -2114,8 +1751,26 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 
-	if (bo)
-		list_add_tail(&bo_va->base.bo_list, &bo->va);
+	if (!bo)
+		return bo_va;
+
+	list_add_tail(&bo_va->base.bo_list, &bo->va);
+
+	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+		return bo_va;
+
+	if (bo->preferred_domains &
+	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+		return bo_va;
+
+	/*
+	 * We checked all the prerequisites, but it looks like this per VM BO
+	 * is currently evicted. add the BO to the evicted list to make sure it
+	 * is validated on next VM use to avoid fault.
+	 * */
+	spin_lock(&vm->status_lock);
+	list_move_tail(&bo_va->base.vm_status, &vm->evicted);
+	spin_unlock(&vm->status_lock);
 
 	return bo_va;
 }
@@ -2625,7 +2280,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	uint64_t init_pde_value = 0;
 
 	vm->va = RB_ROOT_CACHED;
-	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		vm->reserved_vmid[i] = NULL;
 	spin_lock_init(&vm->status_lock);
@@ -2819,7 +2473,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	amdgpu_bo_unref(&root);
 	dma_fence_put(vm->last_update);
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
-		amdgpu_vm_free_reserved_vmid(adev, vm, i);
+		amdgpu_vmid_free_reserved(adev, vm, i);
 }
 
 /**
@@ -2861,23 +2515,9 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
 */
 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 {
-	unsigned i, j;
-
-	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
-		struct amdgpu_vm_id_manager *id_mgr =
-			&adev->vm_manager.id_mgr[i];
+	unsigned i;
 
-		mutex_init(&id_mgr->lock);
-		INIT_LIST_HEAD(&id_mgr->ids_lru);
-		atomic_set(&id_mgr->reserved_vmid_num, 0);
-
-		/* skip over VMID 0, since it is the system VM */
-		for (j = 1; j < id_mgr->num_ids; ++j) {
-			amdgpu_vm_reset_id(adev, i, j);
-			amdgpu_sync_create(&id_mgr->ids[i].active);
-			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
-		}
-	}
+	amdgpu_vmid_mgr_init(adev);
 
 	adev->vm_manager.fence_context =
 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
@@ -2885,7 +2525,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 		adev->vm_manager.seqno[i] = 0;
 
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
-	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
 	atomic_set(&adev->vm_manager.num_prt_users, 0);
 
@@ -2918,24 +2557,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-	unsigned i, j;
-
 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
 	idr_destroy(&adev->vm_manager.pasid_idr);
 
-	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
-		struct amdgpu_vm_id_manager *id_mgr =
-			&adev->vm_manager.id_mgr[i];
-
-		mutex_destroy(&id_mgr->lock);
-		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
-			struct amdgpu_vm_id *id = &id_mgr->ids[j];
-
-			amdgpu_sync_free(&id->active);
-			dma_fence_put(id->flushed_updates);
-			dma_fence_put(id->last_flush);
-		}
-	}
+	amdgpu_vmid_mgr_fini(adev);
 }
 
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -2948,13 +2573,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	switch (args->in.op) {
 	case AMDGPU_VM_OP_RESERVE_VMID:
 		/* current, we only have requirement to reserve vmid from gfxhub */
-		r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
-						  AMDGPU_GFXHUB);
+		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
 		if (r)
 			return r;
 		break;
 	case AMDGPU_VM_OP_UNRESERVE_VMID:
-		amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
+		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index edd2ea52dc00..21a80f1bb2b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -31,6 +31,7 @@
 
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_ids.h"
 
 struct amdgpu_bo_va;
 struct amdgpu_job;
@@ -40,9 +41,6 @@ struct amdgpu_bo_list_entry;
  * GPUVM handling
  */
 
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM	16
-
 /* Maximum number of PTEs the hardware can write with one command */
 #define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
 
@@ -193,11 +191,9 @@ struct amdgpu_vm {
 	/* Scheduler entity for page table updates */
 	struct drm_sched_entity	entity;
 
-	/* client id and PASID (TODO: replace client_id with PASID) */
-	u64			client_id;
 	unsigned int		pasid;
 	/* dedicated to vm */
-	struct amdgpu_vm_id	*reserved_vmid[AMDGPU_MAX_VMHUBS];
+	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];
 
 	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
 	bool			use_cpu_for_update;
@@ -212,37 +208,9 @@ struct amdgpu_vm {
 	unsigned int		fault_credit;
 };
 
-struct amdgpu_vm_id {
-	struct list_head	list;
-	struct amdgpu_sync	active;
-	struct dma_fence	*last_flush;
-	atomic64_t		owner;
-
-	uint64_t		pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct dma_fence	*flushed_updates;
-
-	uint32_t		current_gpu_reset_count;
-
-	uint32_t		gds_base;
-	uint32_t		gds_size;
-	uint32_t		gws_base;
-	uint32_t		gws_size;
-	uint32_t		oa_base;
-	uint32_t		oa_size;
-};
-
-struct amdgpu_vm_id_manager {
-	struct mutex		lock;
-	unsigned		num_ids;
-	struct list_head	ids_lru;
-	struct amdgpu_vm_id	ids[AMDGPU_NUM_VM];
-	atomic_t		reserved_vmid_num;
-};
-
 struct amdgpu_vm_manager {
 	/* Handling of VMIDs */
-	struct amdgpu_vm_id_manager		id_mgr[AMDGPU_MAX_VMHUBS];
+	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
 
 	/* Handling of VM fences */
 	u64					fence_context;
@@ -260,8 +228,6 @@ struct amdgpu_vm_manager {
 	struct amdgpu_ring			*vm_pte_rings[AMDGPU_MAX_RINGS];
 	unsigned				vm_pte_num_rings;
 	atomic_t				vm_pte_next_ring;
-	/* client id counter */
-	atomic64_t				client_counter;
 
 	/* partial resident texture handling */
 	spinlock_t				prt_lock;
@@ -280,8 +246,6 @@ struct amdgpu_vm_manager {
 	spinlock_t				pasid_lock;
 };
 
-int amdgpu_vm_alloc_pasid(unsigned int bits);
-void amdgpu_vm_free_pasid(unsigned int pasid);
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -299,13 +263,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 			struct amdgpu_vm *vm,
 			uint64_t saddr, uint64_t size);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct dma_fence *fence,
-		      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
-			unsigned vmid);
-void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 				 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index a870b354e3f7..d5a05c19708f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -280,7 +280,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
 	entry->src_id = dw[0] & 0xff;
 	entry->src_data[0] = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
-	entry->vm_id = (dw[2] >> 8) & 0xff;
+	entry->vmid = (dw[2] >> 8) & 0xff;
 	entry->pas_id = (dw[2] >> 16) & 0xffff;
 
 	/* wptr/rptr are in bytes! */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index e406c93d01d6..6e8278e689b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -221,9 +221,9 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib,
-				  unsigned vm_id, bool ctx_switch)
+				  unsigned vmid, bool ctx_switch)
 {
-	u32 extra_bits = vm_id & 0xf;
+	u32 extra_bits = vmid & 0xf;
 
 	/* IB packet must end on a 8 DW boundary */
 	cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -880,23 +880,23 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 * using sDMA (CIK).
 */
 static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
-					unsigned vm_id, uint64_t pd_addr)
+					unsigned vmid, uint64_t pd_addr)
 {
 	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
 			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
 
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-	if (vm_id < 8) {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+	if (vmid < 8) {
+		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
 	} else {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
 	}
 	amdgpu_ring_write(ring, pd_addr >> 12);
 
 	/* flush TLB */
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-	amdgpu_ring_write(ring, 1 << vm_id);
+	amdgpu_ring_write(ring, 1 << vmid);
 
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index fa61d649bb44..f576e9cbbc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -259,7 +259,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
 	entry->src_id = dw[0] & 0xff;
 	entry->src_data[0] = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
-	entry->vm_id = (dw[2] >> 8) & 0xff;
+	entry->vmid = (dw[2] >> 8) & 0xff;
 	entry->pas_id = (dw[2] >> 16) & 0xffff;
 
 	/* wptr/rptr are in bytes! */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index edef17d93527..9870d83b68c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1874,7 +1874,7 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib,
-				  unsigned vm_id, bool ctx_switch)
+				  unsigned vmid, bool ctx_switch)
 {
 	u32 header, control = 0;
 
@@ -1889,7 +1889,7 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw | (vm_id << 24);
+	control |= ib->length_dw | (vmid << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 }
 
 static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-					unsigned vm_id, uint64_t pd_addr)
+					unsigned vmid, uint64_t pd_addr)
 {
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
@@ -2362,10 +2362,10 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
 				 WRITE_DATA_DST_SEL(0)));
-	if (vm_id < 8) {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
+	if (vmid < 8) {
+		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid ));
 	} else {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
 	}
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, pd_addr >> 12);
@@ -2376,7 +2376,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				 WRITE_DATA_DST_SEL(0)));
 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
 	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 1 << vm_id);
+	amdgpu_ring_write(ring, 1 << vmid);
 
 	/* wait for the invalidate to complete */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 9c62ebd5a19c..a066c5eda135 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2254,7 +2254,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
 */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 				      struct amdgpu_ib *ib,
-				      unsigned vm_id, bool ctx_switch)
+				      unsigned vmid, bool ctx_switch)
 {
 	u32 header, control = 0;
 
@@ -2269,7 +2269,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw | (vm_id << 24);
+	control |= ib->length_dw | (vmid << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -2283,9 +2283,9 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 					  struct amdgpu_ib *ib,
-					  unsigned vm_id, bool ctx_switch)
+					  unsigned vmid, bool ctx_switch)
 {
-	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 	amdgpu_ring_write(ring,
@@ -3239,19 +3239,19 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 * using the CP (CIK).
*/ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0))); - if (vm_id < 8) { + if (vmid < 8) { amdgpu_ring_write(ring, - (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { amdgpu_ring_write(ring, - (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, pd_addr >> 12); @@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for the invalidate to complete */ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e18c2e62a20b..4e694ae9f308 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6245,7 +6245,7 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { u32 header, control = 0; @@ -6254,7 +6254,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (vm_id << 24); + control |= ib->length_dw | (vmid << 24); if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); @@ -6275,9 +6275,9 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, @@ -6328,7 +6328,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) } static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); @@ -6336,12 +6336,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0)) | WR_CONFIRM); - if (vm_id < 8) { + if (vmid < 8) { amdgpu_ring_write(ring, - (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { amdgpu_ring_write(ring, - (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, pd_addr >> 12); @@ -6353,7 +6353,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, WRITE_DATA_DST_SEL(0))); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for the invalidate to complete */ 
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9f7be230734c..55670dbacace 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3594,7 +3594,7 @@ static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { u32 header, control = 0; @@ -3603,7 +3603,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (vm_id << 24); + control |= ib->length_dw | (vmid << 24); if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); @@ -3625,9 +3625,9 @@ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ @@ -3683,11 +3683,11 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) } static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -3695,11 +3695,11 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr |= flags; gfx_v9_0_write_data_to_reg(ring, usepfp, true, - hub->ctx0_ptb_addr_lo32 + (2 * vm_id), + hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); gfx_v9_0_write_data_to_reg(ring, usepfp, true, - hub->ctx0_ptb_addr_hi32 + (2 * vm_id), + hub->ctx0_ptb_addr_hi32 + (2 * vmid), upper_32_bits(pd_addr)); gfx_v9_0_write_data_to_reg(ring, usepfp, true, @@ -3707,7 +3707,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for the invalidate to complete */ gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack + - eng, 0, 1 << vm_id, 1 << vm_id, 0x20); + eng, 0, 1 << vmid, 1 << vmid, 0x20); /* compute doesn't have PFP */ if (usepfp) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e1a73c43f32d..8e28270d1ea9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -956,7 +956,7 @@ static int gmc_v6_0_resume(void *handle) if (r) return r; - amdgpu_vm_reset_all_ids(adev); + amdgpu_vmid_reset_all(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 356a9a71b8cf..86e9d682c59e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1107,7 +1107,7 @@ static int gmc_v7_0_resume(void *handle) if (r) return r; - amdgpu_vm_reset_all_ids(adev); + amdgpu_vmid_reset_all(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index fce45578f5fd..9a813d834f1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1212,7 +1212,7 @@ static int gmc_v8_0_resume(void *handle) if (r) return r; - amdgpu_vm_reset_all_ids(adev); + amdgpu_vmid_reset_all(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b776df4c999f..eb8b1bb66389 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -248,7 +248,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src]; + struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; uint32_t status = 0; u64 addr; @@ -262,9 +262,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (printk_ratelimit()) { dev_err(adev->dev, - "[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n", - entry->vm_id_src ? "mmhub" : "gfxhub", - entry->src_id, entry->ring_id, entry->vm_id, + "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n", + entry->vmid_src ? "mmhub" : "gfxhub", + entry->src_id, entry->ring_id, entry->vmid, entry->pas_id); dev_err(adev->dev, " at page 0x%016llx from %d\n", addr, entry->client_id); @@ -288,13 +288,13 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs; } -static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id) +static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid) { u32 req = 0; - /* invalidate using legacy mode on vm_id*/ + /* invalidate using legacy mode on vmid*/ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, - PER_VMID_INVALIDATE_REQ, 1 << vm_id); + PER_VMID_INVALIDATE_REQ, 1 << vmid); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); @@ -1056,7 +1056,7 @@ static int gmc_v9_0_resume(void *handle) if (r) return r; - amdgpu_vm_reset_all_ids(adev); + amdgpu_vmid_reset_all(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index bd592cb39f37..c4e4be3dd31d 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -259,7 +259,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev, entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; - entry->vm_id = (dw[2] >> 8) & 0xff; + entry->vmid = (dw[2] >> 8) & 0xff; entry->pas_id = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 401552bae7f5..d4787ad4d346 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -246,15 +246,13 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 vmid = vm_id & 0xf; - /* IB packet must end on a 8 DW boundary */ sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | - SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); @@ -861,14 +859,14 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (VI). */ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - if (vm_id < 8) { - amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + if (vmid < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { - amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, pd_addr >> 12); @@ -876,7 +874,7 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for flush */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 0735d4d0e56a..521978c40537 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -417,15 +417,13 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 vmid = vm_id & 0xf; - /* IB packet must end on a 8 DW boundary */ sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | - SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); @@ -1127,14 +1125,14 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (VI). 
*/ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - if (vm_id < 8) { - amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + if (vmid < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); } else { - amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8)); } amdgpu_ring_write(ring, pd_addr >> 12); @@ -1142,7 +1140,7 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for flush */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 73477c5ed9b4..e92fb372bc99 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -330,15 +330,13 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { - u32 vmid = vm_id & 0xf; - /* IB packet must end on a 8 DW boundary */ sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | - SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); @@ -1135,10 +1133,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (VEGA10). 
*/ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -1147,12 +1145,12 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2); + amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); - amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2); + amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2); amdgpu_ring_write(ring, upper_32_bits(pd_addr)); /* flush TLB */ @@ -1167,8 +1165,8 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 1 << vm_id); /* reference */ - amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, 1 << vmid); /* reference */ + amdgpu_ring_write(ring, 1 << vmid); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 9adca5d8b045..9a29c1399091 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -61,14 +61,14 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring) static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. * Pad as necessary with NOPs. */ while ((lower_32_bits(ring->wptr) & 7) != 5) amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); - amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0)); + amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0)); amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); @@ -473,25 +473,25 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * using sDMA (VI). 
*/ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - if (vm_id < 8) - amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + if (vmid < 8) + amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid)); else - amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8))); + amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8))); amdgpu_ring_write(ring, pd_addr >> 12); /* bits 0-7 are the VM contexts0-7 */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST)); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); /* wait for invalidate to complete */ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST); amdgpu_ring_write(ring, 0xff << 16); /* retry */ - amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, 1 << vmid); /* mask */ amdgpu_ring_write(ring, 0); /* value */ amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ } diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index d2c6b80309c8..60dad63098a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -146,7 +146,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev, entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; - entry->vm_id = (dw[2] >> 8) & 0xff; + entry->vmid = (dw[2] >> 8) & 0xff; adev->irq.ih.rptr += 16; } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index aa4e320e31f8..5995ffc183de 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -270,7 +270,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev, entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; - entry->vm_id = (dw[2] >> 8) & 0xff; + entry->vmid = (dw[2] >> 8) & 0xff; entry->pas_id = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index b13ae34be1c2..8ab10c220910 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -541,7 +541,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) */ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); amdgpu_ring_write(ring, ib->gpu_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index a4b0f1d842b7..c1fe30cdba32 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -556,7 +556,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) */ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 86123448a8ff..59271055a30e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1028,10 +1028,10 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) */ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); @@ -1050,24 +1050,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, * Write enc ring commands to execute the indirect buffer */ static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) { amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); } static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { uint32_t reg; - if (vm_id < 8) - reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id; + if (vmid < 8) + reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid; else - reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8; + reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8; amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); amdgpu_ring_write(ring, reg << 2); @@ -1079,7 +1079,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); amdgpu_ring_write(ring, 0x8); @@ -1088,7 +1088,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); amdgpu_ring_write(ring, 0); 
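/*
 * [Editorial sketch, not part of the patch.] Every pre-gmc9 emit_vm_flush
 * touched by this rename repeats the same vmid-based register selection seen
 * just above: VM contexts 0-7 and 8-15 keep their page-table base registers
 * in two separate banks. A minimal, hypothetical helper capturing that
 * pattern (register macros as used in the diff; the kernel code keeps this
 * open-coded in each ring backend):
 */
static u32 vm_ctx_pt_base_reg(unsigned vmid)
{
	return (vmid < 8) ?
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid :
		mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
}
/*
 * The TLB flush itself is then a one-hot write of (1 << vmid) to
 * VM_INVALIDATE_REQUEST, followed by a wait for the invalidate to complete,
 * which is exactly the sequence each emit_vm_flush above emits to its ring.
 */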
amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); - amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, 1 << vmid); /* mask */ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); amdgpu_ring_write(ring, 0xC); } @@ -1127,14 +1127,14 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring) } static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, pd_addr >> 12); amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); } static bool uvd_v6_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 4ec4447d33c0..6b95f4f344b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1218,13 +1218,13 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) */ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); @@ -1246,10 +1246,10 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, * Write enc ring commands to execute the indirect buffer */ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) { amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); @@ -1291,10 +1291,10 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring, } static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; uint32_t data0, data1, mask; @@ -1302,15 +1302,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); pd_addr |= flags; - data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2; data1 = upper_32_bits(pd_addr); uvd_v7_0_vm_reg_write(ring, data0, data1); - data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; data1 = lower_32_bits(pd_addr); uvd_v7_0_vm_reg_write(ring, data0, data1); - data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; data1 = lower_32_bits(pd_addr); mask = 0xffffffff; uvd_v7_0_vm_reg_wait(ring, data0, data1, mask); @@ -1322,8 +1322,8 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ data0 = 
(hub->vm_inv_eng0_ack + eng) << 2; - data1 = 1 << vm_id; - mask = 1 << vm_id; + data1 = 1 << vmid; + mask = 1 << vmid; uvd_v7_0_vm_reg_wait(ring, data0, data1, mask); } @@ -1343,10 +1343,10 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring) } static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -1354,15 +1354,15 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr |= flags; amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); amdgpu_ring_write(ring, upper_32_bits(pd_addr)); amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); @@ -1374,8 +1374,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT); amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); - amdgpu_ring_write(ring, 1 << vm_id); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); + amdgpu_ring_write(ring, 1 << vmid); } #if 0 diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index cf81065e3c5a..a5355eb689f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -834,24 +834,24 @@ out: } static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) { amdgpu_ring_write(ring, VCE_CMD_IB_VM); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); } static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, pd_addr >> 12); amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, VCE_CMD_END); } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 308949d6edde..7cf2eef68cf2 100755 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -938,10 +938,10 @@ static int vce_v4_0_set_powergating_state(void *handle, #endif static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool 
ctx_switch) { amdgpu_ring_write(ring, VCE_CMD_IB_VM); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); @@ -965,10 +965,10 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring) } static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -976,15 +976,15 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, pd_addr |= flags; amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); amdgpu_ring_write(ring, upper_32_bits(pd_addr)); amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); amdgpu_ring_write(ring, VCE_CMD_REG_WAIT); - amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); @@ -996,8 +996,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ amdgpu_ring_write(ring, VCE_CMD_REG_WAIT); amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); - amdgpu_ring_write(ring, 1 << vm_id); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); + amdgpu_ring_write(ring, 1 << vmid); } static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index deb3fba790a5..b99e15c43e45 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -833,13 +833,13 @@ static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) */ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, - unsigned vm_id, bool ctx_switch) + unsigned vmid, bool ctx_switch) { struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); @@ -888,10 +888,10 @@ static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring, } static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; uint32_t data0, data1, mask; @@ -899,15 +899,15 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); pd_addr |= flags; - data0 = (hub->ctx0_ptb_addr_hi32 
+ vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2; data1 = upper_32_bits(pd_addr); vcn_v1_0_dec_vm_reg_write(ring, data0, data1); - data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; data1 = lower_32_bits(pd_addr); vcn_v1_0_dec_vm_reg_write(ring, data0, data1); - data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2; + data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2; data1 = lower_32_bits(pd_addr); mask = 0xffffffff; vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask); @@ -919,8 +919,8 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ data0 = (hub->vm_inv_eng0_ack + eng) << 2; - data1 = 1 << vm_id; - mask = 1 << vm_id; + data1 = 1 << vmid; + mask = 1 << vmid; vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask); } @@ -1011,20 +1011,20 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring) * Write enc ring commands to execute the indirect buffer */ static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) + struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) { amdgpu_ring_write(ring, VCN_ENC_CMD_IB); - amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, ib->length_dw); } static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vm_id, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid); uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; @@ -1033,17 +1033,17 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, - (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2); + (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); amdgpu_ring_write(ring, upper_32_bits(pd_addr)); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, - (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); amdgpu_ring_write(ring, - (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2); + (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, lower_32_bits(pd_addr)); @@ -1055,8 +1055,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, /* wait for flush */ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2); - amdgpu_ring_write(ring, 1 << vm_id); - amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, 1 << vmid); + amdgpu_ring_write(ring, 1 << vmid); } static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index e1d7dae0989b..b69ceafb7888 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -327,8 +327,8 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev, entry->client_id = dw[0] & 0xff; entry->src_id = (dw[0] >> 8) & 0xff; entry->ring_id = (dw[0] >> 16) & 0xff; - entry->vm_id = (dw[0] >> 24) & 0xf; - entry->vm_id_src = 
(dw[0] >> 31); + entry->vmid = (dw[0] >> 24) & 0xf; + entry->vmid_src = (dw[0] >> 31); entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); entry->timestamp_src = dw[2] >> 31; entry->pas_id = dw[3] & 0xffff; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 78ab0556e48f..4d3aff381bca 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -709,6 +709,19 @@ static int cz_start_smu(struct pp_hwmgr *hwmgr) { int ret = 0; uint32_t fw_to_check = 0; + struct cgs_firmware_info info = {0}; + uint32_t index = SMN_MP1_SRAM_START_ADDR + + SMU8_FIRMWARE_HEADER_LOCATION + + offsetof(struct SMU8_Firmware_Header, Version); + + + if (hwmgr == NULL || hwmgr->device == NULL) + return -EINVAL; + + cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index); + hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA); + info.version = hwmgr->smu_version >> 8; + cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info); fw_to_check = UCODE_ID_RLC_G_MASK | UCODE_ID_SDMA0_MASK | |
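The cz_smumgr hunk above makes cz_start_smu() read the SMU8 firmware version before checking which ucode images loaded: the version field lives in the firmware header in MP1 SRAM and is reached through the MP0PUB indirect register pair. A minimal sketch of that read, assuming only the cgs helpers and constants already used in the diff (the helper name itself is hypothetical):

static uint32_t smu8_read_fw_version(struct pp_hwmgr *hwmgr)
{
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, Version);

	/* indirect access: latch the SRAM address, then read the data port */
	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
	return cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
}

As in the patch, the caller stores the result in hwmgr->smu_version and hands (version >> 8) to cgs_get_firmware_info() via struct cgs_firmware_info.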