Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 215
1 file changed, 118 insertions, 97 deletions
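
This commit renames the memory-controller state in struct amdgpu_device from adev->mc (struct amdgpu_mc) to adev->gmc (struct amdgpu_gmc) throughout gmc_v8_0.c, and replaces the GART callback table (adev->gart.gart_funcs, struct amdgpu_gart_funcs) with a GMC one (adev->gmc.gmc_funcs, struct amdgpu_gmc_funcs). Along the way it adds ring-based emit_flush_gpu_tlb and emit_pasid_mapping callbacks, drops the HDP cache flush from the MMIO TLB-flush path, drops the local need_swiotlb computation, switches from adev->dummy_page.addr to adev->dummy_page_addr, includes the PASID in decoded VM-fault messages, and lowers the high bound of the PRT apertures so they stay clear of the reserved VA range.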
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a170e37fbe7..d71d4cb68f9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -45,7 +45,7 @@
 
 #include "amdgpu_atombios.h"
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v8_0_wait_for_idle(void *handle);
 
@@ -236,16 +236,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 	}
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
-	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 	if (err)
 		goto out;
-	err = amdgpu_ucode_validate(adev->mc.fw);
+	err = amdgpu_ucode_validate(adev->gmc.fw);
 
 out:
 	if (err) {
 		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
-		release_firmware(adev->mc.fw);
-		adev->mc.fw = NULL;
+		release_firmware(adev->gmc.fw);
+		adev->gmc.fw = NULL;
 	}
 	return err;
 }
@@ -274,19 +274,19 @@ static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
 	if (amdgpu_sriov_bios(adev))
 		return 0;
 
-	if (!adev->mc.fw)
+	if (!adev->gmc.fw)
 		return -EINVAL;
 
-	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 	amdgpu_ucode_print_mc_hdr(&hdr->header);
 
-	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 	io_mc_regs = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 	fw_data = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
 	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
@@ -350,19 +350,19 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 	if (vbios_version == 0)
 		return 0;
 
-	if (!adev->mc.fw)
+	if (!adev->gmc.fw)
 		return -EINVAL;
 
-	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 	amdgpu_ucode_print_mc_hdr(&hdr->header);
 
-	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 	io_mc_regs = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 	fw_data = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
 	data = RREG32(mmMC_SEQ_MISC0);
 	data &= ~(0x40);
@@ -398,7 +398,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 }
 
 static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
-				       struct amdgpu_mc *mc)
+				       struct amdgpu_gmc *mc)
 {
 	u64 base = 0;
 
@@ -406,7 +406,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 	base <<= 24;
 
-	amdgpu_device_vram_location(adev, &adev->mc, base);
+	amdgpu_device_vram_location(adev, &adev->gmc, base);
 	amdgpu_device_gart_location(adev, mc);
 }
 
@@ -449,18 +449,18 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 	}
 	/* Update configuration */
 	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-	       adev->mc.vram_start >> 12);
+	       adev->gmc.vram_start >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-	       adev->mc.vram_end >> 12);
+	       adev->gmc.vram_end >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 	       adev->vram_scratch.gpu_addr >> 12);
 
 	if (amdgpu_sriov_vf(adev)) {
-		tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
-		tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
+		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
 		WREG32(mmMC_VM_FB_LOCATION, tmp);
 		/* XXX double check these! */
-		WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
 		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	}
@@ -495,8 +495,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
-	if (!adev->mc.vram_width) {
+	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
+	if (!adev->gmc.vram_width) {
 		u32 tmp;
 		int chansize, numchan;
 
@@ -538,31 +538,31 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 			numchan = 16;
 			break;
 		}
-		adev->mc.vram_width = numchan * chansize;
+		adev->gmc.vram_width = numchan * chansize;
 	}
 	/* size in MB on si */
-	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 
 	if (!(adev->flags & AMD_IS_APU)) {
 		r = amdgpu_device_resize_fb_bar(adev);
 		if (r)
 			return r;
 	}
-	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
 	if (adev->flags & AMD_IS_APU) {
-		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
-		adev->mc.aper_size = adev->mc.real_vram_size;
+		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+		adev->gmc.aper_size = adev->gmc.real_vram_size;
 	}
 #endif
 
 	/* In case the PCI BAR is larger than the actual amount of vram */
-	adev->mc.visible_vram_size = adev->mc.aper_size;
-	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
-		adev->mc.visible_vram_size = adev->mc.real_vram_size;
+	adev->gmc.visible_vram_size = adev->gmc.aper_size;
+	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
@@ -571,20 +571,20 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 		case CHIP_POLARIS10: /* all engines support GPUVM */
 		case CHIP_POLARIS12: /* all engines support GPUVM */
 		default:
-			adev->mc.gart_size = 256ULL << 20;
+			adev->gmc.gart_size = 256ULL << 20;
 			break;
 		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
 		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
 		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
 		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
-			adev->mc.gart_size = 1024ULL << 20;
+			adev->gmc.gart_size = 1024ULL << 20;
 			break;
 		}
 	} else {
-		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 	}
 
-	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
+	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
 
 	return 0;
 }
@@ -597,25 +597,45 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  */
 
 /**
- * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
 *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
 					uint32_t vmid)
 {
-	/* flush hdp cache */
-	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
-
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
+static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+					    unsigned vmid, uint64_t pd_addr)
+{
+	uint32_t reg;
+
+	if (vmid < 8)
+		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+	else
+		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+	return pd_addr;
+}
+
+static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
+					unsigned pasid)
+{
+	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
+}
+
 /**
- * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v8_0_set_pte_pde - update the page tables using MMIO
  *
  * @adev: amdgpu_device pointer
  * @cpu_pt_addr: cpu address of the page table
@@ -625,11 +645,9 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
  * Update the page tables using the CPU.
  */
-static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -723,9 +741,9 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
 	u32 tmp;
 
-	if (enable && !adev->mc.prt_warning) {
+	if (enable && !adev->gmc.prt_warning) {
 		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
-		adev->mc.prt_warning = true;
+		adev->gmc.prt_warning = true;
 	}
 
 	tmp = RREG32(mmVM_PRT_CNTL);
@@ -747,7 +765,8 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 
 	if (enable) {
 		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
-		uint32_t high = adev->vm_manager.max_pfn;
+		uint32_t high = adev->vm_manager.max_pfn -
+			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
 
 		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -837,11 +856,11 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 	WREG32(mmVM_L2_CNTL4, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
-			(u32)(adev->dummy_page.addr >> 12));
+			(u32)(adev->dummy_page_addr >> 12));
 	WREG32(mmVM_CONTEXT0_CNTL2, 0);
 	tmp = RREG32(mmVM_CONTEXT0_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
@@ -871,7 +890,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 
 	/* enable context1-15 */
 	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
-	       (u32)(adev->dummy_page.addr >> 12));
+	       (u32)(adev->dummy_page_addr >> 12));
 	WREG32(mmVM_CONTEXT1_CNTL2, 4);
 	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
@@ -891,9 +910,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v8_0_set_fault_enable_default(adev, true);
 
-	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v8_0_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gart_size >> 20),
+		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -966,21 +985,21 @@ static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
 *
 * Print human readable fault information (CIK).
 */
-static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
-				     u32 status, u32 addr, u32 mc_client)
+static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
+				     u32 addr, u32 mc_client, unsigned pasid)
 {
-	u32 mc_id;
 	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 				 VMID);
 	u32 protections = REG_GET_FIELD(status,
 					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 					PROTECTIONS);
 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+	u32 mc_id;
 
 	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 			      MEMORY_CLIENT_ID);
 
-	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
-	       protections, vmid, addr,
+	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+	       protections, vmid, pasid, addr,
 	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 			     MEMORY_CLIENT_RW) ?
 	       "write" : "read", block, mc_client, mc_id);
@@ -1012,16 +1031,16 @@ static int gmc_v8_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v8_0_set_gart_funcs(adev);
+	gmc_v8_0_set_gmc_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
-	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
-	adev->mc.shared_aperture_end =
-		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
-	adev->mc.private_aperture_start =
-		adev->mc.shared_aperture_end + 1;
-	adev->mc.private_aperture_end =
-		adev->mc.private_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+	adev->gmc.shared_aperture_end =
+		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.private_aperture_start =
+		adev->gmc.shared_aperture_end + 1;
+	adev->gmc.private_aperture_end =
+		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 
 	return 0;
 }
@@ -1031,7 +1050,7 @@ static int gmc_v8_0_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
 		return 0;
 }
@@ -1045,7 +1064,7 @@ static int gmc_v8_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->flags & AMD_IS_APU) {
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp;
 
@@ -1054,14 +1073,14 @@ static int gmc_v8_0_sw_init(void *handle)
 		else
 			tmp = RREG32(mmMC_SEQ_MISC0);
 		tmp &= MC_SEQ_MISC0__MT__MASK;
-		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
 	}
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 	if (r)
 		return r;
 
@@ -1075,9 +1094,9 @@ static int gmc_v8_0_sw_init(void *handle)
 	 * This is the max address of the GPU's
 	 * internal address space.
 	 */
-	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-	adev->mc.stolen_size = 256 * 1024;
+	adev->gmc.stolen_size = 256 * 1024;
 
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 40-bits.
@@ -1086,7 +1105,6 @@ static int gmc_v8_0_sw_init(void *handle)
 	 */
 	adev->need_dma32 = false;
 	dma_bits = adev->need_dma32 ? 32 : 40;
-	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 	if (r) {
 		adev->need_dma32 = true;
@@ -1149,8 +1167,8 @@ static int gmc_v8_0_sw_fini(void *handle)
 	amdgpu_vm_manager_fini(adev);
 	gmc_v8_0_gart_fini(adev);
 	amdgpu_bo_fini(adev);
-	release_firmware(adev->mc.fw);
-	adev->mc.fw = NULL;
+	release_firmware(adev->gmc.fw);
+	adev->gmc.fw = NULL;
 
 	return 0;
 }
@@ -1191,7 +1209,7 @@ static int gmc_v8_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 	gmc_v8_0_gart_disable(adev);
 
 	return 0;
@@ -1271,10 +1289,10 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
 						SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
 	if (srbm_soft_reset) {
-		adev->mc.srbm_soft_reset = srbm_soft_reset;
+		adev->gmc.srbm_soft_reset = srbm_soft_reset;
 		return true;
 	} else {
-		adev->mc.srbm_soft_reset = 0;
+		adev->gmc.srbm_soft_reset = 0;
 		return false;
 	}
 }
@@ -1283,7 +1301,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->mc.srbm_soft_reset)
+	if (!adev->gmc.srbm_soft_reset)
 		return 0;
 
 	gmc_v8_0_mc_stop(adev);
@@ -1299,9 +1317,9 @@ static int gmc_v8_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->mc.srbm_soft_reset)
+	if (!adev->gmc.srbm_soft_reset)
 		return 0;
-	srbm_soft_reset = adev->mc.srbm_soft_reset;
+	srbm_soft_reset = adev->gmc.srbm_soft_reset;
 
 	if (srbm_soft_reset) {
 		u32 tmp;
@@ -1329,7 +1347,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->mc.srbm_soft_reset)
+	if (!adev->gmc.srbm_soft_reset)
 		return 0;
 
 	gmc_v8_0_mc_resume(adev);
@@ -1410,7 +1428,8 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
 			addr);
 		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
 			status);
-		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
+		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
+					 entry->pasid);
 	}
 
 	return 0;
@@ -1642,9 +1661,11 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
+	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
+	.set_pte_pde = gmc_v8_0_set_pte_pde,
 	.set_prt = gmc_v8_0_set_prt,
 	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v8_0_get_vm_pde
@@ -1655,16 +1676,16 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
 	.process = gmc_v8_0_process_interrupt,
 };
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
 }
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->mc.vm_fault.num_types = 1;
-	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
+	adev->gmc.vm_fault.num_types = 1;
+	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
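
For readers less familiar with the callback-table convention this diff leans on: gmc_v8_0_set_gmc_funcs() installs a chip-specific table of function pointers into adev->gmc.gmc_funcs during early init, and common code then dispatches through that table instead of calling the gmc_v8_0_* functions directly. The stand-alone C sketch below models only that pattern; the demo_* names and the printf stand-in for the register write are illustrative assumptions, not kernel code.

/* Minimal user-space model of the callback-table pattern above: a
 * chip-specific "gmc" function table is installed once at init time,
 * and all later calls dispatch through it.  Everything prefixed demo_
 * is a hypothetical stand-in, not a kernel definition. */
#include <stdint.h>
#include <stdio.h>

struct demo_device;

struct demo_gmc_funcs {
	/* mirrors amdgpu_gmc_funcs.flush_gpu_tlb in the diff */
	void (*flush_gpu_tlb)(struct demo_device *dev, uint32_t vmid);
};

struct demo_gmc {
	const struct demo_gmc_funcs *gmc_funcs;
};

struct demo_device {
	struct demo_gmc gmc;
};

/* Chip-specific implementation, analogous to gmc_v8_0_flush_gpu_tlb():
 * bits 0-15 of the (modeled) invalidate request select VM contexts 0-15. */
static void demo_v8_flush_gpu_tlb(struct demo_device *dev, uint32_t vmid)
{
	(void)dev;
	printf("VM_INVALIDATE_REQUEST <- 0x%08x\n", 1u << vmid);
}

static const struct demo_gmc_funcs demo_v8_gmc_funcs = {
	.flush_gpu_tlb = demo_v8_flush_gpu_tlb,
};

/* Analogous to gmc_v8_0_set_gmc_funcs(): install the table only if the
 * caller has not already provided one. */
static void demo_v8_set_gmc_funcs(struct demo_device *dev)
{
	if (dev->gmc.gmc_funcs == NULL)
		dev->gmc.gmc_funcs = &demo_v8_gmc_funcs;
}

int main(void)
{
	struct demo_device dev = { 0 };

	demo_v8_set_gmc_funcs(&dev);               /* early init */
	dev.gmc.gmc_funcs->flush_gpu_tlb(&dev, 3); /* generic dispatch */
	return 0;
}

The two new callbacks follow the same idea, but deferred: emit_flush_gpu_tlb queues the page-table base update and the VM_INVALIDATE_REQUEST write onto a command ring via amdgpu_ring_emit_wreg(), rather than performing them immediately through MMIO as flush_gpu_tlb does.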