| author | Jani Nikula <jani.nikula@intel.com> | 2017-09-04 21:40:34 +0300 |
|---|---|---|
| committer | Jani Nikula <jani.nikula@intel.com> | 2017-09-04 21:40:34 +0300 |
| commit | d149d6ae17197ce23e2cd6bc5fcdacf7b593e55e (patch) | |
| tree | 2fb8d66199080f6d7b41690f6e8616ccd79a1943 /drivers/gpu/drm/amd/amdgpu | |
| parent | afe722bee4bf8afc88c6ff7d6f781515d9428595 (diff) | |
| parent | 7846b12fe0b5feab5446d892f41b5140c1419109 (diff) | |
| download | linux-d149d6ae17197ce23e2cd6bc5fcdacf7b593e55e.tar.bz2 | |
Merge drm-upstream/drm-next into drm-intel-next-queued
Catch up with upstream while it's easy.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
64 files changed, 1405 insertions, 1436 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 51d1364cf185..12e71bbfd222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -96,6 +96,7 @@ extern int amdgpu_bapm; extern int amdgpu_deep_color; extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; +extern int amdgpu_vm_fragment_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; extern int amdgpu_vm_update_mode; @@ -373,78 +374,10 @@ struct amdgpu_clock { }; /* - * BO. + * GEM. */ -struct amdgpu_bo_list_entry { - struct amdgpu_bo *robj; - struct ttm_validate_buffer tv; - struct amdgpu_bo_va *bo_va; - uint32_t priority; - struct page **user_pages; - int user_invalidated; -}; - -struct amdgpu_bo_va_mapping { - struct list_head list; - struct rb_node rb; - uint64_t start; - uint64_t last; - uint64_t __subtree_last; - uint64_t offset; - uint64_t flags; -}; - -/* bo virtual addresses in a specific vm */ -struct amdgpu_bo_va { - /* protected by bo being reserved */ - struct list_head bo_list; - struct dma_fence *last_pt_update; - unsigned ref_count; - - /* protected by vm mutex and spinlock */ - struct list_head vm_status; - - /* mappings for this bo_va */ - struct list_head invalids; - struct list_head valids; - - /* constant after initialization */ - struct amdgpu_vm *vm; - struct amdgpu_bo *bo; -}; #define AMDGPU_GEM_DOMAIN_MAX 0x3 - -struct amdgpu_bo { - /* Protected by tbo.reserved */ - u32 prefered_domains; - u32 allowed_domains; - struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; - struct ttm_placement placement; - struct ttm_buffer_object tbo; - struct ttm_bo_kmap_obj kmap; - u64 flags; - unsigned pin_count; - void *kptr; - u64 tiling_flags; - u64 metadata_flags; - void *metadata; - u32 metadata_size; - unsigned prime_shared_count; - /* list of all virtual address to which this bo - * is associated to - */ - struct list_head va; - /* Constant after initialization */ - struct drm_gem_object gem_base; - struct amdgpu_bo *parent; - struct amdgpu_bo *shadow; - - struct ttm_bo_kmap_obj dma_buf_vmap; - struct amdgpu_mn *mn; - struct list_head mn_list; - struct list_head shadow_list; -}; #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) void amdgpu_gem_object_free(struct drm_gem_object *obj); @@ -678,15 +611,15 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT /* overlap the doorbell assignment with VCN as they are mutually exclusive * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD */ - AMDGPU_DOORBELL64_RING0_1 = 0xF8, - AMDGPU_DOORBELL64_RING2_3 = 0xF9, - AMDGPU_DOORBELL64_RING4_5 = 0xFA, - AMDGPU_DOORBELL64_RING6_7 = 0xFB, + AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, + AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, + AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, + AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, - AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC, - AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD, - AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE, - AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF, + AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, + AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, + AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, + AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, AMDGPU_DOORBELL64_INVALID = 0xFFFF @@ -816,6 +749,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); struct amdgpu_fpriv { struct amdgpu_vm vm; struct amdgpu_bo_va *prt_va; + struct amdgpu_bo_va *csa_va; struct mutex bo_list_lock; struct idr bo_list_handles; struct amdgpu_ctx_mgr ctx_mgr; @@ -825,6 +759,14 @@ struct amdgpu_fpriv { /* * residency 
list */ +struct amdgpu_bo_list_entry { + struct amdgpu_bo *robj; + struct ttm_validate_buffer tv; + struct amdgpu_bo_va *bo_va; + uint32_t priority; + struct page **user_pages; + int user_invalidated; +}; struct amdgpu_bo_list { struct mutex lock; @@ -1191,10 +1133,6 @@ struct amdgpu_wb { int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); -int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb); -int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb); -void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb); -void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb); void amdgpu_get_pcie_info(struct amdgpu_device *adev); @@ -1488,7 +1426,7 @@ struct amdgpu_device { bool is_atom_fw; uint8_t *bios; uint32_t bios_size; - struct amdgpu_bo *stollen_vga_memory; + struct amdgpu_bo *stolen_vga_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -1546,9 +1484,6 @@ struct amdgpu_device { struct amdgpu_mman mman; struct amdgpu_vram_scratch vram_scratch; struct amdgpu_wb wb; - atomic64_t vram_usage; - atomic64_t vram_vis_usage; - atomic64_t gtt_usage; atomic64_t num_bytes_moved; atomic64_t num_evictions; atomic64_t num_vram_cpu_page_faults; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 06879d1dcabd..a52795d9b458 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -285,19 +285,20 @@ static int acp_hw_init(void *handle) return 0; else if (r) return r; + if (adev->asic_type != CHIP_STONEY) { + adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); + if (adev->acp.acp_genpd == NULL) + return -ENOMEM; - adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); - if (adev->acp.acp_genpd == NULL) - return -ENOMEM; - - adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; - adev->acp.acp_genpd->gpd.power_off = acp_poweroff; - adev->acp.acp_genpd->gpd.power_on = acp_poweron; + adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; + adev->acp.acp_genpd->gpd.power_off = acp_poweroff; + adev->acp.acp_genpd->gpd.power_on = acp_poweron; - adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; + adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; - pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); + pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); + } adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, GFP_KERNEL); @@ -319,14 +320,29 @@ static int acp_hw_init(void *handle) return -ENOMEM; } - i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + switch (adev->asic_type) { + case CHIP_STONEY: + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; + break; + default: + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + } i2s_pdata[0].cap = DWC_I2S_PLAY; i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; + switch (adev->asic_type) { + case CHIP_STONEY: + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_COMP_PARAM1 | + DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; + break; + default: + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_COMP_PARAM1; + } - i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | - DW_I2S_QUIRK_COMP_PARAM1; i2s_pdata[1].cap = DWC_I2S_RECORD; i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; 
@@ -373,12 +389,14 @@ static int acp_hw_init(void *handle) if (r) return r; - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); - if (r) { - dev_err(dev, "Failed to add dev to genpd\n"); - return r; + if (adev->asic_type != CHIP_STONEY) { + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); + if (r) { + dev_err(dev, "Failed to add dev to genpd\n"); + return r; + } } } @@ -398,20 +416,22 @@ static int acp_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* return early if no ACP */ - if (!adev->acp.acp_genpd) + if (!adev->acp.acp_cell) return 0; - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); - /* If removal fails, dont giveup and try rest */ - if (ret) - dev_err(dev, "remove dev from genpd failed\n"); + if (adev->acp.acp_genpd) { + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); + /* If removal fails, dont giveup and try rest */ + if (ret) + dev_err(dev, "remove dev from genpd failed\n"); + } + kfree(adev->acp.acp_genpd); } mfd_remove_devices(adev->acp.parent); kfree(adev->acp.acp_res); - kfree(adev->acp.acp_genpd); kfree(adev->acp.acp_cell); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index ef79551b4cb7..57afad79f55d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -30,10 +30,10 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include "amdgpu.h" +#include "amdgpu_pm.h" #include "amd_acpi.h" #include "atom.h" -extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); /* Call the ATIF method */ /** @@ -289,7 +289,7 @@ out: * handles it. 
* Returns NOTIFY code */ -int amdgpu_atif_handler(struct amdgpu_device *adev, +static int amdgpu_atif_handler(struct amdgpu_device *adev, struct acpi_bus_event *event) { struct amdgpu_atif *atif = &adev->atif; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 37971d9402e3..5432af39a674 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -27,16 +27,15 @@ #include "amdgpu_gfx.h" #include <linux/module.h> -const struct kfd2kgd_calls *kfd2kgd; const struct kgd2kfd_calls *kgd2kfd; -bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); +bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**); int amdgpu_amdkfd_init(void) { int ret; #if defined(CONFIG_HSA_AMD_MODULE) - int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); + int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**); kgd2kfd_init_p = symbol_request(kgd2kfd_init); @@ -61,8 +60,21 @@ int amdgpu_amdkfd_init(void) return ret; } -bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev) +void amdgpu_amdkfd_fini(void) +{ + if (kgd2kfd) { + kgd2kfd->exit(); + symbol_put(kgd2kfd_init); + } +} + +void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) { + const struct kfd2kgd_calls *kfd2kgd; + + if (!kgd2kfd) + return; + switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: @@ -73,25 +85,12 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev) kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); break; default: - return false; + dev_info(adev->dev, "kfd not supported on this ASIC\n"); + return; } - return true; -} - -void amdgpu_amdkfd_fini(void) -{ - if (kgd2kfd) { - kgd2kfd->exit(); - symbol_put(kgd2kfd_init); - } -} - -void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) -{ - if (kgd2kfd) - adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, - adev->pdev, kfd2kgd); + adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, + adev->pdev, kfd2kgd); } void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) @@ -184,7 +183,8 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, return -ENOMEM; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo); + AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0, + &(*mem)->bo); if (r) { dev_err(adev->dev, "failed to allocate BO for amdkfd (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 73f83a10ae14..8d689ab7e429 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -26,6 +26,7 @@ #define AMDGPU_AMDKFD_H_INCLUDED #include <linux/types.h> +#include <linux/mmu_context.h> #include <kgd_kfd_interface.h> struct amdgpu_device; @@ -39,8 +40,6 @@ struct kgd_mem { int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); -bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev); - void amdgpu_amdkfd_suspend(struct amdgpu_device *adev); int amdgpu_amdkfd_resume(struct amdgpu_device *adev); void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, @@ -62,4 +61,19 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); +#define read_user_wptr(mmptr, wptr, dst) \ + ({ \ + bool valid = false; \ + if ((mmptr) && (wptr)) { \ + if ((mmptr) == current->mm) { \ + valid = !get_user((dst), (wptr)); \ + } else if (current->mm == NULL) { \ + use_mm(mmptr); \ + valid = 
!get_user((dst), (wptr)); \ + unuse_mm(mmptr); \ + } \ + } \ + valid; \ + }) + #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 5254562fd0f9..b9dbbf9cb8b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -39,6 +39,12 @@ #include "gmc/gmc_7_1_sh_mask.h" #include "cik_structs.h" +enum hqd_dequeue_request_type { + NO_ACTION = 0, + DRAIN_PIPE, + RESET_WAVES +}; + enum { MAX_TRAPID = 8, /* 3 bits in the bitfield. */ MAX_WATCH_ADDRESSES = 4 @@ -96,12 +102,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t hpd_size, uint64_t hpd_gpu_addr); static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr); + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm); static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd); static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); -static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); @@ -126,6 +135,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid); static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid); + +/* Because of REG_GET_FIELD() being used, we put this function in the + * asic specific file. 
+ */ +static int get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + return 0; +} static const struct kfd2kgd_calls kfd2kgd = { .init_gtt_mem_allocation = alloc_gtt_mem, @@ -150,7 +186,9 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, .write_vmid_invalidate_request = write_vmid_invalidate_request, - .get_fw_version = get_fw_version + .get_fw_version = get_fw_version, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, }; struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) @@ -186,7 +224,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); lock_srbm(kgd, mec, pipe, queue_id, 0); @@ -290,20 +328,38 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) } static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr) + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t wptr_shadow, is_wptr_shadow_valid; struct cik_mqd *m; + uint32_t *mqd_hqd; + uint32_t reg, wptr_val, data; m = get_mqd(mqd); - is_wptr_shadow_valid = !get_user(wptr_shadow, wptr); - if (is_wptr_shadow_valid) - m->cp_hqd_pq_wptr = wptr_shadow; - acquire_queue(kgd, pipe_id, queue_id); - gfx_v7_0_mqd_commit(adev, m); + + /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */ + mqd_hqd = &m->cp_mqd_base_addr_lo; + + for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++) + WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]); + + /* Copy userspace write pointer value to register. + * Activate doorbell logic to monitor subsequent changes. 
+ */ + data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, + CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); + + if (read_user_wptr(mm, wptr, wptr_val)) + WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); + + data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); + WREG32(mmCP_HQD_ACTIVE, data); + release_queue(kgd); return 0; @@ -382,30 +438,99 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t temp; - int timeout = utimeout; + enum hqd_dequeue_request_type type; + unsigned long flags, end_jiffies; + int retry; acquire_queue(kgd, pipe_id, queue_id); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0); - WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type); + switch (reset_type) { + case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: + type = DRAIN_PIPE; + break; + case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: + type = RESET_WAVES; + break; + default: + type = DRAIN_PIPE; + break; + } + + /* Workaround: If IQ timer is active and the wait time is close to or + * equal to 0, dequeueing is not safe. Wait until either the wait time + * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is + * cleared before continuing. Also, ensure wait times are set to at + * least 0x3. + */ + local_irq_save(flags); + preempt_disable(); + retry = 5000; /* wait for 500 usecs at maximum */ + while (true) { + temp = RREG32(mmCP_HQD_IQ_TIMER); + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) { + pr_debug("HW is processing IQ\n"); + goto loop; + } + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) { + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE) + == 3) /* SEM-rearm is safe */ + break; + /* Wait time 3 is safe for CP, but our MMIO read/write + * time is close to 1 microsecond, so check for 10 to + * leave more buffer room + */ + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME) + >= 10) + break; + pr_debug("IQ timer is active\n"); + } else + break; +loop: + if (!retry) { + pr_err("CP HQD IQ timer status time out\n"); + break; + } + ndelay(100); + --retry; + } + retry = 1000; + while (true) { + temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST); + if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK)) + break; + pr_debug("Dequeue request is pending\n"); + if (!retry) { + pr_err("CP HQD dequeue request time out\n"); + break; + } + ndelay(100); + --retry; + } + local_irq_restore(flags); + preempt_enable(); + + WREG32(mmCP_HQD_DEQUEUE_REQUEST, type); + + end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { temp = RREG32(mmCP_HQD_ACTIVE); - if (temp & CP_HQD_ACTIVE__ACTIVE_MASK) + if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; - if (timeout <= 0) { - pr_err("kfd: cp queue preemption time out.\n"); + if (time_after(jiffies, end_jiffies)) { + pr_err("cp queue preemption time out\n"); release_queue(kgd); return -ETIME; } - msleep(20); - timeout -= 20; + usleep_range(500, 1000); } release_queue(kgd); @@ -556,6 +681,16 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid) WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); } +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + lock_srbm(kgd, 0, 0, 0, vmid); + 
WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); + unlock_srbm(kgd); +} + static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; @@ -566,42 +701,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) switch (type) { case KGD_ENGINE_PFP: hdr = (const union amdgpu_firmware_header *) - adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw->data; break; case KGD_ENGINE_ME: hdr = (const union amdgpu_firmware_header *) - adev->gfx.me_fw->data; + adev->gfx.me_fw->data; break; case KGD_ENGINE_CE: hdr = (const union amdgpu_firmware_header *) - adev->gfx.ce_fw->data; + adev->gfx.ce_fw->data; break; case KGD_ENGINE_MEC1: hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec_fw->data; + adev->gfx.mec_fw->data; break; case KGD_ENGINE_MEC2: hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw->data; break; case KGD_ENGINE_RLC: hdr = (const union amdgpu_firmware_header *) - adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw->data; break; case KGD_ENGINE_SDMA1: hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[0].fw->data; + adev->sdma.instance[0].fw->data; break; case KGD_ENGINE_SDMA2: hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[1].fw->data; + adev->sdma.instance[1].fw->data; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 133d06671e46..fb6e5dbd5a03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -39,6 +39,12 @@ #include "vi_structs.h" #include "vid.h" +enum hqd_dequeue_request_type { + NO_ACTION = 0, + DRAIN_PIPE, + RESET_WAVES +}; + struct cik_sdma_rlc_registers; /* @@ -55,12 +61,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t hpd_size, uint64_t hpd_gpu_addr); static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr); + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm); static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd); static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, @@ -85,6 +94,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid); static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid); static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid); + +/* Because of REG_GET_FIELD() being used, we put this function in the + * asic specific file. 
+ */ +static int get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + return 0; +} static const struct kfd2kgd_calls kfd2kgd = { .init_gtt_mem_allocation = alloc_gtt_mem, @@ -111,12 +147,15 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, .write_vmid_invalidate_request = write_vmid_invalidate_request, - .get_fw_version = get_fw_version + .get_fw_version = get_fw_version, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, }; struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; + return (struct kfd2kgd_calls *)&kfd2kgd; } static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) @@ -147,7 +186,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); lock_srbm(kgd, mec, pipe, queue_id, 0); @@ -216,7 +255,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) uint32_t mec; uint32_t pipe; - mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); lock_srbm(kgd, mec, pipe, 0, 0); @@ -244,20 +283,67 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) } static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr) + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm) { - struct vi_mqd *m; - uint32_t shadow_wptr, valid_wptr; struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct vi_mqd *m; + uint32_t *mqd_hqd; + uint32_t reg, wptr_val, data; m = get_mqd(mqd); - valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr)); - if (valid_wptr == 0) - m->cp_hqd_pq_wptr = shadow_wptr; - acquire_queue(kgd, pipe_id, queue_id); - gfx_v8_0_mqd_commit(adev, mqd); + + /* HIQ is set during driver init period with vmid set to 0*/ + if (m->cp_hqd_vmid == 0) { + uint32_t value, mec, pipe; + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + value = RREG32(mmRLC_CP_SCHEDULERS); + value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, + ((mec << 5) | (pipe << 3) | queue_id | 0x80)); + WREG32(mmRLC_CP_SCHEDULERS, value); + } + + /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. 
*/ + mqd_hqd = &m->cp_mqd_base_addr_lo; + + for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++) + WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]); + + /* Tonga errata: EOP RPTR/WPTR should be left unmodified. + * This is safe since EOP RPTR==WPTR for any inactive HQD + * on ASICs that do not support context-save. + * EOP writes/reads can start anywhere in the ring. + */ + if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) { + WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr); + WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr); + WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem); + } + + for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++) + WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]); + + /* Copy userspace write pointer value to register. + * Activate doorbell logic to monitor subsequent changes. + */ + data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, + CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); + + if (read_user_wptr(mm, wptr, wptr_val)) + WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); + + data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); + WREG32(mmCP_HQD_ACTIVE, data); + release_queue(kgd); return 0; @@ -308,29 +394,102 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t temp; - int timeout = utimeout; + enum hqd_dequeue_request_type type; + unsigned long flags, end_jiffies; + int retry; + struct vi_mqd *m = get_mqd(mqd); acquire_queue(kgd, pipe_id, queue_id); - WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type); + if (m->cp_hqd_vmid == 0) + WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0); + + switch (reset_type) { + case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: + type = DRAIN_PIPE; + break; + case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: + type = RESET_WAVES; + break; + default: + type = DRAIN_PIPE; + break; + } + /* Workaround: If IQ timer is active and the wait time is close to or + * equal to 0, dequeueing is not safe. Wait until either the wait time + * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is + * cleared before continuing. Also, ensure wait times are set to at + * least 0x3. 
+ */ + local_irq_save(flags); + preempt_disable(); + retry = 5000; /* wait for 500 usecs at maximum */ + while (true) { + temp = RREG32(mmCP_HQD_IQ_TIMER); + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) { + pr_debug("HW is processing IQ\n"); + goto loop; + } + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) { + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE) + == 3) /* SEM-rearm is safe */ + break; + /* Wait time 3 is safe for CP, but our MMIO read/write + * time is close to 1 microsecond, so check for 10 to + * leave more buffer room + */ + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME) + >= 10) + break; + pr_debug("IQ timer is active\n"); + } else + break; +loop: + if (!retry) { + pr_err("CP HQD IQ timer status time out\n"); + break; + } + ndelay(100); + --retry; + } + retry = 1000; + while (true) { + temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST); + if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK)) + break; + pr_debug("Dequeue request is pending\n"); + + if (!retry) { + pr_err("CP HQD dequeue request time out\n"); + break; + } + ndelay(100); + --retry; + } + local_irq_restore(flags); + preempt_enable(); + + WREG32(mmCP_HQD_DEQUEUE_REQUEST, type); + + end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { temp = RREG32(mmCP_HQD_ACTIVE); - if (temp & CP_HQD_ACTIVE__ACTIVE_MASK) + if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; - if (timeout <= 0) { - pr_err("kfd: cp queue preemption time out.\n"); + if (time_after(jiffies, end_jiffies)) { + pr_err("cp queue preemption time out.\n"); release_queue(kgd); return -ETIME; } - msleep(20); - timeout -= 20; + usleep_range(500, 1000); } release_queue(kgd); @@ -444,6 +603,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, return 0; } +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + lock_srbm(kgd, 0, 0, 0, vmid); + WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); + unlock_srbm(kgd); +} + static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; @@ -454,42 +623,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) switch (type) { case KGD_ENGINE_PFP: hdr = (const union amdgpu_firmware_header *) - adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw->data; break; case KGD_ENGINE_ME: hdr = (const union amdgpu_firmware_header *) - adev->gfx.me_fw->data; + adev->gfx.me_fw->data; break; case KGD_ENGINE_CE: hdr = (const union amdgpu_firmware_header *) - adev->gfx.ce_fw->data; + adev->gfx.ce_fw->data; break; case KGD_ENGINE_MEC1: hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec_fw->data; + adev->gfx.mec_fw->data; break; case KGD_ENGINE_MEC2: hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw->data; break; case KGD_ENGINE_RLC: hdr = (const union amdgpu_firmware_header *) - adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw->data; break; case KGD_ENGINE_SDMA1: hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[0].fw->data; + adev->sdma.instance[0].fw->data; break; case KGD_ENGINE_SDMA2: hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[1].fw->data; + adev->sdma.instance[1].fw->data; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 2fb299afc12b..63ec1e1bb6aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, n = AMDGPU_BENCHMARK_ITERATIONS; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, - NULL, &sobj); + NULL, 0, &sobj); if (r) { goto out_cleanup; } @@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, goto out_cleanup; } r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, - NULL, &dobj); + NULL, 0, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index d324e1c24028..59089e027f4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -136,7 +136,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, } bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); if (usermm) { @@ -156,11 +156,11 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, entry->tv.bo = &entry->robj->tbo; entry->tv.shared = !entry->robj->prime_shared_count; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) gds_obj = entry->robj; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS) gws_obj = entry->robj; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; total_size += amdgpu_bo_size(entry->robj); @@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, struct amdgpu_fpriv *fpriv = filp->driver_priv; union drm_amdgpu_bo_list *args = data; uint32_t handle = args->in.list_handle; - const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr; + const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr); struct drm_amdgpu_bo_list_entry *info; struct amdgpu_bo_list *list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a99e0bca6812..fd435a96481c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -124,7 +124,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, true, domain, flags, NULL, &placement, NULL, - &obj); + 0, &obj); if (ret) { DRM_ERROR("(%d) bo create failed\n", ret); return ret; @@ -166,7 +166,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h r = amdgpu_bo_reserve(obj, true); if (unlikely(r != 0)) return r; - r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains, + r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains, min_offset, max_offset, mcaddr); amdgpu_bo_unreserve(obj); return r; @@ -659,7 +659,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); if (CGS_UCODE_ID_CP_MEC == type) - info->image_size = (header->jt_offset) << 2; + info->image_size = le32_to_cpu(header->jt_offset) << 2; info->fw_version = amdgpu_get_firmware_version(cgs_device, type); info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 
33789510e663..269b835571eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -54,7 +54,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, *offset = data->offset; - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { amdgpu_bo_unref(&p->uf_entry.robj); @@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } /* get chunks */ - chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks); + chunk_array_user = u64_to_user_ptr(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { ret = -EFAULT; @@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; - chunk_ptr = (void __user *)(uintptr_t)chunk_array[i]; + chunk_ptr = u64_to_user_ptr(chunk_array[i]); if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { ret = -EFAULT; @@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = (void __user *)(uintptr_t)user_chunk.chunk_data; + cdata = u64_to_user_ptr(user_chunk.chunk_data); p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); if (p->chunks[i].kdata == NULL) { @@ -246,7 +246,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, } total_vram = adev->mc.real_vram_size - adev->vram_pin_size; - used_vram = atomic64_read(&adev->vram_usage); + used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; spin_lock(&adev->mm_stats.lock); @@ -292,7 +292,8 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, /* Do the same for visible VRAM if half of it is free */ if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { u64 total_vis_vram = adev->mc.visible_vram_size; - u64 used_vis_vram = atomic64_read(&adev->vram_vis_usage); + u64 used_vis_vram = + amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); if (used_vis_vram < total_vis_vram) { u64 free_vis_vram = total_vis_vram - used_vis_vram; @@ -348,11 +349,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, * that. */ if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) - domain = bo->prefered_domains; + domain = bo->preferred_domains; else domain = bo->allowed_domains; } else { - domain = bo->prefered_domains; + domain = bo->preferred_domains; } } else { domain = bo->allowed_domains; @@ -673,10 +674,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) { - amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); + if (r) ttm_eu_backoff_reservation(&p->ticket, &p->validated); - } error_free_pages: @@ -724,21 +723,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) * If error is set than unvalidate buffer, otherwise just free memory * used by parsing context. 
**/ -static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) +static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, + bool backoff) { - struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; unsigned i; - if (!error) { - amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); - + if (!error) ttm_eu_fence_buffer_objects(&parser->ticket, &parser->validated, parser->fence); - } else if (backoff) { + else if (backoff) ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); - } for (i = 0; i < parser->num_post_dep_syncobjs; i++) drm_syncobj_put(parser->post_dep_syncobjs[i]); @@ -791,7 +787,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (amdgpu_sriov_vf(adev)) { struct dma_fence *f; - bo_va = vm->csa_bo_va; + + bo_va = fpriv->csa_va; BUG_ON(!bo_va); r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) @@ -828,7 +825,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } - r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync); + r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync); if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ @@ -1038,7 +1035,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, { int r; struct dma_fence *fence; - r = drm_syncobj_fence_get(p->filp, handle, &fence); + r = drm_syncobj_find_fence(p->filp, handle, &fence); if (r) return r; @@ -1437,7 +1434,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, if (fences == NULL) return -ENOMEM; - fences_user = (void __user *)(uintptr_t)(wait->in.fences); + fences_user = u64_to_user_ptr(wait->in.fences); if (copy_from_user(fences, fences_user, sizeof(struct drm_amdgpu_fence) * fence_count)) { r = -EFAULT; @@ -1490,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, addr > mapping->last) continue; - *bo = lobj->bo_va->bo; + *bo = lobj->bo_va->base.bo; return mapping; } @@ -1499,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, addr > mapping->last) continue; - *bo = lobj->bo_va->bo; + *bo = lobj->bo_va->base.bo; return mapping; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6279956e92a4..1a459ac63df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -336,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) { - int r; - - if (adev->vram_scratch.robj == NULL) { - r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, - PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->vram_scratch.robj); - if (r) { - return r; - } - } - - r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->vram_scratch.robj, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->vram_scratch.robj); - return r; - } - r = amdgpu_bo_kmap(adev->vram_scratch.robj, - (void **)&adev->vram_scratch.ptr); - if (r) - amdgpu_bo_unpin(adev->vram_scratch.robj); - amdgpu_bo_unreserve(adev->vram_scratch.robj); - - return r; + return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->vram_scratch.robj, + &adev->vram_scratch.gpu_addr, + (void **)&adev->vram_scratch.ptr); } static void amdgpu_vram_scratch_fini(struct 
amdgpu_device *adev) { - int r; - - if (adev->vram_scratch.robj == NULL) { - return; - } - r = amdgpu_bo_reserve(adev->vram_scratch.robj, true); - if (likely(r == 0)) { - amdgpu_bo_kunmap(adev->vram_scratch.robj); - amdgpu_bo_unpin(adev->vram_scratch.robj); - amdgpu_bo_unreserve(adev->vram_scratch.robj); - } - amdgpu_bo_unref(&adev->vram_scratch.robj); + amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); } /** @@ -539,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int r; if (adev->wb.wb_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t), + /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ + r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->wb.wb_obj, &adev->wb.gpu_addr, (void **)&adev->wb.wb); @@ -570,47 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) { unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); - if (offset < adev->wb.num_wb) { - __set_bit(offset, adev->wb.used); - *wb = offset; - return 0; - } else { - return -EINVAL; - } -} -/** - * amdgpu_wb_get_64bit - Allocate a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Allocate a wb slot for use by the driver (all asics). - * Returns 0 on success or -EINVAL on failure. - */ -int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb) -{ - unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, - adev->wb.num_wb, 0, 2, 7, 0); - if ((offset + 1) < adev->wb.num_wb) { + if (offset < adev->wb.num_wb) { __set_bit(offset, adev->wb.used); - __set_bit(offset + 1, adev->wb.used); - *wb = offset; - return 0; - } else { - return -EINVAL; - } -} - -int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb) -{ - int i = 0; - unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, - adev->wb.num_wb, 0, 8, 63, 0); - if ((offset + 7) < adev->wb.num_wb) { - for (i = 0; i < 8; i++) - __set_bit(offset + i, adev->wb.used); - *wb = offset; + *wb = offset * 8; /* convert to dw offset */ return 0; } else { return -EINVAL; @@ -632,39 +561,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) } /** - * amdgpu_wb_free_64bit - Free a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Free a wb slot allocated for use by the driver (all asics) - */ -void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb) -{ - if ((wb + 1) < adev->wb.num_wb) { - __clear_bit(wb, adev->wb.used); - __clear_bit(wb + 1, adev->wb.used); - } -} - -/** - * amdgpu_wb_free_256bit - Free a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Free a wb slot allocated for use by the driver (all asics) - */ -void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb) -{ - int i = 0; - - if ((wb + 7) < adev->wb.num_wb) - for (i = 0; i < 8; i++) - __clear_bit(wb + i, adev->wb.used); -} - -/** * amdgpu_vram_location - try to find VRAM location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations @@ -1180,6 +1076,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_gtt_size = -1; } + /* valid range is between 4 and 9 inclusive */ + if (amdgpu_vm_fragment_size != -1 && + (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { + dev_warn(adev->dev, "valid range is between 4 and 9\n"); + amdgpu_vm_fragment_size = -1; + } + 
amdgpu_check_vm_size(adev); amdgpu_check_block_size(adev); @@ -1948,7 +1851,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_DCE, AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, - AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_UVD, + AMD_IP_BLOCK_TYPE_VCE }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index cdf2ab20166a..6ad243293a78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -482,7 +482,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - drm_gem_object_unreference_unlocked(amdgpu_fb->obj); + drm_gem_object_put_unlocked(amdgpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(amdgpu_fb); } @@ -542,14 +542,14 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); if (ret) { kfree(amdgpu_fb); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 5e9ce8a29669..e39ec981b11c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -68,9 +68,10 @@ * - 3.16.0 - Add reserved vmid support * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS. * - 3.18.0 - Export gpu always on cu bitmap + * - 3.19.0 - Add support for UVD MJPEG decode */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 18 +#define KMS_DRIVER_MINOR 19 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -94,6 +95,7 @@ unsigned amdgpu_ip_block_mask = 0xffffffff; int amdgpu_bapm = -1; int amdgpu_deep_color = 0; int amdgpu_vm_size = -1; +int amdgpu_vm_fragment_size = -1; int amdgpu_vm_block_size = -1; int amdgpu_vm_fault_stop = 0; int amdgpu_vm_debug = 0; @@ -183,6 +185,9 @@ module_param_named(deep_color, amdgpu_deep_color, int, 0444); MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)"); module_param_named(vm_size, amdgpu_vm_size, int, 0444); +MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 
4 = 64K (default), Max 9 = 2M)"); +module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444); + MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)"); module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 0a8ee2411180..9afa9c097e1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -118,7 +118,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) amdgpu_bo_unpin(abo); amdgpu_bo_unreserve(abo); } - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); } static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, @@ -250,7 +250,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; info->fix.smem_start = adev->mc.aper_base + tmp; info->fix.smem_len = amdgpu_bo_size(abo); - info->screen_base = abo->kptr; + info->screen_base = amdgpu_bo_kptr(abo); info->screen_size = amdgpu_bo_size(abo); drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); @@ -280,7 +280,7 @@ out: } if (fb && ret) { - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 5cc4987cd887..94c1e2e8e34c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -144,7 +144,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->gart.robj); + NULL, NULL, 0, &adev->gart.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 917ac5e074a0..7171968f261e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -59,7 +59,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, retry: r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, - flags, NULL, NULL, &robj); + flags, NULL, NULL, 0, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { @@ -91,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) spin_lock(&file->table_lock); idr_for_each_entry(&file->object_idr, gobj, handle) { WARN_ONCE(1, "And also active allocations!\n"); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); } idr_destroy(&file->object_idr); spin_unlock(&file->table_lock); @@ -225,9 +225,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_VRAM_CLEARED| - AMDGPU_GEM_CREATE_SHADOW | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) + AMDGPU_GEM_CREATE_VRAM_CLEARED)) return -EINVAL; /* reject invalid gem domains */ @@ -263,7 +261,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) return r; @@ -306,7 +304,7 @@ int 
amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, return r; bo = gem_to_amdgpu_bo(gobj); - bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); if (r) @@ -341,7 +339,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) return r; @@ -355,7 +353,7 @@ unlock_mmap_sem: up_read(¤t->mm->mmap_sem); release_object: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -374,11 +372,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, robj = gem_to_amdgpu_bo(gobj); if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return -EPERM; } *offset_p = amdgpu_bo_mmap_offset(robj); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return 0; } @@ -448,7 +446,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } else r = ret; - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -491,7 +489,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, unreserve: amdgpu_bo_unreserve(robj); out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -623,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, switch (args->operation) { case AMDGPU_VA_OP_MAP: - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address, + r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address, args->map_size); if (r) goto error_backoff; @@ -643,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->map_size); break; case AMDGPU_VA_OP_REPLACE: - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address, + r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address, args->map_size); if (r) goto error_backoff; @@ -664,7 +662,7 @@ error_backoff: ttm_eu_backoff_reservation(&ticket, &list); error_unref: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -689,11 +687,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, switch (args->op) { case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { struct drm_amdgpu_gem_create_in info; - void __user *out = (void __user *)(uintptr_t)args->value; + void __user *out = u64_to_user_ptr(args->value); info.bo_size = robj->gem_base.size; info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; - info.domains = robj->prefered_domains; + info.domains = robj->preferred_domains; info.domain_flags = robj->flags; amdgpu_bo_unreserve(robj); if (copy_to_user(out, &info, sizeof(info))) @@ -711,10 +709,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, amdgpu_bo_unreserve(robj); break; } - robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | + robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_CPU); - robj->allowed_domains = robj->prefered_domains; + robj->allowed_domains = robj->preferred_domains; if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; @@ -726,7 +724,7 @@ int amdgpu_gem_op_ioctl(struct 
drm_device *dev, void *data, } out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -754,7 +752,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 5e6b90c6794f..9e05e257729f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -28,7 +28,7 @@ struct amdgpu_gtt_mgr { struct drm_mm mm; spinlock_t lock; - uint64_t available; + atomic64_t available; }; /** @@ -54,7 +54,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, size = (adev->mc.gart_size >> PAGE_SHIFT) - start; drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); - mgr->available = p_size; + atomic64_set(&mgr->available, p_size); man->priv = mgr; return 0; } @@ -153,15 +153,6 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, return r; } -void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_gtt_mgr *mgr = man->priv; - - seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n", - man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20); - -} /** * amdgpu_gtt_mgr_new - allocate a new node * @@ -182,11 +173,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, int r; spin_lock(&mgr->lock); - if (mgr->available < mem->num_pages) { + if (atomic64_read(&mgr->available) < mem->num_pages) { spin_unlock(&mgr->lock); return 0; } - mgr->available -= mem->num_pages; + atomic64_sub(mem->num_pages, &mgr->available); spin_unlock(&mgr->lock); node = kzalloc(sizeof(*node), GFP_KERNEL); @@ -213,9 +204,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, return 0; err_out: - spin_lock(&mgr->lock); - mgr->available += mem->num_pages; - spin_unlock(&mgr->lock); + atomic64_add(mem->num_pages, &mgr->available); return r; } @@ -242,30 +231,47 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, spin_lock(&mgr->lock); if (node->start != AMDGPU_BO_INVALID_OFFSET) drm_mm_remove_node(node); - mgr->available += mem->num_pages; spin_unlock(&mgr->lock); + atomic64_add(mem->num_pages, &mgr->available); kfree(node); mem->mm_node = NULL; } /** + * amdgpu_gtt_mgr_usage - return usage of GTT domain + * + * @man: TTM memory type manager + * + * Return how many bytes are used in the GTT domain + */ +uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) +{ + struct amdgpu_gtt_mgr *mgr = man->priv; + + return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE; +} + +/** * amdgpu_gtt_mgr_debug - dump VRAM table * * @man: TTM memory type manager - * @prefix: text prefix + * @printer: DRM printer to use * * Dump the table content using printk. 
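As a side note, the new amdgpu_gtt_mgr_usage() helper above derives bytes in use from the manager size (in pages) and the atomic free-page counter; the sketch below shows how a caller might report it, mirroring the AMDGPU_INFO_GTT_USAGE change later in this patch. The report_gtt_usage() wrapper is hypothetical and not part of the patch.

	/* Hypothetical helper: print GTT usage via amdgpu_gtt_mgr_usage(). */
	static void report_gtt_usage(struct amdgpu_device *adev)
	{
		struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_TT];
		u64 used  = amdgpu_gtt_mgr_usage(man);		/* bytes in use */
		u64 total = (u64)man->size * PAGE_SIZE;		/* man->size is in pages */

		pr_info("GTT usage: %lluMB of %lluMB\n", used >> 20, total >> 20);
	}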
*/ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, - const char *prefix) + struct drm_printer *printer) { struct amdgpu_gtt_mgr *mgr = man->priv; - struct drm_printer p = drm_debug_printer(prefix); spin_lock(&mgr->lock); - drm_mm_print(&mgr->mm, &p); + drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); + + drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n", + man->size, (u64)atomic64_read(&mgr->available), + amdgpu_gtt_mgr_usage(man) >> 20); } const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 09f833255ba1..e16229000a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -158,7 +158,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - amdgpu_amdkfd_load_interface(adev); amdgpu_amdkfd_device_probe(adev); amdgpu_amdkfd_device_init(adev); @@ -456,13 +455,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: - ui64 = atomic64_read(&adev->vram_usage); + ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VIS_VRAM_USAGE: - ui64 = atomic64_read(&adev->vram_vis_usage); + ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_GTT_USAGE: - ui64 = atomic64_read(&adev->gtt_usage); + ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); return copy_to_user(out, &ui64, min(size, 8u)) ? 
-EFAULT : 0; case AMDGPU_INFO_GDS_CONFIG: { struct drm_amdgpu_info_gds gds_info; @@ -498,7 +497,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file mem.vram.total_heap_size = adev->mc.real_vram_size; mem.vram.usable_heap_size = adev->mc.real_vram_size - adev->vram_pin_size; - mem.vram.heap_usage = atomic64_read(&adev->vram_usage); + mem.vram.heap_usage = + amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = @@ -507,7 +507,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file adev->mc.visible_vram_size - (adev->vram_pin_size - adev->invisible_pin_size); mem.cpu_accessible_vram.heap_usage = - atomic64_read(&adev->vram_vis_usage); + amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.cpu_accessible_vram.max_allocation = mem.cpu_accessible_vram.usable_heap_size * 3 / 4; @@ -515,7 +515,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file mem.gtt.total_heap_size *= PAGE_SIZE; mem.gtt.usable_heap_size = mem.gtt.total_heap_size - adev->gart_pin_size; - mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage); + mem.gtt.heap_usage = + amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; return copy_to_user(out, &mem, @@ -589,11 +590,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); - dev_info.pte_fragment_size = - (1 << AMDGPU_LOG2_PAGES_PER_FRAG(adev)) * - AMDGPU_GPU_PAGE_SIZE; + dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; - dev_info.cu_active_number = adev->gfx.cu_info.number; dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info.ce_ram_size = adev->gfx.ce_ram_size; @@ -842,7 +840,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) } if (amdgpu_sriov_vf(adev)) { - r = amdgpu_map_static_csa(adev, &fpriv->vm); + r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); if (r) goto out_suspend; } @@ -895,8 +893,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, if (amdgpu_sriov_vf(adev)) { /* TODO: how to handle reserve failure */ BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true)); - amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va); - fpriv->vm.csa_bo_va = NULL; + amdgpu_vm_bo_rmv(adev, fpriv->csa_va); + fpriv->csa_va = NULL; amdgpu_bo_unreserve(adev->virt.csa_obj); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 38f739fb727b..6558a3ed57a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -359,7 +359,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) head = bo->mn_list.next; bo->mn = NULL; - list_del(&bo->mn_list); + list_del_init(&bo->mn_list); if (list_empty(head)) { struct amdgpu_mn_node *node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 3ec43cf9ad78..e7e899190bef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -37,55 +37,6 @@ #include "amdgpu.h" #include "amdgpu_trace.h" - - -static u64 
amdgpu_get_vis_part_size(struct amdgpu_device *adev, - struct ttm_mem_reg *mem) -{ - if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size) - return 0; - - return ((mem->start << PAGE_SHIFT) + mem->size) > - adev->mc.visible_vram_size ? - adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : - mem->size; -} - -static void amdgpu_update_memory_usage(struct amdgpu_device *adev, - struct ttm_mem_reg *old_mem, - struct ttm_mem_reg *new_mem) -{ - u64 vis_size; - if (!adev) - return; - - if (new_mem) { - switch (new_mem->mem_type) { - case TTM_PL_TT: - atomic64_add(new_mem->size, &adev->gtt_usage); - break; - case TTM_PL_VRAM: - atomic64_add(new_mem->size, &adev->vram_usage); - vis_size = amdgpu_get_vis_part_size(adev, new_mem); - atomic64_add(vis_size, &adev->vram_vis_usage); - break; - } - } - - if (old_mem) { - switch (old_mem->mem_type) { - case TTM_PL_TT: - atomic64_sub(old_mem->size, &adev->gtt_usage); - break; - case TTM_PL_VRAM: - atomic64_sub(old_mem->size, &adev->vram_usage); - vis_size = amdgpu_get_vis_part_size(adev, old_mem); - atomic64_sub(vis_size, &adev->vram_vis_usage); - break; - } - } -} - static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); @@ -94,7 +45,6 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct amdgpu_bo, tbo); amdgpu_bo_kunmap(bo); - amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL); drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); @@ -220,7 +170,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, } /** - * amdgpu_bo_create_kernel - create BO for kernel use + * amdgpu_bo_create_reserved - create reserved BO for kernel use * * @adev: amdgpu device object * @size: size for the new BO @@ -230,24 +180,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, * @gpu_addr: GPU addr of the pinned BO * @cpu_addr: optional CPU address mapping * - * Allocates and pins a BO for kernel internal use. + * Allocates and pins a BO for kernel internal use, and returns it still + * reserved. * * Returns 0 on success, negative error code otherwise. 
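For context, a minimal sketch of how the new amdgpu_bo_create_reserved() is intended to be called, following the pattern of the amdgpu_bo_create_kernel() wrapper added just below; the size and domain here are arbitrary, and *bo_ptr must start out NULL so the helper allocates a fresh BO rather than reusing one.

	/* Hypothetical caller: allocate, fill through the CPU mapping while the
	 * BO is still reserved and pinned, then drop the reservation. */
	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr;
	void *cpu_addr;
	int r;

	r = amdgpu_bo_create_reserved(adev, 4096, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, &gpu_addr, &cpu_addr);
	if (r)
		return r;

	memset(cpu_addr, 0, 4096);
	amdgpu_bo_unreserve(bo);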
*/ -int amdgpu_bo_create_kernel(struct amdgpu_device *adev, - unsigned long size, int align, - u32 domain, struct amdgpu_bo **bo_ptr, - u64 *gpu_addr, void **cpu_addr) +int amdgpu_bo_create_reserved(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) { + bool free = false; int r; - r = amdgpu_bo_create(adev, size, align, true, domain, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, bo_ptr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); - return r; + if (!*bo_ptr) { + r = amdgpu_bo_create(adev, size, align, true, domain, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, + NULL, NULL, 0, bo_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", + r); + return r; + } + free = true; } r = amdgpu_bo_reserve(*bo_ptr, false); @@ -270,20 +226,52 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, } } - amdgpu_bo_unreserve(*bo_ptr); - return 0; error_unreserve: amdgpu_bo_unreserve(*bo_ptr); error_free: - amdgpu_bo_unref(bo_ptr); + if (free) + amdgpu_bo_unref(bo_ptr); return r; } /** + * amdgpu_bo_create_kernel - create BO for kernel use + * + * @adev: amdgpu device object + * @size: size for the new BO + * @align: alignment for the new BO + * @domain: where to place it + * @bo_ptr: resulting BO + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: optional CPU address mapping + * + * Allocates and pins a BO for kernel internal use. + * + * Returns 0 on success, negative error code otherwise. + */ +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) +{ + int r; + + r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr, + gpu_addr, cpu_addr); + + if (r) + return r; + + amdgpu_bo_unreserve(*bo_ptr); + + return 0; +} + +/** * amdgpu_bo_free_kernel - free BO for kernel use * * @bo: amdgpu BO to free @@ -318,6 +306,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct sg_table *sg, struct ttm_placement *placement, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr) { struct amdgpu_bo *bo; @@ -352,13 +341,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, } INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); - bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | + bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_CPU | AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA); - bo->allowed_domains = bo->prefered_domains; + bo->allowed_domains = bo->preferred_domains; if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; @@ -418,7 +407,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { struct dma_fence *fence; - r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); + r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence); if (unlikely(r)) goto fail_unreserve; @@ -470,6 +459,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &placement, bo->tbo.resv, + 0, &bo->shadow); if (!r) { bo->shadow->parent = amdgpu_bo_ref(bo); @@ -481,11 +471,15 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, return r; } +/* init_value will only take effect when flags 
contains + * AMDGPU_GEM_CREATE_VRAM_CLEARED. + */ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr) { struct ttm_placement placement = {0}; @@ -500,7 +494,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, flags, sg, &placement, - resv, bo_ptr); + resv, init_value, bo_ptr); if (r) return r; @@ -562,7 +556,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo) if (bo->pin_count) return 0; - domain = bo->prefered_domains; + domain = bo->preferred_domains; retry: amdgpu_ttm_placement_from_domain(bo, domain); @@ -609,16 +603,16 @@ err: int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) { - bool is_iomem; + void *kptr; long r; if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) return -EPERM; - if (bo->kptr) { - if (ptr) { - *ptr = bo->kptr; - } + kptr = amdgpu_bo_kptr(bo); + if (kptr) { + if (ptr) + *ptr = kptr; return 0; } @@ -631,19 +625,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) if (r) return r; - bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); if (ptr) - *ptr = bo->kptr; + *ptr = amdgpu_bo_kptr(bo); return 0; } +void *amdgpu_bo_kptr(struct amdgpu_bo *bo) +{ + bool is_iomem; + + return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); +} + void amdgpu_bo_kunmap(struct amdgpu_bo *bo) { - if (bo->kptr == NULL) - return; - bo->kptr = NULL; - ttm_bo_kunmap(&bo->kmap); + if (bo->kmap.bo) + ttm_bo_kunmap(&bo->kmap); } struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) @@ -944,8 +942,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, return; /* move_notify is called before move happens */ - amdgpu_update_memory_usage(adev, &bo->mem, new_mem); - trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 833b172a2c2a..a288fa6d72c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -33,6 +33,61 @@ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX +/* bo virtual addresses in a vm */ +struct amdgpu_bo_va_mapping { + struct list_head list; + struct rb_node rb; + uint64_t start; + uint64_t last; + uint64_t __subtree_last; + uint64_t offset; + uint64_t flags; +}; + +/* User space allocated BO in a VM */ +struct amdgpu_bo_va { + struct amdgpu_vm_bo_base base; + + /* protected by bo being reserved */ + struct dma_fence *last_pt_update; + unsigned ref_count; + + /* mappings for this bo_va */ + struct list_head invalids; + struct list_head valids; +}; + +struct amdgpu_bo { + /* Protected by tbo.reserved */ + u32 preferred_domains; + u32 allowed_domains; + struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + struct ttm_placement placement; + struct ttm_buffer_object tbo; + struct ttm_bo_kmap_obj kmap; + u64 flags; + unsigned pin_count; + u64 tiling_flags; + u64 metadata_flags; + void *metadata; + u32 metadata_size; + unsigned prime_shared_count; + /* list of all virtual address to which this bo is associated to */ + struct list_head va; + /* Constant after initialization */ + struct drm_gem_object gem_base; + struct amdgpu_bo *parent; + struct amdgpu_bo *shadow; + + struct ttm_bo_kmap_obj dma_buf_vmap; + struct amdgpu_mn *mn; + + union { + struct list_head mn_list; + struct list_head shadow_list; + }; +}; + /** * amdgpu_mem_type_to_domain - return domain corresponding to 
mem_type * @mem_type: ttm memory type @@ -132,6 +187,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr); int amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, @@ -139,7 +195,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct sg_table *sg, struct ttm_placement *placement, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr); +int amdgpu_bo_create_reserved(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_create_kernel(struct amdgpu_device *adev, unsigned long size, int align, u32 domain, struct amdgpu_bo **bo_ptr, @@ -147,6 +208,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); +void *amdgpu_bo_kptr(struct amdgpu_bo *bo); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); void amdgpu_bo_unref(struct amdgpu_bo **bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h index c19c4d138751..f21a7716b90e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h @@ -30,6 +30,7 @@ struct cg_flag_name const char *name; }; +void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); void amdgpu_pm_print_power_states(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 6bdc866570ab..5b3f92891f89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -69,7 +69,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, ww_mutex_lock(&resv->lock, NULL); ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, - AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo); + AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo); ww_mutex_unlock(&resv->lock); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 15b7149d1204..6c5646b48d1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -184,47 +184,22 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - if (ring->funcs->support_64bit_ptrs) { - r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); - return r; - } - - r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); - return r; - } - - } else { - r = amdgpu_wb_get(adev, &ring->rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); - return r; - } - - r = amdgpu_wb_get(adev, &ring->wptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); - return r; - } - + r = amdgpu_wb_get(adev, &ring->rptr_offs); + if (r) { + dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); + return r; } - if (amdgpu_sriov_vf(adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) { - r = amdgpu_wb_get_256Bit(adev, 
&ring->fence_offs); - if (r) { - dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_wb_get(adev, &ring->wptr_offs); + if (r) { + dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); + return r; + } - } else { - r = amdgpu_wb_get(adev, &ring->fence_offs); - if (r) { - dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_wb_get(adev, &ring->fence_offs); + if (r) { + dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); + return r; } r = amdgpu_wb_get(adev, &ring->cond_exe_offs); @@ -286,19 +261,15 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) { ring->ready = false; - if (ring->funcs->support_64bit_ptrs) { - amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs); - amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs); - } else { - amdgpu_wb_free(ring->adev, ring->rptr_offs); - amdgpu_wb_free(ring->adev, ring->wptr_offs); - } + /* Not to finish a ring which is not initialized */ + if (!(ring->adev) || !(ring->adev->rings[ring->idx])) + return; + + amdgpu_wb_free(ring->adev, ring->rptr_offs); + amdgpu_wb_free(ring->adev, ring->wptr_offs); amdgpu_wb_free(ring->adev, ring->cond_exe_offs); - if (amdgpu_sriov_vf(ring->adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) - amdgpu_wb_free_256bit(ring->adev, ring->fence_offs); - else - amdgpu_wb_free(ring->adev, ring->fence_offs); + amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 5ca75a456ad2..3144400435b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&sa_manager->flist[i]); r = amdgpu_bo_create(adev, size, align, true, domain, - 0, NULL, NULL, &sa_manager->bo); + 0, NULL, NULL, 0, &sa_manager->bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a6899180b265..c586f44312f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct dma_fence *f = e->fence; struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + if (dma_fence_is_signaled(f)) { + hash_del(&e->node); + dma_fence_put(f); + kmem_cache_free(amdgpu_sync_slab, e); + continue; + } if (ring && s_fence) { /* For fences from the same ring it is sufficient * when they are scheduled. 
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, } } - if (dma_fence_is_signaled(f)) { - hash_del(&e->node); - dma_fence_put(f); - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - return f; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 3c4d7574d704..ed8c3739015b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, - NULL, NULL, &vram_obj); + NULL, NULL, 0, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -82,7 +82,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, - NULL, gtt_obj + i); + NULL, 0, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 509f7a63d40c..1c88bd5e29ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -14,6 +14,62 @@ #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished) +TRACE_EVENT(amdgpu_ttm_tt_populate, + TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address), + TP_ARGS(adev, dma_address, phys_address), + TP_STRUCT__entry( + __field(uint16_t, domain) + __field(uint8_t, bus) + __field(uint8_t, slot) + __field(uint8_t, func) + __field(uint64_t, dma) + __field(uint64_t, phys) + ), + TP_fast_assign( + __entry->domain = pci_domain_nr(adev->pdev->bus); + __entry->bus = adev->pdev->bus->number; + __entry->slot = PCI_SLOT(adev->pdev->devfn); + __entry->func = PCI_FUNC(adev->pdev->devfn); + __entry->dma = dma_address; + __entry->phys = phys_address; + ), + TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx", + (unsigned)__entry->domain, + (unsigned)__entry->bus, + (unsigned)__entry->slot, + (unsigned)__entry->func, + (unsigned long long)__entry->dma, + (unsigned long long)__entry->phys) +); + +TRACE_EVENT(amdgpu_ttm_tt_unpopulate, + TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address), + TP_ARGS(adev, dma_address, phys_address), + TP_STRUCT__entry( + __field(uint16_t, domain) + __field(uint8_t, bus) + __field(uint8_t, slot) + __field(uint8_t, func) + __field(uint64_t, dma) + __field(uint64_t, phys) + ), + TP_fast_assign( + __entry->domain = pci_domain_nr(adev->pdev->bus); + __entry->bus = adev->pdev->bus->number; + __entry->slot = PCI_SLOT(adev->pdev->devfn); + __entry->func = PCI_FUNC(adev->pdev->devfn); + __entry->dma = dma_address; + __entry->phys = phys_address; + ), + TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx", + (unsigned)__entry->domain, + (unsigned)__entry->bus, + (unsigned)__entry->slot, + (unsigned)__entry->func, + (unsigned long long)__entry->dma, + (unsigned long long)__entry->phys) +); + TRACE_EVENT(amdgpu_mm_rreg, TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_ARGS(did, reg, value), @@ -105,12 +161,12 @@ TRACE_EVENT(amdgpu_bo_create, __entry->bo = bo; __entry->pages = bo->tbo.num_pages; __entry->type = bo->tbo.mem.mem_type; - __entry->prefer = bo->prefered_domains; + __entry->prefer = bo->preferred_domains; __entry->allow = bo->allowed_domains; __entry->visible = bo->flags; ), - 
TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d", + TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d", __entry->bo, __entry->pages, __entry->type, __entry->prefer, __entry->allow, __entry->visible) ); @@ -228,7 +284,7 @@ TRACE_EVENT(amdgpu_vm_bo_map, ), TP_fast_assign( - __entry->bo = bo_va ? bo_va->bo : NULL; + __entry->bo = bo_va ? bo_va->base.bo : NULL; __entry->start = mapping->start; __entry->last = mapping->last; __entry->offset = mapping->offset; @@ -252,7 +308,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, ), TP_fast_assign( - __entry->bo = bo_va->bo; + __entry->bo = bo_va->base.bo; __entry->start = mapping->start; __entry->last = mapping->last; __entry->offset = mapping->offset; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e6f9a54c959d..8b2c294f6f79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -43,6 +43,7 @@ #include <linux/pagemap.h> #include <linux/debugfs.h> #include "amdgpu.h" +#include "amdgpu_trace.h" #include "bif/bif_4_1_d.h" #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) @@ -662,6 +663,38 @@ release_pages: return r; } +static void amdgpu_trace_dma_map(struct ttm_tt *ttm) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_ttm_tt *gtt = (void *)ttm; + unsigned i; + + if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) { + for (i = 0; i < ttm->num_pages; i++) { + trace_amdgpu_ttm_tt_populate( + adev, + gtt->ttm.dma_address[i], + page_to_phys(ttm->pages[i])); + } + } +} + +static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_ttm_tt *gtt = (void *)ttm; + unsigned i; + + if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) { + for (i = 0; i < ttm->num_pages; i++) { + trace_amdgpu_ttm_tt_unpopulate( + adev, + gtt->ttm.dma_address[i], + page_to_phys(ttm->pages[i])); + } + } +} + /* prepare the sg table with the user pages */ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) { @@ -688,6 +721,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); + amdgpu_trace_dma_map(ttm); + return 0; release_sg: @@ -721,6 +756,8 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) put_page(page); } + amdgpu_trace_dma_unmap(ttm); + sg_free_table(ttm->sg); } @@ -753,7 +790,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct amdgpu_ttm_tt *gtt = (void*)ttm; - int r; + int r = 0; if (gtt->userptr) { r = amdgpu_ttm_tt_pin_userptr(ttm); @@ -892,7 +929,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) { - struct amdgpu_device *adev; + struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned i; int r; @@ -915,14 +952,14 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); ttm->state = tt_unbound; - return 0; + r = 0; + goto trace_mappings; } - adev = amdgpu_ttm_adev(ttm->bdev); - #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { - return ttm_dma_populate(>t->ttm, adev->dev); + r = ttm_dma_populate(>t->ttm, adev->dev); + goto trace_mappings; } #endif @@ -945,7 +982,12 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) return -EFAULT; } } - return 
0; + + r = 0; +trace_mappings: + if (likely(!r)) + amdgpu_trace_dma_map(ttm); + return r; } static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) @@ -966,6 +1008,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) adev = amdgpu_ttm_adev(ttm->bdev); + amdgpu_trace_dma_unmap(ttm); + #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { ttm_dma_unpopulate(>t->ttm, adev->dev); @@ -1232,23 +1276,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); - r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->stollen_vga_memory); - if (r) { - return r; - } - r = amdgpu_bo_reserve(adev->stollen_vga_memory, false); + r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->stolen_vga_memory, + NULL, NULL); if (r) return r; - r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL); - amdgpu_bo_unreserve(adev->stollen_vga_memory); - if (r) { - amdgpu_bo_unref(&adev->stollen_vga_memory); - return r; - } DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); @@ -1319,13 +1352,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (!adev->mman.initialized) return; amdgpu_ttm_debugfs_fini(adev); - if (adev->stollen_vga_memory) { - r = amdgpu_bo_reserve(adev->stollen_vga_memory, true); + if (adev->stolen_vga_memory) { + r = amdgpu_bo_reserve(adev->stolen_vga_memory, true); if (r == 0) { - amdgpu_bo_unpin(adev->stollen_vga_memory); - amdgpu_bo_unreserve(adev->stollen_vga_memory); + amdgpu_bo_unpin(adev->stolen_vga_memory); + amdgpu_bo_unreserve(adev->stolen_vga_memory); } - amdgpu_bo_unref(&adev->stollen_vga_memory); + amdgpu_bo_unref(&adev->stolen_vga_memory); } ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); @@ -1509,11 +1542,12 @@ error_free: } int amdgpu_fill_buffer(struct amdgpu_bo *bo, - uint32_t src_data, + uint64_t src_data, struct reservation_object *resv, struct dma_fence **fence) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/ uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; @@ -1545,7 +1579,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, num_pages -= mm_node->size; ++mm_node; } - num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; + + /* 10 double words for each SDMA_OP_PTEPDE cmd */ + num_dw = num_loops * 10; /* for IB padding */ num_dw += 64; @@ -1570,12 +1606,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t byte_count = mm_node->size << PAGE_SHIFT; uint64_t dst_addr; + WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8"); + dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); while (byte_count) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); - amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, - dst_addr, cur_size_in_bytes); + amdgpu_vm_set_pte_pde(adev, &job->ibs[0], + dst_addr, 0, + cur_size_in_bytes >> 3, 0, + src_data); dst_addr += cur_size_in_bytes; byte_count -= cur_size_in_bytes; @@ -1601,32 +1641,16 @@ error_free: #if defined(CONFIG_DEBUG_FS) -extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager - *man); static 
int amdgpu_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; unsigned ttm_pl = *(int *)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv; - struct ttm_bo_global *glob = adev->mman.bdev.glob; + struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; struct drm_printer p = drm_seq_file_printer(m); - spin_lock(&glob->lru_lock); - drm_mm_print(mm, &p); - spin_unlock(&glob->lru_lock); - switch (ttm_pl) { - case TTM_PL_VRAM: - seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", - adev->mman.bdev.man[ttm_pl].size, - (u64)atomic64_read(&adev->vram_usage) >> 20, - (u64)atomic64_read(&adev->vram_vis_usage) >> 20); - break; - case TTM_PL_TT: - amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]); - break; - } + man->func->debug(man, &p); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f137c2458ee8..f22a4758719d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -66,6 +66,10 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem); +uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); + +uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, @@ -73,7 +77,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, struct dma_fence **fence, bool direct_submit, bool vm_needs_flush); int amdgpu_fill_buffer(struct amdgpu_bo *bo, - uint32_t src_data, + uint64_t src_data, struct reservation_object *resv, struct dma_fence **fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index fcfb9d4f7477..36c763310df5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -358,8 +358,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, (le32_to_cpu(header->jt_offset) * 4); memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4); - ucode->ucode_size += le32_to_cpu(header->jt_size) * 4; - return 0; } @@ -381,7 +379,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, amdgpu_sriov_vf(adev) ? 
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, bo); + NULL, NULL, 0, bo); if (err) { dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); goto failed; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2ca09f111f08..e19928dae8e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -588,6 +588,10 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, } break; + case 8: /* MJPEG */ + min_dpb_size = 0; + break; + case 16: /* H265 */ image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2; image_size = ALIGN(image_size, 256); @@ -1051,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; @@ -1101,7 +1105,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b692ad402252..c855366521ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -937,9 +937,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r, timeout = adev->usec_timeout; - /* workaround VCE ring test slow issue for sriov*/ + /* skip ring test for sriov*/ if (amdgpu_sriov_vf(adev)) - timeout *= 10; + return 0; r = amdgpu_ring_alloc(ring, 16); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 09190fadd228..041e0121590c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -209,9 +209,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) if (fences == 0) { if (adev->pm.dpm_enabled) { + /* might be used when with pg/cg amdgpu_dpm_enable_uvd(adev, false); - } else { - amdgpu_asic_set_uvd_clocks(adev, 0, 0); + */ } } else { schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); @@ -223,12 +223,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); - if (set_clocks) { - if (adev->pm.dpm_enabled) { - amdgpu_dpm_enable_uvd(adev, true); - } else { - amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - } + if (set_clocks && adev->pm.dpm_enabled) { + /* might be used when with pg/cg + amdgpu_dpm_enable_uvd(adev, true); + */ } } @@ -361,7 +359,7 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; @@ -413,7 +411,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 8a081e162d13..ab05121b9272 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -46,14 
+46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev) * address within META_DATA init package to support SRIOV gfx preemption. */ -int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va) { - int r; - struct amdgpu_bo_va *bo_va; struct ww_acquire_ctx ticket; struct list_head list; struct amdgpu_bo_list_entry pd; struct ttm_validate_buffer csa_tv; + int r; INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&csa_tv.head); @@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) return r; } - bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); - if (!bo_va) { + *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); + if (!*bo_va) { ttm_eu_backoff_reservation(&ticket, &list); DRM_ERROR("failed to create bo_va for static CSA\n"); return -ENOMEM; } - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR, - AMDGPU_CSA_SIZE); + r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR, + AMDGPU_CSA_SIZE); if (r) { DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); - amdgpu_vm_bo_rmv(adev, bo_va); + amdgpu_vm_bo_rmv(adev, *bo_va); ttm_eu_backoff_reservation(&ticket, &list); return r; } - r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); + r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE, + AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | + AMDGPU_PTE_EXECUTABLE); if (r) { DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); - amdgpu_vm_bo_rmv(adev, bo_va); + amdgpu_vm_bo_rmv(adev, *bo_va); ttm_eu_backoff_reservation(&ticket, &list); return r; } - vm->csa_bo_va = bo_va; ttm_eu_backoff_reservation(&ticket, &list); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index e5b1baf387c1..afcfb8bcfb65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -90,7 +90,8 @@ static inline bool is_virtual_machine(void) struct amdgpu_vm; int amdgpu_allocate_static_csa(struct amdgpu_device *adev); -int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va); void amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 250c8e80e646..6b1343e5541d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -159,11 +159,20 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, */ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, int (*validate)(void *, struct amdgpu_bo *), - void *param, bool use_cpu_for_update) + void *param, bool use_cpu_for_update, + struct ttm_bo_global *glob) { unsigned i; int r; + if (parent->bo->shadow) { + struct amdgpu_bo *shadow = parent->bo->shadow; + + r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); + if (r) + return r; + } + if (use_cpu_for_update) { r = amdgpu_bo_kmap(parent->bo, NULL); if (r) @@ -183,12 +192,18 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, if (r) return r; + spin_lock(&glob->lru_lock); + 
ttm_bo_move_to_lru_tail(&entry->bo->tbo); + if (entry->bo->shadow) + ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo); + spin_unlock(&glob->lru_lock); + /* * Recurse into the sub directory. This is harmless because we * have only a maximum of 5 layers. */ r = amdgpu_vm_validate_level(entry, validate, param, - use_cpu_for_update); + use_cpu_for_update, glob); if (r) return r; } @@ -220,54 +235,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, return 0; return amdgpu_vm_validate_level(&vm->root, validate, param, - vm->use_cpu_for_update); + vm->use_cpu_for_update, + adev->mman.bdev.glob); } /** - * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail - * - * @adev: amdgpu device instance - * @vm: vm providing the BOs - * - * Move the PT BOs to the tail of the LRU. - */ -static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent) -{ - unsigned i; - - if (!parent->entries) - return; - - for (i = 0; i <= parent->last_entry_used; ++i) { - struct amdgpu_vm_pt *entry = &parent->entries[i]; - - if (!entry->bo) - continue; - - ttm_bo_move_to_lru_tail(&entry->bo->tbo); - amdgpu_vm_move_level_in_lru(entry); - } -} - -/** - * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail - * - * @adev: amdgpu device instance - * @vm: vm providing the BOs - * - * Move the PT BOs to the tail of the LRU. - */ -void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, - struct amdgpu_vm *vm) -{ - struct ttm_bo_global *glob = adev->mman.bdev.glob; - - spin_lock(&glob->lru_lock); - amdgpu_vm_move_level_in_lru(&vm->root); - spin_unlock(&glob->lru_lock); -} - - /** * amdgpu_vm_alloc_levels - allocate the PD/PT levels * * @adev: amdgpu_device pointer @@ -288,6 +260,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, unsigned pt_idx, from, to; int r; u64 flags; + uint64_t init_value = 0; if (!parent->entries) { unsigned num_entries = amdgpu_vm_num_entries(adev, level); @@ -321,6 +294,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_SHADOW); + if (vm->pte_support_ats) { + init_value = AMDGPU_PTE_SYSTEM; + if (level != adev->vm_manager.num_level - 1) + init_value |= AMDGPU_PDE_PTE; + } + /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { struct reservation_object *resv = vm->root.bo->tbo.resv; @@ -333,7 +312,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, resv, &pt); + NULL, resv, init_value, &pt); if (r) return r; @@ -352,7 +331,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, entry->bo = pt; entry->addr = 0; - entry->huge_page = false; } if (level < adev->vm_manager.num_level) { @@ -892,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, { struct amdgpu_bo_va *bo_va; - list_for_each_entry(bo_va, &bo->va, bo_list) { - if (bo_va->vm == vm) { + list_for_each_entry(bo_va, &bo->va, base.bo_list) { + if (bo_va->base.vm == vm) { return bo_va; } } @@ -1060,18 +1038,13 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, shadow = parent->bo->shadow; if (vm->use_cpu_for_update) { - pd_addr = (unsigned long)parent->bo->kptr; + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) return r; params.func = amdgpu_vm_cpu_set_ptes; } else { - if (shadow) { - r = amdgpu_ttm_bind(&shadow->tbo, 
&shadow->tbo.mem); - if (r) - return r; - } ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); @@ -1107,22 +1080,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, if (bo == NULL) continue; - if (bo->shadow) { - struct amdgpu_bo *pt_shadow = bo->shadow; - - r = amdgpu_ttm_bind(&pt_shadow->tbo, - &pt_shadow->tbo.mem); - if (r) - return r; - } - pt = amdgpu_bo_gpu_offset(bo); pt = amdgpu_gart_get_vm_pde(adev, pt); - if (parent->entries[pt_idx].addr == pt || - parent->entries[pt_idx].huge_page) + /* Don't update huge pages here */ + if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) || + parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID)) continue; - parent->entries[pt_idx].addr = pt; + parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID; pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || @@ -1300,15 +1265,14 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr, * * Check if we can update the PD with a huge page. */ -static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, - struct amdgpu_vm_pt *entry, - struct amdgpu_vm_pt *parent, - unsigned nptes, uint64_t dst, - uint64_t flags) +static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, + struct amdgpu_vm_pt *entry, + struct amdgpu_vm_pt *parent, + unsigned nptes, uint64_t dst, + uint64_t flags) { bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes); uint64_t pd_addr, pde; - int r; /* In the case of a mixed PT the PDE must point to it*/ if (p->adev->asic_type < CHIP_VEGA10 || @@ -1320,21 +1284,17 @@ static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, dst = amdgpu_gart_get_vm_pde(p->adev, dst); flags = AMDGPU_PTE_VALID; } else { + /* Set the huge page flag to stop scanning at this PDE */ flags |= AMDGPU_PDE_PTE; } - if (entry->addr == dst && - entry->huge_page == !!(flags & AMDGPU_PDE_PTE)) - return 0; + if (entry->addr == (dst | flags)) + return; - entry->addr = dst; - entry->huge_page = !!(flags & AMDGPU_PDE_PTE); + entry->addr = (dst | flags); if (use_cpu_update) { - r = amdgpu_bo_kmap(parent->bo, (void *)&pd_addr); - if (r) - return r; - + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); } else { @@ -1347,8 +1307,6 @@ static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); } - - return 0; } /** @@ -1375,7 +1333,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_bo *pt; unsigned nptes; bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes); - int r; /* walk over the address space and update the page tables */ for (addr = start; addr < end; addr += nptes, @@ -1391,17 +1348,15 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, else nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); - r = amdgpu_vm_handle_huge_pages(params, entry, parent, - nptes, dst, flags); - if (r) - return r; - - if (entry->huge_page) + amdgpu_vm_handle_huge_pages(params, entry, parent, + nptes, dst, flags); + /* We don't need to update PTEs for huge pages */ + if (entry->addr & AMDGPU_PDE_PTE) continue; pt = entry->bo; if (use_cpu_update) { - pe_start = (unsigned long)pt->kptr; + pe_start = (unsigned long)amdgpu_bo_kptr(pt); } else { if (pt->shadow) { pe_start = amdgpu_bo_gpu_offset(pt->shadow); @@ -1455,9 +1410,7 @@ static int 
amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, * Userspace can support this by aligning virtual base address and * allocation size to the fragment size. */ - - /* SI and newer are optimized for 64KB */ - unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev); + unsigned pages_per_frag = params->adev->vm_manager.fragment_size; uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag); uint64_t frag_align = 1 << pages_per_frag; @@ -1771,7 +1724,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear) { - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint64_t gtt_flags, flags; @@ -1780,27 +1734,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct dma_fence *exclusive; int r; - if (clear || !bo_va->bo) { + if (clear || !bo_va->base.bo) { mem = NULL; nodes = NULL; exclusive = NULL; } else { struct ttm_dma_tt *ttm; - mem = &bo_va->bo->tbo.mem; + mem = &bo_va->base.bo->tbo.mem; nodes = mem->mm_node; if (mem->mem_type == TTM_PL_TT) { - ttm = container_of(bo_va->bo->tbo.ttm, struct - ttm_dma_tt, ttm); + ttm = container_of(bo_va->base.bo->tbo.ttm, + struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } - exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); + exclusive = reservation_object_get_excl(bo->tbo.resv); } - if (bo_va->bo) { - flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); - gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && - adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? + if (bo) { + flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); + gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) && + adev == amdgpu_ttm_adev(bo->tbo.bdev)) ? flags : 0; } else { flags = 0x0; @@ -1808,7 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } spin_lock(&vm->status_lock); - if (!list_empty(&bo_va->vm_status)) + if (!list_empty(&bo_va->base.vm_status)) list_splice_init(&bo_va->valids, &bo_va->invalids); spin_unlock(&vm->status_lock); @@ -1831,9 +1785,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_lock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); - list_del_init(&bo_va->vm_status); + list_del_init(&bo_va->base.vm_status); if (clear) - list_add(&bo_va->vm_status, &vm->cleared); + list_add(&bo_va->base.vm_status, &vm->cleared); spin_unlock(&vm->status_lock); if (vm->use_cpu_for_update) { @@ -1995,15 +1949,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; struct dma_fence *f = NULL; int r; + uint64_t init_pte_value = 0; while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); + if (vm->pte_support_ats) + init_pte_value = AMDGPU_PTE_SYSTEM; + r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm, mapping->start, mapping->last, - 0, 0, &f); + init_pte_value, 0, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -2023,26 +1981,26 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, } /** - * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT + * amdgpu_vm_clear_moved - clear moved BOs in the PT * * @adev: amdgpu_device pointer * @vm: requested vm * - * Make sure all invalidated BOs are cleared in the PT. + * Make sure all moved BOs are cleared in the PT. * Returns 0 for success. * * PTs have to be reserved and mutex must be locked! 
*/ -int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, - struct amdgpu_vm *vm, struct amdgpu_sync *sync) +int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = NULL; int r = 0; spin_lock(&vm->status_lock); - while (!list_empty(&vm->invalidated)) { - bo_va = list_first_entry(&vm->invalidated, - struct amdgpu_bo_va, vm_status); + while (!list_empty(&vm->moved)) { + bo_va = list_first_entry(&vm->moved, + struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); r = amdgpu_vm_bo_update(adev, bo_va, true); @@ -2082,16 +2040,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, if (bo_va == NULL) { return NULL; } - bo_va->vm = vm; - bo_va->bo = bo; + bo_va->base.vm = vm; + bo_va->base.bo = bo; + INIT_LIST_HEAD(&bo_va->base.bo_list); + INIT_LIST_HEAD(&bo_va->base.vm_status); + bo_va->ref_count = 1; - INIT_LIST_HEAD(&bo_va->bo_list); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); - INIT_LIST_HEAD(&bo_va->vm_status); if (bo) - list_add_tail(&bo_va->bo_list, &bo->va); + list_add_tail(&bo_va->base.bo_list, &bo->va); return bo_va; } @@ -2116,7 +2075,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping, *tmp; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; /* validate the parameters */ @@ -2127,7 +2087,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || - (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) + (bo && offset + size > amdgpu_bo_size(bo))) return -EINVAL; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -2137,7 +2097,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (tmp) { /* bo and tmp overlap, invalid addr */ dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " - "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr, + "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr, tmp->start, tmp->last + 1); return -EINVAL; } @@ -2182,7 +2142,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; int r; @@ -2194,7 +2155,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || - (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) + (bo && offset + size > amdgpu_bo_size(bo))) return -EINVAL; /* Allocate all the needed memory */ @@ -2202,7 +2163,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, if (!mapping) return -ENOMEM; - r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size); + r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); if (r) { kfree(mapping); return r; @@ -2242,7 +2203,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, uint64_t saddr) { struct amdgpu_bo_va_mapping *mapping; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_vm *vm = bo_va->base.vm; bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -2390,12 +2351,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va) { struct amdgpu_bo_va_mapping *mapping, *next; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_vm *vm = bo_va->base.vm; - list_del(&bo_va->bo_list); + 
list_del(&bo_va->base.bo_list); spin_lock(&vm->status_lock); - list_del(&bo_va->vm_status); + list_del(&bo_va->base.vm_status); spin_unlock(&vm->status_lock); list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { @@ -2427,13 +2388,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo) { - struct amdgpu_bo_va *bo_va; + struct amdgpu_vm_bo_base *bo_base; - list_for_each_entry(bo_va, &bo->va, bo_list) { - spin_lock(&bo_va->vm->status_lock); - if (list_empty(&bo_va->vm_status)) - list_add(&bo_va->vm_status, &bo_va->vm->invalidated); - spin_unlock(&bo_va->vm->status_lock); + list_for_each_entry(bo_base, &bo->va, bo_list) { + spin_lock(&bo_base->vm->status_lock); + if (list_empty(&bo_base->vm_status)) + list_add(&bo_base->vm_status, + &bo_base->vm->moved); + spin_unlock(&bo_base->vm->status_lock); } } @@ -2451,12 +2413,26 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) } /** - * amdgpu_vm_adjust_size - adjust vm size and block size + * amdgpu_vm_set_fragment_size - adjust fragment size in PTE + * + * @adev: amdgpu_device pointer + * @fragment_size_default: the default fragment size if it's set auto + */ +void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default) +{ + if (amdgpu_vm_fragment_size == -1) + adev->vm_manager.fragment_size = fragment_size_default; + else + adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; +} + +/** + * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size * * @adev: amdgpu_device pointer * @vm_size: the default vm size if it's set auto */ -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size) +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default) { /* adjust vm size firstly */ if (amdgpu_vm_size == -1) @@ -2471,8 +2447,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size) else adev->vm_manager.block_size = amdgpu_vm_block_size; - DRM_INFO("vm size is %llu GB, block size is %u-bit\n", - adev->vm_manager.vm_size, adev->vm_manager.block_size); + amdgpu_vm_set_fragment_size(adev, fragment_size_default); + + DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n", + adev->vm_manager.vm_size, adev->vm_manager.block_size, + adev->vm_manager.fragment_size); } /** @@ -2494,13 +2473,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amd_sched_rq *rq; int r, i; u64 flags; + uint64_t init_pde_value = 0; vm->va = RB_ROOT; vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); - INIT_LIST_HEAD(&vm->invalidated); + INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); @@ -2515,10 +2495,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) return r; - if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) + vm->pte_support_ats = false; + + if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - else + + if (adev->asic_type == CHIP_RAVEN) { + vm->pte_support_ats = true; + init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE; + } + } else vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX); DRM_DEBUG_DRIVER("VM update mode is %s\n", @@ -2538,7 +2525,7 @@ int amdgpu_vm_init(struct amdgpu_device 
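The new amdgpu_vm_set_fragment_size() helper above only chooses between the module parameter and a per-ASIC default; the value is a log2 page count, so the fragment alignment and the PTE fragment field follow directly from it. Below is a minimal userspace sketch of that selection rule and of the alignment condition the earlier comment asks userspace to meet; the parameter stand-in and helper names are illustrative, not driver symbols.

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the amdgpu_vm_fragment_size module parameter; -1 means "auto". */
static int vm_fragment_size_param = -1;

/* Same selection rule as in the diff: per-ASIC default unless overridden.
 * The value is log2(pages per fragment). */
static uint32_t pick_fragment_size(uint32_t fragment_size_default)
{
	return vm_fragment_size_param == -1 ?
	       fragment_size_default : (uint32_t)vm_fragment_size_param;
}

int main(void)
{
	uint32_t frag = pick_fragment_size(9);      /* 9 is the default the gmc_v9 path passes */
	uint64_t frag_align = 1ULL << frag;         /* alignment in GPU pages */
	uint64_t va_page = 0x240000, num_pages = 0x4000;

	/* Fragments only help when both start and size are fragment aligned. */
	int usable = !(va_page & (frag_align - 1)) && !(num_pages & (frag_align - 1));

	printf("fragment %u-bit, align %llu pages, mapping %s use it\n",
	       (unsigned)frag, (unsigned long long)frag_align,
	       usable ? "can" : "cannot");
	return 0;
}
```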
*adev, struct amdgpu_vm *vm, r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, NULL, &vm->root.bo); + NULL, NULL, init_pde_value, &vm->root.bo); if (r) goto error_free_sched_entity; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 34d9174ebff2..ba6691b58ee7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -50,11 +50,6 @@ struct amdgpu_bo_list_entry; /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 -/* LOG2 number of continuous pages for the fragment field */ -#define AMDGPU_LOG2_PAGES_PER_FRAG(adev) \ - ((adev)->asic_type < CHIP_VEGA10 ? 4 : \ - (adev)->vm_manager.block_size) - #define AMDGPU_PTE_VALID (1ULL << 0) #define AMDGPU_PTE_SYSTEM (1ULL << 1) #define AMDGPU_PTE_SNOOPED (1ULL << 2) @@ -99,11 +94,22 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1) +/* base structure for tracking BO usage in a VM */ +struct amdgpu_vm_bo_base { + /* constant after initialization */ + struct amdgpu_vm *vm; + struct amdgpu_bo *bo; + + /* protected by bo being reserved */ + struct list_head bo_list; + + /* protected by spinlock */ + struct list_head vm_status; +}; struct amdgpu_vm_pt { struct amdgpu_bo *bo; uint64_t addr; - bool huge_page; /* array of page tables, one for each directory entry */ struct amdgpu_vm_pt *entries; @@ -118,7 +124,7 @@ struct amdgpu_vm { spinlock_t status_lock; /* BOs moved, but not yet updated in the PT */ - struct list_head invalidated; + struct list_head moved; /* BOs cleared in the PT because of a move */ struct list_head cleared; @@ -141,11 +147,12 @@ struct amdgpu_vm { u64 client_id; /* dedicated to vm */ struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; - /* each VM will map on CSA */ - struct amdgpu_bo_va *csa_bo_va; /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ bool use_cpu_for_update; + + /* Flag to indicate ATS support from PTE for GFX9 */ + bool pte_support_ats; }; struct amdgpu_vm_id { @@ -188,6 +195,7 @@ struct amdgpu_vm_manager { uint32_t num_level; uint64_t vm_size; uint32_t block_size; + uint32_t fragment_size; /* vram base address for page table entry */ u64 vram_base_offset; /* vm pte handling */ @@ -220,8 +228,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); -void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, - struct amdgpu_vm *vm); int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size); @@ -237,8 +243,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); -int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_sync *sync); +int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); @@ -265,7 +271,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, uint64_t saddr, uint64_t size); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size); +void 
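The base.vm and base.bo accesses throughout this change come from the new struct amdgpu_vm_bo_base shown above, embedded at the start of struct amdgpu_bo_va so per-BO lists can link the common part while callers recover the containing bo_va. A compact standalone illustration of that embedding pattern, using toy types rather than the driver's:

```c
#include <stdio.h>
#include <stddef.h>

/* Recover the containing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vm_bo_base {            /* shared tracking state (vm, bo, list heads) */
	int vm_id;
};

struct bo_va {                 /* derived structure: adds mapping-only fields */
	struct vm_bo_base base;
	unsigned ref_count;
};

int main(void)
{
	struct bo_va va = { .base = { .vm_id = 7 }, .ref_count = 1 };
	struct vm_bo_base *b = &va.base;

	/* Given only the base pointer, as a list walker would have, get the bo_va back. */
	struct bo_va *owner = container_of(b, struct bo_va, base);

	printf("vm %d, refs %u\n", owner->base.vm_id, owner->ref_count);
	return 0;
}
```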
amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, + uint32_t fragment_size_default); +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, + uint32_t fragment_size_default); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index a2c59a08b2bd..26e900627971 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -28,6 +28,8 @@ struct amdgpu_vram_mgr { struct drm_mm mm; spinlock_t lock; + atomic64_t usage; + atomic64_t vis_usage; }; /** @@ -79,6 +81,27 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) } /** + * amdgpu_vram_mgr_vis_size - Calculate visible node size + * + * @adev: amdgpu device structure + * @node: MM node structure + * + * Calculate how many bytes of the MM node are inside visible VRAM + */ +static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, + struct drm_mm_node *node) +{ + uint64_t start = node->start << PAGE_SHIFT; + uint64_t end = (node->size + node->start) << PAGE_SHIFT; + + if (start >= adev->mc.visible_vram_size) + return 0; + + return (end > adev->mc.visible_vram_size ? + adev->mc.visible_vram_size : end) - start; +} + +/** * amdgpu_vram_mgr_new - allocate new ranges * * @man: TTM memory type manager @@ -93,11 +116,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, const struct ttm_place *place, struct ttm_mem_reg *mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_vram_mgr *mgr = man->priv; struct drm_mm *mm = &mgr->mm; struct drm_mm_node *nodes; enum drm_mm_insert_mode mode; unsigned long lpfn, num_nodes, pages_per_node, pages_left; + uint64_t usage = 0, vis_usage = 0; unsigned i; int r; @@ -142,6 +167,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, if (unlikely(r)) goto error; + usage += nodes[i].size << PAGE_SHIFT; + vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); + /* Calculate a virtual BO start address to easily check if * everything is CPU accessible. */ @@ -155,6 +183,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, } spin_unlock(&mgr->lock); + atomic64_add(usage, &mgr->usage); + atomic64_add(vis_usage, &mgr->vis_usage); + mem->mm_node = nodes; return 0; @@ -181,8 +212,10 @@ error: static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_vram_mgr *mgr = man->priv; struct drm_mm_node *nodes = mem->mm_node; + uint64_t usage = 0, vis_usage = 0; unsigned pages = mem->num_pages; if (!mem->mm_node) @@ -192,31 +225,67 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, while (pages) { pages -= nodes->size; drm_mm_remove_node(nodes); + usage += nodes->size << PAGE_SHIFT; + vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes); ++nodes; } spin_unlock(&mgr->lock); + atomic64_sub(usage, &mgr->usage); + atomic64_sub(vis_usage, &mgr->vis_usage); + kfree(mem->mm_node); mem->mm_node = NULL; } /** + * amdgpu_vram_mgr_usage - how many bytes are used in this domain + * + * @man: TTM memory type manager + * + * Returns how many bytes are used in this domain. 
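amdgpu_vram_mgr_vis_size() above is just a clamp of the node's byte range against the CPU-visible VRAM limit; the new usage and vis_usage counters then accumulate these per-node values on allocation and subtract them on free. A standalone version of the same arithmetic, with PAGE_SHIFT and the 256 MiB limit chosen only for illustration:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* How many bytes of a page-based [start, start+size) node fall below the
 * CPU-visible VRAM limit; mirrors the clamp in amdgpu_vram_mgr_vis_size(). */
static uint64_t vis_bytes(uint64_t start_page, uint64_t num_pages,
			  uint64_t visible_vram_size)
{
	uint64_t start = start_page << PAGE_SHIFT;
	uint64_t end = (start_page + num_pages) << PAGE_SHIFT;

	if (start >= visible_vram_size)
		return 0;
	return (end > visible_vram_size ? visible_vram_size : end) - start;
}

int main(void)
{
	uint64_t visible = 256ULL << 20;   /* assume a 256 MiB visible aperture */

	/* A node straddling the limit only counts the part below it: 4 pages here. */
	printf("%llu bytes visible\n",
	       (unsigned long long)vis_bytes((visible >> PAGE_SHIFT) - 4, 16, visible));
	return 0;
}
```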
+ */ +uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) +{ + struct amdgpu_vram_mgr *mgr = man->priv; + + return atomic64_read(&mgr->usage); +} + +/** + * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part + * + * @man: TTM memory type manager + * + * Returns how many bytes are used in the visible part of VRAM + */ +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) +{ + struct amdgpu_vram_mgr *mgr = man->priv; + + return atomic64_read(&mgr->vis_usage); +} + +/** * amdgpu_vram_mgr_debug - dump VRAM table * * @man: TTM memory type manager - * @prefix: text prefix + * @printer: DRM printer to use * * Dump the table content using printk. */ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, - const char *prefix) + struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = man->priv; - struct drm_printer p = drm_debug_printer(prefix); spin_lock(&mgr->lock); - drm_mm_print(&mgr->mm, &p); + drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); + + drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", + man->size, amdgpu_vram_mgr_usage(man) >> 20, + amdgpu_vram_mgr_vis_usage(man) >> 20); } const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 490e84944851..4e519dc42916 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2431,7 +2431,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2439,7 +2439,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2473,7 +2473,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 921c6f772f11..11edc75edaa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2506,7 +2506,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2514,7 +2514,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2548,7 +2548,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index bcd9521237f4..a51e35f824a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -42,6 +42,7 @@ #include "dce/dce_6_0_d.h" #include "dce/dce_6_0_sh_mask.h" #include 
"gca/gfx_7_2_enum.h" +#include "dce_v6_0.h" #include "si_enums.h" static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev); @@ -2321,7 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2329,7 +2330,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2363,7 +2364,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 609438fe8584..9cf14b8b2db9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2335,7 +2335,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2343,7 +2343,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2377,7 +2377,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 5ed919e45351..b9ee9073cb0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -479,6 +479,8 @@ static int dce_virtual_hw_init(void *handle) #endif /* no DCE */ break; + case CHIP_VEGA10: + break; default: DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 4ac85f47f287..d228f5a99044 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -2217,40 +2217,9 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.rlc.save_restore_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); - adev->gfx.rlc.save_restore_obj = NULL; - } - - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - if (adev->gfx.rlc.cp_table_obj) { - r = 
amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) @@ -2273,43 +2242,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) if (src_ptr) { /* save restore block */ - if (adev->gfx.rlc.save_restore_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, - &adev->gfx.rlc.save_restore_obj); - - if (r) { - dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - gfx_v6_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.save_restore_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_obj, + &adev->gfx.rlc.save_restore_gpu_addr, + (void **)&adev->gfx.rlc.sr_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", + r); gfx_v6_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } /* write the sr buffer */ dst_ptr = adev->gfx.rlc.sr_ptr; for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) dst_ptr[i] = cpu_to_le32(src_ptr[i]); + amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); } @@ -2319,39 +2268,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev); dws = adev->gfx.rlc.clear_state_size + (256 / 4); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v6_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v6_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ 
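The repeated conversions in this series, here and in the gfx_v7/v8/v9 hunks below, collapse the create/reserve/pin/kmap ladder into one amdgpu_bo_create_reserved() call with a single failure path. A toy stand-in for that shape of API, with made-up names (buf_create_reserved, buf_free), just to show the error-handling simplification:

```c
#include <stdio.h>
#include <stdlib.h>

/* Allocation, "pinning" and "mapping" happen in one call, so callers check a
 * single result instead of unwinding each step by hand. */
struct buf { void *cpu_ptr; size_t size; };

static int buf_create_reserved(size_t size, struct buf **out, void **cpu_ptr)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b || !(b->cpu_ptr = calloc(1, size))) {
		free(b);
		return -1;
	}
	b->size = size;
	*out = b;
	*cpu_ptr = b->cpu_ptr;
	return 0;
}

static void buf_free(struct buf **b)
{
	if (!*b)
		return;
	free((*b)->cpu_ptr);
	free(*b);
	*b = NULL;
}

int main(void)
{
	struct buf *obj;
	void *ptr;

	if (buf_create_reserved(4096, &obj, &ptr)) {
		fprintf(stderr, "create failed\n");
		return 1;
	}
	((char *)ptr)[0] = 0;   /* caller fills the buffer ... */
	buf_free(&obj);         /* ... and tears it down along one path */
	return 0;
}
```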
dst_ptr = adev->gfx.rlc.cs_ptr; reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 17b7c6934b0a..00868764a0dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1823,7 +1823,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) } /** - * gmc_v7_0_init_compute_vmid - gart enable + * gfx_v7_0_init_compute_vmid - gart enable * * @adev: amdgpu_device pointer * @@ -1833,7 +1833,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) #define DEFAULT_SH_MEM_BASES (0x6000) #define FIRST_COMPUTE_VMID (8) #define LAST_COMPUTE_VMID (16) -static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev) +static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) { int i; uint32_t sh_mem_config; @@ -1921,6 +1921,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) ELEMENT_SIZE, 1); sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, INDEX_STRIDE, 3); + WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) { @@ -1934,12 +1935,11 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) WREG32(mmSH_MEM_APE1_BASE, 1); WREG32(mmSH_MEM_APE1_LIMIT, 0); WREG32(mmSH_MEM_BASES, sh_mem_base); - WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - gmc_v7_0_init_compute_vmid(adev); + gfx_v7_0_init_compute_vmid(adev); WREG32(mmSX_DEBUG_1, 0x20); @@ -2774,39 +2774,18 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) */ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) { - int i, r; + int i; for (i = 0; i < adev->gfx.num_compute_rings; i++) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - if (ring->mqd_obj) { - r = amdgpu_bo_reserve(ring->mqd_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r); - - amdgpu_bo_unpin(ring->mqd_obj); - amdgpu_bo_unreserve(ring->mqd_obj); - - amdgpu_bo_unref(&ring->mqd_obj); - ring->mqd_obj = NULL; - } + amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); } } static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); } static int gfx_v7_0_mec_init(struct amdgpu_device *adev) @@ -2823,33 +2802,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) /* allocate space for ALL pipes (even the ones we don't own) */ mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * GFX7_MEC_HPD_SIZE * 2; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - 
&adev->gfx.mec.hpd_eop_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v7_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); + dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r); gfx_v7_0_mec_fini(adev); return r; } @@ -3108,32 +3068,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) struct cik_mqd *mqd; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; - if (ring->mqd_obj == NULL) { - r = amdgpu_bo_create(adev, - sizeof(struct cik_mqd), - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &ring->mqd_obj); - if (r) { - dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto out; - - r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT, - &mqd_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r); - goto out_unreserve; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); + r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, + &mqd_gpu_addr, (void **)&mqd); if (r) { - dev_warn(adev->dev, "(%d) map MQD bo failed\n", r); - goto out_unreserve; + dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); + return r; } mutex_lock(&adev->srbm_mutex); @@ -3147,9 +3087,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) mutex_unlock(&adev->srbm_mutex); amdgpu_bo_kunmap(ring->mqd_obj); -out_unreserve: amdgpu_bo_unreserve(ring->mqd_obj); -out: return 0; } @@ -3361,43 +3299,9 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, */ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - /* save restore block */ - if (adev->gfx.rlc.save_restore_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); - adev->gfx.rlc.save_restore_obj = NULL; - } - - /* clear state block */ - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - /* clear state block */ - if (adev->gfx.rlc.cp_table_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) @@ -3432,39 
+3336,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) if (src_ptr) { /* save restore block */ - if (adev->gfx.rlc.save_restore_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.save_restore_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.save_restore_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_obj, + &adev->gfx.rlc.save_restore_gpu_addr, + (void **)&adev->gfx.rlc.sr_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); + dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); gfx_v7_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } /* write the sr buffer */ dst_ptr = adev->gfx.rlc.sr_ptr; for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) @@ -3477,39 +3359,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v7_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; gfx_v7_0_get_csb_buffer(adev, dst_ptr); @@ -3518,37 +3378,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) } if (adev->gfx.rlc.cp_table_size) { - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.cp_table_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); - if (unlikely(r != 0)) { - 
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); gfx_v7_0_rlc_fini(adev); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 05436b8730b4..832e592fcd07 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1238,29 +1238,8 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev) static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - /* clear state block */ - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - /* jump table block */ - if (adev->gfx.rlc.cp_table_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) @@ -1278,39 +1257,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v8_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v8_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - 
dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r); - gfx_v8_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; gfx_v8_0_get_csb_buffer(adev, dst_ptr); @@ -1321,34 +1278,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) if ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY)) { adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.cp_table_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); - if (unlikely(r != 0)) { - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); return r; } @@ -1363,17 +1299,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); } static int gfx_v8_0_mec_init(struct amdgpu_device *adev) @@ -1389,34 +1315,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v8_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_gpu_addr); + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v8_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); - if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); - gfx_v8_0_mec_fini(adev); + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); return r; } @@ -3802,6 +3707,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device 
*adev) ELEMENT_SIZE, 1); sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, INDEX_STRIDE, 3); + WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); + mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) { vi_srbm_select(adev, 0, 0, 0, i); @@ -3825,7 +3732,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) WREG32(mmSH_MEM_APE1_BASE, 1); WREG32(mmSH_MEM_APE1_LIMIT, 0); - WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 435db6f5efcf..69182eeca264 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -116,7 +116,9 @@ static const u32 golden_settings_gc_9_0[] = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080, SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080, SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080, + SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000, SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107, + SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000, SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000, SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68, SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197, @@ -772,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (cs_data) { /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_obj, - &adev->gfx.rlc.clear_state_gpu_addr, - (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_err(adev->dev, - "(%d) failed to create rlc csb bo\n", r); - gfx_v9_0_rlc_fini(adev); - return r; - } + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", + r); + gfx_v9_0_rlc_fini(adev); + return r; } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; @@ -795,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (adev->asic_type == CHIP_RAVEN) { /* TODO: double check the cp_table_size for RV */ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_obj, - &adev->gfx.rlc.cp_table_gpu_addr, - (void **)&adev->gfx.rlc.cp_table_ptr); - if (r) { - dev_err(adev->dev, - "(%d) failed to create cp table bo\n", r); - gfx_v9_0_rlc_fini(adev); - return r; - } + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_err(adev->dev, + "(%d) failed to create cp table bo\n", r); + gfx_v9_0_rlc_fini(adev); + return r; } rv_init_cp_jump_table(adev); @@ -821,28 +819,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r 
= amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } - if (adev->gfx.mec.mec_fw_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj); - amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); - - amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj); - adev->gfx.mec.mec_fw_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); } static int gfx_v9_0_mec_init(struct amdgpu_device *adev) @@ -862,33 +840,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) amdgpu_gfx_compute_queue_acquire(adev); mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_gpu_addr); + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); - if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); gfx_v9_0_mec_fini(adev); return r; } @@ -905,42 +863,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; - if (adev->gfx.mec.mec_fw_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hdr->header.ucode_size_bytes, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.mec_fw_obj); - if (r) { - dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false); - if (unlikely(r != 0)) { - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.mec_fw_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r); - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw); + r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.mec_fw_obj, + &adev->gfx.mec.mec_fw_gpu_addr, + (void **)&fw); if (r) { - dev_warn(adev->dev, "(%d) map firmware bo failed\n", r); + dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); gfx_v9_0_mec_fini(adev); return r; } + memcpy(fw, fw_data, fw_size); amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); - return 0; } @@ -2219,7 +2157,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device 
*adev) struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; const struct cs_section_def *sect = NULL; const struct cs_extent_def *ext = NULL; - int r, i; + int r, i, tmp; /* init the CP */ WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); @@ -2227,7 +2165,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v9_0_cp_gfx_enable(adev, true); - r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4); + r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); return r; @@ -2265,6 +2203,12 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x8000); amdgpu_ring_write(ring, 0x8000); + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1)); + tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE | + (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0); + amdgpu_ring_commit(ring); return 0; @@ -4158,7 +4102,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev, return 0; } -const struct amd_ip_funcs gfx_v9_0_ip_funcs = { +static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, .late_init = gfx_v9_0_late_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h index 56ef652a575d..fa5a3fbaf6ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h @@ -24,7 +24,6 @@ #ifndef __GFX_V9_0_H__ #define __GFX_V9_0_H__ -extern const struct amd_ip_funcs gfx_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block; void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 408723ef157c..4f2788b61a08 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -124,7 +124,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, field; /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); @@ -143,9 +143,10 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); + field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h index d2dbb085f480..206e29cad753 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h @@ -30,7 +30,5 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value); void gfxhub_v1_0_init(struct amdgpu_device *adev); u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); -extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs; -extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 
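gfxhub_v1_0 above (and mmhub further down) now programs BANK_SELECT from the chosen fragment size instead of a hard-coded 12, using the usual REG_SET_FIELD shift-and-mask update. A self-contained sketch of that style of field update; the shift and mask values below are invented for the example and are not the real VM_L2_CNTL3 layout:

```c
#include <stdio.h>
#include <stdint.h>

/* Invented field layout, for illustration only. */
#define BANK_SELECT__SHIFT		0
#define BANK_SELECT__MASK		0x0000003fu
#define BIGK_FRAGMENT_SIZE__SHIFT	6
#define BIGK_FRAGMENT_SIZE__MASK	0x00000fc0u

/* Same idea as REG_SET_FIELD: clear the field, then OR in the new value. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t fragment_size = 9;	/* log2 pages, as chosen by the VM manager */
	uint32_t tmp = 0;

	tmp = set_field(tmp, BANK_SELECT__MASK, BANK_SELECT__SHIFT, fragment_size);
	tmp = set_field(tmp, BIGK_FRAGMENT_SIZE__MASK, BIGK_FRAGMENT_SIZE__SHIFT, 6);
	printf("register value: 0x%08x\n", (unsigned)tmp);
	return 0;
}
```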
93c45f26b7c8..12b0c4cd7a5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -461,6 +461,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) { int r, i; + u32 field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -488,10 +489,12 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL2, VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK | VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK); + + field = adev->vm_manager.fragment_size; WREG32(mmVM_L2_CNTL3, VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK | - (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) | - (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); + (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) | + (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); @@ -811,7 +814,7 @@ static int gmc_v6_0_sw_init(void *handle) if (r) return r; - amdgpu_vm_adjust_size(adev, 64); + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; adev->mc.mc_mask = 0xffffffffffULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 4a9e84062874..e42c1ad3af5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -562,7 +562,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) { int r, i; - u32 tmp; + u32 tmp, field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -592,10 +592,12 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32(mmVM_L2_CNTL2, tmp); + + field = adev->vm_manager.fragment_size; tmp = RREG32(mmVM_L2_CNTL3); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); WREG32(mmVM_L2_CNTL3, tmp); /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); @@ -948,7 +950,7 @@ static int gmc_v7_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. 
*/ - amdgpu_vm_adjust_size(adev, 64); + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 85c937b5e40b..7ca2dae8237a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -762,7 +762,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) { int r, i; - u32 tmp; + u32 tmp, field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -793,10 +793,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32(mmVM_L2_CNTL2, tmp); + + field = adev->vm_manager.fragment_size; tmp = RREG32(mmVM_L2_CNTL3); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); WREG32(mmVM_L2_CNTL3, tmp); /* XXX: set to enable PTE/PDE in system memory */ tmp = RREG32(mmVM_L2_CNTL4); @@ -1046,7 +1048,7 @@ static int gmc_v8_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - amdgpu_vm_adjust_size(adev, 64); + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c22899a08106..2769c2b3b56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -541,9 +541,10 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; adev->vm_manager.num_level = 3; + amdgpu_vm_set_fragment_size(adev, 9); } else { - /* vm_size is 64GB for legacy 2-level page support*/ - amdgpu_vm_adjust_size(adev, 64); + /* vm_size is 64GB for legacy 2-level page support */ + amdgpu_vm_adjust_size(adev, 64, 9); adev->vm_manager.num_level = 1; } break; @@ -558,14 +559,16 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; adev->vm_manager.num_level = 3; + amdgpu_vm_set_fragment_size(adev, 9); break; default: break; } - DRM_INFO("vm size is %llu GB, block size is %u-bit\n", + DRM_INFO("vm size is %llu GB, block size is %u-bit,fragment size is %u-bit\n", adev->vm_manager.vm_size, - adev->vm_manager.block_size); + adev->vm_manager.block_size, + adev->vm_manager.fragment_size); /* This interrupt is VMC page fault.*/ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index ad8def3cc343..4395a4f12149 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -138,7 +138,7 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, field; /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); @@ -157,9 +157,10 @@ static void mmhub_v1_0_init_cache_regs(struct 
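amdgpu_vm_adjust_size() still takes the VM size in GB, and the gmc callers above keep deriving max_pfn as vm_size << 18; that shift is simply the GiB-to-4KiB-page conversion. A one-line check of the arithmetic:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vm_size_gb = 64;

	/* 1 GiB = 2^30 bytes and a GPU page is 4 KiB = 2^12 bytes,
	 * so pages = GiB << (30 - 12) = GiB << 18. */
	uint64_t max_pfn = vm_size_gb << 18;

	printf("%llu GB -> %llu pages (max_pfn)\n",
	       (unsigned long long)vm_size_gb, (unsigned long long)max_pfn);
	return 0;
}
```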
amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); + field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index 57bb940c0ecd..5d38229baf69 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -36,7 +36,4 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev); void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable); -extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs; -extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block; - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 591f3e7fb508..fd7c72aaafa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -291,6 +291,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) DRM_DEBUG("Setting write pointer\n"); if (ring->use_doorbell) { + u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; + DRM_DEBUG("Using doorbell -- " "wptr_offs == 0x%08x " "lower_32_bits(ring->wptr) << 2 == 0x%08x " @@ -299,8 +301,7 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) lower_32_bits(ring->wptr << 2), upper_32_bits(ring->wptr << 2)); /* XXX check if swapping is necessary on BE */ - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2); - adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2); + WRITE_ONCE(*wb, (ring->wptr << 2)); DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", ring->doorbell_index, ring->wptr << 2); WDOORBELL64(ring->doorbell_index, ring->wptr << 2); @@ -573,12 +574,13 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - u32 rb_cntl, ib_cntl; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; u32 rb_bufsz; u32 wb_offset; u32 doorbell; u32 doorbell_offset; u32 temp; + u64 wptr_gpu_addr; int i, r; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -660,6 +662,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp); } + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + if (amdgpu_sriov_vf(adev)) + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); + else + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); + /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl); @@ -687,6 +702,7 @@ static int 
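The sdma_v4_0 wptr change above replaces two 32-bit stores into the writeback slot with a single WRITE_ONCE() of the full 64-bit value, so whatever polls the shadow never observes a torn half-old, half-new pointer. A rough userspace model of that single-store idea, with WRITE_ONCE approximated by a volatile store (one machine store on a 64-bit target):

```c
#include <stdio.h>
#include <stdint.h>

/* Crude WRITE_ONCE() stand-in: a single, non-elided 64-bit store. */
static inline void write_once_u64(uint64_t *p, uint64_t v)
{
	*(volatile uint64_t *)p = v;
}

int main(void)
{
	uint64_t wb_slot = 0;			/* stand-in for the wptr writeback slot */
	uint64_t wptr = 0x123456789aULL;

	write_once_u64(&wb_slot, wptr << 2);	/* one 64-bit store, no tearing */
	printf("shadow wptr: 0x%llx\n", (unsigned long long)wb_slot);
	return 0;
}
```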
@@ -687,6 +702,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+
}
return 0;
@@ -783,15 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
const struct sdma_firmware_header_v1_0 *hdr;
const __le32 *fw_data;
u32 fw_size;
- u32 digest_size = 0;
int i, j;
/* halt the MEs */
sdma_v4_0_enable(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- uint16_t version_major;
- uint16_t version_minor;
if (!adev->sdma.instance[i].fw)
return -EINVAL;
@@ -799,23 +812,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- version_major = le16_to_cpu(hdr->header.header_version_major);
- version_minor = le16_to_cpu(hdr->header.header_version_minor);
-
- if (version_major == 1 && version_minor >= 1) {
- const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr;
- digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size);
- }
-
- fw_size -= digest_size;
-
fw_data = (const __le32 *)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);
-
for (j = 0; j < fw_size; j++)
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 812a24dd1204..8284d5dbfc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1413,6 +1413,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
pitcairn_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ break;
case CHIP_VERDE:
amdgpu_program_register_sequence(adev,
verde_golden_registers,
@@ -1437,6 +1438,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
oland_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ break;
case CHIP_HAINAN:
amdgpu_program_register_sequence(adev,
hainan_golden_registers,
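The two si.c hunks above only add a break so that the Pitcairn and Oland cases stop falling through into the next chip's register tables. A toy illustration of what the missing break does; the function and strings here are invented for the example, not the driver's:

    #include <stdio.h>

    enum chip { CHIP_PITCAIRN, CHIP_VERDE };

    /* Without the break, the CHIP_PITCAIRN case would fall through and also
     * run the Verde programming, which is the bug the hunks above fix. */
    static void program_golden_registers(enum chip chip)
    {
            switch (chip) {
            case CHIP_PITCAIRN:
                    puts("program pitcairn tables");
                    break; /* the added break: stop here */
            case CHIP_VERDE:
                    puts("program verde tables");
                    break;
            }
    }

    int main(void)
    {
            program_golden_registers(CHIP_PITCAIRN);
            return 0;
    }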
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index e79befd80eed..7f408f85fdb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -250,6 +250,7 @@
#define PACKET3_SET_UCONFIG_REG 0x79
#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
+#define PACKET3_SET_UCONFIG_REG_INDEX_TYPE (2 << 28)
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
#define PACKET3_SCRATCH_RAM_READ 0x7E
#define PACKET3_LOAD_CONST_RAM 0x80
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 987b958368ac..23a85750edd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -165,6 +165,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r) {
DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
@@ -432,13 +435,19 @@ static int uvd_v7_0_sw_init(void *handle)
return r;
}
-
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+
+ /* currently only use the first enconding ring for
+ * sriov, so set unused location for other unused rings. */
+ if (i == 0)
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ else
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
if (r)
@@ -685,6 +694,11 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
+ WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
+ adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
+ adev->uvd.ring_enc[0].wptr = 0;
+ adev->uvd.ring_enc[0].wptr_old = 0;
+
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
@@ -702,7 +716,6 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
- WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
return 0;
}
@@ -736,11 +749,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
init_table += header->uvd_table_offset;
ring = &adev->uvd.ring;
+ ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
- /* disable clock gating */
- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
- ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0xFFFFFFFF, 0x00000004);
/* mc resume*/
@@ -777,12 +788,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
/* mc resume end*/
@@ -819,17 +824,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
UVD_LMI_CTRL__REQ_MODE_MASK |
0x00100000L));
- /* disable byte swapping */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0);
-
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
-
/* take all subblocks out of reset, except VCPU */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
@@ -838,15 +832,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK);
- /* enable UMC */
- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
-
- /* boot up the VCPU */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
-
- MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
-
/* enable master interrupt */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
@@ -859,40 +844,31 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
/* force RBC into idle state */
size = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
- /* set the write pointer delay */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
-
- /* set the wb address */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
- (upper_32_bits(ring->gpu_addr) >> 2));
-
- /* programm the RB_BASE for ring buffer */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
- lower_32_bits(ring->gpu_addr));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
- upper_32_bits(ring->gpu_addr));
-
- ring->wptr = 0;
ring = &adev->uvd.ring_enc[0];
+ ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
+ /* boot up the VCPU */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
+
+ /* enable UMC */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+
+ MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
+
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->uvd_table_size = table_size;
- return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
- return -EINVAL; /* already initializaed ? */
+ return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
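The SRIOV start paths above do not program most UVD/VCE registers directly; the MMSCH_V1_0_INSERT_DIRECT_WT()/_RD_MOD_WT()/_POLL() macros append entries to an init table that the MMSCH firmware later replays, and the hunks mostly drop or reorder entries in that table. A heavily simplified sketch of the append-then-hand-off pattern; the struct and function names below are invented for illustration and are not the driver's API:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative direct-write entry, loosely modelled on the pattern above. */
    struct init_table_entry {
            uint32_t reg_offset;
            uint32_t value;
    };

    static struct init_table_entry table[32];
    static unsigned int table_size;

    static void insert_direct_wt(uint32_t reg_offset, uint32_t value)
    {
            if (table_size >= 32)
                    return; /* toy bound; the real table is sized by the driver */
            table[table_size].reg_offset = reg_offset;
            table[table_size].value = value;
            table_size++;
    }

    int main(void)
    {
            /* Append writes in the order the engine should see them; the real
             * driver then kicks off MMSCH to replay the table on its behalf. */
            insert_direct_wt(0x1000, 0x0); /* e.g. take a block out of reset */
            insert_direct_wt(0x1004, 0x1); /* e.g. enable a clock */

            printf("queued %u register writes\n", table_size);
            return 0;
    }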
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1ecd6bb90c1f..11134d5f7443 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -173,6 +173,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
+ WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
+ adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
+ adev->vce.ring[0].wptr = 0;
+ adev->vce.ring[0].wptr_old = 0;
+
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
@@ -190,7 +195,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
- WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
return 0;
}
@@ -274,7 +278,8 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
- 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
+ VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
/* end of MC_RESUME */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
@@ -296,11 +301,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->vce_table_size = table_size;
-
- return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
- return -EINVAL; /* already initializaed ? */
+ return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
@@ -443,12 +446,14 @@ static int vce_v4_0_sw_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
/* DOORBELL only works under SRIOV */
ring->use_doorbell = true;
+
+ /* currently only use the first encoding ring for sriov,
+ * so set unused location for other unused rings.
+ */
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2;
- else if (i == 1)
- ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2;
+ ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1;
+ ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
@@ -990,11 +995,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
{
uint32_t val = 0;
- if (state == AMDGPU_IRQ_STATE_ENABLE)
- val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+ if (!amdgpu_sriov_vf(adev)) {
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6cac291c96da..9ff69b90df36 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1028,8 +1028,7 @@ static int vi_common_early_init(void *handle)
/* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |