From 7d18e2f3f29763e0714733f0e0689f14de4faa03 Mon Sep 17 00:00:00 2001 From: Daniel Vetter <daniel.vetter@ffwll.ch> Date: Fri, 23 Oct 2020 14:39:25 +0200 Subject: drm/doc: Document legacy_cursor_update better It's the horror and shouldn't be used. Realized we're not clear on this in a discussion with Rob about what msm is doing to better support async commits. v2: Refine existing todo item to include this (Thomas) Cc: Sean Paul <sean@poorly.run> Cc: Rob Clark <robdclark@gmail.com> Acked-by: Rob Clark <robdclark@gmail.com> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: David Airlie <airlied@linux.ie> Cc: Daniel Vetter <daniel@ffwll.ch> Link: https://patchwork.freedesktop.org/patch/msgid/20201023123925.2374863-3-daniel.vetter@ffwll.ch --- Documentation/gpu/todo.rst | 4 ++++ include/drm/drm_atomic.h | 12 +++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 700637e25ecd..6b224ef14455 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -105,6 +105,10 @@ converted over to the new infrastructure. One issue with the helpers is that they require that drivers handle completion events for atomic commits correctly. But fixing these bugs is good anyway. +Somewhat related is the legacy_cursor_update hack, which should be replaced with +the new atomic_async_check/commit functionality in the helpers in drivers that +still look at that flag. + Contact: Daniel Vetter, respective driver maintainers Level: Advanced diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index d07c851d255b..413fd0ca56a8 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -308,7 +308,6 @@ struct __drm_private_objs_state { * struct drm_atomic_state - the global state object for atomic updates * @ref: count of all references to this state (will not be freed until zero) * @dev: parent DRM device - * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics * @async_update: hint for asynchronous plane update * @planes: pointer to array of structures with per-plane data * @crtcs: pointer to array of CRTC pointers @@ -336,6 +335,17 @@ struct drm_atomic_state { * drm_atomic_crtc_needs_modeset(). */ bool allow_modeset : 1; + /** + * @legacy_cursor_update: + * + * Hint to enforce legacy cursor IOCTL semantics. + * + * WARNING: This is thoroughly broken and pretty much impossible to + * implement correctly. Drivers must ignore this and should instead + * implement &drm_plane_helper_funcs.atomic_async_check and + * &drm_plane_helper_funcs.atomic_async_update hooks. New users of this + * flag are not allowed. + */ bool legacy_cursor_update : 1; bool async_update : 1; /** -- cgit v1.2.3 From 65b7da27d2f6a9ad7da11ed502d0ba564bbdf17d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann <arnd@arndb.de> Date: Mon, 26 Oct 2020 20:41:01 +0100 Subject: drm/tilcdc: avoid 'make W=2' build failure The -Wmissing-field-initializers warning when building with W=2 turns into an error because tilcdc is built with -Werror: drm/tilcdc/tilcdc_drv.c:431:33: error: missing field 'data' initializer [-Werror,-Wmissing-field-initializers] { "regs", tilcdc_regs_show, 0 }, drm/tilcdc/tilcdc_drv.c:432:33: error: missing field 'data' initializer [-Werror,-Wmissing-field-initializers] { "mm", tilcdc_mm_show, 0 }, Add the missing field initializers to address the warning.
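For reference, the initializer shape involved looks like this (struct layout as in include/drm/drm_debugfs.h at the time; the designated-initializer variant is shown only as an illustrative alternative, not what this patch does):

	struct drm_info_list {
		const char *name;				/* debugfs file name */
		int (*show)(struct seq_file *, void *);		/* dump callback */
		u32 driver_features;				/* required driver features */
		void *data;					/* passed to the callback */
	};

	/* Positional form: every field must be spelled out to keep W=2 quiet. */
	{ "regs", tilcdc_regs_show, 0, NULL },

	/* Designated form: omitted fields are implicitly zeroed and compilers
	 * traditionally do not emit -Wmissing-field-initializers for it. */
	{ .name = "regs", .show = tilcdc_regs_show },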
Signed-off-by: Arnd Bergmann <arnd@arndb.de> Signed-off-by: Jyri Sarha <jsarha@ti.com> Link: https://patchwork.freedesktop.org/patch/msgid/20201026194110.3817470-1-arnd@kernel.org --- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index c5f82e693f1a..3d7e4db756b7 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -432,8 +432,8 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg) } static struct drm_info_list tilcdc_debugfs_list[] = { - { "regs", tilcdc_regs_show, 0 }, - { "mm", tilcdc_mm_show, 0 }, + { "regs", tilcdc_regs_show, 0, NULL }, + { "mm", tilcdc_mm_show, 0, NULL }, }; static void tilcdc_debugfs_init(struct drm_minor *minor) -- cgit v1.2.3 From e000650375b65ff77c5ee852b5086f58c741179e Mon Sep 17 00:00:00 2001 From: Peilin Ye <yepeilin.cs@gmail.com> Date: Tue, 27 Oct 2020 12:31:08 -0400 Subject: fbdev/atafb: Remove unused extern variables Remove 6 unused extern variables to reduce confusion. It is worth mentioning that lib/fonts/font_8x8.c and lib/fonts/font_8x16.c also declare `fontdata_8x8` and `fontdata_8x16` respectively, and this file has nothing to do with them. Signed-off-by: Peilin Ye <yepeilin.cs@gmail.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: https://patchwork.freedesktop.org/patch/msgid/cb5bb49a33ff54fef41e719ee9d301a6a73c5f9c.1603788512.git.yepeilin.cs@gmail.com Link: https://patchwork.freedesktop.org/patch/msgid/20201028105647.1210161-1-yepeilin.cs@gmail.com --- drivers/video/fbdev/atafb.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c index f253daa05d9d..e3812a8ff55a 100644 --- a/drivers/video/fbdev/atafb.c +++ b/drivers/video/fbdev/atafb.c @@ -240,14 +240,6 @@ static int *MV300_reg = MV300_reg_8bit; static int inverse; -extern int fontheight_8x8; -extern int fontwidth_8x8; -extern unsigned char fontdata_8x8[]; - -extern int fontheight_8x16; -extern int fontwidth_8x16; -extern unsigned char fontdata_8x16[]; - /* * struct fb_ops { * * open/release and usage marking -- cgit v1.2.3 From 7cb415003468d41aecd6877ae088c38f6c0fc174 Mon Sep 17 00:00:00 2001 From: Peilin Ye <yepeilin.cs@gmail.com> Date: Wed, 28 Oct 2020 06:56:47 -0400 Subject: Fonts: Make font size unsigned in font_desc `width` and `height` are defined as unsigned in our UAPI font descriptor `struct console_font`. Make them unsigned in our kernel font descriptor `struct font_desc`, too. Also, change the corresponding printk() format identifiers from `%d` to `%u`, in sti_select_fbfont(). 
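The mismatch is more than cosmetic once a value exceeds INT_MAX; a contrived illustration (not from this patch):

	unsigned int w = 3000000000U;

	pr_info("%d\n", w);	/* prints -1294967296, the bits reinterpreted as int */
	pr_info("%u\n", w);	/* prints 3000000000 */

Font dimensions never get that large in practice, but matching the specifier to the type keeps the code correct and -Wformat clean.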
Signed-off-by: Peilin Ye <yepeilin.cs@gmail.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: https://patchwork.freedesktop.org/patch/msgid/20201028105647.1210161-1-yepeilin.cs@gmail.com --- drivers/video/console/sticore.c | 2 +- include/linux/font.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index 84c3ca37040a..fade32aa6737 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -506,7 +506,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) if (!fbfont) return NULL; - pr_info("STI selected %dx%d framebuffer font %s for sticon\n", + pr_info("STI selected %ux%u framebuffer font %s for sticon\n", fbfont->width, fbfont->height, fbfont->name); bpc = ((fbfont->width+7)/8) * fbfont->height; diff --git a/include/linux/font.h b/include/linux/font.h index 4a3f8741bb7e..b7214d7881f0 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -16,7 +16,7 @@ struct font_desc { int idx; const char *name; - int width, height; + unsigned int width, height; const void *data; int pref; }; -- cgit v1.2.3 From 5144eead3f8c80ac7f913c07139442fede94003e Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi <peter.ujfalusi@ti.com> Date: Fri, 23 Oct 2020 12:46:02 +0300 Subject: drm: xlnx: Use dma_request_chan for DMA channel request There is no need to use the of_dma_request_slave_channel() directly as dma_request_chan() is going to try to get the channel via OF as well. Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com> Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com> Link: https://patchwork.freedesktop.org/patch/msgid/20201023094602.5630-1-peter.ujfalusi@ti.com --- drivers/gpu/drm/xlnx/zynqmp_disp.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index 0b3bd62e7631..5802752860dd 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -28,7 +28,6 @@ #include <linux/dmaengine.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/spinlock.h> @@ -1316,8 +1315,7 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp, snprintf(dma_channel_name, sizeof(dma_channel_name), "%s%u", dma_names[layer->id], i); - dma->chan = of_dma_request_slave_channel(disp->dev->of_node, - dma_channel_name); + dma->chan = dma_request_chan(disp->dev, dma_channel_name); if (IS_ERR(dma->chan)) { dev_err(disp->dev, "failed to request dma channel\n"); ret = PTR_ERR(dma->chan); -- cgit v1.2.3 From d099fc8f540add80f725014fdd4f7f49f3c58911 Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Thu, 22 Oct 2020 18:26:58 +0200 Subject: drm/ttm: new TT backend allocation pool v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This replaces the spaghetti code in the two existing page pools. First of all depending on the allocation size it is between 3 (1GiB) and 5 (1MiB) times faster than the old implementation. It makes better use of buddy pages to allow for larger physical contiguous allocations which should result in better TLB utilization at least for amdgpu. Instead of a completely braindead approach of filling the pool with one CPU while another one is trying to shrink it we only give back freed pages. 
This also results in much less locking contention and a trylock-free MM shrinker callback, so we can guarantee that pages are given back to the system when needed. Downside of this is that it takes longer for many small allocations until the pool is filled up. We could address this, but I couldn't find a use case where this actually matters. We also don't bother freeing large chunks of pages any more since the CPU overhead in that path isn't really that important. The sysfs files are replaced with a single module parameter, allowing users to override how many pages should be globally pooled in TTM. This unfortunately breaks the UAPI slightly, but as far as we know nobody ever depended on this. Zeroing memory coming from the pool was handled inconsistently. The alloc_pages() based pool was zeroing it, the dma_alloc_attr() based one wasn't. For now the new implementation isn't zeroing pages from the pool either and only sets the __GFP_ZERO flag when necessary. The implementation has only 768 lines of code compared to the over 2600 of the old one, and also allows for saving quite a bunch of code in the drivers since we don't need specialized handling there any more based on kernel config. In addition to all of that, there was a neat bug with IOMMU, coherent DMA mappings and huge pages which is now fixed in the new code as well. v2: make ttm_pool_apply_caching static as reported by the kernel bot, add some more checks v3: fix some more checkpatch.pl warnings Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397080/?series=83051&rev=1 --- drivers/gpu/drm/ttm/Makefile | 2 +- drivers/gpu/drm/ttm/ttm_memory.c | 3 + drivers/gpu/drm/ttm/ttm_pool.c | 667 +++++++++++++++++++++++++++++++++++++++ include/drm/ttm/ttm_caching.h | 2 + include/drm/ttm/ttm_pool.h | 91 ++++++ 5 files changed, 764 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/ttm/ttm_pool.c create mode 100644 include/drm/ttm/ttm_pool.h diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 90c0da88cc98..0096bacbcf32 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -5,7 +5,7 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \ - ttm_resource.o + ttm_resource.o ttm_pool.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 69cf622e79e5..3012d0388c51 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -38,6 +38,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/swap.h> +#include <drm/ttm/ttm_pool.h> #define TTM_MEMORY_ALLOC_RETRIES 4 @@ -453,6 +454,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) } ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); + ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE)); return 0; out_no_zone: ttm_mem_global_release(glob); @@ -467,6 +469,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) /* let the page allocator first stop the shrink work.
*/ ttm_page_alloc_fini(); ttm_dma_page_alloc_fini(); + ttm_pool_mgr_fini(); flush_workqueue(glob->swap_queue); destroy_workqueue(glob->swap_queue); diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c new file mode 100644 index 000000000000..1e50deefb5d5 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +/* Pooling of allocated pages is necessary because changing the caching + * attributes of the linear mapping on x86 requires a costly cross-CPU TLB + * invalidate for those addresses. + * + * In addition, allocations from the DMA coherent API are pooled as well + * because they are rather slow compared to alloc_pages+map.
+ */ + +#include <linux/module.h> +#include <linux/dma-mapping.h> + +#ifdef CONFIG_X86 +#include <asm/set_memory.h> +#endif + +#include <drm/ttm/ttm_pool.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_tt.h> + +/** + * struct ttm_pool_dma - Helper object for coherent DMA mappings + * + * @addr: original DMA address returned for the mapping + * @vaddr: original vaddr return for the mapping and order in the lower bits + */ +struct ttm_pool_dma { + dma_addr_t addr; + unsigned long vaddr; +}; + +static unsigned long page_pool_size; + +MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool"); +module_param(page_pool_size, ulong, 0644); + +static atomic_long_t allocated_pages; + +static struct ttm_pool_type global_write_combined[MAX_ORDER]; +static struct ttm_pool_type global_uncached[MAX_ORDER]; + +static spinlock_t shrinker_lock; +static struct list_head shrinker_list; +static struct shrinker mm_shrinker; + +/* Allocate pages of size 1 << order with the given gfp_flags */ +static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, + unsigned int order) +{ + unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS; + struct ttm_pool_dma *dma; + struct page *p; + void *vaddr; + + if (order) { + gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | + __GFP_KSWAPD_RECLAIM; + gfp_flags &= ~__GFP_MOVABLE; + gfp_flags &= ~__GFP_COMP; + } + + if (!pool->use_dma_alloc) { + p = alloc_pages(gfp_flags, order); + if (p) + p->private = order; + return p; + } + + dma = kmalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) + return NULL; + + if (order) + attr |= DMA_ATTR_NO_WARN; + + vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, + &dma->addr, gfp_flags, attr); + if (!vaddr) + goto error_free; + + /* TODO: This is an illegal abuse of the DMA API, but we need to rework + * TTM page fault handling and extend the DMA API to clean this up. + */ + if (is_vmalloc_addr(vaddr)) + p = vmalloc_to_page(vaddr); + else + p = virt_to_page(vaddr); + + dma->vaddr = (unsigned long)vaddr | order; + p->private = (unsigned long)dma; + return p; + +error_free: + kfree(dma); + return NULL; +} + +/* Reset the caching and pages of size 1 << order */ +static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching, + unsigned int order, struct page *p) +{ + unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS; + struct ttm_pool_dma *dma; + void *vaddr; + +#ifdef CONFIG_X86 + /* We don't care that set_pages_wb is inefficient here. This is only + * used when we have to shrink and CPU overhead is irrelevant then. 
+ */ + if (caching != ttm_cached && !PageHighMem(p)) + set_pages_wb(p, 1 << order); +#endif + + if (!pool->use_dma_alloc) { + __free_pages(p, order); + return; + } + + if (order) + attr |= DMA_ATTR_NO_WARN; + + dma = (void *)p->private; + vaddr = (void *)(dma->vaddr & PAGE_MASK); + dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr, + attr); + kfree(dma); +} + +/* Apply a new caching to an array of pages */ +static int ttm_pool_apply_caching(struct page **first, struct page **last, + enum ttm_caching caching) +{ +#ifdef CONFIG_X86 + unsigned int num_pages = last - first; + + if (!num_pages) + return 0; + + switch (caching) { + case ttm_cached: + break; + case ttm_write_combined: + return set_pages_array_wc(first, num_pages); + case ttm_uncached: + return set_pages_array_uc(first, num_pages); + } +#endif + return 0; +} + +/* Map pages of 1 << order size and fill the DMA address array */ +static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, + struct page *p, dma_addr_t **dma_addr) +{ + dma_addr_t addr; + unsigned int i; + + if (pool->use_dma_alloc) { + struct ttm_pool_dma *dma = (void *)p->private; + + addr = dma->addr; + } else { + size_t size = (1ULL << order) * PAGE_SIZE; + + addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(pool->dev, addr)) + return -EFAULT; + } + + for (i = 1 << order; i ; --i) { + *(*dma_addr)++ = addr; + addr += PAGE_SIZE; + } + + return 0; +} + +/* Unmap pages of 1 << order size */ +static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr, + unsigned int num_pages) +{ + /* Unmapped while freeing the page */ + if (pool->use_dma_alloc) + return; + + dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT, + DMA_BIDIRECTIONAL); +} + +/* Give pages into a specific pool_type */ +static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p) +{ + spin_lock(&pt->lock); + list_add(&p->lru, &pt->pages); + spin_unlock(&pt->lock); + atomic_long_add(1 << pt->order, &allocated_pages); +} + +/* Take pages from a specific pool_type, return NULL when nothing available */ +static struct page *ttm_pool_type_take(struct ttm_pool_type *pt) +{ + struct page *p; + + spin_lock(&pt->lock); + p = list_first_entry_or_null(&pt->pages, typeof(*p), lru); + if (p) { + atomic_long_sub(1 << pt->order, &allocated_pages); + list_del(&p->lru); + } + spin_unlock(&pt->lock); + + return p; +} + +/* Count the number of pages available in a pool_type */ +static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt) +{ + unsigned int count = 0; + struct page *p; + + spin_lock(&pt->lock); + /* Only used for debugfs, the overhead doesn't matter */ + list_for_each_entry(p, &pt->pages, lru) + ++count; + spin_unlock(&pt->lock); + + return count; +} + +/* Initialize and add a pool type to the global shrinker list */ +static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, + enum ttm_caching caching, unsigned int order) +{ + pt->pool = pool; + pt->caching = caching; + pt->order = order; + spin_lock_init(&pt->lock); + INIT_LIST_HEAD(&pt->pages); + + spin_lock(&shrinker_lock); + list_add_tail(&pt->shrinker_list, &shrinker_list); + spin_unlock(&shrinker_lock); +} + +/* Remove a pool_type from the global shrinker list and free all pages */ +static void ttm_pool_type_fini(struct ttm_pool_type *pt) +{ + struct page *p, *tmp; + + spin_lock(&shrinker_lock); + list_del(&pt->shrinker_list); + spin_unlock(&shrinker_lock); + + list_for_each_entry_safe(p, tmp, &pt->pages, lru) +
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); +} + +/* Return the pool_type to use for the given caching and order */ +static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, + enum ttm_caching caching, + unsigned int order) +{ + if (pool->use_dma_alloc) + return &pool->caching[caching].orders[order]; + +#ifdef CONFIG_X86 + switch (caching) { + case ttm_write_combined: + return &global_write_combined[order]; + case ttm_uncached: + return &global_uncached[order]; + default: + break; + } +#endif + + return NULL; +} + +/* Free pages using the global shrinker list */ +static unsigned int ttm_pool_shrink(void) +{ + struct ttm_pool_type *pt; + unsigned int num_freed; + struct page *p; + + spin_lock(&shrinker_lock); + pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list); + + p = ttm_pool_type_take(pt); + if (p) { + ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); + num_freed = 1 << pt->order; + } else { + num_freed = 0; + } + + list_move_tail(&pt->shrinker_list, &shrinker_list); + spin_unlock(&shrinker_lock); + + return num_freed; +} + +/* Return the allocation order of a page */ +static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p) +{ + if (pool->use_dma_alloc) { + struct ttm_pool_dma *dma = (void *)p->private; + + return dma->vaddr & ~PAGE_MASK; + } + + return p->private; +} + +/** + * ttm_pool_alloc - Fill a ttm_tt object + * + * @pool: ttm_pool to use + * @tt: ttm_tt object to fill + * @ctx: operation context + * + * Fill the ttm_tt object with pages and also make sure to DMA map them when + * necessary. + * + * Returns: 0 on success, negative error code otherwise. + */ +int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, + struct ttm_operation_ctx *ctx) +{ + unsigned long num_pages = tt->num_pages; + dma_addr_t *dma_addr = tt->dma_address; + struct page **caching = tt->pages; + struct page **pages = tt->pages; + gfp_t gfp_flags = GFP_USER; + unsigned int i, order; + struct page *p; + int r; + + WARN_ON(!num_pages || ttm_tt_is_populated(tt)); + WARN_ON(dma_addr && !pool->dev); + + if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) + gfp_flags |= __GFP_ZERO; + + if (tt->page_flags & TTM_PAGE_FLAG_NO_RETRY) + gfp_flags |= __GFP_RETRY_MAYFAIL; + + if (pool->use_dma32) + gfp_flags |= GFP_DMA32; + else + gfp_flags |= GFP_HIGHUSER; + + for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages; + order = min_t(unsigned int, order, __fls(num_pages))) { + bool apply_caching = false; + struct ttm_pool_type *pt; + + pt = ttm_pool_select_type(pool, tt->caching, order); + p = pt ?
ttm_pool_type_take(pt) : NULL; + if (p) { + apply_caching = true; + } else { + p = ttm_pool_alloc_page(pool, gfp_flags, order); + if (p && PageHighMem(p)) + apply_caching = true; + } + + if (!p) { + if (order) { + --order; + continue; + } + r = -ENOMEM; + goto error_free_all; + } + + if (apply_caching) { + r = ttm_pool_apply_caching(caching, pages, + tt->caching); + if (r) + goto error_free_page; + caching = pages + (1 << order); + } + + r = ttm_mem_global_alloc_page(&ttm_mem_glob, p, + (1 << order) * PAGE_SIZE, + ctx); + if (r) + goto error_free_page; + + if (dma_addr) { + r = ttm_pool_map(pool, order, p, &dma_addr); + if (r) + goto error_global_free; + } + + num_pages -= 1 << order; + for (i = 1 << order; i; --i) + *(pages++) = p++; + } + + r = ttm_pool_apply_caching(caching, pages, tt->caching); + if (r) + goto error_free_all; + + return 0; + +error_global_free: + ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE); + +error_free_page: + ttm_pool_free_page(pool, tt->caching, order, p); + +error_free_all: + num_pages = tt->num_pages - num_pages; + for (i = 0; i < num_pages; ) { + order = ttm_pool_page_order(pool, tt->pages[i]); + ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]); + i += 1 << order; + } + + return r; +} +EXPORT_SYMBOL(ttm_pool_alloc); + +/** + * ttm_pool_free - Free the backing pages from a ttm_tt object + * + * @pool: Pool to give pages back to. + * @tt: ttm_tt object to unpopulate + * + * Give the backing pages back to a pool or free them + */ +void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt) +{ + unsigned int i; + + for (i = 0; i < tt->num_pages; ) { + struct page *p = tt->pages[i]; + unsigned int order, num_pages; + struct ttm_pool_type *pt; + + order = ttm_pool_page_order(pool, p); + num_pages = 1ULL << order; + ttm_mem_global_free_page(&ttm_mem_glob, p, + num_pages * PAGE_SIZE); + if (tt->dma_address) + ttm_pool_unmap(pool, tt->dma_address[i], num_pages); + + pt = ttm_pool_select_type(pool, tt->caching, order); + if (pt) + ttm_pool_type_give(pt, tt->pages[i]); + else + ttm_pool_free_page(pool, tt->caching, order, + tt->pages[i]); + + i += num_pages; + } + + while (atomic_long_read(&allocated_pages) > page_pool_size) + ttm_pool_shrink(); +} +EXPORT_SYMBOL(ttm_pool_free); + +/** + * ttm_pool_init - Initialize a pool + * + * @pool: the pool to initialize + * @dev: device for DMA allocations and mappings + * @use_dma_alloc: true if coherent DMA alloc should be used + * @use_dma32: true if GFP_DMA32 should be used + * + * Initialize the pool and its pool types. + */ +void ttm_pool_init(struct ttm_pool *pool, struct device *dev, + bool use_dma_alloc, bool use_dma32) +{ + unsigned int i, j; + + WARN_ON(!dev && use_dma_alloc); + + pool->dev = dev; + pool->use_dma_alloc = use_dma_alloc; + pool->use_dma32 = use_dma32; + + for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) + for (j = 0; j < MAX_ORDER; ++j) + ttm_pool_type_init(&pool->caching[i].orders[j], + pool, i, j); +} +EXPORT_SYMBOL(ttm_pool_init); + +/** + * ttm_pool_fini - Cleanup a pool + * + * @pool: the pool to clean up + * + * Free all pages in the pool and unregister the types from the global + * shrinker.
+ */ +void ttm_pool_fini(struct ttm_pool *pool) +{ + unsigned int i, j; + + for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) + for (j = 0; j < MAX_ORDER; ++j) + ttm_pool_type_fini(&pool->caching[i].orders[j]); +} +EXPORT_SYMBOL(ttm_pool_fini); + +#ifdef CONFIG_DEBUG_FS + +/* Dump information about the different pool types */ +static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt, + struct seq_file *m) +{ + unsigned int i; + + for (i = 0; i < MAX_ORDER; ++i) + seq_printf(m, " %8u", ttm_pool_type_count(&pt[i])); + seq_puts(m, "\n"); +} + +/** + * ttm_pool_debugfs - Debugfs dump function for a pool + * + * @pool: the pool to dump the information for + * @m: seq_file to dump to + * + * Make a debugfs dump with the per pool and global information. + */ +int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) +{ + unsigned int i; + + spin_lock(&shrinker_lock); + + seq_puts(m, "\t "); + for (i = 0; i < MAX_ORDER; ++i) + seq_printf(m, " ---%2u---", i); + seq_puts(m, "\n"); + + seq_puts(m, "wc\t:"); + ttm_pool_debugfs_orders(global_write_combined, m); + seq_puts(m, "uc\t:"); + ttm_pool_debugfs_orders(global_uncached, m); + + for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { + seq_puts(m, "DMA "); + switch (i) { + case ttm_cached: + seq_puts(m, "\t:"); + break; + case ttm_write_combined: + seq_puts(m, "wc\t:"); + break; + case ttm_uncached: + seq_puts(m, "uc\t:"); + break; + } + ttm_pool_debugfs_orders(pool->caching[i].orders, m); + } + + seq_printf(m, "\ntotal\t: %8lu of %8lu\n", + atomic_long_read(&allocated_pages), page_pool_size); + + spin_unlock(&shrinker_lock); + + return 0; +} +EXPORT_SYMBOL(ttm_pool_debugfs); + +#endif + +/* As long as pages are available make sure to release at least one */ +static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + unsigned long num_freed = 0; + + do + num_freed += ttm_pool_shrink(); + while (!num_freed && atomic_long_read(&allocated_pages)); + + return num_freed; +} + +/* Return the number of pages available or SHRINK_EMPTY if we have none */ +static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + unsigned long num_pages = atomic_long_read(&allocated_pages); + + return num_pages ? num_pages : SHRINK_EMPTY; +} + +/** + * ttm_pool_mgr_init - Initialize globals + * + * @num_pages: default number of pages + * + * Initialize the global locks and lists for the MM shrinker. + */ +int ttm_pool_mgr_init(unsigned long num_pages) +{ + unsigned int i; + + if (!page_pool_size) + page_pool_size = num_pages; + + spin_lock_init(&shrinker_lock); + INIT_LIST_HEAD(&shrinker_list); + + for (i = 0; i < MAX_ORDER; ++i) { + ttm_pool_type_init(&global_write_combined[i], NULL, + ttm_write_combined, i); + ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i); + } + + mm_shrinker.count_objects = ttm_pool_shrinker_count; + mm_shrinker.scan_objects = ttm_pool_shrinker_scan; + mm_shrinker.seeks = 1; + return register_shrinker(&mm_shrinker); +} + +/** + * ttm_pool_mgr_fini - Finalize globals + * + * Cleanup the global pools and unregister the MM shrinker. 
+ */ +void ttm_pool_mgr_fini(void) +{ + unsigned int i; + + for (i = 0; i < MAX_ORDER; ++i) { + ttm_pool_type_fini(&global_write_combined[i]); + ttm_pool_type_fini(&global_uncached[i]); + } + + unregister_shrinker(&mm_shrinker); + WARN_ON(!list_empty(&shrinker_list)); +} diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h index 161624dcf6be..a0b4a49fa432 100644 --- a/include/drm/ttm/ttm_caching.h +++ b/include/drm/ttm/ttm_caching.h @@ -25,6 +25,8 @@ #ifndef _TTM_CACHING_H_ #define _TTM_CACHING_H_ +#define TTM_NUM_CACHING_TYPES 3 + enum ttm_caching { ttm_uncached, ttm_write_combined, diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h new file mode 100644 index 000000000000..4321728bdd11 --- /dev/null +++ b/include/drm/ttm/ttm_pool.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ + +#ifndef _TTM_PAGE_POOL_H_ +#define _TTM_PAGE_POOL_H_ + +#include <linux/mmzone.h> +#include <linux/llist.h> +#include <linux/spinlock.h> +#include <drm/ttm/ttm_caching.h> + +struct device; +struct ttm_tt; +struct ttm_pool; +struct ttm_operation_ctx; + +/** + * ttm_pool_type - Pool for a certain memory type + * + * @pool: the pool we belong to, might be NULL for the global ones + * @order: the allocation order our pages have + * @caching: the caching type our pages have + * @shrinker_list: our place on the global shrinker list + * @lock: protection of the page list + * @pages: the list of pages in the pool + */ +struct ttm_pool_type { + struct ttm_pool *pool; + unsigned int order; + enum ttm_caching caching; + + struct list_head shrinker_list; + + spinlock_t lock; + struct list_head pages; +}; + +/** + * ttm_pool - Pool for all caching and orders + * + * @use_dma_alloc: if coherent DMA allocations should be used + * @use_dma32: if GFP_DMA32 should be used + * @caching: pools for each caching/order + */ +struct ttm_pool { + struct device *dev; + + bool use_dma_alloc; + bool use_dma32; + + struct { + struct ttm_pool_type orders[MAX_ORDER]; + } caching[TTM_NUM_CACHING_TYPES]; +}; + +int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, + struct ttm_operation_ctx *ctx); +void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt); + +void ttm_pool_init(struct ttm_pool *pool, struct device *dev, + bool use_dma_alloc, bool use_dma32); +void ttm_pool_fini(struct ttm_pool *pool); + +int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m); + +int ttm_pool_mgr_init(unsigned long num_pages); +void ttm_pool_mgr_fini(void); + +#endif -- cgit v1.2.3 From ee5d2a8e549e90325fcc31825269f89647cd6fac Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Sat, 24 Oct 2020 13:10:28 +0200 Subject: drm/ttm: wire up the new pool as default one v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide the necessary parameters by all drivers and use the new pool alloc when no driver specific function is provided. 
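The resulting fallback in the common code is straightforward; abridged from the ttm_tt.c hunk below:

	int ttm_tt_populate(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
	{
		...
		if (bdev->driver->ttm_tt_populate)
			ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
		else
			ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
		...
	}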
v2: fix the GEM VRAM helpers Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397081/?series=83051&rev=1 --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++-- drivers/gpu/drm/drm_gem_vram_helper.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 14 +++++++++----- drivers/gpu/drm/qxl/qxl_ttm.c | 5 ++--- drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++-- drivers/gpu/drm/ttm/ttm_bo.c | 8 ++++++-- drivers/gpu/drm/ttm/ttm_memory.c | 2 +- drivers/gpu/drm/ttm/ttm_tt.c | 5 ++--- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 5 +++-- include/drm/ttm/ttm_bo_driver.h | 11 +++++++---- 10 files changed, 36 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index beacd00221d8..34944927838e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1914,10 +1914,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) mutex_init(&adev->mman.gtt_window_lock); /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&adev->mman.bdev, - &amdgpu_bo_driver, + r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, adev_to_drm(adev)->vma_offset_manager, + adev->need_swiotlb, dma_addressing_limited(adev->dev)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 9da823eb0edd..683762b2f9a8 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1045,10 +1045,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, vmm->vram_base = vram_base; vmm->vram_size = vram_size; - ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, + ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev, dev->anon_inode->i_mapping, dev->vma_offset_manager, - true); + false, true); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 0592ed6eaad1..d696d882c9eb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -281,6 +281,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) struct nvkm_pci *pci = device->pci; struct nvif_mmu *mmu = &drm->client.mmu; struct drm_device *dev = drm->dev; + bool need_swiotlb = false; int typei, ret; ret = nouveau_ttm_init_host(drm, 0); @@ -315,11 +316,14 @@ nouveau_ttm_init(struct nouveau_drm *drm) drm->agp.cma = pci->agp.cma; } - ret = ttm_bo_device_init(&drm->ttm.bdev, - &nouveau_bo_driver, - dev->anon_inode->i_mapping, - dev->vma_offset_manager, - drm->client.mmu.dmabits <= 32 ? 
true : false); +#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) + need_swiotlb = !!swiotlb_nr_tbl(); +#endif + + ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver, + drm->dev->dev, dev->anon_inode->i_mapping, + dev->vma_offset_manager, need_swiotlb, + drm->client.mmu.dmabits <= 32); if (ret) { NV_ERROR(drm, "error initialising bo driver, %d\n", ret); return ret; diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 9609eeb52821..d8ecfb8b3193 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -194,11 +194,10 @@ int qxl_ttm_init(struct qxl_device *qdev) int num_io_pages; /* != rom->num_io_pages, we include surface0 */ /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&qdev->mman.bdev, - &qxl_bo_driver, + r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL, qdev->ddev.anon_inode->i_mapping, qdev->ddev.vma_offset_manager, - false); + false, false); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 0a6d7ea847db..1add3918519c 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -846,10 +846,10 @@ int radeon_ttm_init(struct radeon_device *rdev) int r; /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&rdev->mman.bdev, - &radeon_bo_driver, + r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev, rdev->ddev->anon_inode->i_mapping, rdev->ddev->vma_offset_manager, + rdev->need_swiotlb, dma_addressing_limited(&rdev->pdev->dev)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 40c72a0f9325..e9f91cfce5e9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1283,6 +1283,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) pr_debug("Swap list %d was clean\n", i); spin_unlock(&glob->lru_lock); + ttm_pool_fini(&bdev->pool); + if (!ret) ttm_bo_global_release(); @@ -1307,9 +1309,10 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_driver *driver, + struct device *dev, struct address_space *mapping, struct drm_vma_offset_manager *vma_manager, - bool need_dma32) + bool use_dma_alloc, bool use_dma32) { struct ttm_bo_global *glob = &ttm_bo_glob; int ret; @@ -1324,12 +1327,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->driver = driver; ttm_bo_init_sysman(bdev); + ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32); bdev->vma_manager = vma_manager; INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = mapping; - bdev->need_dma32 = need_dma32; + bdev->need_dma32 = use_dma32; mutex_lock(&ttm_global_mutex); list_add_tail(&bdev->device_list, &glob->device_list); mutex_unlock(&ttm_global_mutex); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 3012d0388c51..b15a91c90271 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -454,7 +454,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) } ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); - ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE)); + ttm_pool_mgr_init(glob->zone_kernel->max_mem 
/ (2 * PAGE_SIZE)); return 0; out_no_zone: ttm_mem_global_release(glob); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 65c4254eea5c..90054280cd8f 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -37,7 +37,6 @@ #include <linux/file.h> #include <drm/drm_cache.h> #include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_page_alloc.h> /** * Allocates a ttm structure for the given BO. @@ -321,7 +320,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, if (bdev->driver->ttm_tt_populate) ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx); else - ret = ttm_pool_populate(ttm, ctx); + ret = ttm_pool_alloc(&bdev->pool, ttm, ctx); if (ret) return ret; @@ -363,6 +362,6 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev, if (bdev->driver->ttm_tt_unpopulate) bdev->driver->ttm_tt_unpopulate(bdev, ttm); else - ttm_pool_unpopulate(ttm); + ttm_pool_free(&bdev->pool, ttm); ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 4860370740e0..7bd1be26afe4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -878,10 +878,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) drm_vma_offset_manager_init(&dev_priv->vma_manager, DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE); - ret = ttm_bo_device_init(&dev_priv->bdev, - &vmw_bo_driver, + ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver, + dev_priv->dev->dev, dev->anon_inode->i_mapping, &dev_priv->vma_manager, + dev_priv->map_mode == vmw_dma_alloc_coherent, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing TTM buffer object driver.\n"); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 29f6a1d1c853..45ae87640909 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -42,6 +42,7 @@ #include "ttm_module.h" #include "ttm_placement.h" #include "ttm_tt.h" +#include "ttm_pool.h" /** * struct ttm_bo_driver @@ -295,6 +296,7 @@ struct ttm_bo_device { * Protected by internal locks. */ struct drm_vma_offset_manager *vma_manager; + struct ttm_pool pool; /* * Protected by the global:lru lock. @@ -395,11 +397,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev); * @bdev: A pointer to a struct ttm_bo_device to initialize. * @glob: A pointer to an initialized struct ttm_bo_global. * @driver: A pointer to a struct ttm_bo_driver set up by the caller. + * @dev: The core kernel device pointer for DMA mappings and allocations. * @mapping: The address space to use for this bo. * @vma_manager: A pointer to a vma manager. - * @file_page_offset: Offset into the device address space that is available - * for buffer data. This ensures compatibility with other users of the - * address space. + * @use_dma_alloc: If coherent DMA allocation API should be used. + * @use_dma32: If we should use GFP_DMA32 for device memory allocations. 
* * Initializes a struct ttm_bo_device: * Returns: @@ -407,9 +409,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev); */ int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_driver *driver, + struct device *dev, struct address_space *mapping, struct drm_vma_offset_manager *vma_manager, - bool need_dma32); + bool use_dma_alloc, bool use_dma32); /** * ttm_bo_unmap_virtual -- cgit v1.2.3 From e93b2da9799e5cb97760969f3e1f02a5bdac29fe Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Sat, 24 Oct 2020 13:11:29 +0200 Subject: drm/amdgpu: switch to new allocator v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It should be able to handle all cases here. v2: fix debugfs as well Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397086/?series=83051&rev=1 --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 45 ++++++++++----------------------- 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 34944927838e..f3416cc1bd04 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -47,7 +47,6 @@ #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/drm_debugfs.h> #include <drm/amdgpu_drm.h> @@ -1383,15 +1382,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, return 0; } -#ifdef CONFIG_SWIOTLB - if (adev->need_swiotlb && swiotlb_nr_tbl()) { - return ttm_dma_populate(>t->ttm, adev->dev, ctx); - } -#endif - - /* fall back to generic helper to populate the page array - * and map them to the device */ - return ttm_populate_and_map_pages(adev->dev, >t->ttm, ctx); + return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); } /** @@ -1400,7 +1391,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, * Unmaps pages of a ttm_tt object from the device address space and * unpopulates the page array backing it. 
*/ -static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_device *adev; @@ -1425,16 +1417,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt * return; adev = amdgpu_ttm_adev(bdev); - -#ifdef CONFIG_SWIOTLB - if (adev->need_swiotlb && swiotlb_nr_tbl()) { - ttm_dma_unpopulate(>t->ttm, adev->dev); - return; - } -#endif - - /* fall back to generic helper to unmap and unpopulate array */ - ttm_unmap_and_unpopulate_pages(adev->dev, >t->ttm); + return ttm_pool_free(&adev->mman.bdev.pool, ttm); } /** @@ -2347,16 +2330,22 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) return 0; } +static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + return ttm_pool_debugfs(&adev->mman.bdev.pool, m); +} + static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM}, {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT}, {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS}, {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS}, {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA}, - {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, -#ifdef CONFIG_SWIOTLB - {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} -#endif + {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL}, }; /** @@ -2649,12 +2638,6 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) } count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); - -#ifdef CONFIG_SWIOTLB - if (!(adev->need_swiotlb && swiotlb_nr_tbl())) - --count; -#endif - return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); #else return 0; -- cgit v1.2.3 From 0fe3cf3a53b5c1205ec7d321be1185b075dff205 Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Sat, 24 Oct 2020 13:12:23 +0200 Subject: drm/radeon: switch to new allocator v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It should be able to handle all cases here. 
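Roughly, the three paths the old code had to pick between collapse into a single call (editorial summary of the diff below, not part of the commit message):

	/* old: ttm_pool_populate()          - unmapped system pages (AGP case)
	 * old: ttm_dma_populate()           - coherent pages via dma_alloc_attrs()
	 * old: ttm_populate_and_map_pages() - alloc_pages() plus dma_map_page()
	 *
	 * new: ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx), which picks the
	 * strategy from the use_dma_alloc/use_dma32 flags given to ttm_pool_init()
	 * at device initialization.
	 */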
v2: fix debugfs as well Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397088/?series=83051&rev=1 --- drivers/gpu/drm/radeon/radeon_ttm.c | 52 ++++++++++--------------------------- 1 file changed, 14 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1add3918519c..95038ac3382e 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -47,7 +47,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/ttm/ttm_placement.h> #include "radeon_reg.h" @@ -679,19 +678,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, return 0; } -#if IS_ENABLED(CONFIG_AGP) - if (rdev->flags & RADEON_IS_AGP) { - return ttm_pool_populate(ttm, ctx); - } -#endif - -#ifdef CONFIG_SWIOTLB - if (rdev->need_swiotlb && swiotlb_nr_tbl()) { - return ttm_dma_populate(>t->ttm, rdev->dev, ctx); - } -#endif - - return ttm_populate_and_map_pages(rdev->dev, >t->ttm, ctx); + return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx); } static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) @@ -709,21 +696,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt * if (slave) return; -#if IS_ENABLED(CONFIG_AGP) - if (rdev->flags & RADEON_IS_AGP) { - ttm_pool_unpopulate(ttm); - return; - } -#endif - -#ifdef CONFIG_SWIOTLB - if (rdev->need_swiotlb && swiotlb_nr_tbl()) { - ttm_dma_unpopulate(>t->ttm, rdev->dev); - return; - } -#endif - - ttm_unmap_and_unpopulate_pages(rdev->dev, >t->ttm); + return ttm_pool_free(&rdev->mman.bdev.pool, ttm); } int radeon_ttm_tt_set_userptr(struct radeon_device *rdev, @@ -857,6 +830,9 @@ int radeon_ttm_init(struct radeon_device *rdev) } rdev->mman.initialized = true; + ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb, + dma_addressing_limited(&rdev->pdev->dev)); + r = radeon_ttm_init_vram(rdev); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); @@ -1004,6 +980,14 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) return 0; } +static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + + return ttm_pool_debugfs(&rdev->mman.bdev.pool, m); +} static int ttm_pl_vram = TTM_PL_VRAM; static int ttm_pl_tt = TTM_PL_TT; @@ -1011,10 +995,7 @@ static int ttm_pl_tt = TTM_PL_TT; static struct drm_info_list radeon_ttm_debugfs_list[] = { {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram}, {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt}, - {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, -#ifdef CONFIG_SWIOTLB - {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} -#endif + {"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL} }; static int radeon_ttm_vram_open(struct inode *inode, struct file *filep) @@ -1142,11 +1123,6 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) count = ARRAY_SIZE(radeon_ttm_debugfs_list); -#ifdef CONFIG_SWIOTLB - if (!(rdev->need_swiotlb && swiotlb_nr_tbl())) - --count; -#endif - return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count); #else -- cgit v1.2.3 From 
461619f5c3242aaee9ec3f0b7072719bd86ea207 Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Sat, 24 Oct 2020 13:13:25 +0200 Subject: drm/nouveau: switch to new allocator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It should be able to handle all cases now. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397082/?series=83051&rev=1 --- drivers/gpu/drm/nouveau/nouveau_bo.c | 30 ++---------------------------- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 - 2 files changed, 2 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 75fddbcd7832..746c06ed195b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1327,25 +1327,13 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, drm = nouveau_bdev(bdev); dev = drm->dev->dev; -#if IS_ENABLED(CONFIG_AGP) - if (drm->agp.bridge) { - return ttm_pool_populate(ttm, ctx); - } -#endif - -#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) - if (swiotlb_nr_tbl()) { - return ttm_dma_populate((void *)ttm, dev, ctx); - } -#endif - return ttm_populate_and_map_pages(dev, ttm_dma, ctx); + return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); } static void nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - struct ttm_tt *ttm_dma = (void *)ttm; struct nouveau_drm *drm; struct device *dev; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); @@ -1356,21 +1344,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, drm = nouveau_bdev(bdev); dev = drm->dev->dev; -#if IS_ENABLED(CONFIG_AGP) - if (drm->agp.bridge) { - ttm_pool_unpopulate(ttm); - return; - } -#endif - -#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) - if (swiotlb_nr_tbl()) { - ttm_dma_unpopulate((void *)ttm, dev); - return; - } -#endif - - ttm_unmap_and_unpopulate_pages(dev, ttm_dma); + return ttm_pool_free(&drm->ttm.bdev.pool, ttm); } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b8025507a9e4..9d04d1b36434 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -56,7 +56,6 @@ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_memory.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/drm_audio_component.h> -- cgit v1.2.3 From 8567d51555c12d169c4e0f796030051fff1c318d Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Sat, 24 Oct 2020 13:16:05 +0200 Subject: drm/vmwgfx: switch to new allocator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It should be able to handle all cases now. 
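The per-call branching on dev_priv->map_mode disappears because the decision is already made once at device init; from the vmwgfx_drv.c hunk in the earlier wire-up patch:

	ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
				 dev_priv->dev->dev,
				 dev->anon_inode->i_mapping,
				 &dev_priv->vma_manager,
				 dev_priv->map_mode == vmw_dma_alloc_coherent,
				 false);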
Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397083/?series=83051&rev=1 --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4 ---- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 36 +++--------------------------- 2 files changed, 3 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7bd1be26afe4..b3a60959b5d5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -595,10 +595,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) else dev_priv->map_mode = vmw_dma_map_populate; - if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) && - (dev_priv->map_mode == vmw_dma_alloc_coherent)) - return -EINVAL; - DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 92a5d245ff4d..aa0cdf476a53 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -28,7 +28,6 @@ #include "vmwgfx_drv.h" #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_page_alloc.h> static const struct ttm_place vram_placement_flags = { .fpfn = 0, @@ -582,30 +581,11 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) static int vmw_ttm_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { - struct vmw_ttm_tt *vmw_tt = - container_of(ttm, struct vmw_ttm_tt, dma_ttm); - struct vmw_private *dev_priv = vmw_tt->dev_priv; - struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); - int ret; - + /* TODO: maybe completely drop this ? 
*/ if (ttm_tt_is_populated(ttm)) return 0; - if (dev_priv->map_mode == vmw_dma_alloc_coherent) { - size_t size = - ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); - ret = ttm_mem_global_alloc(glob, size, ctx); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev, - ctx); - if (unlikely(ret != 0)) - ttm_mem_global_free(glob, size); - } else - ret = ttm_pool_populate(ttm, ctx); - - return ret; + return ttm_pool_alloc(&bdev->pool, ttm, ctx); } static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, @@ -613,9 +593,6 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, { struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, dma_ttm); - struct vmw_private *dev_priv = vmw_tt->dev_priv; - struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); - if (vmw_tt->mob) { vmw_mob_destroy(vmw_tt->mob); @@ -623,14 +600,7 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, } vmw_ttm_unmap_dma(vmw_tt); - if (dev_priv->map_mode == vmw_dma_alloc_coherent) { - size_t size = - ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); - - ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev); - ttm_mem_global_free(glob, size); - } else - ttm_pool_unpopulate(ttm); + ttm_pool_free(&bdev->pool, ttm); } static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, -- cgit v1.2.3 From f9b2c9e361f661763151078e9d8aa0cd3bbc25d4 Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Sat, 24 Oct 2020 13:15:35 +0200 Subject: drm/qxl: drop ttm_page_alloc.h include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed as far as I can see. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397084/?series=83051&rev=1 --- drivers/gpu/drm/qxl/qxl_ttm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index d8ecfb8b3193..a80d59634143 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -32,7 +32,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/ttm/ttm_placement.h> #include "qxl_drv.h" -- cgit v1.2.3 From fbf1c39cab37590e9b8c5a19d3b129e7fe86fee3 Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Sat, 24 Oct 2020 13:16:59 +0200 Subject: drm/vram_helpers: drop ttm_page_alloc.h include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed as far as I can see. 
Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397085/?series=83051&rev=1 --- drivers/gpu/drm/drm_gem_vram_helper.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 683762b2f9a8..16d68c04ea5d 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -15,7 +15,6 @@ #include <drm/drm_plane.h> #include <drm/drm_prime.h> #include <drm/drm_simple_kms_helper.h> -#include <drm/ttm/ttm_page_alloc.h> static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; -- cgit v1.2.3 From 256dd44bd897055571c131703afdd02b2e4f1f29 Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Sat, 24 Oct 2020 13:17:49 +0200 Subject: drm/ttm: nuke old page allocator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not used any more. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Dave Airlie <airlied@redhat.com> Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com> Tested-by: Huang Rui <ray.huang@amd.com> Link: https://patchwork.freedesktop.org/patch/397087/?series=83051&rev=1 --- drivers/gpu/drm/Kconfig | 7 - drivers/gpu/drm/ttm/Makefile | 3 +- drivers/gpu/drm/ttm/ttm_agp_backend.c | 1 - drivers/gpu/drm/ttm/ttm_bo.c | 1 - drivers/gpu/drm/ttm/ttm_memory.c | 7 +- drivers/gpu/drm/ttm/ttm_page_alloc.c | 1176 ---------------------------- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 1226 ------------------------------ drivers/gpu/drm/ttm/ttm_set_memory.h | 84 -- drivers/gpu/drm/ttm/ttm_tt.c | 4 - include/drm/ttm/ttm_bo_driver.h | 2 - include/drm/ttm/ttm_page_alloc.h | 122 --- include/drm/ttm/ttm_tt.h | 2 - 12 files changed, 2 insertions(+), 2633 deletions(-) delete mode 100644 drivers/gpu/drm/ttm/ttm_page_alloc.c delete mode 100644 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c delete mode 100644 drivers/gpu/drm/ttm/ttm_set_memory.h delete mode 100644 include/drm/ttm/ttm_page_alloc.h diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 32257189e09b..64376dd298ed 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -182,13 +182,6 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it. 
-config DRM_TTM_DMA_PAGE_POOL - bool - depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU) - default y - help - Choose this if you need the TTM dma page pool - config DRM_VRAM_HELPER tristate depends on DRM diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 0096bacbcf32..b6f5f87b270f 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,9 +4,8 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \ + ttm_execbuf_util.o ttm_range_manager.o \ ttm_resource.o ttm_pool.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o -ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 4f76c9287159..03c86628e4ac 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -34,7 +34,6 @@ #include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/ttm/ttm_placement.h> #include <linux/agp_backend.h> #include <linux/module.h> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e9f91cfce5e9..530c9a4af09f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1333,7 +1333,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = mapping; - bdev->need_dma32 = use_dma32; mutex_lock(&ttm_global_mutex); list_add_tail(&bdev->device_list, &glob->device_list); mutex_unlock(&ttm_global_mutex); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index b15a91c90271..f9a90bfaa3c1 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -30,7 +30,6 @@ #include <drm/ttm/ttm_memory.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/wait.h> @@ -452,9 +451,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) pr_info("Zone %7s: Available graphics memory: %llu KiB\n", zone->name, (unsigned long long)zone->max_mem >> 10); } - ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); - ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); - ttm_pool_mgr_init(glob->zone_kernel->max_mem / (2 * PAGE_SIZE)); + ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE)); return 0; out_no_zone: ttm_mem_global_release(glob); @@ -467,8 +464,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) unsigned int i; /* let the page allocator first stop the shrink work. */ - ttm_page_alloc_fini(); - ttm_dma_page_alloc_fini(); ttm_pool_mgr_fini(); flush_workqueue(glob->swap_queue); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c deleted file mode 100644 index 29e6c29ad60e..000000000000 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ /dev/null @@ -1,1176 +0,0 @@ -/* - * Copyright (c) Red Hat Inc. 
- - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sub license, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie <airlied@redhat.com> - * Jerome Glisse <jglisse@redhat.com> - * Pauli Nieminen <suokkos@gmail.com> - */ - -/* simple list based uncached page pool - * - Pool collects resently freed pages for reuse - * - Use page->lru to keep a free list - * - doesn't track currently in use pages - */ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/highmem.h> -#include <linux/mm_types.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/seq_file.h> /* for seq_printf */ -#include <linux/slab.h> -#include <linux/dma-mapping.h> - -#include <linux/atomic.h> - -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_page_alloc.h> - -#include "ttm_set_memory.h" - -#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) -#define SMALL_ALLOCATION 16 -#define FREE_ALL_PAGES (~0U) -/* times are in msecs */ -#define PAGE_FREE_INTERVAL 1000 - -/** - * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. - * - * @lock: Protects the shared pool from concurrnet access. Must be used with - * irqsave/irqrestore variants because pool allocator maybe called from - * delayed work. - * @fill_lock: Prevent concurrent calls to fill. - * @list: Pool of free uc/wc pages for fast reuse. - * @gfp_flags: Flags to pass for alloc_page. - * @npages: Number of pages in pool. - */ -struct ttm_page_pool { - spinlock_t lock; - bool fill_lock; - struct list_head list; - gfp_t gfp_flags; - unsigned npages; - char *name; - unsigned long nfrees; - unsigned long nrefills; - unsigned int order; -}; - -/** - * Limits for the pool. They are handled without locks because only place where - * they may change is in sysfs store. They won't have immediate effect anyway - * so forcing serialization to access them is pointless. - */ - -struct ttm_pool_opts { - unsigned alloc_size; - unsigned max_size; - unsigned small; -}; - -#define NUM_POOLS 6 - -/** - * struct ttm_pool_manager - Holds memory pools for fst allocation - * - * Manager is read only object for pool code so it doesn't need locking. - * - * @free_interval: minimum number of jiffies between freeing pages from pool. - * @page_alloc_inited: reference counting for pool allocation. - * @work: Work that is used to shrink the pool. Work is only run when there is - * some pages to free. - * @small_allocation: Limit in number of pages what is small allocation. 
- * - * @pools: All pool objects in use. - **/ -struct ttm_pool_manager { - struct kobject kobj; - struct shrinker mm_shrink; - struct ttm_pool_opts options; - - union { - struct ttm_page_pool pools[NUM_POOLS]; - struct { - struct ttm_page_pool wc_pool; - struct ttm_page_pool uc_pool; - struct ttm_page_pool wc_pool_dma32; - struct ttm_page_pool uc_pool_dma32; - struct ttm_page_pool wc_pool_huge; - struct ttm_page_pool uc_pool_huge; - } ; - }; -}; - -static struct attribute ttm_page_pool_max = { - .name = "pool_max_size", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_page_pool_small = { - .name = "pool_small_allocation", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_page_pool_alloc_size = { - .name = "pool_allocation_size", - .mode = S_IRUGO | S_IWUSR -}; - -static struct attribute *ttm_pool_attrs[] = { - &ttm_page_pool_max, - &ttm_page_pool_small, - &ttm_page_pool_alloc_size, - NULL -}; - -static void ttm_pool_kobj_release(struct kobject *kobj) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - kfree(m); -} - -static ssize_t ttm_pool_store(struct kobject *kobj, - struct attribute *attr, const char *buffer, size_t size) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - int chars; - unsigned val; - chars = sscanf(buffer, "%u", &val); - if (chars == 0) - return size; - - /* Convert kb to number of pages */ - val = val / (PAGE_SIZE >> 10); - - if (attr == &ttm_page_pool_max) - m->options.max_size = val; - else if (attr == &ttm_page_pool_small) - m->options.small = val; - else if (attr == &ttm_page_pool_alloc_size) { - if (val > NUM_PAGES_TO_ALLOC*8) { - pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); - return size; - } else if (val > NUM_PAGES_TO_ALLOC) { - pr_warn("Setting allocation size to larger than %lu is not recommended\n", - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); - } - m->options.alloc_size = val; - } - - return size; -} - -static ssize_t ttm_pool_show(struct kobject *kobj, - struct attribute *attr, char *buffer) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - unsigned val = 0; - - if (attr == &ttm_page_pool_max) - val = m->options.max_size; - else if (attr == &ttm_page_pool_small) - val = m->options.small; - else if (attr == &ttm_page_pool_alloc_size) - val = m->options.alloc_size; - - val = val * (PAGE_SIZE >> 10); - - return snprintf(buffer, PAGE_SIZE, "%u\n", val); -} - -static const struct sysfs_ops ttm_pool_sysfs_ops = { - .show = &ttm_pool_show, - .store = &ttm_pool_store, -}; - -static struct kobj_type ttm_pool_kobj_type = { - .release = &ttm_pool_kobj_release, - .sysfs_ops = &ttm_pool_sysfs_ops, - .default_attrs = ttm_pool_attrs, -}; - -static struct ttm_pool_manager *_manager; - -/** - * Select the right pool or requested caching state and ttm flags. */ -static struct ttm_page_pool *ttm_get_pool(int flags, bool huge, - enum ttm_caching cstate) -{ - int pool_index; - - if (cstate == ttm_cached) - return NULL; - - if (cstate == ttm_write_combined) - pool_index = 0x0; - else - pool_index = 0x1; - - if (flags & TTM_PAGE_FLAG_DMA32) { - if (huge) - return NULL; - pool_index |= 0x2; - - } else if (huge) { - pool_index |= 0x4; - } - - return &_manager->pools[pool_index]; -} - -/* set memory back to wb and free the pages. 
*/ -static void ttm_pages_put(struct page *pages[], unsigned npages, - unsigned int order) -{ - unsigned int i, pages_nr = (1 << order); - - if (order == 0) { - if (ttm_set_pages_array_wb(pages, npages)) - pr_err("Failed to set %d pages to wb!\n", npages); - } - - for (i = 0; i < npages; ++i) { - if (order > 0) { - if (ttm_set_pages_wb(pages[i], pages_nr)) - pr_err("Failed to set %d pages to wb!\n", pages_nr); - } - __free_pages(pages[i], order); - } -} - -static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, - unsigned freed_pages) -{ - pool->npages -= freed_pages; - pool->nfrees += freed_pages; -} - -/** - * Free pages from pool. - * - * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC - * number of pages in one go. - * - * @pool: to free the pages from - * @free_all: If set to true will free all pages in pool - * @use_static: Safe to use static buffer - **/ -static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, - bool use_static) -{ - static struct page *static_buf[NUM_PAGES_TO_ALLOC]; - unsigned long irq_flags; - struct page *p; - struct page **pages_to_free; - unsigned freed_pages = 0, - npages_to_free = nr_free; - - if (NUM_PAGES_TO_ALLOC < nr_free) - npages_to_free = NUM_PAGES_TO_ALLOC; - - if (use_static) - pages_to_free = static_buf; - else - pages_to_free = kmalloc_array(npages_to_free, - sizeof(struct page *), - GFP_KERNEL); - if (!pages_to_free) { - pr_debug("Failed to allocate memory for pool free operation\n"); - return 0; - } - -restart: - spin_lock_irqsave(&pool->lock, irq_flags); - - list_for_each_entry_reverse(p, &pool->list, lru) { - if (freed_pages >= npages_to_free) - break; - - pages_to_free[freed_pages++] = p; - /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ - if (freed_pages >= NUM_PAGES_TO_ALLOC) { - /* remove range of pages from the pool */ - __list_del(p->lru.prev, &pool->list); - - ttm_pool_update_free_locked(pool, freed_pages); - /** - * Because changing page caching is costly - * we unlock the pool to prevent stalling. - */ - spin_unlock_irqrestore(&pool->lock, irq_flags); - - ttm_pages_put(pages_to_free, freed_pages, pool->order); - if (likely(nr_free != FREE_ALL_PAGES)) - nr_free -= freed_pages; - - if (NUM_PAGES_TO_ALLOC >= nr_free) - npages_to_free = nr_free; - else - npages_to_free = NUM_PAGES_TO_ALLOC; - - freed_pages = 0; - - /* free all so restart the processing */ - if (nr_free) - goto restart; - - /* Not allowed to fall through or break because - * following context is inside spinlock while we are - * outside here. - */ - goto out; - - } - } - - /* remove range of pages from the pool */ - if (freed_pages) { - __list_del(&p->lru, &pool->list); - - ttm_pool_update_free_locked(pool, freed_pages); - nr_free -= freed_pages; - } - - spin_unlock_irqrestore(&pool->lock, irq_flags); - - if (freed_pages) - ttm_pages_put(pages_to_free, freed_pages, pool->order); -out: - if (pages_to_free != static_buf) - kfree(pages_to_free); - return nr_free; -} - -/** - * Callback for mm to request pool to reduce number of page held. - * - * XXX: (dchinner) Deadlock warning! - * - * This code is crying out for a shrinker per pool.... 
- */ -static unsigned long -ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) -{ - static DEFINE_MUTEX(lock); - static unsigned start_pool; - unsigned i; - unsigned pool_offset; - struct ttm_page_pool *pool; - int shrink_pages = sc->nr_to_scan; - unsigned long freed = 0; - unsigned int nr_free_pool; - - if (!mutex_trylock(&lock)) - return SHRINK_STOP; - pool_offset = ++start_pool % NUM_POOLS; - /* select start pool in round robin fashion */ - for (i = 0; i < NUM_POOLS; ++i) { - unsigned nr_free = shrink_pages; - unsigned page_nr; - - if (shrink_pages == 0) - break; - - pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; - page_nr = (1 << pool->order); - /* OK to use static buffer since global mutex is held. */ - nr_free_pool = roundup(nr_free, page_nr) >> pool->order; - shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true); - freed += (nr_free_pool - shrink_pages) << pool->order; - if (freed >= sc->nr_to_scan) - break; - shrink_pages <<= pool->order; - } - mutex_unlock(&lock); - return freed; -} - - -static unsigned long -ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -{ - unsigned i; - unsigned long count = 0; - struct ttm_page_pool *pool; - - for (i = 0; i < NUM_POOLS; ++i) { - pool = &_manager->pools[i]; - count += (pool->npages << pool->order); - } - - return count; -} - -static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) -{ - manager->mm_shrink.count_objects = ttm_pool_shrink_count; - manager->mm_shrink.scan_objects = ttm_pool_shrink_scan; - manager->mm_shrink.seeks = 1; - return register_shrinker(&manager->mm_shrink); -} - -static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) -{ - unregister_shrinker(&manager->mm_shrink); -} - -static int ttm_set_pages_caching(struct page **pages, - enum ttm_caching cstate, unsigned cpages) -{ - int r = 0; - /* Set page caching */ - switch (cstate) { - case ttm_uncached: - r = ttm_set_pages_array_uc(pages, cpages); - if (r) - pr_err("Failed to set %d pages to uc!\n", cpages); - break; - case ttm_write_combined: - r = ttm_set_pages_array_wc(pages, cpages); - if (r) - pr_err("Failed to set %d pages to wc!\n", cpages); - break; - default: - break; - } - return r; -} - -/** - * Free pages the pages that failed to change the caching state. If there is - * any pages that have changed their caching state already put them to the - * pool. - */ -static void ttm_handle_caching_failure(struct page **failed_pages, - unsigned cpages) -{ - unsigned i; - - /* Failed pages have to be freed */ - for (i = 0; i < cpages; ++i) { - list_del(&failed_pages[i]->lru); - __free_page(failed_pages[i]); - } -} - -/** - * Allocate new pages with correct caching. - * - * This function is reentrant if caller updates count depending on number of - * pages returned in pages array. 
- */ -static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, - int ttm_flags, enum ttm_caching cstate, - unsigned count, unsigned order) -{ - struct page **caching_array; - struct page *p; - int r = 0; - unsigned i, j, cpages; - unsigned npages = 1 << order; - unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC); - - /* allocate array for page caching change */ - caching_array = kmalloc_array(max_cpages, sizeof(struct page *), - GFP_KERNEL); - - if (!caching_array) { - pr_debug("Unable to allocate table for new pages\n"); - return -ENOMEM; - } - - for (i = 0, cpages = 0; i < count; ++i) { - p = alloc_pages(gfp_flags, order); - - if (!p) { - pr_debug("Unable to get page %u\n", i); - - /* store already allocated pages in the pool after - * setting the caching state */ - if (cpages) { - r = ttm_set_pages_caching(caching_array, - cstate, cpages); - if (r) - ttm_handle_caching_failure(caching_array, - cpages); - } - r = -ENOMEM; - goto out; - } - - list_add(&p->lru, pages); - -#ifdef CONFIG_HIGHMEM - /* gfp flags of highmem page should never be dma32 so we - * we should be fine in such case - */ - if (PageHighMem(p)) - continue; - -#endif - for (j = 0; j < npages; ++j) { - caching_array[cpages++] = p++; - if (cpages == max_cpages) { - - r = ttm_set_pages_caching(caching_array, - cstate, cpages); - if (r) { - ttm_handle_caching_failure(caching_array, - cpages); - goto out; - } - cpages = 0; - } - } - } - - if (cpages) { - r = ttm_set_pages_caching(caching_array, cstate, cpages); - if (r) - ttm_handle_caching_failure(caching_array, cpages); - } -out: - kfree(caching_array); - - return r; -} - -/** - * Fill the given pool if there aren't enough pages and the requested number of - * pages is small. - */ -static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, - enum ttm_caching cstate, - unsigned count, unsigned long *irq_flags) -{ - struct page *p; - int r; - unsigned cpages = 0; - /** - * Only allow one pool fill operation at a time. - * If pool doesn't have enough pages for the allocation new pages are - * allocated from outside of pool. - */ - if (pool->fill_lock) - return; - - pool->fill_lock = true; - - /* If allocation request is small and there are not enough - * pages in a pool we fill the pool up first. */ - if (count < _manager->options.small - && count > pool->npages) { - struct list_head new_pages; - unsigned alloc_size = _manager->options.alloc_size; - - /** - * Can't change page caching if in irqsave context. We have to - * drop the pool->lock. - */ - spin_unlock_irqrestore(&pool->lock, *irq_flags); - - INIT_LIST_HEAD(&new_pages); - r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, - cstate, alloc_size, 0); - spin_lock_irqsave(&pool->lock, *irq_flags); - - if (!r) { - list_splice(&new_pages, &pool->list); - ++pool->nrefills; - pool->npages += alloc_size; - } else { - pr_debug("Failed to fill pool (%p)\n", pool); - /* If we have any pages left put them to the pool. */ - list_for_each_entry(p, &new_pages, lru) { - ++cpages; - } - list_splice(&new_pages, &pool->list); - pool->npages += cpages; - } - - } - pool->fill_lock = false; -} - -/** - * Allocate pages from the pool and put them on the return list. - * - * @return zero for success or negative error code. 
- */ -static int ttm_page_pool_get_pages(struct ttm_page_pool *pool, - struct list_head *pages, - int ttm_flags, - enum ttm_caching cstate, - unsigned count, unsigned order) -{ - unsigned long irq_flags; - struct list_head *p; - unsigned i; - int r = 0; - - spin_lock_irqsave(&pool->lock, irq_flags); - if (!order) - ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, - &irq_flags); - - if (count >= pool->npages) { - /* take all pages from the pool */ - list_splice_init(&pool->list, pages); - count -= pool->npages; - pool->npages = 0; - goto out; - } - /* find the last pages to include for requested number of pages. Split - * pool to begin and halve it to reduce search space. */ - if (count <= pool->npages/2) { - i = 0; - list_for_each(p, &pool->list) { - if (++i == count) - break; - } - } else { - i = pool->npages + 1; - list_for_each_prev(p, &pool->list) { - if (--i == count) - break; - } - } - /* Cut 'count' number of pages from the pool */ - list_cut_position(pages, &pool->list, p); - pool->npages -= count; - count = 0; -out: - spin_unlock_irqrestore(&pool->lock, irq_flags); - - /* clear the pages coming from the pool if requested */ - if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) { - struct page *page; - - list_for_each_entry(page, pages, lru) { - if (PageHighMem(page)) - clear_highpage(page); - else - clear_page(page_address(page)); - } - } - - /* If pool didn't have enough pages allocate new one. */ - if (count) { - gfp_t gfp_flags = pool->gfp_flags; - - /* set zero flag for page allocation if required */ - if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - - if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY) - gfp_flags |= __GFP_RETRY_MAYFAIL; - - /* ttm_alloc_new_pages doesn't reference pool so we can run - * multiple requests in parallel. - **/ - r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate, - count, order); - } - - return r; -} - -/* Put all pages in pages list to correct pool to wait for reuse */ -static void ttm_put_pages(struct page **pages, unsigned npages, int flags, - enum ttm_caching cstate) -{ - struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); -#endif - unsigned long irq_flags; - unsigned i; - - if (pool == NULL) { - /* No pool for this memory type so free the pages */ - i = 0; - while (i < npages) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - struct page *p = pages[i]; -#endif - unsigned order = 0, j; - - if (!pages[i]) { - ++i; - continue; - } - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (!(flags & TTM_PAGE_FLAG_DMA32) && - (npages - i) >= HPAGE_PMD_NR) { - for (j = 1; j < HPAGE_PMD_NR; ++j) - if (++p != pages[i + j]) - break; - - if (j == HPAGE_PMD_NR) - order = HPAGE_PMD_ORDER; - } -#endif - - if (page_count(pages[i]) != 1) - pr_err("Erroneous page count. 
Leaking pages.\n"); - __free_pages(pages[i], order); - - j = 1 << order; - while (j) { - pages[i++] = NULL; - --j; - } - } - return; - } - - i = 0; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (huge) { - unsigned max_size, n2free; - - spin_lock_irqsave(&huge->lock, irq_flags); - while ((npages - i) >= HPAGE_PMD_NR) { - struct page *p = pages[i]; - unsigned j; - - if (!p) - break; - - for (j = 1; j < HPAGE_PMD_NR; ++j) - if (++p != pages[i + j]) - break; - - if (j != HPAGE_PMD_NR) - break; - - list_add_tail(&pages[i]->lru, &huge->list); - - for (j = 0; j < HPAGE_PMD_NR; ++j) - pages[i++] = NULL; - huge->npages++; - } - - /* Check that we don't go over the pool limit */ - max_size = _manager->options.max_size; - max_size /= HPAGE_PMD_NR; - if (huge->npages > max_size) - n2free = huge->npages - max_size; - else - n2free = 0; - spin_unlock_irqrestore(&huge->lock, irq_flags); - if (n2free) - ttm_page_pool_free(huge, n2free, false); - } -#endif - - spin_lock_irqsave(&pool->lock, irq_flags); - while (i < npages) { - if (pages[i]) { - if (page_count(pages[i]) != 1) - pr_err("Erroneous page count. Leaking pages.\n"); - list_add_tail(&pages[i]->lru, &pool->list); - pages[i] = NULL; - pool->npages++; - } - ++i; - } - /* Check that we don't go over the pool limit */ - npages = 0; - if (pool->npages > _manager->options.max_size) { - npages = pool->npages - _manager->options.max_size; - /* free at least NUM_PAGES_TO_ALLOC number of pages - * to reduce calls to set_memory_wb */ - if (npages < NUM_PAGES_TO_ALLOC) - npages = NUM_PAGES_TO_ALLOC; - } - spin_unlock_irqrestore(&pool->lock, irq_flags); - if (npages) - ttm_page_pool_free(pool, npages, false); -} - -/* - * On success pages list will hold count number of correctly - * cached pages. - */ -static int ttm_get_pages(struct page **pages, unsigned npages, int flags, - enum ttm_caching cstate) -{ - struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); -#endif - struct list_head plist; - struct page *p = NULL; - unsigned count, first; - int r; - - /* No pool for cached pages */ - if (pool == NULL) { - gfp_t gfp_flags = GFP_USER; - unsigned i; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - unsigned j; -#endif - - /* set zero flag for page allocation if required */ - if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - - if (flags & TTM_PAGE_FLAG_NO_RETRY) - gfp_flags |= __GFP_RETRY_MAYFAIL; - - if (flags & TTM_PAGE_FLAG_DMA32) - gfp_flags |= GFP_DMA32; - else - gfp_flags |= GFP_HIGHUSER; - - i = 0; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (!(gfp_flags & GFP_DMA32)) { - while (npages >= HPAGE_PMD_NR) { - gfp_t huge_flags = gfp_flags; - - huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | - __GFP_KSWAPD_RECLAIM; - huge_flags &= ~__GFP_MOVABLE; - huge_flags &= ~__GFP_COMP; - p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); - if (!p) - break; - - for (j = 0; j < HPAGE_PMD_NR; ++j) - pages[i++] = p++; - - npages -= HPAGE_PMD_NR; - } - } -#endif - - first = i; - while (npages) { - p = alloc_page(gfp_flags); - if (!p) { - pr_debug("Unable to allocate page\n"); - return -ENOMEM; - } - - /* Swap the pages if we detect consecutive order */ - if (i > first && pages[i - 1] == p - 1) - swap(p, pages[i - 1]); - - pages[i++] = p; - --npages; - } - return 0; - } - - count = 0; - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (huge && npages >= HPAGE_PMD_NR) { - INIT_LIST_HEAD(&plist); - ttm_page_pool_get_pages(huge, &plist, flags, cstate, - npages / HPAGE_PMD_NR, - 
HPAGE_PMD_ORDER); - - list_for_each_entry(p, &plist, lru) { - unsigned j; - - for (j = 0; j < HPAGE_PMD_NR; ++j) - pages[count++] = &p[j]; - } - } -#endif - - INIT_LIST_HEAD(&plist); - r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, - npages - count, 0); - - first = count; - list_for_each_entry(p, &plist, lru) { - struct page *tmp = p; - - /* Swap the pages if we detect consecutive order */ - if (count > first && pages[count - 1] == tmp - 1) - swap(tmp, pages[count - 1]); - pages[count++] = tmp; - } - - if (r) { - /* If there is any pages in the list put them back to - * the pool. - */ - pr_debug("Failed to allocate extra pages for large request\n"); - ttm_put_pages(pages, count, flags, cstate); - return r; - } - - return 0; -} - -static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, - char *name, unsigned int order) -{ - spin_lock_init(&pool->lock); - pool->fill_lock = false; - INIT_LIST_HEAD(&pool->list); - pool->npages = pool->nfrees = 0; - pool->gfp_flags = flags; - pool->name = name; - pool->order = order; -} - -int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) -{ - int ret; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - unsigned order = HPAGE_PMD_ORDER; -#else - unsigned order = 0; -#endif - - WARN_ON(_manager); - - pr_info("Initializing pool allocator\n"); - - _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); - if (!_manager) - return -ENOMEM; - - ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0); - - ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0); - - ttm_page_pool_init_locked(&_manager->wc_pool_dma32, - GFP_USER | GFP_DMA32, "wc dma", 0); - - ttm_page_pool_init_locked(&_manager->uc_pool_dma32, - GFP_USER | GFP_DMA32, "uc dma", 0); - - ttm_page_pool_init_locked(&_manager->wc_pool_huge, - (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | - __GFP_KSWAPD_RECLAIM) & - ~(__GFP_MOVABLE | __GFP_COMP), - "wc huge", order); - - ttm_page_pool_init_locked(&_manager->uc_pool_huge, - (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | - __GFP_KSWAPD_RECLAIM) & - ~(__GFP_MOVABLE | __GFP_COMP) - , "uc huge", order); - - _manager->options.max_size = max_pages; - _manager->options.small = SMALL_ALLOCATION; - _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; - - ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, - &glob->kobj, "pool"); - if (unlikely(ret != 0)) - goto error; - - ret = ttm_pool_mm_shrink_init(_manager); - if (unlikely(ret != 0)) - goto error; - return 0; - -error: - kobject_put(&_manager->kobj); - _manager = NULL; - return ret; -} - -void ttm_page_alloc_fini(void) -{ - int i; - - pr_info("Finalizing pool allocator\n"); - ttm_pool_mm_shrink_fini(_manager); - - /* OK to use static buffer since global mutex is no longer used. 
*/ - for (i = 0; i < NUM_POOLS; ++i) - ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); - - kobject_put(&_manager->kobj); - _manager = NULL; -} - -static void -ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) -{ - struct ttm_mem_global *mem_glob = &ttm_mem_glob; - unsigned i; - - if (mem_count_update == 0) - goto put_pages; - - for (i = 0; i < mem_count_update; ++i) { - if (!ttm->pages[i]) - continue; - - ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE); - } - -put_pages: - ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching); -} - -int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) -{ - struct ttm_mem_global *mem_glob = &ttm_mem_glob; - unsigned i; - int ret; - - if (ttm_tt_is_populated(ttm)) - return 0; - - if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx)) - return -ENOMEM; - - ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching); - if (unlikely(ret != 0)) { - ttm_pool_unpopulate_helper(ttm, 0); - return ret; - } - - for (i = 0; i < ttm->num_pages; ++i) { - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], - PAGE_SIZE, ctx); - if (unlikely(ret != 0)) { - ttm_pool_unpopulate_helper(ttm, i); - return -ENOMEM; - } - } - - return 0; -} -EXPORT_SYMBOL(ttm_pool_populate); - -void ttm_pool_unpopulate(struct ttm_tt *ttm) -{ - ttm_pool_unpopulate_helper(ttm, ttm->num_pages); -} -EXPORT_SYMBOL(ttm_pool_unpopulate); - -int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt, - struct ttm_operation_ctx *ctx) -{ - unsigned i, j; - int r; - - r = ttm_pool_populate(tt, ctx); - if (r) - return r; - - for (i = 0; i < tt->num_pages; ++i) { - struct page *p = tt->pages[i]; - size_t num_pages = 1; - - for (j = i + 1; j < tt->num_pages; ++j) { - if (++p != tt->pages[j]) - break; - - ++num_pages; - } - - tt->dma_address[i] = dma_map_page(dev, tt->pages[i], - 0, num_pages * PAGE_SIZE, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, tt->dma_address[i])) { - while (i--) { - dma_unmap_page(dev, tt->dma_address[i], - PAGE_SIZE, DMA_BIDIRECTIONAL); - tt->dma_address[i] = 0; - } - ttm_pool_unpopulate(tt); - return -EFAULT; - } - - for (j = 1; j < num_pages; ++j) { - tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE; - ++i; - } - } - return 0; -} -EXPORT_SYMBOL(ttm_populate_and_map_pages); - -void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt) -{ - unsigned i, j; - - for (i = 0; i < tt->num_pages;) { - struct page *p = tt->pages[i]; - size_t num_pages = 1; - - if (!tt->dma_address[i] || !tt->pages[i]) { - ++i; - continue; - } - - for (j = i + 1; j < tt->num_pages; ++j) { - if (++p != tt->pages[j]) - break; - - ++num_pages; - } - - dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE, - DMA_BIDIRECTIONAL); - - i += num_pages; - } - ttm_pool_unpopulate(tt); -} -EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); - -int ttm_page_alloc_debugfs(struct seq_file *m, void *data) -{ - struct ttm_page_pool *p; - unsigned i; - char *h[] = {"pool", "refills", "pages freed", "size"}; - if (!_manager) { - seq_printf(m, "No pool allocator running.\n"); - return 0; - } - seq_printf(m, "%7s %12s %13s %8s\n", - h[0], h[1], h[2], h[3]); - for (i = 0; i < NUM_POOLS; ++i) { - p = &_manager->pools[i]; - - seq_printf(m, "%7s %12ld %13ld %8d\n", - p->name, p->nrefills, - p->nfrees, p->npages); - } - return 0; -} -EXPORT_SYMBOL(ttm_page_alloc_debugfs); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 
deleted file mode 100644 index c0353c25efd6..000000000000 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ /dev/null @@ -1,1226 +0,0 @@ -/* - * Copyright 2011 (c) Oracle Corp. - - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sub license, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> - */ - -/* - * A simple DMA pool losely based on dmapool.c. It has certain advantages - * over the DMA pools: - * - Pool collects resently freed pages for reuse (and hooks up to - * the shrinker). - * - Tracks currently in use pages - * - Tracks whether the page is UC, WB or cached (and reverts to WB - * when freed). - */ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/dma-mapping.h> -#include <linux/list.h> -#include <linux/seq_file.h> /* for seq_printf */ -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/highmem.h> -#include <linux/mm_types.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/atomic.h> -#include <linux/device.h> -#include <linux/kthread.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_page_alloc.h> - -#include "ttm_set_memory.h" - -#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) -#define SMALL_ALLOCATION 4 -#define FREE_ALL_PAGES (~0U) -#define VADDR_FLAG_HUGE_POOL 1UL -#define VADDR_FLAG_UPDATED_COUNT 2UL - -enum pool_type { - IS_UNDEFINED = 0, - IS_WC = 1 << 1, - IS_UC = 1 << 2, - IS_CACHED = 1 << 3, - IS_DMA32 = 1 << 4, - IS_HUGE = 1 << 5 -}; - -/* - * The pool structure. There are up to nine pools: - * - generic (not restricted to DMA32): - * - write combined, uncached, cached. - * - dma32 (up to 2^32 - so up 4GB): - * - write combined, uncached, cached. - * - huge (not restricted to DMA32): - * - write combined, uncached, cached. - * for each 'struct device'. The 'cached' is for pages that are actively used. - * The other ones can be shrunk by the shrinker API if neccessary. - * @pools: The 'struct device->dma_pools' link. - * @type: Type of the pool - * @lock: Protects the free_list from concurrnet access. Must be - * used with irqsave/irqrestore variants because pool allocator maybe called - * from delayed work. - * @free_list: Pool of pages that are free to be used. No order requirements. - * @dev: The device that is associated with these pools. - * @size: Size used during DMA allocation. - * @npages_free: Count of available pages for re-use. - * @npages_in_use: Count of pages that are in use. - * @nfrees: Stats when pool is shrinking. 
- * @nrefills: Stats when the pool is grown. - * @gfp_flags: Flags to pass for alloc_page. - * @name: Name of the pool. - * @dev_name: Name derieved from dev - similar to how dev_info works. - * Used during shutdown as the dev_info during release is unavailable. - */ -struct dma_pool { - struct list_head pools; /* The 'struct device->dma_pools link */ - enum pool_type type; - spinlock_t lock; - struct list_head free_list; - struct device *dev; - unsigned size; - unsigned npages_free; - unsigned npages_in_use; - unsigned long nfrees; /* Stats when shrunk. */ - unsigned long nrefills; /* Stats when grown. */ - gfp_t gfp_flags; - char name[13]; /* "cached dma32" */ - char dev_name[64]; /* Constructed from dev */ -}; - -/* - * The accounting page keeping track of the allocated page along with - * the DMA address. - * @page_list: The link to the 'page_list' in 'struct dma_pool'. - * @vaddr: The virtual address of the page and a flag if the page belongs to a - * huge pool - * @dma: The bus address of the page. If the page is not allocated - * via the DMA API, it will be -1. - */ -struct dma_page { - struct list_head page_list; - unsigned long vaddr; - struct page *p; - dma_addr_t dma; -}; - -/* - * Limits for the pool. They are handled without locks because only place where - * they may change is in sysfs store. They won't have immediate effect anyway - * so forcing serialization to access them is pointless. - */ - -struct ttm_pool_opts { - unsigned alloc_size; - unsigned max_size; - unsigned small; -}; - -/* - * Contains the list of all of the 'struct device' and their corresponding - * DMA pools. Guarded by _mutex->lock. - * @pools: The link to 'struct ttm_pool_manager->pools' - * @dev: The 'struct device' associated with the 'pool' - * @pool: The 'struct dma_pool' associated with the 'dev' - */ -struct device_pools { - struct list_head pools; - struct device *dev; - struct dma_pool *pool; -}; - -/* - * struct ttm_pool_manager - Holds memory pools for fast allocation - * - * @lock: Lock used when adding/removing from pools - * @pools: List of 'struct device' and 'struct dma_pool' tuples. - * @options: Limits for the pool. - * @npools: Total amount of pools in existence. 
- * @shrinker: The structure used by [un|]register_shrinker - */ -struct ttm_pool_manager { - struct mutex lock; - struct list_head pools; - struct ttm_pool_opts options; - unsigned npools; - struct shrinker mm_shrink; - struct kobject kobj; -}; - -static struct ttm_pool_manager *_manager; - -static struct attribute ttm_page_pool_max = { - .name = "pool_max_size", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_page_pool_small = { - .name = "pool_small_allocation", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_page_pool_alloc_size = { - .name = "pool_allocation_size", - .mode = S_IRUGO | S_IWUSR -}; - -static struct attribute *ttm_pool_attrs[] = { - &ttm_page_pool_max, - &ttm_page_pool_small, - &ttm_page_pool_alloc_size, - NULL -}; - -static void ttm_pool_kobj_release(struct kobject *kobj) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - kfree(m); -} - -static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t size) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - int chars; - unsigned val; - - chars = sscanf(buffer, "%u", &val); - if (chars == 0) - return size; - - /* Convert kb to number of pages */ - val = val / (PAGE_SIZE >> 10); - - if (attr == &ttm_page_pool_max) { - m->options.max_size = val; - } else if (attr == &ttm_page_pool_small) { - m->options.small = val; - } else if (attr == &ttm_page_pool_alloc_size) { - if (val > NUM_PAGES_TO_ALLOC*8) { - pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); - return size; - } else if (val > NUM_PAGES_TO_ALLOC) { - pr_warn("Setting allocation size to larger than %lu is not recommended\n", - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); - } - m->options.alloc_size = val; - } - - return size; -} - -static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr, - char *buffer) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - unsigned val = 0; - - if (attr == &ttm_page_pool_max) - val = m->options.max_size; - else if (attr == &ttm_page_pool_small) - val = m->options.small; - else if (attr == &ttm_page_pool_alloc_size) - val = m->options.alloc_size; - - val = val * (PAGE_SIZE >> 10); - - return snprintf(buffer, PAGE_SIZE, "%u\n", val); -} - -static const struct sysfs_ops ttm_pool_sysfs_ops = { - .show = &ttm_pool_show, - .store = &ttm_pool_store, -}; - -static struct kobj_type ttm_pool_kobj_type = { - .release = &ttm_pool_kobj_release, - .sysfs_ops = &ttm_pool_sysfs_ops, - .default_attrs = ttm_pool_attrs, -}; - -static int ttm_set_pages_caching(struct dma_pool *pool, - struct page **pages, unsigned cpages) -{ - int r = 0; - /* Set page caching */ - if (pool->type & IS_UC) { - r = ttm_set_pages_array_uc(pages, cpages); - if (r) - pr_err("%s: Failed to set %d pages to uc!\n", - pool->dev_name, cpages); - } - if (pool->type & IS_WC) { - r = ttm_set_pages_array_wc(pages, cpages); - if (r) - pr_err("%s: Failed to set %d pages to wc!\n", - pool->dev_name, cpages); - } - return r; -} - -static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) -{ - unsigned long attrs = 0; - dma_addr_t dma = d_page->dma; - d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; - if (pool->type & IS_HUGE) - attrs = DMA_ATTR_NO_WARN; - - dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs); - - kfree(d_page); - d_page = NULL; -} -static 
struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool) -{ - struct dma_page *d_page; - unsigned long attrs = 0; - void *vaddr; - - d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL); - if (!d_page) - return NULL; - - if (pool->type & IS_HUGE) - attrs = DMA_ATTR_NO_WARN; - - vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma, - pool->gfp_flags, attrs); - if (vaddr) { - if (is_vmalloc_addr(vaddr)) - d_page->p = vmalloc_to_page(vaddr); - else - d_page->p = virt_to_page(vaddr); - d_page->vaddr = (unsigned long)vaddr; - if (pool->type & IS_HUGE) - d_page->vaddr |= VADDR_FLAG_HUGE_POOL; - } else { - kfree(d_page); - d_page = NULL; - } - return d_page; -} -static enum pool_type ttm_to_type(int flags, enum ttm_caching cstate) -{ - enum pool_type type = IS_UNDEFINED; - - if (flags & TTM_PAGE_FLAG_DMA32) - type |= IS_DMA32; - if (cstate == ttm_cached) - type |= IS_CACHED; - else if (cstate == ttm_uncached) - type |= IS_UC; - else - type |= IS_WC; - - return type; -} - -static void ttm_pool_update_free_locked(struct dma_pool *pool, - unsigned freed_pages) -{ - pool->npages_free -= freed_pages; - pool->nfrees += freed_pages; - -} - -/* set memory back to wb and free the pages. */ -static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) -{ - struct page *page = d_page->p; - unsigned num_pages; - - /* Don't set WB on WB page pool. */ - if (!(pool->type & IS_CACHED)) { - num_pages = pool->size / PAGE_SIZE; - if (ttm_set_pages_wb(page, num_pages)) - pr_err("%s: Failed to set %d pages to wb!\n", - pool->dev_name, num_pages); - } - - list_del(&d_page->page_list); - __ttm_dma_free_page(pool, d_page); -} - -static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, - struct page *pages[], unsigned npages) -{ - struct dma_page *d_page, *tmp; - - if (pool->type & IS_HUGE) { - list_for_each_entry_safe(d_page, tmp, d_pages, page_list) - ttm_dma_page_put(pool, d_page); - - return; - } - - /* Don't set WB on WB page pool. */ - if (npages && !(pool->type & IS_CACHED) && - ttm_set_pages_array_wb(pages, npages)) - pr_err("%s: Failed to set %d pages to wb!\n", - pool->dev_name, npages); - - list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { - list_del(&d_page->page_list); - __ttm_dma_free_page(pool, d_page); - } -} - -/* - * Free pages from pool. - * - * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC - * number of pages in one go. 
- * - * @pool: to free the pages from - * @nr_free: If set to true will free all pages in pool - * @use_static: Safe to use static buffer - **/ -static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, - bool use_static) -{ - static struct page *static_buf[NUM_PAGES_TO_ALLOC]; - unsigned long irq_flags; - struct dma_page *dma_p, *tmp; - struct page **pages_to_free; - struct list_head d_pages; - unsigned freed_pages = 0, - npages_to_free = nr_free; - - if (NUM_PAGES_TO_ALLOC < nr_free) - npages_to_free = NUM_PAGES_TO_ALLOC; - - if (use_static) - pages_to_free = static_buf; - else - pages_to_free = kmalloc_array(npages_to_free, - sizeof(struct page *), - GFP_KERNEL); - - if (!pages_to_free) { - pr_debug("%s: Failed to allocate memory for pool free operation\n", - pool->dev_name); - return 0; - } - INIT_LIST_HEAD(&d_pages); -restart: - spin_lock_irqsave(&pool->lock, irq_flags); - - /* We picking the oldest ones off the list */ - list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list, - page_list) { - if (freed_pages >= npages_to_free) - break; - - /* Move the dma_page from one list to another. */ - list_move(&dma_p->page_list, &d_pages); - - pages_to_free[freed_pages++] = dma_p->p; - /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ - if (freed_pages >= NUM_PAGES_TO_ALLOC) { - - ttm_pool_update_free_locked(pool, freed_pages); - /** - * Because changing page caching is costly - * we unlock the pool to prevent stalling. - */ - spin_unlock_irqrestore(&pool->lock, irq_flags); - - ttm_dma_pages_put(pool, &d_pages, pages_to_free, - freed_pages); - - INIT_LIST_HEAD(&d_pages); - - if (likely(nr_free != FREE_ALL_PAGES)) - nr_free -= freed_pages; - - if (NUM_PAGES_TO_ALLOC >= nr_free) - npages_to_free = nr_free; - else - npages_to_free = NUM_PAGES_TO_ALLOC; - - freed_pages = 0; - - /* free all so restart the processing */ - if (nr_free) - goto restart; - - /* Not allowed to fall through or break because - * following context is inside spinlock while we are - * outside here. - */ - goto out; - - } - } - - /* remove range of pages from the pool */ - if (freed_pages) { - ttm_pool_update_free_locked(pool, freed_pages); - nr_free -= freed_pages; - } - - spin_unlock_irqrestore(&pool->lock, irq_flags); - - if (freed_pages) - ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); -out: - if (pages_to_free != static_buf) - kfree(pages_to_free); - return nr_free; -} - -static void ttm_dma_free_pool(struct device *dev, enum pool_type type) -{ - struct device_pools *p; - struct dma_pool *pool; - - if (!dev) - return; - - mutex_lock(&_manager->lock); - list_for_each_entry_reverse(p, &_manager->pools, pools) { - if (p->dev != dev) - continue; - pool = p->pool; - if (pool->type != type) - continue; - - list_del(&p->pools); - kfree(p); - _manager->npools--; - break; - } - list_for_each_entry_reverse(pool, &dev->dma_pools, pools) { - if (pool->type != type) - continue; - /* Takes a spinlock.. */ - /* OK to use static buffer since global mutex is held. */ - ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true); - WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); - /* This code path is called after _all_ references to the - * struct device has been dropped - so nobody should be - * touching it. In case somebody is trying to _add_ we are - * guarded by the mutex. */ - list_del(&pool->pools); - kfree(pool); - break; - } - mutex_unlock(&_manager->lock); -} - -/* - * On free-ing of the 'struct device' this deconstructor is run. 
- * Albeit the pool might have already been freed earlier. - */ -static void ttm_dma_pool_release(struct device *dev, void *res) -{ - struct dma_pool *pool = *(struct dma_pool **)res; - - if (pool) - ttm_dma_free_pool(dev, pool->type); -} - -static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data) -{ - return *(struct dma_pool **)res == match_data; -} - -static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags, - enum pool_type type) -{ - const char *n[] = {"wc", "uc", "cached", " dma32", "huge"}; - enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE}; - struct device_pools *sec_pool = NULL; - struct dma_pool *pool = NULL, **ptr; - unsigned i; - int ret = -ENODEV; - char *p; - - if (!dev) - return NULL; - - ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - ret = -ENOMEM; - - pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL, - dev_to_node(dev)); - if (!pool) - goto err_mem; - - sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL, - dev_to_node(dev)); - if (!sec_pool) - goto err_mem; - - INIT_LIST_HEAD(&sec_pool->pools); - sec_pool->dev = dev; - sec_pool->pool = pool; - - INIT_LIST_HEAD(&pool->free_list); - INIT_LIST_HEAD(&pool->pools); - spin_lock_init(&pool->lock); - pool->dev = dev; - pool->npages_free = pool->npages_in_use = 0; - pool->nfrees = 0; - pool->gfp_flags = flags; - if (type & IS_HUGE) -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - pool->size = HPAGE_PMD_SIZE; -#else - BUG(); -#endif - else - pool->size = PAGE_SIZE; - pool->type = type; - pool->nrefills = 0; - p = pool->name; - for (i = 0; i < ARRAY_SIZE(t); i++) { - if (type & t[i]) { - p += scnprintf(p, sizeof(pool->name) - (p - pool->name), - "%s", n[i]); - } - } - *p = 0; - /* We copy the name for pr_ calls b/c when dma_pool_destroy is called - * - the kobj->name has already been deallocated.*/ - snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s", - dev_driver_string(dev), dev_name(dev)); - mutex_lock(&_manager->lock); - /* You can get the dma_pool from either the global: */ - list_add(&sec_pool->pools, &_manager->pools); - _manager->npools++; - /* or from 'struct device': */ - list_add(&pool->pools, &dev->dma_pools); - mutex_unlock(&_manager->lock); - - *ptr = pool; - devres_add(dev, ptr); - - return pool; -err_mem: - devres_free(ptr); - kfree(sec_pool); - kfree(pool); - return ERR_PTR(ret); -} - -static struct dma_pool *ttm_dma_find_pool(struct device *dev, - enum pool_type type) -{ - struct dma_pool *pool, *tmp; - - if (type == IS_UNDEFINED) - return NULL; - - /* NB: We iterate on the 'struct dev' which has no spinlock, but - * it does have a kref which we have taken. The kref is taken during - * graphic driver loading - in the drm_pci_init it calls either - * pci_dev_get or pci_register_driver which both end up taking a kref - * on 'struct device'. - * - * On teardown, the graphic drivers end up quiescing the TTM (put_pages) - * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice - * thing is at that point of time there are no pages associated with the - * driver so this function will not be called. - */ - list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) - if (pool->type == type) - return pool; - return NULL; -} - -/* - * Free pages the pages that failed to change the caching state. If there - * are pages that have changed their caching state already put them to the - * pool. 
- */ -static void ttm_dma_handle_caching_failure(struct dma_pool *pool, - struct list_head *d_pages, - struct page **failed_pages, - unsigned cpages) -{ - struct dma_page *d_page, *tmp; - struct page *p; - unsigned i = 0; - - p = failed_pages[0]; - if (!p) - return; - /* Find the failed page. */ - list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { - if (d_page->p != p) - continue; - /* .. and then progress over the full list. */ - list_del(&d_page->page_list); - __ttm_dma_free_page(pool, d_page); - if (++i < cpages) - p = failed_pages[i]; - else - break; - } - -} - -/* - * Allocate 'count' pages, and put 'need' number of them on the - * 'pages' and as well on the 'dma_address' starting at 'dma_offset' offset. - * The full list of pages should also be on 'd_pages'. - * We return zero for success, and negative numbers as errors. - */ -static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, - struct list_head *d_pages, - unsigned count) -{ - struct page **caching_array; - struct dma_page *dma_p; - struct page *p; - int r = 0; - unsigned i, j, npages, cpages; - unsigned max_cpages = min(count, - (unsigned)(PAGE_SIZE/sizeof(struct page *))); - - /* allocate array for page caching change */ - caching_array = kmalloc_array(max_cpages, sizeof(struct page *), - GFP_KERNEL); - - if (!caching_array) { - pr_debug("%s: Unable to allocate table for new pages\n", - pool->dev_name); - return -ENOMEM; - } - - if (count > 1) - pr_debug("%s: (%s:%d) Getting %d pages\n", - pool->dev_name, pool->name, current->pid, count); - - for (i = 0, cpages = 0; i < count; ++i) { - dma_p = __ttm_dma_alloc_page(pool); - if (!dma_p) { - pr_debug("%s: Unable to get page %u\n", - pool->dev_name, i); - - /* store already allocated pages in the pool after - * setting the caching state */ - if (cpages) { - r = ttm_set_pages_caching(pool, caching_array, - cpages); - if (r) - ttm_dma_handle_caching_failure( - pool, d_pages, caching_array, - cpages); - } - r = -ENOMEM; - goto out; - } - p = dma_p->p; - list_add(&dma_p->page_list, d_pages); - -#ifdef CONFIG_HIGHMEM - /* gfp flags of highmem page should never be dma32 so we - * we should be fine in such case - */ - if (PageHighMem(p)) - continue; -#endif - - npages = pool->size / PAGE_SIZE; - for (j = 0; j < npages; ++j) { - caching_array[cpages++] = p + j; - if (cpages == max_cpages) { - /* Note: Cannot hold the spinlock */ - r = ttm_set_pages_caching(pool, caching_array, - cpages); - if (r) { - ttm_dma_handle_caching_failure( - pool, d_pages, caching_array, - cpages); - goto out; - } - cpages = 0; - } - } - } - - if (cpages) { - r = ttm_set_pages_caching(pool, caching_array, cpages); - if (r) - ttm_dma_handle_caching_failure(pool, d_pages, - caching_array, cpages); - } -out: - kfree(caching_array); - return r; -} - -/* - * @return count of pages still required to fulfill the request. - */ -static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, - unsigned long *irq_flags) -{ - unsigned count = _manager->options.small; - int r = pool->npages_free; - - if (count > pool->npages_free) { - struct list_head d_pages; - - INIT_LIST_HEAD(&d_pages); - - spin_unlock_irqrestore(&pool->lock, *irq_flags); - - /* Returns how many more are neccessary to fulfill the - * request. */ - r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count); - - spin_lock_irqsave(&pool->lock, *irq_flags); - if (!r) { - /* Add the fresh to the end.. 
*/ - list_splice(&d_pages, &pool->free_list); - ++pool->nrefills; - pool->npages_free += count; - r = count; - } else { - struct dma_page *d_page; - unsigned cpages = 0; - - pr_debug("%s: Failed to fill %s pool (r:%d)!\n", - pool->dev_name, pool->name, r); - - list_for_each_entry(d_page, &d_pages, page_list) { - cpages++; - } - list_splice_tail(&d_pages, &pool->free_list); - pool->npages_free += cpages; - r = cpages; - } - } - return r; -} - -/* - * The populate list is actually a stack (not that is matters as TTM - * allocates one page at a time. - * return dma_page pointer if success, otherwise NULL. - */ -static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool, - struct ttm_tt *ttm, - unsigned index) -{ - struct dma_page *d_page = NULL; - unsigned long irq_flags; - int count; - - spin_lock_irqsave(&pool->lock, irq_flags); - count = ttm_dma_page_pool_fill_locked(pool, &irq_flags); - if (count) { - d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); - ttm->pages[index] = d_page->p; - ttm->dma_address[index] = d_page->dma; - list_move_tail(&d_page->page_list, &ttm->pages_list); - pool->npages_in_use += 1; - pool->npages_free -= 1; - } - spin_unlock_irqrestore(&pool->lock, irq_flags); - return d_page; -} - -static gfp_t ttm_dma_pool_gfp_flags(struct ttm_tt *ttm, bool huge) -{ - gfp_t gfp_flags; - - if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) - gfp_flags = GFP_USER | GFP_DMA32; - else - gfp_flags = GFP_HIGHUSER; - if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - - if (huge) { - gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | - __GFP_KSWAPD_RECLAIM; - gfp_flags &= ~__GFP_MOVABLE; - gfp_flags &= ~__GFP_COMP; - } - - if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY) - gfp_flags |= __GFP_RETRY_MAYFAIL; - - return gfp_flags; -} - -/* - * On success pages list will hold count number of correctly - * cached pages. On failure will hold the negative return value (-ENOMEM, etc). 
- */ -int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev, - struct ttm_operation_ctx *ctx) -{ - struct ttm_mem_global *mem_glob = &ttm_mem_glob; - unsigned long num_pages = ttm->num_pages; - struct dma_pool *pool; - struct dma_page *d_page; - enum pool_type type; - unsigned i; - int ret; - - if (ttm_tt_is_populated(ttm)) - return 0; - - if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx)) - return -ENOMEM; - - INIT_LIST_HEAD(&ttm->pages_list); - i = 0; - - type = ttm_to_type(ttm->page_flags, ttm->caching); - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) - goto skip_huge; - - pool = ttm_dma_find_pool(dev, type | IS_HUGE); - if (!pool) { - gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, true); - - pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE); - if (IS_ERR_OR_NULL(pool)) - goto skip_huge; - } - - while (num_pages >= HPAGE_PMD_NR) { - unsigned j; - - d_page = ttm_dma_pool_get_pages(pool, ttm, i); - if (!d_page) - break; - - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], - pool->size, ctx); - if (unlikely(ret != 0)) { - ttm_dma_unpopulate(ttm, dev); - return -ENOMEM; - } - - d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT; - for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) { - ttm->pages[j] = ttm->pages[j - 1] + 1; - ttm->dma_address[j] = ttm->dma_address[j - 1] + - PAGE_SIZE; - } - - i += HPAGE_PMD_NR; - num_pages -= HPAGE_PMD_NR; - } - -skip_huge: -#endif - - pool = ttm_dma_find_pool(dev, type); - if (!pool) { - gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, false); - - pool = ttm_dma_pool_init(dev, gfp_flags, type); - if (IS_ERR_OR_NULL(pool)) - return -ENOMEM; - } - - while (num_pages) { - d_page = ttm_dma_pool_get_pages(pool, ttm, i); - if (!d_page) { - ttm_dma_unpopulate(ttm, dev); - return -ENOMEM; - } - - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], - pool->size, ctx); - if (unlikely(ret != 0)) { - ttm_dma_unpopulate(ttm, dev); - return -ENOMEM; - } - - d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT; - ++i; - --num_pages; - } - - return 0; -} -EXPORT_SYMBOL_GPL(ttm_dma_populate); - -/* Put all pages in pages list to correct pool to wait for reuse */ -void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev) -{ - struct ttm_mem_global *mem_glob = &ttm_mem_glob; - struct dma_pool *pool; - struct dma_page *d_page, *next; - enum pool_type type; - bool is_cached = false; - unsigned count, i, npages = 0; - unsigned long irq_flags; - - type = ttm_to_type(ttm->page_flags, ttm->caching); - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - pool = ttm_dma_find_pool(dev, type | IS_HUGE); - if (pool) { - count = 0; - list_for_each_entry_safe(d_page, next, &ttm->pages_list, - page_list) { - if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL)) - continue; - - count++; - if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) { - ttm_mem_global_free_page(mem_glob, d_page->p, - pool->size); - d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT; - } - ttm_dma_page_put(pool, d_page); - } - - spin_lock_irqsave(&pool->lock, irq_flags); - pool->npages_in_use -= count; - pool->nfrees += count; - spin_unlock_irqrestore(&pool->lock, irq_flags); - } -#endif - - pool = ttm_dma_find_pool(dev, type); - if (!pool) - return; - - is_cached = (ttm_dma_find_pool(pool->dev, - ttm_to_type(ttm->page_flags, ttm_cached)) == pool); - - /* make sure pages array match list and count number of pages */ - count = 0; - list_for_each_entry_safe(d_page, next, &ttm->pages_list, - page_list) { - ttm->pages[count] = d_page->p; - count++; - - if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) { - 
ttm_mem_global_free_page(mem_glob, d_page->p, - pool->size); - d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT; - } - - if (is_cached) - ttm_dma_page_put(pool, d_page); - } - - spin_lock_irqsave(&pool->lock, irq_flags); - pool->npages_in_use -= count; - if (is_cached) { - pool->nfrees += count; - } else { - pool->npages_free += count; - list_splice(&ttm->pages_list, &pool->free_list); - /* - * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages - * to free in order to minimize calls to set_memory_wb(). - */ - if (pool->npages_free >= (_manager->options.max_size + - NUM_PAGES_TO_ALLOC)) - npages = pool->npages_free - _manager->options.max_size; - } - spin_unlock_irqrestore(&pool->lock, irq_flags); - - INIT_LIST_HEAD(&ttm->pages_list); - for (i = 0; i < ttm->num_pages; i++) { - ttm->pages[i] = NULL; - ttm->dma_address[i] = 0; - } - - /* shrink pool if necessary (only on !is_cached pools)*/ - if (npages) - ttm_dma_page_pool_free(pool, npages, false); -} -EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); - -/** - * Callback for mm to request pool to reduce number of page held. - * - * XXX: (dchinner) Deadlock warning! - * - * I'm getting sadder as I hear more pathetical whimpers about needing per-pool - * shrinkers - */ -static unsigned long -ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) -{ - static unsigned start_pool; - unsigned idx = 0; - unsigned pool_offset; - unsigned shrink_pages = sc->nr_to_scan; - struct device_pools *p; - unsigned long freed = 0; - - if (list_empty(&_manager->pools)) - return SHRINK_STOP; - - if (!mutex_trylock(&_manager->lock)) - return SHRINK_STOP; - if (!_manager->npools) - goto out; - pool_offset = ++start_pool % _manager->npools; - list_for_each_entry(p, &_manager->pools, pools) { - unsigned nr_free; - - if (!p->dev) - continue; - if (shrink_pages == 0) - break; - /* Do it in round-robin fashion. */ - if (++idx < pool_offset) - continue; - nr_free = shrink_pages; - /* OK to use static buffer since global mutex is held. 
*/ - shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true); - freed += nr_free - shrink_pages; - - pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", - p->pool->dev_name, p->pool->name, current->pid, - nr_free, shrink_pages); - } -out: - mutex_unlock(&_manager->lock); - return freed; -} - -static unsigned long -ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -{ - struct device_pools *p; - unsigned long count = 0; - - if (!mutex_trylock(&_manager->lock)) - return 0; - list_for_each_entry(p, &_manager->pools, pools) - count += p->pool->npages_free; - mutex_unlock(&_manager->lock); - return count; -} - -static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager) -{ - manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count; - manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan; - manager->mm_shrink.seeks = 1; - return register_shrinker(&manager->mm_shrink); -} - -static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager) -{ - unregister_shrinker(&manager->mm_shrink); -} - -int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) -{ - int ret; - - WARN_ON(_manager); - - pr_info("Initializing DMA pool allocator\n"); - - _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); - if (!_manager) - return -ENOMEM; - - mutex_init(&_manager->lock); - INIT_LIST_HEAD(&_manager->pools); - - _manager->options.max_size = max_pages; - _manager->options.small = SMALL_ALLOCATION; - _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; - - /* This takes care of auto-freeing the _manager */ - ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, - &glob->kobj, "dma_pool"); - if (unlikely(ret != 0)) - goto error; - - ret = ttm_dma_pool_mm_shrink_init(_manager); - if (unlikely(ret != 0)) - goto error; - return 0; - -error: - kobject_put(&_manager->kobj); - _manager = NULL; - return ret; -} - -void ttm_dma_page_alloc_fini(void) -{ - struct device_pools *p, *t; - - pr_info("Finalizing DMA pool allocator\n"); - ttm_dma_pool_mm_shrink_fini(_manager); - - list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) { - dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name, - current->pid); - WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release, - ttm_dma_pool_match, p->pool)); - ttm_dma_free_pool(p->dev, p->pool->type); - } - kobject_put(&_manager->kobj); - _manager = NULL; -} - -int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) -{ - struct device_pools *p; - struct dma_pool *pool = NULL; - - if (!_manager) { - seq_printf(m, "No pool allocator running.\n"); - return 0; - } - seq_printf(m, " pool refills pages freed inuse available name\n"); - mutex_lock(&_manager->lock); - list_for_each_entry(p, &_manager->pools, pools) { - struct device *dev = p->dev; - if (!dev) - continue; - pool = p->pool; - seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n", - pool->name, pool->nrefills, - pool->nfrees, pool->npages_in_use, - pool->npages_free, - pool->dev_name); - } - mutex_unlock(&_manager->lock); - return 0; -} -EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs); diff --git a/drivers/gpu/drm/ttm/ttm_set_memory.h b/drivers/gpu/drm/ttm/ttm_set_memory.h deleted file mode 100644 index 2343c18a6133..000000000000 --- a/drivers/gpu/drm/ttm/ttm_set_memory.h +++ /dev/null @@ -1,84 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2018 Advanced Micro Devices, Inc. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ -/* - * Authors: Huang Rui <ray.huang@amd.com> - */ - -#ifndef TTM_SET_MEMORY -#define TTM_SET_MEMORY - -#include <linux/mm.h> - -#ifdef CONFIG_X86 - -#include <asm/set_memory.h> - -static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray) -{ - return set_pages_array_wb(pages, addrinarray); -} - -static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray) -{ - return set_pages_array_wc(pages, addrinarray); -} - -static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray) -{ - return set_pages_array_uc(pages, addrinarray); -} - -static inline int ttm_set_pages_wb(struct page *page, int numpages) -{ - return set_pages_wb(page, numpages); -} - -#else /* for CONFIG_X86 */ - -static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray) -{ - return 0; -} - -static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray) -{ - return 0; -} - -static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray) -{ - return 0; -} - -static inline int ttm_set_pages_wb(struct page *page, int numpages) -{ - return 0; -} - -#endif /* for CONFIG_X86 */ - -#endif diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 90054280cd8f..8861a74ac335 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -51,9 +51,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) if (bo->ttm) return 0; - if (bdev->need_dma32) - page_flags |= TTM_PAGE_FLAG_DMA32; - if (bdev->no_retry) page_flags |= TTM_PAGE_FLAG_NO_RETRY; @@ -141,7 +138,6 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm, ttm->dma_address = NULL; ttm->swap_storage = NULL; ttm->sg = bo->sg; - INIT_LIST_HEAD(&ttm->pages_list); ttm->caching = caching; } diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 45ae87640909..e9f683fa72dc 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -315,8 +315,6 @@ struct ttm_bo_device { struct delayed_work wq; - bool need_dma32; - bool no_retry; }; diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h deleted file mode 100644 index 8fa1e7df6213..000000000000 --- a/include/drm/ttm/ttm_page_alloc.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (c) Red Hat 
Inc. - - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sub license, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie <airlied@redhat.com> - * Jerome Glisse <jglisse@redhat.com> - */ -#ifndef TTM_PAGE_ALLOC -#define TTM_PAGE_ALLOC - -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_memory.h> - -struct device; - -/** - * Initialize pool allocator. - */ -int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); -/** - * Free pool allocator. - */ -void ttm_page_alloc_fini(void); - -/** - * ttm_pool_populate: - * - * @ttm: The struct ttm_tt to contain the backing pages. - * - * Add backing pages to all of @ttm - */ -int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); - -/** - * ttm_pool_unpopulate: - * - * @ttm: The struct ttm_tt which to free backing pages. - * - * Free all pages of @ttm - */ -void ttm_pool_unpopulate(struct ttm_tt *ttm); - -/** - * Populates and DMA maps pages to fullfil a ttm_dma_populate() request - */ -int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt, - struct ttm_operation_ctx *ctx); - -/** - * Unpopulates and DMA unmaps pages as part of a - * ttm_dma_unpopulate() request */ -void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt); - -/** - * Output the state of pools to debugfs file - */ -int ttm_page_alloc_debugfs(struct seq_file *m, void *data); - -#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL) -/** - * Initialize pool allocator. - */ -int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); - -/** - * Free pool allocator. 
- */
-void ttm_dma_page_alloc_fini(void);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-
-int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev,
-		      struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev);
-
-#else
-static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
-					   unsigned max_pages)
-{
-	return -ENODEV;
-}
-
-static inline void ttm_dma_page_alloc_fini(void) { return; }
-
-static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
-	return 0;
-}
-static inline int ttm_dma_populate(struct ttm_tt *ttm_dma,
-				   struct device *dev,
-				   struct ttm_operation_ctx *ctx)
-{
-	return -ENOMEM;
-}
-static inline void ttm_dma_unpopulate(struct ttm_tt *ttm_dma,
-				      struct device *dev)
-{
-}
-#endif
-
-#endif
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index df9a80650feb..da27e9d8fa64 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -37,7 +37,6 @@ struct ttm_operation_ctx;
 #define TTM_PAGE_FLAG_SWAPPED (1 << 4)
 #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_DMA32 (1 << 7)
 #define TTM_PAGE_FLAG_SG (1 << 8)
 #define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
@@ -66,7 +65,6 @@ struct ttm_tt {
 	struct sg_table *sg;
 	dma_addr_t *dma_address;
 	struct file *swap_storage;
-	struct list_head pages_list;
 	enum ttm_caching caching;
 };
-- 
cgit v1.2.3


From fa3bfa3527b31562dba28d49c0a18b722b42e226 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 29 Oct 2020 21:30:42 +0000
Subject: drm: Quieten [zero] EDID carping
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We have a few displays in CI that always report their EDID as a bunch of
zeroes. This is consistent behaviour, so one assumes it is an intentional
indication of an "absent" EDID. Flagging these recurring warnings detracts
from the CI results.

One option would be to ignore the zero EDIDs as intentional behaviour, but
Ville would like to keep the information available for debugging. The
simple alternative then is to reduce the loglevel for all the EDID dumping
from WARN to DEBUG, so the information is still present but does not annoy
CI.

Note that the bad EDID dump is already only shown when drm.debug=KMS is
set; the loglevel was chosen so that CI would catch it if it ever
occurred, since it was expected to indicate an internal error rather than
an external one.
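As a rough sketch of the resulting pattern (illustrative only, not part of
the patch: dump_suspect_edid_block is a made-up name and EDID_LENGTH here
mirrors the definition in drm/drm_edid.h), dumping at debug loglevel keeps
the bytes in the kernel log without raising a warning:

#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

#define EDID_LENGTH 128

/* Dump a suspect EDID block at debug loglevel: the bytes still land in
 * the kernel log, but CI filters that only trip on warnings stay quiet. */
static void dump_suspect_edid_block(const u8 *block)
{
	/* memchr_inv() returns NULL iff the whole buffer matches the byte,
	 * i.e. the block really is all zeroes. */
	if (!memchr_inv(block, 0, EDID_LENGTH))
		printk(KERN_DEBUG "EDID block is all zeroes\n");

	print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_NONE, 16, 1,
		       block, EDID_LENGTH, false);
}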
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2203
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201029213042.11672-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/drm_edid.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6840f0530a38..7060152a5daa 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1844,7 +1844,7 @@ static void connector_bad_edid(struct drm_connector *connector,
 	if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
 		return;
 
-	drm_warn(connector->dev, "%s: EDID is invalid:\n", connector->name);
+	drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
 	for (i = 0; i < num_blocks; i++) {
 		u8 *block = edid + i * EDID_LENGTH;
 		char prefix[20];
@@ -1856,7 +1856,7 @@ static void connector_bad_edid(struct drm_connector *connector,
 		else
 			sprintf(prefix, "\t[%02x] GOOD ", i);
 
-		print_hex_dump(KERN_WARNING,
+		print_hex_dump(KERN_DEBUG,
 			       prefix, DUMP_PREFIX_NONE, 16, 1,
 			       block, EDID_LENGTH, false);
 	}
-- 
cgit v1.2.3


From 728da60da7c1ec1e21ae64648e376666de3c279c Mon Sep 17 00:00:00 2001
From: Robin Murphy <robin.murphy@arm.com>
Date: Tue, 22 Sep 2020 15:16:48 +0100
Subject: iommu/io-pgtable-arm: Support coherency for Mali LPAE

Midgard GPUs have ACE-Lite master interfaces, which allow systems to
integrate them in an I/O-coherent manner. It seems that from the GPU's
viewpoint, the rest of the system is its outer shareable domain, and so
even when snoop signals are wired up, they are only emitted for outer
shareable accesses. As such, setting the TTBR_SHARE_OUTER bit does
indeed get coherent pagetable walks working nicely for the coherent
T620 in the Arm Juno SoC.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Neil Armstrong <narmstrong@baylibre.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
Link: https://patchwork.freedesktop.org/patch/msgid/8df778355378127ea7eccc9521d6427e3e48d4f2.1600780574.git.robin.murphy@arm.com
---
 drivers/iommu/io-pgtable-arm.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index dc7bcf858b6d..b4072a18e45d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -440,7 +440,13 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 			<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 	}
 
-	if (prot & IOMMU_CACHE)
+	/*
+	 * Also Mali has its own notions of shareability wherein its Inner
+	 * domain covers the cores within the GPU, and its Outer domain is
+	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
+	 * terms, depending on coherency).
+ */ + if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE) pte |= ARM_LPAE_PTE_SH_IS; else pte |= ARM_LPAE_PTE_SH_OS; @@ -1049,6 +1055,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) | ARM_MALI_LPAE_TTBR_READ_INNER | ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; + if (cfg->coherent_walk) + cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER; + return &data->iop; out_free_data: -- cgit v1.2.3 From 268af50f38b1f2199a2e85e38073d7a25c20190c Mon Sep 17 00:00:00 2001 From: Robin Murphy <robin.murphy@arm.com> Date: Tue, 22 Sep 2020 15:16:49 +0100 Subject: drm/panfrost: Support cache-coherent integrations When the GPU's ACE-Lite interface is fully wired up and capable of snooping CPU caches, it may be described as "dma-coherent" in devicetree, which will already inform the DMA layer not to perform unnecessary cache maintenance. However, we still need to ensure that the GPU uses the appropriate cacheable outer-shareable attributes in order to generate the requisite snoop signals, and that CPU mappings don't create a mismatch by using a non-cacheable type either. Signed-off-by: Robin Murphy <robin.murphy@arm.com> Tested-by: Neil Armstrong <narmstrong@baylibre.com> Reviewed-by: Steven Price <steven.price@arm.com> Signed-off-by: Neil Armstrong <narmstrong@baylibre.com> Link: https://patchwork.freedesktop.org/patch/msgid/7024ce18c1cb1a226e918037d49175571db0b436.1600780574.git.robin.murphy@arm.com --- drivers/gpu/drm/panfrost/panfrost_device.h | 1 + drivers/gpu/drm/panfrost/panfrost_drv.c | 2 ++ drivers/gpu/drm/panfrost/panfrost_gem.c | 2 ++ drivers/gpu/drm/panfrost/panfrost_mmu.c | 1 + 4 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 2e9cbd1c4a58..140e004a3790 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -88,6 +88,7 @@ struct panfrost_device { /* pm_domains for devices with more than one. */ struct device *pm_domain_devs[MAX_PM_DOMAINS]; struct device_link *pm_domain_links[MAX_PM_DOMAINS]; + bool coherent; struct panfrost_features features; const struct panfrost_compatible *comp; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 37d4cb7a5491..6e5dedacb777 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -587,6 +587,8 @@ static int panfrost_probe(struct platform_device *pdev) if (!pfdev->comp) return -ENODEV; + pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT; + /* Allocate and initialze the DRM device. 
 */
	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
	if (IS_ERR(ddev))
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 33355dd302f1..cdf1a8754eba 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -220,6 +220,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
  */
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
 {
+	struct panfrost_device *pfdev = dev->dev_private;
 	struct panfrost_gem_object *obj;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -229,6 +230,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
 	INIT_LIST_HEAD(&obj->mappings.list);
 	mutex_init(&obj->mappings.lock);
 	obj->base.base.funcs = &panfrost_gem_funcs;
+	obj->base.map_cached = pfdev->coherent;
 
 	return &obj->base.base;
 }
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index e8f7b11352d2..8852fd378f7a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -371,6 +371,7 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
 		.pgsize_bitmap = SZ_4K | SZ_2M,
 		.ias = FIELD_GET(0xff, pfdev->features.mmu_features),
 		.oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+		.coherent_walk = pfdev->coherent,
 		.tlb = &mmu_tlb_ops,
 		.iommu_dev = pfdev->dev,
 	};
-- 
cgit v1.2.3


From 03544505cb10ddc73df3b6176e71cdb366834134 Mon Sep 17 00:00:00 2001
From: Robin Murphy <robin.murphy@arm.com>
Date: Tue, 22 Sep 2020 15:16:50 +0100
Subject: arm64: dts: meson: Describe G12b GPU as coherent

According to a downstream commit I found in the Khadas vendor kernel,
the GPU on G12b is wired up for ACE-lite, so (now that Panfrost knows
how to handle this properly) we should describe it as such. Otherwise
the mismatch leads to all manner of fun with mismatched attributes and
inadvertently snooping stale data from caches, which would account for
at least some of the brokenness observed on this platform.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Neil Armstrong <narmstrong@baylibre.com>
Reviewed-by: Neil Armstrong <narmstrong@baylibre.com>
Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
Link: https://patchwork.freedesktop.org/patch/msgid/765446e529e50b304af63432da7836c4d31eb8d4.1600780574.git.robin.murphy@arm.com
---
 arch/arm64/boot/dts/amlogic/meson-g12b.dtsi | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
index 9b8548e5f6e5..ee8fcae9f9f0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
@@ -135,3 +135,7 @@
 		};
 	};
 };
+
+&mali {
+	dma-coherent;
+};
-- 
cgit v1.2.3


From a17d609e3e216c406f7c0cec2a94086a4401ac06 Mon Sep 17 00:00:00 2001
From: Steven Price <steven.price@arm.com>
Date: Thu, 29 Oct 2020 17:00:47 +0000
Subject: drm/panfrost: Don't corrupt the queue mutex on open/close

The mutex within the panfrost_queue_state should have the lifetime of
the queue; however, it was erroneously initialised/destroyed during
panfrost_job_{open,close}, which is called every time a client
opens/closes the drm node. Move the initialisation/destruction to
panfrost_job_{init,fini}, where it belongs.
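The general rule, sketched with a hypothetical foo driver rather than the
panfrost code itself (all names below are illustrative): the one-shot
init()/fini() pair owns the lock's lifetime, while the per-client
open()/close() paths only take it.

#include <linux/mutex.h>

struct foo_queue {
	struct mutex lock;	/* must live exactly as long as the queue */
};

/* Device init/fini run once: the only correct home for mutex_init()
 * and mutex_destroy(). */
static void foo_queue_init(struct foo_queue *q)
{
	mutex_init(&q->lock);
}

static void foo_queue_fini(struct foo_queue *q)
{
	mutex_destroy(&q->lock);
}

/* open()/close() run once per client: they may lock and unlock, but
 * re-initialising the mutex here would corrupt it for every other
 * client that still holds a reference to the queue. */
static void foo_queue_touch(struct foo_queue *q)
{
	mutex_lock(&q->lock);
	/* per-client work */
	mutex_unlock(&q->lock);
}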
Fixes: 1a11a88cfd9a ("drm/panfrost: Fix job timeout handling") Signed-off-by: Steven Price <steven.price@arm.com> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com> Link: https://patchwork.freedesktop.org/patch/msgid/20201029170047.30564-1-steven.price@arm.com --- drivers/gpu/drm/panfrost/panfrost_job.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index d0469e944143..4902bc6624c8 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -554,6 +554,8 @@ int panfrost_job_init(struct panfrost_device *pfdev) } for (j = 0; j < NUM_JOB_SLOTS; j++) { + mutex_init(&js->queue[j].lock); + js->queue[j].fence_context = dma_fence_context_alloc(1); ret = drm_sched_init(&js->queue[j].sched, @@ -584,8 +586,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev) job_write(pfdev, JOB_INT_MASK, 0); - for (j = 0; j < NUM_JOB_SLOTS; j++) + for (j = 0; j < NUM_JOB_SLOTS; j++) { drm_sched_fini(&js->queue[j].sched); + mutex_destroy(&js->queue[j].lock); + } } @@ -597,7 +601,6 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) int ret, i; for (i = 0; i < NUM_JOB_SLOTS; i++) { - mutex_init(&js->queue[i].lock); sched = &js->queue[i].sched; ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], DRM_SCHED_PRIORITY_NORMAL, &sched, @@ -614,10 +617,8 @@ void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) struct panfrost_job_slot *js = pfdev->js; int i; - for (i = 0; i < NUM_JOB_SLOTS; i++) { + for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]); - mutex_destroy(&js->queue[i].lock); - } } int panfrost_job_is_idle(struct panfrost_device *pfdev) -- cgit v1.2.3 From 4dbafbd30aefcc1e56086875c5aa940490c0a9c3 Mon Sep 17 00:00:00 2001 From: Christian König <christian.koenig@amd.com> Date: Fri, 30 Oct 2020 12:11:38 +0100 Subject: drm/nouveu: fix swiotlb include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The check for swiotlb has moved to nouveu_ttm.c, but we forgot to move the include as well. This blows up only when merged with linux-next, not sure why drm-misc-next works stand alone. Signed-off-by: Christian König <christian.koenig@amd.com> Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: https://patchwork.freedesktop.org/patch/397835/ --- drivers/gpu/drm/nouveau/nouveau_bo.c | 1 - drivers/gpu/drm/nouveau/nouveau_ttm.c | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 746c06ed195b..8133377d865d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -28,7 +28,6 @@ */ #include <linux/dma-mapping.h> -#include <linux/swiotlb.h> #include "nouveau_drv.h" #include "nouveau_chan.h" diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index d696d882c9eb..2bf36229dd57 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -22,6 +22,9 @@ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +#include <linux/swiotlb.h> + #include "nouveau_drv.h" #include "nouveau_gem.h" #include "nouveau_mem.h" -- cgit v1.2.3 From dbc8c28a742d70644341148b436facb64e56641b Mon Sep 17 00:00:00 2001 From: Tian Tao <tiantao6@hisilicon.com> Date: Thu, 29 Oct 2020 15:11:45 +0800 Subject: drm/hisilicon: Adding a const declaration to an invariant construct Some constructs cannot be changed after being assigned a value, so add const declarations to invariant constructs. Signed-off-by: Tian Tao <tiantao6@hisilicon.com> Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de> Link: https://patchwork.freedesktop.org/patch/msgid/1603955505-43942-1-git-send-email-tiantao6@hisilicon.com --- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 2 +- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index a1eabadf5adb..ef18b4787195 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -139,7 +139,7 @@ static const u32 channel_formats1[] = { DRM_FORMAT_ABGR8888 }; -static struct drm_plane_funcs hibmc_plane_funcs = { +static const struct drm_plane_funcs hibmc_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 0c1b40d25ac4..fee6fe810e74 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -369,7 +369,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev) drm_dev_put(dev); } -static struct pci_device_id hibmc_pci_table[] = { +static const struct pci_device_id hibmc_pci_table[] = { { PCI_VDEVICE(HUAWEI, 0x1711) }, {0,} }; -- cgit v1.2.3 From d23bce963e7f17738374d700f0dc5464c5f7cba2 Mon Sep 17 00:00:00 2001 From: Linus Walleij <linus.walleij@linaro.org> Date: Fri, 30 Oct 2020 01:28:50 +0100 Subject: fbdev/sh_mobile: Drop unused include The driver includes <linux/gpio.h> but doesn't use any symbols from this file. Cc: Magnus Damm <magnus.damm@gmail.com> Cc: Geert Uytterhoeven <geert+renesas@glider.be> Cc: linux-renesas-soc@vger.kernel.org Signed-off-by: Linus Walleij <linus.walleij@linaro.org> Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be> # on r8a77440/armadillo, where this is the last piece of non-DT support. 
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20201030002850.6495-1-linus.walleij@linaro.org
---
 drivers/video/fbdev/sh_mobile_lcdcfb.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index c0952cc96bdb..aa4ebe3192ec 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -16,7 +16,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/fbcon.h>
-#include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioctl.h>
-- 
cgit v1.2.3


From 95f4f40a085d899e4fe3c5cbac2708368dbe4277 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime@cerno.tech>
Date: Mon, 2 Nov 2020 11:50:50 +0100
Subject: drm/nouveau/ttm: Add limits.h

It seems that a recent commit broke the nouveau compilation when swiotlb
is disabled (which is the case on our ARM defconfig for example).

Daniel says

"""
Since the proper fix is maybe stuck in the usual "drm abuses swiotlb
internals" bikeshed, maybe best if we push a fix to including limits.h in
nouveau and call it done?
"""

So let's go down the simplest path to fix our build, and go back to it
later if needed.

Link: https://patchwork.freedesktop.org/patch/397835/
Fixes: 4dbafbd30aef ("drm/nouveu: fix swiotlb include")
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
---
 drivers/gpu/drm/nouveau/nouveau_ttm.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 2bf36229dd57..a37bc3d7b38b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -23,6 +23,7 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/limits.h>
 #include <linux/swiotlb.h>
 
 #include "nouveau_drv.h"
-- 
cgit v1.2.3


From 29b77ad7b9ca8c87152a1a9e8188970fb2a93df4 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime@cerno.tech>
Date: Wed, 28 Oct 2020 13:32:21 +0100
Subject: drm/atomic: Pass the full state to CRTC atomic_check
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The current atomic helpers have either their object state or the full
atomic state passed as an argument. The former is the pattern that was
used at first, before switching to the latter for new hooks or when it
was needed. Let's start converting all the remaining helpers to provide
a consistent interface, starting with the CRTC's atomic_check.

The conversion was done using the coccinelle script below, build-tested
on all the drivers and actually tested on vc4.

virtual report

@@
struct drm_crtc_helper_funcs *FUNCS;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
identifier dev, state;
identifier ret, f;
@@

f(struct drm_device *dev, struct drm_atomic_state *state)
{
	<...
-	ret = FUNCS->atomic_check(crtc, crtc_state);
+	ret = FUNCS->atomic_check(crtc, state);
	...>
}

@@
identifier crtc, new_state;
@@

struct drm_crtc_helper_funcs {
	...
-	int (*atomic_check)(struct drm_crtc *crtc, struct drm_crtc_state *new_state);
+	int (*atomic_check)(struct drm_crtc *crtc, struct drm_atomic_state *state);
	...
}

@ crtc_atomic_func @
identifier helpers;
identifier func;
@@

static struct drm_crtc_helper_funcs helpers = {
	...,
	.atomic_check = func,
	...,
};

@ ignores_new_state @
identifier crtc_atomic_func.func;
identifier crtc, new_state;
@@

int func(struct drm_crtc *crtc, struct drm_crtc_state *new_state)
{
	... 
when != new_state } @ adds_new_state depends on crtc_atomic_func && !ignores_new_state @ identifier crtc_atomic_func.func; identifier crtc, new_state; @@ int func(struct drm_crtc *crtc, struct drm_crtc_state *new_state) { + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc); ... } @ depends on crtc_atomic_func @ identifier crtc_atomic_func.func; expression E; type T; @@ int func(...) { ... - T state = E; + T crtc_state = E; <+... - state + crtc_state ...+> } @ depends on crtc_atomic_func @ identifier crtc_atomic_func.func; type T; @@ int func(...) { ... - T state; + T crtc_state; <+... - state + crtc_state ...+> } @ depends on crtc_atomic_func @ identifier crtc_atomic_func.func; identifier new_state; identifier crtc; @@ int func(struct drm_crtc *crtc, - struct drm_crtc_state *new_state + struct drm_atomic_state *state ) { ... } @@ identifier new_state; identifier crtc; @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *new_state + struct drm_atomic_state *state ) { + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc); ... } @@ identifier new_state; identifier crtc; @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *new_state + struct drm_atomic_state *state ); @ include depends on adds_new_state @ @@ #include <drm/drm_atomic.h> @ no_include depends on !include && adds_new_state @ @@ + #include <drm/drm_atomic.h> #include <drm/...> Signed-off-by: Maxime Ripard <maxime@cerno.tech> Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com> Acked-by: Thomas Zimmermann <tzimmermann@suse.de> Link: https://patchwork.freedesktop.org/patch/msgid/20201028123222.1732139-1-maxime@cerno.tech --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 14 ++++++++------ drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 10 ++++++---- drivers/gpu/drm/arm/malidp_crtc.c | 20 +++++++++++--------- drivers/gpu/drm/armada/armada_crtc.c | 10 ++++++---- drivers/gpu/drm/ast/ast_mode.c | 12 +++++++----- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 3 ++- drivers/gpu/drm/drm_atomic_helper.c | 2 +- drivers/gpu/drm/drm_simple_kms_helper.c | 10 ++++++---- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 8 +++++--- drivers/gpu/drm/imx/ipuv3-crtc.c | 6 ++++-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 19 ++++++++++++------- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 23 +++++++++++++---------- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 2 +- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 13 ++++++++----- drivers/gpu/drm/mxsfb/mxsfb_kms.c | 10 ++++++---- drivers/gpu/drm/nouveau/dispnv50/head.c | 7 +++++-- drivers/gpu/drm/omapdrm/omap_crtc.c | 13 ++++++++----- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 11 +++++++---- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 4 +++- drivers/gpu/drm/sun4i/sun4i_crtc.c | 7 +++++-- drivers/gpu/drm/tidss/tidss_crtc.c | 10 ++++++---- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 12 +++++++----- drivers/gpu/drm/vc4/vc4_crtc.c | 11 +++++++---- drivers/gpu/drm/vc4/vc4_txp.c | 10 ++++++---- drivers/gpu/drm/virtio/virtgpu_display.c | 2 +- drivers/gpu/drm/vkms/vkms_crtc.c | 16 +++++++++------- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4 +++- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 2 +- drivers/gpu/drm/xlnx/zynqmp_disp.c | 6 ++++-- include/drm/drm_modeset_helper_vtables.h | 5 ++--- 30 files changed, 170 insertions(+), 112 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e2b23486ba4c..86fd4420f128 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5514,17 +5514,19 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc, } static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc *dc = adev->dm.dc; - struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); + struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); int ret = -EINVAL; - dm_update_crtc_active_planes(crtc, state); + dm_update_crtc_active_planes(crtc, crtc_state); if (unlikely(!dm_crtc_state->stream && - modeset_required(state, NULL, dm_crtc_state->stream))) { + modeset_required(crtc_state, NULL, dm_crtc_state->stream))) { WARN_ON(1); return ret; } @@ -5535,8 +5537,8 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, * planes are disabled, which is not supported by the hardware. And there is legacy * userspace which stops using the HW cursor altogether in response to the resulting EINVAL. */ - if (state->enable && - !(state->plane_mask & drm_plane_mask(crtc->primary))) + if (crtc_state->enable && + !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) return -EINVAL; /* In some use cases, like reset, no stream is attached */ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index a4bbf56a7fc1..cc65623b5455 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -74,16 +74,18 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st) */ static int komeda_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct komeda_crtc *kcrtc = to_kcrtc(crtc); - struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state); + struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_state); int err; - if (drm_atomic_crtc_needs_modeset(state)) + if (drm_atomic_crtc_needs_modeset(crtc_state)) komeda_crtc_update_clock_ratio(kcrtc_st); - if (state->active) { + if (crtc_state->active) { err = komeda_build_display_data_flow(kcrtc, kcrtc_st); if (err) return err; diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 49766eb7a554..108e7a31bd26 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -337,8 +337,10 @@ mclk_calc: } static int malidp_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct malidp_drm *malidp = crtc_to_malidp_device(crtc); struct malidp_hw_device *hwdev = malidp->dev; struct drm_plane *plane; @@ -373,7 +375,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, */ /* first count the number of rotated planes */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { struct drm_framebuffer *fb = pstate->fb; if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier) @@ -389,7 +391,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, rot_mem_free += hwdev->rotation_memory[1]; /* now validate the rotation 
memory requirements */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { struct malidp_plane *mp = to_malidp_plane(plane); struct malidp_plane_state *ms = to_malidp_plane_state(pstate); struct drm_framebuffer *fb = pstate->fb; @@ -417,18 +419,18 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, } /* If only the writeback routing has changed, we don't need a modeset */ - if (state->connectors_changed) { + if (crtc_state->connectors_changed) { u32 old_mask = crtc->state->connector_mask; - u32 new_mask = state->connector_mask; + u32 new_mask = crtc_state->connector_mask; if ((old_mask ^ new_mask) == (1 << drm_connector_index(&malidp->mw_connector.base))) - state->connectors_changed = false; + crtc_state->connectors_changed = false; } - ret = malidp_crtc_atomic_check_gamma(crtc, state); - ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state); - ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state); + ret = malidp_crtc_atomic_check_gamma(crtc, crtc_state); + ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, crtc_state); + ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, crtc_state); return ret; } diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index e0fbfc9ce386..d22ca1496c43 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -413,15 +413,17 @@ static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) } static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); - if (state->gamma_lut && drm_color_lut_size(state->gamma_lut) != 256) + if (crtc_state->gamma_lut && drm_color_lut_size(crtc_state->gamma_lut) != 256) return -EINVAL; - if (state->color_mgmt_changed) - state->planes_changed = true; + if (crtc_state->color_mgmt_changed) + crtc_state->planes_changed = true; return 0; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index bd03a8a67e3a..346dce2d654f 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -751,24 +751,26 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) } static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_device *dev = crtc->dev; struct ast_crtc_state *ast_state; const struct drm_format_info *format; bool succ; - if (!state->enable) + if (!crtc_state->enable) return 0; /* no mode checks if CRTC is being disabled */ - ast_state = to_ast_crtc_state(state); + ast_state = to_ast_crtc_state(crtc_state); format = ast_state->format; if (drm_WARN_ON_ONCE(dev, !format)) return -EINVAL; /* BUG: We didn't set format in primary check(). 
*/ - succ = ast_get_vbios_mode_info(format, &state->mode, - &state->adjusted_mode, + succ = ast_get_vbios_mode_info(format, &crtc_state->mode, + &crtc_state->adjusted_mode, &ast_state->vbios_mode_info); if (!succ) return -EINVAL; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 2b3888df22f8..0e533ded2a96 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -325,8 +325,9 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state) } static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c, - struct drm_crtc_state *s) + struct drm_atomic_state *state) { + struct drm_crtc_state *s = drm_atomic_get_new_crtc_state(state, c); int ret; ret = atmel_hlcdc_crtc_select_output_mode(s); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index a7bcb4b4586c..12d8095318d0 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -918,7 +918,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_check) continue; - ret = funcs->atomic_check(crtc, new_crtc_state); + ret = funcs->atomic_check(crtc, state); if (ret) { DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", crtc->base.id, crtc->name); diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index fa87b63e152a..4b46689634dd 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -86,16 +86,18 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc, } static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - bool has_primary = state->plane_mask & + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + bool has_primary = crtc_state->plane_mask & drm_plane_mask(crtc->primary); /* We always want to have an active plane with an active CRTC */ - if (has_primary != state->enable) + if (has_primary != crtc_state->enable) return -EINVAL; - return drm_atomic_add_affected_planes(state->state, crtc); + return drm_atomic_add_affected_planes(crtc_state->state, crtc); } static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 35f1d1dbb126..928f764efce8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -49,15 +49,17 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, } static int exynos_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - if (!state->enable) + if (!crtc_state->enable) return 0; if (exynos_crtc->ops->atomic_check) - return exynos_crtc->ops->atomic_check(exynos_crtc, state); + return exynos_crtc->ops->atomic_check(exynos_crtc, crtc_state); return 0; } diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 7ecc27c41a6a..b6d864d7a0df 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -227,11 +227,13 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc, } static int ipu_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct 
drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); u32 primary_plane_mask = drm_plane_mask(crtc->primary); - if (state->active && (primary_plane_mask & state->plane_mask) == 0) + if (crtc_state->active && (primary_plane_mask & crtc_state->plane_mask) == 0) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 2329754af116..aaa324bd5572 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -239,28 +239,33 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv, } static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct ingenic_drm *priv = drm_crtc_get_priv(crtc); struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL; - if (state->gamma_lut && - drm_color_lut_size(state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) { + if (crtc_state->gamma_lut && + drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) { dev_dbg(priv->dev, "Invalid palette size\n"); return -EINVAL; } - if (drm_atomic_crtc_needs_modeset(state) && priv->soc_info->has_osd) { - f1_state = drm_atomic_get_plane_state(state->state, &priv->f1); + if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) { + f1_state = drm_atomic_get_plane_state(crtc_state->state, + &priv->f1); if (IS_ERR(f1_state)) return PTR_ERR(f1_state); - f0_state = drm_atomic_get_plane_state(state->state, &priv->f0); + f0_state = drm_atomic_get_plane_state(crtc_state->state, + &priv->f0); if (IS_ERR(f0_state)) return PTR_ERR(f0_state); if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && priv->ipu_plane) { - ipu_state = drm_atomic_get_plane_state(state->state, priv->ipu_plane); + ipu_state = drm_atomic_get_plane_state(crtc_state->state, + priv->ipu_plane); if (IS_ERR(ipu_state)) return PTR_ERR(ipu_state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 6a24ce245a37..169df7ca85fd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -815,10 +815,12 @@ struct plane_state { }; static int dpu_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); - struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state); struct plane_state *pstates; const struct drm_plane_state *pstate; @@ -835,32 +837,33 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL); - if (!state->enable || !state->active) { + if (!crtc_state->enable || !crtc_state->active) { DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n", - crtc->base.id, state->enable, state->active); + crtc->base.id, crtc_state->enable, + crtc_state->active); goto end; } - mode = &state->adjusted_mode; + mode = &crtc_state->adjusted_mode; DPU_DEBUG("%s: check", dpu_crtc->name); /* force a full mode set if active state changed */ - if (state->active_changed) - state->mode_changed = true; + if (crtc_state->active_changed) + crtc_state->mode_changed = true; memset(pipe_staged, 0, 
sizeof(pipe_staged)); if (cstate->num_mixers) { mixer_width = mode->hdisplay / cstate->num_mixers; - _dpu_crtc_setup_lm_bounds(crtc, state); + _dpu_crtc_setup_lm_bounds(crtc, crtc_state); } crtc_rect.x2 = mode->hdisplay; crtc_rect.y2 = mode->vdisplay; /* get plane state for all drm planes associated with crtc state */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { struct drm_rect dst, clip = crtc_rect; if (IS_ERR_OR_NULL(pstate)) { @@ -966,7 +969,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); - rc = dpu_core_perf_crtc_check(crtc, state); + rc = dpu_core_perf_crtc_check(crtc, crtc_state); if (rc) { DPU_ERROR("crtc%d failed performance check %d\n", crtc->base.id, rc); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index 6b03ceeb5ba1..af80f3baf05b 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -307,7 +307,7 @@ static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc, } static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); DBG("%s: check", mdp4_crtc->name); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 747dd8a7aa6e..500f885c0eae 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -7,6 +7,7 @@ #include <linux/sort.h> +#include <drm/drm_atomic.h> #include <drm/drm_mode.h> #include <drm/drm_crtc.h> #include <drm/drm_flip_work.h> @@ -682,15 +683,17 @@ static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc, } static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; struct drm_device *dev = crtc->dev; struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; const struct drm_plane_state *pstate; - const struct drm_display_mode *mode = &state->adjusted_mode; + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; bool cursor_plane = false; bool need_right_mixer = false; int cnt = 0, i; @@ -699,7 +702,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, DBG("%s: check", crtc->name); - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { if (!pstate->visible) continue; @@ -731,7 +734,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, if (mode->hdisplay > hw_cfg->lm.max_width) need_right_mixer = true; - ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer); + ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer); if (ret) { DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret); return ret; @@ -744,7 +747,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, WARN_ON(cursor_plane && (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); - start = get_start_stage(crtc, state, &pstates[0].state->base); + start = get_start_stage(crtc, crtc_state, &pstates[0].state->base); /* verify that there are not too many planes attached to crtc * and that we don't have conflicting mixer stages: diff --git 
a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 956f631997f2..b0757f84a979 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -269,17 +269,19 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) } static int mxsfb_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - bool has_primary = state->plane_mask & + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + bool has_primary = crtc_state->plane_mask & drm_plane_mask(crtc->primary); /* The primary plane has to be enabled when the CRTC is active. */ - if (state->active && !has_primary) + if (crtc_state->active && !has_primary) return -EINVAL; /* TODO: Is this needed ? */ - return drm_atomic_add_affected_planes(state->state, crtc); + return drm_atomic_add_affected_planes(crtc_state->state, crtc); } static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 841edfaf5b9d..0542ca22b33a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -30,6 +30,7 @@ #include <nvif/event.h> #include <nvif/cl0046.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_vblank.h> @@ -310,12 +311,14 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) } static int -nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) +nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct nouveau_drm *drm = nouveau_drm(crtc->dev); struct nv50_head *head = nv50_head(crtc); struct nv50_head_atom *armh = nv50_head_atom(crtc->state); - struct nv50_head_atom *asyh = nv50_head_atom(state); + struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); struct nouveau_conn_atom *asyc = NULL; struct drm_connector_state *conns; struct drm_connector *conn; diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index fef3b0032fd8..69a0770ba38e 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -569,22 +569,25 @@ static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc) } static int omap_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_plane_state *pri_state; - if (state->color_mgmt_changed && state->gamma_lut) { - unsigned int length = state->gamma_lut->length / + if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) { + unsigned int length = crtc_state->gamma_lut->length / sizeof(struct drm_color_lut); if (length < 2) return -EINVAL; } - pri_state = drm_atomic_get_new_plane_state(state->state, crtc->primary); + pri_state = drm_atomic_get_new_plane_state(crtc_state->state, + crtc->primary); if (pri_state) { struct omap_crtc_state *omap_crtc_state = - to_omap_crtc_state(state); + to_omap_crtc_state(crtc_state); /* Mirror new values for zpos and rotation in omap_crtc_state */ omap_crtc_state->zpos = pri_state->zpos; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 4c360a255849..460fb07b786f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ 
b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -682,20 +682,23 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) */ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc_state); struct drm_encoder *encoder; int ret; - ret = rcar_du_cmm_check(crtc, state); + ret = rcar_du_cmm_check(crtc, crtc_state); if (ret) return ret; /* Store the routes from the CRTC output to the DU outputs. */ rstate->outputs = 0; - drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) { + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc_state->encoder_mask) { struct rcar_du_encoder *renc; /* Skip the writeback encoder. */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 47835715b44b..fcbd758e6531 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1415,8 +1415,10 @@ static void vop_wait_for_irq_handler(struct vop *vop) } static int vop_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct vop *vop = to_vop(crtc); struct drm_plane *plane; struct drm_plane_state *plane_state; diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 999deb64bd70..8f91391832db 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -15,6 +15,7 @@ #include <video/videomode.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_modes.h> @@ -45,14 +46,16 @@ static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc) } static int sun4i_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct sunxi_engine *engine = scrtc->engine; int ret = 0; if (engine && engine->ops && engine->ops->atomic_check) - ret = engine->ops->atomic_check(engine, state); + ret = engine->ops->atomic_check(engine, crtc_state); return ret; } diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index 848b9c7b553d..6739f489dfdf 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -85,8 +85,10 @@ void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus) /* drm_crtc_helper_funcs */ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); struct dispc_device *dispc = tidss->dispc; @@ -97,10 +99,10 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, dev_dbg(ddev->dev, "%s\n", __func__); - if (!state->enable) + if (!crtc_state->enable) return 0; - mode = &state->adjusted_mode; + mode = &crtc_state->adjusted_mode; ok = dispc_vp_mode_valid(dispc, hw_videoport, mode); if (ok != MODE_OK) { @@ -109,7 +111,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc 
*crtc, return -EINVAL; } - return dispc_vp_bus_check(dispc, hw_videoport, state); + return dispc_vp_bus_check(dispc, hw_videoport, crtc_state); } /* diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index da2ab2aa3577..d87a9fd4a203 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -657,15 +657,17 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc, } static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); /* If we are not active we don't care */ - if (!state->active) + if (!crtc_state->active) return 0; - if (state->state->planes[0].ptr != crtc->primary || - state->state->planes[0].state == NULL || - state->state->planes[0].state->crtc != crtc) { + if (crtc_state->state->planes[0].ptr != crtc->primary || + crtc_state->state->planes[0].state == NULL || + crtc_state->state->planes[0].state->crtc != crtc) { dev_dbg(crtc->dev->dev, "CRTC primary plane must be present"); return -EINVAL; } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index f04f5cc8c839..06088854c647 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -584,18 +584,21 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state, } static int vc4_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); struct drm_connector *conn; struct drm_connector_state *conn_state; int ret, i; - ret = vc4_hvs_atomic_check(crtc, state); + ret = vc4_hvs_atomic_check(crtc, crtc_state); if (ret) return ret; - for_each_new_connector_in_state(state->state, conn, conn_state, i) { + for_each_new_connector_in_state(crtc_state->state, conn, conn_state, + i) { if (conn_state->crtc != crtc) continue; diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index e0e0b72ea65c..34612edcabbd 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -386,16 +386,18 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = { }; static int vc4_txp_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); int ret; - ret = vc4_hvs_atomic_check(crtc, state); + ret = vc4_hvs_atomic_check(crtc, crtc_state); if (ret) return ret; - state->no_vblank = true; + crtc_state->no_vblank = true; vc4_state->feed_txp = true; return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 48b3194ee051..e81183ab87e0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -111,7 +111,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, } static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { return 0; } diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index e43e4e1b268a..6a49e70bdf18 100644 --- 
a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -168,9 +168,11 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = { }; static int vkms_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state); struct drm_plane *plane; struct drm_plane_state *plane_state; int i = 0, ret; @@ -178,12 +180,12 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, if (vkms_state->active_planes) return 0; - ret = drm_atomic_add_affected_planes(state->state, crtc); + ret = drm_atomic_add_affected_planes(crtc_state->state, crtc); if (ret < 0) return ret; - drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) { - plane_state = drm_atomic_get_existing_plane_state(state->state, + drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { + plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); WARN_ON(!plane_state); @@ -199,8 +201,8 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, vkms_state->num_active_planes = i; i = 0; - drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) { - plane_state = drm_atomic_get_existing_plane_state(state->state, + drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { + plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); if (!plane_state->visible) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 312ed0881a99..a74c9454ade2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -522,8 +522,10 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *new_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, + crtc); struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc); int connector_mask = drm_connector_mask(&du->connector); bool has_primary = new_state->plane_mask & diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 3ee03227607c..b3d4e7b4c8c5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -473,7 +473,7 @@ void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps, bool unreference); int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state); + struct drm_atomic_state *state); void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state); void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index 5802752860dd..058fbcebe6ce 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1504,9 +1504,11 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc, } static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - return drm_atomic_add_affected_planes(state->state, crtc); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + return drm_atomic_add_affected_planes(crtc_state->state, crtc); } static void diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h 
index bde42988c4b5..b97441deaf93 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -336,8 +336,7 @@ struct drm_crtc_helper_funcs { * * This function is called in the check phase of an atomic update. The * driver is not allowed to change anything outside of the free-standing - * state objects passed-in or assembled in the overall &drm_atomic_state - * update tracking structure. + * state object passed-in. * * Also beware that userspace can request its own custom modes, neither * core nor helpers filter modes to the list of probe modes reported by @@ -353,7 +352,7 @@ struct drm_crtc_helper_funcs { * deadlock. */ int (*atomic_check)(struct drm_crtc *crtc, - struct drm_crtc_state *state); + struct drm_atomic_state *state); /** * @atomic_begin: -- cgit v1.2.3 From f6ebe9f9c9233a6114eb922aba9a0c9ccc2d2e14 Mon Sep 17 00:00:00 2001 From: Maxime Ripard <maxime@cerno.tech> Date: Wed, 28 Oct 2020 13:32:22 +0100 Subject: drm/atomic: Pass the full state to CRTC atomic begin and flush MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current atomic helpers have either their object state being passed as an argument or the full atomic state. The former is the pattern that was used at first, before switching to the latter for new hooks or when it was needed. Let's start converting all the remaining helpers to provide a consistent interface, starting with the CRTC's atomic_begin and atomic_flush. The conversion was done using the coccinelle script below, build-tested on all the drivers and actually tested on vc4. virtual report @@ struct drm_crtc_helper_funcs *FUNCS; identifier old_crtc_state, old_state; identifier crtc; identifier f; @@ f(struct drm_crtc_state *old_crtc_state) { ... struct drm_atomic_state *old_state = old_crtc_state->state; <... - FUNCS->atomic_begin(crtc, old_crtc_state); + FUNCS->atomic_begin(crtc, old_state); ...> } @@ struct drm_crtc_helper_funcs *FUNCS; identifier old_crtc_state, old_state; identifier crtc; identifier f; @@ f(struct drm_crtc_state *old_crtc_state) { ... struct drm_atomic_state *old_state = old_crtc_state->state; <... - FUNCS->atomic_flush(crtc, old_crtc_state); + FUNCS->atomic_flush(crtc, old_state); ...> } @@ struct drm_crtc_helper_funcs *FUNCS; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; identifier dev, state; identifier f; @@ f(struct drm_device *dev, struct drm_atomic_state *state, ...) { <... - FUNCS->atomic_begin(crtc, crtc_state); + FUNCS->atomic_begin(crtc, state); ...> } @@ struct drm_crtc_helper_funcs *FUNCS; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; identifier dev, state; identifier f; @@ f(struct drm_device *dev, struct drm_atomic_state *state, ...) { <... - FUNCS->atomic_flush(crtc, crtc_state); + FUNCS->atomic_flush(crtc, state); ...> } @@ identifier crtc, old_state; @@ struct drm_crtc_helper_funcs { ... - void (*atomic_begin)(struct drm_crtc *crtc, struct drm_crtc_state *old_state); + void (*atomic_begin)(struct drm_crtc *crtc, struct drm_atomic_state *state); ... - void (*atomic_flush)(struct drm_crtc *crtc, struct drm_crtc_state *old_state); + void (*atomic_flush)(struct drm_crtc *crtc, struct drm_atomic_state *state); ...
} @ crtc_atomic_func @ identifier helpers; identifier func; @@ ( static struct drm_crtc_helper_funcs helpers = { ..., .atomic_begin = func, ..., }; | static struct drm_crtc_helper_funcs helpers = { ..., .atomic_flush = func, ..., }; ) @ ignores_old_state @ identifier crtc_atomic_func.func; identifier crtc, old_state; @@ void func(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { ... when != old_state } @ adds_old_state depends on crtc_atomic_func && !ignores_old_state @ identifier crtc_atomic_func.func; identifier crtc, old_state; @@ void func(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); ... } @ depends on crtc_atomic_func @ identifier crtc_atomic_func.func; expression E; type T; @@ void func(...) { ... - T state = E; + T crtc_state = E; <+... - state + crtc_state ...+> } @ depends on crtc_atomic_func @ identifier crtc_atomic_func.func; type T; @@ void func(...) { ... - T state; + T crtc_state; <+... - state + crtc_state ...+> } @@ identifier old_state; identifier crtc; @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state + struct drm_atomic_state *state ) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); ... } @@ identifier old_state; identifier crtc; @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state + struct drm_atomic_state *state ); @@ identifier old_state; identifier crtc; @@ void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state + struct drm_atomic_state *state ) { ... } @@ identifier old_state; identifier crtc; @@ void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state + struct drm_atomic_state *state ); @@ identifier old_state; identifier crtc; @@ void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state + struct drm_atomic_state *state ) { ... } @@ identifier old_state; identifier crtc; @@ void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state + struct drm_atomic_state *state ); @ depends on crtc_atomic_func @ identifier crtc_atomic_func.func; identifier old_state; identifier crtc; @@ void func(struct drm_crtc *crtc, - struct drm_crtc_state *old_state + struct drm_atomic_state *state ) { ... 
} @ include depends on adds_old_state @ @@ #include <drm/drm_atomic.h> @ no_include depends on !include && adds_old_state @ @@ + #include <drm/drm_atomic.h> #include <drm/...> Signed-off-by: Maxime Ripard <maxime@cerno.tech> Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com> Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> Acked-by: Thomas Zimmermann <tzimmermann@suse.de> Link: https://patchwork.freedesktop.org/patch/msgid/20201028123222.1732139-2-maxime@cerno.tech --- drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 4 +++- drivers/gpu/drm/arm/hdlcd_crtc.c | 2 +- drivers/gpu/drm/armada/armada_crtc.c | 4 ++-- drivers/gpu/drm/ast/ast_mode.c | 5 ++++- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 4 ++-- drivers/gpu/drm/drm_atomic_helper.c | 8 ++++---- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 4 ++-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 2 +- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 4 ++-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 4 ++-- drivers/gpu/drm/imx/dcss/dcss-crtc.c | 4 ++-- drivers/gpu/drm/imx/ipuv3-crtc.c | 4 ++-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 17 +++++++++-------- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 16 ++++++++-------- drivers/gpu/drm/meson/meson_crtc.c | 4 ++-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 4 ++-- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 4 ++-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 4 ++-- drivers/gpu/drm/mxsfb/mxsfb_kms.c | 2 +- drivers/gpu/drm/omapdrm/omap_crtc.c | 4 ++-- drivers/gpu/drm/qxl/qxl_display.c | 2 +- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 4 ++-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 8 ++++++-- drivers/gpu/drm/sti/sti_crtc.c | 2 +- drivers/gpu/drm/stm/ltdc.c | 2 +- drivers/gpu/drm/sun4i/sun4i_crtc.c | 6 ++++-- drivers/gpu/drm/tegra/dc.c | 10 +++++----- drivers/gpu/drm/tidss/tidss_crtc.c | 4 +++- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 2 +- drivers/gpu/drm/vboxvideo/vbox_mode.c | 2 +- drivers/gpu/drm/vc4/vc4_drv.h | 3 ++- drivers/gpu/drm/vc4/vc4_hvs.c | 4 +++- drivers/gpu/drm/virtio/virtgpu_display.c | 2 +- drivers/gpu/drm/vkms/vkms_crtc.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4 ++-- drivers/gpu/drm/xlnx/zynqmp_disp.c | 4 ++-- drivers/gpu/drm/zte/zx_vou.c | 2 +- include/drm/drm_modeset_helper_vtables.h | 4 ++-- 39 files changed, 97 insertions(+), 80 deletions(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index cc65623b5455..df0b9eeb8933 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -385,8 +385,10 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc, static void komeda_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old) + struct drm_atomic_state *state) { + struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state, + crtc); /* commit with modeset will be handled in enable/disable */ if (drm_atomic_crtc_needs_modeset(crtc->state)) return; diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 84ac10d59485..a3234bfb0917 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -205,7 +205,7 @@ static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc, } static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { struct drm_pending_vblank_event *event = crtc->state->event; diff --git a/drivers/gpu/drm/armada/armada_crtc.c 
b/drivers/gpu/drm/armada/armada_crtc.c index d22ca1496c43..ca643f4e2064 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -429,7 +429,7 @@ static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc, } static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); @@ -443,7 +443,7 @@ static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc, } static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 346dce2d654f..22f0e65fbe9a 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -779,8 +779,11 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, } static void -ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) +ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct ast_private *ast = to_ast_private(crtc->dev); struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state); struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 0e533ded2a96..c17571a3cc2b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -342,7 +342,7 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c, } static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c, - struct drm_crtc_state *old_s) + struct drm_atomic_state *state) { struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); @@ -357,7 +357,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c, } static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_s) + struct drm_atomic_state *state) { /* TODO: write common plane control register if available */ } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 12d8095318d0..ddd0e3239150 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2521,7 +2521,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, if (active_only && !new_crtc_state->active) continue; - funcs->atomic_begin(crtc, old_crtc_state); + funcs->atomic_begin(crtc, old_state); } for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { @@ -2579,7 +2579,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, if (active_only && !new_crtc_state->active) continue; - funcs->atomic_flush(crtc, old_crtc_state); + funcs->atomic_flush(crtc, old_state); } } EXPORT_SYMBOL(drm_atomic_helper_commit_planes); @@ -2617,7 +2617,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) crtc_funcs = crtc->helper_private; if (crtc_funcs && crtc_funcs->atomic_begin) - crtc_funcs->atomic_begin(crtc, old_crtc_state); + crtc_funcs->atomic_begin(crtc, old_state); drm_for_each_plane_mask(plane, crtc->dev, plane_mask) { struct drm_plane_state *old_plane_state = @@ -2643,7 +2643,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state 
*old_crtc_state) } if (crtc_funcs && crtc_funcs->atomic_flush) - crtc_funcs->atomic_flush(crtc, old_crtc_state); + crtc_funcs->atomic_flush(crtc, old_state); } EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 928f764efce8..4153f302de7c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -65,7 +65,7 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc, } static void exynos_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); @@ -74,7 +74,7 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc, } static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 7a9e89cfdf9c..2af60d98f48f 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -21,7 +21,7 @@ #include "fsl_dcu_drm_plane.h" static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index ef18b4787195..ea962acfeae0 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -390,7 +390,7 @@ static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { u32 reg; struct drm_device *dev = crtc->dev; @@ -410,7 +410,7 @@ static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc, } static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { unsigned long flags; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index cfe8ff596d55..d84d41f3e78f 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -485,7 +485,7 @@ static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void ade_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct kirin_crtc *kcrtc = to_kirin_crtc(crtc); struct ade_hw_ctx *ctx = kcrtc->hw_ctx; @@ -498,7 +498,7 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc, } static void ade_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct kirin_crtc *kcrtc = to_kirin_crtc(crtc); diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c index 8f570eb5f471..31267c00782f 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-crtc.c +++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c @@ -53,13 +53,13 @@ static const struct drm_crtc_funcs dcss_crtc_funcs = { }; static void dcss_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { drm_crtc_vblank_on(crtc); } static void 
dcss_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, base); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index b6d864d7a0df..7ebd99ee3240 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -240,13 +240,13 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc, } static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { drm_crtc_vblank_on(crtc); } static void ipu_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index aaa324bd5572..b9c156e13156 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -303,7 +303,7 @@ ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode } static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *oldstate) + struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); u32 ctrl = 0; @@ -323,26 +323,27 @@ static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc, } static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *oldstate) + struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); - struct drm_crtc_state *state = crtc->state; - struct drm_pending_vblank_event *event = state->event; + struct drm_crtc_state *crtc_state = crtc->state; + struct drm_pending_vblank_event *event = crtc_state->event; - if (drm_atomic_crtc_needs_modeset(state)) { - ingenic_drm_crtc_update_timings(priv, &state->mode); + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + ingenic_drm_crtc_update_timings(priv, &crtc_state->mode); priv->update_clk_rate = true; } if (priv->update_clk_rate) { mutex_lock(&priv->clk_mutex); - clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000); + clk_set_rate(priv->pix_clk, + crtc_state->adjusted_mode.clock * 1000); priv->update_clk_rate = false; mutex_unlock(&priv->clk_mutex); } if (event) { - state->event = NULL; + crtc_state->event = NULL; spin_lock_irq(&crtc->dev->event_lock); if (drm_crtc_vblank_get(crtc) == 0) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index c28f5d7aac1a..23f5c10b0c67 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -575,24 +575,24 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, } static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { - struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state); + struct mtk_crtc_state *crtc_state = to_mtk_crtc_state(crtc->state); struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - if (mtk_crtc->event && state->base.event) + if (mtk_crtc->event && crtc_state->base.event) DRM_ERROR("new event while there is still a pending event\n"); - if (state->base.event) { - state->base.event->pipe = drm_crtc_index(crtc); + if (crtc_state->base.event) { + crtc_state->base.event->pipe = drm_crtc_index(crtc); WARN_ON(drm_crtc_vblank_get(crtc) != 0); - mtk_crtc->event = state->base.event; 
- state->base.event = NULL; + mtk_crtc->event = crtc_state->base.event; + crtc_state->base.event = NULL; } } static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); int i; diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 247ce085886b..d70616da8ce2 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -201,7 +201,7 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, } static void meson_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); unsigned long flags; @@ -217,7 +217,7 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, } static void meson_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct meson_drm *priv = meson_crtc->priv; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 169df7ca85fd..e55be2922c2f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -486,7 +486,7 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) } static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); struct drm_encoder *encoder; @@ -527,7 +527,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, } static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct dpu_crtc *dpu_crtc; struct drm_device *dev; diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index af80f3baf05b..34e3186e236d 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -316,14 +316,14 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, } static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); DBG("%s: begin", mdp4_crtc->name); } static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct drm_device *dev = crtc->dev; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 500f885c0eae..4a53d7b42e9c 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -772,13 +772,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, } static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { DBG("%s: begin", crtc->name); } static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index b0757f84a979..eb0e2b08329b 100644 --- 
a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -285,7 +285,7 @@ static int mxsfb_crtc_atomic_check(struct drm_crtc *crtc, } static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_pending_vblank_event *event; diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 69a0770ba38e..d7442aa55f89 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -601,12 +601,12 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc, } static void omap_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { } static void omap_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct omap_drm_private *priv = crtc->dev->dev_private; struct omap_crtc *omap_crtc = to_omap_crtc(crtc); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 45fd76e04bdc..07a3e3c23f09 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -372,7 +372,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, } static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { qxl_crtc_update_monitors_config(crtc, "flush"); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 460fb07b786f..b5fb941e0f53 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -785,7 +785,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, } static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); @@ -814,7 +814,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, } static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct drm_device *dev = rcrtc->crtc.dev; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index fcbd758e6531..8cd39fca81a3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1246,8 +1246,10 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc, } static void vop_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct vop *vop = to_vop(crtc); /* @@ -1462,8 +1464,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc, } static void vop_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct drm_atomic_state *old_state = old_crtc_state->state; struct drm_plane_state *old_plane_state, *new_plane_state; struct vop *vop = to_vop(crtc); diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 5726746f6d18..409795786f03 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -133,7 +133,7 @@ 
sti_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void sti_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct drm_device *drm_dev = crtc->dev; struct sti_mixer *mixer = to_sti_mixer(crtc); diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index e9af92d4a74b..3980677435cb 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -596,7 +596,7 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); struct drm_device *ddev = crtc->dev; diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 8f91391832db..45d9eb552d86 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -61,8 +61,10 @@ static int sun4i_crtc_atomic_check(struct drm_crtc *crtc, } static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct drm_device *dev = crtc->dev; struct sunxi_engine *engine = scrtc->engine; @@ -82,7 +84,7 @@ static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc, } static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct drm_pending_vblank_event *event = crtc->state->event; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 52acc2f8f798..2d86627b0d4e 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1918,7 +1918,7 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, } static void tegra_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { unsigned long flags; @@ -1937,17 +1937,17 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc, } static void tegra_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { - struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc_state *crtc_state = to_dc_state(crtc->state); struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; - value = state->planes << 8 | GENERAL_UPDATE; + value = crtc_state->planes << 8 | GENERAL_UPDATE; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); - value = state->planes | GENERAL_ACT_REQ; + value = crtc_state->planes | GENERAL_ACT_REQ; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); } diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index 6739f489dfdf..2218da3b3ca3 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -163,8 +163,10 @@ static void tidss_crtc_position_planes(struct tidss_device *tidss, } static void tidss_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = 
to_tidss(ddev); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index d87a9fd4a203..40c59f4bd962 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -535,7 +535,7 @@ static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc, } static void tilcdc_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { if (!crtc->state->event) return; diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index 931c55126148..322bf7133ba1 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -223,7 +223,7 @@ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc, } static void vbox_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { } diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 7003e7f14a48..5d3d8ed0b775 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -916,7 +916,8 @@ int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output); int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state); void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); -void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *state); +void vc4_hvs_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state); void vc4_hvs_dump_state(struct drm_device *dev); void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel); void vc4_hvs_mask_underrun(struct drm_device *dev, int channel); diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 4d0a833366ce..0bd5ea435120 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -414,8 +414,10 @@ void vc4_hvs_atomic_disable(struct drm_crtc *crtc, } void vc4_hvs_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index e81183ab87e0..4bf74836bd53 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -117,7 +117,7 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, } static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 6a49e70bdf18..0443b7deeaef 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -228,7 +228,7 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, } static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); @@ -239,7 +239,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, } static void vkms_crtc_atomic_flush(struct drm_crtc *crtc, - 
struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index a74c9454ade2..bc67f2b930e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -554,13 +554,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { } void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct drm_pending_vblank_event *event = crtc->state->event; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index b3d4e7b4c8c5..03f3694015ce 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -475,9 +475,9 @@ void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps, int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state); void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); + struct drm_atomic_state *state); void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); + struct drm_atomic_state *state); void vmw_du_crtc_reset(struct drm_crtc *crtc); struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc); void vmw_du_crtc_destroy_state(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index 058fbcebe6ce..444865af9e36 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1513,14 +1513,14 @@ static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc, static void zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { drm_crtc_vblank_on(crtc); } static void zynqmp_disp_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { if (crtc->state->event) { struct drm_pending_vblank_event *event; diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c index d2a529eba3c9..904f62f3bfc1 100644 --- a/drivers/gpu/drm/zte/zx_vou.c +++ b/drivers/gpu/drm/zte/zx_vou.c @@ -473,7 +473,7 @@ static void zx_crtc_atomic_disable(struct drm_crtc *crtc, } static void zx_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_pending_vblank_event *event = crtc->state->event; diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index b97441deaf93..f2de050085be 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -373,7 +373,7 @@ struct drm_crtc_helper_funcs { * transitional plane helpers, but it is optional. */ void (*atomic_begin)(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); + struct drm_atomic_state *state); /** * @atomic_flush: * @@ -397,7 +397,7 @@ struct drm_crtc_helper_funcs { * transitional plane helpers, but it is optional. 
*/ void (*atomic_flush)(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); + struct drm_atomic_state *state); /** * @atomic_enable: -- cgit v1.2.3 From 9bb7b689274b67ecb3641e399e76f84adc627df1 Mon Sep 17 00:00:00 2001 From: KuoHsiang Chou <kuohsiang_chou@aspeedtech.com> Date: Fri, 30 Oct 2020 15:42:12 +0800 Subject: drm/ast: Support 1600x900 with 108MHz PCLK [New] Create the setting for the 1600x900 @60Hz refresh rate using a 108MHz pixel clock. Signed-off-by: KuoHsiang Chou <kuohsiang_chou@aspeedtech.com> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de> Link: https://patchwork.freedesktop.org/patch/msgid/20201030074212.22401-1-kuohsiang_chou@aspeedtech.com --- drivers/gpu/drm/ast/ast_tables.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index d665dd5af5dd..3d013946714e 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -282,6 +282,8 @@ static const struct ast_vbios_enhtable res_1360x768[] = { }; static const struct ast_vbios_enhtable res_1600x900[] = { + {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A }, {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | AST2500PreCatchCRT), 60, 1, 0x3A }, -- cgit v1.2.3 From 95d7a1a6f867f5e6acf62e07a463e0bea1d47b68 Mon Sep 17 00:00:00 2001 From: Bernard Zhao <bernard@vivo.com> Date: Sun, 1 Nov 2020 19:07:36 -0800 Subject: gpu/drm: delete duplicate check in if condition In function drm_bridge_connector_get_modes_edid, drm_edid_is_valid already checks whether the EDID is NULL (!edid), so there is no need to check again in the if branch. Signed-off-by: Bernard Zhao <bernard@vivo.com> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de> Link: https://patchwork.freedesktop.org/patch/msgid/20201102030736.3833-1-bernard@vivo.com --- drivers/gpu/drm/drm_bridge_connector.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c index a58cbde59c34..791379816837 100644 --- a/drivers/gpu/drm/drm_bridge_connector.c +++ b/drivers/gpu/drm/drm_bridge_connector.c @@ -241,7 +241,7 @@ static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector, goto no_edid; edid = bridge->funcs->get_edid(bridge, connector); - if (!edid || !drm_edid_is_valid(edid)) { + if (!drm_edid_is_valid(edid)) { kfree(edid); goto no_edid; } -- cgit v1.2.3 From 7a60c2dd0f575ab14a457e99582af0ca1e072a74 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe <jgg@nvidia.com> Date: Wed, 28 Oct 2020 16:15:26 -0300 Subject: drm: Remove SCATTERLIST_MAX_SEGMENT Since commit 9a40401cfa13 ("lib/scatterlist: Do not limit max_segment to PAGE_ALIGNED values") the max_segment input to sg_alloc_table_from_pages() does not have to be any special value. The new algorithm will never create segments larger than what the user provides. Thus eliminate this confusing constant. - vmwgfx should use the HW capability, not mix in the OS page size when calling dma_set_max_seg_size() - i915 uses i915_sg_segment_size() both for sg_alloc_table_from_pages and for some open coded sgl construction. This doesn't change the value since rounddown(UINT_MAX, PAGE_SIZE) == SCATTERLIST_MAX_SEGMENT - drm_prime_pages_to_sg uses it as a default if max_segment is zero; UINT_MAX is fine to use directly.
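With the constant gone, callers simply default max_segment to UINT_MAX and let the core split segments as needed. A minimal sketch of the resulting pattern, assuming the same helpers the drm_prime hunk below uses; the wrapper function itself is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch only, not part of the patch: pick max_segment without any
 * SCATTERLIST_MAX_SEGMENT clamp. example_pages_to_sgl() is hypothetical;
 * dma_max_mapping_size() and __sg_alloc_table_from_pages() are the real
 * helpers used in drm_prime_pages_to_sg() below.
 */
static struct scatterlist *example_pages_to_sgl(struct sg_table *sgt,
						struct device *dma_dev,
						struct page **pages,
						unsigned int nr_pages)
{
	size_t max_segment = 0;

	if (dma_dev)
		max_segment = dma_max_mapping_size(dma_dev);
	if (max_segment == 0)
		max_segment = UINT_MAX;	/* no special upper bound needed anymore */

	return __sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					   (unsigned long)nr_pages << PAGE_SHIFT,
					   max_segment, NULL, 0, GFP_KERNEL);
}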
Cc: Gerd Hoffmann <kraxel@redhat.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: Thomas Hellstrom <thellstrom@vmware.com> Cc: Qian Cai <cai@lca.pw> Cc: "Ursulin, Tvrtko" <tvrtko.ursulin@intel.com> Suggested-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: https://patchwork.freedesktop.org/patch/msgid/0-v1-44733fccd781+13d-rm_scatterlist_max_jgg@nvidia.com --- drivers/gpu/drm/drm_prime.c | 4 ++-- drivers/gpu/drm/i915/i915_scatterlist.h | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 3 +-- include/linux/scatterlist.h | 6 ------ tools/testing/scatterlist/main.c | 2 +- 5 files changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 187b55ede62e..a7b61c2d9190 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -820,8 +820,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, if (dev) max_segment = dma_max_mapping_size(dev->dev); - if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT) - max_segment = SCATTERLIST_MAX_SEGMENT; + if (max_segment == 0) + max_segment = UINT_MAX; sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, nr_pages << PAGE_SHIFT, max_segment, diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index b7b59328cb76..883dd8d09d6b 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -112,7 +112,7 @@ static inline unsigned int i915_sg_segment_size(void) unsigned int size = swiotlb_max_segment(); if (size == 0) - return SCATTERLIST_MAX_SEGMENT; + size = UINT_MAX; size = rounddown(size, PAGE_SIZE); /* swiotlb_max_segment_size can return 1 byte when it means one page. */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index b3a60959b5d5..0c42d2c05f43 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -794,8 +794,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (unlikely(ret != 0)) goto out_err0; - dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK, - SCATTERLIST_MAX_SEGMENT)); + dma_set_max_seg_size(dev->dev, U32_MAX); if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max GMR ids is %u\n", diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 36c47e7e66a2..6f70572b2938 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -18,12 +18,6 @@ struct scatterlist { #endif }; -/* - * Since the above length field is an unsigned int, below we define the maximum - * length in bytes that can be stored in one scatterlist entry. - */ -#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK) - /* * These macros should be used after a dma_map_sg call has been done * to get bus addresses of each of the SG entries and their lengths. 
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
index b2c7e9f7b8d3..d264bf853034 100644
--- a/tools/testing/scatterlist/main.c
+++ b/tools/testing/scatterlist/main.c
@@ -50,7 +50,7 @@ static void fail(struct test *test, struct sg_table *st, const char *cond)

 int main(void)
 {
-	const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
+	const unsigned int sgmax = UINT_MAX;
 	struct test *test, tests[] = {
 		{ -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
 		{ -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },
-- cgit v1.2.3

From 7d6763ab77b3c047cf7d31ca7c4b799808a684a6 Mon Sep 17 00:00:00 2001
From: Boris Brezillon <boris.brezillon@collabora.com>
Date: Sun, 1 Nov 2020 18:38:17 +0100
Subject: drm/panfrost: Remove unused variables in panfrost_job_close()

Commit a17d609e3e21 ("drm/panfrost: Don't corrupt the queue mutex on
open/close") left unused variables behind, thus generating a warning at
compilation time. Remove those variables.

Fixes: a17d609e3e21 ("drm/panfrost: Don't corrupt the queue mutex on open/close")
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201101173817.831769-1-boris.brezillon@collabora.com
---
 drivers/gpu/drm/panfrost/panfrost_job.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 4902bc6624c8..e75b7d2192f7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -613,8 +613,6 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)

 void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
 {
-	struct panfrost_device *pfdev = panfrost_priv->pfdev;
-	struct panfrost_job_slot *js = pfdev->js;
 	int i;

 	for (i = 0; i < NUM_JOB_SLOTS; i++)
-- cgit v1.2.3

From 35bed3fa9ec58a5b5406c60f66faff86f00fa86b Mon Sep 17 00:00:00 2001
From: Thomas Zimmermann <tzimmermann@suse.de>
Date: Wed, 21 Oct 2020 14:12:41 +0200
Subject: drivers/video: Fix -Wstringop-truncation in hdmi.c

Trying to copy into the string fields with strncpy() gives a warning
from gcc. Both fields are part of a packed HDMI header and do not
require a terminating \0 character.

../drivers/video/hdmi.c: In function 'hdmi_spd_infoframe_init':
../drivers/video/hdmi.c:230:2: warning: 'strncpy' specified bound 8 equals destination size [-Wstringop-truncation]
  230 |  strncpy(frame->vendor, vendor, sizeof(frame->vendor));
      |  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
../drivers/video/hdmi.c:231:2: warning: 'strncpy' specified bound 16 equals destination size [-Wstringop-truncation]
  231 |  strncpy(frame->product, product, sizeof(frame->product));
      |  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Just use memcpy() instead.
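For illustration, the replacement relies on the preceding memset() for
zero-padding; a minimal sketch of the pattern (min() is the kernel's
minmax.h helper, and the field size is that of the SPD infoframe):

	/*
	 * Sketch: bounded copy into a fixed-size field that needs no
	 * terminating NUL. The frame was already zero-filled, so short
	 * strings stay zero-padded and long ones are truncated to the
	 * field size, without tripping -Wstringop-truncation.
	 */
	size_t len = strlen(vendor);

	memcpy(frame->vendor, vendor, min(len, sizeof(frame->vendor)));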
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20201021121241.17623-1-tzimmermann@suse.de
---
 drivers/video/hdmi.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index b7a1d6fae90d..1e4cb63d0d11 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -221,14 +221,18 @@ EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
 int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
 			    const char *vendor, const char *product)
 {
+	size_t len;
+
 	memset(frame, 0, sizeof(*frame));

 	frame->type = HDMI_INFOFRAME_TYPE_SPD;
 	frame->version = 1;
 	frame->length = HDMI_SPD_INFOFRAME_SIZE;

-	strncpy(frame->vendor, vendor, sizeof(frame->vendor));
-	strncpy(frame->product, product, sizeof(frame->product));
+	len = strlen(vendor);
+	memcpy(frame->vendor, vendor, min(len, sizeof(frame->vendor)));
+	len = strlen(product);
+	memcpy(frame->product, product, min(len, sizeof(frame->product)));

 	return 0;
 }
-- cgit v1.2.3

From 3fd9886815af458ab88fc4518a9f5137beb0097e Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime@cerno.tech>
Date: Mon, 2 Nov 2020 14:38:32 +0100
Subject: drm/nouveau/kms/nv50-: Use state helper instead of crtc pointer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

dispnv50 references the crtc->state pointer in order to get the current
CRTC state in its atomic_check hook, which would be the old CRTC state
in the global atomic state. Use the drm_atomic_get_old_crtc_state helper
to get that state to make it more obvious.

Suggested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201102133834.1176740-1-maxime@cerno.tech
---
 drivers/gpu/drm/nouveau/dispnv50/head.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 0542ca22b33a..537c1ef2e464 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -313,11 +313,13 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 static int
 nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+									      crtc);
 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
 									   crtc);
 	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 	struct nv50_head *head = nv50_head(crtc);
-	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+	struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
 	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
 	struct nouveau_conn_atom *asyc = NULL;
 	struct drm_connector_state *conns;
-- cgit v1.2.3

From d74252bb8f0e38194e7457f7b4e5a8a33514bc24 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime@cerno.tech>
Date: Mon, 2 Nov 2020 14:38:34 +0100
Subject: drm: Use the state pointer directly in atomic_check
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Now that atomic_check takes the global atomic state as a parameter, we
don't need to go through the pointer in the CRTC state.
This was done using the following coccinelle script:

@ crtc_atomic_func @
identifier helpers;
identifier func;
@@

static struct drm_crtc_helper_funcs helpers = {
	...,
	.atomic_check = func,
	...,
};

@@
identifier crtc_atomic_func.func;
identifier crtc, state;
@@

 func(struct drm_crtc *crtc, struct drm_atomic_state *state)
 {
	...
-	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	...
	when != crtc_state
-	crtc_state->state
+	state
	...
 }

@@
struct drm_crtc_state *crtc_state;
identifier crtc_atomic_func.func;
identifier crtc, state;
@@

 func(struct drm_crtc *crtc, struct drm_atomic_state *state)
 {
	...
-	crtc_state->state
+	state
	...
 }

Suggested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201102133834.1176740-3-maxime@cerno.tech
---
 drivers/gpu/drm/drm_simple_kms_helper.c | 2 +-
 drivers/gpu/drm/mxsfb/mxsfb_kms.c       | 2 +-
 drivers/gpu/drm/omapdrm/omap_crtc.c     | 2 +-
 drivers/gpu/drm/tilcdc/tilcdc_crtc.c    | 6 +++---
 drivers/gpu/drm/vc4/vc4_crtc.c          | 2 +-
 drivers/gpu/drm/xlnx/zynqmp_disp.c      | 4 +---
 6 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 4b46689634dd..743e57c1b44f 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -97,7 +97,7 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
 	if (has_primary != crtc_state->enable)
 		return -EINVAL;

-	return drm_atomic_add_affected_planes(crtc_state->state, crtc);
+	return drm_atomic_add_affected_planes(state, crtc);
 }

 static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index eb0e2b08329b..9040835289a8 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -281,7 +281,7 @@ static int mxsfb_crtc_atomic_check(struct drm_crtc *crtc,
 		return -EINVAL;

 	/* TODO: Is this needed ?
	 */
-	return drm_atomic_add_affected_planes(crtc_state->state, crtc);
+	return drm_atomic_add_affected_planes(state, crtc);
 }

 static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index d7442aa55f89..49621b2e1ab5 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -583,7 +583,7 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
 		return -EINVAL;
 	}

-	pri_state = drm_atomic_get_new_plane_state(crtc_state->state,
+	pri_state = drm_atomic_get_new_plane_state(state,
 						   crtc->primary);
 	if (pri_state) {
 		struct omap_crtc_state *omap_crtc_state =
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 40c59f4bd962..30213708fc99 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -665,9 +665,9 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
 	if (!crtc_state->active)
 		return 0;

-	if (crtc_state->state->planes[0].ptr != crtc->primary ||
-	    crtc_state->state->planes[0].state == NULL ||
-	    crtc_state->state->planes[0].state->crtc != crtc) {
+	if (state->planes[0].ptr != crtc->primary ||
+	    state->planes[0].state == NULL ||
+	    state->planes[0].state->crtc != crtc) {
 		dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 06088854c647..ea710beb8e00 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -597,7 +597,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 	if (ret)
 		return ret;

-	for_each_new_connector_in_state(crtc_state->state, conn, conn_state,
+	for_each_new_connector_in_state(state, conn, conn_state,
 					i) {
 		if (conn_state->crtc != crtc)
 			continue;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 444865af9e36..c685d94409b0 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1506,9 +1506,7 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
 static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc,
 					 struct drm_atomic_state *state)
 {
-	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
-									   crtc);
-	return drm_atomic_add_affected_planes(crtc_state->state, crtc);
+	return drm_atomic_add_affected_planes(state, crtc);
 }

 static void
-- cgit v1.2.3

From 586052b0a6062e2fa98189d7f24d8cb9ccf4258b Mon Sep 17 00:00:00 2001
From: Christian König <christian.koenig@amd.com>
Date: Mon, 2 Nov 2020 13:01:53 +0100
Subject: drm/ttm: rework no_retry handling v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

During eviction we do want to trigger the OOM killer. Only while doing
new allocations should we try to avoid that and return -ENOMEM to the
application instead.

v2: rename the flag to gfp_retry_mayfail.
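For illustration, opting in now happens per operation rather than per
device; a minimal sketch mirroring the amdgpu conversion below (the
field values are illustrative):

	/*
	 * Sketch: new allocations fail with -ENOMEM instead of invoking
	 * the OOM killer, while evictions keep the default retrying
	 * behaviour.
	 */
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.gfp_retry_mayfail = true,
	};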
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/398685/
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 3 ---
 drivers/gpu/drm/ttm/ttm_pool.c             | 2 +-
 drivers/gpu/drm/ttm/ttm_tt.c               | 7 -------
 include/drm/ttm/ttm_bo_api.h               | 2 ++
 include/drm/ttm/ttm_bo_driver.h            | 3 ---
 6 files changed, 5 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1aa516429c80..4e9dfbea31c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -516,6 +516,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = (bp->type != ttm_bo_type_kernel),
 		.no_wait_gpu = bp->no_wait_gpu,
+		/* We opt to avoid OOM on system pages allocations */
+		.gfp_retry_mayfail = true,
 		.resv = bp->resv,
 		.flags = bp->type != ttm_bo_type_kernel ?
 			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bd6e6641c3fc..c01c060e4ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1914,9 +1914,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	}
 	adev->mman.initialized = true;

-	/* We opt to avoid OOM on system pages allocations */
-	adev->mman.bdev.no_retry = true;
-
 	/* Initialize VRAM pool with all of VRAM divided into pages */
 	r = amdgpu_vram_mgr_init(adev);
 	if (r) {
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 1e50deefb5d5..44ec41aa78d6 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -367,7 +367,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 		gfp_flags |= __GFP_ZERO;

-	if (tt->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+	if (ctx->gfp_retry_mayfail)
 		gfp_flags |= __GFP_RETRY_MAYFAIL;

 	if (pool->use_dma32)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 8861a74ac335..cfd633c7e764 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -51,9 +51,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 	if (bo->ttm)
 		return 0;

-	if (bdev->no_retry)
-		page_flags |= TTM_PAGE_FLAG_NO_RETRY;
-
 	switch (bo->type) {
 	case ttm_bo_type_device:
 		if (zero_alloc)
@@ -211,8 +208,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)

 	swap_space = swap_storage->f_mapping;
 	gfp_mask = mapping_gfp_mask(swap_space);
-	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
-		gfp_mask |= __GFP_RETRY_MAYFAIL;

 	for (i = 0; i < ttm->num_pages; ++i) {
 		from_page = shmem_read_mapping_page_gfp(swap_space, i,
@@ -260,8 +255,6 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)

 	swap_space = swap_storage->f_mapping;
 	gfp_mask = mapping_gfp_mask(swap_space);
-	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
-		gfp_mask |= __GFP_RETRY_MAYFAIL;

 	for (i = 0; i < ttm->num_pages; ++i) {
 		from_page = ttm->pages[i];
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 37102e45e496..4637357ba856 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -195,6 +195,7 @@ struct ttm_bo_kmap_obj {
 *
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL flag when allocating pages.
 * @resv: Reservation object to allow reserved evictions with.
 * @flags: Including the following flags
 *
@@ -204,6 +205,7 @@ struct ttm_bo_kmap_obj {
 struct ttm_operation_ctx {
 	bool interruptible;
 	bool no_wait_gpu;
+	bool gfp_retry_mayfail;
 	struct dma_resv *resv;
 	uint64_t bytes_moved;
 	uint32_t flags;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e9f683fa72dc..da8208f43378 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -276,7 +276,6 @@ extern struct ttm_bo_global {
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
- * @no_retry: Don't retry allocation if it fails
 *
 */

@@ -314,8 +313,6 @@ struct ttm_bo_device {
 	 */
 	struct delayed_work wq;

-
-	bool no_retry;
 };

 static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
-- cgit v1.2.3

From c44dfe4de053914cae61ed0a36421b3017f428bd Mon Sep 17 00:00:00 2001
From: Christian König <christian.koenig@amd.com>
Date: Mon, 2 Nov 2020 13:16:13 +0100
Subject: drm/ttm: replace context flags with bools v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The ttm_operation_ctx structure has a mixture of flags and bools. Drop
the flags and replace them with bools as well.

v2: fix typos, improve comments

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/398686/
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c     |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  5 ++---
 drivers/gpu/drm/ttm/ttm_bo.c               |  2 +-
 drivers/gpu/drm/ttm/ttm_bo_vm.c            |  3 +--
 drivers/gpu/drm/ttm/ttm_memory.c           |  3 ++-
 drivers/gpu/drm/ttm/ttm_resource.c         |  2 +-
 include/drm/ttm/ttm_bo_api.h               | 13 ++++++-------
 7 files changed, 14 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d50b63a93d37..8466558d0d93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -404,8 +404,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = true,
 		.no_wait_gpu = false,
-		.resv = bo->tbo.base.resv,
-		.flags = 0
+		.resv = bo->tbo.base.resv
 	};
 	uint32_t domain;
 	int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4e9dfbea31c6..e1f64ef8c765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -518,9 +518,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 		.no_wait_gpu = bp->no_wait_gpu,
 		/* We opt to avoid OOM on system pages allocations */
 		.gfp_retry_mayfail = true,
-		.resv = bp->resv,
-		.flags = bp->type != ttm_bo_type_kernel ?
-			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
+		.allow_res_evict = bp->type != ttm_bo_type_kernel,
+		.resv = bp->resv
 	};
 	struct amdgpu_bo *bo;
 	unsigned long page_align, size = bp->size;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c63b7ea1cd5d..e2a124b3affb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -637,7 +637,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,

 	if (bo->base.resv == ctx->resv) {
 		dma_resv_assert_held(bo->base.resv);
-		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
+		if (ctx->allow_res_evict)
 			ret = true;
 		*locked = false;
 		if (busy)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index eeaca5d1efe3..2944fa0af493 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -315,8 +315,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false,
-		.flags = TTM_OPT_FLAG_FORCE_ALLOC
-
+		.force_alloc = true
 	};

 	ttm = bo->ttm;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index f9a90bfaa3c1..5ed1fc8f2ace 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -542,7 +542,8 @@ ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
 {
 	int64_t available;

-	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
+	/* We allow over commit during suspend */
+	if (ctx->force_alloc)
 		return false;

 	available = get_nr_swap_pages() + si_mem_available();
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 4ebc043e2867..b60699bf4816 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -89,7 +89,7 @@ int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false,
-		.flags = TTM_OPT_FLAG_FORCE_ALLOC
+		.force_alloc = true
 	};
 	struct ttm_bo_global *glob = &ttm_bo_glob;
 	struct dma_fence *fence;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 4637357ba856..5ddad88ae6ed 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -196,8 +196,11 @@ struct ttm_bo_kmap_obj {
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL flag when allocating pages.
+ * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
+ * BOs share the same reservation object.
+ * @force_alloc: Don't check the memory account during suspend or CPU page
+ * faults. Should only be used by TTM internally.
 * @resv: Reservation object to allow reserved evictions with.
- * @flags: Including the following flags
 *
 * Context for TTM operations like changing buffer placement or general memory
 * allocation.
@@ -206,16 +209,12 @@ struct ttm_operation_ctx {
 	bool interruptible;
 	bool no_wait_gpu;
 	bool gfp_retry_mayfail;
+	bool allow_res_evict;
+	bool force_alloc;
 	struct dma_resv *resv;
 	uint64_t bytes_moved;
-	uint32_t flags;
 };

-/* Allow eviction of reserved BOs */
-#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
-/* when serving page fault or suspend, allow alloc anyway */
-#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
-
 /**
 * ttm_bo_get - reference a struct ttm_buffer_object
 *
-- cgit v1.2.3

From 0d32c2a72ca8d66378a7ec797ec3bf0a60ef5dcf Mon Sep 17 00:00:00 2001
From: Yejune Deng <yejune.deng@gmail.com>
Date: Tue, 3 Nov 2020 09:48:02 +0800
Subject: drm/panfrost: Replace devm_reset_control_array_get()

devm_reset_control_array_get_optional_exclusive() is more readable.

Signed-off-by: Yejune Deng <yejune.deng@gmail.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1604368082-6032-1-git-send-email-yejune.deng@gmail.com
---
 drivers/gpu/drm/panfrost/panfrost_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index ea8d31863c50..1daf9322954a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -18,7 +18,7 @@

 static int panfrost_reset_init(struct panfrost_device *pfdev)
 {
-	pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
+	pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev);
 	if (IS_ERR(pfdev->rstc)) {
 		dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
 		return PTR_ERR(pfdev->rstc);
-- cgit v1.2.3

From 24e146cdf9f5a8fb464dd98ba8357d662d37d22f Mon Sep 17 00:00:00 2001
From: Simon Ser <contact@emersion.fr>
Date: Tue, 3 Nov 2020 10:31:15 +0000
Subject: drm: unify formatting for color management documentation

Other properties are documented with a colon character after the
property name. Consistently using a colon character allows the docs to
be machine-readable.

Signed-off-by: Simon Ser <contact@emersion.fr>
Cc: Daniel Vetter <daniel@ffwll.ch>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/7dhjUKqlDC0IiRRON0VhWnkuoxcRDLG4EFKzyhcEA@cp7-web-043.plabs.ch
---
 drivers/gpu/drm/drm_color_mgmt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 138ff34b31db..3bcabc2f6e0e 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -97,12 +97,12 @@
 * &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
 * are set up by calling drm_plane_create_color_properties().
 *
- * "COLOR_ENCODING"
+ * "COLOR_ENCODING":
 *	Optional plane enum property to support different non RGB
 *	color encodings. The driver can provide a subset of standard
 *	enum values supported by the DRM plane.
 *
- * "COLOR_RANGE"
+ * "COLOR_RANGE":
 *	Optional plane enum property to support different non RGB
 *	color parameter ranges. The driver can provide a subset of
 *	standard enum values supported by the DRM plane.
-- cgit v1.2.3
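As a usage note, the two properties documented above are created by the
helper named in the surrounding context; a minimal driver-side sketch
(the supported sets and defaults shown here are illustrative, not
mandated by any driver):

	/*
	 * Sketch: expose "COLOR_ENCODING" and "COLOR_RANGE" on a plane
	 * with a subset of the standard enum values, via
	 * drm_plane_create_color_properties().
	 */
	ret = drm_plane_create_color_properties(plane,
						BIT(DRM_COLOR_YCBCR_BT601) |
						BIT(DRM_COLOR_YCBCR_BT709),
						BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
						BIT(DRM_COLOR_YCBCR_FULL_RANGE),
						DRM_COLOR_YCBCR_BT709,
						DRM_COLOR_YCBCR_LIMITED_RANGE);
	if (ret)
		return ret;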