|   |   |   |
|---|---|---|
| author | Dave Airlie <airlied@redhat.com> | 2022-05-06 17:19:11 +1000 |
| committer | Dave Airlie <airlied@redhat.com> | 2022-05-06 17:20:13 +1000 |
| commit | c67f84e97bafe73c47d5773105b114118ffb84df (patch) | |
| tree | d8e01ee364dda98fe99426ab62b4dd9fbd89c980 /drivers/video/fbdev/core | |
| parent | af3847a7472d2def8358b7ae94b14f1d20fd8661 (diff) | |
| parent | 6071c4c2a319da360b0bf2bc397d4fefad10b2c8 (diff) | |
| download | linux-c67f84e97bafe73c47d5773105b114118ffb84df.tar.bz2 | |
Merge tag 'drm-misc-next-2022-05-05' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.19:
UAPI Changes:
Cross-subsystem Changes:
Core Changes:
- Add DRM-managed mutex initialisation
- edid: Doc improvements
- fbdev: deferred io improvements
- format-helper: consolidate format conversion helpers
- gem: Rework fence handling in drm_gem_plane_helper_prepare_fb
Driver Changes:
- ast: DisplayPort support, locking improvements
- exynos: Revert conversion to devm_drm_of_get_bridge for DSI
- mgag200: locking improvements
- mxsfb: LCDIF CRC support
- nouveau: switch to drm_gem_plane_helper_prepare_fb
- rockchip: Refactor IOMMU initialisation, make some structures static, replace drm_detect_hdmi_monitor with drm_display_info.is_hdmi, support swapped YUV formats, clock improvements, rk3568 support, VOP2 support
- bridge:
  - adv7511: Enable CEC for ADV7535
  - it6505: Send DPCD SET_POWER to monitor at disable
  - mcde_dsi: Revert conversion to devm_drm_of_get_bridge
  - tc358767: Fix for eDP and DP DT endpoint parsing
  - new bridge: i.MX8MP LDB
- panel:
  - new panel: Startek KD070WVFPA043-C069A
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20220505131127.lcqvsywo7qt3eywk@houat
Diffstat (limited to 'drivers/video/fbdev/core')
|   |   |   |
|---|---|---|
| -rw-r--r-- | drivers/video/fbdev/core/fb_defio.c | 212 |
| -rw-r--r-- | drivers/video/fbdev/core/fbmem.c | 22 |
2 files changed, 156 insertions, 78 deletions
```diff
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 6aaf6d0abf39..c730253ab85c 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -36,6 +36,60 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs
 	return page;
 }
 
+static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
+								 unsigned long offset,
+								 struct page *page)
+{
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct list_head *pos = &fbdefio->pagereflist;
+	unsigned long pgoff = offset >> PAGE_SHIFT;
+	struct fb_deferred_io_pageref *pageref, *cur;
+
+	if (WARN_ON_ONCE(pgoff >= info->npagerefs))
+		return NULL; /* incorrect allocation size */
+
+	/* 1:1 mapping between pageref and page offset */
+	pageref = &info->pagerefs[pgoff];
+
+	/*
+	 * This check is to catch the case where a new process could start
+	 * writing to the same page through a new PTE. This new access
+	 * can cause a call to .page_mkwrite even if the original process'
+	 * PTE is marked writable.
+	 */
+	if (!list_empty(&pageref->list))
+		goto pageref_already_added;
+
+	pageref->page = page;
+	pageref->offset = pgoff << PAGE_SHIFT;
+
+	if (unlikely(fbdefio->sort_pagereflist)) {
+		/*
+		 * We loop through the list of pagerefs before adding in
+		 * order to keep the pagerefs sorted. This has significant
+		 * overhead of O(n^2) with n being the number of written
+		 * pages. If possible, drivers should try to work with
+		 * unsorted page lists instead.
+		 */
+		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
+			if (cur->offset > pageref->offset)
+				break;
+		}
+		pos = &cur->list;
+	}
+
+	list_add_tail(&pageref->list, pos);
+
+pageref_already_added:
+	return pageref;
+}
+
+static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
+				       struct fb_info *info)
+{
+	list_del_init(&pageref->list);
+}
+
 /* this is to find and return the vmalloc-ed fb pages */
 static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
 {
@@ -59,7 +113,7 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
 		printk(KERN_ERR "no mapping available\n");
 
 	BUG_ON(!page->mapping);
-	page->index = vmf->pgoff;
+	page->index = vmf->pgoff; /* for page_mkclean() */
 
 	vmf->page = page;
 	return 0;
@@ -89,29 +143,30 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
 
-/* vm_ops->page_mkwrite handler */
-static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+/*
+ * Adds a page to the dirty list. Call this from struct
+ * vm_operations_struct.page_mkwrite.
+ */
+static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
+					    struct page *page)
 {
-	struct page *page = vmf->page;
-	struct fb_info *info = vmf->vma->vm_private_data;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct list_head *pos = &fbdefio->pagelist;
-
-	/* this is a callback we get when userspace first tries to
-	write to the page. we schedule a workqueue. that workqueue
-	will eventually mkclean the touched pages and execute the
-	deferred framebuffer IO. then if userspace touches a page
-	again, we repeat the same scheme */
-
-	file_update_time(vmf->vma->vm_file);
+	struct fb_deferred_io_pageref *pageref;
+	vm_fault_t ret;
 
 	/* protect against the workqueue changing the page list */
 	mutex_lock(&fbdefio->lock);
 
 	/* first write in this cycle, notify the driver */
-	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
+	if (fbdefio->first_io && list_empty(&fbdefio->pagereflist))
 		fbdefio->first_io(info);
 
+	pageref = fb_deferred_io_pageref_get(info, offset, page);
+	if (WARN_ON_ONCE(!pageref)) {
+		ret = VM_FAULT_OOM;
+		goto err_mutex_unlock;
+	}
+
 	/*
 	 * We want the page to remain locked from ->page_mkwrite until
 	 * the PTE is marked dirty to avoid page_mkclean() being called
@@ -120,47 +175,49 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
 	 * Do this by locking the page here and informing the caller
 	 * about it with VM_FAULT_LOCKED.
 	 */
-	lock_page(page);
-
-	/*
-	 * This check is to catch the case where a new process could start
-	 * writing to the same page through a new PTE. This new access
-	 * can cause a call to .page_mkwrite even if the original process'
-	 * PTE is marked writable.
-	 *
-	 * TODO: The lru field is owned by the page cache; hence the name.
-	 *	 We dequeue in fb_deferred_io_work() after flushing the
-	 *	 page's content into video memory. Instead of lru, fbdefio
-	 *	 should have it's own field.
-	 */
-	if (!list_empty(&page->lru))
-		goto page_already_added;
-
-	if (unlikely(fbdefio->sort_pagelist)) {
-		/*
-		 * We loop through the pagelist before adding in order to
-		 * keep the pagelist sorted. This has significant overhead
-		 * of O(n^2) with n being the number of written pages. If
-		 * possible, drivers should try to work with unsorted page
-		 * lists instead.
-		 */
-		struct page *cur;
+	lock_page(pageref->page);
 
-		list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-			if (cur->index > page->index)
-				break;
-		}
-		pos = &cur->lru;
-	}
-
-	list_add_tail(&page->lru, pos);
-
-page_already_added:
 	mutex_unlock(&fbdefio->lock);
 
 	/* come back after delay to process the deferred IO */
 	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
 
 	return VM_FAULT_LOCKED;
+
+err_mutex_unlock:
+	mutex_unlock(&fbdefio->lock);
+	return ret;
+}
+
+/*
+ * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
+ * @fb_info: The fbdev info structure
+ * @vmf: The VM fault
+ *
+ * This is a callback we get when userspace first tries to
+ * write to the page. We schedule a workqueue. That workqueue
+ * will eventually mkclean the touched pages and execute the
+ * deferred framebuffer IO. Then if userspace touches a page
+ * again, we repeat the same scheme.
+ *
+ * Returns:
+ * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
+ */
+static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
+{
+	unsigned long offset = vmf->address - vmf->vma->vm_start;
+	struct page *page = vmf->page;
+
+	file_update_time(vmf->vma->vm_file);
+
+	return fb_deferred_io_track_page(info, offset, page);
+}
+
+/* vm_ops->page_mkwrite handler */
+static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+{
+	struct fb_info *info = vmf->vma->vm_private_data;
+
+	return fb_deferred_io_page_mkwrite(info, vmf);
 }
 
 static const struct vm_operations_struct fb_deferred_io_vm_ops = {
@@ -181,52 +238,70 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	vma->vm_private_data = info;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
 
 /* workqueue callback */
 static void fb_deferred_io_work(struct work_struct *work)
 {
-	struct fb_info *info = container_of(work, struct fb_info,
-					    deferred_work.work);
-	struct list_head *node, *next;
-	struct page *cur;
+	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
+	struct fb_deferred_io_pageref *pageref, *next;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
+		struct page *cur = pageref->page;
+
 		lock_page(cur);
 		page_mkclean(cur);
 		unlock_page(cur);
 	}
 
-	/* driver's callback with pagelist */
-	fbdefio->deferred_io(info, &fbdefio->pagelist);
+	/* driver's callback with pagereflist */
+	fbdefio->deferred_io(info, &fbdefio->pagereflist);
 
 	/* clear the list */
-	list_for_each_safe(node, next, &fbdefio->pagelist) {
-		list_del_init(node);
-	}
+	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
+		fb_deferred_io_pageref_put(pageref, info);
+
 	mutex_unlock(&fbdefio->lock);
 }
 
-void fb_deferred_io_init(struct fb_info *info)
+int fb_deferred_io_init(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct page *page;
-	unsigned int i;
+	struct fb_deferred_io_pageref *pagerefs;
+	unsigned long npagerefs, i;
+	int ret;
 
 	BUG_ON(!fbdefio);
+
+	if (WARN_ON(!info->fix.smem_len))
+		return -EINVAL;
+
 	mutex_init(&fbdefio->lock);
 	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
-	INIT_LIST_HEAD(&fbdefio->pagelist);
+	INIT_LIST_HEAD(&fbdefio->pagereflist);
 	if (fbdefio->delay == 0) /* set a default of 1 s */
 		fbdefio->delay = HZ;
 
-	/* initialize all the page lists one time */
-	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
-		page = fb_deferred_io_page(info, i);
-		INIT_LIST_HEAD(&page->lru);
+	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);
+
+	/* alloc a page ref for each page of the display memory */
+	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
+	if (!pagerefs) {
+		ret = -ENOMEM;
+		goto err;
 	}
+	for (i = 0; i < npagerefs; ++i)
+		INIT_LIST_HEAD(&pagerefs[i].list);
+	info->npagerefs = npagerefs;
+	info->pagerefs = pagerefs;
+
+	return 0;
+
+err:
+	mutex_destroy(&fbdefio->lock);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
 
@@ -253,6 +328,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 		page->mapping = NULL;
 	}
 
+	kvfree(info->pagerefs);
 	mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 84427470367b..b445a7a00def 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1334,7 +1334,6 @@ static int
 fb_mmap(struct file *file, struct vm_area_struct * vma)
 {
 	struct fb_info *info = file_fb_info(file);
-	int (*fb_mmap_fn)(struct fb_info *info, struct vm_area_struct *vma);
 	unsigned long mmio_pgoff;
 	unsigned long start;
 	u32 len;
@@ -1343,14 +1342,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 		return -ENODEV;
 	mutex_lock(&info->mm_lock);
 
-	fb_mmap_fn = info->fbops->fb_mmap;
-
-#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
-	if (info->fbdefio)
-		fb_mmap_fn = fb_deferred_io_mmap;
-#endif
-
-	if (fb_mmap_fn) {
+	if (info->fbops->fb_mmap) {
 		int res;
 
 		/*
@@ -1358,9 +1350,19 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 		 * SME protection is removed ahead of the call
 		 */
 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-		res = fb_mmap_fn(info, vma);
+		res = info->fbops->fb_mmap(info, vma);
 		mutex_unlock(&info->mm_lock);
 		return res;
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+	} else if (info->fbdefio) {
+		/*
+		 * FB deferred I/O wants you to handle mmap in your drivers. At a
+		 * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap().
+		 */
+		dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n");
+		mutex_unlock(&info->mm_lock);
+		return -ENODEV;
+#endif
 	}
 
 	/*
```
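For driver authors, the practical upshot of this fb_defio rework is the changed API surface: fb_deferred_io_init() now returns an error code, fb_deferred_io_mmap() is exported and must be wired up explicitly via struct fb_ops.fb_mmap, and the deferred_io callback now receives a list of struct fb_deferred_io_pageref entries instead of raw pages. Below is a minimal, hypothetical sketch of a driver hooking these up; the example_* names are invented for illustration, while the fb_deferred_io_* calls and struct fields are the ones introduced in the diff above.

```c
/*
 * Minimal sketch of a deferred-I/O driver after this rework.
 * The example_* names are hypothetical; the fb_deferred_io_* API and
 * pageref fields are taken from the fb_defio.c changes above.
 */
#include <linux/fb.h>
#include <linux/list.h>
#include <linux/module.h>

/* The deferred_io callback now gets a list of struct fb_deferred_io_pageref. */
static void example_flush_damage(struct fb_info *info, struct list_head *pagereflist)
{
	struct fb_deferred_io_pageref *pageref;

	list_for_each_entry(pageref, pagereflist, list) {
		/* pageref->offset is the byte offset of the dirty page in smem */
		/* ... copy PAGE_SIZE bytes at pageref->offset to the device ... */
	}
}

static struct fb_deferred_io example_defio = {
	.delay		  = HZ / 20,	/* flush dirty pages at most every 50 ms */
	.sort_pagereflist = false,	/* avoid the O(n^2) sorted insert */
	.deferred_io	  = example_flush_damage,
};

static const struct fb_ops example_fb_ops = {
	.owner	 = THIS_MODULE,
	/* fbmem.c no longer substitutes this on its own; drivers must set it. */
	.fb_mmap = fb_deferred_io_mmap,
	/* ... fb_read, fb_write, drawing ops, etc. ... */
};

static int example_setup_deferred_io(struct fb_info *info)
{
	info->fbdefio = &example_defio;
	info->fbops = &example_fb_ops;

	/* Now returns an error; info->fix.smem_len must be set beforehand. */
	return fb_deferred_io_init(info);
}
```

Note the failure mode the fbmem.c hunk introduces: if a driver uses fbdefio but leaves struct fb_ops.fb_mmap unset, fb_mmap() now warns once and returns -ENODEV instead of silently falling back to the deferred-I/O handler as before.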