From 3512f976d252bd5d07d04e9e157f0cd210c959a0 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 24 Apr 2013 18:52:34 +0300 Subject: drm: Add struct drm_rect and assorted utility functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit struct drm_rect represents a simple rectangle. The utility functions are there to help driver writers. v2: Moved the region stuff into its own file, made the smaller funcs static inline, used 64bit maths in the scaled clipping function to avoid overflows (instead it will saturate to INT_MIN or INT_MAX). v3: Renamed drm_region to drm_rect, drm_region_clip to drm_rect_intersect, and drm_region_subsample to drm_rect_downscale. v4: Renamed some function parameters, improve kernel-doc comments a bit, and actually generate documentation for drm_rect.[ch]. v5: s/RETUTRNS/RETURNS/ Reviewed-by: Laurent Pinchart Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Acked-by: Dave Airlie Signed-off-by: Daniel Vetter --- include/drm/drm_rect.h | 132 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 include/drm/drm_rect.h (limited to 'include/drm') diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h new file mode 100644 index 000000000000..2b7278c1bc42 --- /dev/null +++ b/include/drm/drm_rect.h @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2011-2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_RECT_H +#define DRM_RECT_H + +/** + * drm_rect - two dimensional rectangle + * @x1: horizontal starting coordinate (inclusive) + * @x2: horizontal ending coordinate (exclusive) + * @y1: vertical starting coordinate (inclusive) + * @y2: vertical ending coordinate (exclusive) + */ +struct drm_rect { + int x1, y1, x2, y2; +}; + +/** + * drm_rect_adjust_size - adjust the size of the rectangle + * @r: rectangle to be adjusted + * @dw: horizontal adjustment + * @dh: vertical adjustment + * + * Change the size of rectangle @r by @dw in the horizontal direction, + * and by @dh in the vertical direction, while keeping the center + * of @r stationary. + * + * Positive @dw and @dh increase the size, negative values decrease it. 
+ */ +static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh) +{ + r->x1 -= dw >> 1; + r->y1 -= dh >> 1; + r->x2 += (dw + 1) >> 1; + r->y2 += (dh + 1) >> 1; +} + +/** + * drm_rect_translate - translate the rectangle + * @r: rectangle to be tranlated + * @dx: horizontal translation + * @dy: vertical translation + * + * Move rectangle @r by @dx in the horizontal direction, + * and by @dy in the vertical direction. + */ +static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy) +{ + r->x1 += dx; + r->y1 += dy; + r->x2 += dx; + r->y2 += dy; +} + +/** + * drm_rect_downscale - downscale a rectangle + * @r: rectangle to be downscaled + * @horz: horizontal downscale factor + * @vert: vertical downscale factor + * + * Divide the coordinates of rectangle @r by @horz and @vert. + */ +static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert) +{ + r->x1 /= horz; + r->y1 /= vert; + r->x2 /= horz; + r->y2 /= vert; +} + +/** + * drm_rect_width - determine the rectangle width + * @r: rectangle whose width is returned + * + * RETURNS: + * The width of the rectangle. + */ +static inline int drm_rect_width(const struct drm_rect *r) +{ + return r->x2 - r->x1; +} + +/** + * drm_rect_height - determine the rectangle height + * @r: rectangle whose height is returned + * + * RETURNS: + * The height of the rectangle. + */ +static inline int drm_rect_height(const struct drm_rect *r) +{ + return r->y2 - r->y1; +} + +/** + * drm_rect_visible - determine if the the rectangle is visible + * @r: rectangle whose visibility is returned + * + * RETURNS: + * %true if the rectangle is visible, %false otherwise. + */ +static inline bool drm_rect_visible(const struct drm_rect *r) +{ + return drm_rect_width(r) > 0 && drm_rect_height(r) > 0; +} + +bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); +bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, + const struct drm_rect *clip, + int hscale, int vscale); + +#endif -- cgit v1.2.3 From 4954c4282f6b945f1dd5716f92b594a07fa4ffe3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 24 Apr 2013 18:52:35 +0300 Subject: drm: Add drm_rect_calc_{hscale, vscale}() utility functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These functions calculate the scaling factor based on the source and destination rectangles. There are two version of the functions, the strict ones that will return an error if the min/max scaling factor is exceeded, and the relaxed versions that will adjust the src/dst rectangles in order to keep the scaling factor withing the limits. v2: Return error instead of adjusting regions, refactor common parts into one function, and split into strict and relaxed versions. v3: Renamed drm_region to drm_rect, add "_rect_" to the function names. 
v4: Fix "calculcate" typos Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Acked-by: Dave Airlie Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_rect.c | 177 +++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_rect.h | 12 +++ 2 files changed, 189 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index 22091ecdbff4..dc11a2867f20 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -94,3 +94,180 @@ bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, return drm_rect_intersect(dst, clip); } EXPORT_SYMBOL(drm_rect_clip_scaled); + +static int drm_calc_scale(int src, int dst) +{ + int scale = 0; + + if (src < 0 || dst < 0) + return -EINVAL; + + if (dst == 0) + return 0; + + scale = src / dst; + + return scale; +} + +/** + * drm_rect_calc_hscale - calculate the horizontal scaling factor + * @src: source window rectangle + * @dst: destination window rectangle + * @min_hscale: minimum allowed horizontal scaling factor + * @max_hscale: maximum allowed horizontal scaling factor + * + * Calculate the horizontal scaling factor as + * (@src width) / (@dst width). + * + * RETURNS: + * The horizontal scaling factor, or errno of out of limits. + */ +int drm_rect_calc_hscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_hscale, int max_hscale) +{ + int src_w = drm_rect_width(src); + int dst_w = drm_rect_width(dst); + int hscale = drm_calc_scale(src_w, dst_w); + + if (hscale < 0 || dst_w == 0) + return hscale; + + if (hscale < min_hscale || hscale > max_hscale) + return -ERANGE; + + return hscale; +} +EXPORT_SYMBOL(drm_rect_calc_hscale); + +/** + * drm_rect_calc_vscale - calculate the vertical scaling factor + * @src: source window rectangle + * @dst: destination window rectangle + * @min_vscale: minimum allowed vertical scaling factor + * @max_vscale: maximum allowed vertical scaling factor + * + * Calculate the vertical scaling factor as + * (@src height) / (@dst height). + * + * RETURNS: + * The vertical scaling factor, or errno of out of limits. + */ +int drm_rect_calc_vscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_vscale, int max_vscale) +{ + int src_h = drm_rect_height(src); + int dst_h = drm_rect_height(dst); + int vscale = drm_calc_scale(src_h, dst_h); + + if (vscale < 0 || dst_h == 0) + return vscale; + + if (vscale < min_vscale || vscale > max_vscale) + return -ERANGE; + + return vscale; +} +EXPORT_SYMBOL(drm_rect_calc_vscale); + +/** + * drm_calc_hscale_relaxed - calculate the horizontal scaling factor + * @src: source window rectangle + * @dst: destination window rectangle + * @min_hscale: minimum allowed horizontal scaling factor + * @max_hscale: maximum allowed horizontal scaling factor + * + * Calculate the horizontal scaling factor as + * (@src width) / (@dst width). + * + * If the calculated scaling factor is below @min_vscale, + * decrease the height of rectangle @dst to compensate. + * + * If the calculated scaling factor is above @max_vscale, + * decrease the height of rectangle @src to compensate. + * + * RETURNS: + * The horizontal scaling factor. 
+ */ +int drm_rect_calc_hscale_relaxed(struct drm_rect *src, + struct drm_rect *dst, + int min_hscale, int max_hscale) +{ + int src_w = drm_rect_width(src); + int dst_w = drm_rect_width(dst); + int hscale = drm_calc_scale(src_w, dst_w); + + if (hscale < 0 || dst_w == 0) + return hscale; + + if (hscale < min_hscale) { + int max_dst_w = src_w / min_hscale; + + drm_rect_adjust_size(dst, max_dst_w - dst_w, 0); + + return min_hscale; + } + + if (hscale > max_hscale) { + int max_src_w = dst_w * max_hscale; + + drm_rect_adjust_size(src, max_src_w - src_w, 0); + + return max_hscale; + } + + return hscale; +} +EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed); + +/** + * drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor + * @src: source window rectangle + * @dst: destination window rectangle + * @min_vscale: minimum allowed vertical scaling factor + * @max_vscale: maximum allowed vertical scaling factor + * + * Calculate the vertical scaling factor as + * (@src height) / (@dst height). + * + * If the calculated scaling factor is below @min_vscale, + * decrease the height of rectangle @dst to compensate. + * + * If the calculated scaling factor is above @max_vscale, + * decrease the height of rectangle @src to compensate. + * + * RETURNS: + * The vertical scaling factor. + */ +int drm_rect_calc_vscale_relaxed(struct drm_rect *src, + struct drm_rect *dst, + int min_vscale, int max_vscale) +{ + int src_h = drm_rect_height(src); + int dst_h = drm_rect_height(dst); + int vscale = drm_calc_scale(src_h, dst_h); + + if (vscale < 0 || dst_h == 0) + return vscale; + + if (vscale < min_vscale) { + int max_dst_h = src_h / min_vscale; + + drm_rect_adjust_size(dst, 0, max_dst_h - dst_h); + + return min_vscale; + } + + if (vscale > max_vscale) { + int max_src_h = dst_h * max_vscale; + + drm_rect_adjust_size(src, 0, max_src_h - src_h); + + return max_vscale; + } + + return vscale; +} +EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed); diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index 2b7278c1bc42..de24f16a3c4f 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -128,5 +128,17 @@ bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, const struct drm_rect *clip, int hscale, int vscale); +int drm_rect_calc_hscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_hscale, int max_hscale); +int drm_rect_calc_vscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_vscale, int max_vscale); +int drm_rect_calc_hscale_relaxed(struct drm_rect *src, + struct drm_rect *dst, + int min_hscale, int max_hscale); +int drm_rect_calc_vscale_relaxed(struct drm_rect *src, + struct drm_rect *dst, + int min_vscale, int max_vscale); #endif -- cgit v1.2.3 From e7272df342ba337e87e210470bb93d97d192f2e0 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 24 Apr 2013 18:52:36 +0300 Subject: drm: Add drm_rect_debug_print() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a debug function to print the rectangle in a human readable format. v2: Renamed drm_region to drm_rect, the function from drm_region_debug to drm_rect_debug_print(), and use %+d instead of +%d in the format. 
v3: Use %d format for width/height in the non fixed point case as well Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Acked-by: Dave Airlie Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_rect.c | 22 ++++++++++++++++++++++ include/drm/drm_rect.h | 1 + 2 files changed, 23 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index dc11a2867f20..7047ca025787 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /** @@ -271,3 +272,24 @@ int drm_rect_calc_vscale_relaxed(struct drm_rect *src, return vscale; } EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed); + +/** + * drm_rect_debug_print - print the rectangle information + * @r: rectangle to print + * @fixed_point: rectangle is in 16.16 fixed point format + */ +void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point) +{ + int w = drm_rect_width(r); + int h = drm_rect_height(r); + + if (fixed_point) + DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", + w >> 16, ((w & 0xffff) * 15625) >> 10, + h >> 16, ((h & 0xffff) * 15625) >> 10, + r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10, + r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10); + else + DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1); +} +EXPORT_SYMBOL(drm_rect_debug_print); diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index de24f16a3c4f..fe767b757c8c 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -140,5 +140,6 @@ int drm_rect_calc_hscale_relaxed(struct drm_rect *src, int drm_rect_calc_vscale_relaxed(struct drm_rect *src, struct drm_rect *dst, int min_vscale, int max_vscale); +void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point); #endif -- cgit v1.2.3 From 0894c96bff762d0474a8722bba3d420f643db359 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 24 Apr 2013 18:52:37 +0300 Subject: drm: Add drm_rect_equals() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm_rect_equals() tells whether two drm_rects are equal. Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Acked-by: Dave Airlie Signed-off-by: Daniel Vetter --- include/drm/drm_rect.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include/drm') diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index fe767b757c8c..64fa265c6ffb 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -124,6 +124,21 @@ static inline bool drm_rect_visible(const struct drm_rect *r) return drm_rect_width(r) > 0 && drm_rect_height(r) > 0; } +/** + * drm_rect_equals - determine if two rectangles are equal + * @r1: first rectangle + * @r2: second rectangle + * + * RETURNS: + * %true if the rectangles are equal, %false otherwise. + */ +static inline bool drm_rect_equals(const struct drm_rect *r1, + const struct drm_rect *r2) +{ + return r1->x1 == r2->x1 && r1->x2 == r2->x2 && + r1->y1 == r2->y1 && r1->y2 == r2->y2; +} + bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, const struct drm_rect *clip, -- cgit v1.2.3 From 039735369c8fb105d0a090c949b7f894425121d8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 May 2013 17:16:45 +0300 Subject: drm: Fix drm_rect documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 'struct' keyword was missing so struct drm_rect documentation never ended up in the generated docs. 
Also move the drm_rect documentations to a new section alognside the various helper functions and add a short description about the intended purpose of drm_rect. v2: Move to new section and add general description Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- Documentation/DocBook/drm.tmpl | 8 ++++++-- include/drm/drm_rect.h | 9 ++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'include/drm') diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 7c7af25b330c..91ee107d5d0e 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -1653,8 +1653,6 @@ void intel_crt_init(struct drm_device *dev) KMS API Functions !Edrivers/gpu/drm/drm_crtc.c -!Edrivers/gpu/drm/drm_rect.c -!Finclude/drm/drm_rect.h @@ -2163,6 +2161,12 @@ void intel_crt_init(struct drm_device *dev) EDID Helper Functions Reference !Edrivers/gpu/drm/drm_edid.c + + Rectangle Utilities Reference +!Pinclude/drm/drm_rect.h rect utils +!Iinclude/drm/drm_rect.h +!Edrivers/gpu/drm/drm_rect.c + diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index 64fa265c6ffb..d1286297567b 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -25,7 +25,14 @@ #define DRM_RECT_H /** - * drm_rect - two dimensional rectangle + * DOC: rect utils + * + * Utility functions to help manage rectangular areas for + * clipping, scaling, etc. calculations. + */ + +/** + * struct drm_rect - two dimensional rectangle * @x1: horizontal starting coordinate (inclusive) * @x2: horizontal ending coordinate (exclusive) * @y1: vertical starting coordinate (inclusive) -- cgit v1.2.3 From 247d36d75128ba1f63702e0e6185d9a7a23ee5cb Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 13 May 2013 23:58:41 +0000 Subject: drm (ast, cirrus, mgag200, nouveau, savage, vmwgfx): Remove drm_mtrr_{add, del} This replaces drm_mtrr_{add,del} with arch_phys_wc_{add,del}. The interface is simplified (because the base and size parameters to drm_mtrr_del never did anything), and it no longer adds MTRRs on systems that don't need them. 
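The conversion boils down to the same small pattern in every driver touched below. A minimal before/after sketch (the "priv"/"fb_mtrr" names are placeholders standing in for each driver's private structure and handle field, not an extra hunk of this patch):

    /* before: remember the handle, base and size, and guard against -1 */
    priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
                                 pci_resource_len(dev->pdev, 0),
                                 DRM_MTRR_WC);
    ...
    if (priv->fb_mtrr >= 0)
            drm_mtrr_del(priv->fb_mtrr,
                         pci_resource_start(dev->pdev, 0),
                         pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);

    /* after: only the cookie is needed, and the guard goes away */
    priv->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                     pci_resource_len(dev->pdev, 0));
    ...
    arch_phys_wc_del(priv->fb_mtrr);
    priv->fb_mtrr = 0;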
Reviewed-by: Daniel Vetter Signed-off-by: Andy Lutomirski Signed-off-by: Dave Airlie --- drivers/gpu/drm/ast/ast_ttm.c | 13 +++-------- drivers/gpu/drm/cirrus/cirrus_ttm.c | 15 ++++-------- drivers/gpu/drm/mgag200/mgag200_ttm.c | 14 ++++-------- drivers/gpu/drm/nouveau/nouveau_ttm.c | 13 ++++------- drivers/gpu/drm/savage/savage_bci.c | 43 ++++++++++++----------------------- drivers/gpu/drm/savage/savage_drv.h | 5 +--- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10 ++++---- include/drm/drmP.h | 29 ----------------------- 8 files changed, 35 insertions(+), 107 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 09da3393c527..a5a1a034033d 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -271,26 +271,19 @@ int ast_mm_init(struct ast_private *ast) return ret; } - ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0), - DRM_MTRR_WC); + ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); return 0; } void ast_mm_fini(struct ast_private *ast) { - struct drm_device *dev = ast->dev; ttm_bo_device_release(&ast->ttm.bdev); ast_ttm_global_release(ast); - if (ast->fb_mtrr >= 0) { - drm_mtrr_del(ast->fb_mtrr, - pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0), DRM_MTRR_WC); - ast->fb_mtrr = -1; - } + arch_phys_wc_del(ast->fb_mtrr); } void ast_ttm_placement(struct ast_bo *bo, int domain) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 2ed8cfc740c9..36b9b0bab1da 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -271,9 +271,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus) return ret; } - cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0), - DRM_MTRR_WC); + cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); cirrus->mm_inited = true; return 0; @@ -281,8 +280,6 @@ int cirrus_mm_init(struct cirrus_device *cirrus) void cirrus_mm_fini(struct cirrus_device *cirrus) { - struct drm_device *dev = cirrus->dev; - if (!cirrus->mm_inited) return; @@ -290,12 +287,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus) cirrus_ttm_global_release(cirrus); - if (cirrus->fb_mtrr >= 0) { - drm_mtrr_del(cirrus->fb_mtrr, - pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0), DRM_MTRR_WC); - cirrus->fb_mtrr = -1; - } + arch_phys_wc_del(cirrus->fb_mtrr); + cirrus->fb_mtrr = 0; } void cirrus_ttm_placement(struct cirrus_bo *bo, int domain) diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 401c9891d3a8..0004f7740b8e 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -270,26 +270,20 @@ int mgag200_mm_init(struct mga_device *mdev) return ret; } - mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0), - DRM_MTRR_WC); + mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); return 0; } void mgag200_mm_fini(struct mga_device *mdev) { - struct drm_device *dev = mdev->dev; ttm_bo_device_release(&mdev->ttm.bdev); mgag200_ttm_global_release(mdev); - if (mdev->fb_mtrr >= 0) { - drm_mtrr_del(mdev->fb_mtrr, - pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0), DRM_MTRR_WC); - mdev->fb_mtrr = -1; - } + arch_phys_wc_del(mdev->fb_mtrr); + 
mdev->fb_mtrr = 0; } void mgag200_ttm_placement(struct mgag200_bo *bo, int domain) diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index f19a15a3bc03..3da985eb38cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -396,9 +396,8 @@ nouveau_ttm_init(struct nouveau_drm *drm) return ret; } - drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), - pci_resource_len(dev->pdev, 1), - DRM_MTRR_WC); + drm->ttm.mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1)); /* GART init */ if (drm->agp.stat != ENABLED) { @@ -433,10 +432,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm) nouveau_ttm_global_release(drm); - if (drm->ttm.mtrr >= 0) { - drm_mtrr_del(drm->ttm.mtrr, - pci_resource_start(drm->dev->pdev, 1), - pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC); - drm->ttm.mtrr = -1; - } + arch_phys_wc_del(drm->ttm.mtrr); + drm->ttm.mtrr = 0; } diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index b55c1d661147..bd6b2cf508d5 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -570,9 +570,6 @@ int savage_driver_firstopen(struct drm_device *dev) unsigned int fb_rsrc, aper_rsrc; int ret = 0; - dev_priv->mtrr[0].handle = -1; - dev_priv->mtrr[1].handle = -1; - dev_priv->mtrr[2].handle = -1; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { fb_rsrc = 0; fb_base = pci_resource_start(dev->pdev, 0); @@ -584,21 +581,14 @@ int savage_driver_firstopen(struct drm_device *dev) if (pci_resource_len(dev->pdev, 0) == 0x08000000) { /* Don't make MMIO write-cobining! We need 3 * MTRRs. */ - dev_priv->mtrr[0].base = fb_base; - dev_priv->mtrr[0].size = 0x01000000; - dev_priv->mtrr[0].handle = - drm_mtrr_add(dev_priv->mtrr[0].base, - dev_priv->mtrr[0].size, DRM_MTRR_WC); - dev_priv->mtrr[1].base = fb_base + 0x02000000; - dev_priv->mtrr[1].size = 0x02000000; - dev_priv->mtrr[1].handle = - drm_mtrr_add(dev_priv->mtrr[1].base, - dev_priv->mtrr[1].size, DRM_MTRR_WC); - dev_priv->mtrr[2].base = fb_base + 0x04000000; - dev_priv->mtrr[2].size = 0x04000000; - dev_priv->mtrr[2].handle = - drm_mtrr_add(dev_priv->mtrr[2].base, - dev_priv->mtrr[2].size, DRM_MTRR_WC); + dev_priv->mtrr_handles[0] = + arch_phys_wc_add(fb_base, 0x01000000); + dev_priv->mtrr_handles[1] = + arch_phys_wc_add(fb_base + 0x02000000, + 0x02000000); + dev_priv->mtrr_handles[2] = + arch_phys_wc_add(fb_base + 0x04000000, + 0x04000000); } else { DRM_ERROR("strange pci_resource_len %08llx\n", (unsigned long long) @@ -616,11 +606,9 @@ int savage_driver_firstopen(struct drm_device *dev) if (pci_resource_len(dev->pdev, 1) == 0x08000000) { /* Can use one MTRR to cover both fb and * aperture. 
*/ - dev_priv->mtrr[0].base = fb_base; - dev_priv->mtrr[0].size = 0x08000000; - dev_priv->mtrr[0].handle = - drm_mtrr_add(dev_priv->mtrr[0].base, - dev_priv->mtrr[0].size, DRM_MTRR_WC); + dev_priv->mtrr_handles[0] = + arch_phys_wc_add(fb_base, + 0x08000000); } else { DRM_ERROR("strange pci_resource_len %08llx\n", (unsigned long long) @@ -660,11 +648,10 @@ void savage_driver_lastclose(struct drm_device *dev) drm_savage_private_t *dev_priv = dev->dev_private; int i; - for (i = 0; i < 3; ++i) - if (dev_priv->mtrr[i].handle >= 0) - drm_mtrr_del(dev_priv->mtrr[i].handle, - dev_priv->mtrr[i].base, - dev_priv->mtrr[i].size, DRM_MTRR_WC); + for (i = 0; i < 3; ++i) { + arch_phys_wc_del(dev_priv->mtrr_handles[i]); + dev_priv->mtrr_handles[i] = 0; + } } int savage_driver_unload(struct drm_device *dev) diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h index df2aac6636f7..c05082a59f6f 100644 --- a/drivers/gpu/drm/savage/savage_drv.h +++ b/drivers/gpu/drm/savage/savage_drv.h @@ -160,10 +160,7 @@ typedef struct drm_savage_private { drm_local_map_t *cmd_dma; drm_local_map_t fake_dma; - struct { - int handle; - unsigned long base, size; - } mtrr[3]; + int mtrr_handles[3]; /* BCI and status-related stuff */ volatile uint32_t *status_ptr, *bci_ptr; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 07dfd823cc30..78e21649d48a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -565,8 +565,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->has_gmr = false; } - dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, - dev_priv->mmio_size, DRM_MTRR_WC); + dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, + dev_priv->mmio_size); dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, dev_priv->mmio_size); @@ -664,8 +664,7 @@ out_no_device: out_err4: iounmap(dev_priv->mmio_virt); out_err3: - drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, - dev_priv->mmio_size, DRM_MTRR_WC); + arch_phys_wc_del(dev_priv->mmio_mtrr); if (dev_priv->has_gmr) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); @@ -709,8 +708,7 @@ static int vmw_driver_unload(struct drm_device *dev) ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); - drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, - dev_priv->mmio_size, DRM_MTRR_WC); + arch_phys_wc_del(dev_priv->mmio_mtrr); if (dev_priv->has_gmr) (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 63d17ee9eb48..5a87655508fc 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1250,37 +1250,8 @@ static inline int drm_core_has_MTRR(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_USE_MTRR); } - -#define DRM_MTRR_WC MTRR_TYPE_WRCOMB - -static inline int drm_mtrr_add(unsigned long offset, unsigned long size, - unsigned int flags) -{ - return mtrr_add(offset, size, flags, 1); -} - -static inline int drm_mtrr_del(int handle, unsigned long offset, - unsigned long size, unsigned int flags) -{ - return mtrr_del(handle, offset, size); -} - #else #define drm_core_has_MTRR(dev) (0) - -#define DRM_MTRR_WC 0 - -static inline int drm_mtrr_add(unsigned long offset, unsigned long size, - unsigned int flags) -{ - return 0; -} - -static inline int drm_mtrr_del(int handle, unsigned long offset, - unsigned long size, unsigned 
int flags) -{ - return 0; -} #endif static inline void drm_device_set_unplugged(struct drm_device *dev) -- cgit v1.2.3 From 03dae7c567d24c49e826a033df45802ac9d1d6c8 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 13 May 2013 23:58:47 +0000 Subject: drm: Remove mtrr_add and mtrr_del fallback hack for non-MTRR systems There are no users left in drivers/gpu. Reviewed-by: Daniel Vetter Signed-off-by: Andy Lutomirski Signed-off-by: Dave Airlie --- include/drm/drmP.h | 5 +---- include/drm/drm_os_linux.h | 16 ---------------- 2 files changed, 1 insertion(+), 20 deletions(-) (limited to 'include/drm') diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5a87655508fc..b06f5afe10ce 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -55,16 +55,13 @@ #include #include #include +#include #include #if defined(__alpha__) || defined(__powerpc__) #include /* For pte_wrprotect */ #endif -#include #include #include -#ifdef CONFIG_MTRR -#include -#endif #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) #include #include diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h index 675ddf4b441f..815fafc6b4ad 100644 --- a/include/drm/drm_os_linux.h +++ b/include/drm/drm_os_linux.h @@ -65,22 +65,6 @@ struct no_agp_kern { #define DRM_AGP_KERN struct no_agp_kern #endif -#if !(__OS_HAS_MTRR) -static __inline__ int mtrr_add(unsigned long base, unsigned long size, - unsigned int type, char increment) -{ - return -ENODEV; -} - -static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) -{ - return -ENODEV; -} - -#define MTRR_TYPE_WRCOMB 1 - -#endif - /** Other copying of data to kernel space */ #define DRM_COPY_FROM_USER(arg1, arg2, arg3) \ copy_from_user(arg1, arg2, arg3) -- cgit v1.2.3 From a38911a3fede294e2adfd2deea8104dfbbd760c5 Mon Sep 17 00:00:00 2001 From: Wang Xingchao Date: Thu, 30 May 2013 22:07:11 +0800 Subject: i915/drm: Add private api for power well usage Haswell Display audio depends on power well in graphic side, it should request power well before use it and release power well after use. I915 will not shutdown power well if it detects audio is using. This patch protects display audio crash for Intel Haswell C3 stepping board. 
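On the audio side the intended usage is simply to bracket codec access with the new request/release pair exported from i915; a hypothetical sketch of the hda_i915 consumer (not part of this patch, the helper name is made up):

    #include <drm/i915_powerwell.h>

    /* Hypothetical HDA-side helper: keep the Haswell power well up
     * while the display audio codec is being programmed.
     */
    static void hda_display_codec_access(void)
    {
            i915_request_power_well();      /* powers the well up if needed */

            /* ... program the HDMI/DP audio codec ... */

            i915_release_power_well();      /* lets i915 power it down again */
    }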
Signed-off-by: Wang Xingchao Reviewed-by: Takashi Iwai Reviewed-by: Damien Lespiau Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 6 +++ drivers/gpu/drm/i915/i915_drv.h | 12 ++++++ drivers/gpu/drm/i915/intel_drv.h | 4 ++ drivers/gpu/drm/i915/intel_pm.c | 81 ++++++++++++++++++++++++++++++++++++---- include/drm/i915_powerwell.h | 36 ++++++++++++++++++ 5 files changed, 132 insertions(+), 7 deletions(-) create mode 100644 include/drm/i915_powerwell.h (limited to 'include/drm') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 22534c3b4fe5..fd8898c8f8e3 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1656,6 +1656,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Start out suspended */ dev_priv->mm.suspended = 1; + if (HAS_POWER_WELL(dev)) + i915_init_power_well(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { ret = i915_load_modeset_init(dev); if (ret < 0) { @@ -1712,6 +1715,9 @@ int i915_driver_unload(struct drm_device *dev) intel_gpu_ips_teardown(); + if (HAS_POWER_WELL(dev)) + i915_remove_power_well(dev); + i915_teardown_sysfs(dev); if (dev_priv->mm.inactive_shrinker.shrink) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 215aa63e3f47..87f7f88b1030 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -740,6 +740,15 @@ struct intel_ilk_power_mgmt { struct drm_i915_gem_object *renderctx; }; +/* Power well structure for haswell */ +struct i915_power_well { + struct drm_device *device; + spinlock_t lock; + /* power well enable/disable usage count */ + int count; + int i915_request; +}; + struct i915_dri1_state { unsigned allow_batchbuffer : 1; u32 __iomem *gfx_hws_cpu_addr; @@ -1099,6 +1108,9 @@ typedef struct drm_i915_private { * mchdev_lock in intel_pm.c */ struct intel_ilk_power_mgmt ips; + /* Haswell power well */ + struct i915_power_well power_well; + enum no_fbc_reason no_fbc_reason; struct drm_mm_node *compressed_fb; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6bbebf824078..1735cdc86770 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -768,6 +768,10 @@ extern void intel_update_fbc(struct drm_device *dev); extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); extern void intel_gpu_ips_teardown(void); +/* Power well */ +extern int i915_init_power_well(struct drm_device *dev); +extern void i915_remove_power_well(struct drm_device *dev); + extern bool intel_display_power_enabled(struct drm_device *dev, enum intel_display_power_domain domain); extern void intel_init_power_well(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1e1b48ec6c46..a417d7b196c2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5024,18 +5024,12 @@ bool intel_display_power_enabled(struct drm_device *dev, } } -void intel_set_power_well(struct drm_device *dev, bool enable) +static void __intel_set_power_well(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; bool is_enabled, enable_requested; uint32_t tmp; - if (!HAS_POWER_WELL(dev)) - return; - - if (!i915_disable_power_well && !enable) - return; - tmp = I915_READ(HSW_PWR_WELL_DRIVER); is_enabled = tmp & HSW_PWR_WELL_STATE; enable_requested = tmp & HSW_PWR_WELL_ENABLE; @@ -5058,6 +5052,79 @@ void intel_set_power_well(struct 
drm_device *dev, bool enable) } } +static struct i915_power_well *hsw_pwr; + +/* Display audio driver power well request */ +void i915_request_power_well(void) +{ + if (WARN_ON(!hsw_pwr)) + return; + + spin_lock_irq(&hsw_pwr->lock); + if (!hsw_pwr->count++ && + !hsw_pwr->i915_request) + __intel_set_power_well(hsw_pwr->device, true); + spin_unlock_irq(&hsw_pwr->lock); +} +EXPORT_SYMBOL_GPL(i915_request_power_well); + +/* Display audio driver power well release */ +void i915_release_power_well(void) +{ + if (WARN_ON(!hsw_pwr)) + return; + + spin_lock_irq(&hsw_pwr->lock); + WARN_ON(!hsw_pwr->count); + if (!--hsw_pwr->count && + !hsw_pwr->i915_request) + __intel_set_power_well(hsw_pwr->device, false); + spin_unlock_irq(&hsw_pwr->lock); +} +EXPORT_SYMBOL_GPL(i915_release_power_well); + +int i915_init_power_well(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + hsw_pwr = &dev_priv->power_well; + + hsw_pwr->device = dev; + spin_lock_init(&hsw_pwr->lock); + hsw_pwr->count = 0; + + return 0; +} + +void i915_remove_power_well(struct drm_device *dev) +{ + hsw_pwr = NULL; +} + +void intel_set_power_well(struct drm_device *dev, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_power_well *power_well = &dev_priv->power_well; + + if (!HAS_POWER_WELL(dev)) + return; + + if (!i915_disable_power_well && !enable) + return; + + spin_lock_irq(&power_well->lock); + power_well->i915_request = enable; + + /* only reject "disable" power well request */ + if (power_well->count && !enable) { + spin_unlock_irq(&power_well->lock); + return; + } + + __intel_set_power_well(dev, enable); + spin_unlock_irq(&power_well->lock); +} + /* * Starting with Haswell, we have a "Power Down Well" that can be turned off * when not needed anymore. We have 4 registers that can request the power well diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h new file mode 100644 index 000000000000..cfdc884405b7 --- /dev/null +++ b/include/drm/i915_powerwell.h @@ -0,0 +1,36 @@ +/************************************************************************** + * + * Copyright 2013 Intel Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + **************************************************************************/ + +#ifndef _I915_POWERWELL_H_ +#define _I915_POWERWELL_H_ + +/* For use by hda_i915 driver */ +extern void i915_request_power_well(void); +extern void i915_release_power_well(void); + +#endif /* _I915_POWERWELL_H_ */ -- cgit v1.2.3 From 1c5aafa6eee2d5712f774676d407e5ab6dae9a1b Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 16 Apr 2013 14:14:52 +0200 Subject: drm/gem: Split drm_gem_mmap() into object search and object mapping The drm_gem_mmap() function first finds the GEM object to be mapped based on the fake mmap offset and then maps the object. Split the object mapping code into a standalone drm_gem_mmap_obj() function that can be used to implement dma-buf mmap() operations. Signed-off-by: Laurent Pinchart Reviewed-by: Rob Clark --- drivers/gpu/drm/drm_gem.c | 83 +++++++++++++++++++++++++++++------------------ include/drm/drmP.h | 2 ++ 2 files changed, 54 insertions(+), 31 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index cf919e36e8ae..43217138816d 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -644,6 +644,55 @@ void drm_gem_vm_close(struct vm_area_struct *vma) } EXPORT_SYMBOL(drm_gem_vm_close); +/** + * drm_gem_mmap_obj - memory map a GEM object + * @obj: the GEM object to map + * @obj_size: the object size to be mapped, in bytes + * @vma: VMA for the area to be mapped + * + * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops + * provided by the driver. Depending on their requirements, drivers can either + * provide a fault handler in their gem_vm_ops (in which case any accesses to + * the object will be trapped, to perform migration, GTT binding, surface + * register allocation, or performance monitoring), or mmap the buffer memory + * synchronously after calling drm_gem_mmap_obj. + * + * This function is mainly intended to implement the DMABUF mmap operation, when + * the GEM object is not looked up based on its fake offset. To implement the + * DRM mmap operation, drivers should use the drm_gem_mmap() function. + * + * Return 0 or success or -EINVAL if the object size is smaller than the VMA + * size, or if no gem_vm_ops are provided. + */ +int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, + struct vm_area_struct *vma) +{ + struct drm_device *dev = obj->dev; + + /* Check for valid size. */ + if (obj_size < vma->vm_end - vma->vm_start) + return -EINVAL; + + if (!dev->driver->gem_vm_ops) + return -EINVAL; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = dev->driver->gem_vm_ops; + vma->vm_private_data = obj; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + + /* Take a ref for this mapping of the object, so that the fault + * handler can dereference the mmap offset's pointer to the object. + * This reference is cleaned up by the corresponding vm_close + * (which should happen whether the vma was created by this call, or + * by a vm_open due to mremap or partial unmap or whatever). + */ + drm_gem_object_reference(obj); + + drm_vm_open_locked(dev, vma); + return 0; +} +EXPORT_SYMBOL(drm_gem_mmap_obj); /** * drm_gem_mmap - memory map routine for GEM objects @@ -653,11 +702,9 @@ EXPORT_SYMBOL(drm_gem_vm_close); * If a driver supports GEM object mapping, mmap calls on the DRM file * descriptor will end up here. 
* - * If we find the object based on the offset passed in (vma->vm_pgoff will + * Look up the GEM object based on the offset passed in (vma->vm_pgoff will * contain the fake offset we created when the GTT map ioctl was called on - * the object), we set up the driver fault handler so that any accesses - * to the object can be trapped, to perform migration, GTT binding, surface - * register allocation, or performance monitoring. + * the object) and map it with a call to drm_gem_mmap_obj(). */ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) { @@ -665,7 +712,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_device *dev = priv->minor->dev; struct drm_gem_mm *mm = dev->mm_private; struct drm_local_map *map = NULL; - struct drm_gem_object *obj; struct drm_hash_item *hash; int ret = 0; @@ -686,32 +732,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) goto out_unlock; } - /* Check for valid size. */ - if (map->size < vma->vm_end - vma->vm_start) { - ret = -EINVAL; - goto out_unlock; - } - - obj = map->handle; - if (!obj->dev->driver->gem_vm_ops) { - ret = -EINVAL; - goto out_unlock; - } - - vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_ops = obj->dev->driver->gem_vm_ops; - vma->vm_private_data = map->handle; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - - /* Take a ref for this mapping of the object, so that the fault - * handler can dereference the mmap offset's pointer to the object. - * This reference is cleaned up by the corresponding vm_close - * (which should happen whether the vma was created by this call, or - * by a vm_open due to mremap or partial unmap or whatever). - */ - drm_gem_object_reference(obj); - - drm_vm_open_locked(dev, vma); + ret = drm_gem_mmap_obj(map->handle, map->size, vma); out_unlock: mutex_unlock(&dev->struct_mutex); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index b06f5afe10ce..79fb4c7b6c72 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1616,6 +1616,8 @@ int drm_gem_private_object_init(struct drm_device *dev, void drm_gem_object_handle_free(struct drm_gem_object *obj); void drm_gem_vm_open(struct vm_area_struct *vma); void drm_gem_vm_close(struct vm_area_struct *vma); +int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, + struct vm_area_struct *vma); int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); #include -- cgit v1.2.3 From 71d7282a0f1abb488e5be4d154893579624bc683 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 17 Feb 2013 01:57:30 +0100 Subject: drm: GEM CMA: Add DRM PRIME support Signed-off-by: Laurent Pinchart Reviewed-by: Rob Clark --- drivers/gpu/drm/drm_gem_cma_helper.c | 317 ++++++++++++++++++++++++++++++++++- include/drm/drm_gem_cma_helper.h | 9 + 2 files changed, 323 insertions(+), 3 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 7a4db4e7a1ea..f94fcd752815 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -82,6 +83,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, unsigned int size) { struct drm_gem_cma_object *cma_obj; + struct sg_table *sgt = NULL; + int ret; size = round_up(size, PAGE_SIZE); @@ -94,11 +97,29 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, if (!cma_obj->vaddr) { dev_err(drm->dev, "failed to 
allocate buffer with size %d\n", size); - drm_gem_cma_free_object(&cma_obj->base); - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto error; + } + + sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL); + if (sgt == NULL) { + ret = -ENOMEM; + goto error; } + ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr, + cma_obj->paddr, size); + if (ret < 0) + goto error; + + cma_obj->sgt = sgt; + return cma_obj; + +error: + kfree(sgt); + drm_gem_cma_free_object(&cma_obj->base); + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(drm_gem_cma_create); @@ -156,9 +177,16 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) cma_obj = to_drm_gem_cma_obj(gem_obj); - if (cma_obj->vaddr) + if (cma_obj->vaddr) { dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size, cma_obj->vaddr, cma_obj->paddr); + if (cma_obj->sgt) { + sg_free_table(cma_obj->sgt); + kfree(cma_obj->sgt); + } + } else if (gem_obj->import_attach) { + drm_prime_gem_destroy(gem_obj, cma_obj->sgt); + } drm_gem_object_release(gem_obj); @@ -291,3 +319,286 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m } EXPORT_SYMBOL_GPL(drm_gem_cma_describe); #endif + +/* ----------------------------------------------------------------------------- + * DMA-BUF + */ + +struct drm_gem_cma_dmabuf_attachment { + struct sg_table sgt; + enum dma_data_direction dir; +}; + +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev, + struct dma_buf_attachment *attach) +{ + struct drm_gem_cma_dmabuf_attachment *cma_attach; + + cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL); + if (!cma_attach) + return -ENOMEM; + + cma_attach->dir = DMA_NONE; + attach->priv = cma_attach; + + return 0; +} + +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv; + struct sg_table *sgt; + + if (cma_attach == NULL) + return; + + sgt = &cma_attach->sgt; + + if (cma_attach->dir != DMA_NONE) + dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, + cma_attach->dir); + + sg_free_table(sgt); + kfree(cma_attach); + attach->priv = NULL; +} + +static struct sg_table * +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv; + struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv; + struct drm_device *drm = cma_obj->base.dev; + struct scatterlist *rd, *wr; + struct sg_table *sgt; + unsigned int i; + int nents, ret; + + DRM_DEBUG_PRIME("\n"); + + if (WARN_ON(dir == DMA_NONE)) + return ERR_PTR(-EINVAL); + + /* Return the cached mapping when possible. */ + if (cma_attach->dir == dir) + return &cma_attach->sgt; + + /* Two mappings with different directions for the same attachment are + * not allowed. 
+ */ + if (WARN_ON(cma_attach->dir != DMA_NONE)) + return ERR_PTR(-EBUSY); + + sgt = &cma_attach->sgt; + + ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to alloc sgt.\n"); + return ERR_PTR(-ENOMEM); + } + + mutex_lock(&drm->struct_mutex); + + rd = cma_obj->sgt->sgl; + wr = sgt->sgl; + for (i = 0; i < sgt->orig_nents; ++i) { + sg_set_page(wr, sg_page(rd), rd->length, rd->offset); + rd = sg_next(rd); + wr = sg_next(wr); + } + + nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); + if (!nents) { + DRM_ERROR("failed to map sgl with iommu.\n"); + sg_free_table(sgt); + sgt = ERR_PTR(-EIO); + goto done; + } + + cma_attach->dir = dir; + attach->priv = cma_attach; + + DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size); + +done: + mutex_unlock(&drm->struct_mutex); + return sgt; +} + +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + /* Nothing to do. */ +} + +static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf) +{ + struct drm_gem_cma_object *cma_obj = dmabuf->priv; + + DRM_DEBUG_PRIME("%s\n", __FILE__); + + /* + * drm_gem_cma_dmabuf_release() call means that file object's + * f_count is 0 and it calls drm_gem_object_handle_unreference() + * to drop the references that these values had been increased + * at drm_prime_handle_to_fd() + */ + if (cma_obj->base.export_dma_buf == dmabuf) { + cma_obj->base.export_dma_buf = NULL; + + /* + * drop this gem object refcount to release allocated buffer + * and resources. + */ + drm_gem_object_unreference_unlocked(&cma_obj->base); + } +} + +static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf, + unsigned long page_num) +{ + /* TODO */ + + return NULL; +} + +static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf, + unsigned long page_num, void *addr) +{ + /* TODO */ +} + +static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf, + unsigned long page_num) +{ + /* TODO */ + + return NULL; +} + +static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf, + unsigned long page_num, void *addr) +{ + /* TODO */ +} + +static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma) +{ + struct drm_gem_cma_object *cma_obj = dmabuf->priv; + struct drm_gem_object *gem_obj = &cma_obj->base; + int ret; + + ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma); + if (ret < 0) + return ret; + + return drm_gem_cma_mmap_obj(cma_obj, vma); +} + +static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf) +{ + struct drm_gem_cma_object *cma_obj = dmabuf->priv; + + return cma_obj->vaddr; +} + +static struct dma_buf_ops drm_gem_cma_dmabuf_ops = { + .attach = drm_gem_cma_dmabuf_attach, + .detach = drm_gem_cma_dmabuf_detach, + .map_dma_buf = drm_gem_cma_dmabuf_map, + .unmap_dma_buf = drm_gem_cma_dmabuf_unmap, + .kmap = drm_gem_cma_dmabuf_kmap, + .kmap_atomic = drm_gem_cma_dmabuf_kmap_atomic, + .kunmap = drm_gem_cma_dmabuf_kunmap, + .kunmap_atomic = drm_gem_cma_dmabuf_kunmap_atomic, + .mmap = drm_gem_cma_dmabuf_mmap, + .vmap = drm_gem_cma_dmabuf_vmap, + .release = drm_gem_cma_dmabuf_release, +}; + +struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm, + struct drm_gem_object *obj, int flags) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops, + cma_obj->base.size, flags); +} +EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export); + +struct drm_gem_object *drm_gem_cma_dmabuf_import(struct 
drm_device *drm, + struct dma_buf *dma_buf) +{ + struct drm_gem_cma_object *cma_obj; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + int ret; + + DRM_DEBUG_PRIME("%s\n", __FILE__); + + /* is this one of own objects? */ + if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) { + struct drm_gem_object *obj; + + cma_obj = dma_buf->priv; + obj = &cma_obj->base; + + /* is it from our device? */ + if (obj->dev == drm) { + /* + * Importing dmabuf exported from out own gem increases + * refcount on gem itself instead of f_count of dmabuf. + */ + drm_gem_object_reference(obj); + dma_buf_put(dma_buf); + return obj; + } + } + + /* Create a CMA GEM buffer. */ + cma_obj = __drm_gem_cma_create(drm, dma_buf->size); + if (IS_ERR(cma_obj)) + return ERR_PTR(PTR_ERR(cma_obj)); + + /* Attach to the buffer and map it. Make sure the mapping is contiguous + * on the device memory bus, as that's all we support. + */ + attach = dma_buf_attach(dma_buf, drm->dev); + if (IS_ERR(attach)) { + ret = -EINVAL; + goto error_gem_free; + } + + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(sgt)) { + ret = sgt ? PTR_ERR(sgt) : -ENOMEM; + goto error_buf_detach; + } + + if (sgt->nents != 1) { + ret = -EINVAL; + goto error_buf_unmap; + } + + cma_obj->base.import_attach = attach; + cma_obj->paddr = sg_dma_address(sgt->sgl); + cma_obj->sgt = sgt; + + DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr, + dma_buf->size); + + return &cma_obj->base; + +error_buf_unmap: + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); +error_buf_detach: + dma_buf_detach(dma_buf, attach); +error_gem_free: + drm_gem_cma_free_object(&cma_obj->base); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import); diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 63397ced9254..6e17251e9b28 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -4,6 +4,9 @@ struct drm_gem_cma_object { struct drm_gem_object base; dma_addr_t paddr; + struct sg_table *sgt; + + /* For objects with DMA memory allocated by GEM CMA */ void *vaddr; }; @@ -45,4 +48,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops; void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); #endif +struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev, + struct drm_gem_object *obj, + int flags); +struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev, + struct dma_buf *dma_buf); + #endif /* __DRM_GEM_CMA_HELPER_H__ */ -- cgit v1.2.3 From 102d6dba306c825cd5c310f73868b130931f47aa Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 9 Apr 2013 09:18:44 +0200 Subject: drm: add unpin function to prime helpers Prevents buffers from being pinned forever. 
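A driver that pins its backing storage for export is expected to pair the two callbacks in its struct drm_driver; a rough sketch (the foo_* names are invented, only the two .gem_prime_* hooks come from the existing pin callback and the unpin one added here):

    static int foo_gem_prime_pin(struct drm_gem_object *obj)
    {
            /* keep the buffer resident while it is exported */
            return foo_bo_pin(to_foo_bo(obj));
    }

    static void foo_gem_prime_unpin(struct drm_gem_object *obj)
    {
            /* undo the pin once the exported dma-buf is released */
            foo_bo_unpin(to_foo_bo(obj));
    }

    static struct drm_driver foo_driver = {
            /* ... */
            .gem_prime_pin = foo_gem_prime_pin,
            .gem_prime_unpin = foo_gem_prime_unpin,
    };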
Signed-off-by: Maarten Lankhorst Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 13 +++++++++++-- include/drm/drmP.h | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index dcde35231e25..1a36b0c22732 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -92,10 +92,13 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->dev; if (obj->export_dma_buf == dma_buf) { /* drop the reference on the export fd holds */ obj->export_dma_buf = NULL; + if (dev->driver->gem_prime_unpin) + dev->driver->gem_prime_unpin(obj); drm_gem_object_unreference_unlocked(obj); } } @@ -185,13 +188,19 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { struct dma_buf *drm_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) { + struct dma_buf *buf; + if (dev->driver->gem_prime_pin) { int ret = dev->driver->gem_prime_pin(obj); if (ret) return ERR_PTR(ret); } - return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, - 0600); + buf = dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, + 0600); + + if (IS_ERR(buf) && dev->driver->gem_prime_unpin) + dev->driver->gem_prime_unpin(obj); + return buf; } EXPORT_SYMBOL(drm_gem_prime_export); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index b06f5afe10ce..57f60a743da0 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -930,6 +930,7 @@ struct drm_driver { struct dma_buf *dma_buf); /* low-level interface used by drm_gem_prime_{import,export} */ int (*gem_prime_pin)(struct drm_gem_object *obj); + void (*gem_prime_unpin)(struct drm_gem_object *obj); struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); struct drm_gem_object *(*gem_prime_import_sg_table)( struct drm_device *dev, size_t size, -- cgit v1.2.3 From 6ba6d03e69125ef42a63e90d45e49c659ea3c34f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 10 Jun 2013 11:15:10 +0300 Subject: drm: Print pretty names for pixel formats MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than just printing the pixel format as a hex number, decode the fourcc into human readable form, and also decode the LE vs. BE flag. Keep printing the raw hex number too in case it contains non-printable characters. Some examples what the new drm_get_format_name() produces: DRM_FORMAT_XRGB8888: "XR24 little-endian (0x34325258)" DRM_FORMAT_YUYV: "YUYV little-endian (0x56595559)" DRM_FORMAT_RGB565|DRM_FORMAT_BIG_ENDIAN: "RG16 big-endian (0xb6314752)" Unprintable characters: "D??? 
big-endian (0xff7f0244)" v2: Fix patch author Signed-off-by: Ville Syrjälä Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 29 +++++++++++++++++++++++++++-- include/drm/drm_crtc.h | 1 + 2 files changed, 28 insertions(+), 2 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index e7e92429d10f..079996ad02a7 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -29,6 +29,7 @@ * Dave Airlie * Jesse Barnes */ +#include #include #include #include @@ -252,6 +253,28 @@ char *drm_get_connector_status_name(enum drm_connector_status status) } EXPORT_SYMBOL(drm_get_connector_status_name); +static char printable_char(int c) +{ + return isascii(c) && isprint(c) ? c : '?'; +} + +char *drm_get_format_name(uint32_t format) +{ + static char buf[32]; + + snprintf(buf, sizeof(buf), + "%c%c%c%c %s-endian (0x%08x)", + printable_char(format & 0xff), + printable_char((format >> 8) & 0xff), + printable_char((format >> 16) & 0xff), + printable_char((format >> 24) & 0x7f), + format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little", + format); + + return buf; +} +EXPORT_SYMBOL(drm_get_format_name); + /** * drm_mode_object_get - allocate a new modeset identifier * @dev: DRM device @@ -1834,7 +1857,8 @@ int drm_mode_setplane(struct drm_device *dev, void *data, if (fb->pixel_format == plane->format_types[i]) break; if (i == plane->format_count) { - DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format); + DRM_DEBUG_KMS("Invalid pixel format %s\n", + drm_get_format_name(fb->pixel_format)); ret = -EINVAL; goto out; } @@ -2312,7 +2336,8 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) ret = format_check(r); if (ret) { - DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format); + DRM_DEBUG_KMS("bad framebuffer format %s\n", + drm_get_format_name(r->pixel_format)); return ret; } diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index adb3f9b625f6..2cbbfd44c6df 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1094,5 +1094,6 @@ extern int drm_format_num_planes(uint32_t format); extern int drm_format_plane_cpp(uint32_t format, int plane); extern int drm_format_horz_chroma_subsampling(uint32_t format); extern int drm_format_vert_chroma_subsampling(uint32_t format); +extern char *drm_get_format_name(uint32_t format); #endif /* __DRM_CRTC_H__ */ -- cgit v1.2.3 From d20d3174806ef6589cb912a488657d21fcd7ece2 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 7 Jun 2013 15:43:07 +0000 Subject: drm: Constify the pretty-print functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The structures and strings involved with various pretty-print functions aren't meant to be modified, so make them all const. The exception is drm_connector_enum_list which does get modified in drm_connector_init(). While at it move the drm_get_connector_status_name() prototype from drmP.h to drm_crtc.h where it belongs. 
Signed-off-by: Ville Syrjälä Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 30 +++++++++++++++--------------- include/drm/drmP.h | 1 - include/drm/drm_crtc.h | 17 +++++++++-------- 3 files changed, 24 insertions(+), 24 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 079996ad02a7..44c3421899bb 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -92,7 +92,7 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked); /* Avoid boilerplate. I'm tired of typing. */ #define DRM_ENUM_NAME_FN(fnname, list) \ - char *fnname(int val) \ + const char *fnname(int val) \ { \ int i; \ for (i = 0; i < ARRAY_SIZE(list); i++) { \ @@ -105,7 +105,7 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked); /* * Global properties */ -static struct drm_prop_enum_list drm_dpms_enum_list[] = +static const struct drm_prop_enum_list drm_dpms_enum_list[] = { { DRM_MODE_DPMS_ON, "On" }, { DRM_MODE_DPMS_STANDBY, "Standby" }, { DRM_MODE_DPMS_SUSPEND, "Suspend" }, @@ -117,7 +117,7 @@ DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) /* * Optional properties */ -static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = +static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { { DRM_MODE_SCALE_NONE, "None" }, { DRM_MODE_SCALE_FULLSCREEN, "Full" }, @@ -125,7 +125,7 @@ static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { DRM_MODE_SCALE_ASPECT, "Full aspect" }, }; -static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = +static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] = { { DRM_MODE_DITHERING_OFF, "Off" }, { DRM_MODE_DITHERING_ON, "On" }, @@ -135,7 +135,7 @@ static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = /* * Non-global properties, but "required" for certain connectors. 
*/ -static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = +static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ @@ -144,7 +144,7 @@ static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) -static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = +static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ @@ -154,7 +154,7 @@ static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, drm_dvi_i_subconnector_enum_list) -static struct drm_prop_enum_list drm_tv_select_enum_list[] = +static const struct drm_prop_enum_list drm_tv_select_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ @@ -165,7 +165,7 @@ static struct drm_prop_enum_list drm_tv_select_enum_list[] = DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) -static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = +static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ @@ -177,7 +177,7 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) -static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { +static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = { { DRM_MODE_DIRTY_OFF, "Off" }, { DRM_MODE_DIRTY_ON, "On" }, { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, @@ -185,7 +185,7 @@ static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { struct drm_conn_prop_enum_list { int type; - char *name; + const char *name; int count; }; @@ -211,7 +211,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, }; -static struct drm_prop_enum_list drm_encoder_enum_list[] = +static const struct drm_prop_enum_list drm_encoder_enum_list[] = { { DRM_MODE_ENCODER_NONE, "None" }, { DRM_MODE_ENCODER_DAC, "DAC" }, { DRM_MODE_ENCODER_TMDS, "TMDS" }, @@ -220,7 +220,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] = { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, }; -char *drm_get_encoder_name(struct drm_encoder *encoder) +const char *drm_get_encoder_name(const struct drm_encoder *encoder) { static char buf[32]; @@ -231,7 +231,7 @@ char *drm_get_encoder_name(struct drm_encoder *encoder) } EXPORT_SYMBOL(drm_get_encoder_name); -char *drm_get_connector_name(struct drm_connector *connector) +const char *drm_get_connector_name(const struct drm_connector *connector) { static char buf[32]; @@ -242,7 +242,7 @@ char *drm_get_connector_name(struct drm_connector *connector) } EXPORT_SYMBOL(drm_get_connector_name); -char *drm_get_connector_status_name(enum drm_connector_status status) +const char *drm_get_connector_status_name(enum drm_connector_status status) { if (status == connector_status_connected) return "connected"; @@ -258,7 +258,7 @@ static char printable_char(int c) return isascii(c) && isprint(c) ? 
c : '?'; } -char *drm_get_format_name(uint32_t format) +const char *drm_get_format_name(uint32_t format) { static char buf[32]; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 57f60a743da0..e4f765c80fb0 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1599,7 +1599,6 @@ extern void drm_sysfs_destroy(void); extern int drm_sysfs_device_add(struct drm_minor *minor); extern void drm_sysfs_hotplug_event(struct drm_device *dev); extern void drm_sysfs_device_remove(struct drm_minor *minor); -extern char *drm_get_connector_status_name(enum drm_connector_status status); extern int drm_sysfs_connector_add(struct drm_connector *connector); extern void drm_sysfs_connector_remove(struct drm_connector *connector); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 2cbbfd44c6df..53c33e28a2f7 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -897,12 +897,13 @@ extern void drm_plane_cleanup(struct drm_plane *plane); extern void drm_encoder_cleanup(struct drm_encoder *encoder); -extern char *drm_get_connector_name(struct drm_connector *connector); -extern char *drm_get_dpms_name(int val); -extern char *drm_get_dvi_i_subconnector_name(int val); -extern char *drm_get_dvi_i_select_name(int val); -extern char *drm_get_tv_subconnector_name(int val); -extern char *drm_get_tv_select_name(int val); +extern const char *drm_get_connector_name(const struct drm_connector *connector); +extern const char *drm_get_connector_status_name(enum drm_connector_status status); +extern const char *drm_get_dpms_name(int val); +extern const char *drm_get_dvi_i_subconnector_name(int val); +extern const char *drm_get_dvi_i_select_name(int val); +extern const char *drm_get_tv_subconnector_name(int val); +extern const char *drm_get_tv_select_name(int val); extern void drm_fb_release(struct drm_file *file_priv); extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); extern bool drm_probe_ddc(struct i2c_adapter *adapter); @@ -994,7 +995,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); extern int drm_mode_create_dithering_property(struct drm_device *dev); extern int drm_mode_create_dirty_info_property(struct drm_device *dev); -extern char *drm_get_encoder_name(struct drm_encoder *encoder); +extern const char *drm_get_encoder_name(const struct drm_encoder *encoder); extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); @@ -1094,6 +1095,6 @@ extern int drm_format_num_planes(uint32_t format); extern int drm_format_plane_cpp(uint32_t format, int plane); extern int drm_format_horz_chroma_subsampling(uint32_t format); extern int drm_format_vert_chroma_subsampling(uint32_t format); -extern char *drm_get_format_name(uint32_t format); +extern const char *drm_get_format_name(uint32_t format); #endif /* __DRM_CRTC_H__ */ -- cgit v1.2.3 From 9125e6186822b2698da17690416cd1b55c030115 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 3 Jun 2013 16:10:40 +0300 Subject: drm: Add drm_plane_force_disable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm_plane_force_disable() will forcibly disable the plane even if user had previously requested the plane to be enabled. This can be used to force planes to be off when restoring the fbdev mode. The code was simply pulled from drm_framebuffer_remove(), which now calls the new function as well. 
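A minimal sketch of the fbdev-restore use case mentioned above (restore_fbdev_mode_example() is an invented name, not part of this patch; the loop mirrors the one now used by drm_framebuffer_remove()):

static void restore_fbdev_mode_example(struct drm_device *dev)
{
	struct drm_plane *plane;

	drm_modeset_lock_all(dev);
	/* The fbdev console never uses planes, so force them all off. */
	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
		drm_plane_force_disable(plane);
	drm_modeset_unlock_all(dev);
}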
v2: Check plane->fb in drm_plane_force_disable(), drop bogus comment about disabling crtc Signed-off-by: Ville Syrjälä Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 29 +++++++++++++++++++---------- include/drm/drm_crtc.h | 1 + 2 files changed, 20 insertions(+), 10 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index baee5752ca6b..c8042a1c83b8 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -592,16 +592,8 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) } list_for_each_entry(plane, &dev->mode_config.plane_list, head) { - if (plane->fb == fb) { - /* should turn off the crtc */ - ret = plane->funcs->disable_plane(plane); - if (ret) - DRM_ERROR("failed to disable plane with busy fb\n"); - /* disconnect the plane from the fb and crtc: */ - __drm_framebuffer_unreference(plane->fb); - plane->fb = NULL; - plane->crtc = NULL; - } + if (plane->fb == fb) + drm_plane_force_disable(plane); } drm_modeset_unlock_all(dev); } @@ -891,6 +883,23 @@ void drm_plane_cleanup(struct drm_plane *plane) } EXPORT_SYMBOL(drm_plane_cleanup); +void drm_plane_force_disable(struct drm_plane *plane) +{ + int ret; + + if (!plane->fb) + return; + + ret = plane->funcs->disable_plane(plane); + if (ret) + DRM_ERROR("failed to disable plane with busy fb\n"); + /* disconnect the plane from the fb and crtc: */ + __drm_framebuffer_unreference(plane->fb); + plane->fb = NULL; + plane->crtc = NULL; +} +EXPORT_SYMBOL(drm_plane_force_disable); + /** * drm_mode_create - create a new display mode * @dev: DRM device diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 53c33e28a2f7..da137e732ac2 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -894,6 +894,7 @@ extern int drm_plane_init(struct drm_device *dev, const uint32_t *formats, uint32_t format_count, bool priv); extern void drm_plane_cleanup(struct drm_plane *plane); +extern void drm_plane_force_disable(struct drm_plane *plane); extern void drm_encoder_cleanup(struct drm_encoder *encoder); -- cgit v1.2.3 From 778ad903f951a45a309a5a70dbdcf38eccabeaf0 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 3 Jun 2013 16:11:42 +0300 Subject: drm: Remove some unused stuff from drm_plane MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's a bunch of unused members inside drm_plane, bloating the size of the structure needlessly. Eliminate them. 
v2: Remove all of it from kernel-doc too Reviewed-by: Laurent Pinchart Reviewed-by: Daniel Vetter Signed-off-by: Ville Syrjälä Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 2 +- include/drm/drm_crtc.h | 11 ----------- 2 files changed, 1 insertion(+), 12 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 755d9bb0b4e2..c9f9f3ded9e1 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1805,7 +1805,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data, plane_resp->plane_id = plane->base.id; plane_resp->possible_crtcs = plane->possible_crtcs; - plane_resp->gamma_size = plane->gamma_size; + plane_resp->gamma_size = 0; /* * This ioctl is called twice, once to determine how much space is diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index da137e732ac2..9779ea11e63d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -654,11 +654,7 @@ struct drm_plane_funcs { * @format_count: number of formats supported * @crtc: currently bound CRTC * @fb: currently bound fb - * @gamma_size: size of gamma table - * @gamma_store: gamma correction table - * @enabled: enabled flag * @funcs: helper functions - * @helper_private: storage for drver layer * @properties: property tracking for this plane */ struct drm_plane { @@ -674,14 +670,7 @@ struct drm_plane { struct drm_crtc *crtc; struct drm_framebuffer *fb; - /* CRTC gamma size for reporting to userspace */ - uint32_t gamma_size; - uint16_t *gamma_store; - - bool enabled; - const struct drm_plane_funcs *funcs; - void *helper_private; struct drm_object_properties properties; }; -- cgit v1.2.3 From 5cef29aa5227e6347145940a7bccde92fd9a1afa Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 15 Jun 2013 00:13:16 +0200 Subject: drm: fix fb leak in setcrtc Drivers are allowed (actually have to) disable unrelated crtcs in their ->set_config callback (when we steal all the connectors from that crtc). If they do that they'll clear crtc->fb to NULL. Which results in a refcount leak, since the drm core is keeping track of that reference. To fix this track the old fb of all crtcs and adjust references for all of them. Of course, since we only hold an additional reference for the fb for the current crtc we need to increase refcounts before we drop the old one. This approach has the benefit that it inches us a bit closer to an atomic modeset world, where we want to update the config of all crtcs in one step. 
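The ordering constraint above (take the new reference before dropping the old one) is the usual pattern for swapping refcounted pointers, since old and new may be the same object. A generic sketch with an invented helper name, not code from this patch:

static void example_swap_fb(struct drm_framebuffer **slot,
			    struct drm_framebuffer *new_fb)
{
	struct drm_framebuffer *old_fb = *slot;

	/*
	 * Reference the new fb before unreferencing the old one: if both
	 * point at the same framebuffer, the reverse order could drop the
	 * last reference and free it while it is still in use.
	 */
	if (new_fb)
		drm_framebuffer_reference(new_fb);
	*slot = new_fb;
	if (old_fb)
		drm_framebuffer_unreference(old_fb);
}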
This regression has been introduce in the framebuffer refcount conversion, specifically in commit b0d1232589df5575c5971224ac4cb30e7e525884 Author: Daniel Vetter Date: Tue Dec 11 01:07:12 2012 +0100 drm: refcounting for crtc framebuffers Reported-by: Russell King Cc: Russell King Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 22 ++++++++++++++++------ include/drm/drm_crtc.h | 4 ++++ 2 files changed, 20 insertions(+), 6 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index a7dc1e266c98..02838e66af76 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1972,21 +1972,31 @@ out: int drm_mode_set_config_internal(struct drm_mode_set *set) { struct drm_crtc *crtc = set->crtc; - struct drm_framebuffer *fb, *old_fb; + struct drm_framebuffer *fb; + struct drm_crtc *tmp; int ret; - old_fb = crtc->fb; + /* + * NOTE: ->set_config can also disable other crtcs (if we steal all + * connectors from it), hence we need to refcount the fbs across all + * crtcs. Atomic modeset will have saner semantics ... + */ + list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) + tmp->old_fb = tmp->fb; + fb = set->fb; ret = crtc->funcs->set_config(set); if (ret == 0) { /* crtc->fb must be updated by ->set_config, enforces this. */ WARN_ON(fb != crtc->fb); + } - if (old_fb) - drm_framebuffer_unreference(old_fb); - if (fb) - drm_framebuffer_reference(fb); + list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { + if (tmp->fb) + drm_framebuffer_reference(tmp->fb); + if (tmp->old_fb) + drm_framebuffer_unreference(tmp->old_fb); } return ret; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 9779ea11e63d..b9ddd3d42acf 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -409,6 +409,10 @@ struct drm_crtc { /* framebuffer the connector is currently bound to */ struct drm_framebuffer *fb; + /* Temporary tracking of the old fb while a modeset is ongoing. Used + * by drm_mode_set_config_internal to implement correct refcounting. */ + struct drm_framebuffer *old_fb; + bool enabled; /* Requested mode from modesetting. */ -- cgit v1.2.3 From 160954b7bca43da7cd3cfbce310e6df919a8216e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 6 Jun 2013 00:17:25 +0200 Subject: drm: kms_helper: don't lose hotplug event There's a race window (small for hpd, 10s large for polled outputs) where userspace could sneak in with an unrelated connnector probe ioctl call and eat the hotplug event (since neither the hpd nor the poll code see a state change). To avoid this, check whether the connector state changes in all other ->detect calls (in the current helper code that's only probe_single) and if that's the case, fire off a hotplug event. Note that we can't directly call the hotplug event handler, since that expects that no locks are held (due to reentrancy with the fb code to update the kms console). Also, this requires that drivers using the probe_single helper function set up the poll work. All current drivers do that already, and with the reworked hpd handling there'll be no downside to unconditionally setting up the poll work any more. 
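A sketch of the driver-side requirement from the last paragraph (example_driver_load() is an invented name): drivers relying on drm_helper_probe_single_connector_modes() need the poll work set up at load time, even if they never enable connector polling themselves, so the delayed hotplug event scheduled above has somewhere to run:

static int example_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* ... mode config, crtc/encoder/connector setup ... */

	/*
	 * Initializes output_poll_work; it stays idle unless polling is
	 * enabled or a delayed hotplug event is scheduled.
	 */
	drm_kms_helper_poll_init(dev);
	return 0;
}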
Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc_helper.c | 32 +++++++++++++++++++++++++++++++- include/drm/drm_crtc.h | 1 + 2 files changed, 32 insertions(+), 1 deletion(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 738a4294d820..f6829ba58e86 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -122,6 +122,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, int count = 0; int mode_flags = 0; bool verbose_prune = true; + enum drm_connector_status old_status; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); @@ -137,7 +138,32 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, if (connector->funcs->force) connector->funcs->force(connector); } else { + old_status = connector->status; + connector->status = connector->funcs->detect(connector, true); + + /* + * Normally either the driver's hpd code or the poll loop should + * pick up any changes and fire the hotplug event. But if + * userspace sneaks in a probe, we might miss a change. Hence + * check here, and if anything changed start the hotplug code. + */ + if (old_status != connector->status) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", + connector->base.id, + drm_get_connector_name(connector), + old_status, connector->status); + + /* + * The hotplug event code might call into the fb + * helpers, and so expects that we do not hold any + * locks. Fire up the poll struct instead, it will + * disable itself again. + */ + dev->mode_config.delayed_event = true; + schedule_delayed_work(&dev->mode_config.output_poll_work, + 0); + } } /* Re-enable polling in case the global poll config changed. */ @@ -985,7 +1011,11 @@ static void output_poll_execute(struct work_struct *work) struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work); struct drm_connector *connector; enum drm_connector_status old_status; - bool repoll = false, changed = false; + bool repoll = false, changed; + + /* Pick up any changes detected by the probe functions. 
*/ + changed = dev->mode_config.delayed_event; + dev->mode_config.delayed_event = false; if (!drm_kms_helper_poll) return; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index b9ddd3d42acf..0fd007af8de9 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -808,6 +808,7 @@ struct drm_mode_config { /* output poll support */ bool poll_enabled; bool poll_running; + bool delayed_event; struct delayed_work output_poll_work; /* pointers to standard properties */ -- cgit v1.2.3 From 516184bd01b56aad8bb53bb6ffe51f55cb8e3112 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 7 Jun 2013 11:52:42 -0400 Subject: drm/radeon: add current Bonaire PCI ids Signed-off-by: Alex Deucher --- include/drm/drm_pciids.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include/drm') diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index bb1bc485390b..23f89df3a3a0 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -152,6 +152,14 @@ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ -- cgit v1.2.3 From 26e2235d5f0d3d30c6d9e7cabd37ce38da35ce5d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 24 Jan 2013 12:30:24 -0500 Subject: drm/radeon: add current KB pci ids Signed-off-by: Alex Deucher --- include/drm/drm_pciids.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include/drm') diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 23f89df3a3a0..34efaf64cc87 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -588,6 +588,22 @@ {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ -- cgit v1.2.3 From 4c813d4d759c0e6b83bfd73795e9526493556dc2 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 20 Jun 2013 11:48:52 +1000 Subject: drm: add hotspot support for cursors. So it looks like for virtual hw cursors on QXL we need to inform the "hw" device what the cursor hotspot parameters are. This makes sense if you think the host has to draw the cursor and interpret clicks from it. However the current modesetting interface doesn't support passing the hotspot information from userspace. This implements a new cursor ioctl, that takes the hotspot info as well, userspace can try calling the new interface and if it gets -ENOSYS it means its on an older kernel and can just fallback. 
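The fallback described above could look roughly like the following userspace sketch, built on libdrm's drmIoctl() (set_cursor_with_hotspot() is an invented helper; depending on the kernel, real code may also want to treat EINVAL the same way as ENOSYS):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

static int set_cursor_with_hotspot(int fd, uint32_t crtc_id, uint32_t handle,
				   uint32_t w, uint32_t h,
				   int32_t hot_x, int32_t hot_y)
{
	struct drm_mode_cursor2 req2;
	struct drm_mode_cursor req;
	int ret;

	memset(&req2, 0, sizeof(req2));
	req2.flags = DRM_MODE_CURSOR_BO;
	req2.crtc_id = crtc_id;
	req2.handle = handle;
	req2.width = w;
	req2.height = h;
	req2.hot_x = hot_x;
	req2.hot_y = hot_y;

	ret = drmIoctl(fd, DRM_IOCTL_MODE_CURSOR2, &req2);
	if (ret == 0 || errno != ENOSYS)
		return ret;

	/* Older kernel: fall back to the original ioctl, losing the hotspot. */
	memset(&req, 0, sizeof(req));
	req.flags = DRM_MODE_CURSOR_BO;
	req.crtc_id = crtc_id;
	req.handle = handle;
	req.width = w;
	req.height = h;
	return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &req);
}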
Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 35 +++++++++++++++++++++++++++++------ drivers/gpu/drm/drm_drv.c | 1 + include/drm/drm_crtc.h | 5 +++++ include/uapi/drm/drm.h | 1 + include/uapi/drm/drm_mode.h | 13 +++++++++++++ 5 files changed, 49 insertions(+), 6 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 02838e66af76..fc83bb9eb514 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2178,10 +2178,10 @@ out: return ret; } -int drm_mode_cursor_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv) +static int drm_mode_cursor_common(struct drm_device *dev, + struct drm_mode_cursor2 *req, + struct drm_file *file_priv) { - struct drm_mode_cursor *req = data; struct drm_mode_object *obj; struct drm_crtc *crtc; int ret = 0; @@ -2201,13 +2201,17 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, mutex_lock(&crtc->mutex); if (req->flags & DRM_MODE_CURSOR_BO) { - if (!crtc->funcs->cursor_set) { + if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { ret = -ENXIO; goto out; } /* Turns off the cursor if handle is 0 */ - ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle, - req->width, req->height); + if (crtc->funcs->cursor_set2) + ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle, + req->width, req->height, req->hot_x, req->hot_y); + else + ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle, + req->width, req->height); } if (req->flags & DRM_MODE_CURSOR_MOVE) { @@ -2222,6 +2226,25 @@ out: mutex_unlock(&crtc->mutex); return ret; + +} +int drm_mode_cursor_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_cursor *req = data; + struct drm_mode_cursor2 new_req; + + memcpy(&new_req, req, sizeof(struct drm_mode_cursor)); + new_req.hot_x = new_req.hot_y = 0; + + return drm_mode_cursor_common(dev, &new_req, file_priv); +} + +int drm_mode_cursor2_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_cursor2 *req = data; + return drm_mode_cursor_common(dev, req, file_priv); } /* Original addfb only supported RGB formats, so figure out which one */ diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 9cc247f55502..99fcd7c32ea2 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -166,6 +166,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 0fd007af8de9..663c3ab47752 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -339,6 +339,9 @@ struct drm_crtc_funcs { /* cursor controls */ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height); + int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height, + int32_t hot_x, int32_t hot_y); int (*cursor_move)(struct drm_crtc *crtc, int x, int y); /* Set gamma 
on the CRTC */ @@ -1018,6 +1021,8 @@ extern int drm_mode_setplane(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_cursor_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mode_cursor2_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); extern int drm_mode_addfb(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_addfb2(struct drm_device *dev, diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 5a57be68bab7..238a166b9fe6 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -732,6 +732,7 @@ struct drm_prime_handle { #define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2) #define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties) #define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) +#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2) /** * Device specific ioctls should only be in their respective headers diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 090e5331ab7e..53db7cea373b 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -388,6 +388,19 @@ struct drm_mode_cursor { __u32 handle; }; +struct drm_mode_cursor2 { + __u32 flags; + __u32 crtc_id; + __s32 x; + __s32 y; + __u32 width; + __u32 height; + /* driver specific handle */ + __u32 handle; + __s32 hot_x; + __s32 hot_y; +}; + struct drm_mode_crtc_lut { __u32 crtc_id; __u32 gamma_size; -- cgit v1.2.3 From 210a0b9e212370ed8c2784c2115e7ff4bb1259bd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 22 Mar 2013 10:35:50 -0400 Subject: drm: add some additional fixed point helpers (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Required for certain driver calculations. Code was written by Christian König and ported to the drm by me. v2: fix 64 bit divides v3: fix 64 bit for real (math64.h) Signed-off-by: Alex Deucher --- include/drm/drm_fixed.h | 94 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) (limited to 'include/drm') diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h index 0ead502e17d2..f5e1168c7647 100644 --- a/include/drm/drm_fixed.h +++ b/include/drm/drm_fixed.h @@ -20,10 +20,13 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie + * Christian König */ #ifndef DRM_FIXED_H #define DRM_FIXED_H +#include + typedef union dfixed { u32 full; } fixed20_12; @@ -65,4 +68,95 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) tmp /= 2; return lower_32_bits(tmp); } + +#define DRM_FIXED_POINT 32 +#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT) +#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1) +#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK) + +static inline s64 drm_int2fixp(int a) +{ + return ((s64)a) << DRM_FIXED_POINT; +} + +static inline int drm_fixp2int(int64_t a) +{ + return ((s64)a) >> DRM_FIXED_POINT; +} + +static inline s64 drm_fixp_msbset(int64_t a) +{ + unsigned shift, sign = (a >> 63) & 1; + + for (shift = 62; shift > 0; --shift) + if ((a >> shift) != sign) + return shift; + + return 0; +} + +static inline s64 drm_fixp_mul(s64 a, s64 b) +{ + unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b); + s64 result; + + if (shift > 63) { + shift = shift - 63; + a >>= shift >> 1; + b >>= shift >> 1; + } else + shift = 0; + + result = a * b; + + if (shift > DRM_FIXED_POINT) + return result << (shift - DRM_FIXED_POINT); + + if (shift < DRM_FIXED_POINT) + return result >> (DRM_FIXED_POINT - shift); + + return result; +} + +static inline s64 drm_fixp_div(s64 a, s64 b) +{ + unsigned shift = 63 - drm_fixp_msbset(a); + s64 result; + + a <<= shift; + + if (shift < DRM_FIXED_POINT) + b >>= (DRM_FIXED_POINT - shift); + + result = div64_s64(a, b); + + if (shift > DRM_FIXED_POINT) + return result >> (shift - DRM_FIXED_POINT); + + return result; +} + +static inline s64 drm_fixp_exp(s64 x) +{ + s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000); + s64 sum = DRM_FIXED_ONE, term, y = x; + u64 count = 1; + + if (x < 0) + y = -1 * x; + + term = y; + + while (term >= tolerance) { + sum = sum + term; + count = count + 1; + term = drm_fixp_mul(term, div64_s64(y, count)); + } + + if (x < 0) + sum = drm_fixp_div(1, sum); + + return sum; +} + #endif -- cgit v1.2.3 From ecff665f5e3f1c6909353e00b9420e45ae23d995 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 27 Jun 2013 13:48:17 +0200 Subject: drm/ttm: make ttm reservation calls behave like reservation calls This commit converts the source of the val_seq counter to the ww_mutex api. The reservation objects are converted later, because there is still a lockdep splat in nouveau that has to resolved first. 
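For drivers the net effect is that the ad-hoc sequence number is replaced by a ww_acquire_ctx "ticket" that has to be threaded through reserve, fence and backoff. A condensed sketch of the new calling convention (example_submit() and example_do_work() are invented placeholders; the real conversions are in the hunks below):

int example_do_work(struct ttm_buffer_object *bo); /* placeholder, not a TTM function */

static int example_submit(struct ttm_buffer_object *bo, void *fence)
{
	struct ttm_validate_buffer tv = { .bo = bo };
	struct ww_acquire_ctx ticket;
	struct list_head head;
	int r;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	/* The ticket now carries the deadlock-avoidance state. */
	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	r = example_do_work(bo);
	if (r) {
		/* Drops the reservations and releases the ticket. */
		ttm_eu_backoff_reservation(&ticket, &head);
		return r;
	}

	/* Attaches the fence, unreserves and releases the ticket. */
	ttm_eu_fence_buffer_objects(&ticket, &head, fence);
	return 0;
}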
Signed-off-by: Maarten Lankhorst Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_gem.c | 38 ++++++++++++++------- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_cs.c | 18 +++++----- drivers/gpu/drm/radeon/radeon_object.c | 5 +-- drivers/gpu/drm/radeon/radeon_object.h | 3 +- drivers/gpu/drm/radeon/radeon_uvd.c | 27 +++++++-------- drivers/gpu/drm/ttm/ttm_bo.c | 50 +++++++++++++++++---------- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 58 +++++++++++++++++--------------- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 14 ++++---- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 23 ++++++++----- include/drm/ttm/ttm_bo_api.h | 2 +- include/drm/ttm/ttm_bo_driver.h | 32 +++++++++++++----- include/drm/ttm/ttm_execbuf_util.h | 13 +++++-- 13 files changed, 172 insertions(+), 112 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 705470600e48..e35d4688711c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -277,10 +277,12 @@ struct validate_op { struct list_head vram_list; struct list_head gart_list; struct list_head both_list; + struct ww_acquire_ctx ticket; }; static void -validate_fini_list(struct list_head *list, struct nouveau_fence *fence) +validate_fini_list(struct list_head *list, struct nouveau_fence *fence, + struct ww_acquire_ctx *ticket) { struct list_head *entry, *tmp; struct nouveau_bo *nvbo; @@ -297,17 +299,24 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) list_del(&nvbo->entry); nvbo->reserved_by = NULL; - ttm_bo_unreserve(&nvbo->bo); + ttm_bo_unreserve_ticket(&nvbo->bo, ticket); drm_gem_object_unreference_unlocked(nvbo->gem); } } static void -validate_fini(struct validate_op *op, struct nouveau_fence* fence) +validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence) { - validate_fini_list(&op->vram_list, fence); - validate_fini_list(&op->gart_list, fence); - validate_fini_list(&op->both_list, fence); + validate_fini_list(&op->vram_list, fence, &op->ticket); + validate_fini_list(&op->gart_list, fence, &op->ticket); + validate_fini_list(&op->both_list, fence, &op->ticket); +} + +static void +validate_fini(struct validate_op *op, struct nouveau_fence *fence) +{ + validate_fini_no_ticket(op, fence); + ww_acquire_fini(&op->ticket); } static int @@ -317,13 +326,11 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, { struct nouveau_cli *cli = nouveau_cli(file_priv); struct drm_device *dev = chan->drm->dev; - struct nouveau_drm *drm = nouveau_drm(dev); - uint32_t sequence; int trycnt = 0; int ret, i; struct nouveau_bo *res_bo = NULL; - sequence = atomic_add_return(1, &drm->ttm.validate_sequence); + ww_acquire_init(&op->ticket, &reservation_ww_class); retry: if (++trycnt > 100000) { NV_ERROR(cli, "%s failed and gave up.\n", __func__); @@ -338,6 +345,7 @@ retry: gem = drm_gem_object_lookup(dev, file_priv, b->handle); if (!gem) { NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle); + ww_acquire_done(&op->ticket); validate_fini(op, NULL); return -ENOENT; } @@ -352,21 +360,23 @@ retry: NV_ERROR(cli, "multiple instances of buffer %d on " "validation list\n", b->handle); drm_gem_object_unreference_unlocked(gem); + ww_acquire_done(&op->ticket); validate_fini(op, NULL); return -EINVAL; } - ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence); + ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket); if (ret) { - validate_fini(op, 
NULL); + validate_fini_no_ticket(op, NULL); if (unlikely(ret == -EAGAIN)) { - sequence = atomic_add_return(1, &drm->ttm.validate_sequence); ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, - sequence); + &op->ticket); if (!ret) res_bo = nvbo; } if (unlikely(ret)) { + ww_acquire_done(&op->ticket); + ww_acquire_fini(&op->ticket); drm_gem_object_unreference_unlocked(gem); if (ret != -ERESTARTSYS) NV_ERROR(cli, "fail reserve\n"); @@ -390,6 +400,7 @@ retry: NV_ERROR(cli, "invalid valid domains: 0x%08x\n", b->valid_domains); list_add_tail(&nvbo->entry, &op->both_list); + ww_acquire_done(&op->ticket); validate_fini(op, NULL); return -EINVAL; } @@ -397,6 +408,7 @@ retry: goto retry; } + ww_acquire_done(&op->ticket); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a424949005c9..7e3fef4e6938 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -979,6 +979,7 @@ struct radeon_cs_parser { u32 cs_flags; u32 ring; s32 priority; + struct ww_acquire_ctx ticket; }; extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 4f6b22b799ba..13a130fb3517 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -106,7 +106,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) radeon_bo_list_add_object(&p->relocs[i].lobj, &p->validated); } - return radeon_bo_list_validate(&p->validated, p->ring); + return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring); } static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) @@ -314,15 +314,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) * If error is set than unvalidate buffer, otherwise just free memory * used by parsing context. 
**/ -static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) +static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff) { unsigned i; if (!error) { - ttm_eu_fence_buffer_objects(&parser->validated, + ttm_eu_fence_buffer_objects(&parser->ticket, + &parser->validated, parser->ib.fence); - } else { - ttm_eu_backoff_reservation(&parser->validated); + } else if (backoff) { + ttm_eu_backoff_reservation(&parser->ticket, + &parser->validated); } if (parser->relocs != NULL) { @@ -535,7 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); - radeon_cs_parser_fini(&parser, r); + radeon_cs_parser_fini(&parser, r, false); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; @@ -544,7 +546,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) { if (r != -ERESTARTSYS) DRM_ERROR("Failed to parse relocation %d!\n", r); - radeon_cs_parser_fini(&parser, r); + radeon_cs_parser_fini(&parser, r, false); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; @@ -563,7 +565,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; } out: - radeon_cs_parser_fini(&parser, r); + radeon_cs_parser_fini(&parser, r, true); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 07af5a95bb62..71287bb7b433 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -349,14 +349,15 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj, } } -int radeon_bo_list_validate(struct list_head *head, int ring) +int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, + struct list_head *head, int ring) { struct radeon_bo_list *lobj; struct radeon_bo *bo; u32 domain; int r; - r = ttm_eu_reserve_buffers(head); + r = ttm_eu_reserve_buffers(ticket, head); if (unlikely(r != 0)) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index e2cb80a96b51..3e62a3a0fe54 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -128,7 +128,8 @@ extern int radeon_bo_init(struct radeon_device *rdev); extern void radeon_bo_fini(struct radeon_device *rdev); extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, struct list_head *head); -extern int radeon_bo_list_validate(struct list_head *head, int ring); +extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, + struct list_head *head, int ring); extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma); extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index ce5a10c8d338..41efcec28cd8 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -550,6 +550,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, struct radeon_fence **fence) { struct ttm_validate_buffer tv; + struct ww_acquire_ctx ticket; struct list_head head; struct radeon_ib ib; uint64_t addr; @@ -561,7 +562,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, INIT_LIST_HEAD(&head); list_add(&tv.head, &head); - r = ttm_eu_reserve_buffers(&head); + r = 
ttm_eu_reserve_buffers(&ticket, &head); if (r) return r; @@ -569,16 +570,12 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, radeon_uvd_force_into_uvd_segment(bo); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - if (r) { - ttm_eu_backoff_reservation(&head); - return r; - } + if (r) + goto err; r = radeon_ib_get(rdev, ring, &ib, NULL, 16); - if (r) { - ttm_eu_backoff_reservation(&head); - return r; - } + if (r) + goto err; addr = radeon_bo_gpu_offset(bo); ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0); @@ -592,11 +589,9 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, ib.length_dw = 16; r = radeon_ib_schedule(rdev, &ib, NULL); - if (r) { - ttm_eu_backoff_reservation(&head); - return r; - } - ttm_eu_fence_buffer_objects(&head, ib.fence); + if (r) + goto err; + ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); if (fence) *fence = radeon_fence_ref(ib.fence); @@ -604,6 +599,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, radeon_ib_free(rdev, &ib); radeon_bo_unref(&bo); return 0; + +err: + ttm_eu_backoff_reservation(&ticket, &head); + return r; } /* multiple fence commands without any stream commands in between can diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 9b07b7d44a58..b912375b9c18 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -215,7 +215,8 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, bool interruptible, - bool no_wait, bool use_sequence, uint32_t sequence) + bool no_wait, bool use_ticket, + struct ww_acquire_ctx *ticket) { int ret; @@ -223,17 +224,17 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, /** * Deadlock avoidance for multi-bo reserving. */ - if (use_sequence && bo->seq_valid) { + if (use_ticket && bo->seq_valid) { /** * We've already reserved this one. */ - if (unlikely(sequence == bo->val_seq)) + if (unlikely(ticket->stamp == bo->val_seq)) return -EDEADLK; /** * Already reserved by a thread that will not back * off for us. We need to back off. */ - if (unlikely(sequence - bo->val_seq < (1 << 31))) + if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX)) return -EAGAIN; } @@ -246,13 +247,14 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, return ret; } - if (use_sequence) { + if (use_ticket) { bool wake_up = false; + /** * Wake up waiters that may need to recheck for deadlock, * if we decreased the sequence number. 
*/ - if (unlikely((bo->val_seq - sequence < (1 << 31)) + if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX) || !bo->seq_valid)) wake_up = true; @@ -266,7 +268,7 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, * written before val_seq was, and just means some slightly * increased cpu usage */ - bo->val_seq = sequence; + bo->val_seq = ticket->stamp; bo->seq_valid = true; if (wake_up) wake_up_all(&bo->event_queue); @@ -292,14 +294,15 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, - bool no_wait, bool use_sequence, uint32_t sequence) + bool no_wait, bool use_ticket, + struct ww_acquire_ctx *ticket) { struct ttm_bo_global *glob = bo->glob; int put_count = 0; int ret; - ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence, - sequence); + ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket, + ticket); if (likely(ret == 0)) { spin_lock(&glob->lru_lock); put_count = ttm_bo_del_from_lru(bo); @@ -311,13 +314,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, } int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, - bool interruptible, uint32_t sequence) + bool interruptible, + struct ww_acquire_ctx *ticket) { bool wake_up = false; int ret; while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { - WARN_ON(bo->seq_valid && sequence == bo->val_seq); + WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq); ret = ttm_bo_wait_unreserved(bo, interruptible); @@ -325,14 +329,14 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, return ret; } - if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid) + if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid) wake_up = true; /** * Wake up waiters that may need to recheck for deadlock, * if we decreased the sequence number. */ - bo->val_seq = sequence; + bo->val_seq = ticket->stamp; bo->seq_valid = true; if (wake_up) wake_up_all(&bo->event_queue); @@ -341,12 +345,12 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, } int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, - bool interruptible, uint32_t sequence) + bool interruptible, struct ww_acquire_ctx *ticket) { struct ttm_bo_global *glob = bo->glob; int put_count, ret; - ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence); + ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket); if (likely(!ret)) { spin_lock(&glob->lru_lock); put_count = ttm_bo_del_from_lru(bo); @@ -357,7 +361,7 @@ int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_reserve_slowpath); -void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) +void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket) { ttm_bo_add_to_lru(bo); atomic_set(&bo->reserved, 0); @@ -369,11 +373,21 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo) struct ttm_bo_global *glob = bo->glob; spin_lock(&glob->lru_lock); - ttm_bo_unreserve_locked(bo); + ttm_bo_unreserve_ticket_locked(bo, NULL); spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_bo_unreserve); +void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket) +{ + struct ttm_bo_global *glob = bo->glob; + + spin_lock(&glob->lru_lock); + ttm_bo_unreserve_ticket_locked(bo, ticket); + spin_unlock(&glob->lru_lock); +} +EXPORT_SYMBOL(ttm_bo_unreserve_ticket); + /* * Call bo->mutex locked. 
*/ diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 7b90def15674..efcb734e5543 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -32,7 +32,8 @@ #include #include -static void ttm_eu_backoff_reservation_locked(struct list_head *list) +static void ttm_eu_backoff_reservation_locked(struct list_head *list, + struct ww_acquire_ctx *ticket) { struct ttm_validate_buffer *entry; @@ -41,14 +42,15 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list) if (!entry->reserved) continue; + entry->reserved = false; if (entry->removed) { - ttm_bo_add_to_lru(bo); + ttm_bo_unreserve_ticket_locked(bo, ticket); entry->removed = false; + } else { + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); } - entry->reserved = false; - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); } } @@ -82,7 +84,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list) } } -void ttm_eu_backoff_reservation(struct list_head *list) +void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, + struct list_head *list) { struct ttm_validate_buffer *entry; struct ttm_bo_global *glob; @@ -93,7 +96,8 @@ void ttm_eu_backoff_reservation(struct list_head *list) entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list); + ttm_eu_backoff_reservation_locked(list, ticket); + ww_acquire_fini(ticket); spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_eu_backoff_reservation); @@ -110,12 +114,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); * buffers in different orders. */ -int ttm_eu_reserve_buffers(struct list_head *list) +int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, + struct list_head *list) { struct ttm_bo_global *glob; struct ttm_validate_buffer *entry; int ret; - uint32_t val_seq; if (list_empty(list)) return 0; @@ -129,8 +133,8 @@ int ttm_eu_reserve_buffers(struct list_head *list) entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; + ww_acquire_init(ticket, &reservation_ww_class); spin_lock(&glob->lru_lock); - val_seq = entry->bo->bdev->val_seq++; retry: list_for_each_entry(entry, list, head) { @@ -140,7 +144,7 @@ retry: if (entry->reserved) continue; - ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq); + ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket); switch (ret) { case 0: break; @@ -148,8 +152,9 @@ retry: ttm_eu_del_from_lru_locked(list); spin_unlock(&glob->lru_lock); ret = ttm_bo_reserve_nolru(bo, true, false, - true, val_seq); + true, ticket); spin_lock(&glob->lru_lock); + if (!ret) break; @@ -158,21 +163,13 @@ retry: /* fallthrough */ case -EAGAIN: - ttm_eu_backoff_reservation_locked(list); - - /* - * temporarily increase sequence number every retry, - * to prevent us from seeing our old reservation - * sequence when someone else reserved the buffer, - * but hasn't updated the seq_valid/seqno members yet. 
- */ - val_seq = entry->bo->bdev->val_seq++; - + ttm_eu_backoff_reservation_locked(list, ticket); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); - ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq); + ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket); if (unlikely(ret != 0)) - return ret; + goto err_fini; + spin_lock(&glob->lru_lock); entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { @@ -191,21 +188,25 @@ retry: } } + ww_acquire_done(ticket); ttm_eu_del_from_lru_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); - return 0; err: - ttm_eu_backoff_reservation_locked(list); + ttm_eu_backoff_reservation_locked(list, ticket); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); +err_fini: + ww_acquire_done(ticket); + ww_acquire_fini(ticket); return ret; } EXPORT_SYMBOL(ttm_eu_reserve_buffers); -void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) +void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, + struct list_head *list, void *sync_obj) { struct ttm_validate_buffer *entry; struct ttm_buffer_object *bo; @@ -228,11 +229,12 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) bo = entry->bo; entry->old_sync_obj = bo->sync_obj; bo->sync_obj = driver->sync_obj_ref(sync_obj); - ttm_bo_unreserve_locked(bo); + ttm_bo_unreserve_ticket_locked(bo, ticket); entry->reserved = false; } spin_unlock(&bdev->fence_lock); spin_unlock(&glob->lru_lock); + ww_acquire_fini(ticket); list_for_each_entry(entry, list, head) { if (entry->old_sync_obj) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 394e6476105b..599f6469a1eb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1432,6 +1432,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, struct vmw_fence_obj *fence = NULL; struct vmw_resource *error_resource; struct list_head resource_list; + struct ww_acquire_ctx ticket; uint32_t handle; void *cmd; int ret; @@ -1488,7 +1489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (unlikely(ret != 0)) goto out_err; - ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); + ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); if (unlikely(ret != 0)) goto out_err; @@ -1537,7 +1538,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, DRM_ERROR("Fence submission error. 
Syncing.\n"); vmw_resource_list_unreserve(&sw_context->resource_list, false); - ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, + ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, (void *) fence); if (unlikely(dev_priv->pinned_bo != NULL && @@ -1570,7 +1571,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, out_err: vmw_resource_relocations_free(&sw_context->res_relocations); vmw_free_relocations(sw_context); - ttm_eu_backoff_reservation(&sw_context->validate_nodes); + ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); vmw_resource_list_unreserve(&sw_context->resource_list, true); vmw_clear_validations(sw_context); if (unlikely(dev_priv->pinned_bo != NULL && @@ -1644,6 +1645,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, struct list_head validate_list; struct ttm_validate_buffer pinned_val, query_val; struct vmw_fence_obj *lfence = NULL; + struct ww_acquire_ctx ticket; if (dev_priv->pinned_bo == NULL) goto out_unlock; @@ -1657,7 +1659,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, list_add_tail(&query_val.head, &validate_list); do { - ret = ttm_eu_reserve_buffers(&validate_list); + ret = ttm_eu_reserve_buffers(&ticket, &validate_list); } while (ret == -ERESTARTSYS); if (unlikely(ret != 0)) { @@ -1684,7 +1686,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, NULL); fence = lfence; } - ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); + ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence); if (lfence != NULL) vmw_fence_obj_unreference(&lfence); @@ -1696,7 +1698,7 @@ out_unlock: return; out_no_emit: - ttm_eu_backoff_reservation(&validate_list); + ttm_eu_backoff_reservation(&ticket, &validate_list); out_no_reserve: ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index bc784254e78e..ced79465a095 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -990,9 +990,11 @@ void vmw_resource_unreserve(struct vmw_resource *res, * @val_buf: On successful return contains data about the * reserved and validated backup buffer. */ -int vmw_resource_check_buffer(struct vmw_resource *res, - bool interruptible, - struct ttm_validate_buffer *val_buf) +static int +vmw_resource_check_buffer(struct vmw_resource *res, + struct ww_acquire_ctx *ticket, + bool interruptible, + struct ttm_validate_buffer *val_buf) { struct list_head val_list; bool backup_dirty = false; @@ -1007,7 +1009,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res, INIT_LIST_HEAD(&val_list); val_buf->bo = ttm_bo_reference(&res->backup->base); list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(&val_list); + ret = ttm_eu_reserve_buffers(ticket, &val_list); if (unlikely(ret != 0)) goto out_no_reserve; @@ -1025,7 +1027,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res, return 0; out_no_validate: - ttm_eu_backoff_reservation(&val_list); + ttm_eu_backoff_reservation(ticket, &val_list); out_no_reserve: ttm_bo_unref(&val_buf->bo); if (backup_dirty) @@ -1069,7 +1071,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) *. * @val_buf: Backup buffer information. 
*/ -void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) +static void +vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, + struct ttm_validate_buffer *val_buf) { struct list_head val_list; @@ -1078,7 +1082,7 @@ void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) INIT_LIST_HEAD(&val_list); list_add_tail(&val_buf->head, &val_list); - ttm_eu_backoff_reservation(&val_list); + ttm_eu_backoff_reservation(ticket, &val_list); ttm_bo_unref(&val_buf->bo); } @@ -1092,12 +1096,13 @@ int vmw_resource_do_evict(struct vmw_resource *res) { struct ttm_validate_buffer val_buf; const struct vmw_res_func *func = res->func; + struct ww_acquire_ctx ticket; int ret; BUG_ON(!func->may_evict); val_buf.bo = NULL; - ret = vmw_resource_check_buffer(res, true, &val_buf); + ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf); if (unlikely(ret != 0)) return ret; @@ -1112,7 +1117,7 @@ int vmw_resource_do_evict(struct vmw_resource *res) res->backup_dirty = true; res->res_dirty = false; out_no_unbind: - vmw_resource_backoff_reservation(&val_buf); + vmw_resource_backoff_reservation(&ticket, &val_buf); return ret; } diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 3cb5d848fb66..0a992b016fe9 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -234,7 +234,7 @@ struct ttm_buffer_object { struct list_head ddestroy; struct list_head swap; struct list_head io_reserve_lru; - uint32_t val_seq; + unsigned long val_seq; bool seq_valid; /** diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 9c8dca79808e..ec18c5f3e6e9 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -38,6 +38,7 @@ #include #include #include +#include struct ttm_backend_func { /** @@ -778,7 +779,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. - * @use_sequence: If @bo is already reserved, Only sleep waiting for + * @use_ticket: If @bo is already reserved, Only sleep waiting for * it to become unreserved if @sequence < (@bo)->sequence. * * Locks a buffer object for validation. (Or prevents other processes from @@ -819,7 +820,8 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); */ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, - bool no_wait, bool use_sequence, uint32_t sequence); + bool no_wait, bool use_ticket, + struct ww_acquire_ctx *ticket); /** * ttm_bo_reserve_slowpath_nolru: @@ -836,7 +838,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, */ extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, bool interruptible, - uint32_t sequence); + struct ww_acquire_ctx *ticket); /** @@ -850,7 +852,8 @@ extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, * held by us, this function cannot deadlock any more. 
*/ extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, - bool interruptible, uint32_t sequence); + bool interruptible, + struct ww_acquire_ctx *ticket); /** * ttm_bo_reserve_nolru: @@ -876,8 +879,8 @@ extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, */ extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, bool interruptible, - bool no_wait, bool use_sequence, - uint32_t sequence); + bool no_wait, bool use_ticket, + struct ww_acquire_ctx *ticket); /** * ttm_bo_unreserve @@ -889,14 +892,25 @@ extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); /** - * ttm_bo_unreserve_locked + * ttm_bo_unreserve_ticket + * @bo: A pointer to a struct ttm_buffer_object. + * @ticket: ww_acquire_ctx used for reserving * + * Unreserve a previous reservation of @bo made with @ticket. + */ +extern void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, + struct ww_acquire_ctx *ticket); + +/** + * ttm_bo_unreserve_locked * @bo: A pointer to a struct ttm_buffer_object. + * @ticket: ww_acquire_ctx used for reserving, or NULL * - * Unreserve a previous reservation of @bo. + * Unreserve a previous reservation of @bo made with @ticket. * Needs to be called with struct ttm_bo_global::lru_lock held. */ -extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo); +extern void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, + struct ww_acquire_ctx *ticket); /* * ttm_bo_util.c diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 547e19f06e57..ba71ef91f4d5 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -33,6 +33,7 @@ #include #include +#include /** * struct ttm_validate_buffer @@ -57,17 +58,20 @@ struct ttm_validate_buffer { /** * function ttm_eu_backoff_reservation * + * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. * * Undoes all buffer validation reservations for bos pointed to by * the list entries. */ -extern void ttm_eu_backoff_reservation(struct list_head *list); +extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, + struct list_head *list); /** * function ttm_eu_reserve_buffers * + * @ticket: [out] ww_acquire_ctx returned by call. * @list: thread private list of ttm_validate_buffer structs. * * Tries to reserve bos pointed to by the list entries for validation. @@ -90,11 +94,13 @@ extern void ttm_eu_backoff_reservation(struct list_head *list); * has failed. */ -extern int ttm_eu_reserve_buffers(struct list_head *list); +extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, + struct list_head *list); /** * function ttm_eu_fence_buffer_objects. * + * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. * @sync_obj: The new sync object for the buffers. * @@ -104,6 +110,7 @@ extern int ttm_eu_reserve_buffers(struct list_head *list); * */ -extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj); +extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, + struct list_head *list, void *sync_obj); #endif -- cgit v1.2.3 From 5e338405119a80aa59e811626739122d1c15045d Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 27 Jun 2013 13:48:19 +0200 Subject: drm/ttm: convert to the reservation api Now that the code is compatible in semantics, flip the switch. Use ww_mutex instead of the homegrown implementation. 
ww_mutex uses -EDEADLK to signal that the caller has to back off, and -EALREADY to indicate this buffer is already held by the caller. ttm used -EAGAIN and -EDEADLK for those, respectively. So some changes were needed to handle this correctly. Signed-off-by: Maarten Lankhorst Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +- drivers/gpu/drm/qxl/qxl_object.h | 5 - drivers/gpu/drm/ttm/ttm_bo.c | 190 +++++++++------------------------ drivers/gpu/drm/ttm/ttm_bo_util.c | 6 +- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 43 +++----- include/drm/ttm/ttm_bo_api.h | 25 ++--- include/drm/ttm/ttm_execbuf_util.h | 1 - 7 files changed, 79 insertions(+), 193 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index e35d4688711c..2b2077d1e2f7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -368,7 +368,7 @@ retry: ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket); if (ret) { validate_fini_no_ticket(op, NULL); - if (unlikely(ret == -EAGAIN)) { + if (unlikely(ret == -EDEADLK)) { ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, &op->ticket); if (!ret) diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index b4fd89fbd8b7..ee7ad79ce781 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -57,11 +57,6 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo) return bo->tbo.num_pages << PAGE_SHIFT; } -static inline bool qxl_bo_is_reserved(struct qxl_bo *bo) -{ - return !!atomic_read(&bo->tbo.reserved); -} - static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) { return bo->tbo.addr_space_offset; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index b912375b9c18..5f9fe8044afc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -150,6 +150,9 @@ static void ttm_bo_release_list(struct kref *list_kref) if (bo->ttm) ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); + if (bo->resv == &bo->ttm_resv) + reservation_object_fini(&bo->ttm_resv); + if (bo->destroy) bo->destroy(bo); else { @@ -158,18 +161,6 @@ static void ttm_bo_release_list(struct kref *list_kref) ttm_mem_global_free(bdev->glob->mem_glob, acc_size); } -static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, - bool interruptible) -{ - if (interruptible) { - return wait_event_interruptible(bo->event_queue, - !ttm_bo_is_reserved(bo)); - } else { - wait_event(bo->event_queue, !ttm_bo_is_reserved(bo)); - return 0; - } -} - void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; @@ -218,65 +209,27 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, bool no_wait, bool use_ticket, struct ww_acquire_ctx *ticket) { - int ret; + int ret = 0; - while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { - /** - * Deadlock avoidance for multi-bo reserving. - */ - if (use_ticket && bo->seq_valid) { - /** - * We've already reserved this one. - */ - if (unlikely(ticket->stamp == bo->val_seq)) - return -EDEADLK; - /** - * Already reserved by a thread that will not back - * off for us. We need to back off. - */ - if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX)) - return -EAGAIN; - } + if (no_wait) { + bool success; - if (no_wait) + /* not valid any more, fix your locking! 
*/ + if (WARN_ON(ticket)) return -EBUSY; - ret = ttm_bo_wait_unreserved(bo, interruptible); - - if (unlikely(ret)) - return ret; - } - - if (use_ticket) { - bool wake_up = false; - - /** - * Wake up waiters that may need to recheck for deadlock, - * if we decreased the sequence number. - */ - if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX) - || !bo->seq_valid)) - wake_up = true; - - /* - * In the worst case with memory ordering these values can be - * seen in the wrong order. However since we call wake_up_all - * in that case, this will hopefully not pose a problem, - * and the worst case would only cause someone to accidentally - * hit -EAGAIN in ttm_bo_reserve when they see old value of - * val_seq. However this would only happen if seq_valid was - * written before val_seq was, and just means some slightly - * increased cpu usage - */ - bo->val_seq = ticket->stamp; - bo->seq_valid = true; - if (wake_up) - wake_up_all(&bo->event_queue); - } else { - bo->seq_valid = false; + success = ww_mutex_trylock(&bo->resv->lock); + return success ? 0 : -EBUSY; } - return 0; + if (interruptible) + ret = ww_mutex_lock_interruptible(&bo->resv->lock, + ticket); + else + ret = ww_mutex_lock(&bo->resv->lock, ticket); + if (ret == -EINTR) + return -ERESTARTSYS; + return ret; } EXPORT_SYMBOL(ttm_bo_reserve); @@ -313,50 +266,27 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, return ret; } -int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, - bool interruptible, - struct ww_acquire_ctx *ticket) -{ - bool wake_up = false; - int ret; - - while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { - WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq); - - ret = ttm_bo_wait_unreserved(bo, interruptible); - - if (unlikely(ret)) - return ret; - } - - if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid) - wake_up = true; - - /** - * Wake up waiters that may need to recheck for deadlock, - * if we decreased the sequence number. - */ - bo->val_seq = ticket->stamp; - bo->seq_valid = true; - if (wake_up) - wake_up_all(&bo->event_queue); - - return 0; -} - int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, bool interruptible, struct ww_acquire_ctx *ticket) { struct ttm_bo_global *glob = bo->glob; - int put_count, ret; + int put_count = 0; + int ret = 0; + + if (interruptible) + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); + else + ww_mutex_lock_slow(&bo->resv->lock, ticket); - ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket); - if (likely(!ret)) { + if (likely(ret == 0)) { spin_lock(&glob->lru_lock); put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); ttm_bo_list_ref_sub(bo, put_count, true); - } + } else if (ret == -EINTR) + ret = -ERESTARTSYS; + return ret; } EXPORT_SYMBOL(ttm_bo_reserve_slowpath); @@ -364,8 +294,7 @@ EXPORT_SYMBOL(ttm_bo_reserve_slowpath); void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket) { ttm_bo_add_to_lru(bo); - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ww_mutex_unlock(&bo->resv->lock); } void ttm_bo_unreserve(struct ttm_buffer_object *bo) @@ -558,17 +487,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) } ttm_bo_mem_put(bo, &bo->mem); - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); - - /* - * Since the final reference to this bo may not be dropped by - * the current task we have to put a memory barrier here to make - * sure the changes done in this function are always visible. 
- * - * This function only needs protection against the final kref_put. - */ - smp_mb__before_atomic_dec(); + ww_mutex_unlock (&bo->resv->lock); } static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) @@ -600,10 +519,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) sync_obj = driver->sync_obj_ref(bo->sync_obj); spin_unlock(&bdev->fence_lock); - if (!ret) { - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); - } + if (!ret) + ww_mutex_unlock(&bo->resv->lock); kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); @@ -653,8 +570,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, sync_obj = driver->sync_obj_ref(bo->sync_obj); spin_unlock(&bdev->fence_lock); - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ww_mutex_unlock(&bo->resv->lock); spin_unlock(&glob->lru_lock); ret = driver->sync_obj_wait(sync_obj, false, interruptible); @@ -692,8 +608,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, spin_unlock(&bdev->fence_lock); if (ret || unlikely(list_empty(&bo->ddestroy))) { - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ww_mutex_unlock(&bo->resv->lock); spin_unlock(&glob->lru_lock); return ret; } @@ -1253,6 +1168,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, int ret = 0; unsigned long num_pages; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; + bool locked; ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); if (ret) { @@ -1279,8 +1195,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, kref_init(&bo->kref); kref_init(&bo->list_kref); atomic_set(&bo->cpu_writers, 0); - atomic_set(&bo->reserved, 1); - init_waitqueue_head(&bo->event_queue); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); @@ -1298,37 +1212,34 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.bus.io_reserved_count = 0; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); - bo->seq_valid = false; bo->persistent_swap_storage = persistent_swap_storage; bo->acc_size = acc_size; bo->sg = sg; + bo->resv = &bo->ttm_resv; + reservation_object_init(bo->resv); atomic_inc(&bo->glob->bo_count); ret = ttm_bo_check_placement(bo, placement); - if (unlikely(ret != 0)) - goto out_err; /* * For ttm_bo_type_device buffers, allocate * address space from the device. */ - if (bo->type == ttm_bo_type_device || - bo->type == ttm_bo_type_sg) { + if (likely(!ret) && + (bo->type == ttm_bo_type_device || + bo->type == ttm_bo_type_sg)) ret = ttm_bo_setup_vm(bo); - if (ret) - goto out_err; - } - ret = ttm_bo_validate(bo, placement, interruptible, false); - if (ret) - goto out_err; + locked = ww_mutex_trylock(&bo->resv->lock); + WARN_ON(!locked); - ttm_bo_unreserve(bo); - return 0; + if (likely(!ret)) + ret = ttm_bo_validate(bo, placement, interruptible, false); -out_err: ttm_bo_unreserve(bo); - ttm_bo_unref(&bo); + + if (unlikely(ret)) + ttm_bo_unref(&bo); return ret; } @@ -1941,8 +1852,7 @@ out: * already swapped buffer. 
*/ - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ww_mutex_unlock(&bo->resv->lock); kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index af894584dd90..319cf4127c5b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -433,6 +433,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, struct ttm_buffer_object *fbo; struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_driver *driver = bdev->driver; + int ret; fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); if (!fbo) @@ -445,7 +446,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. */ - init_waitqueue_head(&fbo->event_queue); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); @@ -463,6 +463,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, kref_init(&fbo->kref); fbo->destroy = &ttm_transfered_destroy; fbo->acc_size = 0; + fbo->resv = &fbo->ttm_resv; + reservation_object_init(fbo->resv); + ret = ww_mutex_trylock(&fbo->resv->lock); + WARN_ON(!ret); *new_obj = fbo; return 0; diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index efcb734e5543..7392da557be2 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -48,8 +48,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list, entry->removed = false; } else { - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ww_mutex_unlock(&bo->resv->lock); } } } @@ -134,8 +133,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, glob = entry->bo->glob; ww_acquire_init(ticket, &reservation_ww_class); - spin_lock(&glob->lru_lock); - retry: list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; @@ -144,42 +141,34 @@ retry: if (entry->reserved) continue; - ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket); - switch (ret) { - case 0: - break; - case -EBUSY: - ttm_eu_del_from_lru_locked(list); - spin_unlock(&glob->lru_lock); - ret = ttm_bo_reserve_nolru(bo, true, false, - true, ticket); - spin_lock(&glob->lru_lock); - - if (!ret) - break; - if (unlikely(ret != -EAGAIN)) - goto err; + ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket); - /* fallthrough */ - case -EAGAIN: + if (ret == -EDEADLK) { + /* uh oh, we lost out, drop every reservation and try + * to only reserve this buffer, then start over if + * this succeeds. 
+ */ + spin_lock(&glob->lru_lock); ttm_eu_backoff_reservation_locked(list, ticket); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); - ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket); - if (unlikely(ret != 0)) + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); + if (unlikely(ret != 0)) { + if (ret == -EINTR) + ret = -ERESTARTSYS; goto err_fini; + } - spin_lock(&glob->lru_lock); entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { ret = -EBUSY; goto err; } goto retry; - default: + } else if (ret) goto err; - } entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { @@ -189,12 +178,14 @@ retry: } ww_acquire_done(ticket); + spin_lock(&glob->lru_lock); ttm_eu_del_from_lru_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); return 0; err: + spin_lock(&glob->lru_lock); ttm_eu_backoff_reservation_locked(list, ticket); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0a992b016fe9..31ad860c10cd 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -39,6 +39,7 @@ #include #include #include +#include struct ttm_bo_device; @@ -153,7 +154,6 @@ struct ttm_tt; * Lru lists may keep one refcount, the delayed delete list, and kref != 0 * keeps one refcount. When this refcount reaches zero, * the object is destroyed. - * @event_queue: Queue for processes waiting on buffer object status change. * @mem: structure describing current placement. * @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member @@ -164,12 +164,6 @@ struct ttm_tt; * @lru: List head for the lru list. * @ddestroy: List head for the delayed destroy list. * @swap: List head for swap LRU list. - * @val_seq: Sequence of the validation holding the @reserved lock. - * Used to avoid starvation when many processes compete to validate the - * buffer. This member is protected by the bo_device::lru_lock. - * @seq_valid: The value of @val_seq is valid. This value is protected by - * the bo_device::lru_lock. - * @reserved: Deadlock-free lock used for synchronization state transitions. * @sync_obj: Pointer to a synchronization object. * @priv_flags: Flags describing buffer object internal state. * @vm_rb: Rb node for the vm rb tree. @@ -209,10 +203,9 @@ struct ttm_buffer_object { struct kref kref; struct kref list_kref; - wait_queue_head_t event_queue; /** - * Members protected by the bo::reserved lock. + * Members protected by the bo::resv::reserved lock. */ struct ttm_mem_reg mem; @@ -234,15 +227,6 @@ struct ttm_buffer_object { struct list_head ddestroy; struct list_head swap; struct list_head io_reserve_lru; - unsigned long val_seq; - bool seq_valid; - - /** - * Members protected by the bdev::lru_lock - * only when written to. 
- */ - - atomic_t reserved; /** * Members protected by struct buffer_object_device::fence_lock @@ -272,6 +256,9 @@ struct ttm_buffer_object { uint32_t cur_placement; struct sg_table *sg; + + struct reservation_object *resv; + struct reservation_object ttm_resv; }; /** @@ -736,7 +723,7 @@ extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); */ static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo) { - return atomic_read(&bo->reserved); + return ww_mutex_is_locked(&bo->resv->lock); } #endif diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index ba71ef91f4d5..ec8a1d306510 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -33,7 +33,6 @@ #include #include -#include /** * struct ttm_validate_buffer -- cgit v1.2.3 From 3482032457f50cae196f6397ebec7f5f2ad3cf7d Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 27 Jun 2013 13:48:24 +0200 Subject: drm/ttm: inline ttm_bo_reserve and related calls Makes lockdep a lot more useful. Signed-off-by: Maarten Lankhorst Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 105 ++------------------ drivers/gpu/drm/ttm/ttm_execbuf_util.c | 9 +- include/drm/ttm/ttm_bo_driver.h | 175 ++++++++++++++++++++------------- 3 files changed, 117 insertions(+), 172 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 5f9fe8044afc..a8a27f51e419 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -182,6 +182,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) } } } +EXPORT_SYMBOL(ttm_bo_add_to_lru); int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { @@ -204,35 +205,6 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) return put_count; } -int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait, bool use_ticket, - struct ww_acquire_ctx *ticket) -{ - int ret = 0; - - if (no_wait) { - bool success; - - /* not valid any more, fix your locking! */ - if (WARN_ON(ticket)) - return -EBUSY; - - success = ww_mutex_trylock(&bo->resv->lock); - return success ? 0 : -EBUSY; - } - - if (interruptible) - ret = ww_mutex_lock_interruptible(&bo->resv->lock, - ticket); - else - ret = ww_mutex_lock(&bo->resv->lock, ticket); - if (ret == -EINTR) - return -ERESTARTSYS; - return ret; -} -EXPORT_SYMBOL(ttm_bo_reserve); - static void ttm_bo_ref_bug(struct kref *list_kref) { BUG(); @@ -245,77 +217,16 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, (never_free) ? 
ttm_bo_ref_bug : ttm_bo_release_list); } -int ttm_bo_reserve(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait, bool use_ticket, - struct ww_acquire_ctx *ticket) -{ - struct ttm_bo_global *glob = bo->glob; - int put_count = 0; - int ret; - - ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket, - ticket); - if (likely(ret == 0)) { - spin_lock(&glob->lru_lock); - put_count = ttm_bo_del_from_lru(bo); - spin_unlock(&glob->lru_lock); - ttm_bo_list_ref_sub(bo, put_count, true); - } - - return ret; -} - -int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, - bool interruptible, struct ww_acquire_ctx *ticket) -{ - struct ttm_bo_global *glob = bo->glob; - int put_count = 0; - int ret = 0; - - if (interruptible) - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, - ticket); - else - ww_mutex_lock_slow(&bo->resv->lock, ticket); - - if (likely(ret == 0)) { - spin_lock(&glob->lru_lock); - put_count = ttm_bo_del_from_lru(bo); - spin_unlock(&glob->lru_lock); - ttm_bo_list_ref_sub(bo, put_count, true); - } else if (ret == -EINTR) - ret = -ERESTARTSYS; - - return ret; -} -EXPORT_SYMBOL(ttm_bo_reserve_slowpath); - -void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket) +void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) { - ttm_bo_add_to_lru(bo); - ww_mutex_unlock(&bo->resv->lock); -} - -void ttm_bo_unreserve(struct ttm_buffer_object *bo) -{ - struct ttm_bo_global *glob = bo->glob; - - spin_lock(&glob->lru_lock); - ttm_bo_unreserve_ticket_locked(bo, NULL); - spin_unlock(&glob->lru_lock); -} -EXPORT_SYMBOL(ttm_bo_unreserve); - -void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket) -{ - struct ttm_bo_global *glob = bo->glob; + int put_count; - spin_lock(&glob->lru_lock); - ttm_bo_unreserve_ticket_locked(bo, ticket); - spin_unlock(&glob->lru_lock); + spin_lock(&bo->glob->lru_lock); + put_count = ttm_bo_del_from_lru(bo); + spin_unlock(&bo->glob->lru_lock); + ttm_bo_list_ref_sub(bo, put_count, true); } -EXPORT_SYMBOL(ttm_bo_unreserve_ticket); +EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); /* * Call bo->mutex locked. 
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 7392da557be2..6c911789ae5c 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -44,12 +44,10 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list, entry->reserved = false; if (entry->removed) { - ttm_bo_unreserve_ticket_locked(bo, ticket); + ttm_bo_add_to_lru(bo); entry->removed = false; - - } else { - ww_mutex_unlock(&bo->resv->lock); } + ww_mutex_unlock(&bo->resv->lock); } } @@ -220,7 +218,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, bo = entry->bo; entry->old_sync_obj = bo->sync_obj; bo->sync_obj = driver->sync_obj_ref(sync_obj); - ttm_bo_unreserve_ticket_locked(bo, ticket); + ttm_bo_add_to_lru(bo); + ww_mutex_unlock(&bo->resv->lock); entry->reserved = false; } spin_unlock(&bdev->fence_lock); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index ec18c5f3e6e9..984fc2d571a1 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -772,6 +773,55 @@ extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible); extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); +extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); +extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); + +/** + * ttm_bo_reserve_nolru: + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. + * @use_ticket: If @bo is already reserved, Only sleep waiting for + * it to become unreserved if @ticket->stamp is older. + * + * Will not remove reserved buffers from the lru lists. + * Otherwise identical to ttm_bo_reserve. + * + * Returns: + * -EDEADLK: The reservation may cause a deadlock. + * Release all buffer reservations, wait for @bo to become unreserved and + * try again. (only if use_sequence == 1). + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. + * -EBUSY: The function needed to sleep, but @no_wait was true + * -EALREADY: Bo already reserved using @ticket. This error code will only + * be returned if @use_ticket is set to true. + */ +static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, + bool interruptible, + bool no_wait, bool use_ticket, + struct ww_acquire_ctx *ticket) +{ + int ret = 0; + + if (no_wait) { + bool success; + if (WARN_ON(ticket)) + return -EBUSY; + + success = ww_mutex_trylock(&bo->resv->lock); + return success ? 0 : -EBUSY; + } + + if (interruptible) + ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket); + else + ret = ww_mutex_lock(&bo->resv->lock, ticket); + if (ret == -EINTR) + return -ERESTARTSYS; + return ret; +} /** * ttm_bo_reserve: @@ -780,7 +830,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); * @interruptible: Sleep interruptible if waiting. * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. * @use_ticket: If @bo is already reserved, Only sleep waiting for - * it to become unreserved if @sequence < (@bo)->sequence. + * it to become unreserved if @ticket->stamp is older. * * Locks a buffer object for validation. 
(Or prevents other processes from * locking it for validation) and removes it from lru lists, while taking @@ -794,7 +844,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); * Processes attempting to reserve multiple buffers other than for eviction, * (typically execbuf), should first obtain a unique 32-bit * validation sequence number, - * and call this function with @use_sequence == 1 and @sequence == the unique + * and call this function with @use_ticket == 1 and @ticket->stamp == the unique * sequence number. If upon call of this function, the buffer object is already * reserved, the validation sequence is checked against the validation * sequence of the process currently reserving the buffer, @@ -809,37 +859,31 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); * will eventually succeed, preventing both deadlocks and starvation. * * Returns: - * -EAGAIN: The reservation may cause a deadlock. + * -EDEADLK: The reservation may cause a deadlock. * Release all buffer reservations, wait for @bo to become unreserved and * try again. (only if use_sequence == 1). * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by * a signal. Release all buffer reservations and return to user-space. * -EBUSY: The function needed to sleep, but @no_wait was true - * -EDEADLK: Bo already reserved using @sequence. This error code will only - * be returned if @use_sequence is set to true. + * -EALREADY: Bo already reserved using @ticket. This error code will only + * be returned if @use_ticket is set to true. */ -extern int ttm_bo_reserve(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait, bool use_ticket, - struct ww_acquire_ctx *ticket); +static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, + bool interruptible, + bool no_wait, bool use_ticket, + struct ww_acquire_ctx *ticket) +{ + int ret; -/** - * ttm_bo_reserve_slowpath_nolru: - * @bo: A pointer to a struct ttm_buffer_object. - * @interruptible: Sleep interruptible if waiting. - * @sequence: Set (@bo)->sequence to this value after lock - * - * This is called after ttm_bo_reserve returns -EAGAIN and we backed off - * from all our other reservations. Because there are no other reservations - * held by us, this function cannot deadlock any more. - * - * Will not remove reserved buffers from the lru lists. - * Otherwise identical to ttm_bo_reserve_slowpath. - */ -extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, - bool interruptible, - struct ww_acquire_ctx *ticket); + WARN_ON(!atomic_read(&bo->kref.refcount)); + + ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket, + ticket); + if (likely(ret == 0)) + ttm_bo_del_sub_from_lru(bo); + return ret; +} /** * ttm_bo_reserve_slowpath: @@ -851,45 +895,27 @@ extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, * from all our other reservations. Because there are no other reservations * held by us, this function cannot deadlock any more. */ -extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, - bool interruptible, - struct ww_acquire_ctx *ticket); +static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, + bool interruptible, + struct ww_acquire_ctx *ticket) +{ + int ret = 0; -/** - * ttm_bo_reserve_nolru: - * - * @bo: A pointer to a struct ttm_buffer_object. - * @interruptible: Sleep interruptible if waiting. - * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. 
- * @use_sequence: If @bo is already reserved, Only sleep waiting for - * it to become unreserved if @sequence < (@bo)->sequence. - * - * Will not remove reserved buffers from the lru lists. - * Otherwise identical to ttm_bo_reserve. - * - * Returns: - * -EAGAIN: The reservation may cause a deadlock. - * Release all buffer reservations, wait for @bo to become unreserved and - * try again. (only if use_sequence == 1). - * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by - * a signal. Release all buffer reservations and return to user-space. - * -EBUSY: The function needed to sleep, but @no_wait was true - * -EDEADLK: Bo already reserved using @sequence. This error code will only - * be returned if @use_sequence is set to true. - */ -extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait, bool use_ticket, - struct ww_acquire_ctx *ticket); + WARN_ON(!atomic_read(&bo->kref.refcount)); -/** - * ttm_bo_unreserve - * - * @bo: A pointer to a struct ttm_buffer_object. - * - * Unreserve a previous reservation of @bo. - */ -extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); + if (interruptible) + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); + else + ww_mutex_lock_slow(&bo->resv->lock, ticket); + + if (likely(ret == 0)) + ttm_bo_del_sub_from_lru(bo); + else if (ret == -EINTR) + ret = -ERESTARTSYS; + + return ret; +} /** * ttm_bo_unreserve_ticket @@ -898,19 +924,28 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); * * Unreserve a previous reservation of @bo made with @ticket. */ -extern void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, - struct ww_acquire_ctx *ticket); +static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, + struct ww_acquire_ctx *t) +{ + if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { + spin_lock(&bo->glob->lru_lock); + ttm_bo_add_to_lru(bo); + spin_unlock(&bo->glob->lru_lock); + } + ww_mutex_unlock(&bo->resv->lock); +} /** - * ttm_bo_unreserve_locked + * ttm_bo_unreserve + * * @bo: A pointer to a struct ttm_buffer_object. - * @ticket: ww_acquire_ctx used for reserving, or NULL * - * Unreserve a previous reservation of @bo made with @ticket. - * Needs to be called with struct ttm_bo_global::lru_lock held. + * Unreserve a previous reservation of @bo. */ -extern void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, - struct ww_acquire_ctx *ticket); +static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) +{ + ttm_bo_unreserve_ticket(bo, NULL); +} /* * ttm_bo_util.c -- cgit v1.2.3 From 8f262540e61c7caaff791bde8336196ac7aca77a Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 27 Jun 2013 13:48:28 +0200 Subject: drm/ttm: get rid of ttm_bo_is_reserved Signed-off-by: Maarten Lankhorst Signed-off-by: Dave Airlie --- include/drm/ttm/ttm_bo_api.h | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'include/drm') diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 31ad860c10cd..8a6aa56ece52 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -712,18 +712,4 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); -/** - * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved - * - * @bo: The buffer object to check. 
- * - * This function returns an indication if a bo is reserved or not, and should - * only be used to print an error when it is not from incorrect api usage, since - * there's no guarantee that it is the caller that is holding the reservation. - */ -static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo) -{ - return ww_mutex_is_locked(&bo->resv->lock); -} - #endif -- cgit v1.2.3 From d482e5fa299c2cfbb4700143dd766273730e2357 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 28 Jun 2013 20:31:34 +1000 Subject: Revert "drm: kms_helper: don't lose hotplug event" This reverts commit 160954b7bca43da7cd3cfbce310e6df919a8216e. This was rearming the workqueue with a 0 timeout, causing a WARN_ON, and possible loop. Daniel writes: "I've looked a bit into this and I think we need to have a separate work struct for recovering these lost hotplug events since the continuous self-rearming case is a real risk (e.g. if a connector flip-flops all the time). At least I don't see a sane way to block out re-arming with the current code in a simple way. So reverting the offender seems like the right thing and I'll go back to the drawing board for 3.12." Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc_helper.c | 32 +------------------------------- include/drm/drm_crtc.h | 1 - 2 files changed, 1 insertion(+), 32 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index f6829ba58e86..738a4294d820 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -122,7 +122,6 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, int count = 0; int mode_flags = 0; bool verbose_prune = true; - enum drm_connector_status old_status; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); @@ -138,32 +137,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, if (connector->funcs->force) connector->funcs->force(connector); } else { - old_status = connector->status; - connector->status = connector->funcs->detect(connector, true); - - /* - * Normally either the driver's hpd code or the poll loop should - * pick up any changes and fire the hotplug event. But if - * userspace sneaks in a probe, we might miss a change. Hence - * check here, and if anything changed start the hotplug code. - */ - if (old_status != connector->status) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", - connector->base.id, - drm_get_connector_name(connector), - old_status, connector->status); - - /* - * The hotplug event code might call into the fb - * helpers, and so expects that we do not hold any - * locks. Fire up the poll struct instead, it will - * disable itself again. - */ - dev->mode_config.delayed_event = true; - schedule_delayed_work(&dev->mode_config.output_poll_work, - 0); - } } /* Re-enable polling in case the global poll config changed. */ @@ -1011,11 +985,7 @@ static void output_poll_execute(struct work_struct *work) struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work); struct drm_connector *connector; enum drm_connector_status old_status; - bool repoll = false, changed; - - /* Pick up any changes detected by the probe functions. 
*/ - changed = dev->mode_config.delayed_event; - dev->mode_config.delayed_event = false; + bool repoll = false, changed = false; if (!drm_kms_helper_poll) return; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 663c3ab47752..fa12a2fa4293 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -811,7 +811,6 @@ struct drm_mode_config { /* output poll support */ bool poll_enabled; bool poll_running; - bool delayed_event; struct delayed_work output_poll_work; /* pointers to standard properties */ -- cgit v1.2.3 From 77ef8bbc87be7ad10b410247efc6d0f10676b401 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Mon, 1 Jul 2013 20:32:58 +0200 Subject: drm: make drm_mm_init() return void There is no reason to return "int" as this function never fails. Furthermore, several drivers (ast, sis) already depend on this. Signed-off-by: David Herrmann Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 8 ++------ drivers/gpu/drm/drm_mm.c | 4 +--- drivers/gpu/drm/ttm/ttm_bo.c | 6 +----- drivers/gpu/drm/ttm/ttm_bo_manager.c | 8 +------- include/drm/drm_mm.h | 6 +++--- 5 files changed, 8 insertions(+), 24 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index bcedaf7b73b9..603f256152ef 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -108,12 +108,8 @@ drm_gem_init(struct drm_device *dev) return -ENOMEM; } - if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, - DRM_FILE_PAGE_OFFSET_SIZE)) { - drm_ht_remove(&mm->offset_hash); - kfree(mm); - return -ENOMEM; - } + drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, + DRM_FILE_PAGE_OFFSET_SIZE); return 0; } diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 07cf99cc8862..7917729ee61d 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -669,7 +669,7 @@ int drm_mm_clean(struct drm_mm * mm) } EXPORT_SYMBOL(drm_mm_clean); -int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) +void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->hole_stack); INIT_LIST_HEAD(&mm->unused_nodes); @@ -690,8 +690,6 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); mm->color_adjust = NULL; - - return 0; } EXPORT_SYMBOL(drm_mm_init); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6e6975c8596f..cb9dd674670c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1455,9 +1455,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, goto out_no_sys; bdev->addr_space_rb = RB_ROOT; - ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); - if (unlikely(ret != 0)) - goto out_no_addr_mm; + drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); INIT_LIST_HEAD(&bdev->ddestroy); @@ -1471,8 +1469,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, mutex_unlock(&glob->device_list_mutex); return 0; -out_no_addr_mm: - ttm_bo_clean_mm(bdev, 0); out_no_sys: return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 9212494e9072..e4367f91472a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -103,18 +103,12 @@ static int ttm_bo_man_init(struct ttm_mem_type_manager *man, unsigned long p_size) { struct 
ttm_range_manager *rman; - int ret; rman = kzalloc(sizeof(*rman), GFP_KERNEL); if (!rman) return -ENOMEM; - ret = drm_mm_init(&rman->mm, 0, p_size); - if (ret) { - kfree(rman); - return ret; - } - + drm_mm_init(&rman->mm, 0, p_size); spin_lock_init(&rman->lock); man->priv = rman; return 0; diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 88591ef8fa24..de9242542f05 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -275,9 +275,9 @@ static inline struct drm_mm_node *drm_mm_search_free_in_range_color( return drm_mm_search_free_in_range_generic(mm, size, alignment, color, start, end, best_match); } -extern int drm_mm_init(struct drm_mm *mm, - unsigned long start, - unsigned long size); +extern void drm_mm_init(struct drm_mm *mm, + unsigned long start, + unsigned long size); extern void drm_mm_takedown(struct drm_mm *mm); extern int drm_mm_clean(struct drm_mm *mm); extern int drm_mm_pre_get(struct drm_mm *mm); -- cgit v1.2.3 From 69163ea82732894e8c1e17df4010372ed078efdd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 1 Jul 2013 22:05:53 +0200 Subject: drm/mm: kill color_search_free/get_block drm/i915 is the only user of the color allocation handling and switched to insert_node a while ago. So we can ditch this. Signed-off-by: Daniel Vetter Reviewed-by: Chris Wilson Signed-off-by: Dave Airlie --- include/drm/drm_mm.h | 32 +------------------------------- 1 file changed, 1 insertion(+), 31 deletions(-) (limited to 'include/drm') diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index de9242542f05..4d06edb56d5f 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -177,17 +177,6 @@ static inline struct drm_mm_node *drm_mm_get_block_range( return drm_mm_get_block_range_generic(parent, size, alignment, 0, start, end, 0); } -static inline struct drm_mm_node *drm_mm_get_color_block_range( - struct drm_mm_node *parent, - unsigned long size, - unsigned alignment, - unsigned long color, - unsigned long start, - unsigned long end) -{ - return drm_mm_get_block_range_generic(parent, size, alignment, color, - start, end, 0); -} static inline struct drm_mm_node *drm_mm_get_block_atomic_range( struct drm_mm_node *parent, unsigned long size, @@ -255,26 +244,7 @@ static inline struct drm_mm_node *drm_mm_search_free_in_range( return drm_mm_search_free_in_range_generic(mm, size, alignment, 0, start, end, best_match); } -static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - unsigned long color, - bool best_match) -{ - return drm_mm_search_free_generic(mm,size, alignment, color, best_match); -} -static inline struct drm_mm_node *drm_mm_search_free_in_range_color( - const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - unsigned long color, - unsigned long start, - unsigned long end, - bool best_match) -{ - return drm_mm_search_free_in_range_generic(mm, size, alignment, color, - start, end, best_match); -} + extern void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); -- cgit v1.2.3 From 7c397cd97b8f46659698396b420bd48c3e6703e6 Mon Sep 17 00:00:00 2001 From: Joonyoung Shim Date: Fri, 28 Jun 2013 14:24:53 +0900 Subject: drm: add mmap function to prime helpers This adds to call low-level mmap() from prime helpers. 
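As a rough illustration (editorial, not part of the patch), a driver could route the new hook to whatever per-object mmap path it already has. The "foo_" names below are hypothetical; only struct drm_driver and the gem_prime_mmap member come from the patch itself.

static int foo_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	/* Reuse the driver's existing GEM mmap path (hypothetical helper). */
	return foo_bo_mmap(to_foo_bo(obj), vma);
}

static struct drm_driver foo_driver = {
	/* ... other hooks ... */
	.gem_prime_mmap = foo_gem_prime_mmap,
};
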
Signed-off-by: Joonyoung Shim Acked-by: Laurent Pinchart Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_prime.c | 8 +++++++- include/drm/drmP.h | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index ff5fecef367b..85e450e3241c 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -245,7 +245,13 @@ static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) { - return -EINVAL; + struct drm_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->dev; + + if (!dev->driver->gem_prime_mmap) + return -ENOSYS; + + return dev->driver->gem_prime_mmap(obj, vma); } static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 82670ac0d774..12083dc862a9 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -937,6 +937,8 @@ struct drm_driver { struct sg_table *sgt); void *(*gem_prime_vmap)(struct drm_gem_object *obj); void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); + int (*gem_prime_mmap)(struct drm_gem_object *obj, + struct vm_area_struct *vma); /* vga arb irq handler */ void (*vgaarb_irq)(struct drm_device *dev, bool state); -- cgit v1.2.3 From 78467dc5f70fb9bee4a32c0c3714c99b0b5465c7 Mon Sep 17 00:00:00 2001 From: Joonyoung Shim Date: Fri, 28 Jun 2013 14:24:54 +0900 Subject: drm/cma: add low-level hook functions to use prime helpers Instead of using the dma_buf functionality for GEM CMA, we can use prime helpers if we can provide low-level hook functions for GEM CMA. Signed-off-by: Joonyoung Shim Acked-by: Laurent Pinchart Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem_cma_helper.c | 79 ++++++++++++++++++++++++++++++++++++ include/drm/drm_gem_cma_helper.h | 9 ++++ 2 files changed, 88 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index ce063970d68c..83a45e5cf04e 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -602,3 +602,82 @@ error_gem_free: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import); + +/* low-level interface prime helpers */ +struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + struct sg_table *sgt; + int ret; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr, + cma_obj->paddr, obj->size); + if (ret < 0) + goto out; + + return sgt; + +out: + kfree(sgt); + return NULL; +} +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table); + +struct drm_gem_object * +drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size, + struct sg_table *sgt) +{ + struct drm_gem_cma_object *cma_obj; + + if (sgt->nents != 1) + return ERR_PTR(-EINVAL); + + /* Create a CMA GEM buffer. 
*/ + cma_obj = __drm_gem_cma_create(dev, size); + if (IS_ERR(cma_obj)) + return ERR_PTR(PTR_ERR(cma_obj)); + + cma_obj->paddr = sg_dma_address(sgt->sgl); + cma_obj->sgt = sgt; + + DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr, size); + + return &cma_obj->base; +} +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table); + +int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + struct drm_gem_cma_object *cma_obj; + struct drm_device *dev = obj->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_gem_mmap_obj(obj, obj->size, vma); + mutex_unlock(&dev->struct_mutex); + if (ret < 0) + return ret; + + cma_obj = to_drm_gem_cma_obj(obj); + return drm_gem_cma_mmap_obj(cma_obj, vma); +} +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); + +void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return cma_obj->vaddr; +} +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); + +void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + /* Nothing to do */ +} +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap); diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 6e17251e9b28..9d39d2a51906 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -54,4 +54,13 @@ struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev, struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev, struct dma_buf *dma_buf); +struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object * +drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size, + struct sg_table *sgt); +int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma); +void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); +void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + #endif /* __DRM_GEM_CMA_HELPER_H__ */ -- cgit v1.2.3 From 6d35dea107834eb549c1fba28fea6ec39c81d0ba Mon Sep 17 00:00:00 2001 From: Joonyoung Shim Date: Fri, 28 Jun 2013 14:24:55 +0900 Subject: drm/cma: remove GEM CMA specific dma_buf functionality We can use prime helpers instead. 
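As a rough sketch (editorial, not part of the patch), a CMA-based driver then plugs the generic prime entry points together with the low-level CMA helpers from the previous patch into its struct drm_driver. "foo_drm_driver" is a hypothetical name, the generic drm_gem_prime_* functions are assumed from the existing prime core (this series does not modify them), and the exact set of hooks a given driver needs may differ.

static struct drm_driver foo_drm_driver = {
	/* ... */
	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
	.gem_prime_export	   = drm_gem_prime_export,
	.gem_prime_import	   = drm_gem_prime_import,
	.gem_prime_get_sg_table	   = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap		   = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap	   = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap		   = drm_gem_cma_prime_mmap,
};
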
Signed-off-by: Joonyoung Shim Acked-by: Laurent Pinchart Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem_cma_helper.c | 286 ----------------------------------- include/drm/drm_gem_cma_helper.h | 6 - 2 files changed, 292 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 83a45e5cf04e..ece72a8ac245 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -317,292 +317,6 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m EXPORT_SYMBOL_GPL(drm_gem_cma_describe); #endif -/* ----------------------------------------------------------------------------- - * DMA-BUF - */ - -struct drm_gem_cma_dmabuf_attachment { - struct sg_table sgt; - enum dma_data_direction dir; -}; - -static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev, - struct dma_buf_attachment *attach) -{ - struct drm_gem_cma_dmabuf_attachment *cma_attach; - - cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL); - if (!cma_attach) - return -ENOMEM; - - cma_attach->dir = DMA_NONE; - attach->priv = cma_attach; - - return 0; -} - -static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attach) -{ - struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv; - struct sg_table *sgt; - - if (cma_attach == NULL) - return; - - sgt = &cma_attach->sgt; - - if (cma_attach->dir != DMA_NONE) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, - cma_attach->dir); - - sg_free_table(sgt); - kfree(cma_attach); - attach->priv = NULL; -} - -static struct sg_table * -drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach, - enum dma_data_direction dir) -{ - struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv; - struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv; - struct drm_device *drm = cma_obj->base.dev; - struct scatterlist *rd, *wr; - struct sg_table *sgt; - unsigned int i; - int nents, ret; - - DRM_DEBUG_PRIME("\n"); - - if (WARN_ON(dir == DMA_NONE)) - return ERR_PTR(-EINVAL); - - /* Return the cached mapping when possible. */ - if (cma_attach->dir == dir) - return &cma_attach->sgt; - - /* Two mappings with different directions for the same attachment are - * not allowed. - */ - if (WARN_ON(cma_attach->dir != DMA_NONE)) - return ERR_PTR(-EBUSY); - - sgt = &cma_attach->sgt; - - ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL); - if (ret) { - DRM_ERROR("failed to alloc sgt.\n"); - return ERR_PTR(-ENOMEM); - } - - mutex_lock(&drm->struct_mutex); - - rd = cma_obj->sgt->sgl; - wr = sgt->sgl; - for (i = 0; i < sgt->orig_nents; ++i) { - sg_set_page(wr, sg_page(rd), rd->length, rd->offset); - rd = sg_next(rd); - wr = sg_next(wr); - } - - nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); - if (!nents) { - DRM_ERROR("failed to map sgl with iommu.\n"); - sg_free_table(sgt); - sgt = ERR_PTR(-EIO); - goto done; - } - - cma_attach->dir = dir; - attach->priv = cma_attach; - - DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size); - -done: - mutex_unlock(&drm->struct_mutex); - return sgt; -} - -static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach, - struct sg_table *sgt, - enum dma_data_direction dir) -{ - /* Nothing to do. 
*/ -} - -static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf) -{ - struct drm_gem_cma_object *cma_obj = dmabuf->priv; - - DRM_DEBUG_PRIME("%s\n", __FILE__); - - /* - * drm_gem_cma_dmabuf_release() call means that file object's - * f_count is 0 and it calls drm_gem_object_handle_unreference() - * to drop the references that these values had been increased - * at drm_prime_handle_to_fd() - */ - if (cma_obj->base.export_dma_buf == dmabuf) { - cma_obj->base.export_dma_buf = NULL; - - /* - * drop this gem object refcount to release allocated buffer - * and resources. - */ - drm_gem_object_unreference_unlocked(&cma_obj->base); - } -} - -static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf, - unsigned long page_num) -{ - /* TODO */ - - return NULL; -} - -static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf, - unsigned long page_num, void *addr) -{ - /* TODO */ -} - -static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf, - unsigned long page_num) -{ - /* TODO */ - - return NULL; -} - -static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf, - unsigned long page_num, void *addr) -{ - /* TODO */ -} - -static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf, - struct vm_area_struct *vma) -{ - struct drm_gem_cma_object *cma_obj = dmabuf->priv; - struct drm_gem_object *gem_obj = &cma_obj->base; - struct drm_device *dev = gem_obj->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma); - mutex_unlock(&dev->struct_mutex); - if (ret < 0) - return ret; - - return drm_gem_cma_mmap_obj(cma_obj, vma); -} - -static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf) -{ - struct drm_gem_cma_object *cma_obj = dmabuf->priv; - - return cma_obj->vaddr; -} - -static struct dma_buf_ops drm_gem_cma_dmabuf_ops = { - .attach = drm_gem_cma_dmabuf_attach, - .detach = drm_gem_cma_dmabuf_detach, - .map_dma_buf = drm_gem_cma_dmabuf_map, - .unmap_dma_buf = drm_gem_cma_dmabuf_unmap, - .kmap = drm_gem_cma_dmabuf_kmap, - .kmap_atomic = drm_gem_cma_dmabuf_kmap_atomic, - .kunmap = drm_gem_cma_dmabuf_kunmap, - .kunmap_atomic = drm_gem_cma_dmabuf_kunmap_atomic, - .mmap = drm_gem_cma_dmabuf_mmap, - .vmap = drm_gem_cma_dmabuf_vmap, - .release = drm_gem_cma_dmabuf_release, -}; - -struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm, - struct drm_gem_object *obj, int flags) -{ - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); - - return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops, - cma_obj->base.size, flags); -} -EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export); - -struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm, - struct dma_buf *dma_buf) -{ - struct drm_gem_cma_object *cma_obj; - struct dma_buf_attachment *attach; - struct sg_table *sgt; - int ret; - - DRM_DEBUG_PRIME("%s\n", __FILE__); - - /* is this one of own objects? */ - if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) { - struct drm_gem_object *obj; - - cma_obj = dma_buf->priv; - obj = &cma_obj->base; - - /* is it from our device? */ - if (obj->dev == drm) { - /* - * Importing dmabuf exported from out own gem increases - * refcount on gem itself instead of f_count of dmabuf. - */ - drm_gem_object_reference(obj); - dma_buf_put(dma_buf); - return obj; - } - } - - /* Create a CMA GEM buffer. */ - cma_obj = __drm_gem_cma_create(drm, dma_buf->size); - if (IS_ERR(cma_obj)) - return ERR_PTR(PTR_ERR(cma_obj)); - - /* Attach to the buffer and map it. 
Make sure the mapping is contiguous - * on the device memory bus, as that's all we support. - */ - attach = dma_buf_attach(dma_buf, drm->dev); - if (IS_ERR(attach)) { - ret = -EINVAL; - goto error_gem_free; - } - - sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR_OR_NULL(sgt)) { - ret = sgt ? PTR_ERR(sgt) : -ENOMEM; - goto error_buf_detach; - } - - if (sgt->nents != 1) { - ret = -EINVAL; - goto error_buf_unmap; - } - - cma_obj->base.import_attach = attach; - cma_obj->paddr = sg_dma_address(sgt->sgl); - cma_obj->sgt = sgt; - - DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr, - dma_buf->size); - - return &cma_obj->base; - -error_buf_unmap: - dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); -error_buf_detach: - dma_buf_detach(dma_buf, attach); -error_gem_free: - drm_gem_cma_free_object(&cma_obj->base); - return ERR_PTR(ret); -} -EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import); - /* low-level interface prime helpers */ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj) { diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 9d39d2a51906..c34f27f80bcc 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -48,12 +48,6 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops; void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); #endif -struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev, - struct drm_gem_object *obj, - int flags); -struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev, - struct dma_buf *dma_buf); - struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object * drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size, -- cgit v1.2.3
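
Taken together, the execbuf-side pattern after the reservation/ww_mutex conversion in this series looks roughly like the vmwgfx hunks above. The sketch below is editorial, uses hypothetical "foo_" names, and assumes the driver has already built its validate_list of struct ttm_validate_buffer entries.

static int foo_execbuf(struct list_head *validate_list, void *fence)
{
	struct ww_acquire_ctx ticket;
	int ret;

	/*
	 * Reserve every bo on the list.  The -EDEADLK backoff and the
	 * ww_mutex slowpath retry are handled inside the helper, so only
	 * fatal errors (e.g. -ERESTARTSYS) come back to the caller.
	 */
	ret = ttm_eu_reserve_buffers(&ticket, validate_list);
	if (ret)
		return ret;

	/* Hypothetical driver step: validate placement and emit commands. */
	ret = foo_validate_and_emit(validate_list);
	if (ret) {
		/* Drop all reservations and release the acquire context. */
		ttm_eu_backoff_reservation(&ticket, validate_list);
		return ret;
	}

	/*
	 * Attach the fence, put the bos back on the LRU, unlock them and
	 * finish the acquire context.
	 */
	ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
	return 0;
}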