From 5b560c3a99a0d1a65132ce6f2f5a8505536613e4 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 21 Oct 2016 22:22:43 +0300 Subject: drm/msm/mdp5: Use per-plane rotation property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The global mode_config.rotation_property is going away, switch over to the per-plane rotation_property. v2: Drop the BIT() Cc: Rob Clark Cc: Jilai Wang Cc: Archit Taneja Signed-off-by: Ville Syrjälä Reviewed-by: Rob Clark Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1477077768-4274-2-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 951c002b05df..2653ad893ebc 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -75,15 +75,11 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev, !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) return; - if (!dev->mode_config.rotation_property) - dev->mode_config.rotation_property = - drm_mode_create_rotation_property(dev, - DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y); - - if (dev->mode_config.rotation_property) - drm_object_attach_property(&plane->base, - dev->mode_config.rotation_property, - DRM_ROTATE_0); + drm_plane_create_rotation_property(plane, + DRM_ROTATE_0, + DRM_ROTATE_0 | + DRM_REFLECT_X | + DRM_REFLECT_Y); } /* helper to install properties which are common to planes and crtcs */ -- cgit v1.2.3 From 574a37b1bb07499778e6f46b56b6dda18151ad04 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 21 Oct 2016 22:22:44 +0300 Subject: drm/msm/mdp5: Advertize 180 degree rotation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since the hardware can apparently do both X and Y reflection, we can also advertise 180 degree rotation, as that's just X+Y reflection.
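An illustrative sketch of the decomposition this relies on, adapted from the hunks below rather than quoted verbatim: drm_rotation_simplify() rewrites a requested transform into an equivalent one built only from the bits the caller supports, so on hardware that can only H/V flip, a DRM_ROTATE_180 request collapses into both reflections. This is what the diff below does in mdp5_plane_atomic_check() and mdp5_plane_mode_set():

	unsigned int rotation;
	bool hflip, vflip;

	/* Rewrite the request using only rot-0 plus the two reflections;
	 * DRM_ROTATE_180 comes back as DRM_REFLECT_X | DRM_REFLECT_Y.
	 */
	rotation = drm_rotation_simplify(state->rotation,
					 DRM_ROTATE_0 |
					 DRM_REFLECT_X |
					 DRM_REFLECT_Y);
	hflip = !!(rotation & DRM_REFLECT_X);
	vflip = !!(rotation & DRM_REFLECT_Y);

Here state is the plane's struct drm_plane_state, as in the hunks below.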
v2: Drop the BIT() Cc: Rob Clark Cc: Jilai Wang Cc: Archit Taneja Signed-off-by: Ville Syrjälä Reviewed-by: Rob Clark Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1477077768-4274-3-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 2653ad893ebc..cf50d3ec8d1b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -78,6 +78,7 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev, drm_plane_create_rotation_property(plane, DRM_ROTATE_0, DRM_ROTATE_0 | + DRM_ROTATE_180 | DRM_REFLECT_X | DRM_REFLECT_Y); } @@ -285,6 +286,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, plane_enabled(old_state), plane_enabled(state)); if (plane_enabled(state)) { + unsigned int rotation; + format = to_mdp_format(msm_framebuffer_format(state->fb)); if (MDP_FORMAT_IS_YUV(format) && !pipe_supports_yuv(mdp5_plane->caps)) { @@ -305,8 +308,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - hflip = !!(state->rotation & DRM_REFLECT_X); - vflip = !!(state->rotation & DRM_REFLECT_Y); + rotation = drm_rotation_simplify(state->rotation, + DRM_ROTATE_0 | + DRM_REFLECT_X | + DRM_REFLECT_Y); + hflip = !!(rotation & DRM_REFLECT_X); + vflip = !!(rotation & DRM_REFLECT_Y); + if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { dev_err(plane->dev->dev, @@ -677,6 +685,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; + unsigned int rotation; bool vflip, hflip; unsigned long flags; int ret; @@ -739,8 +748,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, config |= get_scale_config(format, src_h, crtc_h, false); DBG("scale config = %x", config); - hflip = !!(pstate->rotation & DRM_REFLECT_X); - vflip = !!(pstate->rotation & DRM_REFLECT_Y); + rotation = drm_rotation_simplify(pstate->rotation, + DRM_ROTATE_0 | + DRM_REFLECT_X | + DRM_REFLECT_Y); + hflip = !!(rotation & DRM_REFLECT_X); + vflip = !!(rotation & DRM_REFLECT_Y); spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); -- cgit v1.2.3 From 6686df8cf1cf589c54343372e3524bf52cda038e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 21 Oct 2016 22:22:45 +0300 Subject: drm: RIP mode_config->rotation_property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that all drivers have been converted over to the per-plane rotation property, we can just nuke the global rotation property. v2: Rebase due to BIT(),__builtin_ffs() & co. 
Deal with superfluous code shuffling Signed-off-by: Ville Syrjälä Reviewed-by: Joonas Lahtinen Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1477077768-4274-4-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/drm_atomic.c | 6 ++---- drivers/gpu/drm/drm_blend.c | 32 ++++---------------------------- drivers/gpu/drm/drm_fb_helper.c | 7 +------ include/drm/drm_blend.h | 2 -- include/drm/drm_crtc.h | 5 ----- 5 files changed, 7 insertions(+), 45 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f81706387889..1b5a32df9a9a 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -705,8 +705,7 @@ int drm_atomic_plane_set_property(struct drm_plane *plane, state->src_w = val; } else if (property == config->prop_src_h) { state->src_h = val; - } else if (property == config->rotation_property || - property == plane->rotation_property) { + } else if (property == plane->rotation_property) { if (!is_power_of_2(val & DRM_ROTATE_MASK)) return -EINVAL; state->rotation = val; @@ -766,8 +765,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->src_w; } else if (property == config->prop_src_h) { *val = state->src_h; - } else if (property == config->rotation_property || - property == plane->rotation_property) { + } else if (property == plane->rotation_property) { *val = state->rotation; } else if (property == plane->zpos_property) { *val = state->zpos; diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index e52aece30900..1f2412c7ccfd 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -89,7 +89,7 @@ * On top of this basic transformation additional properties can be exposed by * the driver: * - * - Rotation is set up with drm_mode_create_rotation_property(). It adds a + * - Rotation is set up with drm_plane_create_rotation_property(). It adds a * rotation and reflection step between the source and destination rectangles. * Without this property the rectangle is only scaled, but not rotated or * reflected. @@ -105,18 +105,12 @@ */ /** - * drm_mode_create_rotation_property - create a new rotation property - * @dev: DRM device + * drm_plane_create_rotation_property - create a new rotation property + * @plane: drm plane + * @rotation: initial value of the rotation property * @supported_rotations: bitmask of supported rotations and reflections * * This creates a new property with the selected support for transformations. - * The resulting property should be stored in @rotation_property in - * &drm_mode_config. It then must be attached to each plane which supports - * rotations using drm_object_attach_property(). - * - * FIXME: Probably better if the rotation property is created on each plane, - * like the zpos property. Otherwise it's not possible to allow different - * rotation modes on different planes. * * Since a rotation by 180° degress is the same as reflecting both along the x * and the y axis the rotation property is somewhat redundant. Drivers can use @@ -144,24 +138,6 @@ * rotation. After reflection, the rotation is applied to the image sampled from * the source rectangle, before scaling it to fit the destination rectangle. 
*/ -struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, - unsigned int supported_rotations) -{ - static const struct drm_prop_enum_list props[] = { - { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" }, - { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" }, - { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" }, - { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" }, - { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" }, - { __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" }, - }; - - return drm_property_create_bitmask(dev, 0, "rotation", - props, ARRAY_SIZE(props), - supported_rotations); -} -EXPORT_SYMBOL(drm_mode_create_rotation_property); - int drm_plane_create_rotation_property(struct drm_plane *plane, unsigned int rotation, unsigned int supported_rotations) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e0d428f9d1cb..83dbae0fabcf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -392,15 +392,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) if (plane->type != DRM_PLANE_TYPE_PRIMARY) drm_plane_force_disable(plane); - if (plane->rotation_property) { + if (plane->rotation_property) drm_mode_plane_set_obj_prop(plane, plane->rotation_property, DRM_ROTATE_0); - } else if (dev->mode_config.rotation_property) { - drm_mode_plane_set_obj_prop(plane, - dev->mode_config.rotation_property, - DRM_ROTATE_0); - } } for (i = 0; i < fb_helper->crtc_count; i++) { diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index fd351924e1c5..13221cf9b3eb 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h @@ -52,8 +52,6 @@ static inline bool drm_rotation_90_or_270(unsigned int rotation) return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270); } -struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, - unsigned int supported_rotations); int drm_plane_create_rotation_property(struct drm_plane *plane, unsigned int rotation, unsigned int supported_rotations); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 284c1b3aec10..bc860cfc67ca 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1155,11 +1155,6 @@ struct drm_mode_config { * CURSOR, PRIMARY and OVERLAY legacy uses of planes. */ struct drm_property *plane_type_property; - /** - * @rotation_property: Optional property for planes or CRTCs to specifiy - * rotation. - */ - struct drm_property *rotation_property; /** * @prop_src_x: Default atomic plane property for the plane source * position in the connected &drm_framebuffer. -- cgit v1.2.3 From c20ea8fd498636b8f7408bf10a305d58a4a05927 Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Sat, 22 Oct 2016 17:14:42 +0800 Subject: drm/i2c/tda998x: mark symbol static where possible We get 1 warning when building the kernel with W=1: drivers/gpu/drm/i2c/tda998x_drv.c:1292:5: warning: no previous prototype for 'tda998x_audio_digital_mute' [-Wmissing-prototypes] In fact, this function is only used in the file in which it is declared and doesn't need a declaration, but can be made static. So this patch marks this function with 'static'. 
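An aside, using a made-up file rather than the tda998x code: -Wmissing-prototypes, which W=1 enables, fires for every external-linkage function defined without a prior declaration in scope. The two usual cures, internal linkage for single-file helpers or a declaration in a shared header, match this patch and the two that follow:

	/* foo.c, hypothetical example */
	static int helper(int x)	/* static: internal linkage, no warning */
	{
		return x * 2;
	}

	int foo_entry(int x)		/* W=1: "no previous prototype for 'foo_entry'" */
	{				/* fix: make it static, or declare it in a header */
		return helper(x);
	}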
Signed-off-by: Baoyou Xie Reviewed-by: Arnd Bergmann Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1477127682-3615-1-git-send-email-baoyou.xie@linaro.org --- drivers/gpu/drm/i2c/tda998x_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 9798d400d817..af8683e0dd54 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -1289,7 +1289,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data) mutex_unlock(&priv->audio_mutex); } -int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable) +static int +tda998x_audio_digital_mute(struct device *dev, void *data, bool enable) { struct tda998x_priv *priv = dev_get_drvdata(dev); -- cgit v1.2.3 From a5725ab0497ad91a2df7c01a78bf1a0cc5be4526 Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Sat, 22 Oct 2016 17:17:44 +0800 Subject: drm/msm/adreno: move function declarations to header file We get 2 warnings when building the kernel with W=1: drivers/gpu/drm/msm/adreno/a3xx_gpu.c:535:17: warning: no previous prototype for 'a3xx_gpu_init' [-Wmissing-prototypes] drivers/gpu/drm/msm/adreno/a4xx_gpu.c:624:17: warning: no previous prototype for 'a4xx_gpu_init' [-Wmissing-prototypes] In fact, both functions are declared in drivers/gpu/drm/msm/adreno/adreno_device.c, but should be declared in a header file. So this patch moves both function declarations to drivers/gpu/drm/msm/adreno/adreno_gpu.h. Signed-off-by: Baoyou Xie Reviewed-by: Arnd Bergmann Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1477127865-9381-1-git-send-email-baoyou.xie@linaro.org --- drivers/gpu/drm/msm/adreno/adreno_device.c | 3 --- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 5127b75dbf40..7250ffc6322f 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -25,9 +25,6 @@ bool hang_debug = false; MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); module_param_named(hang_debug, hang_debug, bool, 0600); -struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); -struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); - static const struct adreno_info gpulist[] = { { .rev = ADRENO_REV(3, 0, 5, ANY_ID), diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index a54f6e036b4a..07d99bdf7c99 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -311,4 +311,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu, gpu_write(&gpu->base, reg - 1, data); } +struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); +struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); + #endif /* __ADRENO_GPU_H__ */ -- cgit v1.2.3 From c170a14e20299f600f5d5949933cf3b300fee375 Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Sat, 22 Oct 2016 17:17:45 +0800 Subject: drm/msm: add missing header dependencies We get 2 warnings when building the kernel with W=1: drivers/gpu/drm/msm/msm_debugfs.c:141:5: warning: no previous prototype for 'msm_debugfs_init' [-Wmissing-prototypes] drivers/gpu/drm/msm/msm_debugfs.c:158:6: warning: no previous prototype for 'msm_debugfs_cleanup' [-Wmissing-prototypes] In fact, these functions are 
declared in drivers/gpu/drm/msm/msm_debugfs.h. So this patch adds missing header dependencies. Signed-off-by: Baoyou Xie Reviewed-by: Arnd Bergmann Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1477127865-9381-2-git-send-email-baoyou.xie@linaro.org --- drivers/gpu/drm/msm/msm_debugfs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 663f2b6ef091..3c853733c99a 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -18,6 +18,7 @@ #ifdef CONFIG_DEBUG_FS #include "msm_drv.h" #include "msm_gpu.h" +#include "msm_debugfs.h" static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) { -- cgit v1.2.3 From 14544d0937bf0160018a47ba74fff7fd61c22731 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 24 Oct 2016 12:38:21 +0100 Subject: drm/edid: Only print the bad edid when aborting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, if drm.debug is enabled, we get a DRM_ERROR message on the intermediate edid reads. This causes transient failures in CI which flags up the sporadic EDID read failures, which are recovered by rereading the EDID automatically. This patch combines the reporting done by drm_do_get_edid() itself with the bad block printing from get_edid_block(), into a single warning associated with the connector once all attempts to retrieve the EDID fail. v2: Print the whole EDID, marking up the bad/zero blocks. This requires recording the whole of the raw edid, then a second pass to reduce it to the valid extensions. v3: Fix invalid/valid extension fumble. References: https://bugs.freedesktop.org/show_bug.cgi?id=98228 Signed-off-by: Chris Wilson Cc: Ville Syrjälä Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161024113821.26263-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/drm_edid.c | 79 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 56 insertions(+), 23 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 95de47ba1e77..9506933b41cd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len) return ret == xfers ? 
0 : -1; } +static void connector_bad_edid(struct drm_connector *connector, + u8 *edid, int num_blocks) +{ + int i; + + if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS)) + return; + + dev_warn(connector->dev->dev, + "%s: EDID is invalid:\n", + connector->name); + for (i = 0; i < num_blocks; i++) { + u8 *block = edid + i * EDID_LENGTH; + char prefix[20]; + + if (drm_edid_is_zero(block, EDID_LENGTH)) + sprintf(prefix, "\t[%02x] ZERO ", i); + else if (!drm_edid_block_valid(block, i, false, NULL)) + sprintf(prefix, "\t[%02x] BAD ", i); + else + sprintf(prefix, "\t[%02x] GOOD ", i); + + print_hex_dump(KERN_WARNING, + prefix, DUMP_PREFIX_NONE, 16, 1, + block, EDID_LENGTH, false); + } +} + /** * drm_do_get_edid - get EDID data using a custom EDID block read function * @connector: connector we're probing @@ -1283,7 +1311,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, { int i, j = 0, valid_extensions = 0; u8 *edid, *new; - bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) return NULL; @@ -1292,7 +1319,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, for (i = 0; i < 4; i++) { if (get_edid_block(data, edid, 0, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(edid, 0, print_bad_edid, + if (drm_edid_block_valid(edid, 0, false, &connector->edid_corrupt)) break; if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) { @@ -1304,54 +1331,60 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, goto carp; /* if there's no extensions, we're done */ - if (edid[0x7e] == 0) + valid_extensions = edid[0x7e]; + if (valid_extensions == 0) return (struct edid *)edid; - new = krealloc(edid, (edid[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); + new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); if (!new) goto out; edid = new; for (j = 1; j <= edid[0x7e]; j++) { - u8 *block = edid + (valid_extensions + 1) * EDID_LENGTH; + u8 *block = edid + j * EDID_LENGTH; for (i = 0; i < 4; i++) { if (get_edid_block(data, block, j, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block, j, - print_bad_edid, NULL)) { - valid_extensions++; + if (drm_edid_block_valid(block, j, false, NULL)) break; - } } - if (i == 4 && print_bad_edid) { - dev_warn(connector->dev->dev, - "%s: Ignoring invalid EDID block %d.\n", - connector->name, j); - - connector->bad_edid_counter++; - } + if (i == 4) + valid_extensions--; } if (valid_extensions != edid[0x7e]) { + u8 *base; + + connector_bad_edid(connector, edid, edid[0x7e] + 1); + edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; edid[0x7e] = valid_extensions; - new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); + + new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); if (!new) goto out; + + base = new; + for (i = 0; i <= edid[0x7e]; i++) { + u8 *block = edid + i * EDID_LENGTH; + + if (!drm_edid_block_valid(block, i, false, NULL)) + continue; + + memcpy(base, block, EDID_LENGTH); + base += EDID_LENGTH; + } + + kfree(edid); edid = new; } return (struct edid *)edid; carp: - if (print_bad_edid) { - dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", - connector->name, j); - } - connector->bad_edid_counter++; - + connector_bad_edid(connector, edid, 1); out: kfree(edid); return NULL; -- cgit v1.2.3 From 714cf7ea505502186de0e87e9f029a4c46f03631 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 24 Oct 2016 19:33:24 +0300 Subject: drm/dp: Factor out helper to distinguish between branch and sink devices 
This check is open-coded in a few places, so it makes sense to simplify things by having a helper for it similar to the rest of DPCD feature helpers. v2: (Jani) - Move the helper to drm_dp_helper.h. - Split out this change to a separate patch. Cc: Jani Nikula Cc: dri-devel@lists.freedesktop.org Signed-off-by: Imre Deak Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1477326811-30431-2-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_dp.c | 11 ++++------- include/drm/drm_dp_helper.h | 6 ++++++ 2 files changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index acd0c51f74d5..01bc67194dba 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1452,8 +1452,7 @@ static void intel_dp_print_hw_revision(struct intel_dp *intel_dp) if ((drm_debug & DRM_UT_KMS) == 0) return; - if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DWN_STRM_PORT_PRESENT)) + if (!drm_dp_is_branch(intel_dp->dpcd)) return; len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1); @@ -1471,8 +1470,7 @@ static void intel_dp_print_sw_revision(struct intel_dp *intel_dp) if ((drm_debug & DRM_UT_KMS) == 0) return; - if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DWN_STRM_PORT_PRESENT)) + if (!drm_dp_is_branch(intel_dp->dpcd)) return; len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2); @@ -3607,8 +3605,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (!is_edp(intel_dp) && !intel_dp->sink_count) return false; - if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DWN_STRM_PORT_PRESENT)) + if (!drm_dp_is_branch(intel_dp->dpcd)) return true; /* native DP sink */ if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) @@ -4096,7 +4093,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) return connector_status_connected; /* if there's no downstream port, we're done */ - if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) + if (!drm_dp_is_branch(dpcd)) return connector_status_connected; /* If we're HPD-aware, SINK_COUNT changes dynamically */ diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 2a79882cb68e..55bbeb0ff594 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -690,6 +690,12 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; } +static inline bool +drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; +} + /* * DisplayPort AUX channel */ -- cgit v1.2.3 From f54d1867005c3323f5d8ad83eed823e84226c429 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 25 Oct 2016 13:00:45 +0100 Subject: dma-buf: Rename struct fence to dma_fence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I plan to usurp the short name of struct fence for a core kernel struct, and so I need to rename the specialised fence/timeline for DMA operations to make room. A consensus was reached in https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html that making clear this fence applies to DMA operations was a good thing. Since then the patch has grown a bit as usage increases, so hopefully it remains a good thing! (v2...: rebase, rerun spatch) v3: Compile on msm, spotted a manual fixup that I broke. 
v4: Try again for msm, sorry Daniel coccinelle script: @@ @@ - struct fence + struct dma_fence @@ @@ - struct fence_ops + struct dma_fence_ops @@ @@ - struct fence_cb + struct dma_fence_cb @@ @@ - struct fence_array + struct dma_fence_array @@ @@ - enum fence_flag_bits + enum dma_fence_flag_bits @@ @@ ( - fence_init + dma_fence_init | - fence_release + dma_fence_release | - fence_free + dma_fence_free | - fence_get + dma_fence_get | - fence_get_rcu + dma_fence_get_rcu | - fence_put + dma_fence_put | - fence_signal + dma_fence_signal | - fence_signal_locked + dma_fence_signal_locked | - fence_default_wait + dma_fence_default_wait | - fence_add_callback + dma_fence_add_callback | - fence_remove_callback + dma_fence_remove_callback | - fence_enable_sw_signaling + dma_fence_enable_sw_signaling | - fence_is_signaled_locked + dma_fence_is_signaled_locked | - fence_is_signaled + dma_fence_is_signaled | - fence_is_later + dma_fence_is_later | - fence_later + dma_fence_later | - fence_wait_timeout + dma_fence_wait_timeout | - fence_wait_any_timeout + dma_fence_wait_any_timeout | - fence_wait + dma_fence_wait | - fence_context_alloc + dma_fence_context_alloc | - fence_array_create + dma_fence_array_create | - to_fence_array + to_dma_fence_array | - fence_is_array + dma_fence_is_array | - trace_fence_emit + trace_dma_fence_emit | - FENCE_TRACE + DMA_FENCE_TRACE | - FENCE_WARN + DMA_FENCE_WARN | - FENCE_ERR + DMA_FENCE_ERR ) ( ... ) Signed-off-by: Chris Wilson Reviewed-by: Gustavo Padovan Acked-by: Sumit Semwal Acked-by: Christian König Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk --- Documentation/sync_file.txt | 14 +- drivers/base/Kconfig | 6 +- drivers/dma-buf/Kconfig | 2 +- drivers/dma-buf/Makefile | 2 +- drivers/dma-buf/dma-buf.c | 28 +- drivers/dma-buf/dma-fence-array.c | 146 +++++++ drivers/dma-buf/dma-fence.c | 537 ++++++++++++++++++++++++ drivers/dma-buf/fence-array.c | 145 ------- drivers/dma-buf/fence.c | 534 ----------------------- drivers/dma-buf/reservation.c | 94 +++-- drivers/dma-buf/seqno-fence.c | 18 +- drivers/dma-buf/sw_sync.c | 48 +-- drivers/dma-buf/sync_debug.c | 13 +- drivers/dma-buf/sync_debug.h | 9 +- drivers/dma-buf/sync_file.c | 63 +-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 54 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 22 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 +- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 16 +- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 58 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 22 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 14 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 24 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 48 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 12 +- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 26 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 26 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 79 ++-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12 +- 
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6 +- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/si_dma.c | 6 +- drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 4 +- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 67 +-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 26 +- drivers/gpu/drm/amd/scheduler/sched_fence.c | 48 ++- drivers/gpu/drm/drm_atomic.c | 2 +- drivers/gpu/drm/drm_atomic_helper.c | 8 +- drivers/gpu/drm/drm_fops.c | 6 +- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 6 +- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 46 +- drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 4 +- drivers/gpu/drm/i915/i915_gem_request.c | 32 +- drivers/gpu/drm/i915/i915_gem_request.h | 18 +- drivers/gpu/drm/i915/i915_sw_fence.c | 41 +- drivers/gpu/drm/i915/i915_sw_fence.h | 8 +- drivers/gpu/drm/i915/i915_trace.h | 2 +- drivers/gpu/drm/i915/intel_breadcrumbs.c | 4 +- drivers/gpu/drm/i915/intel_engine_cs.c | 2 +- drivers/gpu/drm/msm/msm_drv.h | 2 +- drivers/gpu/drm/msm/msm_fence.c | 28 +- drivers/gpu/drm/msm/msm_fence.h | 2 +- drivers/gpu/drm/msm/msm_gem.c | 14 +- drivers/gpu/drm/msm/msm_gem.h | 2 +- drivers/gpu/drm/msm/msm_gem_submit.c | 8 +- drivers/gpu/drm/msm/msm_gpu.c | 2 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 6 +- drivers/gpu/drm/nouveau/nouveau_fence.c | 80 ++-- drivers/gpu/drm/nouveau/nouveau_fence.h | 6 +- drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +- drivers/gpu/drm/nouveau/nv04_fence.c | 2 +- drivers/gpu/drm/nouveau/nv10_fence.c | 2 +- drivers/gpu/drm/nouveau/nv17_fence.c | 2 +- drivers/gpu/drm/nouveau/nv50_fence.c | 2 +- drivers/gpu/drm/nouveau/nv84_fence.c | 2 +- drivers/gpu/drm/qxl/qxl_drv.h | 4 +- drivers/gpu/drm/qxl/qxl_release.c | 35 +- drivers/gpu/drm/radeon/radeon.h | 10 +- drivers/gpu/drm/radeon/radeon_device.c | 2 +- drivers/gpu/drm/radeon/radeon_display.c | 8 +- drivers/gpu/drm/radeon/radeon_fence.c | 56 +-- drivers/gpu/drm/radeon/radeon_sync.c | 6 +- drivers/gpu/drm/radeon/radeon_uvd.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 24 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 22 +- drivers/gpu/drm/ttm/ttm_bo_vm.c | 8 +- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 3 +- drivers/gpu/drm/vgem/vgem_fence.c | 53 +-- drivers/gpu/drm/virtio/virtgpu_drv.h | 2 +- drivers/gpu/drm/virtio/virtgpu_fence.c | 26 +- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 12 +- drivers/gpu/drm/virtio/virtgpu_kms.c | 2 +- drivers/gpu/drm/virtio/virtgpu_plane.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 44 +- drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 8 +- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2 +- include/drm/drmP.h | 4 +- include/drm/drm_crtc.h | 2 +- include/drm/drm_plane.h | 2 +- include/drm/ttm/ttm_bo_api.h | 2 +- include/drm/ttm/ttm_bo_driver.h | 6 +- include/drm/ttm/ttm_execbuf_util.h | 2 +- include/linux/dma-buf.h | 4 +- include/linux/dma-fence-array.h | 86 ++++ include/linux/dma-fence.h | 437 +++++++++++++++++++ include/linux/fence-array.h | 84 ---- include/linux/fence.h | 424 ------------------- include/linux/reservation.h | 28 +- include/linux/seqno-fence.h | 20 +- include/linux/sync_file.h | 14 +- include/trace/events/dma_fence.h | 128 ++++++ include/trace/events/fence.h | 128 ------ 114 files changed, 2206 insertions(+), 2168 deletions(-) create mode 100644 drivers/dma-buf/dma-fence-array.c create mode 100644 drivers/dma-buf/dma-fence.c delete mode 100644 drivers/dma-buf/fence-array.c delete mode 100644 drivers/dma-buf/fence.c create mode 100644 include/linux/dma-fence-array.h create mode 100644 include/linux/dma-fence.h delete mode 100644 include/linux/fence-array.h delete mode 100644 
include/linux/fence.h create mode 100644 include/trace/events/dma_fence.h delete mode 100644 include/trace/events/fence.h (limited to 'drivers/gpu') diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt index b63a68531afd..269681a6faec 100644 --- a/Documentation/sync_file.txt +++ b/Documentation/sync_file.txt @@ -6,7 +6,7 @@ This document serves as a guide for device drivers writers on what the sync_file API is, and how drivers can support it. Sync file is the carrier of -the fences(struct fence) that are needed to synchronize between drivers or +the fences(struct dma_fence) that are needed to synchronize between drivers or across process boundaries. The sync_file API is meant to be used to send and receive fence information @@ -32,9 +32,9 @@ in-fences and out-fences Sync files can go either to or from userspace. When a sync_file is sent from the driver to userspace we call the fences it contains 'out-fences'. They are related to a buffer that the driver is processing or is going to process, so -the driver creates an out-fence to be able to notify, through fence_signal(), -when it has finished using (or processing) that buffer. Out-fences are fences -that the driver creates. +the driver creates an out-fence to be able to notify, through +dma_fence_signal(), when it has finished using (or processing) that buffer. +Out-fences are fences that the driver creates. On the other hand if the driver receives fence(s) through a sync_file from userspace we call these fence(s) 'in-fences'. Receiveing in-fences means that @@ -47,7 +47,7 @@ Creating Sync Files When a driver needs to send an out-fence userspace it creates a sync_file. Interface: - struct sync_file *sync_file_create(struct fence *fence); + struct sync_file *sync_file_create(struct dma_fence *fence); The caller pass the out-fence and gets back the sync_file. That is just the first step, next it needs to install an fd on sync_file->file. So it gets an @@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences from it. Interface: - struct fence *sync_file_get_fence(int fd); + struct dma_fence *sync_file_get_fence(int fd); The returned reference is owned by the caller and must be disposed of -afterwards using fence_put(). In case of error, a NULL is returned instead. +afterwards using dma_fence_put(). In case of error, a NULL is returned instead. References: [1] struct sync_file in include/linux/sync_file.h diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index fdf44cac08e6..37bf25c6b4a6 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -248,11 +248,11 @@ config DMA_SHARED_BUFFER APIs extension; the file's descriptor can then be passed on to other driver. -config FENCE_TRACE - bool "Enable verbose FENCE_TRACE messages" +config DMA_FENCE_TRACE + bool "Enable verbose DMA_FENCE_TRACE messages" depends on DMA_SHARED_BUFFER help - Enable the FENCE_TRACE printks. This will add extra + Enable the DMA_FENCE_TRACE printks. This will add extra spam to the console log, but will make it easier to diagnose lockup related problems for dma-buffers shared across multiple devices. diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 2585821b24ab..ed3b785bae37 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -7,7 +7,7 @@ config SYNC_FILE select DMA_SHARED_BUFFER ---help--- The Sync File Framework adds explicit syncronization via - userspace. It enables send/receive 'struct fence' objects to/from + userspace. 
It enables send/receive 'struct dma_fence' objects to/from userspace via Sync File fds for synchronization between drivers via userspace components. It has been ported from Android. diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 210a10bfad2b..c33bf8863147 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,3 +1,3 @@ -obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index cf04d249a6a4..e72e64484131 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include @@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) return base + offset; } -static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb) +static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; unsigned long flags; @@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll) struct dma_buf *dmabuf; struct reservation_object *resv; struct reservation_object_list *fobj; - struct fence *fence_excl; + struct dma_fence *fence_excl; unsigned long events; unsigned shared_count, seq; @@ -187,20 +187,20 @@ retry: spin_unlock_irq(&dmabuf->poll.lock); if (events & pevents) { - if (!fence_get_rcu(fence_excl)) { + if (!dma_fence_get_rcu(fence_excl)) { /* force a recheck */ events &= ~pevents; dma_buf_poll_cb(NULL, &dcb->cb); - } else if (!fence_add_callback(fence_excl, &dcb->cb, - dma_buf_poll_cb)) { + } else if (!dma_fence_add_callback(fence_excl, &dcb->cb, + dma_buf_poll_cb)) { events &= ~pevents; - fence_put(fence_excl); + dma_fence_put(fence_excl); } else { /* * No callback queued, wake up any additional * waiters. */ - fence_put(fence_excl); + dma_fence_put(fence_excl); dma_buf_poll_cb(NULL, &dcb->cb); } } @@ -222,9 +222,9 @@ retry: goto out; for (i = 0; i < shared_count; ++i) { - struct fence *fence = rcu_dereference(fobj->shared[i]); + struct dma_fence *fence = rcu_dereference(fobj->shared[i]); - if (!fence_get_rcu(fence)) { + if (!dma_fence_get_rcu(fence)) { /* * fence refcount dropped to zero, this means * that fobj has been freed @@ -235,13 +235,13 @@ retry: dma_buf_poll_cb(NULL, &dcb->cb); break; } - if (!fence_add_callback(fence, &dcb->cb, - dma_buf_poll_cb)) { - fence_put(fence); + if (!dma_fence_add_callback(fence, &dcb->cb, + dma_buf_poll_cb)) { + dma_fence_put(fence); events &= ~POLLOUT; break; } - fence_put(fence); + dma_fence_put(fence); } /* No callback queued, wake up any additional waiters. */ diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c new file mode 100644 index 000000000000..67eb7c8fb88c --- /dev/null +++ b/drivers/dma-buf/dma-fence-array.c @@ -0,0 +1,146 @@ +/* + * dma-fence-array: aggregate fences to be waited together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan + * Christian König + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include + +static const char *dma_fence_array_get_driver_name(struct dma_fence *fence) +{ + return "dma_fence_array"; +} + +static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +} + +static void dma_fence_array_cb_func(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct dma_fence_array_cb *array_cb = + container_of(cb, struct dma_fence_array_cb, cb); + struct dma_fence_array *array = array_cb->array; + + if (atomic_dec_and_test(&array->num_pending)) + dma_fence_signal(&array->base); + dma_fence_put(&array->base); +} + +static bool dma_fence_array_enable_signaling(struct dma_fence *fence) +{ + struct dma_fence_array *array = to_dma_fence_array(fence); + struct dma_fence_array_cb *cb = (void *)(&array[1]); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) { + cb[i].array = array; + /* + * As we may report that the fence is signaled before all + * callbacks are complete, we need to take an additional + * reference count on the array so that we do not free it too + * early. The core fence handling will only hold the reference + * until we signal the array as complete (but that is now + * insufficient). + */ + dma_fence_get(&array->base); + if (dma_fence_add_callback(array->fences[i], &cb[i].cb, + dma_fence_array_cb_func)) { + dma_fence_put(&array->base); + if (atomic_dec_and_test(&array->num_pending)) + return false; + } + } + + return true; +} + +static bool dma_fence_array_signaled(struct dma_fence *fence) +{ + struct dma_fence_array *array = to_dma_fence_array(fence); + + return atomic_read(&array->num_pending) <= 0; +} + +static void dma_fence_array_release(struct dma_fence *fence) +{ + struct dma_fence_array *array = to_dma_fence_array(fence); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) + dma_fence_put(array->fences[i]); + + kfree(array->fences); + dma_fence_free(fence); +} + +const struct dma_fence_ops dma_fence_array_ops = { + .get_driver_name = dma_fence_array_get_driver_name, + .get_timeline_name = dma_fence_array_get_timeline_name, + .enable_signaling = dma_fence_array_enable_signaling, + .signaled = dma_fence_array_signaled, + .wait = dma_fence_default_wait, + .release = dma_fence_array_release, +}; +EXPORT_SYMBOL(dma_fence_array_ops); + +/** + * dma_fence_array_create - Create a custom fence array + * @num_fences: [in] number of fences to add in the array + * @fences: [in] array containing the fences + * @context: [in] fence context to use + * @seqno: [in] sequence number to use + * @signal_on_any: [in] signal on any fence in the array + * + * Allocate a dma_fence_array object and initialize the base fence with + * dma_fence_init(). + * In case of error it returns NULL. + * + * The caller should allocate the fences array with num_fences size + * and fill it with the fences it wants to add to the object. Ownership of this + * array is taken and dma_fence_put() is used on each fence on release. + * + * If @signal_on_any is true the fence array signals if any fence in the array + * signals, otherwise it signals when all fences in the array signal. 
+ */ +struct dma_fence_array *dma_fence_array_create(int num_fences, + struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any) +{ + struct dma_fence_array *array; + size_t size = sizeof(*array); + + /* Allocate the callback structures behind the array. */ + size += num_fences * sizeof(struct dma_fence_array_cb); + array = kzalloc(size, GFP_KERNEL); + if (!array) + return NULL; + + spin_lock_init(&array->lock); + dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock, + context, seqno); + + array->num_fences = num_fences; + atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); + array->fences = fences; + + return array; +} +EXPORT_SYMBOL(dma_fence_array_create); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c new file mode 100644 index 000000000000..3a7bf009c21c --- /dev/null +++ b/drivers/dma-buf/dma-fence.c @@ -0,0 +1,537 @@ +/* + * Fence mechanism for dma-buf and to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on); +EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); + +/* + * fence context counter: each execution context should have its own + * fence context, this allows checking if fences belong to the same + * context or not. One device can have multiple separate contexts, + * and they're used if some engine can run independently of another. + */ +static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0); + +/** + * dma_fence_context_alloc - allocate an array of fence contexts + * @num: [in] amount of contexts to allocate + * + * This function will return the first index of the number of fences allocated. + * The fence context is used for setting fence->context to a unique number. + */ +u64 dma_fence_context_alloc(unsigned num) +{ + BUG_ON(!num); + return atomic64_add_return(num, &dma_fence_context_counter) - num; +} +EXPORT_SYMBOL(dma_fence_context_alloc); + +/** + * dma_fence_signal_locked - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from unsignaled to signaled state, it will only be effective + * the first time. + * + * Unlike dma_fence_signal, this function must be called with fence->lock held. 
+ */ +int dma_fence_signal_locked(struct dma_fence *fence) +{ + struct dma_fence_cb *cur, *tmp; + int ret = 0; + + lockdep_assert_held(fence->lock); + + if (WARN_ON(!fence)) + return -EINVAL; + + if (!ktime_to_ns(fence->timestamp)) { + fence->timestamp = ktime_get(); + smp_mb__before_atomic(); + } + + if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + ret = -EINVAL; + + /* + * we might have raced with the unlocked dma_fence_signal, + * still run through all callbacks + */ + } else + trace_dma_fence_signaled(fence); + + list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { + list_del_init(&cur->node); + cur->func(fence, cur); + } + return ret; +} +EXPORT_SYMBOL(dma_fence_signal_locked); + +/** + * dma_fence_signal - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from unsignaled to signaled state, it will only be effective + * the first time. + */ +int dma_fence_signal(struct dma_fence *fence) +{ + unsigned long flags; + + if (!fence) + return -EINVAL; + + if (!ktime_to_ns(fence->timestamp)) { + fence->timestamp = ktime_get(); + smp_mb__before_atomic(); + } + + if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return -EINVAL; + + trace_dma_fence_signaled(fence); + + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { + struct dma_fence_cb *cur, *tmp; + + spin_lock_irqsave(fence->lock, flags); + list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { + list_del_init(&cur->node); + cur->func(fence, cur); + } + spin_unlock_irqrestore(fence->lock, flags); + } + return 0; +} +EXPORT_SYMBOL(dma_fence_signal); + +/** + * dma_fence_wait_timeout - sleep until the fence gets signaled + * or until timeout elapses + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the + * remaining timeout in jiffies on success. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly (buf-mgr between reservation and committing) + * holds a reference to the fence, otherwise the fence might be + * freed before return, resulting in undefined behavior. 
+ */ +signed long +dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) +{ + signed long ret; + + if (WARN_ON(timeout < 0)) + return -EINVAL; + + if (timeout == 0) + return dma_fence_is_signaled(fence); + + trace_dma_fence_wait_start(fence); + ret = fence->ops->wait(fence, intr, timeout); + trace_dma_fence_wait_end(fence); + return ret; +} +EXPORT_SYMBOL(dma_fence_wait_timeout); + +void dma_fence_release(struct kref *kref) +{ + struct dma_fence *fence = + container_of(kref, struct dma_fence, refcount); + + trace_dma_fence_destroy(fence); + + BUG_ON(!list_empty(&fence->cb_list)); + + if (fence->ops->release) + fence->ops->release(fence); + else + dma_fence_free(fence); +} +EXPORT_SYMBOL(dma_fence_release); + +void dma_fence_free(struct dma_fence *fence) +{ + kfree_rcu(fence, rcu); +} +EXPORT_SYMBOL(dma_fence_free); + +/** + * dma_fence_enable_sw_signaling - enable signaling on fence + * @fence: [in] the fence to enable + * + * this will request for sw signaling to be enabled, to make the fence + * complete as soon as possible + */ +void dma_fence_enable_sw_signaling(struct dma_fence *fence) +{ + unsigned long flags; + + if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags) && + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + trace_dma_fence_enable_signal(fence); + + spin_lock_irqsave(fence->lock, flags); + + if (!fence->ops->enable_signaling(fence)) + dma_fence_signal_locked(fence); + + spin_unlock_irqrestore(fence->lock, flags); + } +} +EXPORT_SYMBOL(dma_fence_enable_sw_signaling); + +/** + * dma_fence_add_callback - add a callback to be called when the fence + * is signaled + * @fence: [in] the fence to wait on + * @cb: [in] the callback to register + * @func: [in] the function to call + * + * cb will be initialized by dma_fence_add_callback, no initialization + * by the caller is required. Any number of callbacks can be registered + * to a fence, but a callback can only be registered to one fence at a time. + * + * Note that the callback can be called from an atomic context. If + * fence is already signaled, this function will return -ENOENT (and + * *not* call the callback) + * + * Add a software callback to the fence. Same restrictions apply to + * refcount as it does to dma_fence_wait, however the caller doesn't need to + * keep a refcount to fence afterwards: when software access is enabled, + * the creator of the fence is required to keep the fence alive until + * after it signals with dma_fence_signal. The callback itself can be called + * from irq context. 
+ * + */ +int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, + dma_fence_func_t func) +{ + unsigned long flags; + int ret = 0; + bool was_set; + + if (WARN_ON(!fence || !func)) + return -EINVAL; + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + INIT_LIST_HEAD(&cb->node); + return -ENOENT; + } + + spin_lock_irqsave(fence->lock, flags); + + was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + ret = -ENOENT; + else if (!was_set) { + trace_dma_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + dma_fence_signal_locked(fence); + ret = -ENOENT; + } + } + + if (!ret) { + cb->func = func; + list_add_tail(&cb->node, &fence->cb_list); + } else + INIT_LIST_HEAD(&cb->node); + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(dma_fence_add_callback); + +/** + * dma_fence_remove_callback - remove a callback from the signaling list + * @fence: [in] the fence to wait on + * @cb: [in] the callback to remove + * + * Remove a previously queued callback from the fence. This function returns + * true if the callback is successfully removed, or false if the fence has + * already been signaled. + * + * *WARNING*: + * Cancelling a callback should only be done if you really know what you're + * doing, since deadlocks and race conditions could occur all too easily. For + * this reason, it should only ever be done on hardware lockup recovery, + * with a reference held to the fence. + */ +bool +dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(fence->lock, flags); + + ret = !list_empty(&cb->node); + if (ret) + list_del_init(&cb->node); + + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(dma_fence_remove_callback); + +struct default_wait_cb { + struct dma_fence_cb base; + struct task_struct *task; +}; + +static void +dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct default_wait_cb *wait = + container_of(cb, struct default_wait_cb, base); + + wake_up_state(wait->task, TASK_NORMAL); +} + +/** + * dma_fence_default_wait - default sleep until the fence gets signaled + * or until timeout elapses + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the + * remaining timeout in jiffies on success. 
+ */ +signed long +dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) +{ + struct default_wait_cb cb; + unsigned long flags; + signed long ret = timeout; + bool was_set; + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return timeout; + + spin_lock_irqsave(fence->lock, flags); + + if (intr && signal_pending(current)) { + ret = -ERESTARTSYS; + goto out; + } + + was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + goto out; + + if (!was_set) { + trace_dma_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + dma_fence_signal_locked(fence); + goto out; + } + } + + cb.base.func = dma_fence_default_wait_cb; + cb.task = current; + list_add(&cb.base.node, &fence->cb_list); + + while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { + if (intr) + __set_current_state(TASK_INTERRUPTIBLE); + else + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irqrestore(fence->lock, flags); + + ret = schedule_timeout(ret); + + spin_lock_irqsave(fence->lock, flags); + if (ret > 0 && intr && signal_pending(current)) + ret = -ERESTARTSYS; + } + + if (!list_empty(&cb.base.node)) + list_del(&cb.base.node); + __set_current_state(TASK_RUNNING); + +out: + spin_unlock_irqrestore(fence->lock, flags); + return ret; +} +EXPORT_SYMBOL(dma_fence_default_wait); + +static bool +dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count) +{ + int i; + + for (i = 0; i < count; ++i) { + struct dma_fence *fence = fences[i]; + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + } + return false; +} + +/** + * dma_fence_wait_any_timeout - sleep until any fence gets signaled + * or until timeout elapses + * @fences: [in] array of fences to wait on + * @count: [in] number of fences to wait on + * @intr: [in] if true, do an interruptible wait + * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if + * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies + * on success. + * + * Synchronous waits for the first fence in the array to be signaled. The + * caller needs to hold a reference to all fences in the array, otherwise a + * fence might be freed before return, resulting in undefined behavior. 
+ */ +signed long +dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, + bool intr, signed long timeout) +{ + struct default_wait_cb *cb; + signed long ret = timeout; + unsigned i; + + if (WARN_ON(!fences || !count || timeout < 0)) + return -EINVAL; + + if (timeout == 0) { + for (i = 0; i < count; ++i) + if (dma_fence_is_signaled(fences[i])) + return 1; + + return 0; + } + + cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL); + if (cb == NULL) { + ret = -ENOMEM; + goto err_free_cb; + } + + for (i = 0; i < count; ++i) { + struct dma_fence *fence = fences[i]; + + if (fence->ops->wait != dma_fence_default_wait) { + ret = -EINVAL; + goto fence_rm_cb; + } + + cb[i].task = current; + if (dma_fence_add_callback(fence, &cb[i].base, + dma_fence_default_wait_cb)) { + /* This fence is already signaled */ + goto fence_rm_cb; + } + } + + while (ret > 0) { + if (intr) + set_current_state(TASK_INTERRUPTIBLE); + else + set_current_state(TASK_UNINTERRUPTIBLE); + + if (dma_fence_test_signaled_any(fences, count)) + break; + + ret = schedule_timeout(ret); + + if (ret > 0 && intr && signal_pending(current)) + ret = -ERESTARTSYS; + } + + __set_current_state(TASK_RUNNING); + +fence_rm_cb: + while (i-- > 0) + dma_fence_remove_callback(fences[i], &cb[i].base); + +err_free_cb: + kfree(cb); + + return ret; +} +EXPORT_SYMBOL(dma_fence_wait_any_timeout); + +/** + * dma_fence_init - Initialize a custom fence. + * @fence: [in] the fence to initialize + * @ops: [in] the dma_fence_ops for operations on this fence + * @lock: [in] the irqsafe spinlock to use for locking this fence + * @context: [in] the execution context this fence is run on + * @seqno: [in] a linear increasing sequence number for this context + * + * Initializes an allocated fence, the caller doesn't have to keep its + * refcount after committing with this fence, but it will need to hold a + * refcount again if dma_fence_ops.enable_signaling gets called. This can + * be used for other implementing other types of fence. + * + * context and seqno are used for easy comparison between fences, allowing + * to check which fence is later by simply using dma_fence_later. + */ +void +dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, unsigned seqno) +{ + BUG_ON(!lock); + BUG_ON(!ops || !ops->wait || !ops->enable_signaling || + !ops->get_driver_name || !ops->get_timeline_name); + + kref_init(&fence->refcount); + fence->ops = ops; + INIT_LIST_HEAD(&fence->cb_list); + fence->lock = lock; + fence->context = context; + fence->seqno = seqno; + fence->flags = 0UL; + + trace_dma_fence_init(fence); +} +EXPORT_SYMBOL(dma_fence_init); diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c deleted file mode 100644 index f1989fcaf354..000000000000 --- a/drivers/dma-buf/fence-array.c +++ /dev/null @@ -1,145 +0,0 @@ -/* - * fence-array: aggregate fences to be waited together - * - * Copyright (C) 2016 Collabora Ltd - * Copyright (C) 2016 Advanced Micro Devices, Inc. - * Authors: - * Gustavo Padovan - * Christian König - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */
-
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/fence-array.h>
-
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
-
-static const char *fence_array_get_driver_name(struct fence *fence)
-{
-	return "fence_array";
-}
-
-static const char *fence_array_get_timeline_name(struct fence *fence)
-{
-	return "unbound";
-}
-
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
-{
-	struct fence_array_cb *array_cb =
-		container_of(cb, struct fence_array_cb, cb);
-	struct fence_array *array = array_cb->array;
-
-	if (atomic_dec_and_test(&array->num_pending))
-		fence_signal(&array->base);
-	fence_put(&array->base);
-}
-
-static bool fence_array_enable_signaling(struct fence *fence)
-{
-	struct fence_array *array = to_fence_array(fence);
-	struct fence_array_cb *cb = (void *)(&array[1]);
-	unsigned i;
-
-	for (i = 0; i < array->num_fences; ++i) {
-		cb[i].array = array;
-		/*
-		 * As we may report that the fence is signaled before all
-		 * callbacks are complete, we need to take an additional
-		 * reference count on the array so that we do not free it too
-		 * early. The core fence handling will only hold the reference
-		 * until we signal the array as complete (but that is now
-		 * insufficient).
-		 */
-		fence_get(&array->base);
-		if (fence_add_callback(array->fences[i], &cb[i].cb,
-				       fence_array_cb_func)) {
-			fence_put(&array->base);
-			if (atomic_dec_and_test(&array->num_pending))
-				return false;
-		}
-	}
-
-	return true;
-}
-
-static bool fence_array_signaled(struct fence *fence)
-{
-	struct fence_array *array = to_fence_array(fence);
-
-	return atomic_read(&array->num_pending) <= 0;
-}
-
-static void fence_array_release(struct fence *fence)
-{
-	struct fence_array *array = to_fence_array(fence);
-	unsigned i;
-
-	for (i = 0; i < array->num_fences; ++i)
-		fence_put(array->fences[i]);
-
-	kfree(array->fences);
-	fence_free(fence);
-}
-
-const struct fence_ops fence_array_ops = {
-	.get_driver_name = fence_array_get_driver_name,
-	.get_timeline_name = fence_array_get_timeline_name,
-	.enable_signaling = fence_array_enable_signaling,
-	.signaled = fence_array_signaled,
-	.wait = fence_default_wait,
-	.release = fence_array_release,
-};
-EXPORT_SYMBOL(fence_array_ops);
-
-/**
- * fence_array_create - Create a custom fence array
- * @num_fences:		[in]	number of fences to add in the array
- * @fences:		[in]	array containing the fences
- * @context:		[in]	fence context to use
- * @seqno:		[in]	sequence number to use
- * @signal_on_any:	[in]	signal on any fence in the array
- *
- * Allocate a fence_array object and initialize the base fence with fence_init().
- * In case of error it returns NULL.
- *
- * The caller should allocate the fences array with num_fences size
- * and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and fence_put() is used on each fence on release.
- *
- * If @signal_on_any is true the fence array signals if any fence in the array
- * signals, otherwise it signals when all fences in the array signal.
- */
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
-				       u64 context, unsigned seqno,
-				       bool signal_on_any)
-{
-	struct fence_array *array;
-	size_t size = sizeof(*array);
-
-	/* Allocate the callback structures behind the array. */
-	size += num_fences * sizeof(struct fence_array_cb);
-	array = kzalloc(size, GFP_KERNEL);
-	if (!array)
-		return NULL;
-
-	spin_lock_init(&array->lock);
-	fence_init(&array->base, &fence_array_ops, &array->lock,
-		   context, seqno);
-
-	array->num_fences = num_fences;
-	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
-	array->fences = fences;
-
-	return array;
-}
-EXPORT_SYMBOL(fence_array_create);
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
deleted file mode 100644
index cc05dddc77a6..000000000000
--- a/drivers/dma-buf/fence.c
+++ /dev/null
@@ -1,534 +0,0 @@
-/*
- * Fence mechanism for dma-buf and to allow for asynchronous dma access
- *
- * Copyright (C) 2012 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark
- * Maarten Lankhorst
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/atomic.h>
-#include <linux/fence.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/fence.h>
-
-EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
-EXPORT_TRACEPOINT_SYMBOL(fence_emit);
-
-/*
- * fence context counter: each execution context should have its own
- * fence context, this allows checking if fences belong to the same
- * context or not. One device can have multiple separate contexts,
- * and they're used if some engine can run independently of another.
- */
-static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
-
-/**
- * fence_context_alloc - allocate an array of fence contexts
- * @num:	[in]	amount of contexts to allocate
- *
- * This function will return the first index of the number of fences allocated.
- * The fence context is used for setting fence->context to a unique number.
- */
-u64 fence_context_alloc(unsigned num)
-{
-	BUG_ON(!num);
-	return atomic64_add_return(num, &fence_context_counter) - num;
-}
-EXPORT_SYMBOL(fence_context_alloc);
-
-/**
- * fence_signal_locked - signal completion of a fence
- * @fence: the fence to signal
- *
- * Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
- *
- * Unlike fence_signal, this function must be called with fence->lock held.
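Shown with the dma_fence names this patch introduces, the locked-signal path is used from a context that already holds the fence lock, roughly like this sketch ("fence" and "flags" are assumed):

	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	/* returns -EINVAL if the fence was already signaled */
	dma_fence_signal_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);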
- */ -int fence_signal_locked(struct fence *fence) -{ - struct fence_cb *cur, *tmp; - int ret = 0; - - lockdep_assert_held(fence->lock); - - if (WARN_ON(!fence)) - return -EINVAL; - - if (!ktime_to_ns(fence->timestamp)) { - fence->timestamp = ktime_get(); - smp_mb__before_atomic(); - } - - if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - ret = -EINVAL; - - /* - * we might have raced with the unlocked fence_signal, - * still run through all callbacks - */ - } else - trace_fence_signaled(fence); - - list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { - list_del_init(&cur->node); - cur->func(fence, cur); - } - return ret; -} -EXPORT_SYMBOL(fence_signal_locked); - -/** - * fence_signal - signal completion of a fence - * @fence: the fence to signal - * - * Signal completion for software callbacks on a fence, this will unblock - * fence_wait() calls and run all the callbacks added with - * fence_add_callback(). Can be called multiple times, but since a fence - * can only go from unsignaled to signaled state, it will only be effective - * the first time. - */ -int fence_signal(struct fence *fence) -{ - unsigned long flags; - - if (!fence) - return -EINVAL; - - if (!ktime_to_ns(fence->timestamp)) { - fence->timestamp = ktime_get(); - smp_mb__before_atomic(); - } - - if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return -EINVAL; - - trace_fence_signaled(fence); - - if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { - struct fence_cb *cur, *tmp; - - spin_lock_irqsave(fence->lock, flags); - list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { - list_del_init(&cur->node); - cur->func(fence, cur); - } - spin_unlock_irqrestore(fence->lock, flags); - } - return 0; -} -EXPORT_SYMBOL(fence_signal); - -/** - * fence_wait_timeout - sleep until the fence gets signaled - * or until timeout elapses - * @fence: [in] the fence to wait on - * @intr: [in] if true, do an interruptible wait - * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT - * - * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the - * remaining timeout in jiffies on success. Other error values may be - * returned on custom implementations. - * - * Performs a synchronous wait on this fence. It is assumed the caller - * directly or indirectly (buf-mgr between reservation and committing) - * holds a reference to the fence, otherwise the fence might be - * freed before return, resulting in undefined behavior. 
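The tri-state return value is easy to mishandle; a typical caller pattern, shown as a sketch with the renamed API ("fence" and "timeout" are assumed):

	signed long r;

	r = dma_fence_wait_timeout(fence, true, timeout);
	if (r < 0)
		return r;		/* interrupted: -ERESTARTSYS */
	if (r == 0)
		return -ETIMEDOUT;	/* the wait timed out */
	/* fence signaled; r is the unused part of the timeout, in jiffies */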
- */ -signed long -fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) -{ - signed long ret; - - if (WARN_ON(timeout < 0)) - return -EINVAL; - - if (timeout == 0) - return fence_is_signaled(fence); - - trace_fence_wait_start(fence); - ret = fence->ops->wait(fence, intr, timeout); - trace_fence_wait_end(fence); - return ret; -} -EXPORT_SYMBOL(fence_wait_timeout); - -void fence_release(struct kref *kref) -{ - struct fence *fence = - container_of(kref, struct fence, refcount); - - trace_fence_destroy(fence); - - BUG_ON(!list_empty(&fence->cb_list)); - - if (fence->ops->release) - fence->ops->release(fence); - else - fence_free(fence); -} -EXPORT_SYMBOL(fence_release); - -void fence_free(struct fence *fence) -{ - kfree_rcu(fence, rcu); -} -EXPORT_SYMBOL(fence_free); - -/** - * fence_enable_sw_signaling - enable signaling on fence - * @fence: [in] the fence to enable - * - * this will request for sw signaling to be enabled, to make the fence - * complete as soon as possible - */ -void fence_enable_sw_signaling(struct fence *fence) -{ - unsigned long flags; - - if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && - !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - trace_fence_enable_signal(fence); - - spin_lock_irqsave(fence->lock, flags); - - if (!fence->ops->enable_signaling(fence)) - fence_signal_locked(fence); - - spin_unlock_irqrestore(fence->lock, flags); - } -} -EXPORT_SYMBOL(fence_enable_sw_signaling); - -/** - * fence_add_callback - add a callback to be called when the fence - * is signaled - * @fence: [in] the fence to wait on - * @cb: [in] the callback to register - * @func: [in] the function to call - * - * cb will be initialized by fence_add_callback, no initialization - * by the caller is required. Any number of callbacks can be registered - * to a fence, but a callback can only be registered to one fence at a time. - * - * Note that the callback can be called from an atomic context. If - * fence is already signaled, this function will return -ENOENT (and - * *not* call the callback) - * - * Add a software callback to the fence. Same restrictions apply to - * refcount as it does to fence_wait, however the caller doesn't need to - * keep a refcount to fence afterwards: when software access is enabled, - * the creator of the fence is required to keep the fence alive until - * after it signals with fence_signal. The callback itself can be called - * from irq context. 
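A minimal callback user might look like the sketch below (renamed API; struct my_waiter and my_wake are hypothetical). Because the callback can run from irq context, it only does atomic-safe work:

	struct my_waiter {
		struct dma_fence_cb cb;
		struct task_struct *task;
	};

	static void my_wake(struct dma_fence *f, struct dma_fence_cb *cb)
	{
		struct my_waiter *w = container_of(cb, struct my_waiter, cb);

		wake_up_process(w->task);
	}

	static int my_arm(struct dma_fence *fence, struct my_waiter *w)
	{
		w->task = current;
		/* -ENOENT: already signaled, my_wake() will never run */
		return dma_fence_add_callback(fence, &w->cb, my_wake);
	}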
- * - */ -int fence_add_callback(struct fence *fence, struct fence_cb *cb, - fence_func_t func) -{ - unsigned long flags; - int ret = 0; - bool was_set; - - if (WARN_ON(!fence || !func)) - return -EINVAL; - - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - INIT_LIST_HEAD(&cb->node); - return -ENOENT; - } - - spin_lock_irqsave(fence->lock, flags); - - was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); - - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - ret = -ENOENT; - else if (!was_set) { - trace_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - fence_signal_locked(fence); - ret = -ENOENT; - } - } - - if (!ret) { - cb->func = func; - list_add_tail(&cb->node, &fence->cb_list); - } else - INIT_LIST_HEAD(&cb->node); - spin_unlock_irqrestore(fence->lock, flags); - - return ret; -} -EXPORT_SYMBOL(fence_add_callback); - -/** - * fence_remove_callback - remove a callback from the signaling list - * @fence: [in] the fence to wait on - * @cb: [in] the callback to remove - * - * Remove a previously queued callback from the fence. This function returns - * true if the callback is successfully removed, or false if the fence has - * already been signaled. - * - * *WARNING*: - * Cancelling a callback should only be done if you really know what you're - * doing, since deadlocks and race conditions could occur all too easily. For - * this reason, it should only ever be done on hardware lockup recovery, - * with a reference held to the fence. - */ -bool -fence_remove_callback(struct fence *fence, struct fence_cb *cb) -{ - unsigned long flags; - bool ret; - - spin_lock_irqsave(fence->lock, flags); - - ret = !list_empty(&cb->node); - if (ret) - list_del_init(&cb->node); - - spin_unlock_irqrestore(fence->lock, flags); - - return ret; -} -EXPORT_SYMBOL(fence_remove_callback); - -struct default_wait_cb { - struct fence_cb base; - struct task_struct *task; -}; - -static void -fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) -{ - struct default_wait_cb *wait = - container_of(cb, struct default_wait_cb, base); - - wake_up_state(wait->task, TASK_NORMAL); -} - -/** - * fence_default_wait - default sleep until the fence gets signaled - * or until timeout elapses - * @fence: [in] the fence to wait on - * @intr: [in] if true, do an interruptible wait - * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT - * - * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the - * remaining timeout in jiffies on success. 
- */ -signed long -fence_default_wait(struct fence *fence, bool intr, signed long timeout) -{ - struct default_wait_cb cb; - unsigned long flags; - signed long ret = timeout; - bool was_set; - - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return timeout; - - spin_lock_irqsave(fence->lock, flags); - - if (intr && signal_pending(current)) { - ret = -ERESTARTSYS; - goto out; - } - - was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); - - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - goto out; - - if (!was_set) { - trace_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - fence_signal_locked(fence); - goto out; - } - } - - cb.base.func = fence_default_wait_cb; - cb.task = current; - list_add(&cb.base.node, &fence->cb_list); - - while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { - if (intr) - __set_current_state(TASK_INTERRUPTIBLE); - else - __set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock_irqrestore(fence->lock, flags); - - ret = schedule_timeout(ret); - - spin_lock_irqsave(fence->lock, flags); - if (ret > 0 && intr && signal_pending(current)) - ret = -ERESTARTSYS; - } - - if (!list_empty(&cb.base.node)) - list_del(&cb.base.node); - __set_current_state(TASK_RUNNING); - -out: - spin_unlock_irqrestore(fence->lock, flags); - return ret; -} -EXPORT_SYMBOL(fence_default_wait); - -static bool -fence_test_signaled_any(struct fence **fences, uint32_t count) -{ - int i; - - for (i = 0; i < count; ++i) { - struct fence *fence = fences[i]; - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return true; - } - return false; -} - -/** - * fence_wait_any_timeout - sleep until any fence gets signaled - * or until timeout elapses - * @fences: [in] array of fences to wait on - * @count: [in] number of fences to wait on - * @intr: [in] if true, do an interruptible wait - * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT - * - * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if - * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies - * on success. - * - * Synchronous waits for the first fence in the array to be signaled. The - * caller needs to hold a reference to all fences in the array, otherwise a - * fence might be freed before return, resulting in undefined behavior. 
- */ -signed long -fence_wait_any_timeout(struct fence **fences, uint32_t count, - bool intr, signed long timeout) -{ - struct default_wait_cb *cb; - signed long ret = timeout; - unsigned i; - - if (WARN_ON(!fences || !count || timeout < 0)) - return -EINVAL; - - if (timeout == 0) { - for (i = 0; i < count; ++i) - if (fence_is_signaled(fences[i])) - return 1; - - return 0; - } - - cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL); - if (cb == NULL) { - ret = -ENOMEM; - goto err_free_cb; - } - - for (i = 0; i < count; ++i) { - struct fence *fence = fences[i]; - - if (fence->ops->wait != fence_default_wait) { - ret = -EINVAL; - goto fence_rm_cb; - } - - cb[i].task = current; - if (fence_add_callback(fence, &cb[i].base, - fence_default_wait_cb)) { - /* This fence is already signaled */ - goto fence_rm_cb; - } - } - - while (ret > 0) { - if (intr) - set_current_state(TASK_INTERRUPTIBLE); - else - set_current_state(TASK_UNINTERRUPTIBLE); - - if (fence_test_signaled_any(fences, count)) - break; - - ret = schedule_timeout(ret); - - if (ret > 0 && intr && signal_pending(current)) - ret = -ERESTARTSYS; - } - - __set_current_state(TASK_RUNNING); - -fence_rm_cb: - while (i-- > 0) - fence_remove_callback(fences[i], &cb[i].base); - -err_free_cb: - kfree(cb); - - return ret; -} -EXPORT_SYMBOL(fence_wait_any_timeout); - -/** - * fence_init - Initialize a custom fence. - * @fence: [in] the fence to initialize - * @ops: [in] the fence_ops for operations on this fence - * @lock: [in] the irqsafe spinlock to use for locking this fence - * @context: [in] the execution context this fence is run on - * @seqno: [in] a linear increasing sequence number for this context - * - * Initializes an allocated fence, the caller doesn't have to keep its - * refcount after committing with this fence, but it will need to hold a - * refcount again if fence_ops.enable_signaling gets called. This can - * be used for other implementing other types of fence. - * - * context and seqno are used for easy comparison between fences, allowing - * to check which fence is later by simply using fence_later. 
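The initialization contract is unchanged by the rename; a hypothetical custom fence would still follow this pattern (sketch only; my_fence_ops is an assumed dma_fence_ops providing wait, enable_signaling and both name callbacks):

	static DEFINE_SPINLOCK(my_timeline_lock);

	struct dma_fence *my_fence_create(u64 context, unsigned seqno)
	{
		struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;

		/* context typically comes from dma_fence_context_alloc(1) */
		dma_fence_init(f, &my_fence_ops, &my_timeline_lock,
			       context, seqno);
		return f;
	}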
- */ -void -fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno) -{ - BUG_ON(!lock); - BUG_ON(!ops || !ops->wait || !ops->enable_signaling || - !ops->get_driver_name || !ops->get_timeline_name); - - kref_init(&fence->refcount); - fence->ops = ops; - INIT_LIST_HEAD(&fence->cb_list); - fence->lock = lock; - fence->context = context; - fence->seqno = seqno; - fence->flags = 0UL; - - trace_fence_init(fence); -} -EXPORT_SYMBOL(fence_init); diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index 82de59f7cbbd..7ed56f3edfb7 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared); static void reservation_object_add_shared_inplace(struct reservation_object *obj, struct reservation_object_list *fobj, - struct fence *fence) + struct dma_fence *fence) { u32 i; - fence_get(fence); + dma_fence_get(fence); preempt_disable(); write_seqcount_begin(&obj->seq); for (i = 0; i < fobj->shared_count; ++i) { - struct fence *old_fence; + struct dma_fence *old_fence; old_fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(obj)); @@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, write_seqcount_end(&obj->seq); preempt_enable(); - fence_put(old_fence); + dma_fence_put(old_fence); return; } } @@ -143,12 +143,12 @@ static void reservation_object_add_shared_replace(struct reservation_object *obj, struct reservation_object_list *old, struct reservation_object_list *fobj, - struct fence *fence) + struct dma_fence *fence) { unsigned i; - struct fence *old_fence = NULL; + struct dma_fence *old_fence = NULL; - fence_get(fence); + dma_fence_get(fence); if (!old) { RCU_INIT_POINTER(fobj->shared[0], fence); @@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj, fobj->shared_count = old->shared_count; for (i = 0; i < old->shared_count; ++i) { - struct fence *check; + struct dma_fence *check; check = rcu_dereference_protected(old->shared[i], reservation_object_held(obj)); @@ -196,7 +196,7 @@ done: kfree_rcu(old, rcu); if (old_fence) - fence_put(old_fence); + dma_fence_put(old_fence); } /** @@ -208,7 +208,7 @@ done: * reservation_object_reserve_shared() has been called. */ void reservation_object_add_shared_fence(struct reservation_object *obj, - struct fence *fence) + struct dma_fence *fence) { struct reservation_object_list *old, *fobj = obj->staged; @@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence); * Add a fence to the exclusive slot. The obj->lock must be held. 
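In practice "the obj->lock must be held" refers to the reservation object's ww_mutex, so a caller looks roughly like this sketch ("obj" and "fence" are assumed):

	ww_mutex_lock(&obj->lock, NULL);
	reservation_object_add_excl_fence(obj, fence);
	ww_mutex_unlock(&obj->lock);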
*/ void reservation_object_add_excl_fence(struct reservation_object *obj, - struct fence *fence) + struct dma_fence *fence) { - struct fence *old_fence = reservation_object_get_excl(obj); + struct dma_fence *old_fence = reservation_object_get_excl(obj); struct reservation_object_list *old; u32 i = 0; @@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, i = old->shared_count; if (fence) - fence_get(fence); + dma_fence_get(fence); preempt_disable(); write_seqcount_begin(&obj->seq); @@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, /* inplace update, no shared fences */ while (i--) - fence_put(rcu_dereference_protected(old->shared[i], + dma_fence_put(rcu_dereference_protected(old->shared[i], reservation_object_held(obj))); if (old_fence) - fence_put(old_fence); + dma_fence_put(old_fence); } EXPORT_SYMBOL(reservation_object_add_excl_fence); @@ -276,12 +276,12 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence); * Zero or -errno */ int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct fence **pfence_excl, + struct dma_fence **pfence_excl, unsigned *pshared_count, - struct fence ***pshared) + struct dma_fence ***pshared) { - struct fence **shared = NULL; - struct fence *fence_excl; + struct dma_fence **shared = NULL; + struct dma_fence *fence_excl; unsigned int shared_count; int ret = 1; @@ -296,12 +296,12 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, seq = read_seqcount_begin(&obj->seq); fence_excl = rcu_dereference(obj->fence_excl); - if (fence_excl && !fence_get_rcu(fence_excl)) + if (fence_excl && !dma_fence_get_rcu(fence_excl)) goto unlock; fobj = rcu_dereference(obj->fence); if (fobj) { - struct fence **nshared; + struct dma_fence **nshared; size_t sz = sizeof(*shared) * fobj->shared_max; nshared = krealloc(shared, sz, @@ -322,15 +322,15 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, for (i = 0; i < shared_count; ++i) { shared[i] = rcu_dereference(fobj->shared[i]); - if (!fence_get_rcu(shared[i])) + if (!dma_fence_get_rcu(shared[i])) break; } } if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { while (i--) - fence_put(shared[i]); - fence_put(fence_excl); + dma_fence_put(shared[i]); + dma_fence_put(fence_excl); goto unlock; } @@ -368,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, unsigned long timeout) { - struct fence *fence; + struct dma_fence *fence; unsigned seq, shared_count, i = 0; long ret = timeout; @@ -389,16 +389,17 @@ retry: shared_count = fobj->shared_count; for (i = 0; i < shared_count; ++i) { - struct fence *lfence = rcu_dereference(fobj->shared[i]); + struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &lfence->flags)) continue; - if (!fence_get_rcu(lfence)) + if (!dma_fence_get_rcu(lfence)) goto unlock_retry; - if (fence_is_signaled(lfence)) { - fence_put(lfence); + if (dma_fence_is_signaled(lfence)) { + dma_fence_put(lfence); continue; } @@ -408,15 +409,16 @@ retry: } if (!shared_count) { - struct fence *fence_excl = rcu_dereference(obj->fence_excl); + struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); if (fence_excl && - !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) { - if (!fence_get_rcu(fence_excl)) + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence_excl->flags)) { + if (!dma_fence_get_rcu(fence_excl)) goto 
unlock_retry; - if (fence_is_signaled(fence_excl)) - fence_put(fence_excl); + if (dma_fence_is_signaled(fence_excl)) + dma_fence_put(fence_excl); else fence = fence_excl; } @@ -425,12 +427,12 @@ retry: rcu_read_unlock(); if (fence) { if (read_seqcount_retry(&obj->seq, seq)) { - fence_put(fence); + dma_fence_put(fence); goto retry; } - ret = fence_wait_timeout(fence, intr, ret); - fence_put(fence); + ret = dma_fence_wait_timeout(fence, intr, ret); + dma_fence_put(fence); if (ret > 0 && wait_all && (i + 1 < shared_count)) goto retry; } @@ -444,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); static inline int -reservation_object_test_signaled_single(struct fence *passed_fence) +reservation_object_test_signaled_single(struct dma_fence *passed_fence) { - struct fence *fence, *lfence = passed_fence; + struct dma_fence *fence, *lfence = passed_fence; int ret = 1; - if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { - fence = fence_get_rcu(lfence); + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { + fence = dma_fence_get_rcu(lfence); if (!fence) return -1; - ret = !!fence_is_signaled(fence); - fence_put(fence); + ret = !!dma_fence_is_signaled(fence); + dma_fence_put(fence); } return ret; } @@ -492,7 +494,7 @@ retry: shared_count = fobj->shared_count; for (i = 0; i < shared_count; ++i) { - struct fence *fence = rcu_dereference(fobj->shared[i]); + struct dma_fence *fence = rcu_dereference(fobj->shared[i]); ret = reservation_object_test_signaled_single(fence); if (ret < 0) @@ -506,7 +508,7 @@ retry: } if (!shared_count) { - struct fence *fence_excl = rcu_dereference(obj->fence_excl); + struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); if (fence_excl) { ret = reservation_object_test_signaled_single( diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c index 71127f8f1626..f47112a64763 100644 --- a/drivers/dma-buf/seqno-fence.c +++ b/drivers/dma-buf/seqno-fence.c @@ -21,35 +21,35 @@ #include #include -static const char *seqno_fence_get_driver_name(struct fence *fence) +static const char *seqno_fence_get_driver_name(struct dma_fence *fence) { struct seqno_fence *seqno_fence = to_seqno_fence(fence); return seqno_fence->ops->get_driver_name(fence); } -static const char *seqno_fence_get_timeline_name(struct fence *fence) +static const char *seqno_fence_get_timeline_name(struct dma_fence *fence) { struct seqno_fence *seqno_fence = to_seqno_fence(fence); return seqno_fence->ops->get_timeline_name(fence); } -static bool seqno_enable_signaling(struct fence *fence) +static bool seqno_enable_signaling(struct dma_fence *fence) { struct seqno_fence *seqno_fence = to_seqno_fence(fence); return seqno_fence->ops->enable_signaling(fence); } -static bool seqno_signaled(struct fence *fence) +static bool seqno_signaled(struct dma_fence *fence) { struct seqno_fence *seqno_fence = to_seqno_fence(fence); return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); } -static void seqno_release(struct fence *fence) +static void seqno_release(struct dma_fence *fence) { struct seqno_fence *f = to_seqno_fence(fence); @@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence) if (f->ops->release) f->ops->release(fence); else - fence_free(&f->base); + dma_fence_free(&f->base); } -static signed long seqno_wait(struct fence *fence, bool intr, - signed long timeout) +static signed long seqno_wait(struct dma_fence *fence, bool intr, + signed long timeout) { struct seqno_fence *f = to_seqno_fence(fence); return f->ops->wait(fence, intr, 
timeout); } -const struct fence_ops seqno_fence_ops = { +const struct dma_fence_ops seqno_fence_ops = { .get_driver_name = seqno_fence_get_driver_name, .get_timeline_name = seqno_fence_get_timeline_name, .enable_signaling = seqno_enable_signaling, diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 62e8e6dc7953..82e0ca4dd0c1 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -68,9 +68,9 @@ struct sw_sync_create_fence_data { #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) -static const struct fence_ops timeline_fence_ops; +static const struct dma_fence_ops timeline_fence_ops; -static inline struct sync_pt *fence_to_sync_pt(struct fence *fence) +static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence) { if (fence->ops != &timeline_fence_ops) return NULL; @@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name) return NULL; kref_init(&obj->kref); - obj->context = fence_context_alloc(1); + obj->context = dma_fence_context_alloc(1); strlcpy(obj->name, name, sizeof(obj->name)); INIT_LIST_HEAD(&obj->child_list_head); @@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) list_for_each_entry_safe(pt, next, &obj->active_list_head, active_list) { - if (fence_is_signaled_locked(&pt->base)) + if (dma_fence_is_signaled_locked(&pt->base)) list_del_init(&pt->active_list); } @@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size, spin_lock_irqsave(&obj->child_list_lock, flags); sync_timeline_get(obj); - fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, - obj->context, value); + dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, + obj->context, value); list_add_tail(&pt->child_list, &obj->child_list_head); INIT_LIST_HEAD(&pt->active_list); spin_unlock_irqrestore(&obj->child_list_lock, flags); return pt; } -static const char *timeline_fence_get_driver_name(struct fence *fence) +static const char *timeline_fence_get_driver_name(struct dma_fence *fence) { return "sw_sync"; } -static const char *timeline_fence_get_timeline_name(struct fence *fence) +static const char *timeline_fence_get_timeline_name(struct dma_fence *fence) { - struct sync_timeline *parent = fence_parent(fence); + struct sync_timeline *parent = dma_fence_parent(fence); return parent->name; } -static void timeline_fence_release(struct fence *fence) +static void timeline_fence_release(struct dma_fence *fence) { - struct sync_pt *pt = fence_to_sync_pt(fence); - struct sync_timeline *parent = fence_parent(fence); + struct sync_pt *pt = dma_fence_to_sync_pt(fence); + struct sync_timeline *parent = dma_fence_parent(fence); unsigned long flags; spin_lock_irqsave(fence->lock, flags); @@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence) spin_unlock_irqrestore(fence->lock, flags); sync_timeline_put(parent); - fence_free(fence); + dma_fence_free(fence); } -static bool timeline_fence_signaled(struct fence *fence) +static bool timeline_fence_signaled(struct dma_fence *fence) { - struct sync_timeline *parent = fence_parent(fence); + struct sync_timeline *parent = dma_fence_parent(fence); return (fence->seqno > parent->value) ? 
false : true; } -static bool timeline_fence_enable_signaling(struct fence *fence) +static bool timeline_fence_enable_signaling(struct dma_fence *fence) { - struct sync_pt *pt = fence_to_sync_pt(fence); - struct sync_timeline *parent = fence_parent(fence); + struct sync_pt *pt = dma_fence_to_sync_pt(fence); + struct sync_timeline *parent = dma_fence_parent(fence); if (timeline_fence_signaled(fence)) return false; @@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence) return true; } -static void timeline_fence_value_str(struct fence *fence, +static void timeline_fence_value_str(struct dma_fence *fence, char *str, int size) { snprintf(str, size, "%d", fence->seqno); } -static void timeline_fence_timeline_value_str(struct fence *fence, +static void timeline_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) { - struct sync_timeline *parent = fence_parent(fence); + struct sync_timeline *parent = dma_fence_parent(fence); snprintf(str, size, "%d", parent->value); } -static const struct fence_ops timeline_fence_ops = { +static const struct dma_fence_ops timeline_fence_ops = { .get_driver_name = timeline_fence_get_driver_name, .get_timeline_name = timeline_fence_get_timeline_name, .enable_signaling = timeline_fence_enable_signaling, .signaled = timeline_fence_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = timeline_fence_release, .fence_value_str = timeline_fence_value_str, .timeline_value_str = timeline_fence_timeline_value_str, @@ -317,7 +317,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj, sync_file = sync_file_create(&pt->base); if (!sync_file) { - fence_put(&pt->base); + dma_fence_put(&pt->base); err = -ENOMEM; goto err; } diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 2dd4c3db6caa..48b20e34fb6d 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -71,12 +71,13 @@ static const char *sync_status_str(int status) return "error"; } -static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show) +static void sync_print_fence(struct seq_file *s, + struct dma_fence *fence, bool show) { int status = 1; - struct sync_timeline *parent = fence_parent(fence); + struct sync_timeline *parent = dma_fence_parent(fence); - if (fence_is_signaled_locked(fence)) + if (dma_fence_is_signaled_locked(fence)) status = fence->status; seq_printf(s, " %s%sfence %s", @@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s, int i; seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, - sync_status_str(!fence_is_signaled(sync_file->fence))); + sync_status_str(!dma_fence_is_signaled(sync_file->fence))); - if (fence_is_array(sync_file->fence)) { - struct fence_array *array = to_fence_array(sync_file->fence); + if (dma_fence_is_array(sync_file->fence)) { + struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); for (i = 0; i < array->num_fences; ++i) sync_print_fence(s, array->fences[i], true); diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index d269aa6783aa..26fe8b9907b3 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include @@ -45,10 +45,9 @@ struct sync_timeline { struct list_head sync_timeline_list; }; -static inline struct sync_timeline *fence_parent(struct fence *fence) +static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence) { - return 
container_of(fence->lock, struct sync_timeline, - child_list_lock); + return container_of(fence->lock, struct sync_timeline, child_list_lock); } /** @@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence) * @active_list: sync timeline active child's list */ struct sync_pt { - struct fence base; + struct dma_fence base; struct list_head child_list; struct list_head active_list; }; diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 235f8ac113cc..69d8ef98d34c 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -54,7 +54,7 @@ err: return NULL; } -static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) +static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) { struct sync_file *sync_file; @@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) * takes ownership of @fence. The sync_file can be released with * fput(sync_file->file). Returns the sync_file or NULL in case of error. */ -struct sync_file *sync_file_create(struct fence *fence) +struct sync_file *sync_file_create(struct dma_fence *fence) { struct sync_file *sync_file; @@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence) if (!sync_file) return NULL; - sync_file->fence = fence_get(fence); + sync_file->fence = dma_fence_get(fence); snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", fence->ops->get_driver_name(fence), @@ -121,16 +121,16 @@ err: * Ensures @fd references a valid sync_file and returns a fence that * represents all fence in the sync_file. On error NULL is returned. */ -struct fence *sync_file_get_fence(int fd) +struct dma_fence *sync_file_get_fence(int fd) { struct sync_file *sync_file; - struct fence *fence; + struct dma_fence *fence; sync_file = sync_file_fdget(fd); if (!sync_file) return NULL; - fence = fence_get(sync_file->fence); + fence = dma_fence_get(sync_file->fence); fput(sync_file->file); return fence; @@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd) EXPORT_SYMBOL(sync_file_get_fence); static int sync_file_set_fence(struct sync_file *sync_file, - struct fence **fences, int num_fences) + struct dma_fence **fences, int num_fences) { - struct fence_array *array; + struct dma_fence_array *array; /* * The reference for the fences in the new sync_file and held * in add_fence() during the merge procedure, so for num_fences == 1 * we already own a new reference to the fence. For num_fence > 1 - * we own the reference of the fence_array creation. + * we own the reference of the dma_fence_array creation. 
*/ if (num_fences == 1) { sync_file->fence = fences[0]; kfree(fences); } else { - array = fence_array_create(num_fences, fences, - fence_context_alloc(1), 1, false); + array = dma_fence_array_create(num_fences, fences, + dma_fence_context_alloc(1), + 1, false); if (!array) return -ENOMEM; @@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file, return 0; } -static struct fence **get_fences(struct sync_file *sync_file, int *num_fences) +static struct dma_fence **get_fences(struct sync_file *sync_file, + int *num_fences) { - if (fence_is_array(sync_file->fence)) { - struct fence_array *array = to_fence_array(sync_file->fence); + if (dma_fence_is_array(sync_file->fence)) { + struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); *num_fences = array->num_fences; return array->fences; @@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences) return &sync_file->fence; } -static void add_fence(struct fence **fences, int *i, struct fence *fence) +static void add_fence(struct dma_fence **fences, + int *i, struct dma_fence *fence) { fences[*i] = fence; - if (!fence_is_signaled(fence)) { - fence_get(fence); + if (!dma_fence_is_signaled(fence)) { + dma_fence_get(fence); (*i)++; } } @@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; - struct fence **fences, **nfences, **a_fences, **b_fences; + struct dma_fence **fences, **nfences, **a_fences, **b_fences; int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; sync_file = sync_file_alloc(); @@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, * and sync_file_create, this is a reasonable assumption. */ for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { - struct fence *pt_a = a_fences[i_a]; - struct fence *pt_b = b_fences[i_b]; + struct dma_fence *pt_a = a_fences[i_a]; + struct dma_fence *pt_b = b_fences[i_b]; if (pt_a->context < pt_b->context) { add_fence(fences, &i, pt_a); @@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, add_fence(fences, &i, b_fences[i_b]); if (i == 0) - fences[i++] = fence_get(a_fences[0]); + fences[i++] = dma_fence_get(a_fences[0]); if (num_fences > i) { nfences = krealloc(fences, i * sizeof(*fences), @@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref) kref); if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) - fence_remove_callback(sync_file->fence, &sync_file->cb); - fence_put(sync_file->fence); + dma_fence_remove_callback(sync_file->fence, &sync_file->cb); + dma_fence_put(sync_file->fence); kfree(sync_file); } @@ -307,12 +310,12 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait) if (!poll_does_not_wait(wait) && !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { - if (fence_add_callback(sync_file->fence, &sync_file->cb, - fence_check_cb_func) < 0) + if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, + fence_check_cb_func) < 0) wake_up_all(&sync_file->wq); } - return fence_is_signaled(sync_file->fence) ? POLLIN : 0; + return dma_fence_is_signaled(sync_file->fence) ? 
POLLIN : 0; } static long sync_file_ioctl_merge(struct sync_file *sync_file, @@ -370,14 +373,14 @@ err_put_fd: return err; } -static void sync_fill_fence_info(struct fence *fence, +static void sync_fill_fence_info(struct dma_fence *fence, struct sync_fence_info *info) { strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), sizeof(info->obj_name)); strlcpy(info->driver_name, fence->ops->get_driver_name(fence), sizeof(info->driver_name)); - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) info->status = fence->status >= 0 ? 1 : fence->status; else info->status = 0; @@ -389,7 +392,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, { struct sync_file_info info; struct sync_fence_info *fence_info = NULL; - struct fence **fences; + struct dma_fence **fences; __u32 size; int num_fences, ret, i; @@ -429,7 +432,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, no_fences: strlcpy(info.name, sync_file->name, sizeof(info.name)); - info.status = fence_is_signaled(sync_file->fence); + info.status = dma_fence_is_signaled(sync_file->fence); info.num_fences = num_fences; if (copy_to_user((void __user *)arg, &info, sizeof(info))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 039b57e4644c..283d05927d15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include @@ -378,7 +378,7 @@ struct amdgpu_fence_driver { struct timer_list fallback_timer; unsigned num_fences_mask; spinlock_t lock; - struct fence **fences; + struct dma_fence **fences; }; /* some special values for the owner field */ @@ -399,7 +399,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, unsigned irq_type); void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); void amdgpu_fence_driver_resume(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence); void amdgpu_fence_process(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); @@ -427,7 +427,7 @@ struct amdgpu_bo_va_mapping { struct amdgpu_bo_va { /* protected by bo being reserved */ struct list_head bo_list; - struct fence *last_pt_update; + struct dma_fence *last_pt_update; unsigned ref_count; /* protected by vm mutex and spinlock */ @@ -543,7 +543,7 @@ struct amdgpu_sa_bo { struct amdgpu_sa_manager *manager; unsigned soffset; unsigned eoffset; - struct fence *fence; + struct dma_fence *fence; }; /* @@ -566,19 +566,19 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, */ struct amdgpu_sync { DECLARE_HASHTABLE(fences, 4); - struct fence *last_vm_update; + struct dma_fence *last_vm_update; }; void amdgpu_sync_create(struct amdgpu_sync *sync); int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct fence *f); + struct dma_fence *f); int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, void *owner); -struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, +struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); -struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); +struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); void amdgpu_sync_free(struct amdgpu_sync *sync); int 
amdgpu_sync_init(void); void amdgpu_sync_fini(void); @@ -703,10 +703,10 @@ struct amdgpu_flip_work { uint64_t base; struct drm_pending_vblank_event *event; struct amdgpu_bo *old_abo; - struct fence *excl; + struct dma_fence *excl; unsigned shared_count; - struct fence **shared; - struct fence_cb cb; + struct dma_fence **shared; + struct dma_fence_cb cb; bool async; }; @@ -742,7 +742,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, - struct fence **f); + struct dma_fence **f); struct amdgpu_ring { struct amdgpu_device *adev; @@ -844,7 +844,7 @@ struct amdgpu_vm { /* contains the page directory */ struct amdgpu_bo *page_directory; unsigned max_pde_used; - struct fence *page_directory_fence; + struct dma_fence *page_directory_fence; uint64_t last_eviction_counter; /* array of page tables, one for each page directory entry */ @@ -865,14 +865,14 @@ struct amdgpu_vm { struct amdgpu_vm_id { struct list_head list; - struct fence *first; + struct dma_fence *first; struct amdgpu_sync active; - struct fence *last_flush; + struct dma_fence *last_flush; atomic64_t owner; uint64_t pd_gpu_addr; /* last flushed PD/PT update */ - struct fence *flushed_updates; + struct dma_fence *flushed_updates; uint32_t current_gpu_reset_count; @@ -921,7 +921,7 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, struct fence *fence, + struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_job *job); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); @@ -957,7 +957,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_ctx_ring { uint64_t sequence; - struct fence **fences; + struct dma_fence **fences; struct amd_sched_entity entity; }; @@ -966,7 +966,7 @@ struct amdgpu_ctx { struct amdgpu_device *adev; unsigned reset_counter; spinlock_t ring_lock; - struct fence **fences; + struct dma_fence **fences; struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; bool preamble_presented; }; @@ -982,8 +982,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct fence *fence); -struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, + struct dma_fence *fence); +struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, uint64_t seq); int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, @@ -1181,10 +1181,10 @@ struct amdgpu_gfx { int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, - struct fence *f); + struct dma_fence *f); int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, - struct amdgpu_ib *ib, struct fence *last_vm_update, - struct amdgpu_job *job, struct fence **f); + struct amdgpu_ib *ib, struct dma_fence *last_vm_update, + struct amdgpu_job *job, struct dma_fence **f); int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct 
amdgpu_device *adev); @@ -1225,7 +1225,7 @@ struct amdgpu_cs_parser { struct amdgpu_bo_list *bo_list; struct amdgpu_bo_list_entry vm_pd; struct list_head validated; - struct fence *fence; + struct dma_fence *fence; uint64_t bytes_moved_threshold; uint64_t bytes_moved; struct amdgpu_bo_list_entry *evictable; @@ -1245,7 +1245,7 @@ struct amdgpu_job { struct amdgpu_ring *ring; struct amdgpu_sync sync; struct amdgpu_ib *ibs; - struct fence *fence; /* the hw fence */ + struct dma_fence *fence; /* the hw fence */ uint32_t preamble_status; uint32_t num_ibs; void *owner; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 345305235349..cc97eee93226 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, { unsigned long start_jiffies; unsigned long end_jiffies; - struct fence *fence = NULL; + struct dma_fence *fence = NULL; int i, r; start_jiffies = jiffies; @@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, false); if (r) goto exit_do_move; - r = fence_wait(fence, false); + r = dma_fence_wait(fence, false); if (r) goto exit_do_move; - fence_put(fence); + dma_fence_put(fence); } end_jiffies = jiffies; r = jiffies_to_msecs(end_jiffies - start_jiffies); exit_do_move: if (fence) - fence_put(fence); + dma_fence_put(fence); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b0f6e6957536..5d582265e929 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -719,7 +719,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); } - fence_put(parser->fence); + dma_fence_put(parser->fence); if (parser->ctx) amdgpu_ctx_put(parser->ctx); @@ -756,7 +756,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (p->bo_list) { for (i = 0; i < p->bo_list->num_entries; i++) { - struct fence *f; + struct dma_fence *f; /* ignore duplicates */ bo = p->bo_list->array[i].robj; @@ -956,7 +956,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, for (j = 0; j < num_deps; ++j) { struct amdgpu_ring *ring; struct amdgpu_ctx *ctx; - struct fence *fence; + struct dma_fence *fence; r = amdgpu_cs_get_ring(adev, deps[j].ip_type, deps[j].ip_instance, @@ -978,7 +978,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, } else if (fence) { r = amdgpu_sync_fence(adev, &p->job->sync, fence); - fence_put(fence); + dma_fence_put(fence); amdgpu_ctx_put(ctx); if (r) return r; @@ -1008,7 +1008,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; job->fence_ctx = entity->fence_context; - p->fence = fence_get(&job->base.s_fence->finished); + p->fence = dma_fence_get(&job->base.s_fence->finished); cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; amdgpu_job_free_resources(job); @@ -1091,7 +1091,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); struct amdgpu_ring *ring = NULL; struct amdgpu_ctx *ctx; - struct fence *fence; + struct dma_fence *fence; long r; r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, @@ -1107,8 +1107,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void 
*data, if (IS_ERR(fence)) r = PTR_ERR(fence); else if (fence) { - r = fence_wait_timeout(fence, true, timeout); - fence_put(fence); + r = dma_fence_wait_timeout(fence, true, timeout); + dma_fence_put(fence); } else r = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index a5e2fcbef0f0..99bbc860322f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) kref_init(&ctx->refcount); spin_lock_init(&ctx->ring_lock); ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, - sizeof(struct fence*), GFP_KERNEL); + sizeof(struct dma_fence*), GFP_KERNEL); if (!ctx->fences) return -ENOMEM; @@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) for (j = 0; j < amdgpu_sched_jobs; ++j) - fence_put(ctx->rings[i].fences[j]); + dma_fence_put(ctx->rings[i].fences[j]); kfree(ctx->fences); ctx->fences = NULL; @@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) } uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct fence *fence) + struct dma_fence *fence) { struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; uint64_t seq = cring->sequence; unsigned idx = 0; - struct fence *other = NULL; + struct dma_fence *other = NULL; idx = seq & (amdgpu_sched_jobs - 1); other = cring->fences[idx]; if (other) { signed long r; - r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); + r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); if (r < 0) DRM_ERROR("Error (%ld) waiting for fence!\n", r); } - fence_get(fence); + dma_fence_get(fence); spin_lock(&ctx->ring_lock); cring->fences[idx] = fence; cring->sequence++; spin_unlock(&ctx->ring_lock); - fence_put(other); + dma_fence_put(other); return seq; } -struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, - struct amdgpu_ring *ring, uint64_t seq) +struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, + struct amdgpu_ring *ring, uint64_t seq) { struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; - struct fence *fence; + struct dma_fence *fence; spin_lock(&ctx->ring_lock); @@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, return NULL; } - fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); + fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); spin_unlock(&ctx->ring_lock); return fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b4f4a9239069..0262b43c8f0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1599,7 +1599,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->vm_manager.vm_pte_funcs = NULL; adev->vm_manager.vm_pte_num_rings = 0; adev->gart.gart_funcs = NULL; - adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); + adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); adev->smc_rreg = &amdgpu_invalid_rreg; adev->smc_wreg = &amdgpu_invalid_wreg; @@ -2193,7 +2193,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev) static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_bo *bo, - struct fence **fence) + struct dma_fence **fence) { uint32_t domain; int r; @@ -2312,30 +2312,30 @@ retry: if (need_full_reset && amdgpu_need_backup(adev)) { struct amdgpu_ring *ring = 
adev->mman.buffer_funcs_ring; struct amdgpu_bo *bo, *tmp; - struct fence *fence = NULL, *next = NULL; + struct dma_fence *fence = NULL, *next = NULL; DRM_INFO("recover vram bo from shadow\n"); mutex_lock(&adev->shadow_list_lock); list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); if (fence) { - r = fence_wait(fence, false); + r = dma_fence_wait(fence, false); if (r) { WARN(r, "recovery from shadow isn't comleted\n"); break; } } - fence_put(fence); + dma_fence_put(fence); fence = next; } mutex_unlock(&adev->shadow_list_lock); if (fence) { - r = fence_wait(fence, false); + r = dma_fence_wait(fence, false); if (r) WARN(r, "recovery from shadow isn't comleted\n"); } - fence_put(fence); + dma_fence_put(fence); } for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 083e2b429872..075c0d7db205 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -35,29 +35,29 @@ #include #include -static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) +static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb) { struct amdgpu_flip_work *work = container_of(cb, struct amdgpu_flip_work, cb); - fence_put(f); + dma_fence_put(f); schedule_work(&work->flip_work.work); } static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, - struct fence **f) + struct dma_fence **f) { - struct fence *fence= *f; + struct dma_fence *fence= *f; if (fence == NULL) return false; *f = NULL; - if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) + if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) return true; - fence_put(fence); + dma_fence_put(fence); return false; } @@ -244,9 +244,9 @@ unreserve: cleanup: amdgpu_bo_unref(&work->old_abo); - fence_put(work->excl); + dma_fence_put(work->excl); for (i = 0; i < work->shared_count; ++i) - fence_put(work->shared[i]); + dma_fence_put(work->shared[i]); kfree(work->shared); kfree(work); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3a2e42f4b897..57552c79ec58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -48,7 +48,7 @@ */ struct amdgpu_fence { - struct fence base; + struct dma_fence base; /* RB, DMA, etc. */ struct amdgpu_ring *ring; @@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void) /* * Cast helper */ -static const struct fence_ops amdgpu_fence_ops; -static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f) +static const struct dma_fence_ops amdgpu_fence_ops; +static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f) { struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); @@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. 
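A typical emit-and-wait sequence on a ring, using only calls visible in this patch (sketch; "ring" is an assumed amdgpu_ring):

	struct dma_fence *f = NULL;
	int r;

	r = amdgpu_fence_emit(ring, &f);
	if (r)
		return r;		/* -ENOMEM */

	r = dma_fence_wait(f, false);	/* uninterruptible */
	dma_fence_put(f);
	return r;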
*/ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_fence *fence; - struct fence *old, **ptr; + struct dma_fence *old, **ptr; uint32_t seq; fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); @@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) seq = ++ring->fence_drv.sync_seq; fence->ring = ring; - fence_init(&fence->base, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, - seq); + dma_fence_init(&fence->base, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, + seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, AMDGPU_FENCE_FLAG_INT); @@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) * emitting the fence would mess up the hardware ring buffer. */ old = rcu_dereference_protected(*ptr, 1); - if (old && !fence_is_signaled(old)) { + if (old && !dma_fence_is_signaled(old)) { DRM_INFO("rcu slot is busy\n"); - fence_wait(old, false); + dma_fence_wait(old, false); } - rcu_assign_pointer(*ptr, fence_get(&fence->base)); + rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); *f = &fence->base; @@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) seq &= drv->num_fences_mask; do { - struct fence *fence, **ptr; + struct dma_fence *fence, **ptr; ++last_seq; last_seq &= drv->num_fences_mask; @@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) if (!fence) continue; - r = fence_signal(fence); + r = dma_fence_signal(fence); if (!r) - FENCE_TRACE(fence, "signaled from irq context\n"); + DMA_FENCE_TRACE(fence, "signaled from irq context\n"); else BUG(); - fence_put(fence); + dma_fence_put(fence); } while (last_seq != seq); } @@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg) int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) { uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); - struct fence *fence, **ptr; + struct dma_fence *fence, **ptr; int r; if (!seq) @@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; rcu_read_lock(); fence = rcu_dereference(*ptr); - if (!fence || !fence_get_rcu(fence)) { + if (!fence || !dma_fence_get_rcu(fence)) { rcu_read_unlock(); return 0; } rcu_read_unlock(); - r = fence_wait(fence, false); - fence_put(fence); + r = dma_fence_wait(fence, false); + dma_fence_put(fence); return r; } @@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) amd_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) - fence_put(ring->fence_drv.fences[j]); + dma_fence_put(ring->fence_drv.fences[j]); kfree(ring->fence_drv.fences); ring->fence_drv.fences = NULL; ring->fence_drv.initialized = false; @@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev) * Common fence implementation */ -static const char *amdgpu_fence_get_driver_name(struct fence *fence) +static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence) { return "amdgpu"; } -static const char *amdgpu_fence_get_timeline_name(struct fence *f) +static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) { struct amdgpu_fence *fence = to_amdgpu_fence(f); return (const char *)fence->ring->name; @@ -560,7 +560,7 @@ static const char 
*amdgpu_fence_get_timeline_name(struct fence *f) * to fence_queue that checks if this fence is signaled, and if so it * signals the fence and removes itself. */ -static bool amdgpu_fence_enable_signaling(struct fence *f) +static bool amdgpu_fence_enable_signaling(struct dma_fence *f) { struct amdgpu_fence *fence = to_amdgpu_fence(f); struct amdgpu_ring *ring = fence->ring; @@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) if (!timer_pending(&ring->fence_drv.fallback_timer)) amdgpu_fence_schedule_fallback(ring); - FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); + DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); return true; } @@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) */ static void amdgpu_fence_free(struct rcu_head *rcu) { - struct fence *f = container_of(rcu, struct fence, rcu); + struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); struct amdgpu_fence *fence = to_amdgpu_fence(f); kmem_cache_free(amdgpu_fence_slab, fence); } @@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu) * This function is called when the reference count becomes zero. * It just RCU schedules freeing up the fence. */ -static void amdgpu_fence_release(struct fence *f) +static void amdgpu_fence_release(struct dma_fence *f) { call_rcu(&f->rcu, amdgpu_fence_free); } -static const struct fence_ops amdgpu_fence_ops = { +static const struct dma_fence_ops amdgpu_fence_ops = { .get_driver_name = amdgpu_fence_get_driver_name, .get_timeline_name = amdgpu_fence_get_timeline_name, .enable_signaling = amdgpu_fence_enable_signaling, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = amdgpu_fence_release, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 6a6c86c9c169..c3672dfcfd6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Free an IB (all asics). */ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, - struct fence *f) + struct dma_fence *f) { amdgpu_sa_bo_free(adev, &ib->sa_bo, f); } @@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, * to SI there was just a DE IB. 
*/ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, - struct amdgpu_ib *ibs, struct fence *last_vm_update, - struct amdgpu_job *job, struct fence **f) + struct amdgpu_ib *ibs, struct dma_fence *last_vm_update, + struct amdgpu_job *job, struct dma_fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 8c5807994073..a0de6286c453 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, void amdgpu_job_free_resources(struct amdgpu_job *job) { - struct fence *f; + struct dma_fence *f; unsigned i; /* use sched fence if available */ @@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job) { struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); - fence_put(job->fence); + dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); kfree(job); } @@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job) { amdgpu_job_free_resources(job); - fence_put(job->fence); + dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); kfree(job); } int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, - struct fence **f) + struct dma_fence **f) { int r; job->ring = ring; @@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, job->owner = owner; job->fence_ctx = entity->fence_context; - *f = fence_get(&job->base.s_fence->finished); + *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); amd_sched_entity_push_job(&job->base); return 0; } -static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) +static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) { struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_vm *vm = job->vm; - struct fence *fence = amdgpu_sync_get_fence(&job->sync); + struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync); if (fence == NULL && vm && !job->vm_id) { struct amdgpu_ring *ring = job->ring; @@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) return fence; } -static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) +static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) { - struct fence *fence = NULL; + struct dma_fence *fence = NULL; struct amdgpu_job *job; int r; @@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) DRM_ERROR("Error scheduling IBs (%d)\n", r); /* if gpu reset, hw fence will be replaced here */ - fence_put(job->fence); - job->fence = fence_get(fence); + dma_fence_put(job->fence); + job->fence = dma_fence_get(fence); amdgpu_job_free_resources(job); return fence; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index aa074fac0c7f..55e142a5ff5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -383,7 +383,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { - struct fence *fence; + struct dma_fence *fence; if (adev->mman.buffer_funcs_ring == NULL || !adev->mman.buffer_funcs_ring->ready) { @@ -403,9 +403,9 @@ int amdgpu_bo_create_restricted(struct 
amdgpu_device *adev, amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); amdgpu_bo_fence(bo, fence, false); amdgpu_bo_unreserve(bo); - fence_put(bo->tbo.moving); - bo->tbo.moving = fence_get(fence); - fence_put(fence); + dma_fence_put(bo->tbo.moving); + bo->tbo.moving = dma_fence_get(fence); + dma_fence_put(fence); } *bo_ptr = bo; @@ -491,7 +491,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_bo *bo, struct reservation_object *resv, - struct fence **fence, + struct dma_fence **fence, bool direct) { @@ -523,7 +523,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_bo *bo, struct reservation_object *resv, - struct fence **fence, + struct dma_fence **fence, bool direct) { @@ -926,7 +926,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) * @shared: true if fence should be added shared * */ -void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, +void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared) { struct reservation_object *resv = bo->tbo.resv; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 8255034d73eb..3e785ed3cb4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -156,19 +156,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); -void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, +void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_bo *bo, struct reservation_object *resv, - struct fence **fence, bool direct); + struct dma_fence **fence, bool direct); int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_bo *bo, struct reservation_object *resv, - struct fence **fence, + struct dma_fence **fence, bool direct); @@ -200,7 +200,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, unsigned size, unsigned align); void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, - struct fence *fence); + struct dma_fence *fence); #if defined(CONFIG_DEBUG_FS) void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index d8af37a845f4..fd26c4b8d793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) } list_del_init(&sa_bo->olist); list_del_init(&sa_bo->flist); - fence_put(sa_bo->fence); + dma_fence_put(sa_bo->fence); kfree(sa_bo); } @@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { if (sa_bo->fence == NULL || - !fence_is_signaled(sa_bo->fence)) { + !dma_fence_is_signaled(sa_bo->fence)) { return; } amdgpu_sa_bo_remove_locked(sa_bo); @@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager, } static bool amdgpu_sa_bo_next_hole(struct 
amdgpu_sa_manager *sa_manager, - struct fence **fences, + struct dma_fence **fences, unsigned *tries) { struct amdgpu_sa_bo *best_bo = NULL; @@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, sa_bo = list_first_entry(&sa_manager->flist[i], struct amdgpu_sa_bo, flist); - if (!fence_is_signaled(sa_bo->fence)) { + if (!dma_fence_is_signaled(sa_bo->fence)) { fences[i] = sa_bo->fence; continue; } @@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, struct amdgpu_sa_bo **sa_bo, unsigned size, unsigned align) { - struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; + struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS]; unsigned count; int i, r; @@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) if (fences[i]) - fences[count++] = fence_get(fences[i]); + fences[count++] = dma_fence_get(fences[i]); if (count) { spin_unlock(&sa_manager->wq.lock); - t = fence_wait_any_timeout(fences, count, false, - MAX_SCHEDULE_TIMEOUT); + t = dma_fence_wait_any_timeout(fences, count, false, + MAX_SCHEDULE_TIMEOUT); for (i = 0; i < count; ++i) - fence_put(fences[i]); + dma_fence_put(fences[i]); r = (t > 0) ? 0 : t; spin_lock(&sa_manager->wq.lock); @@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, } void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, - struct fence *fence) + struct dma_fence *fence) { struct amdgpu_sa_manager *sa_manager; @@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, sa_manager = (*sa_bo)->manager; spin_lock(&sa_manager->wq.lock); - if (fence && !fence_is_signaled(fence)) { + if (fence && !dma_fence_is_signaled(fence)) { uint32_t idx; - (*sa_bo)->fence = fence_get(fence); + (*sa_bo)->fence = dma_fence_get(fence); idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS; list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 5c8d3022fb87..ed814e6d0207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -34,7 +34,7 @@ struct amdgpu_sync_entry { struct hlist_node node; - struct fence *fence; + struct dma_fence *fence; }; static struct kmem_cache *amdgpu_sync_slab; @@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync) * * Test if the fence was issued by us. */ -static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) +static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, + struct dma_fence *f) { struct amd_sched_fence *s_fence = to_amd_sched_fence(f); @@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) * * Extract who originally created the fence. */ -static void *amdgpu_sync_get_owner(struct fence *f) +static void *amdgpu_sync_get_owner(struct dma_fence *f) { struct amd_sched_fence *s_fence = to_amd_sched_fence(f); @@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f) * * Either keep the existing fence or the new one, depending which one is later. 
*/ -static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence) +static void amdgpu_sync_keep_later(struct dma_fence **keep, + struct dma_fence *fence) { - if (*keep && fence_is_later(*keep, fence)) + if (*keep && dma_fence_is_later(*keep, fence)) return; - fence_put(*keep); - *keep = fence_get(fence); + dma_fence_put(*keep); + *keep = dma_fence_get(fence); } /** @@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence) * Tries to add the fence to an existing hash entry. Returns true when an entry * was found, false otherwise. */ -static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f) +static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f) { struct amdgpu_sync_entry *e; @@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f) * */ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct fence *f) + struct dma_fence *f) { struct amdgpu_sync_entry *e; @@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, return -ENOMEM; hash_add(sync->fences, &e->node, f->context); - e->fence = fence_get(f); + e->fence = dma_fence_get(f); return 0; } @@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, void *owner) { struct reservation_object_list *flist; - struct fence *f; + struct dma_fence *f; void *fence_owner; unsigned i; int r = 0; @@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, * Returns the next fence not signaled yet without removing it from the sync * object. */ -struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, - struct amdgpu_ring *ring) +struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, + struct amdgpu_ring *ring) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; int i; hash_for_each_safe(sync->fences, i, tmp, e, node) { - struct fence *f = e->fence; + struct dma_fence *f = e->fence; struct amd_sched_fence *s_fence = to_amd_sched_fence(f); if (ring && s_fence) { @@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, * when they are scheduled. */ if (s_fence->sched == &ring->sched) { - if (fence_is_signaled(&s_fence->scheduled)) + if (dma_fence_is_signaled(&s_fence->scheduled)) continue; return &s_fence->scheduled; } } - if (fence_is_signaled(f)) { + if (dma_fence_is_signaled(f)) { hash_del(&e->node); - fence_put(f); + dma_fence_put(f); kmem_cache_free(amdgpu_sync_slab, e); continue; } @@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, * * Get and removes the next fence from the sync object not signaled yet. 
*/ -struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) +struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; - struct fence *f; + struct dma_fence *f; int i; hash_for_each_safe(sync->fences, i, tmp, e, node) { @@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) hash_del(&e->node); kmem_cache_free(amdgpu_sync_slab, e); - if (!fence_is_signaled(f)) + if (!dma_fence_is_signaled(f)) return f; - fence_put(f); + dma_fence_put(f); } return NULL; } @@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync) hash_for_each_safe(sync->fences, i, tmp, e, node) { hash_del(&e->node); - fence_put(e->fence); + dma_fence_put(e->fence); kmem_cache_free(amdgpu_sync_slab, e); } - fence_put(sync->last_vm_update); + dma_fence_put(sync->last_vm_update); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index b827c75e95de..e05a24325eeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) void *gtt_map, *vram_map; void **gtt_start, **gtt_end; void **vram_start, **vram_end; - struct fence *fence = NULL; + struct dma_fence *fence = NULL; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, @@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - r = fence_wait(fence, false); + r = dma_fence_wait(fence, false); if (r) { DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); goto out_lclean_unpin; } - fence_put(fence); + dma_fence_put(fence); r = amdgpu_bo_kmap(vram_obj, &vram_map); if (r) { @@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - r = fence_wait(fence, false); + r = dma_fence_wait(fence, false); if (r) { DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); goto out_lclean_unpin; } - fence_put(fence); + dma_fence_put(fence); r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); if (r) { @@ -216,7 +216,7 @@ out_lclean: amdgpu_bo_unref(&gtt_obj[i]); } if (fence) - fence_put(fence); + dma_fence_put(fence); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 067e5e683bb3..bb964a8ff938 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __field(struct amdgpu_device *, adev) __field(struct amd_sched_job *, sched_job) __field(struct amdgpu_ib *, ib) - __field(struct fence *, fence) + __field(struct dma_fence *, fence) __field(char *, ring_name) __field(u32, num_ibs) ), @@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __field(struct amdgpu_device *, adev) __field(struct amd_sched_job *, sched_job) __field(struct amdgpu_ib *, ib) - __field(struct fence *, fence) + __field(struct dma_fence *, fence) __field(char *, ring_name) __field(u32, num_ibs) ), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dcaf691f56b5..a743aeabc767 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -268,7 +268,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, struct amdgpu_device *adev; struct amdgpu_ring *ring; uint64_t old_start, new_start; - struct fence *fence; + struct dma_fence *fence; int r; adev = amdgpu_get_adev(bo->bdev); @@
-316,7 +316,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, return r; r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); - fence_put(fence); + dma_fence_put(fence); return r; } @@ -1247,7 +1247,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct fence **fence, bool direct_submit) + struct dma_fence **fence, bool direct_submit) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -1294,7 +1294,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, if (direct_submit) { r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, NULL, fence); - job->fence = fence_get(*fence); + job->fence = dma_fence_get(*fence); if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); amdgpu_job_free(job); @@ -1315,7 +1315,7 @@ error_free: int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct reservation_object *resv, - struct fence **fence) + struct dma_fence **fence) { struct amdgpu_device *adev = bo->adev; struct amdgpu_job *job; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 9812c805326c..3f293e189378 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -77,11 +77,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct fence **fence, bool direct_submit); + struct dma_fence **fence, bool direct_submit); int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct reservation_object *resv, - struct fence **fence); + struct dma_fence **fence); int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e3281cacc586..0f6575e7ef8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) for (i = 0; i < adev->uvd.max_handles; ++i) { uint32_t handle = atomic_read(&adev->uvd.handles[i]); if (handle != 0 && adev->uvd.filp[i] == filp) { - struct fence *fence; + struct dma_fence *fence; r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); @@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) continue; } - fence_wait(fence, false); - fence_put(fence); + dma_fence_wait(fence, false); + dma_fence_put(fence); adev->uvd.filp[i] = NULL; atomic_set(&adev->uvd.handles[i], 0); @@ -909,14 +909,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) } static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, - bool direct, struct fence **fence) + bool direct, struct dma_fence **fence) { struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct list_head head; struct amdgpu_job *job; struct amdgpu_ib *ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; struct amdgpu_device *adev = ring->adev; uint64_t addr; int i, r; @@ -960,7 +960,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (direct) { r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = fence_get(f); + job->fence = dma_fence_get(f); if (r) goto err_free; @@ -975,9 +975,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ttm_eu_fence_buffer_objects(&ticket, 
&head, f); if (fence) - *fence = fence_get(f); + *fence = dma_fence_get(f); amdgpu_bo_unref(&bo); - fence_put(f); + dma_fence_put(f); return 0; @@ -993,7 +993,7 @@ err: crash the vcpu so just try to emmit a dummy create/destroy msg to avoid this */ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence) + struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo; @@ -1042,7 +1042,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, } int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct fence **fence) + bool direct, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo; @@ -1128,7 +1128,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) */ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct fence *fence; + struct dma_fence *fence; long r; r = amdgpu_uvd_get_create_msg(ring, 1, NULL); @@ -1143,7 +1143,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto error; } - r = fence_wait_timeout(fence, false, timeout); + r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; @@ -1154,7 +1154,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = 0; } - fence_put(fence); + dma_fence_put(fence); error: return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index c850009602d1..6249ba1bde2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev); int amdgpu_uvd_suspend(struct amdgpu_device *adev); int amdgpu_uvd_resume(struct amdgpu_device *adev); int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence); + struct dma_fence **fence); int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct fence **fence); + bool direct, struct dma_fence **fence); void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 7fe8fd884f06..f0f8afb85585 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -395,12 +395,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * Open up a stream for HW test */ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence) + struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; uint64_t dummy; int i, r; @@ -450,14 +450,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = fence_get(f); + job->fence = dma_fence_get(f); if (r) goto err; amdgpu_job_free(job); if (fence) - *fence = fence_get(f); - fence_put(f); + *fence = dma_fence_get(f); + dma_fence_put(f); return 0; err: @@ -476,12 +476,12 @@ err: * Close up a stream for HW test or if userspace failed to do so */ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct fence **fence) + bool direct, struct 
dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -513,7 +513,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, if (direct) { r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); - job->fence = fence_get(f); + job->fence = dma_fence_get(f); if (r) goto err; @@ -526,8 +526,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, } if (fence) - *fence = fence_get(f); - fence_put(f); + *fence = dma_fence_get(f); + dma_fence_put(f); return 0; err: @@ -883,7 +883,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) */ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct fence *fence = NULL; + struct dma_fence *fence = NULL; long r; /* skip vce ring1/2 ib test for now, since it's not reliable */ @@ -902,7 +902,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto error; } - r = fence_wait_timeout(fence, false, timeout); + r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; @@ -913,6 +913,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = 0; } error: - fence_put(fence); + dma_fence_put(fence); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 12729d2852df..566c29ddeeb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct fence **fence); + struct dma_fence **fence); int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct fence **fence); + bool direct, struct dma_fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 06f24322e7c3..22cabb5456e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -25,7 +25,7 @@ * Alex Deucher * Jerome Glisse */ -#include <linux/fence-array.h> +#include <linux/dma-fence-array.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -194,14 +194,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, * Allocate an id for the vm, adding fences to the sync obj as necessary.
*/ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, struct fence *fence, + struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; uint64_t fence_context = adev->fence_context + ring->idx; - struct fence *updates = sync->last_vm_update; + struct dma_fence *updates = sync->last_vm_update; struct amdgpu_vm_id *id, *idle; - struct fence **fences; + struct dma_fence **fences; unsigned i; int r = 0; @@ -225,17 +225,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (&idle->list == &adev->vm_manager.ids_lru) { u64 fence_context = adev->vm_manager.fence_context + ring->idx; unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; - struct fence_array *array; + struct dma_fence_array *array; unsigned j; for (j = 0; j < i; ++j) - fence_get(fences[j]); + dma_fence_get(fences[j]); - array = fence_array_create(i, fences, fence_context, + array = dma_fence_array_create(i, fences, fence_context, seqno, true); if (!array) { for (j = 0; j < i; ++j) - fence_put(fences[j]); + dma_fence_put(fences[j]); kfree(fences); r = -ENOMEM; goto error; @@ -243,7 +243,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, r = amdgpu_sync_fence(ring->adev, sync, &array->base); - fence_put(&array->base); + dma_fence_put(&array->base); if (r) goto error; @@ -257,7 +257,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Check if we can use a VMID already assigned to this VM */ i = ring->idx; do { - struct fence *flushed; + struct dma_fence *flushed; id = vm->ids[i++]; if (i == AMDGPU_MAX_RINGS) @@ -279,12 +279,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, continue; if (id->last_flush->context != fence_context && - !fence_is_signaled(id->last_flush)) + !dma_fence_is_signaled(id->last_flush)) continue; flushed = id->flushed_updates; if (updates && - (!flushed || fence_is_later(updates, flushed))) + (!flushed || dma_fence_is_later(updates, flushed))) continue; /* Good we can use this VMID. 
Remember this submission as @@ -315,14 +315,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r) goto error; - fence_put(id->first); - id->first = fence_get(fence); + dma_fence_put(id->first); + id->first = dma_fence_get(fence); - fence_put(id->last_flush); + dma_fence_put(id->last_flush); id->last_flush = NULL; - fence_put(id->flushed_updates); - id->flushed_updates = fence_get(updates); + dma_fence_put(id->flushed_updates); + id->flushed_updates = dma_fence_get(updates); id->pd_gpu_addr = job->vm_pd_addr; id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); @@ -393,7 +393,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || amdgpu_vm_is_gpu_reset(adev, id))) { - struct fence *fence; + struct dma_fence *fence; trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); @@ -403,7 +403,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) return r; mutex_lock(&adev->vm_manager.lock); - fence_put(id->last_flush); + dma_fence_put(id->last_flush); id->last_flush = fence; mutex_unlock(&adev->vm_manager.lock); } @@ -537,7 +537,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo) { struct amdgpu_ring *ring; - struct fence *fence = NULL; + struct dma_fence *fence = NULL; struct amdgpu_job *job; struct amdgpu_pte_update_params params; unsigned entries; @@ -578,7 +578,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, goto error_free; amdgpu_bo_fence(bo, fence, true); - fence_put(fence); + dma_fence_put(fence); return 0; error_free: @@ -625,7 +625,7 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, unsigned count = 0, pt_idx, ndw; struct amdgpu_job *job; struct amdgpu_pte_update_params params; - struct fence *fence = NULL; + struct dma_fence *fence = NULL; int r; @@ -714,9 +714,9 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, goto error_free; amdgpu_bo_fence(pd, fence, true); - fence_put(vm->page_directory_fence); - vm->page_directory_fence = fence_get(fence); - fence_put(fence); + dma_fence_put(vm->page_directory_fence); + vm->page_directory_fence = dma_fence_get(fence); + dma_fence_put(fence); } else { amdgpu_job_free(job); @@ -929,20 +929,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, - struct fence *exclusive, + struct dma_fence *exclusive, uint64_t src, dma_addr_t *pages_addr, struct amdgpu_vm *vm, uint64_t start, uint64_t last, uint32_t flags, uint64_t addr, - struct fence **fence) + struct dma_fence **fence) { struct amdgpu_ring *ring; void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; struct amdgpu_job *job; struct amdgpu_pte_update_params params; - struct fence *f = NULL; + struct dma_fence *f = NULL; int r; memset(¶ms, 0, sizeof(params)); @@ -1045,10 +1045,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, amdgpu_bo_fence(vm->page_directory, f, true); if (fence) { - fence_put(*fence); - *fence = fence_get(f); + dma_fence_put(*fence); + *fence = dma_fence_get(f); } - fence_put(f); + dma_fence_put(f); return 0; error_free: @@ -1074,13 +1074,13 @@ error_free: * Returns 0 for success, -EINVAL for failure. 
*/ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, - struct fence *exclusive, + struct dma_fence *exclusive, uint32_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, uint32_t flags, uint64_t addr, - struct fence **fence) + struct dma_fence **fence) { const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE; @@ -1147,7 +1147,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, dma_addr_t *pages_addr = NULL; uint32_t gtt_flags, flags; struct ttm_mem_reg *mem; - struct fence *exclusive; + struct dma_fence *exclusive; uint64_t addr; int r; @@ -1547,7 +1547,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, kfree(mapping); } - fence_put(bo_va->last_pt_update); + dma_fence_put(bo_va->last_pt_update); kfree(bo_va); } @@ -1709,7 +1709,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unref(&vm->page_directory->shadow); amdgpu_bo_unref(&vm->page_directory); - fence_put(vm->page_directory_fence); + dma_fence_put(vm->page_directory_fence); } /** @@ -1733,7 +1733,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) &adev->vm_manager.ids_lru); } - adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); + adev->vm_manager.fence_context = + dma_fence_context_alloc(AMDGPU_MAX_RINGS); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) adev->vm_manager.seqno[i] = 0; @@ -1755,8 +1756,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_NUM_VM; ++i) { struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; - fence_put(adev->vm_manager.ids[i].first); + dma_fence_put(adev->vm_manager.ids[i].first); amdgpu_sync_free(&adev->vm_manager.ids[i].active); - fence_put(id->flushed_updates); + dma_fence_put(id->flushed_updates); } } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index cb952acc7133..321b9d5a4e6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; @@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err1; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) err1: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 40abb6b81c09..7dc11a19e49d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; long r; @@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err2; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -1569,7 +1569,7 @@ static int 
gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) err2: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err1: amdgpu_gfx_scratch_free(adev, scratch); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 71116da9e782..3865ffe7de55 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; long r; @@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err2; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) err2: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err1: amdgpu_gfx_scratch_free(adev, scratch); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ee6a48a09214..a9dd18847c40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -798,7 +798,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; long r; @@ -824,7 +824,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err2; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; @@ -844,7 +844,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } err2: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err1: amdgpu_gfx_scratch_free(adev, scratch); return r; @@ -1575,7 +1575,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; int r, i; u32 tmp; unsigned total_size, vgpr_offset, sgpr_offset; @@ -1708,7 +1708,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) } /* wait for the GPU to finish processing the IB */ - r = fence_wait(f, false); + r = dma_fence_wait(f, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto fail; @@ -1729,7 +1729,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) fail: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 565dab3c7218..7edf6e8c63dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; @@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err1; - r = fence_wait_timeout(f, false, timeout); + r = 
dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) err1: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index a9d10941fb53..1932a67c62ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; @@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err1; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } err1: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index de358193a8f9..b4cf4e25bf91 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; - struct fence *f = NULL; + struct dma_fence *f = NULL; unsigned index; u32 tmp = 0; u64 gpu_addr; @@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) if (r) goto err1; - r = fence_wait_timeout(f, false, timeout); + r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { DRM_ERROR("amdgpu: IB test timed out\n"); r = -ETIMEDOUT; @@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) err1: amdgpu_ib_free(adev, &ib, NULL); - fence_put(f); + dma_fence_put(f); err0: amdgpu_wb_free(adev, index); return r; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index b961a1c6caf3..dbd4fd3a810b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job, TP_STRUCT__entry( __field(struct amd_sched_entity *, entity) __field(struct amd_sched_job *, sched_job) - __field(struct fence *, fence) + __field(struct dma_fence *, fence) __field(const char *, name) __field(u32, job_count) __field(int, hw_job_count) @@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job, TP_PROTO(struct amd_sched_fence *fence), TP_ARGS(fence), TP_STRUCT__entry( - __field(struct fence *, fence) + __field(struct dma_fence *, fence) ), TP_fast_assign( diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 963a24d46a93..5364e6a7ec8f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -32,7 +32,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); -static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); +static void amd_sched_process_job(struct 
dma_fence *f, struct dma_fence_cb *cb); struct kmem_cache *sched_fence_slab; atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); @@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, return r; atomic_set(&entity->fence_seq, 0); - entity->fence_context = fence_context_alloc(2); + entity->fence_context = dma_fence_context_alloc(2); return 0; } @@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, kfifo_free(&entity->job_queue); } -static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) +static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) { struct amd_sched_entity *entity = container_of(cb, struct amd_sched_entity, cb); entity->dependency = NULL; - fence_put(f); + dma_fence_put(f); amd_sched_wakeup(entity->sched); } -static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb) +static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) { struct amd_sched_entity *entity = container_of(cb, struct amd_sched_entity, cb); entity->dependency = NULL; - fence_put(f); + dma_fence_put(f); } static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) { struct amd_gpu_scheduler *sched = entity->sched; - struct fence * fence = entity->dependency; + struct dma_fence * fence = entity->dependency; struct amd_sched_fence *s_fence; if (fence->context == entity->fence_context) { /* We can ignore fences from ourself */ - fence_put(entity->dependency); + dma_fence_put(entity->dependency); return false; } @@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) * Fence is from the same scheduler, only need to wait for * it to be scheduled */ - fence = fence_get(&s_fence->scheduled); - fence_put(entity->dependency); + fence = dma_fence_get(&s_fence->scheduled); + dma_fence_put(entity->dependency); entity->dependency = fence; - if (!fence_add_callback(fence, &entity->cb, - amd_sched_entity_clear_dep)) + if (!dma_fence_add_callback(fence, &entity->cb, + amd_sched_entity_clear_dep)) return true; /* Ignore it when it is already scheduled */ - fence_put(fence); + dma_fence_put(fence); return false; } - if (!fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) + if (!dma_fence_add_callback(entity->dependency, &entity->cb, + amd_sched_entity_wakeup)) return true; - fence_put(entity->dependency); + dma_fence_put(entity->dependency); return false; } @@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work) sched->ops->free_job(s_job); } -static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb) +static void amd_sched_job_finish_cb(struct dma_fence *f, + struct dma_fence_cb *cb) { struct amd_sched_job *job = container_of(cb, struct amd_sched_job, finish_cb); @@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) spin_lock(&sched->job_list_lock); list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { - if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { - fence_put(s_job->s_fence->parent); + if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { + dma_fence_put(s_job->s_fence->parent); s_job->s_fence->parent = NULL; } } @@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { struct amd_sched_fence *s_fence = s_job->s_fence; - struct fence *fence; + struct dma_fence *fence; 
spin_unlock(&sched->job_list_lock); fence = sched->ops->run_job(s_job); atomic_inc(&sched->hw_rq_count); if (fence) { - s_fence->parent = fence_get(fence); - r = fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); + s_fence->parent = dma_fence_get(fence); + r = dma_fence_add_callback(fence, &s_fence->cb, + amd_sched_process_job); if (r == -ENOENT) amd_sched_process_job(fence, &s_fence->cb); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); - fence_put(fence); + dma_fence_put(fence); } else { DRM_ERROR("Failed to run job!\n"); amd_sched_process_job(NULL, &s_fence->cb); @@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) struct amd_sched_entity *entity = sched_job->s_entity; trace_amd_sched_job(sched_job); - fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, - amd_sched_job_finish_cb); + dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, + amd_sched_job_finish_cb); wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); } @@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched) return entity; } -static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) +static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) { struct amd_sched_fence *s_fence = container_of(cb, struct amd_sched_fence, cb); @@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) amd_sched_fence_finished(s_fence); trace_amd_sched_process_job(s_fence); - fence_put(&s_fence->finished); + dma_fence_put(&s_fence->finished); wake_up_interruptible(&sched->wake_up_worker); } @@ -547,7 +548,7 @@ static int amd_sched_main(void *param) struct amd_sched_entity *entity = NULL; struct amd_sched_fence *s_fence; struct amd_sched_job *sched_job; - struct fence *fence; + struct dma_fence *fence; wait_event_interruptible(sched->wake_up_worker, (!amd_sched_blocked(sched) && @@ -569,15 +570,15 @@ static int amd_sched_main(void *param) fence = sched->ops->run_job(sched_job); amd_sched_fence_scheduled(s_fence); if (fence) { - s_fence->parent = fence_get(fence); - r = fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); + s_fence->parent = dma_fence_get(fence); + r = dma_fence_add_callback(fence, &s_fence->cb, + amd_sched_process_job); if (r == -ENOENT) amd_sched_process_job(fence, &s_fence->cb); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); - fence_put(fence); + dma_fence_put(fence); } else { DRM_ERROR("Failed to run job!\n"); amd_sched_process_job(NULL, &s_fence->cb); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 7cbbbfb502ef..876aa43b57df 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -25,7 +25,7 @@ #define _GPU_SCHEDULER_H_ #include <linux/kfifo.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> struct amd_gpu_scheduler; struct amd_sched_rq; @@ -50,8 +50,8 @@ struct amd_sched_entity { atomic_t fence_seq; uint64_t fence_context; - struct fence *dependency; - struct fence_cb cb; + struct dma_fence *dependency; + struct dma_fence_cb cb; }; /** @@ -66,10 +66,10 @@ struct amd_sched_rq { }; struct amd_sched_fence { - struct fence scheduled; - struct fence finished; - struct fence_cb cb; - struct fence *parent; + struct dma_fence scheduled; + struct dma_fence finished; + struct dma_fence_cb cb; + struct dma_fence *parent; struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; @@ -79,15 +79,15 @@ struct
amd_sched_job { struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; - struct fence_cb finish_cb; + struct dma_fence_cb finish_cb; struct work_struct finish_work; struct list_head node; struct delayed_work work_tdr; }; -extern const struct fence_ops amd_sched_fence_ops_scheduled; -extern const struct fence_ops amd_sched_fence_ops_finished; -static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) +extern const struct dma_fence_ops amd_sched_fence_ops_scheduled; +extern const struct dma_fence_ops amd_sched_fence_ops_finished; +static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f) { if (f->ops == &amd_sched_fence_ops_scheduled) return container_of(f, struct amd_sched_fence, scheduled); @@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) * these functions should be implemented in driver side */ struct amd_sched_backend_ops { - struct fence *(*dependency)(struct amd_sched_job *sched_job); - struct fence *(*run_job)(struct amd_sched_job *sched_job); + struct dma_fence *(*dependency)(struct amd_sched_job *sched_job); + struct dma_fence *(*run_job)(struct amd_sched_job *sched_job); void (*timedout_job)(struct amd_sched_job *sched_job); void (*free_job)(struct amd_sched_job *sched_job); }; diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 6b63beaf7574..c26fa298fe9e 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, spin_lock_init(&fence->lock); seq = atomic_inc_return(&entity->fence_seq); - fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, - &fence->lock, entity->fence_context, seq); - fence_init(&fence->finished, &amd_sched_fence_ops_finished, - &fence->lock, entity->fence_context + 1, seq); + dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, + &fence->lock, entity->fence_context, seq); + dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished, + &fence->lock, entity->fence_context + 1, seq); return fence; } void amd_sched_fence_scheduled(struct amd_sched_fence *fence) { - int ret = fence_signal(&fence->scheduled); + int ret = dma_fence_signal(&fence->scheduled); if (!ret) - FENCE_TRACE(&fence->scheduled, "signaled from irq context\n"); + DMA_FENCE_TRACE(&fence->scheduled, + "signaled from irq context\n"); else - FENCE_TRACE(&fence->scheduled, "was already signaled\n"); + DMA_FENCE_TRACE(&fence->scheduled, + "was already signaled\n"); } void amd_sched_fence_finished(struct amd_sched_fence *fence) { - int ret = fence_signal(&fence->finished); + int ret = dma_fence_signal(&fence->finished); if (!ret) - FENCE_TRACE(&fence->finished, "signaled from irq context\n"); + DMA_FENCE_TRACE(&fence->finished, + "signaled from irq context\n"); else - FENCE_TRACE(&fence->finished, "was already signaled\n"); + DMA_FENCE_TRACE(&fence->finished, + "was already signaled\n"); } -static const char *amd_sched_fence_get_driver_name(struct fence *fence) +static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence) { return "amd_sched"; } -static const char *amd_sched_fence_get_timeline_name(struct fence *f) +static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f) { struct amd_sched_fence *fence = to_amd_sched_fence(f); return (const char *)fence->sched->name; } -static bool amd_sched_fence_enable_signaling(struct 
fence *f) +static bool amd_sched_fence_enable_signaling(struct dma_fence *f) { return true; } @@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f) */ static void amd_sched_fence_free(struct rcu_head *rcu) { - struct fence *f = container_of(rcu, struct fence, rcu); + struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); struct amd_sched_fence *fence = to_amd_sched_fence(f); - fence_put(fence->parent); + dma_fence_put(fence->parent); kmem_cache_free(sched_fence_slab, fence); } @@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu) * This function is called when the reference count becomes zero. * It just RCU schedules freeing up the fence. */ -static void amd_sched_fence_release_scheduled(struct fence *f) +static void amd_sched_fence_release_scheduled(struct dma_fence *f) { struct amd_sched_fence *fence = to_amd_sched_fence(f); @@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f) * * Drop the extra reference from the scheduled fence to the base fence. */ -static void amd_sched_fence_release_finished(struct fence *f) +static void amd_sched_fence_release_finished(struct dma_fence *f) { struct amd_sched_fence *fence = to_amd_sched_fence(f); - fence_put(&fence->scheduled); + dma_fence_put(&fence->scheduled); } -const struct fence_ops amd_sched_fence_ops_scheduled = { +const struct dma_fence_ops amd_sched_fence_ops_scheduled = { .get_driver_name = amd_sched_fence_get_driver_name, .get_timeline_name = amd_sched_fence_get_timeline_name, .enable_signaling = amd_sched_fence_enable_signaling, .signaled = NULL, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = amd_sched_fence_release_scheduled, }; -const struct fence_ops amd_sched_fence_ops_finished = { +const struct dma_fence_ops amd_sched_fence_ops_finished = { .get_driver_name = amd_sched_fence_get_driver_name, .get_timeline_name = amd_sched_fence_get_timeline_name, .enable_signaling = amd_sched_fence_enable_signaling, .signaled = NULL, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = amd_sched_fence_release_finished, }; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 1b5a32df9a9a..c32fb3c1d6f0 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1463,7 +1463,7 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit); static struct drm_pending_vblank_event *create_vblank_event( struct drm_device *dev, struct drm_file *file_priv, - struct fence *fence, uint64_t user_data) + struct dma_fence *fence, uint64_t user_data) { struct drm_pending_vblank_event *e = NULL; int ret; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index f9362760bfb2..75ad01d595fd 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include "drm_crtc_internal.h" @@ -1017,7 +1017,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); * drm_atomic_helper_swap_state() so it uses the current plane state (and * just uses the atomic state to find the changed planes) * - * Returns zero if success or < 0 if fence_wait() fails. + * Returns zero if success or < 0 if dma_fence_wait() fails. */ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, @@ -1041,11 +1041,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, * still interrupt the operation. 
Instead of blocking until the * timer expires, make the wait interruptible. */ - ret = fence_wait(plane_state->fence, pre_swap); + ret = dma_fence_wait(plane_state->fence, pre_swap); if (ret) return ret; - fence_put(plane_state->fence); + dma_fence_put(plane_state->fence); plane_state->fence = NULL; } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 8bed5f459182..cf993dbf602e 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -665,7 +665,7 @@ void drm_event_cancel_free(struct drm_device *dev, spin_unlock_irqrestore(&dev->event_lock, flags); if (p->fence) - fence_put(p->fence); + dma_fence_put(p->fence); kfree(p); } @@ -696,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) } if (e->fence) { - fence_signal(e->fence); - fence_put(e->fence); + dma_fence_signal(e->fence); + dma_fence_put(e->fence); } if (!e->file_priv) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 3755ef935af4..7d066a91d778 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -466,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, } #ifdef CONFIG_DEBUG_FS -static void etnaviv_gem_describe_fence(struct fence *fence, +static void etnaviv_gem_describe_fence(struct dma_fence *fence, const char *type, struct seq_file *m) { - if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) seq_printf(m, "\t%9s: %s %s seq %u\n", type, fence->ops->get_driver_name(fence), @@ -482,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); struct reservation_object *robj = etnaviv_obj->resv; struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; unsigned long off = drm_vma_node_start(&obj->vma_node); seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index b1254f885fed..d2211825e5c8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -15,7 +15,7 @@ */ #include -#include +#include #include #include #include "etnaviv_dump.h" @@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work) for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { if (!gpu->event[i].used) continue; - fence_signal(gpu->event[i].fence); + dma_fence_signal(gpu->event[i].fence); gpu->event[i].fence = NULL; gpu->event[i].used = false; complete(&gpu->event_free); @@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu) /* fence object management */ struct etnaviv_fence { struct etnaviv_gpu *gpu; - struct fence base; + struct dma_fence base; }; -static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence) +static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence) { return container_of(fence, struct etnaviv_fence, base); } -static const char *etnaviv_fence_get_driver_name(struct fence *fence) +static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence) { return "etnaviv"; } -static const char *etnaviv_fence_get_timeline_name(struct fence *fence) +static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence) { struct etnaviv_fence *f = to_etnaviv_fence(fence); return dev_name(f->gpu->dev); } -static bool etnaviv_fence_enable_signaling(struct fence *fence) 
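
Every hunk in this conversion follows the same mechanical rule: struct fence becomes struct dma_fence, the fence_* helpers become dma_fence_*, and the header moves from linux/fence.h to linux/dma-fence.h. A minimal, self-contained sketch of the resulting driver-side pattern (the toy_* names are hypothetical, not part of the patch):

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct toy_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *toy_get_driver_name(struct dma_fence *f)
{
	return "toy";
}

static const char *toy_get_timeline_name(struct dma_fence *f)
{
	return "toy-timeline";
}

static bool toy_enable_signaling(struct dma_fence *f)
{
	return true;	/* nothing to arm; mirrors etnaviv above */
}

static const struct dma_fence_ops toy_fence_ops = {
	.get_driver_name   = toy_get_driver_name,
	.get_timeline_name = toy_get_timeline_name,
	.enable_signaling  = toy_enable_signaling,
	.wait              = dma_fence_default_wait,
};

static struct dma_fence *toy_fence_create(u64 context, unsigned int seqno)
{
	struct toy_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &toy_fence_ops, &f->lock, context, seqno);
	return &f->base;	/* drop the reference with dma_fence_put() */
}

With no .release hook the core frees the fence itself via RCU, which is why base must stay the first member of the wrapper struct here.
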
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence) { return true; } -static bool etnaviv_fence_signaled(struct fence *fence) +static bool etnaviv_fence_signaled(struct dma_fence *fence) { struct etnaviv_fence *f = to_etnaviv_fence(fence); return fence_completed(f->gpu, f->base.seqno); } -static void etnaviv_fence_release(struct fence *fence) +static void etnaviv_fence_release(struct dma_fence *fence) { struct etnaviv_fence *f = to_etnaviv_fence(fence); kfree_rcu(f, base.rcu); } -static const struct fence_ops etnaviv_fence_ops = { +static const struct dma_fence_ops etnaviv_fence_ops = { .get_driver_name = etnaviv_fence_get_driver_name, .get_timeline_name = etnaviv_fence_get_timeline_name, .enable_signaling = etnaviv_fence_enable_signaling, .signaled = etnaviv_fence_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = etnaviv_fence_release, }; -static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) +static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) { struct etnaviv_fence *f; @@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) f->gpu = gpu; - fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, - gpu->fence_context, ++gpu->next_fence); + dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, + gpu->fence_context, ++gpu->next_fence); return &f->base; } @@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, { struct reservation_object *robj = etnaviv_obj->resv; struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; int i, ret; if (!exclusive) { @@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, /* Wait on any existing exclusive fence which isn't our own */ fence = reservation_object_get_excl(robj); if (fence && fence->context != context) { - ret = fence_wait(fence, true); + ret = dma_fence_wait(fence, true); if (ret) return ret; } @@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(robj)); if (fence->context != context) { - ret = fence_wait(fence, true); + ret = dma_fence_wait(fence, true); if (ret) return ret; } @@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work) mutex_lock(&gpu->lock); list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { - if (!fence_is_signaled(cmdbuf->fence)) + if (!dma_fence_is_signaled(cmdbuf->fence)) break; list_del(&cmdbuf->node); - fence_put(cmdbuf->fence); + dma_fence_put(cmdbuf->fence); for (i = 0; i < cmdbuf->nr_bos; i++) { struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i]; @@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu) int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) { - struct fence *fence; + struct dma_fence *fence; unsigned int event, i; int ret; @@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data) } while ((event = ffs(intr)) != 0) { - struct fence *fence; + struct dma_fence *fence; event -= 1; @@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data) fence = gpu->event[event].fence; gpu->event[event].fence = NULL; - fence_signal(fence); + dma_fence_signal(fence); /* * Events can be processed out of order. 
Eg, @@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, return ret; gpu->drm = drm; - gpu->fence_context = fence_context_alloc(1); + gpu->fence_context = dma_fence_context_alloc(1); spin_lock_init(&gpu->fence_spinlock); INIT_LIST_HEAD(&gpu->active_cmd_list); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 73c278dc3706..8c6b824e9d0a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -89,7 +89,7 @@ struct etnaviv_chip_identity { struct etnaviv_event { bool used; - struct fence *fence; + struct dma_fence *fence; }; struct etnaviv_cmdbuf; @@ -163,7 +163,7 @@ struct etnaviv_cmdbuf { /* vram node used if the cmdbuf is mapped through the MMUv2 */ struct drm_mm_node vram_node; /* fence after which this buffer is to be disposed */ - struct fence *fence; + struct dma_fence *fence; /* target exec state */ u32 exec_state; /* per GPU in-flight list */ diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 74ede1f53372..f9af2a00625e 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -26,12 +26,12 @@ #include "i915_drv.h" -static const char *i915_fence_get_driver_name(struct fence *fence) +static const char *i915_fence_get_driver_name(struct dma_fence *fence) { return "i915"; } -static const char *i915_fence_get_timeline_name(struct fence *fence) +static const char *i915_fence_get_timeline_name(struct dma_fence *fence) { /* Timelines are bound by eviction to a VM. However, since * we only have a global seqno at the moment, we only have @@ -42,12 +42,12 @@ static const char *i915_fence_get_timeline_name(struct fence *fence) return "global"; } -static bool i915_fence_signaled(struct fence *fence) +static bool i915_fence_signaled(struct dma_fence *fence) { return i915_gem_request_completed(to_request(fence)); } -static bool i915_fence_enable_signaling(struct fence *fence) +static bool i915_fence_enable_signaling(struct dma_fence *fence) { if (i915_fence_signaled(fence)) return false; @@ -56,7 +56,7 @@ static bool i915_fence_enable_signaling(struct fence *fence) return true; } -static signed long i915_fence_wait(struct fence *fence, +static signed long i915_fence_wait(struct dma_fence *fence, bool interruptible, signed long timeout_jiffies) { @@ -85,26 +85,26 @@ static signed long i915_fence_wait(struct fence *fence, return timeout_jiffies; } -static void i915_fence_value_str(struct fence *fence, char *str, int size) +static void i915_fence_value_str(struct dma_fence *fence, char *str, int size) { snprintf(str, size, "%u", fence->seqno); } -static void i915_fence_timeline_value_str(struct fence *fence, char *str, +static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) { snprintf(str, size, "%u", intel_engine_get_seqno(to_request(fence)->engine)); } -static void i915_fence_release(struct fence *fence) +static void i915_fence_release(struct dma_fence *fence) { struct drm_i915_gem_request *req = to_request(fence); kmem_cache_free(req->i915->requests, req); } -const struct fence_ops i915_fence_ops = { +const struct dma_fence_ops i915_fence_ops = { .get_driver_name = i915_fence_get_driver_name, .get_timeline_name = i915_fence_get_timeline_name, .enable_signaling = i915_fence_enable_signaling, @@ -388,8 +388,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, * The reference count is incremented atomically. 
If it is zero, * the lookup knows the request is unallocated and complete. Otherwise, * it is either still in use, or has been reallocated and reset - * with fence_init(). This increment is safe for release as we check - * that the request we have a reference to and matches the active + * with dma_fence_init(). This increment is safe for release as we + * check that the request we have a reference to and matches the active * request. * * Before we increment the refcount, we chase the request->engine @@ -412,11 +412,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, goto err; spin_lock_init(&req->lock); - fence_init(&req->fence, - &i915_fence_ops, - &req->lock, - engine->fence_context, - seqno); + dma_fence_init(&req->fence, + &i915_fence_ops, + &req->lock, + engine->fence_context, + seqno); i915_sw_fence_init(&req->submit, submit_notify); diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 974bd7bcc801..bceeaa3a5193 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -25,7 +25,7 @@ #ifndef I915_GEM_REQUEST_H #define I915_GEM_REQUEST_H -#include +#include #include "i915_gem.h" #include "i915_sw_fence.h" @@ -62,7 +62,7 @@ struct intel_signal_node { * The requests are reference counted. */ struct drm_i915_gem_request { - struct fence fence; + struct dma_fence fence; spinlock_t lock; /** On Which ring this request was generated */ @@ -145,9 +145,9 @@ struct drm_i915_gem_request { struct list_head execlist_link; }; -extern const struct fence_ops i915_fence_ops; +extern const struct dma_fence_ops i915_fence_ops; -static inline bool fence_is_i915(struct fence *fence) +static inline bool fence_is_i915(struct dma_fence *fence) { return fence->ops == &i915_fence_ops; } @@ -172,7 +172,7 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req) } static inline struct drm_i915_gem_request * -to_request(struct fence *fence) +to_request(struct dma_fence *fence) { /* We assume that NULL fence/request are interoperable */ BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); @@ -183,19 +183,19 @@ to_request(struct fence *fence) static inline struct drm_i915_gem_request * i915_gem_request_get(struct drm_i915_gem_request *req) { - return to_request(fence_get(&req->fence)); + return to_request(dma_fence_get(&req->fence)); } static inline struct drm_i915_gem_request * i915_gem_request_get_rcu(struct drm_i915_gem_request *req) { - return to_request(fence_get_rcu(&req->fence)); + return to_request(dma_fence_get_rcu(&req->fence)); } static inline void i915_gem_request_put(struct drm_i915_gem_request *req) { - fence_put(&req->fence); + dma_fence_put(&req->fence); } static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, @@ -497,7 +497,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active) * compiler. * * The atomic operation at the heart of - * i915_gem_request_get_rcu(), see fence_get_rcu(), is + * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is * atomic_inc_not_zero() which is only a full memory barrier * when successful. 
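
The amd scheduler and i915 hunks also show the idiomatic downcast: a fence's identity is its ops pointer, so container_of() is only safe after checking f->ops. A hedged sketch using the hypothetical toy types from above, cf. fence_is_i915() and to_amd_sched_fence():

static inline struct toy_fence *to_toy_fence(struct dma_fence *f)
{
	/* the ops pointer identifies the fence type */
	if (f->ops != &toy_fence_ops)
		return NULL;
	return container_of(f, struct toy_fence, base);
}
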
That is, if i915_gem_request_get_rcu() * returns the request (and so with the reference counted diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 1e5cbc585ca2..8185002d7ec8 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include #include "i915_sw_fence.h" @@ -226,49 +226,50 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, return pending; } -struct dma_fence_cb { - struct fence_cb base; +struct i915_sw_dma_fence_cb { + struct dma_fence_cb base; struct i915_sw_fence *fence; - struct fence *dma; + struct dma_fence *dma; struct timer_list timer; }; static void timer_i915_sw_fence_wake(unsigned long data) { - struct dma_fence_cb *cb = (struct dma_fence_cb *)data; + struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data; printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n", cb->dma->ops->get_driver_name(cb->dma), cb->dma->ops->get_timeline_name(cb->dma), cb->dma->seqno); - fence_put(cb->dma); + dma_fence_put(cb->dma); cb->dma = NULL; i915_sw_fence_commit(cb->fence); cb->timer.function = NULL; } -static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data) +static void dma_i915_sw_fence_wake(struct dma_fence *dma, + struct dma_fence_cb *data) { - struct dma_fence_cb *cb = container_of(data, typeof(*cb), base); + struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); del_timer_sync(&cb->timer); if (cb->timer.function) i915_sw_fence_commit(cb->fence); - fence_put(cb->dma); + dma_fence_put(cb->dma); kfree(cb); } int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, - struct fence *dma, + struct dma_fence *dma, unsigned long timeout, gfp_t gfp) { - struct dma_fence_cb *cb; + struct i915_sw_dma_fence_cb *cb; int ret; - if (fence_is_signaled(dma)) + if (dma_fence_is_signaled(dma)) return 0; cb = kmalloc(sizeof(*cb), gfp); @@ -276,7 +277,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, if (!gfpflags_allow_blocking(gfp)) return -ENOMEM; - return fence_wait(dma, false); + return dma_fence_wait(dma, false); } cb->fence = i915_sw_fence_get(fence); @@ -287,11 +288,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, timer_i915_sw_fence_wake, (unsigned long)cb, TIMER_IRQSAFE); if (timeout) { - cb->dma = fence_get(dma); + cb->dma = dma_fence_get(dma); mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout)); } - ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake); + ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake); if (ret == 0) { ret = 1; } else { @@ -305,16 +306,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, - const struct fence_ops *exclude, + const struct dma_fence_ops *exclude, bool write, unsigned long timeout, gfp_t gfp) { - struct fence *excl; + struct dma_fence *excl; int ret = 0, pending; if (write) { - struct fence **shared; + struct dma_fence **shared; unsigned int count, i; ret = reservation_object_get_fences_rcu(resv, @@ -339,7 +340,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, } for (i = 0; i < count; i++) - fence_put(shared[i]); + dma_fence_put(shared[i]); kfree(shared); } else { excl = reservation_object_get_excl_rcu(resv); @@ -356,7 +357,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, ret |= pending; } - fence_put(excl); + 
dma_fence_put(excl); return ret; } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 373141602ca4..cd239e92f67f 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -16,8 +16,8 @@ #include struct completion; -struct fence; -struct fence_ops; +struct dma_fence; +struct dma_fence_ops; struct reservation_object; struct i915_sw_fence { @@ -47,12 +47,12 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *after, wait_queue_t *wq); int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, - struct fence *dma, + struct dma_fence *dma, unsigned long timeout, gfp_t gfp); int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, - const struct fence_ops *exclude, + const struct dma_fence_ops *exclude, bool write, unsigned long timeout, gfp_t gfp); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 178798002a73..5c912c25f7d3 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -491,7 +491,7 @@ TRACE_EVENT(i915_gem_ring_dispatch, __entry->ring = req->engine->id; __entry->seqno = req->fence.seqno; __entry->flags = flags; - fence_enable_sw_signaling(&req->fence); + dma_fence_enable_sw_signaling(&req->fence); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 23fc1042fed4..56efcc507ea2 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -464,7 +464,7 @@ static int intel_breadcrumbs_signaler(void *arg) &request->signaling.wait); local_bh_disable(); - fence_signal(&request->fence); + dma_fence_signal(&request->fence); local_bh_enable(); /* kick start the tasklets */ /* Find the next oldest signal. 
Note that as we have @@ -502,7 +502,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request) struct rb_node *parent, **p; bool first, wakeup; - /* locked by fence_enable_sw_signaling() */ + /* locked by dma_fence_enable_sw_signaling() */ assert_spin_locked(&request->lock); request->signaling.wait.tsk = b->signaler; diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2dc94812bea5..8cceb345aa0f 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -245,7 +245,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) INIT_LIST_HEAD(&engine->execlist_queue); spin_lock_init(&engine->execlist_lock); - engine->fence_context = fence_context_alloc(1); + engine->fence_context = dma_fence_context_alloc(1); intel_engine_init_requests(engine); intel_engine_init_hangcheck(engine); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d0da52f2a806..940bf4992fe2 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj); int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive); void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool exclusive, struct fence *fence); + struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence); void msm_gem_move_to_inactive(struct drm_gem_object *obj); int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); int msm_gem_cpu_fini(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index a9b9b1c95a2e..3f299c537b77 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -15,7 +15,7 @@ * this program. If not, see . 
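
fence_context_alloc() likewise becomes dma_fence_context_alloc(): a context is a u64 namespace within which seqnos are totally ordered, typically one per ring or channel, and the converted drivers compare contexts to avoid waiting on their own fences. A sketch of that test, under the same hypothetical naming:

	u64 ring_ctx = dma_fence_context_alloc(1);	/* once, at init */

	/* when syncing against a fence from a reservation object: */
	if (fence->context != ring_ctx) {
		int err = dma_fence_wait(fence, true);	/* interruptible */
		if (err)
			return err;
	}
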
*/ -#include +#include #include "msm_drv.h" #include "msm_fence.h" @@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name) fctx->dev = dev; fctx->name = name; - fctx->context = fence_context_alloc(1); + fctx->context = dma_fence_context_alloc(1); init_waitqueue_head(&fctx->event); spin_lock_init(&fctx->spinlock); @@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) struct msm_fence { struct msm_fence_context *fctx; - struct fence base; + struct dma_fence base; }; -static inline struct msm_fence *to_msm_fence(struct fence *fence) +static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) { return container_of(fence, struct msm_fence, base); } -static const char *msm_fence_get_driver_name(struct fence *fence) +static const char *msm_fence_get_driver_name(struct dma_fence *fence) { return "msm"; } -static const char *msm_fence_get_timeline_name(struct fence *fence) +static const char *msm_fence_get_timeline_name(struct dma_fence *fence) { struct msm_fence *f = to_msm_fence(fence); return f->fctx->name; } -static bool msm_fence_enable_signaling(struct fence *fence) +static bool msm_fence_enable_signaling(struct dma_fence *fence) { return true; } -static bool msm_fence_signaled(struct fence *fence) +static bool msm_fence_signaled(struct dma_fence *fence) { struct msm_fence *f = to_msm_fence(fence); return fence_completed(f->fctx, f->base.seqno); } -static void msm_fence_release(struct fence *fence) +static void msm_fence_release(struct dma_fence *fence) { struct msm_fence *f = to_msm_fence(fence); kfree_rcu(f, base.rcu); } -static const struct fence_ops msm_fence_ops = { +static const struct dma_fence_ops msm_fence_ops = { .get_driver_name = msm_fence_get_driver_name, .get_timeline_name = msm_fence_get_timeline_name, .enable_signaling = msm_fence_enable_signaling, .signaled = msm_fence_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = msm_fence_release, }; -struct fence * +struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx) { struct msm_fence *f; @@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx) f->fctx = fctx; - fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, - fctx->context, ++fctx->last_fence); + dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, + fctx->context, ++fctx->last_fence); return &f->base; } diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index ceb5b3d314b4..56061aa1959d 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h @@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx, struct msm_fence_cb *cb, uint32_t fence); void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); -struct fence * msm_fence_alloc(struct msm_fence_context *fctx); +struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx); #endif diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b6ac27e31929..57db7dbbb618 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; int i, ret; if (!exclusive) { @@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, fence = reservation_object_get_excl(msm_obj->resv); /* don't need to wait on our own fences, since ring is fifo */ if (fence && 
(fence->context != fctx->context)) { - ret = fence_wait(fence, true); + ret = dma_fence_wait(fence, true); if (ret) return ret; } @@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(msm_obj->resv)); if (fence->context != fctx->context) { - ret = fence_wait(fence, true); + ret = dma_fence_wait(fence, true); if (ret) return ret; } @@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, } void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool exclusive, struct fence *fence) + struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence) { struct msm_gem_object *msm_obj = to_msm_bo(obj); WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); @@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj) } #ifdef CONFIG_DEBUG_FS -static void describe_fence(struct fence *fence, const char *type, +static void describe_fence(struct dma_fence *fence, const char *type, struct seq_file *m) { - if (!fence_is_signaled(fence)) + if (!dma_fence_is_signaled(fence)) seq_printf(m, "\t%9s: %s %s seq %u\n", type, fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), @@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) struct msm_gem_object *msm_obj = to_msm_bo(obj); struct reservation_object *robj = msm_obj->resv; struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b2f13cfe945e..2cb8551fda70 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -104,7 +104,7 @@ struct msm_gem_submit { struct list_head node; /* node in gpu submit_list */ struct list_head bo_list; struct ww_acquire_ctx ticket; - struct fence *fence; + struct dma_fence *fence; struct pid *pid; /* submitting process */ bool valid; /* true if no cmdstream patching needed */ unsigned int nr_cmds; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b6a0f37a65f3..25e8786fa4ca 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, void msm_gem_submit_free(struct msm_gem_submit *submit) { - fence_put(submit->fence); + dma_fence_put(submit->fence); list_del(&submit->node); put_pid(submit->pid); kfree(submit); @@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_file_private *ctx = file->driver_priv; struct msm_gem_submit *submit; struct msm_gpu *gpu = priv->gpu; - struct fence *in_fence = NULL; + struct dma_fence *in_fence = NULL; struct sync_file *sync_file = NULL; int out_fence_fd = -1; unsigned i; @@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, */ if (in_fence->context != gpu->fctx->context) { - ret = fence_wait(in_fence, true); + ret = dma_fence_wait(in_fence, true); if (ret) goto out; } @@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out: if (in_fence) - fence_put(in_fence); + dma_fence_put(in_fence); submit_cleanup(submit); if (ret) msm_gem_submit_free(submit); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5bb09838b5ae..3249707e6834 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -476,7 +476,7 @@ static void 
retire_submits(struct msm_gpu *gpu) submit = list_first_entry(&gpu->submit_list, struct msm_gem_submit, node); - if (fence_is_signaled(submit->fence)) { + if (dma_fence_is_signaled(submit->fence)) { retire_submit(gpu, submit); } else { break; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 343b8659472c..ec8ac756aab4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i) static void nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, - struct fence *fence) + struct dma_fence *fence) { struct nouveau_drm *drm = nouveau_drm(dev); if (tile) { spin_lock(&drm->tile.lock); - tile->fence = (struct nouveau_fence *)fence_get(fence); + tile->fence = (struct nouveau_fence *)dma_fence_get(fence); tile->used = false; spin_unlock(&drm->tile.lock); } @@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; - struct fence *fence = reservation_object_get_excl(bo->resv); + struct dma_fence *fence = reservation_object_get_excl(bo->resv); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 4bb9ab892ae1..e9529ee6bc23 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include @@ -38,11 +38,11 @@ #include "nouveau_dma.h" #include "nouveau_fence.h" -static const struct fence_ops nouveau_fence_ops_uevent; -static const struct fence_ops nouveau_fence_ops_legacy; +static const struct dma_fence_ops nouveau_fence_ops_uevent; +static const struct dma_fence_ops nouveau_fence_ops_legacy; static inline struct nouveau_fence * -from_fence(struct fence *fence) +from_fence(struct dma_fence *fence) { return container_of(fence, struct nouveau_fence, base); } @@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence) { int drop = 0; - fence_signal_locked(&fence->base); + dma_fence_signal_locked(&fence->base); list_del(&fence->head); rcu_assign_pointer(fence->channel, NULL); - if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { + if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) { struct nouveau_fence_chan *fctx = nouveau_fctx(fence); if (!--fctx->notify_ref) drop = 1; } - fence_put(&fence->base); + dma_fence_put(&fence->base); return drop; } static struct nouveau_fence * -nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) { +nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) { struct nouveau_fence_priv *priv = (void*)drm->fence; if (fence->ops != &nouveau_fence_ops_legacy && @@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha struct nouveau_fence_work { struct work_struct work; - struct fence_cb cb; + struct dma_fence_cb cb; void (*func)(void *); void *data; }; @@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork) kfree(work); } -static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) +static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb); @@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) } void -nouveau_fence_work(struct 
fence *fence, +nouveau_fence_work(struct dma_fence *fence, void (*func)(void *), void *data) { struct nouveau_fence_work *work; - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) goto err; work = kmalloc(sizeof(*work), GFP_KERNEL); @@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence, work->func = func; work->data = data; - if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) + if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) goto err_free; return; @@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) fence->timeout = jiffies + (15 * HZ); if (priv->uevent) - fence_init(&fence->base, &nouveau_fence_ops_uevent, - &fctx->lock, fctx->context, ++fctx->sequence); + dma_fence_init(&fence->base, &nouveau_fence_ops_uevent, + &fctx->lock, fctx->context, ++fctx->sequence); else - fence_init(&fence->base, &nouveau_fence_ops_legacy, - &fctx->lock, fctx->context, ++fctx->sequence); + dma_fence_init(&fence->base, &nouveau_fence_ops_legacy, + &fctx->lock, fctx->context, ++fctx->sequence); kref_get(&fctx->fence_ref); - trace_fence_emit(&fence->base); + trace_dma_fence_emit(&fence->base); ret = fctx->emit(fence); if (!ret) { - fence_get(&fence->base); + dma_fence_get(&fence->base); spin_lock_irq(&fctx->lock); if (nouveau_fence_update(chan, fctx)) @@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence) struct nouveau_channel *chan; unsigned long flags; - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) return true; spin_lock_irqsave(&fctx->lock, flags); @@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence) nvif_notify_put(&fctx->notify); spin_unlock_irqrestore(&fctx->lock, flags); } - return fence_is_signaled(&fence->base); + return dma_fence_is_signaled(&fence->base); } static long -nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait) +nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait) { struct nouveau_fence *fence = from_fence(f); unsigned long sleep_time = NSEC_PER_MSEC / 1000; @@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) if (!lazy) return nouveau_fence_wait_busy(fence, intr); - ret = fence_wait_timeout(&fence->base, intr, 15 * HZ); + ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ); if (ret < 0) return ret; else if (!ret) @@ -391,7 +391,7 @@ int nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) { struct nouveau_fence_chan *fctx = chan->fence; - struct fence *fence; + struct dma_fence *fence; struct reservation_object *resv = nvbo->bo.resv; struct reservation_object_list *fobj; struct nouveau_fence *f; @@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e } if (must_wait) - ret = fence_wait(fence, intr); + ret = dma_fence_wait(fence, intr); return ret; } @@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e } if (must_wait) - ret = fence_wait(fence, intr); + ret = dma_fence_wait(fence, intr); } return ret; @@ -456,7 +456,7 @@ void nouveau_fence_unref(struct nouveau_fence **pfence) { if (*pfence) - fence_put(&(*pfence)->base); + dma_fence_put(&(*pfence)->base); *pfence = NULL; } @@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem, return ret; } -static const char *nouveau_fence_get_get_driver_name(struct fence *fence) +static const char 
*nouveau_fence_get_get_driver_name(struct dma_fence *fence) { return "nouveau"; } -static const char *nouveau_fence_get_timeline_name(struct fence *f) +static const char *nouveau_fence_get_timeline_name(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); @@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f) * result. The drm node should still be there, so we can derive the index from * the fence context. */ -static bool nouveau_fence_is_signaled(struct fence *f) +static bool nouveau_fence_is_signaled(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); @@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f) return ret; } -static bool nouveau_fence_no_signaling(struct fence *f) +static bool nouveau_fence_no_signaling(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); @@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f) WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1); /* - * This needs uevents to work correctly, but fence_add_callback relies on + * This needs uevents to work correctly, but dma_fence_add_callback relies on * being able to enable signaling. It will still get signaled eventually, * just not right away. */ if (nouveau_fence_is_signaled(f)) { list_del(&fence->head); - fence_put(&fence->base); + dma_fence_put(&fence->base); return false; } return true; } -static void nouveau_fence_release(struct fence *f) +static void nouveau_fence_release(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); kref_put(&fctx->fence_ref, nouveau_fence_context_put); - fence_free(&fence->base); + dma_fence_free(&fence->base); } -static const struct fence_ops nouveau_fence_ops_legacy = { +static const struct dma_fence_ops nouveau_fence_ops_legacy = { .get_driver_name = nouveau_fence_get_get_driver_name, .get_timeline_name = nouveau_fence_get_timeline_name, .enable_signaling = nouveau_fence_no_signaling, @@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = { .release = nouveau_fence_release }; -static bool nouveau_fence_enable_signaling(struct fence *f) +static bool nouveau_fence_enable_signaling(struct dma_fence *f) { struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); @@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f) ret = nouveau_fence_no_signaling(f); if (ret) - set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags); + set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags); else if (!--fctx->notify_ref) nvif_notify_put(&fctx->notify); return ret; } -static const struct fence_ops nouveau_fence_ops_uevent = { +static const struct dma_fence_ops nouveau_fence_ops_uevent = { .get_driver_name = nouveau_fence_get_get_driver_name, .get_timeline_name = nouveau_fence_get_timeline_name, .enable_signaling = nouveau_fence_enable_signaling, .signaled = nouveau_fence_is_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = NULL }; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 64c4ce7115ad..41f3c019e534 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -1,14 +1,14 @@ #ifndef __NOUVEAU_FENCE_H__ #define __NOUVEAU_FENCE_H__ -#include +#include #include struct nouveau_drm; 
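
nouveau's two ops tables make the ->enable_signaling contract visible: return true once signaling (here, uevents) is armed, or false if the fence has already signaled so the core can treat it as completed immediately. A minimal sketch, a variant of the toy_enable_signaling above that actually arms an interrupt; the toy_hw_* helpers are hypothetical:

static bool toy_irq_enable_signaling(struct dma_fence *f)
{
	if (toy_hw_seqno_passed(f->seqno))	/* hypothetical HW check */
		return false;			/* already signaled */

	toy_hw_arm_irq();	/* hypothetical: enable the completion IRQ */
	return true;
}
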
struct nouveau_bo; struct nouveau_fence { - struct fence base; + struct dma_fence base; struct list_head head; @@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **); int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); bool nouveau_fence_done(struct nouveau_fence *); -void nouveau_fence_work(struct fence *, void (*)(void *), void *); +void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *); int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0bd7164bc817..7f083c95f422 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma) const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; struct reservation_object *resv = nvbo->bo.resv; struct reservation_object_list *fobj; - struct fence *fence = NULL; + struct dma_fence *fence = NULL; fobj = reservation_object_get_list(resv); diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c index 1915b7b82a59..fa8f2375c398 100644 --- a/drivers/gpu/drm/nouveau/nv04_fence.c +++ b/drivers/gpu/drm/nouveau/nv04_fence.c @@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv04_fence_context_new; priv->base.context_del = nv04_fence_context_del; priv->base.contexts = 15; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c index 4e3de34ff6f4..f99fcf56928a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/drivers/gpu/drm/nouveau/nv10_fence.c @@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv10_fence_context_new; priv->base.context_del = nv10_fence_context_del; priv->base.contexts = 31; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); spin_lock_init(&priv->lock); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 7d5e562a55c5..79bc01111351 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv17_fence_context_new; priv->base.context_del = nv10_fence_context_del; priv->base.contexts = 31; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); spin_lock_init(&priv->lock); ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 4d6f202b7770..8c5295414578 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv50_fence_context_new; priv->base.context_del = nv10_fence_context_del; priv->base.contexts = 127; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); spin_lock_init(&priv->lock); ret = 
nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 18bde9d8e6d6..23ef04b4e0b2 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -229,7 +229,7 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_del = nv84_fence_context_del; priv->base.contexts = fifo->nr; - priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); priv->base.uevent = true; /* Use VRAM if there is any ; otherwise fallback to system memory */ diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 5f3e5ad99de7..84995ebc6ffc 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -31,7 +31,7 @@ * Definitions taken from spice-protocol, plus kernel driver specific bits. */ -#include +#include #include #include #include @@ -190,7 +190,7 @@ enum { * spice-protocol/qxl_dev.h */ #define QXL_MAX_RES 96 struct qxl_release { - struct fence base; + struct dma_fence base; int id; int type; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index cd83f050cf3e..50b4e522f05f 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -21,7 +21,7 @@ */ #include "qxl_drv.h" #include "qxl_object.h" -#include +#include /* * drawable cmd cache - allocate a bunch of VRAM pages, suballocate @@ -40,23 +40,24 @@ static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; -static const char *qxl_get_driver_name(struct fence *fence) +static const char *qxl_get_driver_name(struct dma_fence *fence) { return "qxl"; } -static const char *qxl_get_timeline_name(struct fence *fence) +static const char *qxl_get_timeline_name(struct dma_fence *fence) { return "release"; } -static bool qxl_nop_signaling(struct fence *fence) +static bool qxl_nop_signaling(struct dma_fence *fence) { /* fences are always automatically signaled, so just pretend we did this.. 
*/ return true; } -static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout) +static long qxl_fence_wait(struct dma_fence *fence, bool intr, + signed long timeout) { struct qxl_device *qdev; struct qxl_release *release; @@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout) retry: sc++; - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) goto signaled; qxl_io_notify_oom(qdev); @@ -80,11 +81,11 @@ retry: if (!qxl_queue_garbage_collect(qdev, true)) break; - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) goto signaled; } - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) goto signaled; if (have_drawable_releases || sc < 4) { @@ -96,9 +97,9 @@ retry: return 0; if (have_drawable_releases && sc > 300) { - FENCE_WARN(fence, "failed to wait on release %llu " - "after spincount %d\n", - fence->context & ~0xf0000000, sc); + DMA_FENCE_WARN(fence, "failed to wait on release %llu " + "after spincount %d\n", + fence->context & ~0xf0000000, sc); goto signaled; } goto retry; @@ -115,7 +116,7 @@ signaled: return end - cur; } -static const struct fence_ops qxl_fence_ops = { +static const struct dma_fence_ops qxl_fence_ops = { .get_driver_name = qxl_get_driver_name, .get_timeline_name = qxl_get_timeline_name, .enable_signaling = qxl_nop_signaling, @@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev, WARN_ON(list_empty(&release->bos)); qxl_release_free_list(release); - fence_signal(&release->base); - fence_put(&release->base); + dma_fence_signal(&release->base); + dma_fence_put(&release->base); } else { qxl_release_free_list(release); kfree(release); @@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) * Since we never really allocated a context and we don't want to conflict, * set the highest bits. This will break if we really allow exporting of dma-bufs. 
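
qxl replaces ->wait wholesale; the contract for a custom wait is to return the jiffies remaining on success, 0 on timeout, or a negative error if interrupted. A hedged sketch of that shape (toy_kick_gc() is hypothetical, standing in for qxl's notify/garbage-collect kicks; intr handling omitted):

static signed long toy_fence_wait(struct dma_fence *f, bool intr,
				  signed long timeout)
{
	unsigned long cur, end = jiffies + timeout;

	while (!dma_fence_is_signaled(f)) {
		if (time_after(jiffies, end))
			return 0;	/* 0 means the wait timed out */
		toy_kick_gc();		/* hypothetical progress kick */
	}
	cur = jiffies;
	if (time_after_eq(cur, end))
		return 0;
	return end - cur;		/* jiffies remaining */
}
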
*/ - fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, - release->id | 0xf0000000, release->base.seqno); - trace_fence_emit(&release->base); + dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, + release->id | 0xf0000000, release->base.seqno); + trace_dma_fence_emit(&release->base); driver = bdev->driver; glob = bo->glob; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 1b0dcad916b0..44e0c5ed6418 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -66,7 +66,7 @@ #include #include #include -#include +#include #include #include @@ -367,7 +367,7 @@ struct radeon_fence_driver { }; struct radeon_fence { - struct fence base; + struct dma_fence base; struct radeon_device *rdev; uint64_t seq; @@ -746,7 +746,7 @@ struct radeon_flip_work { uint64_t base; struct drm_pending_vblank_event *event; struct radeon_bo *old_rbo; - struct fence *fence; + struct dma_fence *fence; bool async; }; @@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); /* * Cast helper */ -extern const struct fence_ops radeon_fence_ops; +extern const struct dma_fence_ops radeon_fence_ops; -static inline struct radeon_fence *to_radeon_fence(struct fence *f) +static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f) { struct radeon_fence *__f = container_of(f, struct radeon_fence, base); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index eb92aef46e3c..36b7ac7e57e5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1320,7 +1320,7 @@ int radeon_device_init(struct radeon_device *rdev, for (i = 0; i < RADEON_NUM_RINGS; i++) { rdev->ring[i].idx = i; } - rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); + rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS); DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", radeon_family_name[rdev->family], pdev->vendor, pdev->device, diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index cdb8cb568c15..e7409e8a9f87 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work) down_read(&rdev->exclusive_lock); } } else - r = fence_wait(work->fence, false); + r = dma_fence_wait(work->fence, false); if (r) DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); @@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work) * confused about which BO the CRTC is scanning out */ - fence_put(work->fence); + dma_fence_put(work->fence); work->fence = NULL; } @@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); + work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); radeon_bo_unreserve(new_rbo); @@ -617,7 +617,7 @@ pflip_cleanup: cleanup: drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); - fence_put(work->fence); + dma_fence_put(work->fence); kfree(work); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7ef075acde9c..ef09f0a63754 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ 
b/drivers/gpu/drm/radeon/radeon_fence.c @@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev, (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring]; (*fence)->ring = ring; (*fence)->is_vm_update = false; - fence_init(&(*fence)->base, &radeon_fence_ops, - &rdev->fence_queue.lock, rdev->fence_context + ring, seq); + dma_fence_init(&(*fence)->base, &radeon_fence_ops, + &rdev->fence_queue.lock, + rdev->fence_context + ring, + seq); radeon_fence_ring_emit(rdev, ring, *fence); trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); radeon_fence_schedule_check(rdev, ring); @@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl */ seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); if (seq >= fence->seq) { - int ret = fence_signal_locked(&fence->base); + int ret = dma_fence_signal_locked(&fence->base); if (!ret) - FENCE_TRACE(&fence->base, "signaled from irq context\n"); + DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n"); else - FENCE_TRACE(&fence->base, "was already signaled\n"); + DMA_FENCE_TRACE(&fence->base, "was already signaled\n"); radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); - fence_put(&fence->base); + dma_fence_put(&fence->base); } else - FENCE_TRACE(&fence->base, "pending\n"); + DMA_FENCE_TRACE(&fence->base, "pending\n"); return 0; } @@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev, return false; } -static bool radeon_fence_is_signaled(struct fence *f) +static bool radeon_fence_is_signaled(struct dma_fence *f) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; @@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f) * to fence_queue that checks if this fence is signaled, and if so it * signals the fence and removes itself. */ -static bool radeon_fence_enable_signaling(struct fence *f) +static bool radeon_fence_enable_signaling(struct dma_fence *f) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; @@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f) fence->fence_wake.private = NULL; fence->fence_wake.func = radeon_fence_check_signaled; __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); - fence_get(f); + dma_fence_get(f); - FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); + DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); return true; } @@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence) if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { int ret; - ret = fence_signal(&fence->base); + ret = dma_fence_signal(&fence->base); if (!ret) - FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); + DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); return true; } return false; @@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo * exclusive_lock is not held in that case. 
*/ if (WARN_ON_ONCE(!to_radeon_fence(&fence->base))) - return fence_wait(&fence->base, intr); + return dma_fence_wait(&fence->base, intr); seq[fence->ring] = fence->seq; r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); @@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo return r; } - r_sig = fence_signal(&fence->base); + r_sig = dma_fence_signal(&fence->base); if (!r_sig) - FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); + DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); return r; } @@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) */ struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) { - fence_get(&fence->base); + dma_fence_get(&fence->base); return fence; } @@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence) *fence = NULL; if (tmp) { - fence_put(&tmp->base); + dma_fence_put(&tmp->base); } } @@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev) #endif } -static const char *radeon_fence_get_driver_name(struct fence *fence) +static const char *radeon_fence_get_driver_name(struct dma_fence *fence) { return "radeon"; } -static const char *radeon_fence_get_timeline_name(struct fence *f) +static const char *radeon_fence_get_timeline_name(struct dma_fence *f) { struct radeon_fence *fence = to_radeon_fence(f); switch (fence->ring) { @@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f) static inline bool radeon_test_signaled(struct radeon_fence *fence) { - return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); + return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } struct radeon_wait_cb { - struct fence_cb base; + struct dma_fence_cb base; struct task_struct *task; }; static void -radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) +radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct radeon_wait_cb *wait = container_of(cb, struct radeon_wait_cb, base); @@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) wake_up_process(wait->task); } -static signed long radeon_fence_default_wait(struct fence *f, bool intr, +static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr, signed long t) { struct radeon_fence *fence = to_radeon_fence(f); @@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr, cb.task = current; - if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) + if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) return t; while (t > 0) { @@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr, } __set_current_state(TASK_RUNNING); - fence_remove_callback(f, &cb.base); + dma_fence_remove_callback(f, &cb.base); return t; } -const struct fence_ops radeon_fence_ops = { +const struct dma_fence_ops radeon_fence_ops = { .get_driver_name = radeon_fence_get_driver_name, .get_timeline_name = radeon_fence_get_timeline_name, .enable_signaling = radeon_fence_enable_signaling, diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 02ac8a1de4ff..be5d7a38d3aa 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev, bool shared) { struct reservation_object_list *flist; - struct fence *f; + struct dma_fence *f; struct radeon_fence *fence; unsigned i; 
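
radeon_fence_default_wait above is the canonical dma_fence_cb pattern: embed a dma_fence_cb, install it with dma_fence_add_callback() (which returns an error if the fence has already signaled), sleep, then tear it down with dma_fence_remove_callback(). A condensed sketch under the same hypothetical toy naming:

struct toy_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void toy_fence_wake(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct toy_wait_cb *w = container_of(cb, struct toy_wait_cb, base);

	wake_up_process(w->task);	/* runs in the signaling context */
}

static signed long toy_wait(struct dma_fence *f, bool intr, signed long t)
{
	struct toy_wait_cb cb = { .task = current };

	if (dma_fence_add_callback(f, &cb.base, toy_fence_wake))
		return t;	/* already signaled; callback was not added */
	while (t > 0 && !dma_fence_is_signaled(f)) {
		set_current_state(intr ? TASK_INTERRUPTIBLE
				       : TASK_UNINTERRUPTIBLE);
		t = schedule_timeout(t);
		if (intr && signal_pending(current)) {
			t = -ERESTARTSYS;
			break;
		}
	}
	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);
	return t;
}
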
int r = 0; @@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev, if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); else if (f) - r = fence_wait(f, true); + r = dma_fence_wait(f, true); flist = reservation_object_get_list(resv); if (shared || !flist || r) @@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev, if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); else - r = fence_wait(f, true); + r = dma_fence_wait(f, true); if (r) break; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 0cd0e7bdee55..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, { int32_t *msg, msg_type, handle; unsigned img_size = 0; - struct fence *f; + struct dma_fence *f; void *ptr; int i, r; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index fc6217dfe401..915e0d1c316a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref) BUG_ON(!list_empty(&bo->ddestroy)); ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); - fence_put(bo->moving); + dma_fence_put(bo->moving); if (bo->resv == &bo->ttm_resv) reservation_object_fini(&bo->ttm_resv); mutex_destroy(&bo->wu_mutex); @@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) { struct reservation_object_list *fobj; - struct fence *fence; + struct dma_fence *fence; int i; fobj = reservation_object_get_list(bo->resv); fence = reservation_object_get_excl(bo->resv); if (fence && !fence->ops->signaled) - fence_enable_sw_signaling(fence); + dma_fence_enable_sw_signaling(fence); for (i = 0; fobj && i < fobj->shared_count; ++i) { fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(bo->resv)); if (!fence->ops->signaled) - fence_enable_sw_signaling(fence); + dma_fence_enable_sw_signaling(fence); } } @@ -792,11 +792,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { - struct fence *fence; + struct dma_fence *fence; int ret; spin_lock(&man->move_lock); - fence = fence_get(man->move); + fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); if (fence) { @@ -806,7 +806,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, if (unlikely(ret)) return ret; - fence_put(bo->moving); + dma_fence_put(bo->moving); bo->moving = fence; } @@ -1286,7 +1286,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_bo_global *glob = bdev->glob; - struct fence *fence; + struct dma_fence *fence; int ret; /* @@ -1309,12 +1309,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, spin_unlock(&glob->lru_lock); spin_lock(&man->move_lock); - fence = fence_get(man->move); + fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); if (fence) { - ret = fence_wait(fence, false); - fence_put(fence); + ret = dma_fence_wait(fence, false); + dma_fence_put(fence); if (ret) { if (allow_errors) { return ret; @@ -1343,7 +1343,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) mem_type); return ret; } - fence_put(man->move); + dma_fence_put(man->move); man->use_type = false; man->has_type = false; diff 
--git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index bf6e21655c57..d0459b392e5e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct fence *fence, + struct dma_fence *fence, bool evict, struct ttm_mem_reg *new_mem) { @@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * operation has completed. */ - fence_put(bo->moving); - bo->moving = fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) @@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, - struct fence *fence, bool evict, + struct dma_fence *fence, bool evict, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; @@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, * operation has completed. */ - fence_put(bo->moving); - bo->moving = fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) @@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, */ spin_lock(&from->move_lock); - if (!from->move || fence_is_later(fence, from->move)) { - fence_put(from->move); - from->move = fence_get(fence); + if (!from->move || dma_fence_is_later(fence, from->move)) { + dma_fence_put(from->move); + from->move = dma_fence_get(fence); } spin_unlock(&from->move_lock); ttm_bo_free_old_node(bo); - fence_put(bo->moving); - bo->moving = fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); } else { /** diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index a6ed9d5e5167..4748aedc933a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, /* * Quick non-stalling check for idle. */ - if (fence_is_signaled(bo->moving)) + if (dma_fence_is_signaled(bo->moving)) goto out_clear; /* @@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, goto out_unlock; up_read(&vma->vm_mm->mmap_sem); - (void) fence_wait(bo->moving, true); + (void) dma_fence_wait(bo->moving, true); goto out_unlock; } /* * Ordinary wait. */ - ret = fence_wait(bo->moving, true); + ret = dma_fence_wait(bo->moving, true); if (unlikely(ret != 0)) { ret = (ret != -ERESTARTSYS) ? 
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; @@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, } out_clear: - fence_put(bo->moving); + dma_fence_put(bo->moving); bo->moving = NULL; out_unlock: diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index a80717b35dc6..d35bc491e8de 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, EXPORT_SYMBOL(ttm_eu_reserve_buffers); void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, - struct list_head *list, struct fence *fence) + struct list_head *list, + struct dma_fence *fence) { struct ttm_validate_buffer *entry; struct ttm_buffer_object *bo; diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 5c57c1ffa1f9..488909a21ed8 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -28,56 +28,57 @@ #define VGEM_FENCE_TIMEOUT (10*HZ) struct vgem_fence { - struct fence base; + struct dma_fence base; struct spinlock lock; struct timer_list timer; }; -static const char *vgem_fence_get_driver_name(struct fence *fence) +static const char *vgem_fence_get_driver_name(struct dma_fence *fence) { return "vgem"; } -static const char *vgem_fence_get_timeline_name(struct fence *fence) +static const char *vgem_fence_get_timeline_name(struct dma_fence *fence) { return "unbound"; } -static bool vgem_fence_signaled(struct fence *fence) +static bool vgem_fence_signaled(struct dma_fence *fence) { return false; } -static bool vgem_fence_enable_signaling(struct fence *fence) +static bool vgem_fence_enable_signaling(struct dma_fence *fence) { return true; } -static void vgem_fence_release(struct fence *base) +static void vgem_fence_release(struct dma_fence *base) { struct vgem_fence *fence = container_of(base, typeof(*fence), base); del_timer_sync(&fence->timer); - fence_free(&fence->base); + dma_fence_free(&fence->base); } -static void vgem_fence_value_str(struct fence *fence, char *str, int size) +static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) { snprintf(str, size, "%u", fence->seqno); } -static void vgem_fence_timeline_value_str(struct fence *fence, char *str, +static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0); + snprintf(str, size, "%u", + dma_fence_is_signaled(fence) ? 
fence->seqno : 0); } -static const struct fence_ops vgem_fence_ops = { +static const struct dma_fence_ops vgem_fence_ops = { .get_driver_name = vgem_fence_get_driver_name, .get_timeline_name = vgem_fence_get_timeline_name, .enable_signaling = vgem_fence_enable_signaling, .signaled = vgem_fence_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = vgem_fence_release, .fence_value_str = vgem_fence_value_str, @@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data) { struct vgem_fence *fence = (struct vgem_fence *)data; - fence_signal(&fence->base); + dma_fence_signal(&fence->base); } -static struct fence *vgem_fence_create(struct vgem_file *vfile, - unsigned int flags) +static struct dma_fence *vgem_fence_create(struct vgem_file *vfile, + unsigned int flags) { struct vgem_fence *fence; @@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile, return NULL; spin_lock_init(&fence->lock); - fence_init(&fence->base, &vgem_fence_ops, &fence->lock, - fence_context_alloc(1), 1); + dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock, + dma_fence_context_alloc(1), 1); setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence); @@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, struct vgem_file *vfile = file->driver_priv; struct reservation_object *resv; struct drm_gem_object *obj; - struct fence *fence; + struct dma_fence *fence; int ret; if (arg->flags & ~VGEM_FENCE_WRITE) @@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, } err_fence: if (ret) { - fence_signal(fence); - fence_put(fence); + dma_fence_signal(fence); + dma_fence_put(fence); } err: drm_gem_object_unreference_unlocked(obj); @@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, { struct vgem_file *vfile = file->driver_priv; struct drm_vgem_fence_signal *arg = data; - struct fence *fence; + struct dma_fence *fence; int ret = 0; if (arg->flags) @@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, if (IS_ERR(fence)) return PTR_ERR(fence); - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) ret = -ETIMEDOUT; - fence_signal(fence); - fence_put(fence); + dma_fence_signal(fence); + dma_fence_put(fence); return ret; } @@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile) static int __vgem_fence_idr_fini(int id, void *p, void *data) { - fence_signal(p); - fence_put(p); + dma_fence_signal(p); + dma_fence_put(p); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index ae59080d63d1..ec1ebdcfe80b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -82,7 +82,7 @@ struct virtio_gpu_fence_driver { }; struct virtio_gpu_fence { - struct fence f; + struct dma_fence f; struct virtio_gpu_fence_driver *drv; struct list_head node; uint64_t seq; diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index f3f70fa8a4c7..23353521f903 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -26,22 +26,22 @@ #include #include "virtgpu_drv.h" -static const char *virtio_get_driver_name(struct fence *f) +static const char *virtio_get_driver_name(struct dma_fence *f) { return "virtio_gpu"; } -static const char *virtio_get_timeline_name(struct fence *f) +static const char *virtio_get_timeline_name(struct dma_fence *f) { return "controlq"; } -static bool virtio_enable_signaling(struct fence *f) +static bool 
virtio_enable_signaling(struct dma_fence *f) { return true; } -static bool virtio_signaled(struct fence *f) +static bool virtio_signaled(struct dma_fence *f) { struct virtio_gpu_fence *fence = to_virtio_fence(f); @@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f) return false; } -static void virtio_fence_value_str(struct fence *f, char *str, int size) +static void virtio_fence_value_str(struct dma_fence *f, char *str, int size) { struct virtio_gpu_fence *fence = to_virtio_fence(f); snprintf(str, size, "%llu", fence->seq); } -static void virtio_timeline_value_str(struct fence *f, char *str, int size) +static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size) { struct virtio_gpu_fence *fence = to_virtio_fence(f); snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); } -static const struct fence_ops virtio_fence_ops = { +static const struct dma_fence_ops virtio_fence_ops = { .get_driver_name = virtio_get_driver_name, .get_timeline_name = virtio_get_timeline_name, .enable_signaling = virtio_enable_signaling, .signaled = virtio_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .fence_value_str = virtio_fence_value_str, .timeline_value_str = virtio_timeline_value_str, }; @@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, spin_lock_irqsave(&drv->lock, irq_flags); (*fence)->drv = drv; (*fence)->seq = ++drv->sync_seq; - fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, - drv->context, (*fence)->seq); - fence_get(&(*fence)->f); + dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, + drv->context, (*fence)->seq); + dma_fence_get(&(*fence)->f); list_add_tail(&(*fence)->node, &drv->fences); spin_unlock_irqrestore(&drv->lock, irq_flags); @@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, list_for_each_entry_safe(fence, tmp, &drv->fences, node) { if (last_seq < fence->seq) continue; - fence_signal_locked(&fence->f); + dma_fence_signal_locked(&fence->f); list_del(&fence->node); - fence_put(&fence->f); + dma_fence_put(&fence->f); } spin_unlock_irqrestore(&drv->lock, irq_flags); } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 818478b4c4f0..61f3a963af95 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, /* fence the command bo */ virtio_gpu_unref_list(&validate_list); drm_free_large(buflist); - fence_put(&fence->f); + dma_fence_put(&fence->f); return 0; out_unresv: @@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, drm_gem_object_release(obj); if (vgdev->has_virgl_3d) { virtio_gpu_unref_list(&validate_list); - fence_put(&fence->f); + dma_fence_put(&fence->f); } return ret; } @@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, if (vgdev->has_virgl_3d) { virtio_gpu_unref_list(&validate_list); - fence_put(&fence->f); + dma_fence_put(&fence->f); } return 0; fail_unref: if (vgdev->has_virgl_3d) { virtio_gpu_unref_list(&validate_list); - fence_put(&fence->f); + dma_fence_put(&fence->f); } //fail_obj: // drm_gem_object_handle_unreference_unlocked(obj); @@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, reservation_object_add_excl_fence(qobj->tbo.resv, &fence->f); - fence_put(&fence->f); + dma_fence_put(&fence->f); out_unres: 
virtio_gpu_object_unreserve(qobj); out: @@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, args->level, &box, &fence); reservation_object_add_excl_fence(qobj->tbo.resv, &fence->f); - fence_put(&fence->f); + dma_fence_put(&fence->f); } out_unres: diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 036b0fbae0fb..1235519853f4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func); virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func); - vgdev->fence_drv.context = fence_context_alloc(1); + vgdev->fence_drv.context = dma_fence_context_alloc(1); spin_lock_init(&vgdev->fence_drv.lock); INIT_LIST_HEAD(&vgdev->fence_drv.fences); INIT_LIST_HEAD(&vgdev->cap_cache); diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index ba28c0f6f28a..cb75f0663ba0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (!ret) { reservation_object_add_excl_fence(bo->tbo.resv, &fence->f); - fence_put(&fence->f); + dma_fence_put(&fence->f); fence = NULL; virtio_gpu_object_unreserve(bo); virtio_gpu_object_wait(bo, false); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 26ac8e80a478..6541dd8b82dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence) * objects with actions attached to them. 
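The virtio hunks above show the other common lifecycle: every fence lives on one device-global timeline context, is initialized under a single driver lock, and is signaled in seqno order when the device reports progress. Condensed into a sketch (hypothetical demo_* names; the dma_fence_ops definition is omitted, and the fences were dma_fence_init()ed with &drv->lock, which is why dma_fence_signal_locked() is legal below):

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_fence_driver {
	spinlock_t lock;	/* also passed to dma_fence_init() */
	u64 context;		/* from dma_fence_context_alloc(1) */
	u64 sync_seq;		/* last seqno handed out */
	struct list_head fences;
};

struct demo_fence {
	struct dma_fence f;
	struct list_head node;
	u64 seq;
};

/* Emit: allocate, stamp with the next seqno, keep one reference queued. */
static struct demo_fence *demo_emit(struct demo_fence_driver *drv,
				    const struct dma_fence_ops *ops)
{
	struct demo_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	unsigned long flags;

	if (!fence)
		return NULL;
	spin_lock_irqsave(&drv->lock, flags);
	fence->seq = ++drv->sync_seq;
	dma_fence_init(&fence->f, ops, &drv->lock, drv->context, fence->seq);
	dma_fence_get(&fence->f);		/* reference for the list */
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, flags);
	return fence;
}

/* Completion path: signal everything up to what the device finished. */
static void demo_process(struct demo_fence_driver *drv, u64 last_seq)
{
	struct demo_fence *fence, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&drv->lock, flags);
	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
		if (last_seq < fence->seq)
			continue;
		dma_fence_signal_locked(&fence->f);
		list_del(&fence->node);
		dma_fence_put(&fence->f);
	}
	spin_unlock_irqrestore(&drv->lock, flags);
}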
*/ -static void vmw_fence_obj_destroy(struct fence *f) +static void vmw_fence_obj_destroy(struct dma_fence *f) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); @@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f) fence->destroy(fence); } -static const char *vmw_fence_get_driver_name(struct fence *f) +static const char *vmw_fence_get_driver_name(struct dma_fence *f) { return "vmwgfx"; } -static const char *vmw_fence_get_timeline_name(struct fence *f) +static const char *vmw_fence_get_timeline_name(struct dma_fence *f) { return "svga"; } -static bool vmw_fence_enable_signaling(struct fence *f) +static bool vmw_fence_enable_signaling(struct dma_fence *f) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); @@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f) } struct vmwgfx_wait_cb { - struct fence_cb base; + struct dma_fence_cb base; struct task_struct *task; }; static void -vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb) +vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct vmwgfx_wait_cb *wait = container_of(cb, struct vmwgfx_wait_cb, base); @@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb) static void __vmw_fences_update(struct vmw_fence_manager *fman); -static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout) +static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); @@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout) while (ret > 0) { __vmw_fences_update(fman); - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) break; if (intr) @@ -225,7 +225,7 @@ out: return ret; } -static struct fence_ops vmw_fence_ops = { +static struct dma_fence_ops vmw_fence_ops = { .get_driver_name = vmw_fence_get_driver_name, .get_timeline_name = vmw_fence_get_timeline_name, .enable_signaling = vmw_fence_enable_signaling, @@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) fman->event_fence_action_size = ttm_round_pot(sizeof(struct vmw_event_fence_action)); mutex_init(&fman->goal_irq_mutex); - fman->ctx = fence_context_alloc(1); + fman->ctx = dma_fence_context_alloc(1); return fman; } @@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman, unsigned long irq_flags; int ret = 0; - fence_init(&fence->base, &vmw_fence_ops, &fman->lock, - fman->ctx, seqno); + dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, + fman->ctx, seqno); INIT_LIST_HEAD(&fence->seq_passed_actions); fence->destroy = destroy; @@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) u32 goal_seqno; u32 *fifo_mem; - if (fence_is_signaled_locked(&fence->base)) + if (dma_fence_is_signaled_locked(&fence->base)) return false; fifo_mem = fman->dev_priv->mmio_virt; @@ -459,7 +459,7 @@ rerun: list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { list_del_init(&fence->head); - fence_signal_locked(&fence->base); + dma_fence_signal_locked(&fence->base); INIT_LIST_HEAD(&action_list); list_splice_init(&fence->seq_passed_actions, &action_list); @@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) { struct vmw_fence_manager *fman = fman_from_fence(fence); - if (test_bit(FENCE_FLAG_SIGNALED_BIT, 
&fence->base.flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) return 1; vmw_fences_update(fman); - return fence_is_signaled(&fence->base); + return dma_fence_is_signaled(&fence->base); } int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy, bool interruptible, unsigned long timeout) { - long ret = fence_wait_timeout(&fence->base, interruptible, timeout); + long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout); if (likely(ret > 0)) return 0; @@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence) static void vmw_fence_destroy(struct vmw_fence_obj *fence) { - fence_free(&fence->base); + dma_fence_free(&fence->base); } int vmw_fence_create(struct vmw_fence_manager *fman, @@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) struct vmw_fence_obj *fence = list_entry(fman->fence_list.prev, struct vmw_fence_obj, head); - fence_get(&fence->base); + dma_fence_get(&fence->base); spin_unlock_irq(&fman->lock); ret = vmw_fence_obj_wait(fence, false, false, @@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) if (unlikely(ret != 0)) { list_del_init(&fence->head); - fence_signal(&fence->base); + dma_fence_signal(&fence->base); INIT_LIST_HEAD(&action_list); list_splice_init(&fence->seq_passed_actions, &action_list); @@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) } BUG_ON(!list_empty(&fence->head)); - fence_put(&fence->base); + dma_fence_put(&fence->base); spin_lock_irq(&fman->lock); } spin_unlock_irq(&fman->lock); @@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, spin_lock_irqsave(&fman->lock, irq_flags); fman->pending_actions[action->type]++; - if (fence_is_signaled_locked(&fence->base)) { + if (dma_fence_is_signaled_locked(&fence->base)) { struct list_head action_list; INIT_LIST_HEAD(&action_list); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 83ae301ee141..d9d85aa6ed20 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h @@ -27,7 +27,7 @@ #ifndef _VMWGFX_FENCE_H_ -#include <linux/fence.h> +#include <linux/dma-fence.h> #define VMW_FENCE_WAIT_TIMEOUT (5*HZ) @@ -52,7 +52,7 @@ struct vmw_fence_action { }; struct vmw_fence_obj { - struct fence base; + struct dma_fence base; struct list_head head; struct list_head seq_passed_actions; @@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) *fence_p = NULL; if (fence) - fence_put(&fence->base); + dma_fence_put(&fence->base); } static inline struct vmw_fence_obj * vmw_fence_obj_reference(struct vmw_fence_obj *fence) { if (fence) - fence_get(&fence->base); + dma_fence_get(&fence->base); return fence; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 1a85fb2d4dc6..8e86d6d4141b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, if (fence == NULL) { vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); reservation_object_add_excl_fence(bo->resv, &fence->base); - fence_put(&fence->base); + dma_fence_put(&fence->base); } else reservation_object_add_excl_fence(bo->resv, &fence->base); } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 672644031bd5..e336e3901876 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -57,7 +57,7 @@ #include #include #include -#include <linux/fence.h> +#include <linux/dma-fence.h> #include #include @@ -362,7 +362,7 @@
struct drm_ioctl_desc { struct drm_pending_event { struct completion *completion; struct drm_event *event; - struct fence *fence; + struct dma_fence *fence; struct list_head link; struct list_head pending_link; struct drm_file *file_priv; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index bc860cfc67ca..fa1aa214c8ea 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -54,7 +54,7 @@ struct drm_mode_set; struct drm_file; struct drm_clip_rect; struct device_node; -struct fence; +struct dma_fence; struct edid; static inline int64_t U642I64(uint64_t val) diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 98b39d66eb32..c5e8a0df1623 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -59,7 +59,7 @@ struct drm_plane_state { struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ - struct fence *fence; + struct dma_fence *fence; /* Signed dest location allows it to be partially off screen */ int32_t crtc_x, crtc_y; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 9eb940d6755f..5beae7969bf7 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -209,7 +209,7 @@ struct ttm_buffer_object { * Members protected by a bo reservation. */ - struct fence *moving; + struct dma_fence *moving; struct drm_vma_offset_node vma_node; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 4f0a92185995..27e9c26c9150 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -303,7 +303,7 @@ struct ttm_mem_type_manager { /* * Protected by @move_lock. */ - struct fence *move; + struct dma_fence *move; }; /** @@ -1025,7 +1025,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); */ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct fence *fence, bool evict, + struct dma_fence *fence, bool evict, struct ttm_mem_reg *new_mem); /** @@ -1040,7 +1040,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * immediately or hang it on a temporary buffer object. 
*/ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, - struct fence *fence, bool evict, + struct dma_fence *fence, bool evict, struct ttm_mem_reg *new_mem); /** diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index b620c317c772..47f35b8e6d09 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -114,6 +114,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, struct list_head *list, - struct fence *fence); + struct dma_fence *fence); #endif diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index e0b0741ae671..8daeb3ce0016 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -30,7 +30,7 @@ #include #include #include -#include <linux/fence.h> +#include <linux/dma-fence.h> #include struct device; @@ -143,7 +143,7 @@ struct dma_buf { wait_queue_head_t poll; struct dma_buf_poll_cb_t { - struct fence_cb cb; + struct dma_fence_cb cb; wait_queue_head_t *poll; unsigned long active; diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h new file mode 100644 index 000000000000..5900945f962d --- /dev/null +++ b/include/linux/dma-fence-array.h @@ -0,0 +1,86 @@ +/* + * fence-array: aggregates fences to be waited on together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan + * Christian König + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_DMA_FENCE_ARRAY_H +#define __LINUX_DMA_FENCE_ARRAY_H + +#include <linux/dma-fence.h> + +/** + * struct dma_fence_array_cb - callback helper for fence array + * @cb: fence callback structure for signaling + * @array: reference to the parent fence array object + */ +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +/** + * struct dma_fence_array - fence to represent an array of fences + * @base: fence base class + * @lock: spinlock for fence handling + * @num_fences: number of fences in the array + * @num_pending: fences in the array still pending + * @fences: array of the fences + */ +struct dma_fence_array { + struct dma_fence base; + + spinlock_t lock; + unsigned num_fences; + atomic_t num_pending; + struct dma_fence **fences; +}; + +extern const struct dma_fence_ops dma_fence_array_ops; + +/** + * dma_fence_is_array - check if a fence is from the array subclass + * @fence: fence to test + * + * Return true if it is a dma_fence_array and false otherwise. + */ +static inline bool dma_fence_is_array(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_array_ops; +} + +/** + * to_dma_fence_array - cast a fence to a dma_fence_array + * @fence: fence to cast to a dma_fence_array + * + * Returns NULL if the fence is not a dma_fence_array, + * or the dma_fence_array otherwise.
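dma_fence_array_create(), declared just below, is the intended constructor for this type. A plausible use, merging two fences into one waitable fence (a sketch only, with an invented demo_merge helper; it assumes both references are owned by the caller and handed over, since the array consumes the fences[] array and the references it holds):

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *demo_merge(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;
	fences[0] = a;	/* the array takes over these references */
	fences[1] = b;

	/* signal_on_any == false: signal once *all* fences have signaled */
	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1, false);
	if (!array) {
		kfree(fences);
		return NULL;
	}
	return &array->base;
}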
+ */ +static inline struct dma_fence_array * +to_dma_fence_array(struct dma_fence *fence) +{ + if (fence->ops != &dma_fence_array_ops) + return NULL; + + return container_of(fence, struct dma_fence_array, base); +} + +struct dma_fence_array *dma_fence_array_create(int num_fences, + struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any); + +#endif /* __LINUX_DMA_FENCE_ARRAY_H */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h new file mode 100644 index 000000000000..ba60c043a5d3 --- /dev/null +++ b/include/linux/dma-fence.h @@ -0,0 +1,437 @@ +/* + * Fence mechanism for dma-buf to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_DMA_FENCE_H +#define __LINUX_DMA_FENCE_H + +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/kref.h> +#include <linux/sched.h> +#include <linux/printk.h> +#include <linux/rcupdate.h> + +struct dma_fence; +struct dma_fence_ops; +struct dma_fence_cb; + +/** + * struct dma_fence - software synchronization primitive + * @refcount: refcount for this fence + * @ops: dma_fence_ops associated with this fence + * @rcu: used for releasing fence with kfree_rcu + * @cb_list: list of all callbacks to call + * @lock: spin_lock_irqsave used for locking + * @context: execution context this fence belongs to, returned by + * dma_fence_context_alloc() + * @seqno: the sequence number of this fence inside the execution context, + * can be compared to decide which fence would be signaled later. + * @flags: A mask of DMA_FENCE_FLAG_* defined below + * @timestamp: Timestamp when the fence was signaled. + * @status: Optional, only valid if < 0, must be set before calling + * dma_fence_signal, indicates that the fence has completed with an error. + * + * the flags member must be manipulated and read using the appropriate + * atomic ops (bit_*), so taking the spinlock will not be needed most + * of the time. + * + * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* + * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the + * implementer of the fence for its own purposes. Can be used in different + * ways by different fence implementers, so do not rely on this. + * + * *) Since atomic bitops are used, this is not guaranteed to be the case. + * Particularly, if the bit was set, but dma_fence_signal was called right + * before this bit was set, it would have been able to set the + * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. + * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that + * after dma_fence_signal was called, any enable_signaling call will have either + * been completed, or never called at all.
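The bit ordering spelled out above is exactly what the core relies on when arming a fence. In spirit it looks like the following (a simplification of what dma_fence_add_callback() does, with the callback-list handling and locking stripped away; demo_arm is an invented name, and the caller is assumed to hold fence->lock):

static bool demo_arm(struct dma_fence *fence)
{
	bool was_set;

	/* Order matters: set ENABLE_SIGNAL first ... */
	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	/* ... then re-check SIGNALED to close the race described above. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;			/* already signaled */

	if (!was_set && !fence->ops->enable_signaling(fence)) {
		/* enable_signaling failed or the fence already passed */
		dma_fence_signal_locked(fence);
		return false;
	}

	return true;
}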
+ */ +struct dma_fence { + struct kref refcount; + const struct dma_fence_ops *ops; + struct rcu_head rcu; + struct list_head cb_list; + spinlock_t *lock; + u64 context; + unsigned seqno; + unsigned long flags; + ktime_t timestamp; + int status; +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ +}; + +typedef void (*dma_fence_func_t)(struct dma_fence *fence, + struct dma_fence_cb *cb); + +/** + * struct dma_fence_cb - callback for dma_fence_add_callback + * @node: used by dma_fence_add_callback to append this struct to fence::cb_list + * @func: dma_fence_func_t to call + * + * This struct will be initialized by dma_fence_add_callback, additional + * data can be passed along by embedding dma_fence_cb in another struct. + */ +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +/** + * struct dma_fence_ops - operations implemented for fence + * @get_driver_name: returns the driver name. + * @get_timeline_name: return the name of the context this fence belongs to. + * @enable_signaling: enable software signaling of fence. + * @signaled: [optional] peek whether the fence is signaled, can be null. + * @wait: custom wait implementation, or dma_fence_default_wait. + * @release: [optional] called on destruction of fence, can be null + * @fill_driver_data: [optional] callback to fill in free-form debug info + * Returns amount of bytes filled, or -errno. + * @fence_value_str: [optional] fills in the value of the fence as a string + * @timeline_value_str: [optional] fills in the current value of the timeline + * as a string + * + * Notes on enable_signaling: + * For fence implementations that have the capability for hw->hw + * signaling, they can implement this op to enable the necessary + * irqs, or insert commands into cmdstream, etc. This is called + * in the first wait() or add_callback() path to let the fence + * implementation know that there is another driver waiting on + * the signal (ie. hw->sw case). + * + * This function can be called from atomic context, but not + * from irq context, so normal spinlocks can be used. + * + * A return value of false indicates the fence already passed, + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. + * + * fence->status may be set in enable_signaling, but only when false is + * returned. + * + * Calling dma_fence_signal before enable_signaling is called allows + * for a tiny race window in which enable_signaling is called during, + * before, or after dma_fence_signal. To fight this, it is recommended + * that before enable_signaling returns true an extra reference is + * taken on the fence, to be released when the fence is signaled. + * This will mean dma_fence_signal will still be called twice, but + * the second time will be a noop since it was already signaled. + * + * Notes on signaled: + * May set fence->status if returning true. + * + * Notes on wait: + * Must not be NULL, set to dma_fence_default_wait for default implementation. + * The dma_fence_default_wait implementation should work for any fence, as long + * as enable_signaling works correctly. + * + * Must return -ERESTARTSYS if the wait is intr = true and the wait was + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait + * timed out. Can also return other error values on custom implementations, + * which should be treated as if the fence is signaled.
For example a hardware + lockup could be reported like that. + * + * Notes on release: + * Can be NULL, this function allows additional commands to run on + * destruction of the fence. Can be called from irq context. + * If pointer is set to NULL, kfree will get called instead. + */ + +struct dma_fence_ops { + const char * (*get_driver_name)(struct dma_fence *fence); + const char * (*get_timeline_name)(struct dma_fence *fence); + bool (*enable_signaling)(struct dma_fence *fence); + bool (*signaled)(struct dma_fence *fence); + signed long (*wait)(struct dma_fence *fence, + bool intr, signed long timeout); + void (*release)(struct dma_fence *fence); + + int (*fill_driver_data)(struct dma_fence *fence, void *data, int size); + void (*fence_value_str)(struct dma_fence *fence, char *str, int size); + void (*timeline_value_str)(struct dma_fence *fence, + char *str, int size); +}; + +void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, unsigned seqno); + +void dma_fence_release(struct kref *kref); +void dma_fence_free(struct dma_fence *fence); + +/** + * dma_fence_put - decreases refcount of the fence + * @fence: [in] fence to reduce refcount of + */ +static inline void dma_fence_put(struct dma_fence *fence) +{ + if (fence) + kref_put(&fence->refcount, dma_fence_release); +} + +/** + * dma_fence_get - increases refcount of the fence + * @fence: [in] fence to increase refcount of + * + * Returns the same fence, with refcount increased by 1. + */ +static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) +{ + if (fence) + kref_get(&fence->refcount); + return fence; +} + +/** + * dma_fence_get_rcu - get a fence from a reservation_object_list with + * rcu read lock + * @fence: [in] fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. + */ +static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) +{ + if (kref_get_unless_zero(&fence->refcount)) + return fence; + else + return NULL; +} + +/** + * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence + * @fence: [in] pointer to fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. + * This function handles acquiring a reference to a fence that may be + * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU), + * so long as the caller is using RCU on the pointer to the fence. + * + * An alternative mechanism is to employ a seqlock to protect a bunch of + * fences, such as used by struct reservation_object. When using a seqlock, + * the seqlock must be taken before and checked after a reference to the + * fence is acquired (as shown here). + * + * The caller is required to hold the RCU read lock. + */ +static inline struct dma_fence * +dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) +{ + do { + struct dma_fence *fence; + + fence = rcu_dereference(*fencep); + if (!fence || !dma_fence_get_rcu(fence)) + return NULL; + + /* The atomic_inc_not_zero() inside dma_fence_get_rcu() + * provides a full memory barrier upon success (such as now). + * This is paired with the write barrier from assigning + * to the __rcu protected fence pointer so that if that + * pointer still matches the current fence, we know we + * have successfully acquired a reference to it. If it no + * longer matches, we are holding a reference to some other + * reallocated pointer.
This is possible if the allocator + * is using a freelist like SLAB_DESTROY_BY_RCU where the + * fence remains valid for the RCU grace period, but it + * may be reallocated. When using such allocators, we are + * responsible for ensuring the reference we get is to + * the right fence, as below. + */ + if (fence == rcu_access_pointer(*fencep)) + return rcu_pointer_handoff(fence); + + dma_fence_put(fence); + } while (1); +} + +int dma_fence_signal(struct dma_fence *fence); +int dma_fence_signal_locked(struct dma_fence *fence); +signed long dma_fence_default_wait(struct dma_fence *fence, + bool intr, signed long timeout); +int dma_fence_add_callback(struct dma_fence *fence, + struct dma_fence_cb *cb, + dma_fence_func_t func); +bool dma_fence_remove_callback(struct dma_fence *fence, + struct dma_fence_cb *cb); +void dma_fence_enable_sw_signaling(struct dma_fence *fence); + +/** + * dma_fence_is_signaled_locked - Return an indication if the fence + * is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if dma_fence_add_callback, dma_fence_wait or + * dma_fence_enable_sw_signaling haven't been called before. + * + * This function requires fence->lock to be held. + */ +static inline bool +dma_fence_is_signaled_locked(struct dma_fence *fence) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + dma_fence_signal_locked(fence); + return true; + } + + return false; +} + +/** + * dma_fence_is_signaled - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if dma_fence_add_callback, dma_fence_wait or + * dma_fence_enable_sw_signaling haven't been called before. + * + * It's recommended for seqno fences to call dma_fence_signal when the + * operation is complete, it makes it possible to prevent issues from + * wraparound between time of issue and time of use by checking the return + * value of this function before calling hardware-specific wait instructions. + */ +static inline bool +dma_fence_is_signaled(struct dma_fence *fence) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + dma_fence_signal(fence); + return true; + } + + return false; +} + +/** + * dma_fence_is_later - return if f1 is chronologically later than f2 + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not re-used across contexts. + */ +static inline bool dma_fence_is_later(struct dma_fence *f1, + struct dma_fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return false; + + return (int)(f1->seqno - f2->seqno) > 0; +} + +/** + * dma_fence_later - return the chronologically later fence + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns NULL if both fences are signaled, otherwise the fence that would be + * signaled last. Both fences must be from the same context, since a seqno is + * not re-used across contexts. 
+ */ +static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, + struct dma_fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return NULL; + + /* + * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never + * have been set if enable_signaling wasn't called, and enabling that + * here is overkill. + */ + if (dma_fence_is_later(f1, f2)) + return dma_fence_is_signaled(f1) ? NULL : f1; + else + return dma_fence_is_signaled(f2) ? NULL : f2; +} + +signed long dma_fence_wait_timeout(struct dma_fence *, + bool intr, signed long timeout); +signed long dma_fence_wait_any_timeout(struct dma_fence **fences, + uint32_t count, + bool intr, signed long timeout); + +/** + * dma_fence_wait - sleep until the fence gets signaled + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * + * This function will return -ERESTARTSYS if interrupted by a signal, + * or 0 if the fence was signaled. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly holds a reference to the fence, otherwise the + * fence might be freed before return, resulting in undefined behavior. + */ +static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) +{ + signed long ret; + + /* Since dma_fence_wait_timeout cannot timeout with + * MAX_SCHEDULE_TIMEOUT, only valid return values are + * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. + */ + ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); + + return ret < 0 ? ret : 0; +} + +u64 dma_fence_context_alloc(unsigned num); + +#define DMA_FENCE_TRACE(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ + pr_info("f %llu#%u: " fmt, \ + __ff->context, __ff->seqno, ##args); \ + } while (0) + +#define DMA_FENCE_WARN(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#define DMA_FENCE_ERR(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#endif /* __LINUX_DMA_FENCE_H */ diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h deleted file mode 100644 index 9ea2bde10ac1..000000000000 --- a/include/linux/fence-array.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * fence-array: aggregates fence to be waited together - * - * Copyright (C) 2016 Collabora Ltd - * Copyright (C) 2016 Advanced Micro Devices, Inc. - * Authors: - * Gustavo Padovan - * Christian König - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
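Taken together, the new header boils down to a small per-driver recipe. A minimal, self-contained sketch of a software-only timeline using only what dma-fence.h exports (all demo_* names are invented; the unimplemented ops are the [optional] ones, and since no .release is provided, the final dma_fence_put() falls back to kfree() as documented above):

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static const char *demo_driver_name(struct dma_fence *f)
{
	return "demo";
}

static const char *demo_timeline_name(struct dma_fence *f)
{
	return "demo-timeline";
}

/* Purely software timeline: nothing to arm, signaling always "enabled". */
static bool demo_enable_signaling(struct dma_fence *f)
{
	return true;
}

static const struct dma_fence_ops demo_fence_ops = {
	.get_driver_name = demo_driver_name,
	.get_timeline_name = demo_timeline_name,
	.enable_signaling = demo_enable_signaling,
	.wait = dma_fence_default_wait,		/* must not be NULL */
};

static int demo_once(void)
{
	struct dma_fence *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	dma_fence_init(fence, &demo_fence_ops, &demo_lock,
		       dma_fence_context_alloc(1), 1);

	dma_fence_signal(fence);		/* normally from a completion irq */
	ret = dma_fence_wait(fence, false);	/* 0: already signaled */

	dma_fence_put(fence);	/* default release kfree()s the fence */
	return ret;
}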
- */ - -#ifndef __LINUX_FENCE_ARRAY_H -#define __LINUX_FENCE_ARRAY_H - -#include - -/** - * struct fence_array_cb - callback helper for fence array - * @cb: fence callback structure for signaling - * @array: reference to the parent fence array object - */ -struct fence_array_cb { - struct fence_cb cb; - struct fence_array *array; -}; - -/** - * struct fence_array - fence to represent an array of fences - * @base: fence base class - * @lock: spinlock for fence handling - * @num_fences: number of fences in the array - * @num_pending: fences in the array still pending - * @fences: array of the fences - */ -struct fence_array { - struct fence base; - - spinlock_t lock; - unsigned num_fences; - atomic_t num_pending; - struct fence **fences; -}; - -extern const struct fence_ops fence_array_ops; - -/** - * fence_is_array - check if a fence is from the array subsclass - * @fence: fence to test - * - * Return true if it is a fence_array and false otherwise. - */ -static inline bool fence_is_array(struct fence *fence) -{ - return fence->ops == &fence_array_ops; -} - -/** - * to_fence_array - cast a fence to a fence_array - * @fence: fence to cast to a fence_array - * - * Returns NULL if the fence is not a fence_array, - * or the fence_array otherwise. - */ -static inline struct fence_array *to_fence_array(struct fence *fence) -{ - if (fence->ops != &fence_array_ops) - return NULL; - - return container_of(fence, struct fence_array, base); -} - -struct fence_array *fence_array_create(int num_fences, struct fence **fences, - u64 context, unsigned seqno, - bool signal_on_any); - -#endif /* __LINUX_FENCE_ARRAY_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h deleted file mode 100644 index c9c5ba98c302..000000000000 --- a/include/linux/fence.h +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Fence mechanism for dma-buf to allow for asynchronous dma access - * - * Copyright (C) 2012 Canonical Ltd - * Copyright (C) 2012 Texas Instruments - * - * Authors: - * Rob Clark - * Maarten Lankhorst - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __LINUX_FENCE_H -#define __LINUX_FENCE_H - -#include -#include -#include -#include -#include -#include -#include -#include - -struct fence; -struct fence_ops; -struct fence_cb; - -/** - * struct fence - software synchronization primitive - * @refcount: refcount for this fence - * @ops: fence_ops associated with this fence - * @rcu: used for releasing fence with kfree_rcu - * @cb_list: list of all callbacks to call - * @lock: spin_lock_irqsave used for locking - * @context: execution context this fence belongs to, returned by - * fence_context_alloc() - * @seqno: the sequence number of this fence inside the execution context, - * can be compared to decide which fence would be signaled later. - * @flags: A mask of FENCE_FLAG_* defined below - * @timestamp: Timestamp when the fence was signaled. - * @status: Optional, only valid if < 0, must be set before calling - * fence_signal, indicates that the fence has completed with an error. 
- * - * the flags member must be manipulated and read using the appropriate - * atomic ops (bit_*), so taking the spinlock will not be needed most - * of the time. - * - * FENCE_FLAG_SIGNALED_BIT - fence is already signaled - * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* - * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the - * implementer of the fence for its own purposes. Can be used in different - * ways by different fence implementers, so do not rely on this. - * - * Since atomic bitops are used, this is not guaranteed to be the case. - * Particularly, if the bit was set, but fence_signal was called right - * before this bit was set, it would have been able to set the - * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. - * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting - * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that - * after fence_signal was called, any enable_signaling call will have either - * been completed, or never called at all. - */ -struct fence { - struct kref refcount; - const struct fence_ops *ops; - struct rcu_head rcu; - struct list_head cb_list; - spinlock_t *lock; - u64 context; - unsigned seqno; - unsigned long flags; - ktime_t timestamp; - int status; -}; - -enum fence_flag_bits { - FENCE_FLAG_SIGNALED_BIT, - FENCE_FLAG_ENABLE_SIGNAL_BIT, - FENCE_FLAG_USER_BITS, /* must always be last member */ -}; - -typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); - -/** - * struct fence_cb - callback for fence_add_callback - * @node: used by fence_add_callback to append this struct to fence::cb_list - * @func: fence_func_t to call - * - * This struct will be initialized by fence_add_callback, additional - * data can be passed along by embedding fence_cb in another struct. - */ -struct fence_cb { - struct list_head node; - fence_func_t func; -}; - -/** - * struct fence_ops - operations implemented for fence - * @get_driver_name: returns the driver name. - * @get_timeline_name: return the name of the context this fence belongs to. - * @enable_signaling: enable software signaling of fence. - * @signaled: [optional] peek whether the fence is signaled, can be null. - * @wait: custom wait implementation, or fence_default_wait. - * @release: [optional] called on destruction of fence, can be null - * @fill_driver_data: [optional] callback to fill in free-form debug info - * Returns amount of bytes filled, or -errno. - * @fence_value_str: [optional] fills in the value of the fence as a string - * @timeline_value_str: [optional] fills in the current value of the timeline - * as a string - * - * Notes on enable_signaling: - * For fence implementations that have the capability for hw->hw - * signaling, they can implement this op to enable the necessary - * irqs, or insert commands into cmdstream, etc. This is called - * in the first wait() or add_callback() path to let the fence - * implementation know that there is another driver waiting on - * the signal (ie. hw->sw case). - * - * This function can be called called from atomic context, but not - * from irq context, so normal spinlocks can be used. - * - * A return value of false indicates the fence already passed, - * or some failure occurred that made it impossible to enable - * signaling. True indicates successful enabling. - * - * fence->status may be set in enable_signaling, but only when false is - * returned. 
- * - * Calling fence_signal before enable_signaling is called allows - * for a tiny race window in which enable_signaling is called during, - * before, or after fence_signal. To fight this, it is recommended - * that before enable_signaling returns true an extra reference is - * taken on the fence, to be released when the fence is signaled. - * This will mean fence_signal will still be called twice, but - * the second time will be a noop since it was already signaled. - * - * Notes on signaled: - * May set fence->status if returning true. - * - * Notes on wait: - * Must not be NULL, set to fence_default_wait for default implementation. - * the fence_default_wait implementation should work for any fence, as long - * as enable_signaling works correctly. - * - * Must return -ERESTARTSYS if the wait is intr = true and the wait was - * interrupted, and remaining jiffies if fence has signaled, or 0 if wait - * timed out. Can also return other error values on custom implementations, - * which should be treated as if the fence is signaled. For example a hardware - * lockup could be reported like that. - * - * Notes on release: - * Can be NULL, this function allows additional commands to run on - * destruction of the fence. Can be called from irq context. - * If pointer is set to NULL, kfree will get called instead. - */ - -struct fence_ops { - const char * (*get_driver_name)(struct fence *fence); - const char * (*get_timeline_name)(struct fence *fence); - bool (*enable_signaling)(struct fence *fence); - bool (*signaled)(struct fence *fence); - signed long (*wait)(struct fence *fence, bool intr, signed long timeout); - void (*release)(struct fence *fence); - - int (*fill_driver_data)(struct fence *fence, void *data, int size); - void (*fence_value_str)(struct fence *fence, char *str, int size); - void (*timeline_value_str)(struct fence *fence, char *str, int size); -}; - -void fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno); - -void fence_release(struct kref *kref); -void fence_free(struct fence *fence); - -/** - * fence_put - decreases refcount of the fence - * @fence: [in] fence to reduce refcount of - */ -static inline void fence_put(struct fence *fence) -{ - if (fence) - kref_put(&fence->refcount, fence_release); -} - -/** - * fence_get - increases refcount of the fence - * @fence: [in] fence to increase refcount of - * - * Returns the same fence, with refcount increased by 1. - */ -static inline struct fence *fence_get(struct fence *fence) -{ - if (fence) - kref_get(&fence->refcount); - return fence; -} - -/** - * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock - * @fence: [in] fence to increase refcount of - * - * Function returns NULL if no refcount could be obtained, or the fence. - */ -static inline struct fence *fence_get_rcu(struct fence *fence) -{ - if (kref_get_unless_zero(&fence->refcount)) - return fence; - else - return NULL; -} - -/** - * fence_get_rcu_safe - acquire a reference to an RCU tracked fence - * @fence: [in] pointer to fence to increase refcount of - * - * Function returns NULL if no refcount could be obtained, or the fence. - * This function handles acquiring a reference to a fence that may be - * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU), - * so long as the caller is using RCU on the pointer to the fence. - * - * An alternative mechanism is to employ a seqlock to protect a bunch of - * fences, such as used by struct reservation_object. 
- * the seqlock must be taken before and checked after a reference to the
- * fence is acquired (as shown here).
- *
- * The caller is required to hold the RCU read lock.
- */
-static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep)
-{
-	do {
-		struct fence *fence;
-
-		fence = rcu_dereference(*fencep);
-		if (!fence || !fence_get_rcu(fence))
-			return NULL;
-
-		/* The atomic_inc_not_zero() inside fence_get_rcu()
-		 * provides a full memory barrier upon success (such as now).
-		 * This is paired with the write barrier from assigning
-		 * to the __rcu protected fence pointer so that if that
-		 * pointer still matches the current fence, we know we
-		 * have successfully acquired a reference to it. If it no
-		 * longer matches, we are holding a reference to some other
-		 * reallocated pointer. This is possible if the allocator
-		 * is using a freelist like SLAB_DESTROY_BY_RCU where the
-		 * fence remains valid for the RCU grace period, but it
-		 * may be reallocated. When using such allocators, we are
-		 * responsible for ensuring the reference we get is to
-		 * the right fence, as below.
-		 */
-		if (fence == rcu_access_pointer(*fencep))
-			return rcu_pointer_handoff(fence);
-
-		fence_put(fence);
-	} while (1);
-}
-
-int fence_signal(struct fence *fence);
-int fence_signal_locked(struct fence *fence);
-signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
-		       fence_func_t func);
-bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
-void fence_enable_sw_signaling(struct fence *fence);
-
-/**
- * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * This function requires fence->lock to be held.
- */
-static inline bool
-fence_is_signaled_locked(struct fence *fence)
-{
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		return true;
-
-	if (fence->ops->signaled && fence->ops->signaled(fence)) {
-		fence_signal_locked(fence);
-		return true;
-	}
-
-	return false;
-}
-
-/**
- * fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * It's recommended for seqno fences to call fence_signal when the
- * operation is complete; this makes it possible to prevent issues from
- * wraparound between time of issue and time of use by checking the return
- * value of this function before calling hardware-specific wait instructions.
- */
-static inline bool
-fence_is_signaled(struct fence *fence)
-{
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		return true;
-
-	if (fence->ops->signaled && fence->ops->signaled(fence)) {
-		fence_signal(fence);
-		return true;
-	}
-
-	return false;
-}
-
-/**
- * fence_is_later - return whether f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns true if f1 is chronologically later than f2. Both fences must be
- * from the same context, since a seqno is not re-used across contexts.
- */
-static inline bool fence_is_later(struct fence *f1, struct fence *f2)
-{
-	if (WARN_ON(f1->context != f2->context))
-		return false;
-
-	return (int)(f1->seqno - f2->seqno) > 0;
-}
-
-/**
- * fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns NULL if both fences are signaled, otherwise the fence that would be
- * signaled last. Both fences must be from the same context, since a seqno is
- * not re-used across contexts.
- */
-static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
-{
-	if (WARN_ON(f1->context != f2->context))
-		return NULL;
-
-	/*
-	 * can't check just FENCE_FLAG_SIGNALED_BIT here, since it may never
-	 * have been set if enable_signaling wasn't called, and enabling that
-	 * here is overkill.
-	 */
-	if (fence_is_later(f1, f2))
-		return fence_is_signaled(f1) ? NULL : f1;
-	else
-		return fence_is_signaled(f2) ? NULL : f2;
-}
-
-signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
-				   bool intr, signed long timeout);
-
-/**
- * fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- *
- * This function will return -ERESTARTSYS if interrupted by a signal,
- * or 0 if the fence was signaled. Other error values may be
- * returned on custom implementations.
- *
- * Performs a synchronous wait on this fence. It is assumed the caller
- * directly or indirectly holds a reference to the fence, otherwise the
- * fence might be freed before return, resulting in undefined behavior.
- */
-static inline signed long fence_wait(struct fence *fence, bool intr)
-{
-	signed long ret;
-
-	/* Since fence_wait_timeout cannot time out with
-	 * MAX_SCHEDULE_TIMEOUT, the only valid return values are
-	 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
-	 */
-	ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-
-	return ret < 0 ? ret : 0;
-}
-
-u64 fence_context_alloc(unsigned num);
-
-#define FENCE_TRACE(f, fmt, args...) \
-	do { \
-		struct fence *__ff = (f); \
-		if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
-			pr_info("f %llu#%u: " fmt, \
-				__ff->context, __ff->seqno, ##args); \
-	} while (0)
-
-#define FENCE_WARN(f, fmt, args...) \
-	do { \
-		struct fence *__ff = (f); \
-		pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
-			##args); \
-	} while (0)
-
-#define FENCE_ERR(f, fmt, args...)
\ - do { \ - struct fence *__ff = (f); \ - pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ - ##args); \ - } while (0) - -#endif /* __LINUX_FENCE_H */ diff --git a/include/linux/reservation.h b/include/linux/reservation.h index b0f305e77b7f..2e313cca08f0 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -40,7 +40,7 @@ #define _LINUX_RESERVATION_H #include -#include +#include #include #include #include @@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[]; struct reservation_object_list { struct rcu_head rcu; u32 shared_count, shared_max; - struct fence __rcu *shared[]; + struct dma_fence __rcu *shared[]; }; /** @@ -74,7 +74,7 @@ struct reservation_object { struct ww_mutex lock; seqcount_t seq; - struct fence __rcu *fence_excl; + struct dma_fence __rcu *fence_excl; struct reservation_object_list __rcu *fence; struct reservation_object_list *staged; }; @@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj) { int i; struct reservation_object_list *fobj; - struct fence *excl; + struct dma_fence *excl; /* * This object should be dead and all references must have @@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj) */ excl = rcu_dereference_protected(obj->fence_excl, 1); if (excl) - fence_put(excl); + dma_fence_put(excl); fobj = rcu_dereference_protected(obj->fence, 1); if (fobj) { for (i = 0; i < fobj->shared_count; ++i) - fence_put(rcu_dereference_protected(fobj->shared[i], 1)); + dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); kfree(fobj); } @@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj) * RETURNS * The exclusive fence or NULL */ -static inline struct fence * +static inline struct dma_fence * reservation_object_get_excl(struct reservation_object *obj) { return rcu_dereference_protected(obj->fence_excl, @@ -173,10 +173,10 @@ reservation_object_get_excl(struct reservation_object *obj) * RETURNS * The exclusive fence or NULL if none */ -static inline struct fence * +static inline struct dma_fence * reservation_object_get_excl_rcu(struct reservation_object *obj) { - struct fence *fence; + struct dma_fence *fence; unsigned seq; retry: seq = read_seqcount_begin(&obj->seq); @@ -186,22 +186,22 @@ retry: rcu_read_unlock(); goto retry; } - fence = fence_get(fence); + fence = dma_fence_get(fence); rcu_read_unlock(); return fence; } int reservation_object_reserve_shared(struct reservation_object *obj); void reservation_object_add_shared_fence(struct reservation_object *obj, - struct fence *fence); + struct dma_fence *fence); void reservation_object_add_excl_fence(struct reservation_object *obj, - struct fence *fence); + struct dma_fence *fence); int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct fence **pfence_excl, + struct dma_fence **pfence_excl, unsigned *pshared_count, - struct fence ***pshared); + struct dma_fence ***pshared); long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h index a1ba6a5ccdd6..c58c535d12a8 100644 --- a/include/linux/seqno-fence.h +++ b/include/linux/seqno-fence.h @@ -20,7 +20,7 @@ #ifndef __LINUX_SEQNO_FENCE_H #define __LINUX_SEQNO_FENCE_H -#include +#include #include enum seqno_fence_condition { @@ -29,15 +29,15 @@ enum seqno_fence_condition { }; struct seqno_fence { - struct fence base; + struct dma_fence base; - const struct fence_ops *ops; + const struct dma_fence_ops *ops; struct dma_buf 
*sync_buf; uint32_t seqno_ofs; enum seqno_fence_condition condition; }; -extern const struct fence_ops seqno_fence_ops; +extern const struct dma_fence_ops seqno_fence_ops; /** * to_seqno_fence - cast a fence to a seqno_fence @@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops; * or the seqno_fence otherwise. */ static inline struct seqno_fence * -to_seqno_fence(struct fence *fence) +to_seqno_fence(struct dma_fence *fence) { if (fence->ops != &seqno_fence_ops) return NULL; @@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence) * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the * device's vm can be expensive. * - * It is recommended for creators of seqno_fence to call fence_signal + * It is recommended for creators of seqno_fence to call dma_fence_signal() * before destruction. This will prevent possible issues from wraparound at - * time of issue vs time of check, since users can check fence_is_signaled + * time of issue vs time of check, since users can check dma_fence_is_signaled() * before submitting instructions for the hardware to wait on the fence. * However, when ops.enable_signaling is not called, it doesn't have to be * done as soon as possible, just before there's any real danger of seqno @@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, struct dma_buf *sync_buf, uint32_t context, uint32_t seqno_ofs, uint32_t seqno, enum seqno_fence_condition cond, - const struct fence_ops *ops) + const struct dma_fence_ops *ops) { BUG_ON(!fence || !sync_buf || !ops); BUG_ON(!ops->wait || !ops->enable_signaling || !ops->get_driver_name || !ops->get_timeline_name); /* - * ops is used in fence_init for get_driver_name, so needs to be + * ops is used in dma_fence_init for get_driver_name, so needs to be * initialized first */ fence->ops = ops; - fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); + dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); get_dma_buf(sync_buf); fence->sync_buf = sync_buf; fence->seqno_ofs = seqno_ofs; diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index aa17ccfc2f57..3e3ab84fc4cd 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -18,8 +18,8 @@ #include #include #include -#include -#include +#include +#include /** * struct sync_file - sync file to export to the userspace @@ -41,13 +41,13 @@ struct sync_file { wait_queue_head_t wq; - struct fence *fence; - struct fence_cb cb; + struct dma_fence *fence; + struct dma_fence_cb cb; }; -#define POLL_ENABLED FENCE_FLAG_USER_BITS +#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS -struct sync_file *sync_file_create(struct fence *fence); -struct fence *sync_file_get_fence(int fd); +struct sync_file *sync_file_create(struct dma_fence *fence); +struct dma_fence *sync_file_get_fence(int fd); #endif /* _LINUX_SYNC_H */ diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h new file mode 100644 index 000000000000..1157cb4c3c6f --- /dev/null +++ b/include/trace/events/dma_fence.h @@ -0,0 +1,128 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dma_fence + +#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_DMA_FENCE_H + +#include + +struct dma_fence; + +TRACE_EVENT(dma_fence_annotate_wait_on, + + /* fence: the fence waiting on f1, f1: the fence to be waited on. 
*/ + TP_PROTO(struct dma_fence *fence, struct dma_fence *f1), + + TP_ARGS(fence, f1), + + TP_STRUCT__entry( + __string(driver, fence->ops->get_driver_name(fence)) + __string(timeline, fence->ops->get_timeline_name(fence)) + __field(unsigned int, context) + __field(unsigned int, seqno) + + __string(waiting_driver, f1->ops->get_driver_name(f1)) + __string(waiting_timeline, f1->ops->get_timeline_name(f1)) + __field(unsigned int, waiting_context) + __field(unsigned int, waiting_seqno) + ), + + TP_fast_assign( + __assign_str(driver, fence->ops->get_driver_name(fence)) + __assign_str(timeline, fence->ops->get_timeline_name(fence)) + __entry->context = fence->context; + __entry->seqno = fence->seqno; + + __assign_str(waiting_driver, f1->ops->get_driver_name(f1)) + __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1)) + __entry->waiting_context = f1->context; + __entry->waiting_seqno = f1->seqno; + + ), + + TP_printk("driver=%s timeline=%s context=%u seqno=%u " \ + "waits on driver=%s timeline=%s context=%u seqno=%u", + __get_str(driver), __get_str(timeline), __entry->context, + __entry->seqno, + __get_str(waiting_driver), __get_str(waiting_timeline), + __entry->waiting_context, __entry->waiting_seqno) +); + +DECLARE_EVENT_CLASS(dma_fence, + + TP_PROTO(struct dma_fence *fence), + + TP_ARGS(fence), + + TP_STRUCT__entry( + __string(driver, fence->ops->get_driver_name(fence)) + __string(timeline, fence->ops->get_timeline_name(fence)) + __field(unsigned int, context) + __field(unsigned int, seqno) + ), + + TP_fast_assign( + __assign_str(driver, fence->ops->get_driver_name(fence)) + __assign_str(timeline, fence->ops->get_timeline_name(fence)) + __entry->context = fence->context; + __entry->seqno = fence->seqno; + ), + + TP_printk("driver=%s timeline=%s context=%u seqno=%u", + __get_str(driver), __get_str(timeline), __entry->context, + __entry->seqno) +); + +DEFINE_EVENT(dma_fence, dma_fence_emit, + + TP_PROTO(struct dma_fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(dma_fence, dma_fence_init, + + TP_PROTO(struct dma_fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(dma_fence, dma_fence_destroy, + + TP_PROTO(struct dma_fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(dma_fence, dma_fence_enable_signal, + + TP_PROTO(struct dma_fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(dma_fence, dma_fence_signaled, + + TP_PROTO(struct dma_fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(dma_fence, dma_fence_wait_start, + + TP_PROTO(struct dma_fence *fence), + + TP_ARGS(fence) +); + +DEFINE_EVENT(dma_fence, dma_fence_wait_end, + + TP_PROTO(struct dma_fence *fence), + + TP_ARGS(fence) +); + +#endif /* _TRACE_DMA_FENCE_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/fence.h b/include/trace/events/fence.h deleted file mode 100644 index d6dfa05ba322..000000000000 --- a/include/trace/events/fence.h +++ /dev/null @@ -1,128 +0,0 @@ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM fence - -#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_FENCE_H - -#include - -struct fence; - -TRACE_EVENT(fence_annotate_wait_on, - - /* fence: the fence waiting on f1, f1: the fence to be waited on. 
*/ - TP_PROTO(struct fence *fence, struct fence *f1), - - TP_ARGS(fence, f1), - - TP_STRUCT__entry( - __string(driver, fence->ops->get_driver_name(fence)) - __string(timeline, fence->ops->get_timeline_name(fence)) - __field(unsigned int, context) - __field(unsigned int, seqno) - - __string(waiting_driver, f1->ops->get_driver_name(f1)) - __string(waiting_timeline, f1->ops->get_timeline_name(f1)) - __field(unsigned int, waiting_context) - __field(unsigned int, waiting_seqno) - ), - - TP_fast_assign( - __assign_str(driver, fence->ops->get_driver_name(fence)) - __assign_str(timeline, fence->ops->get_timeline_name(fence)) - __entry->context = fence->context; - __entry->seqno = fence->seqno; - - __assign_str(waiting_driver, f1->ops->get_driver_name(f1)) - __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1)) - __entry->waiting_context = f1->context; - __entry->waiting_seqno = f1->seqno; - - ), - - TP_printk("driver=%s timeline=%s context=%u seqno=%u " \ - "waits on driver=%s timeline=%s context=%u seqno=%u", - __get_str(driver), __get_str(timeline), __entry->context, - __entry->seqno, - __get_str(waiting_driver), __get_str(waiting_timeline), - __entry->waiting_context, __entry->waiting_seqno) -); - -DECLARE_EVENT_CLASS(fence, - - TP_PROTO(struct fence *fence), - - TP_ARGS(fence), - - TP_STRUCT__entry( - __string(driver, fence->ops->get_driver_name(fence)) - __string(timeline, fence->ops->get_timeline_name(fence)) - __field(unsigned int, context) - __field(unsigned int, seqno) - ), - - TP_fast_assign( - __assign_str(driver, fence->ops->get_driver_name(fence)) - __assign_str(timeline, fence->ops->get_timeline_name(fence)) - __entry->context = fence->context; - __entry->seqno = fence->seqno; - ), - - TP_printk("driver=%s timeline=%s context=%u seqno=%u", - __get_str(driver), __get_str(timeline), __entry->context, - __entry->seqno) -); - -DEFINE_EVENT(fence, fence_emit, - - TP_PROTO(struct fence *fence), - - TP_ARGS(fence) -); - -DEFINE_EVENT(fence, fence_init, - - TP_PROTO(struct fence *fence), - - TP_ARGS(fence) -); - -DEFINE_EVENT(fence, fence_destroy, - - TP_PROTO(struct fence *fence), - - TP_ARGS(fence) -); - -DEFINE_EVENT(fence, fence_enable_signal, - - TP_PROTO(struct fence *fence), - - TP_ARGS(fence) -); - -DEFINE_EVENT(fence, fence_signaled, - - TP_PROTO(struct fence *fence), - - TP_ARGS(fence) -); - -DEFINE_EVENT(fence, fence_wait_start, - - TP_PROTO(struct fence *fence), - - TP_ARGS(fence) -); - -DEFINE_EVENT(fence, fence_wait_end, - - TP_PROTO(struct fence *fence), - - TP_ARGS(fence) -); - -#endif /* _TRACE_FENCE_H */ - -/* This part must be outside protection */ -#include -- cgit v1.2.3 From 97ac0e47aed5f635893b0e2df634c64b38ca7541 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 19 Oct 2016 11:28:27 +0100 Subject: drm: convert DT component matching to component_match_add_release() Convert DT component matching to use component_match_add_release(). 
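drm_of_component_match_add() takes its own reference to the OF node with
of_node_get() and registers drm_release_of() as the release callback, which
drops that reference via of_node_put() when the match table is freed, so
callers can put the node as soon as the match has been added. An
illustrative sketch of the pattern applied in the conversions below (not
lifted verbatim from any single driver):

	/* before: the match entry stored the node pointer without
	 * holding a reference of its own */
	component_match_add(dev, &match, compare_of, remote);
	of_node_put(remote);

	/* after: the helper takes its own reference, released again
	 * through component_match_add_release() when the match table
	 * is torn down, so the caller's put is safe */
	drm_of_component_match_add(dev, &match, compare_of, remote);
	of_node_put(remote);
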
Acked-by: Jyri Sarha Reviewed-by: Jyri Sarha Signed-off-by: Russell King Signed-off-by: Sean Paul Link: http://patchwork.freedesktop.org/patch/msgid/E1bwo6l-0005Io-Q1@rmk-PC.armlinux.org.uk --- drivers/gpu/drm/arm/hdlcd_drv.c | 3 ++- drivers/gpu/drm/arm/malidp_drv.c | 4 +++- drivers/gpu/drm/armada/armada_drv.c | 2 +- drivers/gpu/drm/drm_of.c | 28 +++++++++++++++++++++++-- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 5 +++-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 7 ++++--- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 4 +++- drivers/gpu/drm/msm/msm_drv.c | 12 ++++++----- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6 ++++-- drivers/gpu/drm/sti/sti_drv.c | 5 +++-- drivers/gpu/drm/sun4i/sun4i_drv.c | 3 ++- drivers/gpu/drm/tilcdc/tilcdc_external.c | 4 +++- include/drm/drm_of.h | 12 +++++++++++ 13 files changed, 73 insertions(+), 22 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index fb6a418ce6be..6477d1a65266 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev) return -EAGAIN; } - component_match_add(&pdev->dev, &match, compare_dev, port); + drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); + of_node_put(port); return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, match); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 9280358b8f15..9f4739452a25 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -493,7 +493,9 @@ static int malidp_platform_probe(struct platform_device *pdev) return -EAGAIN; } - component_match_add(&pdev->dev, &match, malidp_compare_dev, port); + drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev, + port); + of_node_put(port); return component_master_add_with_match(&pdev->dev, &malidp_master_ops, match); } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 1e0e68f608e4..94e46da9a758 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev, continue; } - component_match_add(dev, match, compare_of, remote); + drm_of_component_match_add(dev, match, compare_of, remote); of_node_put(remote); } } diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index bc98bb94264d..47848ed8ca48 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -6,6 +6,11 @@ #include #include +static void drm_release_of(struct device *dev, void *data) +{ + of_node_put(data); +} + /** * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node * @dev: DRM device @@ -63,6 +68,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, } EXPORT_SYMBOL(drm_of_find_possible_crtcs); +/** + * drm_of_component_match_add - Add a component helper OF node match rule + * @master: master device + * @matchptr: component match pointer + * @compare: compare function used for matching component + * @node: of_node + */ +void drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node) +{ + of_node_get(node); + component_match_add_release(master, matchptr, drm_release_of, + compare, node); +} +EXPORT_SYMBOL_GPL(drm_of_component_match_add); + /** * drm_of_component_probe - Generic probe function for a component based master * @dev: master device 
containing the OF node @@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev, continue; } - component_match_add(dev, &match, compare_of, port); + drm_of_component_match_add(dev, &match, compare_of, port); of_node_put(port); } @@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev, continue; } - component_match_add(dev, &match, compare_of, remote); + drm_of_component_match_add(dev, &match, compare_of, + remote); of_node_put(remote); } of_node_put(port); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index aa687669e22b..0dee6acbd880 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -16,6 +16,7 @@ #include #include +#include #include "etnaviv_drv.h" #include "etnaviv_gpu.h" @@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) if (!core_node) break; - component_match_add(&pdev->dev, &match, compare_of, - core_node); + drm_of_component_match_add(&pdev->dev, &match, + compare_of, core_node); of_node_put(core_node); } } else if (dev->platform_data) { diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 90377a609c98..e88fde18c946 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "kirin_drm_drv.h" @@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np) DRM_ERROR("no valid endpoint node\n"); return ERR_PTR(-ENODEV); } - of_node_put(endpoint); remote = of_graph_get_remote_port_parent(endpoint); + of_node_put(endpoint); if (!remote) { DRM_ERROR("no valid remote node\n"); return ERR_PTR(-ENODEV); } - of_node_put(remote); if (!of_device_is_available(remote)) { DRM_ERROR("not available for remote node\n"); @@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev) if (IS_ERR(remote)) return PTR_ERR(remote); - component_match_add(dev, &match, compare_of, remote); + drm_of_component_match_add(dev, &match, compare_of, remote); + of_node_put(remote); return component_master_add_with_match(dev, &kirin_drm_ops, match); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index db61aa5f32ef..296f541fbe2f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -416,7 +417,8 @@ static int mtk_drm_probe(struct platform_device *pdev) comp_type == MTK_DPI) { dev_info(dev, "Adding component match for %s\n", node->full_name); - component_match_add(dev, &match, compare_of, node); + drm_of_component_match_add(dev, &match, compare_of, + node); } else { struct mtk_ddp_comp *comp; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index fb5c0b0a7594..84d38eaea585 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -15,6 +15,8 @@ * this program. If not, see . 
*/ +#include + #include "msm_drv.h" #include "msm_debugfs.h" #include "msm_fence.h" @@ -919,8 +921,8 @@ static int add_components_mdp(struct device *mdp_dev, continue; } - component_match_add(master_dev, matchptr, compare_of, intf); - + drm_of_component_match_add(master_dev, matchptr, compare_of, + intf); of_node_put(intf); of_node_put(ep_node); } @@ -962,8 +964,8 @@ static int add_display_components(struct device *dev, put_device(mdp_dev); /* add the MDP component itself */ - component_match_add(dev, matchptr, compare_of, - mdp_dev->of_node); + drm_of_component_match_add(dev, matchptr, compare_of, + mdp_dev->of_node); } else { /* MDP4 */ mdp_dev = dev; @@ -996,7 +998,7 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - component_match_add(dev, matchptr, compare_of, np); + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 8c8cbe837e61..6fe161192bb4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -388,7 +389,7 @@ static void rockchip_add_endpoints(struct device *dev, continue; } - component_match_add(dev, match, compare_of, remote); + drm_of_component_match_add(dev, match, compare_of, remote); of_node_put(remote); } } @@ -437,7 +438,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev) } of_node_put(iommu); - component_match_add(dev, &match, compare_of, port->parent); + drm_of_component_match_add(dev, &match, compare_of, + port->parent); of_node_put(port); } diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 7087499969bc..6aead2013b62 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "sti_crtc.h" #include "sti_drv.h" @@ -424,8 +425,8 @@ static int sti_platform_probe(struct platform_device *pdev) child_np = of_get_next_available_child(node, NULL); while (child_np) { - component_match_add(dev, &match, compare_of, child_np); - of_node_put(child_np); + drm_of_component_match_add(dev, &match, compare_of, + child_np); child_np = of_get_next_available_child(node, child_np); } diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 0da9862ad8ed..b3c4ad605e81 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "sun4i_crtc.h" #include "sun4i_drv.h" @@ -239,7 +240,7 @@ static int sun4i_drv_add_endpoints(struct device *dev, /* Add current component */ DRM_DEBUG_DRIVER("Adding component %s\n", of_node_full_name(node)); - component_match_add(dev, match, compare_of, node); + drm_of_component_match_add(dev, match, compare_of, node); count++; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 68e895021005..06a4c584f3cb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -10,6 +10,7 @@ #include #include +#include #include "tilcdc_drv.h" #include "tilcdc_external.h" @@ -160,7 +161,8 @@ int tilcdc_get_external_components(struct device *dev, dev_dbg(dev, "Subdevice node '%s' found\n", node->name); if (match) - component_match_add(dev, match, dev_match_of, node); + drm_of_component_match_add(dev, match, dev_match_of, + node); 
of_node_put(node);
 		count++;
 	}
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 3fd87b386ed7..d6b4c5587bbe 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -4,6 +4,7 @@
 #include 
 struct component_master_ops;
+struct component_match;
 struct device;
 struct drm_device;
 struct drm_encoder;
@@ -12,6 +13,10 @@ struct device_node;
 #ifdef CONFIG_OF
 extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
 					   struct device_node *port);
+extern void drm_of_component_match_add(struct device *master,
+				       struct component_match **matchptr,
+				       int (*compare)(struct device *, void *),
+				       struct device_node *node);
 extern int drm_of_component_probe(struct device *dev,
 			  int (*compare_of)(struct device *, void *),
 			  const struct component_master_ops *m_ops);
@@ -25,6 +30,13 @@ static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
 	return 0;
 }
 
+static inline void drm_of_component_match_add(struct device *master,
+					      struct component_match **matchptr,
+					      int (*compare)(struct device *, void *),
+					      struct device_node *node)
+{
+}
+
 static inline int drm_of_component_probe(struct device *dev,
 		 int (*compare_of)(struct device *, void *),
-- cgit v1.2.3

From ce6e153f414a73a52fa1498489ce4adf20229445 Mon Sep 17 00:00:00 2001
From: Andrzej Hajda
Date: Mon, 10 Oct 2016 09:39:17 +0200
Subject: drm/bridge: add Silicon Image SiI8620 driver

The SiI8620 transmitter converts an eTMDS/HDMI signal to MHL 3.0 and is
controlled via an I2C bus. Its interaction with other devices in the video
pipeline is performed mainly at the hardware level. The only interaction at
the driver level is filtering out unsupported video modes; the driver
exposes a drm_bridge interface to perform this operation.

Signed-off-by: Andrzej Hajda
Signed-off-by: Archit Taneja
Link: http://patchwork.freedesktop.org/patch/msgid/1476085157-5266-1-git-send-email-a.hajda@samsung.com
---
 drivers/gpu/drm/bridge/Kconfig       |    7 +
 drivers/gpu/drm/bridge/Makefile      |    1 +
 drivers/gpu/drm/bridge/sil-sii8620.c | 1565 ++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/bridge/sil-sii8620.h | 1517 ++++++++++++++++++++++++++++++++
 4 files changed, 3090 insertions(+)
 create mode 100644 drivers/gpu/drm/bridge/sil-sii8620.c
 create mode 100644 drivers/gpu/drm/bridge/sil-sii8620.h

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 10e12e74fc9f..bd6acc829f97 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -57,6 +57,13 @@ config DRM_PARADE_PS8622
 	---help---
 	  Parade eDP-LVDS bridge chip driver.
 
+config DRM_SIL_SII8620
+	tristate "Silicon Image SII8620 HDMI/MHL bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	help
+	  Silicon Image SII8620 HDMI/MHL bridge chip driver.
+ config DRM_SII902X tristate "Silicon Image sii902x RGB/HDMI bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index cdf3a3cf765d..97ed1a5fea9a 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o +obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o obj-$(CONFIG_DRM_SII902X) += sii902x.o obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c new file mode 100644 index 000000000000..1345e7cc7bf0 --- /dev/null +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -0,0 +1,1565 @@ +/* + * Silicon Image SiI8620 HDMI/MHL bridge driver + * + * Copyright (C) 2015, Samsung Electronics Co., Ltd. + * Andrzej Hajda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sil-sii8620.h" + +#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) + +enum sii8620_mode { + CM_DISCONNECTED, + CM_DISCOVERY, + CM_MHL1, + CM_MHL3, + CM_ECBUS_S +}; + +enum sii8620_sink_type { + SINK_NONE, + SINK_HDMI, + SINK_DVI +}; + +enum sii8620_mt_state { + MT_STATE_READY, + MT_STATE_BUSY, + MT_STATE_DONE +}; + +struct sii8620 { + struct drm_bridge bridge; + struct device *dev; + struct clk *clk_xtal; + struct gpio_desc *gpio_reset; + struct gpio_desc *gpio_int; + struct regulator_bulk_data supplies[2]; + struct mutex lock; /* context lock, protects fields below */ + int error; + enum sii8620_mode mode; + enum sii8620_sink_type sink_type; + u8 cbus_status; + u8 stat[MHL_DST_SIZE]; + u8 xstat[MHL_XDS_SIZE]; + u8 devcap[MHL_DCAP_SIZE]; + u8 xdevcap[MHL_XDC_SIZE]; + u8 avif[19]; + struct edid *edid; + unsigned int gen2_write_burst:1; + enum sii8620_mt_state mt_state; + struct list_head mt_queue; +}; + +struct sii8620_mt_msg; + +typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx, + struct sii8620_mt_msg *msg); + +struct sii8620_mt_msg { + struct list_head node; + u8 reg[4]; + u8 ret; + sii8620_mt_msg_cb send; + sii8620_mt_msg_cb recv; +}; + +static const u8 sii8620_i2c_page[] = { + 0x39, /* Main System */ + 0x3d, /* TDM and HSIC */ + 0x49, /* TMDS Receiver, MHL EDID */ + 0x4d, /* eMSC, HDCP, HSIC */ + 0x5d, /* MHL Spec */ + 0x64, /* MHL CBUS */ + 0x59, /* Hardware TPI (Transmitter Programming Interface) */ + 0x61, /* eCBUS-S, eCBUS-D */ +}; + +static void sii8620_fetch_edid(struct sii8620 *ctx); +static void sii8620_set_upstream_edid(struct sii8620 *ctx); +static void sii8620_enable_hpd(struct sii8620 *ctx); +static void sii8620_mhl_disconnected(struct sii8620 *ctx); + +static int sii8620_clear_error(struct sii8620 *ctx) +{ + int ret = ctx->error; + + ctx->error = 0; + return ret; +} + +static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len) +{ + struct device *dev = ctx->dev; + struct i2c_client *client = to_i2c_client(dev); + u8 data = addr; + struct i2c_msg msg[] = { + { + .addr = sii8620_i2c_page[addr >> 8], + .flags = client->flags, + .len = 1, + .buf = &data + }, + { + .addr = 
sii8620_i2c_page[addr >> 8], + .flags = client->flags | I2C_M_RD, + .len = len, + .buf = buf + }, + }; + int ret; + + if (ctx->error) + return; + + ret = i2c_transfer(client->adapter, msg, 2); + dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret); + + if (ret != 2) { + dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n", + addr, len, ret); + ctx->error = ret < 0 ? ret : -EIO; + } +} + +static u8 sii8620_readb(struct sii8620 *ctx, u16 addr) +{ + u8 ret; + + sii8620_read_buf(ctx, addr, &ret, 1); + return ret; +} + +static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf, + int len) +{ + struct device *dev = ctx->dev; + struct i2c_client *client = to_i2c_client(dev); + u8 data[2]; + struct i2c_msg msg = { + .addr = sii8620_i2c_page[addr >> 8], + .flags = client->flags, + .len = len + 1, + }; + int ret; + + if (ctx->error) + return; + + if (len > 1) { + msg.buf = kmalloc(len + 1, GFP_KERNEL); + if (!msg.buf) { + ctx->error = -ENOMEM; + return; + } + memcpy(msg.buf + 1, buf, len); + } else { + msg.buf = data; + msg.buf[1] = *buf; + } + + msg.buf[0] = addr; + + ret = i2c_transfer(client->adapter, &msg, 1); + dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret); + + if (ret != 1) { + dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n", + addr, len, buf, ret); + ctx->error = ret ?: -EIO; + } + + if (len > 1) + kfree(msg.buf); +} + +#define sii8620_write(ctx, addr, arr...) \ +({\ + u8 d[] = { arr }; \ + sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \ +}) + +static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len) +{ + int i; + + for (i = 0; i < len; i += 2) + sii8620_write(ctx, seq[i], seq[i + 1]); +} + +#define sii8620_write_seq(ctx, seq...) \ +({\ + const u16 d[] = { seq }; \ + __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \ +}) + +#define sii8620_write_seq_static(ctx, seq...) 
\ +({\ + static const u16 d[] = { seq }; \ + __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \ +}) + +static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val) +{ + val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask); + sii8620_write(ctx, addr, val); +} + +static void sii8620_mt_cleanup(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg, *n; + + list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) { + list_del(&msg->node); + kfree(msg); + } + ctx->mt_state = MT_STATE_READY; +} + +static void sii8620_mt_work(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg; + + if (ctx->error) + return; + if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue)) + return; + + if (ctx->mt_state == MT_STATE_DONE) { + ctx->mt_state = MT_STATE_READY; + msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, + node); + if (msg->recv) + msg->recv(ctx, msg); + list_del(&msg->node); + kfree(msg); + } + + if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue)) + return; + + ctx->mt_state = MT_STATE_BUSY; + msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); + if (msg->send) + msg->send(ctx, msg); +} + +static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx, + struct sii8620_mt_msg *msg) +{ + switch (msg->reg[0]) { + case MHL_WRITE_STAT: + case MHL_SET_INT: + sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2); + sii8620_write(ctx, REG_MSC_COMMAND_START, + BIT_MSC_COMMAND_START_WRITE_STAT); + break; + case MHL_MSC_MSG: + sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3); + sii8620_write(ctx, REG_MSC_COMMAND_START, + BIT_MSC_COMMAND_START_MSC_MSG); + break; + default: + dev_err(ctx->dev, "%s: command %#x not supported\n", __func__, + msg->reg[0]); + } +} + +static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); + + if (!msg) + ctx->error = -ENOMEM; + else + list_add_tail(&msg->node, &ctx->mt_queue); + + return msg; +} + +static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2) +{ + struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx); + + if (!msg) + return; + + msg->reg[0] = cmd; + msg->reg[1] = arg1; + msg->reg[2] = arg2; + msg->send = sii8620_mt_msc_cmd_send; +} + +static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val) +{ + sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val); +} + +static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask) +{ + sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask); +} + +static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data) +{ + sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data); +} + +static void sii8620_mt_rap(struct sii8620 *ctx, u8 code) +{ + sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code); +} + +static void sii8620_mt_read_devcap_send(struct sii8620 *ctx, + struct sii8620_mt_msg *msg) +{ + u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP + | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO + | BIT_EDID_CTRL_EDID_MODE_EN; + + if (msg->reg[0] == MHL_READ_XDEVCAP) + ctrl |= BIT_EDID_CTRL_XDEVCAP_EN; + + sii8620_write_seq(ctx, + REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE, + REG_EDID_CTRL, ctrl, + REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START + ); +} + +/* copy src to dst and set changed bits in src */ +static void sii8620_update_array(u8 *dst, u8 *src, int count) +{ + while (--count >= 0) { + *src ^= *dst; + *dst++ ^= *src++; + } +} + +static void sii8620_mr_devcap(struct sii8620 *ctx) +{ + static const char * const sink_str[] = { + [SINK_NONE] = "NONE", + [SINK_HDMI] 
= "HDMI", + [SINK_DVI] = "DVI" + }; + + u8 dcap[MHL_DCAP_SIZE]; + char sink_name[20]; + struct device *dev = ctx->dev; + + sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE); + if (ctx->error < 0) + return; + + dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap); + dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n", + dcap[MHL_DCAP_MHL_VERSION] / 16, + dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H], + dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H], + dcap[MHL_DCAP_DEVICE_ID_L]); + sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); + + if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK)) + return; + + sii8620_fetch_edid(ctx); + if (!ctx->edid) { + dev_err(ctx->dev, "Cannot fetch EDID\n"); + sii8620_mhl_disconnected(ctx); + return; + } + + if (drm_detect_hdmi_monitor(ctx->edid)) + ctx->sink_type = SINK_HDMI; + else + ctx->sink_type = SINK_DVI; + + drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name)); + + dev_info(dev, "detected sink(type: %s): %s\n", + sink_str[ctx->sink_type], sink_name); + sii8620_set_upstream_edid(ctx); + sii8620_enable_hpd(ctx); +} + +static void sii8620_mr_xdevcap(struct sii8620 *ctx) +{ + sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap, + MHL_XDC_SIZE); + + sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE), + MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT); + sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP); +} + +static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx, + struct sii8620_mt_msg *msg) +{ + u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP + | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO + | BIT_EDID_CTRL_EDID_MODE_EN; + + if (msg->reg[0] == MHL_READ_XDEVCAP) + ctrl |= BIT_EDID_CTRL_XDEVCAP_EN; + + sii8620_write_seq(ctx, + REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE + | BIT_INTR9_EDID_ERROR, + REG_EDID_CTRL, ctrl, + REG_EDID_FIFO_ADDR, 0 + ); + + if (msg->reg[0] == MHL_READ_XDEVCAP) + sii8620_mr_xdevcap(ctx); + else + sii8620_mr_devcap(ctx); +} + +static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap) +{ + struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx); + + if (!msg) + return; + + msg->reg[0] = xdevcap ? 
MHL_READ_XDEVCAP : MHL_READ_DEVCAP; + msg->send = sii8620_mt_read_devcap_send; + msg->recv = sii8620_mt_read_devcap_recv; +} + +static void sii8620_fetch_edid(struct sii8620 *ctx) +{ + u8 lm_ddc, ddc_cmd, int3, cbus; + int fetched, i; + int edid_len = EDID_LENGTH; + u8 *edid; + + sii8620_readb(ctx, REG_CBUS_STATUS); + lm_ddc = sii8620_readb(ctx, REG_LM_DDC); + ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD); + + sii8620_write_seq(ctx, + REG_INTR9_MASK, 0, + REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, + REG_HDCP2X_POLL_CS, 0x71, + REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX, + REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED, + ); + + for (i = 0; i < 256; ++i) { + u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS); + + if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG)) + break; + sii8620_write(ctx, REG_DDC_STATUS, + BIT_DDC_STATUS_DDC_FIFO_EMPTY); + } + + sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1); + + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!edid) { + ctx->error = -ENOMEM; + return; + } + +#define FETCH_SIZE 16 + for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) { + sii8620_readb(ctx, REG_DDC_STATUS); + sii8620_write_seq(ctx, + REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT, + REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO, + REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY + ); + sii8620_write_seq(ctx, + REG_DDC_SEGM, fetched >> 8, + REG_DDC_OFFSET, fetched & 0xff, + REG_DDC_DIN_CNT1, FETCH_SIZE, + REG_DDC_DIN_CNT2, 0, + REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK + ); + + do { + int3 = sii8620_readb(ctx, REG_INTR3); + cbus = sii8620_readb(ctx, REG_CBUS_STATUS); + + if (int3 & BIT_DDC_CMD_DONE) + break; + + if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { + kfree(edid); + edid = NULL; + goto end; + } + } while (1); + + sii8620_readb(ctx, REG_DDC_STATUS); + while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE) + usleep_range(10, 20); + + sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); + if (fetched + FETCH_SIZE == EDID_LENGTH) { + u8 ext = ((struct edid *)edid)->extensions; + + if (ext) { + u8 *new_edid; + + edid_len += ext * EDID_LENGTH; + new_edid = krealloc(edid, edid_len, GFP_KERNEL); + if (!new_edid) { + kfree(edid); + ctx->error = -ENOMEM; + return; + } + edid = new_edid; + } + } + + if (fetched + FETCH_SIZE == edid_len) + sii8620_write(ctx, REG_INTR3, int3); + } + + sii8620_write(ctx, REG_LM_DDC, lm_ddc); + +end: + kfree(ctx->edid); + ctx->edid = (struct edid *)edid; +} + +static void sii8620_set_upstream_edid(struct sii8620 *ctx) +{ + sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N + | BIT_DPD_PD_MHL_CLK_N, 0xff); + + sii8620_write_seq_static(ctx, + REG_RX_HDMI_CTRL3, 0x00, + REG_PKT_FILTER_0, 0xFF, + REG_PKT_FILTER_1, 0xFF, + REG_ALICE0_BW_I2C, 0x06 + ); + + sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER, + BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff); + + sii8620_write_seq_static(ctx, + REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO + | BIT_EDID_CTRL_EDID_MODE_EN, + REG_EDID_FIFO_ADDR, 0, + ); + + sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid, + (ctx->edid->extensions + 1) * EDID_LENGTH); + + sii8620_write_seq_static(ctx, + REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID + | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO + | BIT_EDID_CTRL_EDID_MODE_EN, + REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE, + REG_INTR9_MASK, 0 + ); +} + +static void sii8620_xtal_set_rate(struct sii8620 *ctx) +{ + static const struct { + unsigned int rate; + u8 div; + u8 tp1; + } rates[] = { + { 19200, 0x04, 0x53 }, + { 20000, 0x04, 0x62 
}, + { 24000, 0x05, 0x75 }, + { 30000, 0x06, 0x92 }, + { 38400, 0x0c, 0xbc }, + }; + unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000; + int i; + + for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i) + if (rate <= rates[i].rate) + break; + + if (rate != rates[i].rate) + dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n", + rate, rates[i].rate); + + sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div); + sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1); +} + +static int sii8620_hw_on(struct sii8620 *ctx) +{ + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + if (ret) + return ret; + usleep_range(10000, 20000); + return clk_prepare_enable(ctx->clk_xtal); +} + +static int sii8620_hw_off(struct sii8620 *ctx) +{ + clk_disable_unprepare(ctx->clk_xtal); + gpiod_set_value(ctx->gpio_reset, 1); + return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); +} + +static void sii8620_hw_reset(struct sii8620 *ctx) +{ + usleep_range(10000, 20000); + gpiod_set_value(ctx->gpio_reset, 0); + usleep_range(5000, 20000); + gpiod_set_value(ctx->gpio_reset, 1); + usleep_range(10000, 20000); + gpiod_set_value(ctx->gpio_reset, 0); + msleep(300); +} + +static void sii8620_cbus_reset(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST + | BIT_PWD_SRST_CBUS_RST_SW_EN, + REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN + ); +} + +static void sii8620_set_auto_zone(struct sii8620 *ctx) +{ + if (ctx->mode != CM_MHL1) { + sii8620_write_seq_static(ctx, + REG_TX_ZONE_CTL1, 0x0, + REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X + | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL + | BIT_MHL_PLL_CTL0_ZONE_MASK_OE + ); + } else { + sii8620_write_seq_static(ctx, + REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE, + REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X + | BIT_MHL_PLL_CTL0_ZONE_MASK_OE + ); + } +} + +static void sii8620_stop_video(struct sii8620 *ctx) +{ + u8 uninitialized_var(val); + + sii8620_write_seq_static(ctx, + REG_TPI_INTR_EN, 0, + REG_HDCP2X_INTR0_MASK, 0, + REG_TPI_COPP_DATA2, 0, + REG_TPI_INTR_ST0, ~0, + ); + + switch (ctx->sink_type) { + case SINK_DVI: + val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN + | BIT_TPI_SC_TPI_AV_MUTE; + break; + case SINK_HDMI: + val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN + | BIT_TPI_SC_TPI_AV_MUTE + | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI; + break; + default: + return; + } + + sii8620_write(ctx, REG_TPI_SC, val); +} + +static void sii8620_start_hdmi(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL + | BIT_RX_HDMI_CTRL2_USE_AV_MUTE, + REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE + | BIT_VID_OVRRD_M1080P_OVRRD, + REG_VID_MODE, 0, + REG_MHL_TOP_CTL, 0x1, + REG_MHLTX_CTL6, 0xa0, + REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), + REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), + ); + + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + MHL_DST_LM_CLK_MODE_NORMAL | + MHL_DST_LM_PATH_ENABLED); + + sii8620_set_auto_zone(ctx); + + sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); + + sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif, + ARRAY_SIZE(ctx->avif)); + + sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2); +} + +static void sii8620_start_video(struct sii8620 *ctx) +{ + if (ctx->mode < CM_MHL3) + sii8620_stop_video(ctx); + + switch (ctx->sink_type) { + case SINK_HDMI: + sii8620_start_hdmi(ctx); + break; + case SINK_DVI: + default: + break; + } +} + +static void sii8620_disable_hpd(struct sii8620 *ctx) +{ + 
sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0); + sii8620_write_seq_static(ctx, + REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN, + REG_INTR8_MASK, 0 + ); +} + +static void sii8620_enable_hpd(struct sii8620 *ctx) +{ + sii8620_setbits(ctx, REG_TMDS_CSTAT_P3, + BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS + | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0); + sii8620_write_seq_static(ctx, + REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN + | BIT_HPD_CTRL_HPD_HIGH, + ); +} + +static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx) +{ + if (ctx->gen2_write_burst) + return; + + sii8620_write_seq_static(ctx, + REG_MDT_RCV_TIMEOUT, 100, + REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN + ); + ctx->gen2_write_burst = 1; +} + +static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx) +{ + if (!ctx->gen2_write_burst) + return; + + sii8620_write_seq_static(ctx, + REG_MDT_XMIT_CTRL, 0, + REG_MDT_RCV_CTRL, 0 + ); + ctx->gen2_write_burst = 0; +} + +static void sii8620_start_gen2_write_burst(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT + | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR + | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD + | BIT_MDT_XMIT_SM_ERROR, + REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY + | BIT_MDT_IDLE_AFTER_HAWB_DISABLE + | BIT_MDT_RFIFO_DATA_RDY + ); + sii8620_enable_gen2_write_burst(ctx); +} + +static void sii8620_mhl_discover(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT + | BIT_DISC_CTRL9_DISC_PULSE_PROCEED, + REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K), + REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT + | BIT_MHL_EST_INT + | BIT_NOT_MHL_EST_INT + | BIT_CBUS_MHL3_DISCON_INT + | BIT_CBUS_MHL12_DISCON_INT + | BIT_RGND_READY_INT, + REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X + | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL + | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, + REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE + | BIT_MHL_DP_CTL0_TX_OE_OVR, + REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, + REG_MHL_DP_CTL1, 0xA2, + REG_MHL_DP_CTL2, 0x03, + REG_MHL_DP_CTL3, 0x35, + REG_MHL_DP_CTL5, 0x02, + REG_MHL_DP_CTL6, 0x02, + REG_MHL_DP_CTL7, 0x03, + REG_COC_CTLC, 0xFF, + REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 + | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC, + REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE + | BIT_COC_CALIBRATION_DONE, + REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD + | BIT_CBUS_CMD_ABORT, + REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE + | BIT_CBUS_HPD_CHG + | BIT_CBUS_MSC_MR_WRITE_STAT + | BIT_CBUS_MSC_MR_MSC_MSG + | BIT_CBUS_MSC_MR_WRITE_BURST + | BIT_CBUS_MSC_MR_SET_INT + | BIT_CBUS_MSC_MT_DONE_NACK + ); +} + +static void sii8620_peer_specific_init(struct sii8620 *ctx) +{ + if (ctx->mode == CM_MHL3) + sii8620_write_seq_static(ctx, + REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD, + REG_EMSCINTRMASK1, + BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR + ); + else + sii8620_write_seq_static(ctx, + REG_HDCP2X_INTR0_MASK, 0x00, + REG_EMSCINTRMASK1, 0x00, + REG_HDCP2X_INTR0, 0xFF, + REG_INTR1, 0xFF, + REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD + | BIT_SYS_CTRL1_TX_CTRL_HDMI + ); +} + +#define SII8620_MHL_VERSION 0x32 +#define SII8620_SCRATCHPAD_SIZE 16 +#define SII8620_INT_STAT_SIZE 0x33 + +static void sii8620_set_dev_cap(struct sii8620 *ctx) +{ + static const u8 devcap[MHL_DCAP_SIZE] = { + [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION, + [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER, + [MHL_DCAP_ADOPTER_ID_H] = 0x01, + [MHL_DCAP_ADOPTER_ID_L] = 0x41, + [MHL_DCAP_VID_LINK_MODE] = 
MHL_DCAP_VID_LINK_RGB444 + | MHL_DCAP_VID_LINK_PPIXEL + | MHL_DCAP_VID_LINK_16BPP, + [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH, + [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS, + [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI, + [MHL_DCAP_BANDWIDTH] = 0x0f, + [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT + | MHL_DCAP_FEATURE_RAP_SUPPORT + | MHL_DCAP_FEATURE_SP_SUPPORT, + [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE, + [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE, + }; + static const u8 xdcap[MHL_XDC_SIZE] = { + [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075 + | MHL_XDC_ECBUS_S_8BIT, + [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150 + | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600, + [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST, + [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE, + }; + + sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap)); + sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap)); +} + +static void sii8620_mhl_init(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), + REG_CBUS_MSC_COMPAT_CTRL, + BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN, + ); + + sii8620_peer_specific_init(ctx); + + sii8620_disable_hpd(ctx); + + sii8620_write_seq_static(ctx, + REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, + REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT + | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, + REG_TMDS0_CCTRL1, 0x90, + REG_TMDS_CLK_EN, 0x01, + REG_TMDS_CH_EN, 0x11, + REG_BGR_BIAS, 0x87, + REG_ALICE0_ZONE_CTRL, 0xE8, + REG_ALICE0_MODE_CTRL, 0x04, + ); + sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0); + sii8620_write_seq_static(ctx, + REG_TPI_HW_OPT3, 0x76, + REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE, + REG_TPI_DTD_B2, 79, + ); + sii8620_set_dev_cap(ctx); + sii8620_write_seq_static(ctx, + REG_MDT_XMIT_TIMEOUT, 100, + REG_MDT_XMIT_CTRL, 0x03, + REG_MDT_XFIFO_STAT, 0x00, + REG_MDT_RCV_TIMEOUT, 100, + REG_CBUS_LINK_CTRL_8, 0x1D, + ); + + sii8620_start_gen2_write_burst(ctx); + sii8620_write_seq_static(ctx, + REG_BIST_CTRL, 0x00, + REG_COC_CTL1, 0x10, + REG_COC_CTL2, 0x18, + REG_COC_CTLF, 0x07, + REG_COC_CTL11, 0xF8, + REG_COC_CTL17, 0x61, + REG_COC_CTL18, 0x46, + REG_COC_CTL19, 0x15, + REG_COC_CTL1A, 0x01, + REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN, + REG_MHL_COC_CTL4, 0x2D, + REG_MHL_COC_CTL5, 0xF9, + REG_MSC_HEARTBEAT_CTRL, 0x27, + ); + sii8620_disable_gen2_write_burst(ctx); + + /* currently MHL3 is not supported, so we force version to 0 */ + sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0); + sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY), + MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP + | MHL_DST_CONN_POW_STAT); + sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG); +} + +static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode) +{ + if (ctx->mode == mode) + return; + + ctx->mode = mode; + + switch (mode) { + case CM_MHL1: + sii8620_write_seq_static(ctx, + REG_CBUS_MSC_COMPAT_CTRL, 0x02, + REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE, + REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 + | BIT_DPD_OSC_EN, + REG_COC_INTR_MASK, 0 + ); + break; + case CM_MHL3: + sii8620_write_seq_static(ctx, + REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, + REG_COC_CTL0, 0x40, + REG_MHL_COC_CTL1, 0x07 + ); + break; + case CM_DISCONNECTED: + break; + default: + dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode); + break; + }; + + sii8620_set_auto_zone(ctx); + + if (mode != CM_MHL1) + return; + + sii8620_write_seq_static(ctx, + REG_MHL_DP_CTL0, 0xBC, + REG_MHL_DP_CTL1, 
0xBB, + REG_MHL_DP_CTL3, 0x48, + REG_MHL_DP_CTL5, 0x39, + REG_MHL_DP_CTL2, 0x2A, + REG_MHL_DP_CTL6, 0x2A, + REG_MHL_DP_CTL7, 0x08 + ); +} + +static void sii8620_disconnect(struct sii8620 *ctx) +{ + sii8620_disable_gen2_write_burst(ctx); + sii8620_stop_video(ctx); + msleep(50); + sii8620_cbus_reset(ctx); + sii8620_set_mode(ctx, CM_DISCONNECTED); + sii8620_write_seq_static(ctx, + REG_COC_CTL0, 0x40, + REG_CBUS3_CNVT, 0x84, + REG_COC_CTL14, 0x00, + REG_COC_CTL0, 0x40, + REG_HRXCTRL3, 0x07, + REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X + | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL + | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, + REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE + | BIT_MHL_DP_CTL0_TX_OE_OVR, + REG_MHL_DP_CTL1, 0xBB, + REG_MHL_DP_CTL3, 0x48, + REG_MHL_DP_CTL5, 0x3F, + REG_MHL_DP_CTL2, 0x2F, + REG_MHL_DP_CTL6, 0x2A, + REG_MHL_DP_CTL7, 0x03 + ); + sii8620_disable_hpd(ctx); + sii8620_write_seq_static(ctx, + REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, + REG_MHL_COC_CTL1, 0x07, + REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), + REG_DISC_CTRL8, 0x00, + REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT + | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, + REG_INT_CTRL, 0x00, + REG_MSC_HEARTBEAT_CTRL, 0x27, + REG_DISC_CTRL1, 0x25, + REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT, + REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT, + REG_MDT_INT_1, 0xff, + REG_MDT_INT_1_MASK, 0x00, + REG_MDT_INT_0, 0xff, + REG_MDT_INT_0_MASK, 0x00, + REG_COC_INTR, 0xff, + REG_COC_INTR_MASK, 0x00, + REG_TRXINTH, 0xff, + REG_TRXINTMH, 0x00, + REG_CBUS_INT_0, 0xff, + REG_CBUS_INT_0_MASK, 0x00, + REG_CBUS_INT_1, 0xff, + REG_CBUS_INT_1_MASK, 0x00, + REG_EMSCINTR, 0xff, + REG_EMSCINTRMASK, 0x00, + REG_EMSCINTR1, 0xff, + REG_EMSCINTRMASK1, 0x00, + REG_INTR8, 0xff, + REG_INTR8_MASK, 0x00, + REG_TPI_INTR_ST0, 0xff, + REG_TPI_INTR_EN, 0x00, + REG_HDCP2X_INTR0, 0xff, + REG_HDCP2X_INTR0_MASK, 0x00, + REG_INTR9, 0xff, + REG_INTR9_MASK, 0x00, + REG_INTR3, 0xff, + REG_INTR3_MASK, 0x00, + REG_INTR5, 0xff, + REG_INTR5_MASK, 0x00, + REG_INTR2, 0xff, + REG_INTR2_MASK, 0x00, + ); + memset(ctx->stat, 0, sizeof(ctx->stat)); + memset(ctx->xstat, 0, sizeof(ctx->xstat)); + memset(ctx->devcap, 0, sizeof(ctx->devcap)); + memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); + ctx->cbus_status = 0; + ctx->sink_type = SINK_NONE; + kfree(ctx->edid); + ctx->edid = NULL; + sii8620_mt_cleanup(ctx); +} + +static void sii8620_mhl_disconnected(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), + REG_CBUS_MSC_COMPAT_CTRL, + BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN + ); + sii8620_disconnect(ctx); +} + +static void sii8620_irq_disc(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0); + + if (stat & VAL_CBUS_MHL_DISCON) + sii8620_mhl_disconnected(ctx); + + if (stat & BIT_RGND_READY_INT) { + u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2); + + if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) { + sii8620_mhl_discover(ctx); + } else { + sii8620_write_seq_static(ctx, + REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT + | BIT_DISC_CTRL9_NOMHL_EST + | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, + REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT + | BIT_CBUS_MHL3_DISCON_INT + | BIT_CBUS_MHL12_DISCON_INT + | BIT_NOT_MHL_EST_INT + ); + } + } + if (stat & BIT_MHL_EST_INT) + sii8620_mhl_init(ctx); + + sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat); +} + +static void sii8620_irq_g2wb(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_MDT_INT_0); + + if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE) + dev_dbg(ctx->dev, "HAWB idle\n"); + + 
sii8620_write(ctx, REG_MDT_INT_0, stat); +} + +static void sii8620_status_changed_dcap(struct sii8620 *ctx) +{ + if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) { + sii8620_set_mode(ctx, CM_MHL1); + sii8620_peer_specific_init(ctx); + sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE + | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR); + } +} + +static void sii8620_status_changed_path(struct sii8620 *ctx) +{ + if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + MHL_DST_LM_CLK_MODE_NORMAL + | MHL_DST_LM_PATH_ENABLED); + sii8620_mt_read_devcap(ctx, false); + } else { + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + MHL_DST_LM_CLK_MODE_NORMAL); + } +} + +static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) +{ + u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE]; + + sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE); + sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE); + + sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); + sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); + + if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) + sii8620_status_changed_dcap(ctx); + + if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) + sii8620_status_changed_path(ctx); +} + +static void sii8620_msc_mr_set_int(struct sii8620 *ctx) +{ + u8 ints[MHL_INT_SIZE]; + + sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); + sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); +} + +static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) +{ + struct device *dev = ctx->dev; + + if (list_empty(&ctx->mt_queue)) { + dev_err(dev, "unexpected MSC MT response\n"); + return NULL; + } + + return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); +} + +static void sii8620_msc_mt_done(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); + + if (!msg) + return; + + msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0); + ctx->mt_state = MT_STATE_DONE; +} + +static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx) +{ + struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); + u8 buf[2]; + + if (!msg) + return; + + sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2); + + switch (buf[0]) { + case MHL_MSC_MSG_RAPK: + msg->ret = buf[1]; + ctx->mt_state = MT_STATE_DONE; + break; + default: + dev_err(ctx->dev, "%s message type %d,%d not supported", + __func__, buf[0], buf[1]); + } +} + +static void sii8620_irq_msc(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0); + + if (stat & ~BIT_CBUS_HPD_CHG) + sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG); + + if (stat & BIT_CBUS_HPD_CHG) { + u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS); + + if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) { + sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG); + } else { + stat ^= BIT_CBUS_STATUS_CBUS_HPD; + cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD; + } + ctx->cbus_status = cbus_stat; + } + + if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) + sii8620_msc_mr_write_stat(ctx); + + if (stat & BIT_CBUS_MSC_MR_SET_INT) + sii8620_msc_mr_set_int(ctx); + + if (stat & BIT_CBUS_MSC_MT_DONE) + sii8620_msc_mt_done(ctx); + + if (stat & BIT_CBUS_MSC_MR_MSC_MSG) + sii8620_msc_mr_msc_msg(ctx); +} + +static void sii8620_irq_coc(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_COC_INTR); + + sii8620_write(ctx, REG_COC_INTR, stat); +} + +static void sii8620_irq_merr(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1); + + 
sii8620_write(ctx, REG_CBUS_INT_1, stat); +} + +static void sii8620_irq_edid(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_INTR9); + + sii8620_write(ctx, REG_INTR9, stat); + + if (stat & BIT_INTR9_DEVCAP_DONE) + ctx->mt_state = MT_STATE_DONE; +} + +static void sii8620_scdt_high(struct sii8620 *ctx) +{ + sii8620_write_seq_static(ctx, + REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI, + REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI, + ); +} + +static void sii8620_scdt_low(struct sii8620 *ctx) +{ + sii8620_write(ctx, REG_TMDS_CSTAT_P3, + BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS | + BIT_TMDS_CSTAT_P3_CLR_AVI); + + sii8620_stop_video(ctx); + + sii8620_write(ctx, REG_INTR8_MASK, 0); +} + +static void sii8620_irq_scdt(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_INTR5); + + if (stat & BIT_INTR_SCDT_CHANGE) { + u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); + + if (cstat & BIT_TMDS_CSTAT_P3_SCDT) + sii8620_scdt_high(ctx); + else + sii8620_scdt_low(ctx); + } + + sii8620_write(ctx, REG_INTR5, stat); +} + +static void sii8620_new_vsi(struct sii8620 *ctx) +{ + u8 vsif[11]; + + sii8620_write(ctx, REG_RX_HDMI_CTRL2, + VAL_RX_HDMI_CTRL2_DEFVAL | + BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI); + sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif, + ARRAY_SIZE(vsif)); +} + +static void sii8620_new_avi(struct sii8620 *ctx) +{ + sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL); + sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif, + ARRAY_SIZE(ctx->avif)); +} + +static void sii8620_irq_infr(struct sii8620 *ctx) +{ + u8 stat = sii8620_readb(ctx, REG_INTR8) + & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI); + + sii8620_write(ctx, REG_INTR8, stat); + + if (stat & BIT_CEA_NEW_VSI) + sii8620_new_vsi(ctx); + + if (stat & BIT_CEA_NEW_AVI) + sii8620_new_avi(ctx); + + if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI)) + sii8620_start_video(ctx); +} + +/* endian agnostic, non-volatile version of test_bit */ +static bool sii8620_test_bit(unsigned int nr, const u8 *addr) +{ + return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE)); +} + +static irqreturn_t sii8620_irq_thread(int irq, void *data) +{ + static const struct { + int bit; + void (*handler)(struct sii8620 *ctx); + } irq_vec[] = { + { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc }, + { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb }, + { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc }, + { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc }, + { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr }, + { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, + { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, + { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr }, + }; + struct sii8620 *ctx = data; + u8 stats[LEN_FAST_INTR_STAT]; + int i, ret; + + mutex_lock(&ctx->lock); + + sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats)); + for (i = 0; i < ARRAY_SIZE(irq_vec); ++i) + if (sii8620_test_bit(irq_vec[i].bit, stats)) + irq_vec[i].handler(ctx); + + sii8620_mt_work(ctx); + + ret = sii8620_clear_error(ctx); + if (ret) { + dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret); + sii8620_mhl_disconnected(ctx); + } + mutex_unlock(&ctx->lock); + + return IRQ_HANDLED; +} + +static void sii8620_cable_in(struct sii8620 *ctx) +{ + struct device *dev = ctx->dev; + u8 ver[5]; + int ret; + + ret = sii8620_hw_on(ctx); + if (ret) { + dev_err(dev, "Error powering on, %d.\n", ret); + return; + } + sii8620_hw_reset(ctx); + + sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); + ret = sii8620_clear_error(ctx); + if (ret) { + dev_err(dev, "Error accessing I2C bus, 
%d.\n", ret); + return; + } + + dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0], + ver[3], ver[2], ver[4]); + + sii8620_write(ctx, REG_DPD, + BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN); + + sii8620_xtal_set_rate(ctx); + sii8620_disconnect(ctx); + + sii8620_write_seq_static(ctx, + REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG + | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734, + REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM, + REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN, + ); + + ret = sii8620_clear_error(ctx); + if (ret) { + dev_err(dev, "Error accessing I2C bus, %d.\n", ret); + return; + } + + enable_irq(to_i2c_client(ctx->dev)->irq); +} + +static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge) +{ + return container_of(bridge, struct sii8620, bridge); +} + +static bool sii8620_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct sii8620 *ctx = bridge_to_sii8620(bridge); + bool ret = false; + int max_clock = 74250; + + mutex_lock(&ctx->lock); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + goto out; + + if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) + max_clock = 300000; + + ret = mode->clock <= max_clock; + +out: + mutex_unlock(&ctx->lock); + + return ret; +} + +static const struct drm_bridge_funcs sii8620_bridge_funcs = { + .mode_fixup = sii8620_mode_fixup, +}; + +static int sii8620_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct sii8620 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = dev; + mutex_init(&ctx->lock); + INIT_LIST_HEAD(&ctx->mt_queue); + + ctx->clk_xtal = devm_clk_get(dev, "xtal"); + if (IS_ERR(ctx->clk_xtal)) { + dev_err(dev, "failed to get xtal clock from DT\n"); + return PTR_ERR(ctx->clk_xtal); + } + + if (!client->irq) { + dev_err(dev, "no irq provided\n"); + return -EINVAL; + } + irq_set_status_flags(client->irq, IRQ_NOAUTOEN); + ret = devm_request_threaded_irq(dev, client->irq, NULL, + sii8620_irq_thread, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "sii8620", ctx); + + ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->gpio_reset)) { + dev_err(dev, "failed to get reset gpio from DT\n"); + return PTR_ERR(ctx->gpio_reset); + } + + ctx->supplies[0].supply = "cvcc10"; + ctx->supplies[1].supply = "iovcc18"; + ret = devm_regulator_bulk_get(dev, 2, ctx->supplies); + if (ret) + return ret; + + i2c_set_clientdata(client, ctx); + + ctx->bridge.funcs = &sii8620_bridge_funcs; + ctx->bridge.of_node = dev->of_node; + drm_bridge_add(&ctx->bridge); + + sii8620_cable_in(ctx); + + return 0; +} + +static int sii8620_remove(struct i2c_client *client) +{ + struct sii8620 *ctx = i2c_get_clientdata(client); + + disable_irq(to_i2c_client(ctx->dev)->irq); + drm_bridge_remove(&ctx->bridge); + sii8620_hw_off(ctx); + + return 0; +} + +static const struct of_device_id sii8620_dt_match[] = { + { .compatible = "sil,sii8620" }, + { }, +}; +MODULE_DEVICE_TABLE(of, sii8620_dt_match); + +static const struct i2c_device_id sii8620_id[] = { + { "sii8620", 0 }, + { }, +}; + +MODULE_DEVICE_TABLE(i2c, sii8620_id); +static struct i2c_driver sii8620_driver = { + .driver = { + .name = "sii8620", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(sii8620_dt_match), + }, + .probe = sii8620_probe, + .remove = sii8620_remove, + .id_table = sii8620_id, +}; + 
+module_i2c_driver(sii8620_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h new file mode 100644 index 000000000000..6ff616a4f6ce --- /dev/null +++ b/drivers/gpu/drm/bridge/sil-sii8620.h @@ -0,0 +1,1517 @@ +/* + * Registers of Silicon Image SiI8620 Mobile HD Transmitter + * + * Copyright (C) 2015, Samsung Electronics Co., Ltd. + * Andrzej Hajda + * + * Based on MHL driver for Android devices. + * Copyright (C) 2013-2014 Silicon Image, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __SIL_SII8620_H__ +#define __SIL_SII8620_H__ + +/* Vendor ID Low byte, default value: 0x01 */ +#define REG_VND_IDL 0x0000 + +/* Vendor ID High byte, default value: 0x00 */ +#define REG_VND_IDH 0x0001 + +/* Device ID Low byte, default value: 0x60 */ +#define REG_DEV_IDL 0x0002 + +/* Device ID High byte, default value: 0x86 */ +#define REG_DEV_IDH 0x0003 + +/* Device Revision, default value: 0x10 */ +#define REG_DEV_REV 0x0004 + +/* OTP DBYTE510, default value: 0x00 */ +#define REG_OTP_DBYTE510 0x0006 + +/* System Control #1, default value: 0x00 */ +#define REG_SYS_CTRL1 0x0008 +#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7) +#define BIT_SYS_CTRL1_VSYNCPIN BIT(6) +#define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5) +#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4) +#define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3) +#define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2) +#define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1) +#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0) + +/* System Control DPD, default value: 0x90 */ +#define REG_DPD 0x000b +#define BIT_DPD_PWRON_PLL BIT(7) +#define BIT_DPD_PDNTX12 BIT(6) +#define BIT_DPD_PDNRX12 BIT(5) +#define BIT_DPD_OSC_EN BIT(4) +#define BIT_DPD_PWRON_HSIC BIT(3) +#define BIT_DPD_PDIDCK_N BIT(2) +#define BIT_DPD_PD_MHL_CLK_N BIT(1) + +/* Dual link Control, default value: 0x00 */ +#define REG_DCTL 0x000d +#define BIT_DCTL_TDM_LCLK_PHASE BIT(7) +#define BIT_DCTL_HSIC_CLK_PHASE BIT(6) +#define BIT_DCTL_CTS_TCK_PHASE BIT(5) +#define BIT_DCTL_EXT_DDC_SEL BIT(4) +#define BIT_DCTL_TRANSCODE BIT(3) +#define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2) +#define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1) +#define BIT_DCTL_TCLKNX_PHASE BIT(0) + +/* PWD Software Reset, default value: 0x20 */ +#define REG_PWD_SRST 0x000e +#define BIT_PWD_SRST_COC_DOC_RST BIT(7) +#define BIT_PWD_SRST_CBUS_RST_SW BIT(6) +#define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5) +#define BIT_PWD_SRST_MHLFIFO_RST BIT(4) +#define BIT_PWD_SRST_CBUS_RST BIT(3) +#define BIT_PWD_SRST_SW_RST_AUTO BIT(2) +#define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1) +#define BIT_PWD_SRST_SW_RST BIT(0) + +/* AKSV_1, default value: 0x00 */ +#define REG_AKSV_1 0x001d + +/* Video H Resolution #1, default value: 0x00 */ +#define REG_H_RESL 0x003a + +/* Video Mode, default value: 0x00 */ +#define REG_VID_MODE 0x004a +#define BIT_VID_MODE_M1080P BIT(6) + +/* Video Input Mode, default value: 0xc0 */ +#define REG_VID_OVRRD 0x0051 +#define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7) +#define BIT_VID_OVRRD_M1080P_OVRRD BIT(6) +#define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5) +#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4) +#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3) +#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2) +#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0) + +/* I2C Address reassignment, default value: 0x00 */ +#define REG_PAGE_MHLSPEC_ADDR 0x0057 +#define REG_PAGE7_ADDR 0x0058 
+#define REG_PAGE8_ADDR 0x005c + +/* Fast Interrupt Status, default value: 0x00 */ +#define REG_FAST_INTR_STAT 0x005f +#define LEN_FAST_INTR_STAT 7 +#define BIT_FAST_INTR_STAT_TIMR 8 +#define BIT_FAST_INTR_STAT_INT2 9 +#define BIT_FAST_INTR_STAT_DDC 10 +#define BIT_FAST_INTR_STAT_SCDT 11 +#define BIT_FAST_INTR_STAT_INFR 13 +#define BIT_FAST_INTR_STAT_EDID 14 +#define BIT_FAST_INTR_STAT_HDCP 15 +#define BIT_FAST_INTR_STAT_MSC 16 +#define BIT_FAST_INTR_STAT_MERR 17 +#define BIT_FAST_INTR_STAT_G2WB 18 +#define BIT_FAST_INTR_STAT_G2WB_ERR 19 +#define BIT_FAST_INTR_STAT_DISC 28 +#define BIT_FAST_INTR_STAT_BLOCK 30 +#define BIT_FAST_INTR_STAT_LTRN 31 +#define BIT_FAST_INTR_STAT_HDCP2 32 +#define BIT_FAST_INTR_STAT_TDM 42 +#define BIT_FAST_INTR_STAT_COC 51 + +/* GPIO Control, default value: 0x15 */ +#define REG_GPIO_CTRL1 0x006e +#define BIT_CTRL1_GPIO_I_8 BIT(5) +#define BIT_CTRL1_GPIO_OEN_8 BIT(4) +#define BIT_CTRL1_GPIO_I_7 BIT(3) +#define BIT_CTRL1_GPIO_OEN_7 BIT(2) +#define BIT_CTRL1_GPIO_I_6 BIT(1) +#define BIT_CTRL1_GPIO_OEN_6 BIT(0) + +/* Interrupt Control, default value: 0x06 */ +#define REG_INT_CTRL 0x006f +#define BIT_INT_CTRL_SOFTWARE_WP BIT(7) +#define BIT_INT_CTRL_INTR_OD BIT(2) +#define BIT_INT_CTRL_INTR_POLARITY BIT(1) + +/* Interrupt State, default value: 0x00 */ +#define REG_INTR_STATE 0x0070 +#define BIT_INTR_STATE_INTR_STATE BIT(0) + +/* Interrupt Source #1, default value: 0x00 */ +#define REG_INTR1 0x0071 + +/* Interrupt Source #2, default value: 0x00 */ +#define REG_INTR2 0x0072 + +/* Interrupt Source #3, default value: 0x01 */ +#define REG_INTR3 0x0073 +#define BIT_DDC_CMD_DONE BIT(3) + +/* Interrupt Source #5, default value: 0x00 */ +#define REG_INTR5 0x0074 + +/* Interrupt #1 Mask, default value: 0x00 */ +#define REG_INTR1_MASK 0x0075 + +/* Interrupt #2 Mask, default value: 0x00 */ +#define REG_INTR2_MASK 0x0076 + +/* Interrupt #3 Mask, default value: 0x00 */ +#define REG_INTR3_MASK 0x0077 + +/* Interrupt #5 Mask, default value: 0x00 */ +#define REG_INTR5_MASK 0x0078 +#define BIT_INTR_SCDT_CHANGE BIT(0) + +/* Hot Plug Connection Control, default value: 0x45 */ +#define REG_HPD_CTRL 0x0079 +#define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7) +#define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6) +#define BIT_HPD_CTRL_HPD_HIGH BIT(5) +#define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4) +#define BIT_HPD_CTRL_GPIO_I_1 BIT(3) +#define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2) +#define BIT_HPD_CTRL_GPIO_I_0 BIT(1) +#define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0) + +/* GPIO Control, default value: 0x55 */ +#define REG_GPIO_CTRL 0x007a +#define BIT_CTRL_GPIO_I_5 BIT(7) +#define BIT_CTRL_GPIO_OEN_5 BIT(6) +#define BIT_CTRL_GPIO_I_4 BIT(5) +#define BIT_CTRL_GPIO_OEN_4 BIT(4) +#define BIT_CTRL_GPIO_I_3 BIT(3) +#define BIT_CTRL_GPIO_OEN_3 BIT(2) +#define BIT_CTRL_GPIO_I_2 BIT(1) +#define BIT_CTRL_GPIO_OEN_2 BIT(0) + +/* Interrupt Source 7, default value: 0x00 */ +#define REG_INTR7 0x007b + +/* Interrupt Source 8, default value: 0x00 */ +#define REG_INTR8 0x007c + +/* Interrupt #7 Mask, default value: 0x00 */ +#define REG_INTR7_MASK 0x007d + +/* Interrupt #8 Mask, default value: 0x00 */ +#define REG_INTR8_MASK 0x007e +#define BIT_CEA_NEW_VSI BIT(2) +#define BIT_CEA_NEW_AVI BIT(1) + +/* IEEE, default value: 0x10 */ +#define REG_TMDS_CCTRL 0x0080 +#define BIT_TMDS_CCTRL_TMDS_OE BIT(4) + +/* TMDS Control #4, default value: 0x02 */ +#define REG_TMDS_CTRL4 0x0085 +#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1) +#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0) + +/* BIST CNTL, default value: 0x00 */ +#define REG_BIST_CTRL 0x00bb +#define 
BIT_RXBIST_VGB_EN BIT(7) +#define BIT_TXBIST_VGB_EN BIT(6) +#define BIT_BIST_START_SEL BIT(5) +#define BIT_BIST_START_BIT BIT(4) +#define BIT_BIST_ALWAYS_ON BIT(3) +#define BIT_BIST_TRANS BIT(2) +#define BIT_BIST_RESET BIT(1) +#define BIT_BIST_EN BIT(0) + +/* BIST DURATION0, default value: 0x00 */ +#define REG_BIST_TEST_SEL 0x00bd +#define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f + +/* BIST VIDEO_MODE, default value: 0x00 */ +#define REG_BIST_VIDEO_MODE 0x00be +#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f + +/* BIST DURATION0, default value: 0x00 */ +#define REG_BIST_DURATION_0 0x00bf + +/* BIST DURATION1, default value: 0x00 */ +#define REG_BIST_DURATION_1 0x00c0 + +/* BIST DURATION2, default value: 0x00 */ +#define REG_BIST_DURATION_2 0x00c1 + +/* BIST 8BIT_PATTERN, default value: 0x00 */ +#define REG_BIST_8BIT_PATTERN 0x00c2 + +/* LM DDC, default value: 0x80 */ +#define REG_LM_DDC 0x00c7 +#define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7) + +#define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5) +#define BIT_LM_DDC_DDC_TPI_SW BIT(2) +#define BIT_LM_DDC_DDC_GRANT BIT(1) +#define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0) + +/* DDC I2C Manual, default value: 0x03 */ +#define REG_DDC_MANUAL 0x00ec +#define BIT_DDC_MANUAL_MAN_DDC BIT(7) +#define BIT_DDC_MANUAL_VP_SEL BIT(6) +#define BIT_DDC_MANUAL_DSDA BIT(5) +#define BIT_DDC_MANUAL_DSCL BIT(4) +#define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3) +#define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2) +#define BIT_DDC_MANUAL_IO_DSDA BIT(1) +#define BIT_DDC_MANUAL_IO_DSCL BIT(0) + +/* DDC I2C Target Slave Address, default value: 0x00 */ +#define REG_DDC_ADDR 0x00ed +#define MSK_DDC_ADDR_DDC_ADDR 0xfe + +/* DDC I2C Target Segment Address, default value: 0x00 */ +#define REG_DDC_SEGM 0x00ee + +/* DDC I2C Target Offset Address, default value: 0x00 */ +#define REG_DDC_OFFSET 0x00ef + +/* DDC I2C Data In count #1, default value: 0x00 */ +#define REG_DDC_DIN_CNT1 0x00f0 + +/* DDC I2C Data In count #2, default value: 0x00 */ +#define REG_DDC_DIN_CNT2 0x00f1 +#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03 + +/* DDC I2C Status, default value: 0x04 */ +#define REG_DDC_STATUS 0x00f2 +#define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6) +#define BIT_DDC_STATUS_DDC_NO_ACK BIT(5) +#define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4) +#define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3) +#define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2) +#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_SUE BIT(1) +#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0) + +/* DDC I2C Command, default value: 0x70 */ +#define REG_DDC_CMD 0x00f3 +#define BIT_DDC_CMD_HDCP_DDC_EN BIT(6) +#define BIT_DDC_CMD_SDA_DEL_EN BIT(5) +#define BIT_DDC_CMD_DDC_FLT_EN BIT(4) + +#define MSK_DDC_CMD_DDC_CMD 0x0f +#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04 +#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09 +#define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f + +/* DDC I2C FIFO Data In/Out, default value: 0x00 */ +#define REG_DDC_DATA 0x00f4 + +/* DDC I2C Data Out Counter, default value: 0x00 */ +#define REG_DDC_DOUT_CNT 0x00f5 +#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7) +#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f + +/* DDC I2C Delay Count, default value: 0x14 */ +#define REG_DDC_DELAY_CNT 0x00f6 + +/* Test Control, default value: 0x80 */ +#define REG_TEST_TXCTRL 0x00f7 +#define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7) +#define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6) +#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c +#define BIT_TEST_TXCTRL_HDMI_MODE BIT(1) +#define BIT_TEST_TXCTRL_TST_PLLCK BIT(0) + +/* CBUS Address, default value: 0x00 */ +#define REG_PAGE_CBUS_ADDR 0x00f8 + +/* I2C Device Address 
re-assignment */ +#define REG_PAGE1_ADDR 0x00fc +#define REG_PAGE2_ADDR 0x00fd +#define REG_PAGE3_ADDR 0x00fe +#define REG_HW_TPI_ADDR 0x00ff + +/* USBT CTRL0, default value: 0x00 */ +#define REG_UTSRST 0x0100 +#define BIT_UTSRST_FC_SRST BIT(5) +#define BIT_UTSRST_KEEPER_SRST BIT(4) +#define BIT_UTSRST_HTX_SRST BIT(3) +#define BIT_UTSRST_TRX_SRST BIT(2) +#define BIT_UTSRST_TTX_SRST BIT(1) +#define BIT_UTSRST_HRX_SRST BIT(0) + +/* HSIC RX Control3, default value: 0x07 */ +#define REG_HRXCTRL3 0x0104 +#define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0 +#define BIT_HRXCTRL3_HRX_OUT_EN BIT(2) +#define BIT_HRXCTRL3_STATUS_EN BIT(1) +#define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0) + +/* HSIC RX INT Registers */ +#define REG_HRXINTL 0x0111 +#define REG_HRXINTH 0x0112 + +/* TDM TX NUMBITS, default value: 0x0c */ +#define REG_TTXNUMB 0x0116 +#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0 +#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3) +#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07 + +/* TDM TX NUMSPISYM, default value: 0x04 */ +#define REG_TTXSPINUMS 0x0117 + +/* TDM TX NUMHSICSYM, default value: 0x14 */ +#define REG_TTXHSICNUMS 0x0118 + +/* TDM TX NUMTOTSYM, default value: 0x18 */ +#define REG_TTXTOTNUMS 0x0119 + +/* TDM TX INT Low, default value: 0x00 */ +#define REG_TTXINTL 0x0136 +#define BIT_TTXINTL_TTX_INTR7 BIT(7) +#define BIT_TTXINTL_TTX_INTR6 BIT(6) +#define BIT_TTXINTL_TTX_INTR5 BIT(5) +#define BIT_TTXINTL_TTX_INTR4 BIT(4) +#define BIT_TTXINTL_TTX_INTR3 BIT(3) +#define BIT_TTXINTL_TTX_INTR2 BIT(2) +#define BIT_TTXINTL_TTX_INTR1 BIT(1) +#define BIT_TTXINTL_TTX_INTR0 BIT(0) + +/* TDM TX INT High, default value: 0x00 */ +#define REG_TTXINTH 0x0137 +#define BIT_TTXINTH_TTX_INTR15 BIT(7) +#define BIT_TTXINTH_TTX_INTR14 BIT(6) +#define BIT_TTXINTH_TTX_INTR13 BIT(5) +#define BIT_TTXINTH_TTX_INTR12 BIT(4) +#define BIT_TTXINTH_TTX_INTR11 BIT(3) +#define BIT_TTXINTH_TTX_INTR10 BIT(2) +#define BIT_TTXINTH_TTX_INTR9 BIT(1) +#define BIT_TTXINTH_TTX_INTR8 BIT(0) + +/* TDM RX Control, default value: 0x1c */ +#define REG_TRXCTRL 0x013b +#define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4) +#define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3) +#define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07 + +/* TDM RX NUMSPISYM, default value: 0x04 */ +#define REG_TRXSPINUMS 0x013c + +/* TDM RX NUMHSICSYM, default value: 0x14 */ +#define REG_TRXHSICNUMS 0x013d + +/* TDM RX NUMTOTSYM, default value: 0x18 */ +#define REG_TRXTOTNUMS 0x013e + +/* TDM RX Status 2nd, default value: 0x00 */ +#define REG_TRXSTA2 0x015c + +/* TDM RX INT Low, default value: 0x00 */ +#define REG_TRXINTL 0x0163 + +/* TDM RX INT High, default value: 0x00 */ +#define REG_TRXINTH 0x0164 + +/* TDM RX INTMASK High, default value: 0x00 */ +#define REG_TRXINTMH 0x0166 + +/* HSIC TX CRTL, default value: 0x00 */ +#define REG_HTXCTRL 0x0169 +#define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4) +#define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3) +#define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2) +#define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1) +#define BIT_HTXCTRL_HTX_DRVRST1 BIT(0) + +/* HSIC TX INT Low, default value: 0x00 */ +#define REG_HTXINTL 0x017d + +/* HSIC TX INT High, default value: 0x00 */ +#define REG_HTXINTH 0x017e + +/* HSIC Keeper, default value: 0x00 */ +#define REG_KEEPER 0x0181 +#define MSK_KEEPER_KEEPER_MODE_1_0 0x03 + +/* HSIC Flow Control General, default value: 0x02 */ +#define REG_FCGC 0x0183 +#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1) +#define BIT_FCGC_HSIC_FC_ENABLE BIT(0) + +/* HSIC Flow Control CTR13, default value: 0xfc */ +#define REG_FCCTR13 0x0191 + +/* HSIC Flow Control CTR14, default value: 0xff */ +#define 
REG_FCCTR14 0x0192 + +/* HSIC Flow Control CTR15, default value: 0xff */ +#define REG_FCCTR15 0x0193 + +/* HSIC Flow Control CTR50, default value: 0x03 */ +#define REG_FCCTR50 0x01b6 + +/* HSIC Flow Control INTR0, default value: 0x00 */ +#define REG_FCINTR0 0x01ec +#define REG_FCINTR1 0x01ed +#define REG_FCINTR2 0x01ee +#define REG_FCINTR3 0x01ef +#define REG_FCINTR4 0x01f0 +#define REG_FCINTR5 0x01f1 +#define REG_FCINTR6 0x01f2 +#define REG_FCINTR7 0x01f3 + +/* TDM Low Latency, default value: 0x20 */ +#define REG_TDMLLCTL 0x01fc +#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0 +#define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30 +#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c +#define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1) +#define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0) + +/* TMDS 0 Clock Control, default value: 0x10 */ +#define REG_TMDS0_CCTRL1 0x0210 +#define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0 +#define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30 + +/* TMDS Clock Enable, default value: 0x00 */ +#define REG_TMDS_CLK_EN 0x0211 +#define BIT_TMDS_CLK_EN_CLK_EN BIT(0) + +/* TMDS Channel Enable, default value: 0x00 */ +#define REG_TMDS_CH_EN 0x0212 +#define BIT_TMDS_CH_EN_CH0_EN BIT(4) +#define BIT_TMDS_CH_EN_CH12_EN BIT(0) + +/* BGR_BIAS, default value: 0x07 */ +#define REG_BGR_BIAS 0x0215 +#define BIT_BGR_BIAS_BGR_EN BIT(7) +#define MSK_BGR_BIAS_BIAS_BGR_D 0x0f + +/* TMDS 0 Digital I2C BW, default value: 0x0a */ +#define REG_ALICE0_BW_I2C 0x0231 + +/* TMDS 0 Digital Zone Control, default value: 0xe0 */ +#define REG_ALICE0_ZONE_CTRL 0x024c +#define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7) +#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6) +#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30 +#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f + +/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */ +#define REG_ALICE0_MODE_CTRL 0x024d +#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c +#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03 + +/* MHL Tx Control 6th, default value: 0xa0 */ +#define REG_MHLTX_CTL6 0x0285 +#define MSK_MHLTX_CTL6_EMI_SEL 0xe0 +#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03 + +/* Packet Filter0, default value: 0x00 */ +#define REG_PKT_FILTER_0 0x0290 +#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7) +#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6) +#define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5) +#define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4) +#define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3) +#define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2) +#define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1) +#define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0) + +/* Packet Filter1, default value: 0x00 */ +#define REG_PKT_FILTER_1 0x0291 +#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7) +#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6) +#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3) +#define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2) +#define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1) +#define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0) + +/* TMDS Clock Status, default value: 0x10 */ +#define REG_TMDS_CSTAT_P3 0x02a0 +#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7) +#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6) +#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5) +#define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3) +#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2) +#define BIT_TMDS_CSTAT_P3_SCDT BIT(1) +#define BIT_TMDS_CSTAT_P3_CKDT BIT(0) + +/* RX_HDMI Control, default value: 0x10 */ +#define REG_RX_HDMI_CTRL0 0x02a1 +#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5) +#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4) +#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3) +#define 
BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2) +#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1) +#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0) + +/* RX_HDMI Control, default value: 0x38 */ +#define REG_RX_HDMI_CTRL2 0x02a3 +#define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0 +#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4) +#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3) +#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0) + +/* RX_HDMI Control, default value: 0x0f */ +#define REG_RX_HDMI_CTRL3 0x02a4 +#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f + +/* rx_hdmi Clear Buffer, default value: 0x00 */ +#define REG_RX_HDMI_CLR_BUFFER 0x02ac +#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0 +#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5) +#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4) +#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3) +#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2) +#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1) +#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0) + +/* RX_HDMI VSI Header1, default value: 0x00 */ +#define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8 + +/* RX_HDMI VSI MHL Monitor, default value: 0x3c */ +#define REG_RX_HDMI_VSIF_MHL_MON 0x02d7 + +#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c +#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03 + +/* Interrupt Source 9, default value: 0x00 */ +#define REG_INTR9 0x02e0 +#define BIT_INTR9_EDID_ERROR BIT(6) +#define BIT_INTR9_EDID_DONE BIT(5) +#define BIT_INTR9_DEVCAP_DONE BIT(4) + +/* Interrupt 9 Mask, default value: 0x00 */ +#define REG_INTR9_MASK 0x02e1 + +/* TPI CBUS Start, default value: 0x00 */ +#define REG_TPI_CBUS_START 0x02e2 +#define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7) +#define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6) +#define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5) +#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4) +#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3) +#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2) +#define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1) +#define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0) + +/* EDID Control, default value: 0x10 */ +#define REG_EDID_CTRL 0x02e3 +#define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7) +#define BIT_EDID_CTRL_XDEVCAP_EN BIT(6) +#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5) +#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4) +#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3) +#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2) +#define BIT_EDID_CTRL_INVALID_BKSV BIT(1) +#define BIT_EDID_CTRL_EDID_MODE_EN BIT(0) + +/* EDID FIFO Addr, default value: 0x00 */ +#define REG_EDID_FIFO_ADDR 0x02e9 + +/* EDID FIFO Write Data, default value: 0x00 */ +#define REG_EDID_FIFO_WR_DATA 0x02ea + +/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */ +#define REG_EDID_FIFO_ADDR_MON 0x02eb + +/* EDID FIFO Read Data, default value: 0x00 */ +#define REG_EDID_FIFO_RD_DATA 0x02ec + +/* EDID DDC Segment Pointer, default value: 0x00 */ +#define REG_EDID_START_EXT 0x02ed + +/* TX IP BIST CNTL and Status, default value: 0x00 */ +#define REG_TX_IP_BIST_CNTLSTA 0x02f2 +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3) +#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1) +#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0) + +/* TX IP BIST INST LOW, default value: 0x00 */ +#define REG_TX_IP_BIST_INST_LOW 0x02f3 +#define 
REG_TX_IP_BIST_INST_HIGH 0x02f4 + +/* TX IP BIST PATTERN LOW, default value: 0x00 */ +#define REG_TX_IP_BIST_PAT_LOW 0x02f5 +#define REG_TX_IP_BIST_PAT_HIGH 0x02f6 + +/* TX IP BIST CONFIGURE LOW, default value: 0x00 */ +#define REG_TX_IP_BIST_CONF_LOW 0x02f7 +#define REG_TX_IP_BIST_CONF_HIGH 0x02f8 + +/* E-MSC General Control, default value: 0x80 */ +#define REG_GENCTL 0x0300 +#define BIT_GENCTL_SPEC_TRANS_DIS BIT(7) +#define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6) +#define BIT_GENCTL_SPI_MISO_EDGE BIT(5) +#define BIT_GENCTL_SPI_MOSI_EDGE BIT(4) +#define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3) +#define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2) +#define BIT_GENCTL_START_TRAIN_SEQ BIT(1) +#define BIT_GENCTL_EMSC_EN BIT(0) + +/* E-MSC Comma ErrorCNT, default value: 0x03 */ +#define REG_COMMECNT 0x0305 +#define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7) +#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f + +/* E-MSC RFIFO ByteCnt, default value: 0x00 */ +#define REG_EMSCRFIFOBCNTL 0x031a +#define REG_EMSCRFIFOBCNTH 0x031b + +/* SPI Burst Cnt Status, default value: 0x00 */ +#define REG_SPIBURSTCNT 0x031e + +/* SPI Burst Status and SWRST, default value: 0x00 */ +#define REG_SPIBURSTSTAT 0x0322 +#define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7) +#define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6) +#define BIT_SPIBURSTSTAT_SPI_SRST BIT(5) +#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0) + +/* E-MSC 1st Interrupt, default value: 0x00 */ +#define REG_EMSCINTR 0x0323 +#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7) +#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6) +#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5) +#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4) +#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3) +#define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2) +#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1) +#define BIT_EMSCINTR_SPI_DVLD BIT(0) + +/* E-MSC Interrupt Mask, default value: 0x00 */ +#define REG_EMSCINTRMASK 0x0324 + +/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */ +#define REG_EMSC_XMIT_WRITE_PORT 0x032a + +/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */ +#define REG_EMSC_RCV_READ_PORT 0x032b + +/* E-MSC 2nd Interrupt, default value: 0x00 */ +#define REG_EMSCINTR1 0x032c +#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0) + +/* E-MSC Interrupt Mask, default value: 0x00 */ +#define REG_EMSCINTRMASK1 0x032d +#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0) + +/* MHL Top Ctl, default value: 0x00 */ +#define REG_MHL_TOP_CTL 0x0330 +#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7) +#define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6) +#define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03 + +/* MHL DataPath 1st Ctl, default value: 0xbc */ +#define REG_MHL_DP_CTL0 0x0331 +#define BIT_MHL_DP_CTL0_DP_OE BIT(7) +#define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6) +#define MSK_MHL_DP_CTL0_TX_OE 0x3f + +/* MHL DataPath 2nd Ctl, default value: 0xbb */ +#define REG_MHL_DP_CTL1 0x0332 +#define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0 +#define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f + +/* MHL DataPath 3rd Ctl, default value: 0x2f */ +#define REG_MHL_DP_CTL2 0x0333 +#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7) +#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30 +#define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c +#define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03 + +/* MHL DataPath 4th Ctl, default value: 0x48 */ +#define REG_MHL_DP_CTL3 0x0334 +#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0 +#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f + +/* MHL DataPath 5th Ctl, default value: 0x48 */ +#define REG_MHL_DP_CTL4 0x0335 +#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0 +#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f + +/* MHL 
DataPath 6th Ctl, default value: 0x3f */ +#define REG_MHL_DP_CTL5 0x0336 +#define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7) +#define BIT_MHL_DP_CTL5_RSEN_EN BIT(6) +#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30 +#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c +#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03 + +/* MHL PLL 1st Ctl, default value: 0x05 */ +#define REG_MHL_PLL_CTL0 0x0337 +#define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7) + +#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10 +#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00 + +#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c +#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c +#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08 +#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04 +#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00 + +#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1) +#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0) + +/* MHL PLL 3rd Ctl, default value: 0x80 */ +#define REG_MHL_PLL_CTL2 0x0339 +#define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7) +#define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3) +#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2) +#define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03 + +/* MHL CBUS 1st Ctl, default value: 0x12 */ +#define REG_MHL_CBUS_CTL0 0x0340 +#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7) + +#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30 +#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00 +#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10 +#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20 +#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30 + +#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c + +#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03 +#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00 +#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01 +#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02 +#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03 + +/* MHL CBUS 2nd Ctl, default value: 0x03 */ +#define REG_MHL_CBUS_CTL1 0x0341 +#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07 +#define VAL_MHL_CBUS_CTL1_0888_OHM 0x00 +#define VAL_MHL_CBUS_CTL1_1115_OHM 0x04 +#define VAL_MHL_CBUS_CTL1_1378_OHM 0x07 + +/* MHL CoC 1st Ctl, default value: 0xc3 */ +#define REG_MHL_COC_CTL0 0x0342 +#define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7) +#define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70 +#define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07 + +/* MHL CoC 2nd Ctl, default value: 0x87 */ +#define REG_MHL_COC_CTL1 0x0343 +#define BIT_MHL_COC_CTL1_COC_EN BIT(7) +#define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f + +/* MHL CoC 4th Ctl, default value: 0x00 */ +#define REG_MHL_COC_CTL3 0x0345 +#define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0) + +/* MHL CoC 5th Ctl, default value: 0x28 */ +#define REG_MHL_COC_CTL4 0x0346 +#define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0 +#define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f + +/* MHL CoC 6th Ctl, default value: 0x0d */ +#define REG_MHL_COC_CTL5 0x0347 + +/* MHL DoC 1st Ctl, default value: 0x18 */ +#define REG_MHL_DOC_CTL0 0x0349 +#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7) +#define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38 +#define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06 +#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0) + +/* MHL DataPath 7th Ctl, default value: 0x2a */ +#define REG_MHL_DP_CTL6 0x0350 +#define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5) 
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4) +#define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3) +#define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2) +#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1) +#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0) + +/* MHL DataPath 8th Ctl, default value: 0x06 */ +#define REG_MHL_DP_CTL7 0x0351 +#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0 +#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f + +/* Tx Zone Ctl1, default value: 0x00 */ +#define REG_TX_ZONE_CTL1 0x0361 +#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08 + +/* MHL3 Tx Zone Ctl, default value: 0x00 */ +#define REG_MHL3_TX_ZONE_CTL 0x0364 +#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7) +#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03 + +#define MSK_TX_ZONE_CTL3_TX_ZONE 0x03 +#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00 +#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01 +#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02 + +/* HDCP Polling Control and Status, default value: 0x70 */ +#define REG_HDCP2X_POLL_CS 0x0391 + +#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6) +#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5) +#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4) +#define MSK_HDCP2X_POLL_CS_ 0x0c +#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1) +#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0) + +/* HDCP Interrupt 0, default value: 0x00 */ +#define REG_HDCP2X_INTR0 0x0398 + +/* HDCP Interrupt 0 Mask, default value: 0x00 */ +#define REG_HDCP2X_INTR0_MASK 0x0399 + +/* HDCP General Control 0, default value: 0x02 */ +#define REG_HDCP2X_CTRL_0 0x03a0 +#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7) +#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6) +#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5) +#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4) +#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3) +#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2) +#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1) +#define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0) + +/* HDCP General Control 1, default value: 0x08 */ +#define REG_HDCP2X_CTRL_1 0x03a1 +#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0 +#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3) +#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2) +#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1) +#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0) + +/* HDCP Misc Control, default value: 0x00 */ +#define REG_HDCP2X_MISC_CTRL 0x03a5 +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4) +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3) +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2) +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1) +#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0) + +/* HDCP RPT SMNG K, default value: 0x00 */ +#define REG_HDCP2X_RPT_SMNG_K 0x03a6 + +/* HDCP RPT SMNG In, default value: 0x00 */ +#define REG_HDCP2X_RPT_SMNG_IN 0x03a7 + +/* HDCP Auth Status, default value: 0x00 */ +#define REG_HDCP2X_AUTH_STAT 0x03aa + +/* HDCP RPT RCVID Out, default value: 0x00 */ +#define REG_HDCP2X_RPT_RCVID_OUT 0x03ac + +/* HDCP TP1, default value: 0x62 */ +#define REG_HDCP2X_TP1 0x03b4 + +/* HDCP GP Out 0, default value: 0x00 */ +#define REG_HDCP2X_GP_OUT0 0x03c7 + +/* HDCP Repeater RCVR ID 0, default value: 0x00 */ +#define REG_HDCP2X_RPT_RCVR_ID0 0x03d1 + +/* HDCP DDCM Status, default value: 0x00 */ +#define REG_HDCP2X_DDCM_STS 0x03d8 +#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0 +#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f + +/* HDMI2MHL3 Control, default value: 0x0a */ 
+#define REG_M3_CTRL 0x03e0 +#define BIT_M3_CTRL_H2M_SWRST BIT(4) +#define BIT_M3_CTRL_SW_MHL3_SEL BIT(3) +#define BIT_M3_CTRL_M3AV_EN BIT(2) +#define BIT_M3_CTRL_ENC_TMDS BIT(1) +#define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0) + +#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \ + | BIT_M3_CTRL_ENC_TMDS) +#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \ + | BIT_M3_CTRL_M3AV_EN \ + | BIT_M3_CTRL_ENC_TMDS \ + | BIT_M3_CTRL_MHL3_MASTER_EN) + +/* HDMI2MHL3 Port0 Control, default value: 0x04 */ +#define REG_M3_P0CTRL 0x03e1 +#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4) +#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3) +#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2) +#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1) +#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0) + +#define REG_M3_POSTM 0x03e2 +#define MSK_M3_POSTM_RRP_DECODE 0xf8 +#define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07 + +/* HDMI2MHL3 Scramble Control, default value: 0x41 */ +#define REG_M3_SCTRL 0x03e6 +#define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0 +#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0) + +/* HSIC Div Ctl, default value: 0x05 */ +#define REG_DIV_CTL_MAIN 0x03f2 +#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c +#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03 + +/* MHL Capability 1st Byte, default value: 0x00 */ +#define REG_MHL_DEVCAP_0 0x0400 + +/* MHL Interrupt 1st Byte, default value: 0x00 */ +#define REG_MHL_INT_0 0x0420 + +/* Device Status 1st byte, default value: 0x00 */ +#define REG_MHL_STAT_0 0x0430 + +/* CBUS Scratch Pad 1st Byte, default value: 0x00 */ +#define REG_MHL_SCRPAD_0 0x0440 + +/* MHL Extended Capability 1st Byte, default value: 0x00 */ +#define REG_MHL_EXTDEVCAP_0 0x0480 + +/* Device Extended Status 1st byte, default value: 0x00 */ +#define REG_MHL_EXTSTAT_0 0x0490 + +/* TPI DTD Byte2, default value: 0x00 */ +#define REG_TPI_DTD_B2 0x0602 + +#define VAL_TPI_QUAN_RANGE_LIMITED 0x01 +#define VAL_TPI_QUAN_RANGE_FULL 0x02 +#define VAL_TPI_FORMAT_RGB 0x00 +#define VAL_TPI_FORMAT_YCBCR444 0x01 +#define VAL_TPI_FORMAT_YCBCR422 0x02 +#define VAL_TPI_FORMAT_INTERNAL_RGB 0x03 +#define VAL_TPI_FORMAT(_fmt, _qr) \ + (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2)) + +/* Input Format, default value: 0x00 */ +#define REG_TPI_INPUT 0x0609 +#define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7) +#define BIT_TPI_INPUT_ENDITHER BIT(6) +#define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c +#define MSK_TPI_INPUT_INPUT_FORMAT 0x03 + +/* Output Format, default value: 0x00 */ +#define REG_TPI_OUTPUT 0x060a +#define BIT_TPI_OUTPUT_CSCMODE709 BIT(4) +#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c +#define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03 + +/* TPI AVI Check Sum, default value: 0x00 */ +#define REG_TPI_AVI_CHSUM 0x060c + +/* TPI System Control, default value: 0x00 */ +#define REG_TPI_SC 0x061a +#define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7) +#define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6) +#define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5) +#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4) +#define BIT_TPI_SC_TPI_AV_MUTE BIT(3) +#define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2) +#define BIT_TPI_SC_DDC_TPI_SW BIT(1) +#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0) + +/* TPI COPP Query Data, default value: 0x00 */ +#define REG_TPI_COPP_DATA1 0x0629 +#define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7) +#define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6) +#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30 +#define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00 +#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10 +#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20 +#define 
VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30 +#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3) +#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2) +#define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1) +#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0) + +/* TPI COPP Control Data, default value: 0x00 */ +#define REG_TPI_COPP_DATA2 0x062a +#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5) +#define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4) +#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3) +#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2) +#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1) +#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0) + +/* TPI Interrupt Enable, default value: 0x00 */ +#define REG_TPI_INTR_EN 0x063c + +/* TPI Interrupt Status Low Byte, default value: 0x00 */ +#define REG_TPI_INTR_ST0 0x063d +#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7) +#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6) +#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5) +#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3) +#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2) +#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1) +#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0) + +/* TPI DS BCAPS Status, default value: 0x00 */ +#define REG_TPI_DS_BCAPS 0x0644 + +/* TPI BStatus1, default value: 0x00 */ +#define REG_TPI_BSTATUS1 0x0645 +#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7) +#define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f + +/* TPI BStatus2, default value: 0x10 */ +#define REG_TPI_BSTATUS2 0x0646 +#define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0 +#define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4) +#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3) +#define MSK_TPI_BSTATUS2_DS_DEPTH 0x07 + +/* TPI HW Optimization Control #3, default value: 0x00 */ +#define REG_TPI_HW_OPT3 0x06bb +#define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7) +#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3) +#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2) +#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03 + +/* TPI Info Frame Select, default value: 0x00 */ +#define REG_TPI_INFO_FSEL 0x06bf +#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7) +#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6) +#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5) +#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07 + +/* TPI Info Byte #0, default value: 0x00 */ +#define REG_TPI_INFO_B0 0x06c0 + +/* CoC Status, default value: 0x00 */ +#define REG_COC_STAT_0 0x0700 +#define REG_COC_STAT_1 0x0701 +#define REG_COC_STAT_2 0x0702 +#define REG_COC_STAT_3 0x0703 +#define REG_COC_STAT_4 0x0704 +#define REG_COC_STAT_5 0x0705 + +/* CoC 1st Ctl, default value: 0x40 */ +#define REG_COC_CTL0 0x0710 + +/* CoC 2nd Ctl, default value: 0x0a */ +#define REG_COC_CTL1 0x0711 +#define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0 +#define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f + +/* CoC 3rd Ctl, default value: 0x14 */ +#define REG_COC_CTL2 0x0712 +#define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0 +#define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f + +/* CoC 4th Ctl, default value: 0x40 */ +#define REG_COC_CTL3 0x0713 +#define BIT_COC_CTL3_COC_CTRL3_7 BIT(7) +#define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f + +/* CoC 7th Ctl, default value: 0x00 */ +#define REG_COC_CTL6 0x0716 +#define BIT_COC_CTL6_COC_CTRL6_7 BIT(7) +#define BIT_COC_CTL6_COC_CTRL6_6 BIT(6) +#define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f + +/* CoC 8th Ctl, default value: 0x06 */ +#define REG_COC_CTL7 0x0717 +#define BIT_COC_CTL7_COC_CTRL7_7 BIT(7) +#define BIT_COC_CTL7_COC_CTRL7_6 BIT(6) +#define BIT_COC_CTL7_COC_CTRL7_5 BIT(5) +#define MSK_COC_CTL7_COC_CTRL7_4_3 0x18 +#define MSK_COC_CTL7_COC_CTRL7_2_0 0x07 + 
+/* CoC 10th Ctl, default value: 0x00 */ +#define REG_COC_CTL9 0x0719 + +/* CoC 11th Ctl, default value: 0x00 */ +#define REG_COC_CTLA 0x071a + +/* CoC 12th Ctl, default value: 0x00 */ +#define REG_COC_CTLB 0x071b + +/* CoC 13th Ctl, default value: 0x0f */ +#define REG_COC_CTLC 0x071c + +/* CoC 14th Ctl, default value: 0x0a */ +#define REG_COC_CTLD 0x071d +#define BIT_COC_CTLD_COC_CTRLD_7 BIT(7) +#define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f + +/* CoC 15th Ctl, default value: 0x0a */ +#define REG_COC_CTLE 0x071e +#define BIT_COC_CTLE_COC_CTRLE_7 BIT(7) +#define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f + +/* CoC 16th Ctl, default value: 0x00 */ +#define REG_COC_CTLF 0x071f +#define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8 +#define MSK_COC_CTLF_COC_CTRLF_2_0 0x07 + +/* CoC 18th Ctl, default value: 0x32 */ +#define REG_COC_CTL11 0x0721 +#define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0 +#define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f + +/* CoC 21st Ctl, default value: 0x00 */ +#define REG_COC_CTL14 0x0724 +#define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0 +#define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f + +/* CoC 22nd Ctl, default value: 0x00 */ +#define REG_COC_CTL15 0x0725 +#define BIT_COC_CTL15_COC_CTRL15_7 BIT(7) +#define MSK_COC_CTL15_COC_CTRL15_6_4 0x70 +#define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f + +/* CoC Interrupt, default value: 0x00 */ +#define REG_COC_INTR 0x0726 + +/* CoC Interrupt Mask, default value: 0x00 */ +#define REG_COC_INTR_MASK 0x0727 +#define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0) +#define BIT_COC_CALIBRATION_DONE BIT(1) + +/* CoC Misc Ctl, default value: 0x00 */ +#define REG_COC_MISC_CTL0 0x0728 +#define BIT_COC_MISC_CTL0_FSM_MON BIT(7) + +/* CoC 24th Ctl, default value: 0x00 */ +#define REG_COC_CTL17 0x072a +#define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0 +#define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f + +/* CoC 25th Ctl, default value: 0x00 */ +#define REG_COC_CTL18 0x072b +#define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0 +#define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f + +/* CoC 26th Ctl, default value: 0x00 */ +#define REG_COC_CTL19 0x072c +#define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0 +#define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f + +/* CoC 27th Ctl, default value: 0x00 */ +#define REG_COC_CTL1A 0x072d +#define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc +#define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03 + +/* DoC 9th Status, default value: 0x00 */ +#define REG_DOC_STAT_8 0x0740 + +/* DoC 10th Status, default value: 0x00 */ +#define REG_DOC_STAT_9 0x0741 + +/* DoC 5th CFG, default value: 0x00 */ +#define REG_DOC_CFG4 0x074e +#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f + +/* DoC 1st Ctl, default value: 0x40 */ +#define REG_DOC_CTL0 0x0751 + +/* DoC 7th Ctl, default value: 0x00 */ +#define REG_DOC_CTL6 0x0757 +#define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7) +#define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6) +#define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30 +#define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f + +/* DoC 8th Ctl, default value: 0x00 */ +#define REG_DOC_CTL7 0x0758 +#define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7) +#define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6) +#define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5) +#define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18 +#define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07 + +/* DoC 9th Ctl, default value: 0x00 */ +#define REG_DOC_CTL8 0x076c +#define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7) +#define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70 +#define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c +#define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03 + +/* DoC 10th Ctl, default value: 0x00 */ +#define REG_DOC_CTL9 0x076d + +/* DoC 11th Ctl, default value: 0x00 */ +#define REG_DOC_CTLA 0x076e + +/* DoC 15th Ctl, default value: 0x00 */ +#define 
REG_DOC_CTLE 0x0772 +#define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7) +#define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6) +#define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30 +#define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f + +/* Interrupt Mask 1st, default value: 0x00 */ +#define REG_MHL_INT_0_MASK 0x0580 + +/* Interrupt Mask 2nd, default value: 0x00 */ +#define REG_MHL_INT_1_MASK 0x0581 + +/* Interrupt Mask 3rd, default value: 0x00 */ +#define REG_MHL_INT_2_MASK 0x0582 + +/* Interrupt Mask 4th, default value: 0x00 */ +#define REG_MHL_INT_3_MASK 0x0583 + +/* MDT Receive Time Out, default value: 0x00 */ +#define REG_MDT_RCV_TIMEOUT 0x0584 + +/* MDT Transmit Time Out, default value: 0x00 */ +#define REG_MDT_XMIT_TIMEOUT 0x0585 + +/* MDT Receive Control, default value: 0x00 */ +#define REG_MDT_RCV_CTRL 0x0586 +#define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7) +#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6) +#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4) +#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3) +#define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2) +#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1) +#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0) + +/* MDT Receive Read Port, default value: 0x00 */ +#define REG_MDT_RCV_READ_PORT 0x0587 + +/* MDT Transmit Control, default value: 0x70 */ +#define REG_MDT_XMIT_CTRL 0x0588 +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7) +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6) +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5) +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4) +#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3) +#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2) +#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1) +#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0) + +/* MDT Receive WRITE Port, default value: 0x00 */ +#define REG_MDT_XMIT_WRITE_PORT 0x0589 + +/* MDT RFIFO Status, default value: 0x00 */ +#define REG_MDT_RFIFO_STAT 0x058a +#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0 +#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f + +/* MDT XFIFO Status, default value: 0x80 */ +#define REG_MDT_XFIFO_STAT 0x058b +#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0 +#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4) +#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f + +/* MDT Interrupt 0, default value: 0x0c */ +#define REG_MDT_INT_0 0x058c +#define BIT_MDT_RFIFO_DATA_RDY BIT(0) +#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2) +#define BIT_MDT_XFIFO_EMPTY BIT(3) + +/* MDT Interrupt 0 Mask, default value: 0x00 */ +#define REG_MDT_INT_0_MASK 0x058d + +/* MDT Interrupt 1, default value: 0x00 */ +#define REG_MDT_INT_1 0x058e +#define BIT_MDT_RCV_TIMEOUT BIT(0) +#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1) +#define BIT_MDT_RCV_SM_ERROR BIT(2) +#define BIT_MDT_XMIT_TIMEOUT BIT(5) +#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6) +#define BIT_MDT_XMIT_SM_ERROR BIT(7) + +/* MDT Interrupt 1 Mask, default value: 0x00 */ +#define REG_MDT_INT_1_MASK 0x058f + +/* CBUS Vendor ID, default value: 0x01 */ +#define REG_CBUS_VENDOR_ID 0x0590 + +/* CBUS Connection Status, default value: 0x00 */ +#define REG_CBUS_STATUS 0x0591 +#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4) +#define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3) +#define BIT_CBUS_STATUS_CBUS_HPD BIT(2) +#define BIT_CBUS_STATUS_MHL_MODE BIT(1) +#define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0) + +/* CBUS Interrupt 1st, default value: 0x00 */ +#define REG_CBUS_INT_0 0x0592 +#define BIT_CBUS_MSC_MT_DONE_NACK BIT(7) +#define BIT_CBUS_MSC_MR_SET_INT BIT(6) +#define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5) +#define 
+#define BIT_CBUS_MSC_MR_MSC_MSG BIT(4)
+#define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3)
+#define BIT_CBUS_HPD_CHG BIT(2)
+#define BIT_CBUS_MSC_MT_DONE BIT(1)
+#define BIT_CBUS_CNX_CHG BIT(0)
+
+/* CBUS Interrupt Mask 1st, default value: 0x00 */
+#define REG_CBUS_INT_0_MASK 0x0593
+
+/* CBUS Interrupt 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1 0x0594
+#define BIT_CBUS_CMD_ABORT BIT(6)
+#define BIT_CBUS_MSC_ABORT_RCVD BIT(3)
+#define BIT_CBUS_DDC_ABORT BIT(2)
+#define BIT_CBUS_CEC_ABORT BIT(1)
+
+/* CBUS Interrupt Mask 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1_MASK 0x0595
+
+/* CBUS DDC Abort Interrupt, default value: 0x00 */
+#define REG_DDC_ABORT_INT 0x0598
+
+/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */
+#define REG_DDC_ABORT_INT_MASK 0x0599
+
+/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT 0x059a
+
+/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT_MASK 0x059b
+
+/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT 0x059c
+
+/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT_MASK 0x059d
+
+/* CBUS RX DISCOVERY interrupt, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0 0x059e
+
+/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0_MASK 0x059f
+
+/* CBUS_Link_Layer Control #8, default value: 0x00 */
+#define REG_CBUS_LINK_CTRL_8 0x05a7
+
+/* MDT State Machine Status, default value: 0x00 */
+#define REG_MDT_SM_STAT 0x05b5
+#define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0
+#define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f
+
+/* CBUS MSC command trigger, default value: 0x00 */
+#define REG_MSC_COMMAND_START 0x05b8
+#define BIT_MSC_COMMAND_START_DEBUG BIT(5)
+#define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4)
+#define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3)
+#define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2)
+#define BIT_MSC_COMMAND_START_MSC_MSG BIT(1)
+#define BIT_MSC_COMMAND_START_PEER BIT(0)
+
+/* CBUS MSC Command/Offset, default value: 0x00 */
+#define REG_MSC_CMD_OR_OFFSET 0x05b9
+
+/* CBUS MSC Transmit Data */
+#define REG_MSC_1ST_TRANSMIT_DATA 0x05ba
+#define REG_MSC_2ND_TRANSMIT_DATA 0x05bb
+
+/* CBUS MSC Requester Received Data */
+#define REG_MSC_MT_RCVD_DATA0 0x05bc
+#define REG_MSC_MT_RCVD_DATA1 0x05bd
+
+/* CBUS MSC Responder MSC_MSG Received Data */
+#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf
+#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0
+
+/* CBUS MSC Heartbeat Control, default value: 0x27 */
+#define REG_MSC_HEARTBEAT_CTRL 0x05c4
+#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7)
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f
+
+/* CBUS MSC Compatibility Control, default value: 0x02 */
+#define REG_CBUS_MSC_COMPAT_CTRL 0x05c7
+#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2)
+
+/* CBUS3 Converter Control, default value: 0x24 */
+#define REG_CBUS3_CNVT 0x05dc
+#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0
+#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c
+#define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1)
+#define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0)
+
+/* Discovery Control1, default value: 0x24 */
+#define REG_DISC_CTRL1 0x05e0
+#define BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7)
+#define BIT_DISC_CTRL1_HB_ONLY BIT(6)
+#define MSK_DISC_CTRL1_DISC_ATT 0x30
+#define MSK_DISC_CTRL1_DISC_CYC 0x0c
+#define BIT_DISC_CTRL1_DISC_EN BIT(0)
+
+#define VAL_PUP_OFF 0
+#define VAL_PUP_20K 1
+#define VAL_PUP_5K 2
+
+/* Discovery Control4, default value: 0x80 */
+#define REG_DISC_CTRL4 0x05e3
+#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0
+#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30
+#define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | (pup_idle << 4))
+
+/* Discovery Control5, default value: 0x03 */
+#define REG_DISC_CTRL5 0x05e4
+#define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3)
+#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03
+
+/* Discovery Control8, default value: 0x81 */
+#define REG_DISC_CTRL8 0x05e7
+#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7)
+#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0)
+
+/* Discovery Control9, default value: 0x54 */
+#define REG_DISC_CTRL9 0x05e8
+#define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7)
+#define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6)
+#define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4)
+#define BIT_DISC_CTRL9_NOMHL_EST BIT(3)
+#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2)
+#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1)
+#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0)
+
+/* Discovery Status1, default value: 0x00 */
+#define REG_DISC_STAT1 0x05eb
+#define BIT_DISC_STAT1_PSM_OVRIDE BIT(5)
+#define MSK_DISC_STAT1_DISC_SM 0x0f
+
+/* Discovery Status2, default value: 0x00 */
+#define REG_DISC_STAT2 0x05ec
+#define BIT_DISC_STAT2_CBUS_OE_POL BIT(6)
+#define BIT_DISC_STAT2_CBUS_SATUS BIT(5)
+#define BIT_DISC_STAT2_RSEN BIT(4)
+
+#define MSK_DISC_STAT2_MHL_VRSN 0x0c
+#define VAL_DISC_STAT2_DEFAULT 0x00
+#define VAL_DISC_STAT2_MHL1_2 0x04
+#define VAL_DISC_STAT2_MHL3 0x08
+#define VAL_DISC_STAT2_RESERVED 0x0c
+
+#define MSK_DISC_STAT2_RGND 0x03
+#define VAL_RGND_OPEN 0x00
+#define VAL_RGND_2K 0x01
+#define VAL_RGND_1K 0x02
+#define VAL_RGND_SHORT 0x03
+
+/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0 0x05ed
+#define BIT_RGND_READY_INT BIT(6)
+#define BIT_CBUS_MHL12_DISCON_INT BIT(5)
+#define BIT_CBUS_MHL3_DISCON_INT BIT(4)
+#define BIT_NOT_MHL_EST_INT BIT(3)
+#define BIT_MHL_EST_INT BIT(2)
+#define BIT_MHL3_EST_INT BIT(1)
+#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \
+		| BIT_CBUS_MHL3_DISCON_INT \
+		| BIT_NOT_MHL_EST_INT)
+
+/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0_MASK 0x05ee
+
+#endif /* __SIL_SII8620_H__ */
--
cgit v1.2.3
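A note on the conventions in the header above: the REG_*/BIT_*/MSK_*/VAL_* names follow the usual mask-and-value pattern. The sketch below shows how they are meant to compose. It is illustrative only: the register, mask, and bit names come from the header, but sii8620_readb()/sii8620_writeb() are stand-ins for the driver's register accessors, FIELD_GET() is the generic helper from <linux/bitfield.h>, and the WRITE_STAT sequence is inferred from the register names rather than copied from the driver.

    #include <linux/bitfield.h>

    /* Illustrative: select the CBUS pull-ups (20K while discovering, 5K
     * when idle), then test for an MHL-capable peer via the RGND ladder,
     * where the 1K termination is the MHL indication. */
    static bool example_mhl_peer_present(struct sii8620 *ctx)
    {
    	u8 stat2;

    	sii8620_writeb(ctx, REG_DISC_CTRL4,
    		       VAL_DISC_CTRL4(VAL_PUP_20K, VAL_PUP_5K));

    	stat2 = sii8620_readb(ctx, REG_DISC_STAT2);
    	return FIELD_GET(MSK_DISC_STAT2_RGND, stat2) == VAL_RGND_1K;
    }

    /* Plausible MSC WRITE_STAT kick (sequence inferred from the register
     * names): load the offset and payload, then pulse the trigger bit. */
    static void example_msc_write_stat(struct sii8620 *ctx, u8 offset, u8 val)
    {
    	sii8620_writeb(ctx, REG_MSC_CMD_OR_OFFSET, offset);
    	sii8620_writeb(ctx, REG_MSC_1ST_TRANSMIT_DATA, val);
    	sii8620_writeb(ctx, REG_MSC_COMMAND_START,
    		       BIT_MSC_COMMAND_START_WRITE_STAT);
    }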
From 9ff7a1b0ba823b6cdefc10fc641b8289274d7d81 Mon Sep 17 00:00:00 2001
From: Imre Deak
Date: Wed, 26 Oct 2016 19:29:19 +0300
Subject: drm: Print some debug/error info during DP dual mode detect
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There's at least one LSPCON device that occasionally returns an
unexpected adaptor ID, which leads to a failed detect. Print some debug
info to help debug this and future cases. Also print an error for an
unexpected adaptor ID, so users can report it.

v2:
- s/adapter/adaptor/ and add code comment about incorrect type 1
  adaptor IDs. (Ville)

Cc: dri-devel@lists.freedesktop.org
Cc: Ville Syrjälä
Signed-off-by: Imre Deak
Reviewed-by: Ville Syrjälä
Signed-off-by: Sean Paul
Link: http://patchwork.freedesktop.org/patch/msgid/1477499359-12001-1-git-send-email-imre.deak@intel.com
---
 drivers/gpu/drm/drm_dp_dual_mode_helper.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 488355bdafb9..e02563966271 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -142,6 +142,11 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
 		      sizeof(dp_dual_mode_hdmi_id)) == 0;
 }
 
+static bool is_type1_adaptor(uint8_t adaptor_id)
+{
+	return adaptor_id == 0 || adaptor_id == 0xff;
+}
+
 static bool is_type2_adaptor(uint8_t adaptor_id)
 {
 	return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
@@ -193,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
 	 */
 	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
 				    hdmi_id, sizeof(hdmi_id));
+	DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
+		      ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
 	if (ret)
 		return DRM_DP_DUAL_MODE_UNKNOWN;
 
@@ -210,6 +217,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
 	 */
 	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
 				    &adaptor_id, sizeof(adaptor_id));
+	DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
+		      adaptor_id, ret);
 	if (ret == 0) {
 		if (is_lspcon_adaptor(hdmi_id, adaptor_id))
 			return DRM_DP_DUAL_MODE_LSPCON;
@@ -219,6 +228,15 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
 			else
 				return DRM_DP_DUAL_MODE_TYPE2_DVI;
 		}
+		/*
+		 * If neither a proper type 1 ID nor a broken type 1 adaptor
+		 * as described above, assume type 1, but let the user know
+		 * that we may have misdetected the type.
+		 */
+		if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+			DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
+				  adaptor_id);
+
 	}
 
 	if (is_hdmi_adaptor(hdmi_id))
--
cgit v1.2.3
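For context, this is roughly how a caller consumes the result of the helper patched above. The function is a sketch: only drm_dp_dual_mode_detect() and the DRM_DP_DUAL_MODE_* enum values come from the helper; the surrounding driver code is assumed.

    #include <drm/drm_dp_dual_mode_helper.h>

    /* Illustrative caller of drm_dp_dual_mode_detect(). */
    static void example_handle_dongle(struct i2c_adapter *ddc)
    {
    	switch (drm_dp_dual_mode_detect(ddc)) {
    	case DRM_DP_DUAL_MODE_LSPCON:
    		/* DP->HDMI protocol converter with its own DPCD access */
    		break;
    	case DRM_DP_DUAL_MODE_TYPE2_DVI:
    	case DRM_DP_DUAL_MODE_TYPE2_HDMI:
    		/* adaptor ID matched; type 2 TMDS control is available */
    		break;
    	case DRM_DP_DUAL_MODE_UNKNOWN:
    		/* HDMI ID read failed; see the new DRM_DEBUG_KMS output */
    		break;
    	default:
    		/* type 1, possibly misdetected (now flagged by the new
    		 * DRM_ERROR above) */
    		break;
    	}
    }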
From 3a81e96094b769d9cf5dc58761b9b85fa889e3aa Mon Sep 17 00:00:00 2001
From: kbuild test robot
Date: Thu, 27 Oct 2016 00:58:36 +0800
Subject: drm/bridge: fix semicolon.cocci warnings

drivers/gpu/drm/bridge/sil-sii8620.c:988:2-3: Unneeded semicolon

 Remove unneeded semicolon.

Generated by: scripts/coccinelle/misc/semicolon.cocci

CC: Andrzej Hajda
Signed-off-by: Fengguang Wu
Signed-off-by: Archit Taneja
Link: http://patchwork.freedesktop.org/patch/msgid/20161026165836.GA98907@lkp-sb04.lkp.intel.com
---
 drivers/gpu/drm/bridge/sil-sii8620.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 1345e7cc7bf0..4f0526e9edbe 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -985,7 +985,7 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
 	default:
 		dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
 		break;
-	};
+	}
 
 	sii8620_set_auto_zone(ctx);
--
cgit v1.2.3

From 56df51d003203f1c3a8eab05605973515aa15feb Mon Sep 17 00:00:00 2001
From: kbuild test robot
Date: Thu, 27 Oct 2016 00:58:36 +0800
Subject: drm/bridge: fix platform_no_drv_owner.cocci warnings

drivers/gpu/drm/bridge/sil-sii8620.c:1556:3-8: No need to set .owner here. The core will do it.

 Remove the .owner field, since the registration call sets it
 automatically.

Generated by: scripts/coccinelle/api/platform_no_drv_owner.cocci

CC: Andrzej Hajda
Signed-off-by: Fengguang Wu
Signed-off-by: Archit Taneja
Link: http://patchwork.freedesktop.org/patch/msgid/20161026165836.GA98766@lkp-sb04.lkp.intel.com
---
 drivers/gpu/drm/bridge/sil-sii8620.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 4f0526e9edbe..b2c267df7ee7 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -1553,7 +1553,6 @@ MODULE_DEVICE_TABLE(i2c, sii8620_id);
 static struct i2c_driver sii8620_driver = {
 	.driver = {
 		.name = "sii8620",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(sii8620_dt_match),
 	},
 	.probe = sii8620_probe,
--
cgit v1.2.3
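Both kbuild test robot patches above are machine-generated via the kernel's Coccinelle integration. To reproduce them locally, a single semantic patch can be run over one directory with the standard coccicheck options; the invocations below are a sketch of that documented interface, with the paths taken from the two commits:

    make coccicheck MODE=report M=drivers/gpu/drm/bridge/ \
    	COCCI=scripts/coccinelle/misc/semicolon.cocci
    make coccicheck MODE=patch M=drivers/gpu/drm/bridge/ \
    	COCCI=scripts/coccinelle/api/platform_no_drv_owner.cocci

The .owner removal is safe because i2c_add_driver() (and hence the usual module_i2c_driver() wrapper) expands to i2c_register_driver(THIS_MODULE, ...), so the I2C core fills in the module owner on the driver's behalf.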