From cc0ba0d8624f210995924bb57a8b181ce8976606 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Wed, 29 May 2019 08:15:19 +0200
Subject: drm/vmwgfx: Use the backdoor port if the HB port is not available

The HB port may not be available for various reasons. Either it has been
disabled by a config option or by the hypervisor for other reasons.
In that case, make sure we have a backup plan and use the backdoor port
instead with a performance penalty.

Cc: stable@vger.kernel.org
Fixes: 89da76fde68d ("drm/vmwgfx: Add VMWare host messaging capability")
Signed-off-by: Thomas Hellstrom
Reviewed-by: Deepak Rawat
---
 drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 146 +++++++++++++++++++++++++++++-------
 1 file changed, 117 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 8b9270f31409..e4e09d47c5c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel)
 	return 0;
 }
 
+/**
+ * vmw_port_hb_out - Send the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @msg: NULL-terminated message.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
+				     const char *msg, bool hb)
+{
+	unsigned long si, di, eax, ebx, ecx, edx;
+	unsigned long msg_len = strlen(msg);
+
+	if (hb) {
+		unsigned long bp = channel->cookie_high;
+
+		si = (uintptr_t) msg;
+		di = channel->cookie_low;
+
+		VMW_PORT_HB_OUT(
+			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+			msg_len, si, di,
+			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+			VMW_HYPERVISOR_MAGIC, bp,
+			eax, ebx, ecx, edx, si, di);
+
+		return ebx;
+	}
+
+	/* HB port not available. Send the message 4 bytes at a time. */
+	ecx = MESSAGE_STATUS_SUCCESS << 16;
+	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
+		unsigned int bytes = min_t(size_t, msg_len, 4);
+		unsigned long word = 0;
+
+		memcpy(&word, msg, bytes);
+		msg_len -= bytes;
+		msg += bytes;
+		si = channel->cookie_high;
+		di = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
+			 word, si, di,
+			 VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+			 VMW_HYPERVISOR_MAGIC,
+			 eax, ebx, ecx, edx, si, di);
+	}
+
+	return ecx;
+}
+
+/**
+ * vmw_port_hb_in - Receive the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @reply: Pointer to buffer holding reply.
+ * @reply_len: Length of the reply.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
+				    unsigned long reply_len, bool hb)
+{
+	unsigned long si, di, eax, ebx, ecx, edx;
+
+	if (hb) {
+		unsigned long bp = channel->cookie_low;
+
+		si = channel->cookie_high;
+		di = (uintptr_t) reply;
+
+		VMW_PORT_HB_IN(
+			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+			reply_len, si, di,
+			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+			VMW_HYPERVISOR_MAGIC, bp,
+			eax, ebx, ecx, edx, si, di);
+
+		return ebx;
+	}
+
+	/* HB port not available. Retrieve the message 4 bytes at a time. */
+	ecx = MESSAGE_STATUS_SUCCESS << 16;
+	while (reply_len) {
+		unsigned int bytes = min_t(unsigned long, reply_len, 4);
+
+		si = channel->cookie_high;
+		di = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
+			 MESSAGE_STATUS_SUCCESS, si, di,
+			 VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+			 VMW_HYPERVISOR_MAGIC,
+			 eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+			break;
+
+		memcpy(reply, &ebx, bytes);
+		reply_len -= bytes;
+		reply += bytes;
+	}
+
+	return ecx;
+}
 
 /**
@@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel)
  */
 static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
 {
-	unsigned long eax, ebx, ecx, edx, si, di, bp;
+	unsigned long eax, ebx, ecx, edx, si, di;
 	size_t msg_len = strlen(msg);
 	int retries = 0;
 
-
 	while (retries < RETRIES) {
 		retries++;
 
@@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
 			VMW_HYPERVISOR_MAGIC,
 			eax, ebx, ecx, edx, si, di);
 
-		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
-		    (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
-			/* Expected success + high-bandwidth. Give up. */
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+			/* Expected success. Give up. */
 			return -EINVAL;
 		}
 
 		/* Send msg */
-		si = (uintptr_t) msg;
-		di = channel->cookie_low;
-		bp = channel->cookie_high;
-
-		VMW_PORT_HB_OUT(
-			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
-			msg_len, si, di,
-			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
-			VMW_HYPERVISOR_MAGIC, bp,
-			eax, ebx, ecx, edx, si, di);
+		ebx = vmw_port_hb_out(channel, msg,
+				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 
 		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
 			return 0;
@@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg);
 static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 			size_t *msg_len)
 {
-	unsigned long eax, ebx, ecx, edx, si, di, bp;
+	unsigned long eax, ebx, ecx, edx, si, di;
 	char *reply;
 	size_t reply_len;
 	int retries = 0;
@@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 			VMW_HYPERVISOR_MAGIC,
 			eax, ebx, ecx, edx, si, di);
 
-		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
-		    (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
 			DRM_ERROR("Failed to get reply size for host message.\n");
 			return -EINVAL;
 		}
@@ -252,17 +349,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
 		/* Receive buffer */
-		si = channel->cookie_high;
-		di = (uintptr_t) reply;
-		bp = channel->cookie_low;
-
-		VMW_PORT_HB_IN(
-			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
-			reply_len, si, di,
-			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
-			VMW_HYPERVISOR_MAGIC, bp,
-			eax, ebx, ecx, edx, si, di);
-
+		ebx = vmw_port_hb_in(channel, reply, reply_len,
+				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 
 		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
 			kfree(reply);
-- 
cgit v1.2.3
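
A minimal, compile-and-run sketch of the fallback path this patch adds: when the
high-bandwidth port is unavailable, the payload is packed into 32-bit words and
sent one word per port access. This is not driver code; backdoor_send_word() and
the example message are hypothetical stand-ins for the VMW_PORT register protocol
(magic value, port number and channel cookies are not modeled here).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for one VMW_PORT "send payload" access.  The real
 * driver issues an IN/OUT instruction with this word in a register; here we
 * only print it so the 4-byte chunking is visible. */
static void backdoor_send_word(uint32_t word)
{
	printf("port write: 0x%08x\n", word);
}

/* Send a NUL-terminated message 4 bytes at a time, mirroring the non-HB
 * branch of vmw_port_hb_out(): each iteration packs up to four message
 * bytes into one 32-bit word, zero-padding the final word. */
static void backdoor_send(const char *msg)
{
	size_t msg_len = strlen(msg);

	while (msg_len) {
		size_t bytes = msg_len < 4 ? msg_len : 4;
		uint32_t word = 0;

		memcpy(&word, msg, bytes);	/* remaining bytes stay zero */
		msg += bytes;
		msg_len -= bytes;
		backdoor_send_word(word);
	}
}

int main(void)
{
	backdoor_send("info-get guestinfo.ip");	/* example payload only */
	return 0;
}

The real vmw_port_hb_out() additionally checks MESSAGE_STATUS_SUCCESS in ECX
after every word, so a transfer aborts as soon as the host reports failure.
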
From bde15555ba61c7f664f40fd3c6fdbdb63f784c9b Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Tue, 4 Jun 2019 13:54:26 +0200
Subject: drm/vmwgfx: Honor the sg list segment size limitation

When building sg tables, honor the device sg list segment size
limitation.

Signed-off-by: Thomas Hellstrom
Reviewed-by: Deepak Rawat
---
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index a6ea75b58a83..d8ea3dd10af0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -441,11 +441,11 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		if (unlikely(ret != 0))
 			return ret;
 
-		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-						vsgt->num_pages, 0,
-						(unsigned long)
-						vsgt->num_pages << PAGE_SHIFT,
-						GFP_KERNEL);
+		ret = __sg_alloc_table_from_pages
+			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
+			 dma_get_max_seg_size(dev_priv->dev->dev),
+			 GFP_KERNEL);
 		if (unlikely(ret != 0))
 			goto out_sg_alloc_fail;
 
-- 
cgit v1.2.3
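
A simplified userspace model of what honoring the segment limit means: a
contiguous run of pages may not be coalesced into a single sg entry longer
than the device's maximum segment size, which is exactly what DMA-API debug
complains about in the next patch. split_into_segments() is an illustrative
helper, not a kernel API; the sizes are taken from the [len=2097152]
[max=65536] warning quoted below, not from the driver.

#include <stdio.h>

/* Split a contiguous byte range into chunks no longer than max_seg,
 * roughly what __sg_alloc_table_from_pages() does when it is passed
 * dma_get_max_seg_size() instead of coalescing without bound. */
static void split_into_segments(unsigned long total, unsigned long max_seg)
{
	unsigned int nsegs = 0;

	while (total) {
		unsigned long seg = total < max_seg ? total : max_seg;

		printf("segment %u: %lu bytes\n", nsegs++, seg);
		total -= seg;
	}
}

int main(void)
{
	/* A 2 MiB contiguous mapping against a 64 KiB segment limit,
	 * matching the numbers in the DMA-API warning further down. */
	split_into_segments(2UL << 20, 64UL << 10);
	return 0;
}
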
From 39916897cd815a0ee07ba1f6820cf88a63e459fc Mon Sep 17 00:00:00 2001
From: Qian Cai
Date: Mon, 3 Jun 2019 16:44:15 -0400
Subject: drm/vmwgfx: fix a warning due to missing dma_parms

Booting up with DMA_API_DEBUG_SG=y generates a warning because the driver
forgot to set dma_parms appropriately. Set it just after vmw_dma_masks()
in vmw_driver_load().

DMA-API: vmwgfx 0000:00:0f.0: mapping sg segment longer than device claims to support [len=2097152] [max=65536]
WARNING: CPU: 2 PID: 261 at kernel/dma/debug.c:1232 debug_dma_map_sg+0x360/0x480
Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 04/13/2018
RIP: 0010:debug_dma_map_sg+0x360/0x480
Call Trace:
 vmw_ttm_map_dma+0x3b1/0x5b0 [vmwgfx]
 vmw_bo_map_dma+0x25/0x30 [vmwgfx]
 vmw_otables_setup+0x2a8/0x750 [vmwgfx]
 vmw_request_device_late+0x78/0xc0 [vmwgfx]
 vmw_request_device+0xee/0x4e0 [vmwgfx]
 vmw_driver_load.cold+0x757/0xd84 [vmwgfx]
 drm_dev_register+0x1ff/0x340 [drm]
 drm_get_pci_dev+0x110/0x290 [drm]
 vmw_probe+0x15/0x20 [vmwgfx]
 local_pci_probe+0x7a/0xc0
 pci_device_probe+0x1b9/0x290
 really_probe+0x1b5/0x630
 driver_probe_device+0xa3/0x1a0
 device_driver_attach+0x94/0xa0
 __driver_attach+0xdd/0x1c0
 bus_for_each_dev+0xfe/0x150
 driver_attach+0x2d/0x40
 bus_add_driver+0x290/0x350
 driver_register+0xdc/0x1d0
 __pci_register_driver+0xda/0xf0
 vmwgfx_init+0x34/0x1000 [vmwgfx]
 do_one_initcall+0xe5/0x40a
 do_init_module+0x10f/0x3a0
 load_module+0x16a5/0x1a40
 __se_sys_finit_module+0x183/0x1c0
 __x64_sys_finit_module+0x43/0x50
 do_syscall_64+0xc8/0x606
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

Fixes: fb1d9738ca05 ("drm/vmwgfx: Add DRM driver for VMware Virtual GPU")
Co-developed-by: Thomas Hellstrom
Signed-off-by: Qian Cai
Signed-off-by: Thomas Hellstrom
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4ff11a0077e1..9506190a0300 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -747,6 +747,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (unlikely(ret != 0))
 		goto out_err0;
 
+	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
+					     SCATTERLIST_MAX_SEGMENT));
+
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max GMR ids is %u\n",
 			 (unsigned)dev_priv->max_gmr_ids);
-- 
cgit v1.2.3
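
A rough userspace re-computation of the cap passed to dma_set_max_seg_size()
above, assuming a 4 KiB page size. In the kernel, PAGE_MASK is ~(PAGE_SIZE - 1)
and SCATTERLIST_MAX_SEGMENT is UINT_MAX & PAGE_MASK, so both operands of the
min_t() end up being the largest page-aligned length that still fits in a
32-bit segment size; the constants below re-derive that value outside the
kernel and are assumptions, not driver code.

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;		/* assumed 4 KiB pages */
	const unsigned long page_mask = ~(page_size - 1);
	const unsigned int u32_max = 0xffffffffu;
	const unsigned int scatterlist_max_segment = u32_max & page_mask;
	unsigned int a = u32_max & page_mask;
	unsigned int cap = a < scatterlist_max_segment ? a : scatterlist_max_segment;

	/* Prints 4294963200 (0xfffff000): the largest page-aligned 32-bit value. */
	printf("max DMA segment size: %u bytes (0x%x)\n", cap, cap);
	return 0;
}

With dma_parms populated this way, the sg mapping built in the previous patch
never exceeds what the device claims to support, which silences the DMA-API
debug warning shown in the commit message.
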