author    Gerd Hoffmann <kraxel@redhat.com>    2020-09-23 17:32:07 -0700
committer Gerd Hoffmann <kraxel@redhat.com>    2020-09-29 11:23:33 +0200
commit    16845c5d540929d2f4dc845f11fbbb238e69170a (patch)
tree      973396e50605edb1f16a176ce4945f06e970e7fc /drivers/gpu/drm/virtio/virtgpu_vq.c
parent    5c824604e5601a7150e434931243ee9e820fb473 (diff)
download  linux-16845c5d540929d2f4dc845f11fbbb238e69170a.tar.bz2
drm/virtio: implement blob resources: implement vram object
A virtio-gpu vram object is based on range-based allocation. It has no
guest shmemfs backing, so we call drm_gem_private_object_init. This is
for host memory without any guest backing (at least initially).

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200924003214.662-12-gurchetansingh@chromium.org
Co-developed-by: Gurchetan Singh <gurchetansingh@chromium.org>
Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
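For context, the vram object setup the message refers to lands elsewhere in
this series (virtgpu_vram.c); a minimal sketch of that init path follows,
where virtio_gpu_vram_init() is a hypothetical name and virtio_gpu_vram_funcs
plus the virtio_gpu_object_vram layout are assumptions from context, not
quoted from this patch:

	/* Sketch only, not quoted from the patch: initializing a vram
	 * object without guest shmem backing.  virtio_gpu_vram_funcs is
	 * an assumed GEM funcs table for these objects. */
	static struct drm_gem_object *
	virtio_gpu_vram_init(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_vram *vram,
			     size_t size)
	{
		struct drm_gem_object *obj = &vram->base.base.base;

		obj->funcs = &virtio_gpu_vram_funcs;
		/* drm_gem_object_init() would allocate a shmemfs backing
		 * file; host memory has no guest pages, so take the
		 * private variant instead. */
		drm_gem_private_object_init(vgdev->ddev, obj, size);
		return obj;
	}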
Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 66
1 file changed, 66 insertions, 0 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 55529ed97d9c..41fd253d357e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1171,3 +1171,69 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
return 0;
}
+
+static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+ struct virtio_gpu_resp_map_info *resp =
+ (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+ spin_lock(&vgdev->host_visible_lock);
+
+ if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
+ vram->map_info = resp->map_info;
+ vram->map_state = STATE_OK;
+ } else {
+ vram->map_state = STATE_ERR;
+ }
+
+ spin_unlock(&vgdev->host_visible_lock);
+ wake_up_all(&vgdev->resp_wq);
+}
+
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs, uint64_t offset)
+{
+ struct virtio_gpu_resource_map_blob *cmd_p;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_resp_map_info *resp_buf;
+
+ resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+ if (!resp_buf) {
+ virtio_gpu_array_put_free(objs);
+ return -ENOMEM;
+ }
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+ sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = cpu_to_le64(offset);
+ vbuf->objs = objs;
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_resource_unmap_blob *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
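
The callback above only records map_info under host_visible_lock and wakes
resp_wq; nothing in this hunk consumes the result. A minimal sketch of how a
caller might block on the handshake after virtio_gpu_cmd_map() returns,
assuming STATE_INITIALIZING is the state a vram object starts in
(virtio_gpu_vram_wait_map() is a hypothetical helper, not part of this
patch):

	/* Hypothetical helper, not part of this patch: sleep until
	 * virtio_gpu_cmd_resource_map_cb() moves map_state out of its
	 * assumed initial STATE_INITIALIZING value. */
	static int virtio_gpu_vram_wait_map(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_object_vram *vram)
	{
		wait_event(vgdev->resp_wq,
			   vram->map_state != STATE_INITIALIZING);
		return vram->map_state == STATE_OK ? 0 : -EIO;
	}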