drm/virtio: Import prime buffers from other devices as guest blobs
By importing scanout buffers from other devices, we should be able
to use the virtio-gpu driver in KMS-only mode. Note that we attach
dynamically and register a move_notify() callback so that we can
let the VMM know of any location changes to the imported object's
backing store by sending a detach_backing cmd.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Gurchetan Singh <gurchetansingh@chromium.org>
Cc: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
[dmitry.osipenko@collabora.com: added kref check to move_notify]
Link: https://patchwork.freedesktop.org/patch/msgid/20241126031643.3490496-5-vivek.kasireddy@intel.com
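
For readers unfamiliar with the dynamic dma-buf importer pattern this patch follows, here is a minimal sketch. It is illustrative only: every identifier prefixed with example_ is hypothetical, and the actual virtio-gpu implementation is in the diff below. The key points are that the attachment is created with dma_buf_dynamic_attach() together with a dma_buf_attach_ops providing move_notify(), the mapping is taken under the dma-buf's reservation lock, and move_notify() (called by the exporter with that lock already held) drops the cached mapping so it can be re-created on next use.

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/scatterlist.h>

struct example_buffer {
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
};

/* Exporter is about to move the backing storage: drop the cached mapping
 * (virtio-gpu additionally detaches the host-side backing at this point).
 * Called with attach->dmabuf->resv already held by the exporter. */
static void example_move_notify(struct dma_buf_attachment *attach)
{
	struct example_buffer *buf = attach->importer_priv;

	if (buf->sgt) {
		dma_buf_unmap_attachment(attach, buf->sgt, DMA_BIDIRECTIONAL);
		buf->sgt = NULL;
	}
}

static const struct dma_buf_attach_ops example_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = example_move_notify,
};

static int example_import(struct device *dev, struct dma_buf *dmabuf,
			  struct example_buffer *buf)
{
	struct dma_buf_attachment *attach;

	attach = dma_buf_dynamic_attach(dmabuf, dev, &example_attach_ops, buf);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* Dynamic attachments are mapped under the reservation lock. */
	dma_resv_lock(dmabuf->resv, NULL);
	buf->sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);
	if (IS_ERR(buf->sgt)) {
		int ret = PTR_ERR(buf->sgt);

		buf->sgt = NULL;
		dma_buf_detach(dmabuf, attach);
		return ret;
	}

	buf->attach = attach;
	get_dma_buf(dmabuf);	/* keep the dma-buf alive while imported */
	return 0;
}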
vivekkreddy authored and digetx committed Nov 26, 2024
1 parent 2885e57 commit ca77f27
Showing 1 changed file with 62 additions and 3 deletions.
drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -189,13 +189,18 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
 	struct dma_buf_attachment *attach = obj->import_attach;
+	struct dma_resv *resv = attach->dmabuf->resv;
 
 	if (attach) {
+		dma_resv_lock(resv, NULL);
+
 		virtio_gpu_detach_object_fenced(bo);
 
 		if (bo->sgt)
-			dma_buf_unmap_attachment_unlocked(attach, bo->sgt,
-							  DMA_BIDIRECTIONAL);
+			dma_buf_unmap_attachment(attach, bo->sgt,
+						 DMA_BIDIRECTIONAL);
+
+		dma_resv_unlock(resv);
 
 		dma_buf_detach(attach->dmabuf, attach);
 		dma_buf_put(attach->dmabuf);
@@ -259,10 +264,39 @@ static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
 	return ret;
 }
 
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+	.free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->importer_priv;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	if (bo->created && kref_read(&obj->refcount)) {
+		virtio_gpu_detach_object_fenced(bo);
+
+		if (bo->sgt)
+			dma_buf_unmap_attachment(attach, bo->sgt,
+						 DMA_BIDIRECTIONAL);
+
+		bo->sgt = NULL;
+	}
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+	.allow_peer2peer = true,
+	.move_notify = virtgpu_dma_buf_move_notify
+};
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 						struct dma_buf *buf)
 {
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct dma_buf_attachment *attach;
+	struct virtio_gpu_object *bo;
 	struct drm_gem_object *obj;
+	int ret;
 
 	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
 		obj = buf->priv;
@@ -276,7 +310,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 		}
 	}
 
-	return drm_gem_prime_import(dev, buf);
+	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+		return drm_gem_prime_import(dev, buf);
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	obj = &bo->base.base;
+	obj->funcs = &virtgpu_gem_dma_buf_funcs;
+	drm_gem_private_object_init(dev, obj, buf->size);
+
+	attach = dma_buf_dynamic_attach(buf, dev->dev,
+					&virtgpu_dma_buf_attach_ops, obj);
+	if (IS_ERR(attach)) {
+		kfree(bo);
+		return ERR_CAST(attach);
+	}
+
+	obj->import_attach = attach;
+	get_dma_buf(buf);
+
+	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return obj;
 }
 
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
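
A note on when move_notify() fires, for context (this is not part of the change): the exporter calls dma_buf_move_notify() while holding the dma-buf's reservation lock whenever it is about to relocate or evict the backing storage, and that in turn invokes the move_notify() hook of every dynamic attachment. A sketch of such an exporter-side path, with an illustrative example_ name:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

/* Hypothetical exporter eviction path: under the reservation lock, tell all
 * dynamic importers to drop their mappings before the pages move. For the
 * patch above, this is the point where virtio-gpu detaches the host-side
 * backing and unmaps in virtgpu_dma_buf_move_notify(). */
static void example_exporter_evict(struct dma_buf *dmabuf)
{
	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_move_notify(dmabuf);	/* calls each importer's move_notify() */
	/* ... exporter migrates or frees the backing pages here ... */
	dma_resv_unlock(dmabuf->resv);
}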
