Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Revert "drm/xen-front: Remove CMA support" #46

Merged
merged 1 commit into from
Oct 22, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions Documentation/gpu/xen-front.rst
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,18 @@ Buffers allocated by the frontend driver
.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
:doc: Buffers allocated by the frontend driver

With GEM CMA helpers
~~~~~~~~~~~~~~~~~~~~

.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
:doc: With GEM CMA helpers

Without GEM CMA helpers
~~~~~~~~~~~~~~~~~~~~~~~

.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
:doc: Without GEM CMA helpers

Buffers allocated by the backend
--------------------------------

Expand Down
13 changes: 13 additions & 0 deletions drivers/gpu/drm/xen/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -15,3 +15,16 @@ config DRM_XEN_FRONTEND
help
Choose this option if you want to enable a para-virtualized
frontend DRM/KMS driver for Xen guest OSes.

config DRM_XEN_FRONTEND_CMA
bool "Use DRM CMA to allocate dumb buffers"
depends on DRM_XEN_FRONTEND
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
Use DRM CMA helpers to allocate display buffers.
This is useful for use-cases when the guest driver needs to
share or export buffers to other drivers which only expect
contiguous buffers.
Note: in this mode driver cannot use buffers allocated
by the backend.
9 changes: 7 additions & 2 deletions drivers/gpu/drm/xen/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,12 @@ drm_xen_front-objs := xen_drm_front.o \
xen_drm_front_conn.o \
xen_drm_front_evtchnl.o \
xen_drm_front_shbuf.o \
xen_drm_front_cfg.o \
xen_drm_front_gem.o
xen_drm_front_cfg.o

ifeq ($(CONFIG_DRM_XEN_FRONTEND_CMA),y)
drm_xen_front-objs += xen_drm_front_gem_cma.o
else
drm_xen_front-objs += xen_drm_front_gem.o
endif

obj-$(CONFIG_DRM_XEN_FRONTEND) += drm_xen_front.o
62 changes: 52 additions & 10 deletions drivers/gpu/drm/xen/xen_drm_front.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>

#include <linux/of_device.h>

Expand Down Expand Up @@ -166,9 +167,10 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
return ret;
}

int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
static int be_dbuf_create_int(struct xen_drm_front_info *front_info,
u64 dbuf_cookie, u32 width, u32 height,
u32 bpp, u64 size, struct page **pages)
u32 bpp, u64 size, struct page **pages,
struct sg_table *sgt)
{
struct xen_drm_front_evtchnl *evtchnl;
struct xen_drm_front_shbuf *shbuf;
Expand All @@ -185,6 +187,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
buf_cfg.xb_dev = front_info->xb_dev;
buf_cfg.pages = pages;
buf_cfg.size = size;
buf_cfg.sgt = sgt;
buf_cfg.be_alloc = front_info->cfg.be_alloc;

shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
Expand Down Expand Up @@ -234,6 +237,22 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
return ret;
}

/*
 * Create a display buffer shared with the backend, described by an sg
 * table. Used when the buffer has no struct page array, e.g. when it was
 * allocated via the DRM CMA helpers. Thin wrapper around
 * be_dbuf_create_int() with pages == NULL.
 */
int xen_drm_front_dbuf_create_from_sgt(struct xen_drm_front_info *front_info,
		u64 dbuf_cookie, u32 width, u32 height,
		u32 bpp, u64 size, struct sg_table *sgt)
{
	return be_dbuf_create_int(front_info, dbuf_cookie, width, height,
			bpp, size, NULL, sgt);
}

/*
 * Create a display buffer shared with the backend, described by an array
 * of struct page pointers. Thin wrapper around be_dbuf_create_int() with
 * sgt == NULL.
 */
int xen_drm_front_dbuf_create_from_pages(struct xen_drm_front_info *front_info,
		u64 dbuf_cookie, u32 width, u32 height,
		u32 bpp, u64 size, struct page **pages)
{
	return be_dbuf_create_int(front_info, dbuf_cookie, width, height,
			bpp, size, pages, NULL);
}

static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
u64 dbuf_cookie)
{
Expand Down Expand Up @@ -415,11 +434,24 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
goto fail;
}

ret = xen_drm_front_dbuf_create(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(obj),
args->width, args->height, args->bpp,
args->size,
xen_drm_front_gem_get_pages(obj));
/*
 * In case of CONFIG_DRM_XEN_FRONTEND_CMA gem_obj is constructed
 * via DRM CMA helpers and doesn't have ->pages allocated
 * (xen_drm_front_gem_get_pages will return NULL), but instead can
 * provide an sg table
 */
if (xen_drm_front_gem_get_pages(obj))
ret = xen_drm_front_dbuf_create_from_pages(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(obj),
args->width, args->height, args->bpp,
args->size,
xen_drm_front_gem_get_pages(obj));
else
ret = xen_drm_front_dbuf_create_from_sgt(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(obj),
args->width, args->height, args->bpp,
args->size,
xen_drm_front_gem_get_sg_table(obj));
if (ret)
goto fail_backend;

Expand Down Expand Up @@ -491,7 +523,11 @@ static const struct file_operations xen_drm_dev_fops = {
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
#ifdef CONFIG_DRM_XEN_FRONTEND_CMA
.mmap = drm_gem_cma_mmap,
#else
.mmap = xen_drm_front_gem_mmap,
#endif
};

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
Expand All @@ -511,9 +547,6 @@ static struct drm_driver xen_drm_driver = {
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
.gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
.dumb_create = xen_drm_drv_dumb_create,
.fops = &xen_drm_dev_fops,
.name = "xendrm-du",
Expand All @@ -522,6 +555,15 @@ static struct drm_driver xen_drm_driver = {
.major = 1,
.minor = 0,

#ifdef CONFIG_DRM_XEN_FRONTEND_CMA
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
#else
.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
.gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
#endif
};

static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
Expand Down
42 changes: 36 additions & 6 deletions drivers/gpu/drm/xen/xen_drm_front.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,14 +23,40 @@
*
* Depending on the requirements for the para-virtualized environment, namely
* requirements dictated by the accompanying DRM/(v)GPU drivers running in both
* host and guest environments, display buffers can be allocated by either
* frontend driver or backend.
host and guest environments, a number of operating modes of the
para-virtualized display driver are supported:
*
* - display buffers can be allocated by either frontend driver or backend
* - display buffers can be allocated to be contiguous in memory or not
*
* Note! Frontend driver itself has no dependency on contiguous memory for
* its operation.
*/

/**
* DOC: Buffers allocated by the frontend driver
*
* In this mode of operation driver allocates buffers from system memory.
* The below modes of operation are configured at compile-time via
* frontend driver's kernel configuration:
*/

/**
* DOC: With GEM CMA helpers
*
* This use-case is useful when used with accompanying DRM/vGPU driver in
* guest domain which was designed to only work with contiguous buffers,
* e.g. DRM driver based on GEM CMA helpers: such drivers can only import
* contiguous PRIME buffers, thus requiring frontend driver to provide
* such. In order to implement this mode of operation para-virtualized
* frontend driver can be configured to use GEM CMA helpers.
*/

/**
* DOC: Without GEM CMA helpers
*
* If accompanying drivers can cope with non-contiguous memory then, to
* lower pressure on CMA subsystem of the kernel, driver can allocate
* buffers from system memory.
*
* Note! If used with accompanying DRM/(v)GPU drivers this mode of operation
* may require IOMMU support on the platform, so accompanying DRM/vGPU
Expand Down Expand Up @@ -138,9 +164,13 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
u32 x, u32 y, u32 width, u32 height,
u32 bpp, u64 fb_cookie);

int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
u64 dbuf_cookie, u32 width, u32 height,
u32 bpp, u64 size, struct page **pages);
int xen_drm_front_dbuf_create_from_sgt(struct xen_drm_front_info *front_info,
u64 dbuf_cookie, u32 width, u32 height,
u32 bpp, u64 size, struct sg_table *sgt);

int xen_drm_front_dbuf_create_from_pages(struct xen_drm_front_info *front_info,
u64 dbuf_cookie, u32 width, u32 height,
u32 bpp, u64 size, struct page **pages);

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
u64 dbuf_cookie, u64 fb_cookie, u32 width,
Expand Down
12 changes: 9 additions & 3 deletions drivers/gpu/drm/xen/xen_drm_front_gem.c
Original file line number Diff line number Diff line change
Expand Up @@ -210,9 +210,15 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
if (ret < 0)
return ERR_PTR(ret);

ret = xen_drm_front_dbuf_create(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(&xen_obj->base),
0, 0, 0, size, xen_obj->pages);
/*
* N.B. Although we have an API to create display buffer from sgt
* we use pages API, because we still need those for GEM handling,
* e.g. for mapping etc.
*/
ret = xen_drm_front_dbuf_create_from_pages(drm_info->front_info,
xen_drm_front_dbuf_to_cookie(&xen_obj->base),
0, 0, 0, size,
xen_obj->pages);
if (ret < 0)
return ERR_PTR(ret);

Expand Down
3 changes: 3 additions & 0 deletions drivers/gpu/drm/xen/xen_drm_front_gem.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);

void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);

#ifndef CONFIG_DRM_XEN_FRONTEND_CMA

int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);

void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj);
Expand All @@ -36,5 +38,6 @@ void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
struct vm_area_struct *vma);
#endif

#endif /* __XEN_DRM_FRONT_GEM_H */
79 changes: 79 additions & 0 deletions drivers/gpu/drm/xen/xen_drm_front_gem_cma.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
* Xen para-virtual DRM device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

/*
 * Import a dma-buf, described by @sgt, as a GEM object.
 *
 * The buffer is first wrapped into a CMA GEM object via
 * drm_gem_cma_prime_import_sg_table() and then shared with the
 * para-virtualized display backend by creating a display buffer from the
 * same sg table. Width/height/bpp are passed as 0 because they are not
 * known for an imported buffer; only its size matters to the backend.
 *
 * Returns the GEM object on success or an ERR_PTR()-encoded error.
 *
 * NOTE(review): if xen_drm_front_dbuf_create_from_sgt() fails, the
 * just-imported gem_obj is not released before returning ERR_PTR(ret) —
 * this looks like a reference/memory leak on the error path; confirm the
 * correct release call for this tree and fix.
 * NOTE(review): the IS_ERR_OR_NULL() check may propagate NULL to callers
 * that only test with IS_ERR() — verify whether
 * drm_gem_cma_prime_import_sg_table() can actually return NULL.
 */
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
		struct dma_buf_attachment *attach,
		struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct drm_gem_cma_object *cma_obj;
	int ret;

	gem_obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR_OR_NULL(gem_obj))
		return gem_obj;

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	ret = xen_drm_front_dbuf_create_from_sgt(drm_info->front_info,
			xen_drm_front_dbuf_to_cookie(gem_obj),
			0, 0, 0, gem_obj->size,
			drm_gem_cma_prime_get_sg_table(gem_obj));
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported CMA buffer of size %zu\n", gem_obj->size);

	return gem_obj;
}

/* Return the sg table backing @gem_obj (DRM CMA helper pass-through). */
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	return drm_gem_cma_prime_get_sg_table(gem_obj);
}

/*
 * Allocate a contiguous GEM object of @size bytes via the DRM CMA
 * helpers.
 *
 * Backend-allocated buffers are incompatible with the CMA mode of
 * operation, so the request is rejected up front when the frontend is
 * configured for backend allocations.
 *
 * Returns the new GEM object or an ERR_PTR()-encoded error.
 */
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
		size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_cma_object *cma_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		DRM_ERROR("Backend allocated buffers and CMA helpers are not supported at the same time\n");
		return ERR_PTR(-EINVAL);
	}

	cma_obj = drm_gem_cma_create(dev, size);

	return IS_ERR_OR_NULL(cma_obj) ? ERR_CAST(cma_obj) : &cma_obj->base;
}

/* Free a GEM object previously created/imported via the CMA helpers. */
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	drm_gem_cma_free_object(gem_obj);
}

/*
 * CMA-backed GEM objects carry no struct page array: always return NULL
 * so that callers (e.g. the dumb_create path) use the sg table instead.
 */
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	return NULL;
}
22 changes: 22 additions & 0 deletions drivers/gpu/drm/xen/xen_drm_front_shbuf.c
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,10 @@ void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
}
kfree(buf->grefs);
kfree(buf->directory);
if (buf->sgt) {
sg_free_table(buf->sgt);
kvfree(buf->pages);
}
kfree(buf);
}

Expand Down Expand Up @@ -346,6 +350,17 @@ static int grant_references(struct xen_drm_front_shbuf *buf)

static int alloc_storage(struct xen_drm_front_shbuf *buf)
{
if (buf->sgt) {
buf->pages = kvmalloc_array(buf->num_pages,
sizeof(struct page *), GFP_KERNEL);
if (!buf->pages)
return -ENOMEM;

if (drm_prime_sg_to_page_addr_arrays(buf->sgt, buf->pages,
NULL, buf->num_pages) < 0)
return -EINVAL;
}

buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
if (!buf->grefs)
return -ENOMEM;
Expand Down Expand Up @@ -381,6 +396,12 @@ xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
struct xen_drm_front_shbuf *buf;
int ret;

/* either pages or sgt, not both */
if (unlikely(cfg->pages && cfg->sgt)) {
DRM_ERROR("Cannot handle buffer allocation with both pages and sg table provided\n");
return NULL;
}

buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
Expand All @@ -392,6 +413,7 @@ xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)

buf->xb_dev = cfg->xb_dev;
buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
buf->sgt = cfg->sgt;
buf->pages = cfg->pages;

buf->ops->calc_num_grefs(buf);
Expand Down
Loading