Add a virtio-gpu 2D driver

Add a driver to connect vt to the VirtIO GPU device in 2D mode. This
provides output on the display when a QEMU virtio-gpu device is
added, e.g. with -device virtio-gpu-pci.

Tested on QEMU via UTM, and on a Hetzner arm64 VM instance.
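
For example, a guest with a virtio-gpu display can be started along
these lines (an illustrative invocation; the machine, firmware, and
disk options are placeholders that will vary by setup):

    qemu-system-aarch64 -machine virt -cpu max -m 2G \
        -bios edk2-aarch64-code.fd \
        -drive file=freebsd.img,format=raw,if=virtio \
        -device virtio-gpu-pci

The vt console then appears in the QEMU display window.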

Reviewed by:	bryanv (earlier version)
Sponsored by:	Arm Ltd
Differential Revision:	https://reviews.freebsd.org/D40094
Andrew Turner 2023-08-17 12:26:57 +01:00
parent 67c26eb2a5
commit 02f2706606
7 changed files with 1213 additions and 0 deletions


@@ -570,6 +570,7 @@ MAN= aac.4 \
virtio_balloon.4 \
virtio_blk.4 \
virtio_console.4 \
virtio_gpu.4 \
virtio_random.4 \
virtio_scsi.4 \
${_vmci.4} \


@@ -83,6 +83,10 @@ A pseudo-device to allow the VM to release memory back to the hypervisor is
provided by the
.Xr virtio_balloon 4
device driver.
.It Sy GPU
Graphics support is provided by the
.Xr virtio_gpu 4
device driver.
.It Sy SCSI
An emulated SCSI HBA is provided by the
.Xr virtio_scsi 4
@@ -92,6 +96,7 @@ device driver.
.Xr virtio_balloon 4 ,
.Xr virtio_blk 4 ,
.Xr virtio_console 4 ,
.Xr virtio_gpu 4 ,
.Xr virtio_random 4 ,
.Xr virtio_scsi 4 ,
.Xr vtnet 4


@@ -0,0 +1,54 @@
.\"-
.\" SPDX-License-Identifier: BSD-2-Clause
.\"
.\" Copyright (c) 2014 Bryan Venteicher
.\" All rights reserved.
.\" Copyright (c) 2023 Arm Ltd
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.Dd August 14, 2023
.Dt VIRTIO_GPU 4
.Os
.Sh NAME
.Nm virtio_gpu
.Nd VirtIO GPU driver
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following line in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device virtio_gpu"
.Ed
.Sh DESCRIPTION
The
.Nm
device driver provides support for VirtIO GPU devices to create a
.Xr vt 4
console.
.Sh SEE ALSO
.Xr virtio 4 ,
.Xr vt 4
.Sh HISTORY
The
.Nm
driver first appeared in FreeBSD 14.0.


@@ -19,6 +19,7 @@ device virtio # Generic VirtIO bus (required)
device virtio_pci # VirtIO PCI device
device virtio_mmio # VirtIO Memory Mapped IO device
device virtio_blk # VirtIO Block device
device virtio_gpu # VirtIO GPU device
device virtio_scsi # VirtIO SCSI device
device vtnet # VirtIO Ethernet device


@@ -3399,6 +3399,7 @@ dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio
dev/virtio/network/if_vtnet.c optional vtnet
dev/virtio/block/virtio_blk.c optional virtio_blk
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
dev/virtio/gpu/virtio_gpu.c optional virtio_gpu
dev/virtio/scsi/virtio_scsi.c optional virtio_scsi
dev/virtio/random/virtio_random.c optional virtio_random
dev/virtio/console/virtio_console.c optional virtio_console


@@ -0,0 +1,697 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
* Copyright (c) 2023, Arm Ltd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO GPU device. */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/fbio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/gpu/virtio_gpu.h>
#include <dev/vt/vt.h>
#include <dev/vt/hw/fb/vt_fb.h>
#include <dev/vt/colors/vt_termcolors.h>
#include "fb_if.h"
#define VTGPU_FEATURES 0
/* The guest can allocate resource IDs, we only need one */
#define VTGPU_RESOURCE_ID 1
struct vtgpu_softc {
/* Must be first so we can cast from info -> softc */
struct fb_info vtgpu_fb_info;
struct virtio_gpu_config vtgpu_gpucfg;
device_t vtgpu_dev;
uint64_t vtgpu_features;
struct virtqueue *vtgpu_ctrl_vq;
uint64_t vtgpu_next_fence;
bool vtgpu_have_fb_info;
};
static int vtgpu_modevent(module_t, int, void *);
static int vtgpu_probe(device_t);
static int vtgpu_attach(device_t);
static int vtgpu_detach(device_t);
static int vtgpu_negotiate_features(struct vtgpu_softc *);
static int vtgpu_setup_features(struct vtgpu_softc *);
static void vtgpu_read_config(struct vtgpu_softc *,
struct virtio_gpu_config *);
static int vtgpu_alloc_virtqueue(struct vtgpu_softc *);
static int vtgpu_get_display_info(struct vtgpu_softc *);
static int vtgpu_create_2d(struct vtgpu_softc *);
static int vtgpu_attach_backing(struct vtgpu_softc *);
static int vtgpu_set_scanout(struct vtgpu_softc *, uint32_t, uint32_t,
uint32_t, uint32_t);
static int vtgpu_transfer_to_host_2d(struct vtgpu_softc *, uint32_t,
uint32_t, uint32_t, uint32_t);
static int vtgpu_resource_flush(struct vtgpu_softc *, uint32_t, uint32_t,
uint32_t, uint32_t);
static vd_blank_t vtgpu_fb_blank;
static vd_bitblt_text_t vtgpu_fb_bitblt_text;
static vd_bitblt_bmp_t vtgpu_fb_bitblt_bitmap;
static vd_drawrect_t vtgpu_fb_drawrect;
static vd_setpixel_t vtgpu_fb_setpixel;
static struct vt_driver vtgpu_fb_driver = {
.vd_name = "virtio_gpu",
.vd_init = vt_fb_init,
.vd_fini = vt_fb_fini,
.vd_blank = vtgpu_fb_blank,
.vd_bitblt_text = vtgpu_fb_bitblt_text,
.vd_invalidate_text = vt_fb_invalidate_text,
.vd_bitblt_bmp = vtgpu_fb_bitblt_bitmap,
.vd_drawrect = vtgpu_fb_drawrect,
.vd_setpixel = vtgpu_fb_setpixel,
.vd_postswitch = vt_fb_postswitch,
.vd_priority = VD_PRIORITY_GENERIC+10,
.vd_fb_ioctl = vt_fb_ioctl,
.vd_fb_mmap = NULL, /* No mmap as we need to signal the host */
.vd_suspend = vt_fb_suspend,
.vd_resume = vt_fb_resume,
};
VT_DRIVER_DECLARE(vt_vtgpu, vtgpu_fb_driver);
static void
vtgpu_fb_blank(struct vt_device *vd, term_color_t color)
{
struct vtgpu_softc *sc;
struct fb_info *info;
info = vd->vd_softc;
sc = (struct vtgpu_softc *)info;
vt_fb_blank(vd, color);
vtgpu_transfer_to_host_2d(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
sc->vtgpu_fb_info.fb_height);
vtgpu_resource_flush(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
sc->vtgpu_fb_info.fb_height);
}
static void
vtgpu_fb_bitblt_text(struct vt_device *vd, const struct vt_window *vw,
const term_rect_t *area)
{
struct vtgpu_softc *sc;
struct fb_info *info;
int x, y, width, height;
info = vd->vd_softc;
sc = (struct vtgpu_softc *)info;
vt_fb_bitblt_text(vd, vw, area);
	x = area->tr_begin.tp_col * vw->vw_font->vf_width +
	    vw->vw_draw_area.tr_begin.tp_col;
	y = area->tr_begin.tp_row * vw->vw_font->vf_height +
	    vw->vw_draw_area.tr_begin.tp_row;
	width = area->tr_end.tp_col * vw->vw_font->vf_width +
	    vw->vw_draw_area.tr_begin.tp_col - x;
	height = area->tr_end.tp_row * vw->vw_font->vf_height +
	    vw->vw_draw_area.tr_begin.tp_row - y;
vtgpu_transfer_to_host_2d(sc, x, y, width, height);
vtgpu_resource_flush(sc, x, y, width, height);
}
static void
vtgpu_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
const uint8_t *pattern, const uint8_t *mask,
unsigned int width, unsigned int height,
unsigned int x, unsigned int y, term_color_t fg, term_color_t bg)
{
struct vtgpu_softc *sc;
struct fb_info *info;
info = vd->vd_softc;
sc = (struct vtgpu_softc *)info;
vt_fb_bitblt_bitmap(vd, vw, pattern, mask, width, height, x, y, fg, bg);
vtgpu_transfer_to_host_2d(sc, x, y, width, height);
vtgpu_resource_flush(sc, x, y, width, height);
}
static void
vtgpu_fb_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2,
int fill, term_color_t color)
{
struct vtgpu_softc *sc;
struct fb_info *info;
int width, height;
info = vd->vd_softc;
sc = (struct vtgpu_softc *)info;
vt_fb_drawrect(vd, x1, y1, x2, y2, fill, color);
width = x2 - x1 + 1;
height = y2 - y1 + 1;
vtgpu_transfer_to_host_2d(sc, x1, y1, width, height);
vtgpu_resource_flush(sc, x1, y1, width, height);
}
static void
vtgpu_fb_setpixel(struct vt_device *vd, int x, int y, term_color_t color)
{
struct vtgpu_softc *sc;
struct fb_info *info;
info = vd->vd_softc;
sc = (struct vtgpu_softc *)info;
vt_fb_setpixel(vd, x, y, color);
vtgpu_transfer_to_host_2d(sc, x, y, 1, 1);
vtgpu_resource_flush(sc, x, y, 1, 1);
}
static struct virtio_feature_desc vtgpu_feature_desc[] = {
{ VIRTIO_GPU_F_VIRGL, "VirGL" },
{ VIRTIO_GPU_F_EDID, "EDID" },
{ VIRTIO_GPU_F_RESOURCE_UUID, "ResUUID" },
{ VIRTIO_GPU_F_RESOURCE_BLOB, "ResBlob" },
{ VIRTIO_GPU_F_CONTEXT_INIT, "ContextInit" },
{ 0, NULL }
};
static device_method_t vtgpu_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtgpu_probe),
DEVMETHOD(device_attach, vtgpu_attach),
DEVMETHOD(device_detach, vtgpu_detach),
DEVMETHOD_END
};
static driver_t vtgpu_driver = {
"vtgpu",
vtgpu_methods,
sizeof(struct vtgpu_softc)
};
VIRTIO_DRIVER_MODULE(virtio_gpu, vtgpu_driver, vtgpu_modevent, NULL);
MODULE_VERSION(virtio_gpu, 1);
MODULE_DEPEND(virtio_gpu, virtio, 1, 1, 1);
VIRTIO_SIMPLE_PNPINFO(virtio_gpu, VIRTIO_ID_GPU,
"VirtIO GPU");
static int
vtgpu_modevent(module_t mod, int type, void *unused)
{
int error;
switch (type) {
case MOD_LOAD:
case MOD_QUIESCE:
case MOD_UNLOAD:
case MOD_SHUTDOWN:
error = 0;
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
static int
vtgpu_probe(device_t dev)
{
return (VIRTIO_SIMPLE_PROBE(dev, virtio_gpu));
}
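/*
 * Attach sequence: negotiate features, read the device config and
 * allocate the control virtqueue; then create a 2D resource backed by
 * a guest-allocated framebuffer, point scanout 0 at it, and push the
 * initial contents to the host.
 */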
static int
vtgpu_attach(device_t dev)
{
struct vtgpu_softc *sc;
int error;
sc = device_get_softc(dev);
sc->vtgpu_have_fb_info = false;
sc->vtgpu_dev = dev;
sc->vtgpu_next_fence = 1;
virtio_set_feature_desc(dev, vtgpu_feature_desc);
error = vtgpu_setup_features(sc);
if (error != 0) {
device_printf(dev, "cannot setup features\n");
goto fail;
}
vtgpu_read_config(sc, &sc->vtgpu_gpucfg);
error = vtgpu_alloc_virtqueue(sc);
if (error != 0) {
device_printf(dev, "cannot allocate virtqueue\n");
goto fail;
}
virtio_setup_intr(dev, INTR_TYPE_TTY);
/* Read the device info to get the display size */
error = vtgpu_get_display_info(sc);
if (error != 0) {
goto fail;
}
/*
* TODO: This doesn't need to be contigmalloc as we
* can use scatter-gather lists.
*/
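	/*
	 * A single physically contiguous buffer also lets
	 * vtgpu_attach_backing() describe the framebuffer with just one
	 * virtio_gpu_mem_entry.
	 */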
sc->vtgpu_fb_info.fb_vbase = (vm_offset_t)contigmalloc(
sc->vtgpu_fb_info.fb_size, M_DEVBUF, M_WAITOK|M_ZERO, 0, ~0, 4, 0);
sc->vtgpu_fb_info.fb_pbase = pmap_kextract(sc->vtgpu_fb_info.fb_vbase);
/* Create the 2d resource */
error = vtgpu_create_2d(sc);
if (error != 0) {
goto fail;
}
/* Attach the backing memory */
error = vtgpu_attach_backing(sc);
if (error != 0) {
goto fail;
}
/* Set the scanout to link the framebuffer to the display scanout */
error = vtgpu_set_scanout(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
sc->vtgpu_fb_info.fb_height);
if (error != 0) {
goto fail;
}
vt_allocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info);
sc->vtgpu_have_fb_info = true;
error = vtgpu_transfer_to_host_2d(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
sc->vtgpu_fb_info.fb_height);
if (error != 0)
goto fail;
error = vtgpu_resource_flush(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
sc->vtgpu_fb_info.fb_height);
fail:
if (error != 0)
vtgpu_detach(dev);
return (error);
}
static int
vtgpu_detach(device_t dev)
{
struct vtgpu_softc *sc;
sc = device_get_softc(dev);
if (sc->vtgpu_have_fb_info)
vt_deallocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info);
if (sc->vtgpu_fb_info.fb_vbase != 0) {
MPASS(sc->vtgpu_fb_info.fb_size != 0);
contigfree((void *)sc->vtgpu_fb_info.fb_vbase,
sc->vtgpu_fb_info.fb_size, M_DEVBUF);
}
/* TODO: Tell the host we are detaching */
return (0);
}
static int
vtgpu_negotiate_features(struct vtgpu_softc *sc)
{
device_t dev;
uint64_t features;
dev = sc->vtgpu_dev;
features = VTGPU_FEATURES;
sc->vtgpu_features = virtio_negotiate_features(dev, features);
return (virtio_finalize_features(dev));
}
static int
vtgpu_setup_features(struct vtgpu_softc *sc)
{
int error;
error = vtgpu_negotiate_features(sc);
if (error != 0)
return (error);
return (0);
}
static void
vtgpu_read_config(struct vtgpu_softc *sc,
struct virtio_gpu_config *gpucfg)
{
device_t dev;
dev = sc->vtgpu_dev;
bzero(gpucfg, sizeof(struct virtio_gpu_config));
#define VTGPU_GET_CONFIG(_dev, _field, _cfg) \
virtio_read_device_config(_dev, \
offsetof(struct virtio_gpu_config, _field), \
	    &(_cfg)->_field, sizeof((_cfg)->_field))
VTGPU_GET_CONFIG(dev, events_read, gpucfg);
VTGPU_GET_CONFIG(dev, events_clear, gpucfg);
VTGPU_GET_CONFIG(dev, num_scanouts, gpucfg);
VTGPU_GET_CONFIG(dev, num_capsets, gpucfg);
#undef VTGPU_GET_CONFIG
}
static int
vtgpu_alloc_virtqueue(struct vtgpu_softc *sc)
{
device_t dev;
struct vq_alloc_info vq_info[2];
int nvqs;
dev = sc->vtgpu_dev;
nvqs = 1;
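	/*
	 * The device also provides a cursor queue; this driver only uses
	 * the control queue (index 0).
	 */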
VQ_ALLOC_INFO_INIT(&vq_info[0], 0, NULL, sc, &sc->vtgpu_ctrl_vq,
"%s control", device_get_nameunit(dev));
return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
static int
vtgpu_req_resp(struct vtgpu_softc *sc, void *req, size_t reqlen,
void *resp, size_t resplen)
{
struct sglist sg;
struct sglist_seg segs[2];
int error;
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, req, reqlen);
if (error != 0) {
device_printf(sc->vtgpu_dev,
"Unable to append the request to the sglist: %d\n", error);
return (error);
}
error = sglist_append(&sg, resp, resplen);
if (error != 0) {
device_printf(sc->vtgpu_dev,
"Unable to append the response buffer to the sglist: %d\n",
error);
return (error);
}
error = virtqueue_enqueue(sc->vtgpu_ctrl_vq, resp, &sg, 1, 1);
if (error != 0) {
device_printf(sc->vtgpu_dev, "Enqueue failed: %d\n", error);
return (error);
}
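	/* All requests are synchronous: notify the host, then busy-wait. */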
virtqueue_notify(sc->vtgpu_ctrl_vq);
virtqueue_poll(sc->vtgpu_ctrl_vq, NULL);
return (0);
}
static int
vtgpu_get_display_info(struct vtgpu_softc *sc)
{
struct {
struct virtio_gpu_ctrl_hdr req;
char pad;
struct virtio_gpu_resp_display_info resp;
} s = { 0 };
int error;
s.req.type = htole32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
s.req.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
s.req.fence_id = htole64(atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));
error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
sizeof(s.resp));
if (error != 0)
return (error);
	for (int i = 0; i < sc->vtgpu_gpucfg.num_scanouts; i++) {
		if (s.resp.pmodes[i].enabled != 0) {
			MPASS(i == 0);
			sc->vtgpu_fb_info.fb_name =
			    device_get_nameunit(sc->vtgpu_dev);
			sc->vtgpu_fb_info.fb_width =
			    le32toh(s.resp.pmodes[i].r.width);
			sc->vtgpu_fb_info.fb_height =
			    le32toh(s.resp.pmodes[i].r.height);
			/* 32 bits per pixel */
			sc->vtgpu_fb_info.fb_bpp = 32;
			sc->vtgpu_fb_info.fb_depth = 32;
			sc->vtgpu_fb_info.fb_size = sc->vtgpu_fb_info.fb_width *
			    sc->vtgpu_fb_info.fb_height * 4;
			sc->vtgpu_fb_info.fb_stride =
			    sc->vtgpu_fb_info.fb_width * 4;
			return (0);
		}
	}
	return (ENXIO);
}
static int
vtgpu_create_2d(struct vtgpu_softc *sc)
{
struct {
struct virtio_gpu_resource_create_2d req;
char pad;
struct virtio_gpu_ctrl_hdr resp;
} s = { 0 };
int error;
s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
s.req.hdr.fence_id = htole64(
atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));
s.req.resource_id = htole32(VTGPU_RESOURCE_ID);
s.req.format = htole32(VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);
s.req.width = htole32(sc->vtgpu_fb_info.fb_width);
s.req.height = htole32(sc->vtgpu_fb_info.fb_height);
error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
sizeof(s.resp));
if (error != 0)
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
return (0);
}
static int
vtgpu_attach_backing(struct vtgpu_softc *sc)
{
struct {
struct {
struct virtio_gpu_resource_attach_backing backing;
struct virtio_gpu_mem_entry mem[1];
} req;
char pad;
struct virtio_gpu_ctrl_hdr resp;
} s = { 0 };
int error;
s.req.backing.hdr.type =
htole32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
s.req.backing.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
s.req.backing.hdr.fence_id = htole64(
atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));
s.req.backing.resource_id = htole32(VTGPU_RESOURCE_ID);
s.req.backing.nr_entries = htole32(1);
	s.req.mem[0].addr = htole64(sc->vtgpu_fb_info.fb_pbase);
s.req.mem[0].length = htole32(sc->vtgpu_fb_info.fb_size);
error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
sizeof(s.resp));
if (error != 0)
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
return (0);
}
static int
vtgpu_set_scanout(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
uint32_t width, uint32_t height)
{
struct {
struct virtio_gpu_set_scanout req;
char pad;
struct virtio_gpu_ctrl_hdr resp;
} s = { 0 };
int error;
s.req.hdr.type = htole32(VIRTIO_GPU_CMD_SET_SCANOUT);
s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
s.req.hdr.fence_id = htole64(
atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));
s.req.r.x = htole32(x);
s.req.r.y = htole32(y);
s.req.r.width = htole32(width);
s.req.r.height = htole32(height);
s.req.scanout_id = 0;
s.req.resource_id = htole32(VTGPU_RESOURCE_ID);
error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
sizeof(s.resp));
if (error != 0)
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
return (0);
}
static int
vtgpu_transfer_to_host_2d(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
uint32_t width, uint32_t height)
{
struct {
struct virtio_gpu_transfer_to_host_2d req;
char pad;
struct virtio_gpu_ctrl_hdr resp;
} s = { 0 };
int error;
s.req.hdr.type = htole32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
s.req.hdr.fence_id = htole64(
atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));
s.req.r.x = htole32(x);
s.req.r.y = htole32(y);
s.req.r.width = htole32(width);
s.req.r.height = htole32(height);
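	/* Byte offset of the rectangle's first pixel in the backing store. */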
s.req.offset = htole64((y * sc->vtgpu_fb_info.fb_width + x)
* (sc->vtgpu_fb_info.fb_bpp / 8));
s.req.resource_id = htole32(VTGPU_RESOURCE_ID);
error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
sizeof(s.resp));
if (error != 0)
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
return (0);
}
static int
vtgpu_resource_flush(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
uint32_t width, uint32_t height)
{
struct {
struct virtio_gpu_resource_flush req;
char pad;
struct virtio_gpu_ctrl_hdr resp;
} s = { 0 };
int error;
s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
s.req.hdr.fence_id = htole64(
atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));
s.req.r.x = htole32(x);
s.req.r.y = htole32(y);
s.req.r.width = htole32(width);
s.req.r.height = htole32(height);
s.req.resource_id = htole32(VTGPU_RESOURCE_ID);
error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
sizeof(s.resp));
if (error != 0)
return (error);
if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
le32toh(s.resp.type));
return (EINVAL);
}
return (0);
}


@@ -0,0 +1,454 @@
/*
* Virtio GPU Device
*
* Copyright Red Hat, Inc. 2013-2014
*
* Authors:
* Dave Airlie <airlied@redhat.com>
* Gerd Hoffmann <kraxel@redhat.com>
*
* This header is BSD licensed so anyone can use the definitions
* to implement compatible drivers/servers:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef VIRTIO_GPU_HW_H
#define VIRTIO_GPU_HW_H
/*
* VIRTIO_GPU_CMD_CTX_*
* VIRTIO_GPU_CMD_*_3D
*/
#define VIRTIO_GPU_F_VIRGL 0
/*
* VIRTIO_GPU_CMD_GET_EDID
*/
#define VIRTIO_GPU_F_EDID 1
/*
* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID
*/
#define VIRTIO_GPU_F_RESOURCE_UUID 2
/*
* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
*/
#define VIRTIO_GPU_F_RESOURCE_BLOB 3
/*
* VIRTIO_GPU_CMD_CREATE_CONTEXT with
* context_init and multiple timelines
*/
#define VIRTIO_GPU_F_CONTEXT_INIT 4
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
/* 2d commands */
VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
VIRTIO_GPU_CMD_RESOURCE_UNREF,
VIRTIO_GPU_CMD_SET_SCANOUT,
VIRTIO_GPU_CMD_RESOURCE_FLUSH,
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
VIRTIO_GPU_CMD_GET_EDID,
VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
VIRTIO_GPU_CMD_CTX_DESTROY,
VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE,
VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE,
VIRTIO_GPU_CMD_RESOURCE_CREATE_3D,
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
VIRTIO_GPU_CMD_MOVE_CURSOR,
/* success responses */
VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
enum virtio_gpu_shm_id {
VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
/*
* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB
* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB
*/
VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
};
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
/*
* If the following flag is set, then ring_idx contains the index
 * of the command ring that needs to be used when creating the fence
*/
#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
struct virtio_gpu_ctrl_hdr {
uint32_t type;
uint32_t flags;
uint64_t fence_id;
uint32_t ctx_id;
uint8_t ring_idx;
uint8_t padding[3];
};
/* data passed in the cursor vq */
struct virtio_gpu_cursor_pos {
uint32_t scanout_id;
uint32_t x;
uint32_t y;
uint32_t padding;
};
/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
struct virtio_gpu_update_cursor {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_cursor_pos pos; /* update & move */
uint32_t resource_id; /* update only */
uint32_t hot_x; /* update only */
uint32_t hot_y; /* update only */
uint32_t padding;
};
/* data passed in the control vq, 2d related */
struct virtio_gpu_rect {
uint32_t x;
uint32_t y;
uint32_t width;
uint32_t height;
};
/* VIRTIO_GPU_CMD_RESOURCE_UNREF */
struct virtio_gpu_resource_unref {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t padding;
};
/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
struct virtio_gpu_resource_create_2d {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t format;
uint32_t width;
uint32_t height;
};
/* VIRTIO_GPU_CMD_SET_SCANOUT */
struct virtio_gpu_set_scanout {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
uint32_t scanout_id;
uint32_t resource_id;
};
/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
struct virtio_gpu_resource_flush {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
uint32_t resource_id;
uint32_t padding;
};
/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
struct virtio_gpu_transfer_to_host_2d {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
uint64_t offset;
uint32_t resource_id;
uint32_t padding;
};
struct virtio_gpu_mem_entry {
uint64_t addr;
uint32_t length;
uint32_t padding;
};
/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
struct virtio_gpu_resource_attach_backing {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t nr_entries;
};
/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
struct virtio_gpu_resource_detach_backing {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t padding;
};
/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
#define VIRTIO_GPU_MAX_SCANOUTS 16
struct virtio_gpu_resp_display_info {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_display_one {
struct virtio_gpu_rect r;
uint32_t enabled;
uint32_t flags;
} pmodes[VIRTIO_GPU_MAX_SCANOUTS];
};
/* data passed in the control vq, 3d related */
struct virtio_gpu_box {
uint32_t x, y, z;
uint32_t w, h, d;
};
/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */
struct virtio_gpu_transfer_host_3d {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_box box;
uint64_t offset;
uint32_t resource_id;
uint32_t level;
uint32_t stride;
uint32_t layer_stride;
};
/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */
#define VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP (1 << 0)
struct virtio_gpu_resource_create_3d {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t target;
uint32_t format;
uint32_t bind;
uint32_t width;
uint32_t height;
uint32_t depth;
uint32_t array_size;
uint32_t last_level;
uint32_t nr_samples;
uint32_t flags;
uint32_t padding;
};
/* VIRTIO_GPU_CMD_CTX_CREATE */
#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t nlen;
uint32_t context_init;
char debug_name[64];
};
/* VIRTIO_GPU_CMD_CTX_DESTROY */
struct virtio_gpu_ctx_destroy {
struct virtio_gpu_ctrl_hdr hdr;
};
/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */
struct virtio_gpu_ctx_resource {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t padding;
};
/* VIRTIO_GPU_CMD_SUBMIT_3D */
struct virtio_gpu_cmd_submit {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t size;
uint32_t padding;
};
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t capset_index;
uint32_t padding;
};
/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */
struct virtio_gpu_resp_capset_info {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t capset_id;
uint32_t capset_max_version;
uint32_t capset_max_size;
uint32_t padding;
};
/* VIRTIO_GPU_CMD_GET_CAPSET */
struct virtio_gpu_get_capset {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t capset_id;
uint32_t capset_version;
};
/* VIRTIO_GPU_RESP_OK_CAPSET */
struct virtio_gpu_resp_capset {
struct virtio_gpu_ctrl_hdr hdr;
uint8_t capset_data[];
};
/* VIRTIO_GPU_CMD_GET_EDID */
struct virtio_gpu_cmd_get_edid {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t scanout;
uint32_t padding;
};
/* VIRTIO_GPU_RESP_OK_EDID */
struct virtio_gpu_resp_edid {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t size;
uint32_t padding;
uint8_t edid[1024];
};
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
uint32_t events_read;
uint32_t events_clear;
uint32_t num_scanouts;
uint32_t num_capsets;
};
/* simple formats for fbcon/X use */
enum virtio_gpu_formats {
VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2,
VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3,
VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4,
VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68,
VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};
/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */
struct virtio_gpu_resource_assign_uuid {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t padding;
};
/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */
struct virtio_gpu_resp_resource_uuid {
struct virtio_gpu_ctrl_hdr hdr;
uint8_t uuid[16];
};
/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
struct virtio_gpu_resource_create_blob {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
/* zero is invalid blob mem */
uint32_t blob_mem;
uint32_t blob_flags;
uint32_t nr_entries;
uint64_t blob_id;
uint64_t size;
/*
 * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
*/
};
/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
struct virtio_gpu_set_scanout_blob {
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
uint32_t scanout_id;
uint32_t resource_id;
uint32_t width;
uint32_t height;
uint32_t format;
uint32_t padding;
uint32_t strides[4];
uint32_t offsets[4];
};
/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
struct virtio_gpu_resource_map_blob {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t padding;
uint64_t offset;
};
/* VIRTIO_GPU_RESP_OK_MAP_INFO */
#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
#define VIRTIO_GPU_MAP_CACHE_WC 0x03
struct virtio_gpu_resp_map_info {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t map_info;
uint32_t padding;
};
/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
struct virtio_gpu_resource_unmap_blob {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t resource_id;
uint32_t padding;
};
#endif