FFmpeg Looking Glass Frame Buffer Device

I’ve been doing a bit of development with FFmpeg to try and add a device for decoding the Looking Glass framebuffer. Long story short I now have working code that appears to be presentable. I’m making this post to attempt to gauge interest to see if it would be worth the trouble of trying to get it added into the main branch of FFmpeg.

diff --git a/libavdevice/KVMFR.h b/libavdevice/KVMFR.h
new file mode 100644
index 0000000000..c4c3d19495
--- /dev/null
+++ b/libavdevice/KVMFR.h
(No new code here — KVMFR.h is copied verbatim from the Looking Glass project.)

diff --git a/libavdevice/Makefile b/libavdevice/Makefile
index 6ea62b914e..cffd1b8e79 100644
--- a/libavdevice/Makefile
+++ b/libavdevice/Makefile
@@ -31,6 +31,7 @@ OBJS-$(CONFIG_GDIGRAB_INDEV)             += gdigrab.o
 OBJS-$(CONFIG_IEC61883_INDEV)            += iec61883.o
 OBJS-$(CONFIG_JACK_INDEV)                += jack.o timefilter.o
 OBJS-$(CONFIG_KMSGRAB_INDEV)             += kmsgrab.o
+OBJS-$(CONFIG_KVMFR_INDEV)               += kvmfr_dec.o
 OBJS-$(CONFIG_LAVFI_INDEV)               += lavfi.o
 OBJS-$(CONFIG_OPENAL_INDEV)              += openal-dec.o
 OBJS-$(CONFIG_OPENGL_OUTDEV)             += opengl_enc.o
diff --git a/libavdevice/alldevices.c b/libavdevice/alldevices.c
index 8633433254..61fe350997 100644
--- a/libavdevice/alldevices.c
+++ b/libavdevice/alldevices.c
@@ -39,6 +39,7 @@ extern AVInputFormat  ff_gdigrab_demuxer;
 extern AVInputFormat  ff_iec61883_demuxer;
 extern AVInputFormat  ff_jack_demuxer;
 extern AVInputFormat  ff_kmsgrab_demuxer;
+extern AVInputFormat  ff_kvmfr_demuxer;
 extern AVInputFormat  ff_lavfi_demuxer;
 extern AVInputFormat  ff_openal_demuxer;
 extern AVOutputFormat ff_opengl_muxer;
diff --git a/libavdevice/kvmfr_dec.c b/libavdevice/kvmfr_dec.c
new file mode 100644
index 0000000000..aa0e5097cb
--- /dev/null
+++ b/libavdevice/kvmfr_dec.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2009 Giliard B. de Freitas <[email protected]>
+ * Copyright (C) 2002 Gunnar Monell <[email protected]>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Looking Glass framebuffer input device.
+ * Mirrors some code from fbdev_dec.c.
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+
+#include "libavutil/internal.h"
+#include "libavutil/log.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/time.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
+#include "KVMFR.h"
+
+typedef struct KVMFRContext {
+    AVClass *class;          ///< class for private options
+    int frame_size;          ///< size in bytes of a grabbed frame
+    AVRational framerate_q;  ///< framerate
+    int64_t time_frame;      ///< time for the next frame to output (in 1/1000000 units)
+
+    int                  shmFD; ///< file descriptor of the KVMFR shared-memory file
+    struct KVMFRHeader * shm;   ///< mmap()ed view of the shared-memory file
+    int shmFD_size;          ///< size in bytes of the mapped shared-memory file
+    int width, height;       ///< assumed frame resolution
+    int frame_linesize;      ///< linesize of the output frame, it is assumed to be constant
+    int bytes_per_pixel;     ///< bytes per pixel derived from the KVMFR frame type
+
+    uint8_t *data;           ///< pointer into the mapping at the current frame's dataPos
+
+} KVMFRContext;
+
+/**
+ * Open and map the Looking Glass shared-memory file, validate the KVMFR
+ * header it contains and create the single rawvideo stream.
+ */
+static av_cold int kvmfr_read_header(AVFormatContext *avctx)
+{
+    KVMFRContext *kvmfr = avctx->priv_data;
+    AVStream *st = NULL;
+    enum AVPixelFormat pix_fmt;
+    int ret, bpp, flags = O_RDWR;
+    const char* shared_mem;
+    struct stat file_stat;
+    /* Maps the KVMFR FrameType enum to bits-per-pixel and AVPixelFormat;
+     * types FFmpeg cannot represent map to 0 / AV_PIX_FMT_NONE. */
+    static const int bpp_map[6] = {0, 32, 32, 32, 12, 0};
+    static const enum AVPixelFormat pix_map[6] = {
+            AV_PIX_FMT_NONE, AV_PIX_FMT_BGRA, AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};
+
+    if (!(st = avformat_new_stream(avctx, NULL)))
+        return AVERROR(ENOMEM);
+    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */
+
+    /* Fall back to the default Looking Glass shared-memory path. */
+    if (avctx->url[0])
+        shared_mem = avctx->url;
+    else
+        shared_mem = "/dev/shm/looking-glass";
+
+    if ((kvmfr->shmFD = avpriv_open(shared_mem, flags)) == -1) {
+        ret = AVERROR(errno);
+        av_log(avctx, AV_LOG_ERROR,
+               "Could not open looking-glass file '%s': %s\n",
+               shared_mem, av_err2str(ret));
+        return ret;
+    }
+
+    /* fstat() the descriptor we already opened (stat()ing the path again
+     * would be a TOCTOU race) and check the result before using st_size. */
+    if (fstat(kvmfr->shmFD, &file_stat) < 0) {
+        ret = AVERROR(errno);
+        av_log(avctx, AV_LOG_ERROR, "Error in fstat(): %s\n", av_err2str(ret));
+        goto fail;
+    }
+    kvmfr->shmFD_size = file_stat.st_size;
+    if (kvmfr->shmFD_size < (int)sizeof(struct KVMFRHeader)) {
+        ret = AVERROR(EINVAL);
+        av_log(avctx, AV_LOG_ERROR,
+               "Shared memory file '%s' is too small to hold a KVMFR header.\n",
+               shared_mem);
+        goto fail;
+    }
+    kvmfr->shm = (struct KVMFRHeader *)mmap(0, kvmfr->shmFD_size, PROT_READ | PROT_WRITE, MAP_SHARED, kvmfr->shmFD, 0);
+    if (kvmfr->shm == MAP_FAILED) {
+        ret = AVERROR(errno);
+        av_log(avctx, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(ret));
+        goto fail;
+    }
+
+    /* Ask the host application to restart the stream from a clean frame. */
+    __sync_or_and_fetch(&kvmfr->shm->flags, KVMFR_HEADER_FLAG_RESTART);
+
+    /* frame.type is read from guest-writable shared memory: bounds-check it
+     * before using it as a table index. */
+    if ((unsigned)kvmfr->shm->frame.type >= FF_ARRAY_ELEMS(pix_map) ||
+        (pix_fmt = pix_map[kvmfr->shm->frame.type]) == AV_PIX_FMT_NONE) {
+        ret = AVERROR(EINVAL);
+        av_log(avctx, AV_LOG_ERROR,
+               "Framebuffer pixel format not supported.\n");
+        goto fail_unmap;
+    }
+    bpp = bpp_map[kvmfr->shm->frame.type];
+
+    kvmfr->width           = kvmfr->shm->frame.width;
+    kvmfr->height          = kvmfr->shm->frame.height;
+    kvmfr->bytes_per_pixel = (bpp + 7) >> 3;
+    kvmfr->frame_linesize  = kvmfr->width * kvmfr->bytes_per_pixel;
+    /* Size the frame in bits so 12 bpp planar YUV420 comes out as w*h*3/2;
+     * linesize*height (bpp rounded up to whole bytes) would over-read the
+     * mapping by w*h/2 bytes for that format. Identical for the 32 bpp types. */
+    kvmfr->frame_size      = (kvmfr->width * kvmfr->height * bpp) >> 3;
+    kvmfr->time_frame      = AV_NOPTS_VALUE;
+    kvmfr->data            = (uint8_t *)kvmfr->shm + kvmfr->shm->frame.dataPos;
+
+    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
+    st->codecpar->width      = kvmfr->width;
+    st->codecpar->height     = kvmfr->height;
+    st->codecpar->format     = pix_fmt;
+    st->avg_frame_rate       = kvmfr->framerate_q;
+    st->codecpar->bit_rate   = kvmfr->frame_size * av_q2d(kvmfr->framerate_q) * 8;
+
+    av_log(avctx, AV_LOG_INFO,
+           "w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%"PRId64"\n",
+           kvmfr->width, kvmfr->height, bpp,
+           av_get_pix_fmt_name(pix_fmt),
+           kvmfr->framerate_q.num, kvmfr->framerate_q.den,
+           st->codecpar->bit_rate);
+    return 0;
+
+fail_unmap:
+    munmap(kvmfr->shm, kvmfr->shmFD_size);
+fail:
+    close(kvmfr->shmFD);
+    return ret;
+}
+
+/**
+ * Emit one frame, pacing output to the requested frame rate.
+ * Returns the frame size on success, AVERROR(EAGAIN) in non-blocking mode
+ * when it is not yet time for the next frame, or a negative error code.
+ */
+static int kvmfr_read_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+    KVMFRContext *kvmfr = avctx->priv_data;
+    int64_t curtime, delay;
+    struct timespec ts;
+    uint64_t data_pos;
+    int ret;
+
+    if (kvmfr->time_frame == AV_NOPTS_VALUE)
+        kvmfr->time_frame = av_gettime();
+
+    /* wait based on the frame rate */
+    while (1) {
+        curtime = av_gettime();
+        delay = kvmfr->time_frame - curtime;
+        av_log(avctx, AV_LOG_TRACE,
+                "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
+                kvmfr->time_frame, curtime, delay);
+        if (delay <= 0) {
+            kvmfr->time_frame += INT64_C(1000000) / av_q2d(kvmfr->framerate_q);
+            break;
+        }
+        if (avctx->flags & AVFMT_FLAG_NONBLOCK)
+            return AVERROR(EAGAIN);
+        ts.tv_sec  =  delay / 1000000;
+        ts.tv_nsec = (delay % 1000000) * 1000;
+        while (nanosleep(&ts, &ts) < 0 && errno == EINTR);
+    }
+
+    /* Refresh the frame pointer every packet: the host double-buffers and
+     * flips frame.dataPos between buffers. The offset comes from shared
+     * memory, so validate it against the mapping before reading from it. */
+    data_pos = kvmfr->shm->frame.dataPos;
+    if (data_pos > (uint64_t)kvmfr->shmFD_size ||
+        (uint64_t)kvmfr->frame_size > (uint64_t)kvmfr->shmFD_size - data_pos) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Invalid frame data offset in shared memory.\n");
+        return AVERROR(EINVAL);
+    }
+    kvmfr->data = (uint8_t *)kvmfr->shm + data_pos;
+
+    if ((ret = av_new_packet(pkt, kvmfr->frame_size)) < 0)
+        return ret;
+
+    pkt->pts = curtime;
+
+    /* Copy the frame out BEFORE acknowledging it: clearing the update flag
+     * first lets the host overwrite the buffer mid-copy and tear the frame. */
+    memcpy(pkt->data, kvmfr->data, kvmfr->frame_size);
+
+    __sync_and_and_fetch(&kvmfr->shm->frame.flags, ~KVMFR_FRAME_FLAG_UPDATE);
+
+    return kvmfr->frame_size;
+}
+
+/** Tear down the device: unmap the shared memory, then close its descriptor. */
+static av_cold int kvmfr_read_close(AVFormatContext *avctx)
+{
+    KVMFRContext *ctx = avctx->priv_data;
+
+    /* Release the mapping first, then the file descriptor backing it. */
+    munmap(ctx->shm, ctx->shmFD_size);
+    close(ctx->shmFD);
+
+    return 0;
+}
+
+#define OFFSET(x) offsetof(KVMFRContext, x)
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+    /* Rate at which frames are copied out of shared memory, frames/second. */
+    { "framerate","", OFFSET(framerate_q), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, DEC },
+    { NULL },
+};
+
+/* AVClass exposing the private "framerate" option to the AVOption system. */
+static const AVClass kvmfr_class = {
+    .class_name = "kvmfr indev",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+};
+
+/* Input device definition. AVFMT_NOFILE: we open the shared-memory file
+ * ourselves in read_header rather than going through the AVIO layer. */
+AVInputFormat ff_kvmfr_demuxer = {
+    .name           = "kvmfr",
+    .long_name      = NULL_IF_CONFIG_SMALL("KVM frame Relay"),
+    .priv_data_size = sizeof(KVMFRContext),
+    .read_header    = kvmfr_read_header,
+    .read_packet    = kvmfr_read_packet,
+    .read_close     = kvmfr_read_close,
+    .flags          = AVFMT_NOFILE,
+    .priv_class     = &kvmfr_class,
+};

kvmfr.diff.txt (8.9 KB)

Edits: Embedded git diff into post. Same content as file.

4 Likes

Hi, Do you have a link to a github or something?

No offence, but not gonna download random stuff off a forum post

What would you use it for?

I’m absolutely interested!

I know @gnif had talked about building an OBS plugin, and if I understand OBS correctly, this might be a step to that end.

I’ve been hoping to be able to use something like this to stream my VM to twitch, so I don’t use the VM’s CPU for encoding.

Make a PR and see if the maintainers would accept it. As long as the code passes muster and doesn’t introduce any outrageous security holes, I don’t see that it wouldn’t be introduced.

I started on this because I was hoping to do custom in-home streaming with AMD GPUs. I have a Vega 56 on Windows and an RX 480 on Linux in my setup. I couldn’t get better than 900p encoding trying to use Steam In-Home Streaming from my Vega 56 because the hardware encoder does not perform that well. I had hoped I could use my 480 on the Linux side as dedicated hardware to stream 1080p. Unfortunately I did latency testing and there was usually 200ms of latency vs Steam, which reported 30–60ms latency.

So ultimately it seemed cool but not so great for my initial use case.

It’s a solid idea but please note that the KVMFR format is still in flux. For example, this will not work on the latest version of LG as the frame is now streamed in chunks. At some point when I find the time the KVMFR protocol/interface is likely to be redesigned based on lessons learned (likely using mailboxes, etc). This is needed for multi -client support such as OBS + the LG client, etc.

1 Like

My branch works fine for me, currently using the beta 1 host:

You may use that one until there’s an update, but as gnif said, you can only connect one client with this version and it does not aim for lowest latency like the official client does.
I didn’t check the protocol changes as i can just lean back and keep using this version for now :stuck_out_tongue_closed_eyes:

( I think this is a bit off topic because it does not touch ffmpeg at all ^^ )

1 Like

A direct pipe to DNxHR encoding would be pretty handy if there’s a way to make FFmpeg real-time encode the frame buffer. You can game in Windows, then directly take the MOV file FFmpeg spits out into Davinci Resolve, and all of a sudden, you have a Let’s Play setup!

The other thing would be Decklink output of the actual framebuffer rather than capturing the screen using HDMI splitters.

Exactly, since Beta 1 the KVMFR format has changed.

That would be nice, but damn, those file sizes. You’d need a fucking array for a single let’s play, even at dnxhd_sq.

That’s why 2TB PCI-E Gen 4 SSDs exist. I really want to try to do this with a TRX40 setup, where you game on the VM, then combine captures on the guest in Davinci Resolve, so that your Ninja Inferno can focus on the facecam and your FFmpeg pipe does the encoding for this “pseudo-FRAPS” solution. (Framebuffer capture is going to be so much better than capturing the GPU’s HDMI.)

If I can get enough traction behind the project it would help a lot if I had the hardware to work with this. Using the framebuffer in OBS helps streaming, and using the framebuffer directly in FFmpeg allows for a direct pipe for recording. (since OBS’ FFmpeg DNxHD/DNxHR encoding is currently broken, using vanilla FFmpeg would be ideal)

Ultimately, this is down to Davinci Resolve not licensed to use MPEG LA stuff for H264 and H265 on Linux, hence why the file sizes are so big.

Or just buy the license and H264 works.

This is getting off topic, if you’d like to continue, let’s branch off.

Technically not 100% true. It has to be a SPECIFIC container and you can only export in specific containers for it if you get Studio. You’re still limited, and it’s NVENC based encoding for export. They can’t include libx264 for the same reason Valve can’t include WMV/WMA in Proton.

HI SOMEONE MANAGED TO USE IT IN A MUXLESS SETUP?
IM STUCK WITH THIS MX150 PC AND I WANT TO PASSTHROUGH THE DGPU BUT EVERYTHING LACKS DOCUMENTATION

I have already asked you once to stop typing in all capitals and you already have a thread open on this topic, this is your now second warning.

With the advent of the bulletin board system, or BBS, and later the Internet, typing messages in all caps commonly became closely identified with “shouting” or attention-seeking behavior, and may be considered rude. Its equivalence to shouting traces back to at least 1984 and before the Internet, back to printed typography usage of all capitals to mean shouting.

Thread closed as it’s been dead for four months anyway.